diff --git a/.codecov.yml b/.codecov.yml index 69114fc847b..ebcd2fc3b13 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -9,14 +9,14 @@ coverage: status: project: default: - target: 90% - threshold: 1% + target: 95% + threshold: 0% branches: null patch: default: - target: 95% - threshold: 0% + target: 100% + threshold: 1% branches: null changes: false diff --git a/.gitignore b/.gitignore index 85f471a077f..9c932eec0ae 100644 --- a/.gitignore +++ b/.gitignore @@ -15,10 +15,14 @@ docs/doxygen_example_output/* tests/nosetests.xml examples/output/* devutils/sizeof_SIFD -gs.error +gs_error.txt *junk* debug.out tmp* examples_bin *~ .cache +*.egg* +build +dist +.pytest_cache diff --git a/.travis.yml b/.travis.yml index da2fcf109e9..c95f91b1697 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,15 +5,10 @@ branches: language: python python: - #- 2.6 # disabled until Travis fixes their issue #6732 - # https://github.com/travis-ci/travis-ci/issues/6732 - 2.7 - 3.4 - 3.5 - #- 3.6 # disabled until Travis fixes their issue #4990 - # https://github.com/travis-ci/travis-ci/issues/4990 - # i.e. has python 3.6 pre-installed. Otherwise it can time out from having to install - # all the python 3.6 stuff from scratch. + - 3.6 compiler: - g++ @@ -21,83 +16,54 @@ compiler: before_install: - export PATH=$(echo $PATH | tr ':' "\n" | sed '/\/opt\/python/d' | tr "\n" ":" | sed "s|::|:|g") - sudo apt-get -qq update - - sudo apt-get install -y python-dev libfftw3-dev scons libblas-dev liblapack-dev gfortran libav-tools + - sudo apt-get install -y python-dev libfftw3-dev libav-tools libeigen3-dev # List current contents of directories that should be being cached. - ls -l $HOME - - if test -d $HOME/tmv-0.73; then ls -l $HOME/tmv-0.73; fi - - if test -d $HOME/boost_1_61_0; then ls -l $HOME/boost_1_61_0; fi - if test -d $HOME/des_data; then ls -l $HOME/des_data; fi - # Add ~/bin and ~/lib, etc. to the appropriate paths where scons install will put things. 
- - export PYHOME=$HOME/virtualenv/python${TRAVIS_PYTHON_VERSION} - - export PATH=$HOME/bin:$PATH - - export LD_LIBRARY_PATH=$HOME/lib:$LD_LIBRARY_PATH - - # Fix a directory name in 3.x installations so boost can find it. - - if test -d $PYHOME/include/python${TRAVIS_PYTHON_VERSION}m; then ln -s $PYHOME/include/python${TRAVIS_PYTHON_VERSION}m $PYHOME/include/python${TRAVIS_PYTHON_VERSION}; fi - - # To get coverage of the WcsToolsWCS class: - #- sudo add-apt-repository "deb http://archive.ubuntu.com/ubuntu $(lsb_release -sc) universe" - #- sudo apt-get -qq update - #- sudo apt-get install -y wcstools - # Hm. This didn't work, and I can't figure out why. I get the following error: - # Reading package lists... Done - # Building dependency tree - # Reading state information... Done - # E: Unable to locate package wcstools - # Perhaps someone with more familiarity with apt-get can figure this out, but for now, we'll - # live with lack of coverage of WcsToolsWCS. - - # Only get TMV if not cached - - pushd $HOME - - if ! test -d tmv-0.73 || ! test -f tmv-0.73/SConstruct; then wget https://github.com/rmjarvis/tmv/archive/v0.73.tar.gz && tar -xf v0.73.tar.gz ; else echo Using cached TMV; fi - # But always install it to /usr/local - - cd tmv-0.73 && sudo scons install - - popd - - # Only get Boost if not cached - - pushd $HOME - - if ! test -d boost_1_61_0 || ! test -f boost_1_61_0/bootstrap.sh; then wget https://sourceforge.net/projects/boost/files/boost/1.61.0/boost_1_61_0.tar.bz2 --no-check-certificate && tar --bzip2 -xf boost_1_61_0.tar.bz2 && cd boost_1_61_0 && ./bootstrap.sh --with-python=python$TRAVIS_PYTHON_VERSION && ./b2 link=shared && cd ..; else echo Using cached Boost; fi - - cd boost_1_61_0 && sudo ./b2 -d0 link=shared install - - popd - # Get the des data needed for the check_des test. - if ! test -d $HOME/des_data || ! 
test -f $HOME/des_data/DECam_00154912_01.fits.fz; then wget http://www.sas.upenn.edu/~mjarvis/des_data.tar.gz && tar xfz des_data.tar.gz -C $HOME --wildcards *_01*; fi - ln -s $HOME/des_data examples/des/ cache: + ccache: true pip: true directories: - - $HOME/tmv-0.73 - - $HOME/boost_1_61_0 - $HOME/des_data install: - # Travis doesn't always have the most up-to-date nupy already installed, so use -U - - travis_wait 30 pip install -U numpy + # Install the requirements + # Use -U to make sure we get the latest versions of everything so we notice any + # incompatibilities as soon as possible. + - pip install -U -r requirements.txt + + # Also some things just required for tests + # (This includes scipy, which can take a while to install. So tell Travis to be patient.) + - travis_wait 30 pip install -U -r test_requirements.txt + # astroplan isn't available on 2.7 + - if [[ $TRAVIS_PYTHON_VERSION > 3.0 ]]; then pip install astroplan; fi + # Note: matplotlib is only required because starlink has an `import matplotlib` in their # code, despite that not being a dependency. - # Allow 30 minutes for this one, since sometimes scipy in particular can take a long time - # if it's not in the cache yet. - - travis_wait 30 pip install numpy astropy future lsstdesc.coord pyyaml starlink-pyast nose codecov coveralls matplotlib==1.5.0 scipy pandas coverage - - if [[ $TRAVIS_PYTHON_VERSION == 2.6 ]]; then pip install simplejson ordereddict; fi - # astroplan is not available for 2.7 - - if [[ $TRAVIS_PYTHON_VERSION > 3.0 ]]; then pip install astroplan; fi + - pip install matplotlib + + # Finally, a few things for the code coverage + - pip install nose codecov coveralls coverage + + - pip list script: - # This lets scons work even on Python 3 builds - # cf. https://github.com/travis-ci/travis-ci/issues/5961 - - source $HOME/virtualenv/python2.7/bin/activate - # But now we need to manually set the python, since it's not the default in this virtualenv. 
- - scons PREFIX=$HOME PYTHON=$PYHOME/bin/python PYPREFIX=$PYHOME/lib/python${TRAVIS_PYTHON_VERSION}/site-packages BOOST_DIR=$PYHOME && scons install - - if test -f gs.error; then cat gs.error; fi - # Go back to the regular python environment for the tests - - source $PYHOME/bin/activate + # Install GalSim + - python setup.py install + # If galsim_download_cosmos.py changed, then run it. - - if git --no-pager diff $TRAVIS_COMMIT_RANGE --name-only | grep -Fxq 'bin/galsim_download_cosmos.py'; then galsim_download_cosmos -s 23.5 -v1; fi + - echo $TRAVIS_COMMIT_RANGE + - if git --no-pager diff $TRAVIS_COMMIT_RANGE --name-only | grep -Fxq 'galsim/download_cosmos.py'; then galsim_download_cosmos -s 23.5 -v1; fi + - cd tests - # Use this rather than scons tests, so we can get the coverage options. + + # Use this rather than setup.py test, so we can get the coverage options. - "nosetests test*.py --with-coverage --cover-package=galsim --with-doctest --cover-erase" # Without cover-erase, this will append to the .coverage file - "nosetests run_examples.py --with-coverage --cover-package=galsim --with-doctest" diff --git a/CHANGELOG.md b/CHANGELOG.md index 97705fec45e..5dda2b1df5b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,8 +12,12 @@ Dependency Changes GalSim as the Angle and CelestialCoord classes. We moved it to a separate repo so people could more easily use this functionality without requiring all of GalSim as a dependency. (#809b) -- Removed dependency on boost. -- Added dependency on (pybind11 or cffi...) +- Removed dependency on boost. (#809) +- Removed dependency on TMV. (#809) +- Added dependency on pybind11. (#809) +- Added dependency on Eigen. (#809) +- FFTW is now the only dependency that pip cannot handle automatically. (#809) +- Officially no longer support Python 2.6. (Pretty sure no one cares.) 
API Changes @@ -43,6 +47,8 @@ API Changes InclinedSersic has been changed to disk_half_light_radius, since it does not really correspond to the realized half-light radius of the inclined profile (unless the inclination angle is 0 degrees). (#809f) +- Removed galsim_yaml and galsim_json scripts, which were essentially just + aliases for galsim -f yaml and galsim -f json respectively. (#809f) Bug Fixes diff --git a/INSTALL.md b/INSTALL.md index 3c9ca3c3353..f0567b71e11 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -5,918 +5,479 @@ System requirements: GalSim currently only supports Linux and Mac OSX. Table of Contents: -0) [Overall summary](#0-overall-summary) +1) [Overall summary](#overall-summary) -1) [Software required before building GalSim](#1-software-required-before-building-galsim) +2) [Installing FFTW](#installing-fftw) + * [Installing FFTW yourself](#i-installing-fftw-yourself) + * [Using an existing installation](#ii-using-an-existing-installation-of-fftw) + * [Using conda](#iii-installing-fftw-with-conda) + * [Using apt-get](#iv-installing-fftw-with-apt-get) + * [Using fink](#v-installing-fftw-with-fink) + * [Using MacPorts](#vi-installing-fftw-with-macports) -2) [Installing the GalSim Python package](#2-installing-the-galsim-python-package) +3) [Installing Eigen](#installing-eigen) + * [Installing Eigen yourself](#i-installing-eigen-yourself) + * [Using an existing installation](#ii-using-an-existing-installation-of-eigen) + * [Using conda](#iii-installing-eigen-with-conda) + * [Using apt-get](#iv-installing-eigen-with-apt-get) + * [Using fink](#v-installing-eigen-with-fink) + * [Using MacPorts](#vi-installing-eigen-with-macports) + * [Using eigency](#vii-using-eigency) -3) [Running tests and installing example executables](#3-running-tests-and-installing-example-executables) +4) [Using Conda](#using-conda) -4) [Running example scripts](#4-running-example-scripts) +5) [Installing With SCons](#installing-with-scons) -5) [Platform-specific 
notes](#5-platform-specific-notes) +6) [Running tests](#running-tests) -6) [More SCons options](#6-more-scons-options) +7) [Running example scripts](#running-example-scripts) -0. Overall summary -================== +Overall summary +=============== -While the sections below detail how to install GalSim including its required and -optional dependencies, this section gives a brief summary. A minimal -installation of GalSim requires the following dependencies. This dependency list -includes a canonical version number that is known to work. In most cases, other -recent versions will also work: +GalSim is a python module that has much of its implementation in C++ for +improved computational efficiency. GalSim supports both Python 2 and +Python 3. It is regularly tested on Python versions (2.7, 3.4, 3.5, 3.6). -- Python (2.7, 3.4, 3.5, 3.6) -- SCons (2.1.0) -- NumPy (1.11) -- LSSTDESC.Coord (1.0.4) -- Astropy (1.1.1) -- Future (0.16.0) -- FFTW (3.3) -- TMV (0.73) -- Boost (1.61) - -A few optional dependencies provide additional functionality, but GalSim can -otherwise be compiled and used without them. Basic WCS functionality is native -to GalSim, but for users with more complicated WCS needs, we recommend -installing starlink-pyast. Thee Astropy WCS package is also supported, but note -that it requires scipy as an additional dependency. To use yaml for config -parsing, the pyyaml module is needed. Faster text file parsing for reading in -bandpasses and SEDs can be enabled if you have the pandas module (but the code -will work, albeit more slowly, without this module). - -The sections below give a lot more details about how to obtain these -dependencies; many are available from sources like pip or easy_install, rather -than having to be installed from source. Third party packages like Anaconda -often include many of these dependencies automatically. GalSim and all of its -dependencies can be installed via fink, for users with Macs. - -1. 
Software required before building GalSim -=========================================== - -Please note: Mac users who want to use fink can skip down to Section 5.ii and -use that to satisfy all dependencies before installing. - -i) Python (2.7, 3.4, 3.5, or 3.6 series), with some additional modules installed --------------------------------------------------------------------------------- - -The interface to the GalSim code is via the Python package `galsim`, and its -associated modules. Therefore you must have Python installed on your system. -Python is free, and available from a number of sources online (see below). -Currently GalSim supports Python versions 2.7, 3.4, 3.5, and 3.6. It is likely -that other Python 3.x versions are compatible, but these two are the only ones -actively tested. - -Many systems have a version of Python pre-installed. To check whether you -already have a compatible version, type - - python --version - -at the terminal prompt. If you get a "Command not found" error, or the reported -version is not one of the supported versions, you should read the "Getting -Python and required modules" section below. - -It may be that there is or soon will be more than one version of Python -installed on your operating system, in which case please see the "Making sure -you are using the right Python" Section below. - -### Getting Python and required modules ### - -For a list of places to download Python, see http://www.python.org/download/. - -The GalSim package also requires - -* the numerical Python module NumPy (http://www.numpy.org). Currently GalSim is - regularly tested to ensure it works with NumPy 1.11.2, but other versions will - likely work. 
- -* the astronomical FITS file input/output module PyFITS available - either as a standalone package: - http://www.stsci.edu/institute/software_hardware/pyfits - or as part of the astropy library: - http://www.astropy.org/ - The latter is preferred, since this is now where all future development of - this package is happening. Currently GalSim is regularly tested to ensure - it works with astropy version 1.1.1, but it is likely that most recent - versions will also work. - -* the future module, which is used to ease compatibility between Python 2 - and Python 3. Currently GalSim is regularly tested to ensure - it works with version 0.16.0 of this module, but other versions may work. - -* the PyYAML package for parsing YAML files (http://pyyaml.org/wiki/PyYAML) - Note: PyYAML is only technically required if you are using the `galsim` - executable for parsing YAML config files. Users who will only use GalSim - in Python (or use only JSON config files) may skip this dependency. - Currently GalSim is regularly tested to ensure it works with version 3.12 - of this package, but other versions may work. - -* the LSSTDESC.Coord module (https://github.com/LSSTDESC/Coord), which is - used for angles and coordinates. It is a faster alternative to the - astropy.coordinates module for the use cases that we need. - -* Optional dependency: PyAst WCS package. This is a really nice front end - for the Starlink AST astrometry code. It seems to support pretty much - every WCS encoding there is. (At least every one we tried.) Their - preferred installation method is via pip: - pip install starlink-pyast - For more information, see their website: - https://pypi.python.org/pypi/starlink-pyast/ - With this installed, you can use the galsim.PyAstWCS class, which in - turn means that galsim.FitsWCS will pretty much always work. - -* Optional dependency: Astropy WCS package. We already mentioned astropy - above for astropy.io.fits. 
Another package we can use from astropy is - their WCS package, astropy.wcs. They cannot read all that many WCS types - (compared to PyAst at least), but hopefully the functionality will include - in time. Unfortunately, this package has scipy as a dependency, which - is kind of a gargantuan package. But if you are willing to install that - too, then you can use the galsim.AstropyWCS class. - -* Optional dependency: Astropy Units package. This is now required for - GalSim chromatic functionality, but can be omitted if you are not using - this part of GalSim. - -* Optional dependency: Pandas. This has a very fast function for reading ASCII - tables. If this is not available (e.g. when reading in Bandpass or SED - files) then we fall back to the (much) slower numpy loadtxt function. - -These should installed onto your Python system so that they can be imported by: - - >>> import numpy - >>> import astropy.io.fits [ Either this (preferred)... ] - >>> import pyfits [ ... or this. ] - >>> import future - >>> import yaml - >>> import coord - >>> import starlink.Ast [ if planning to use PyAstWCS class ] - >>> import astropy.wcs [ if planning to use AstropyWCS class ] - >>> import pandas [ for faster ASCII table input ] - -within Python. You can test this by loading up the Python interpreter for the -version of Python you will be using with the GalSim toolkit. This is usually -achieved by typing `python` or `/path/to/executable/bin/python` if your desired -Python is not the system default, and typing the `import` commands above. If -you get no warning message, things are OK. - -If you do not have these modules, follow the links above or alternatively try -`easy_install` (or equivalently `/path/to/executable/bin/easy_install` if your -desired Python is not the default). 
- -As an example, if using the default system Python, connected to the internet -and with root/admin privileges simply type - - easy_install numpy - easy_install pyfits - easy_install future - easy_install pyyaml - -at the prompt. If not using an admin account, prefix the commands above with -`sudo` and enter your admin password when prompted. The required modules should -then be installed. - -See http://packages.python.org/distribute/easy_install.html#using-easy-install -for more details about the extremely useful `easy_install` feature. - -Another option for installing these packages is pip. See pypi.python.org for -details about getting this installed if you do not already have it on your -system. Then - - pip install numpy - pip install astropy - pip install future - pip install pyyaml - pip install lsstdesc.coord - pip install starlink-pyast - pip install scipy - -### Third party-maintained Python packages ### - -There are a number of third party-maintained packages which bundle Python with -many of the numerical and scientific libraries that are commonly used, and -many of these are free for non-commercial or academic use. - -One good example of such a package, which includes all of the Python -dependencies required by GalSim (NumPy, PyFITS, PyYAML as well as SCons and -pytest; see Section 2 below) was the Enthought Python Distribution (EPD; see -https://enthought.com/products/canopy/academic/ for the academic download -instructions). - -The new Enthought "Canopy" package, a successor to EPD, provides the same -functionality. However, it has been found that Canopy on Mac OSX can give -problems building against Boost.Python, another GalSim dependency. A solution -to these issues is described here: -https://github.com/GalSim-developers/GalSim/wiki/Installation-FAQ#wiki-canopy - -Other re-packaged Python downloads can be found at -http://www.python.org/download/. 
- -### Making sure you are using the right Python ### - -Some users will find they have a few versions of Python around their operating -system (determined, for example, using `locate python` at the prompt). A common -way this will happen if there is already an older build (e.g. Python 2.4.X) -being used by the operating system and then you install a newer version from -one of the sources described above. - -It will be important to make sure that the version of Python for which NumPy, -PyFITS and PyYAML etc. are installed is also the one being used for GalSim, -and that this is the one *you* want to use GalSim from! Knowing which installed -version of Python will be used is also important for the installation of the -Boost libraries (see Section 1.v, below). - -To check which Python is your default you can identify the location of the -executable by, for example, typing - - which python - -at the prompt. This will tell you the location of the executable, something like - /path/to/executable/bin/python +The usual way to install GalSim is now (starting with version 2.0) simply -If this is not the Python you want, please edit your startup scripts (e.g. -`.profile` or `.bashrc`), and be sure to specify where your desired Python -version resides when installing the Boost C++ libraries (see Section 1.v). + pip install galsim -See Section 5 of this document for some suggestions about getting Python, Boost -and all the other dependencies all working well together on your specific -system. +which will install the latest official release of GalSim. +Note that you may need to use sudo with the above command if you are installing +into system directories. If you do not have write privileges for the directory +it is trying to install into, you can use the --user flag to install into a +local directory instead. (Normally something like $HOME/Library/Python/2.7 +or $HOME/.local, depending on your system.) 
-ii) SCons (http://www.scons.org) --------------------------------- - -GalSim uses SCons to configure and run its installation procedures, and so SCons -needs to be installed in advance. Versions 2.0 and 2.1 of SCons get reasonable -testing with GalSim, but it should also work with 1.x versions. You can check -if it is installed, and if so which version, by typing +If you would rather install from source (e.g. to work on a development branch), +you can do - scons --version + git clone git@github.com:GalSim-developers/GalSim.git + cd GalSim + pip install -r requirements.txt + python setup.py install -See Section 5 for some more suggestions about installing this on your platform. +(again possibly with either sudo or --user). +**Note**: If you use Anaconda Python, you can use that to install most of the +requirements with their conda installer. See [Using Conda](#using-conda) +below. -iii) FFTW (http://www.fftw.org) ------------------------------- +Either of these installation methods should handle most of the required +dependencies for you if you do not have them already installed on your machine. +There is one exception, however. FFTW is not directly pip installable, so if +the above installation fails, you may need to install it separately. See +section 2 below for more details about how to do this. -These Fast Fourier Transform libraries must be installed, as GalSim will link -to them during the build. We require version 3 (or greater presumably), which -is often distributed as fftw3. See Section 5 for some suggestions about -installing this on your platform. +The other dependencies should all be installed by pip, but we list them here +for completeness along with versions that are known to work. 
In most cases, +other recent (especially later) versions will also work: +- Eigen (3.2.5) +- NumPy (1.13.1) +- Future (0.16.0) +- Astropy (2.0.1) +- PyBind11 (2.2.1) +- LSSTDESC.Coord (1.0.5) -iv) TMV (https://github.com/rmjarvis/tmv/) (version >= 0.72 required) ------------------------------------------------------------------------ +There are a few other modules that are not technically required, but we let pip -GalSim uses the TMV library for its linear algebra routines. You should -download it from the site above and follow the instructions in its INSTALL -file for how to install it. Usually installing TMV just requires the command +install them along with GalSim, because they either add useful functionality +or efficiency to GalSim. These are listed in the requirements.txt file that +pip uses to determine what else to install. But if you install with +`python setup.py install`, then these will not be installed. - scons install PREFIX= +- Starlink (3.10.0) (Improved WCS functionality) +- PyYaml (3.12) (Reads YAML config files) +- Pandas (0.20) (Faster reading of ASCII input files) - -but there are additional options you might consider, so you should read the TMV -INSTALL file for full details. Also note, you may not need to specify the -installation directory if you are comfortable installing it into `/usr/local`. -However, if you are trying to install it into a system directory then you need -to use sudo scons install [PREFIX=]. +Installing FFTW +=============== -Note: On Mac OS 10.7, the Apple BLAS library has problems when run using -multiple processes. So if you have such a system, we recommend getting a -different BLAS library, such as ATLAS (and making sure TMV finds it instead -of the system BLAS) or compiling TMV with no BLAS library at all (using -the SCons option `WITH_BLAS=false`). Otherwise, Galsim programs may hang -when run with multiple processes. e.g. 
`scons tests` by default uses -multiple processes, and multiple people reported problems using the Apple -system BLAS on OS 10.7. +GalSim uses FFTW (The Fastest Fourier Transform in the West) for performing +fast fourier transforms. +We require FFTW version >= 3.0. Most tests have been done with FFTW 3.3.7, +so if you have trouble with an earlier version, try upgrading to 3.3.7 or later. -v) Boost C++ (http://www.boost.org) ------------------------------------ +i) Installing FFTW yourself +------------------------- -GalSim makes use of some of the Boost C++ libraries, and these parts of Boost -must be installed. Currently GalSim is regularly tested to ensure it works with -Boost version 1.61, but it is likely that most versions released within the -last several years will also work. It is particularly important that your installed -Boost library links to the same version of Python with which you will be using -GalSim and on which you have installed NumPy and PyFITS (see Section ii, above). -Boost can be downloaded from the above website, and must be installed per the -(rather limited) instructions there, which essentially amount to using a command +FFTW is available at the URL - ./bootstrap.sh + http://www.fftw.org/download.html -(Additional `bootstrap.sh` options may be necessary to ensure Boost is built -against the correct version of Python; see below). +As of this writing, version 3.3.7 is the current latest release, for which +the following commands should work to download and install it: -followed by + wget http://www.fftw.org/fftw-3.3.7.tar.gz + tar xfz fftw-3.3.7.tar.gz + cd fftw-3.3.7 + ./configure --enable-shared + make + sudo make install - ./b2 link=shared - ./b2 --prefix= link=shared install +If you want to install into a different directory (e.g. because you do not +have sudo privileges on your machine), then specify the alternate directory +with the --prefix flag to configure. E.g. 
-If you are installing to a system directory, the second needs to be run as -root, of course: `sudo ./b2`... Also, you should be aware that if you are -running `b2` a second time, you should use `b2 -a` to tell boost to -recompile everything rather than use the existing libraries. + ./configure --enable-shared --prefix=$HOME -The `link=shared` is necessary to ensure that they are built as shared -libraries; this is automatic on some platforms, but not all. +which will install the library into $HOME/lib and the header file into +$HOME/include. In this case, leave off the sudo from the last line. +Also, you should make sure these directories are in your LD_LIBRARY_PATH +and C_INCLUDE_PATH environment variables, respectively. -Note: if you do not want to install everything related to Boost (which takes a -while), you can restrict to Boost Python and math by using `--with-python` -`--with-math` on the `./b2` commands. Currently we are only using Boost Python -and parts of the math library so compiling and installing these two will likely -be sufficient for the foreseeable future. +Alternatively, if you do not want to modify your LD_LIBRARY_PATH and/or +C_INCLUDE_PATH, you can instead set an environment variable to tell GalSim +where the files are -Once you have installed Boost, you can check that it links to the version of -Python that will be used for GalSim and on which you have installed NumPy and -PyFITS by typing + export FFTW_DIR=/path/to/fftw/prefix - ldd /libboost_python.so (Linux) - otool -L /libboost_python.dylib (OSX) +E.g. in the above case where prefix is $HOME, you would do -(If the ldd command on Linux does not show the Python version, the command -`ls -l /libboost_python*` may show the version of -libboost_python.so linked to, for example, `libboost_python_py26.so.1.40.0`. -In such a case you can tell both the Python and Boost versions being used, 2.6 -and `1.40.0`, respectively, in this example.) 
On some Linux systems, -ldd will not indicate the Python library against which boost was -compiled; in this case, continue with the installation procedure and -any issues will be revealed at a later stage. + export FFTW_DIR=$HOME -If the Python library listed is the one you will be using, all is well. If not, -Boost can be forced to use a different version by specifying the following -options to the ./bootstrap.sh installation script (defaults in `[]` brackets): +Probably, you should put this into your shell login file (e.g. .bash_profile) +so it always gets set when you log in. -* `--with-python=PYTHON` specify the Python executable [python] -* `--with-python-root=DIR` specify the root of the Python installation - [automatically detected, but some users have found - they have to force it to use a specific one because - it detected the wrong one] +ii) Using an existing installation of FFTW +------------------------------------------ +If FFTW is already installed on your system, there may be nothing to do. +If it is in a standard location like /usr/local/lib or in some other +directory in your LD_LIBRARY_PATH, then GalSim should find it without +any extra work on your part. -2. Installing the GalSim Python package -======================================= +If it is in a non-standard location, and you do not want to add this path +to your LD_LIBRARY_PATH (or you are on a modern Mac that hides such system +variables from setup.py), then you can instead set the FFTW_DIR environment +variable to tell GalSim where to look -Once you have installed all the dependencies described above, you are ready to -build GalSim. From the GalSim base directory (in which this INSTALL.md file is -found) type + export FFTW_DIR=/some/path/to/fftw - scons +For instance, if libfftw3.so is located in /opt/cray/pe/lib64, you could use +that with -If everything above was installed in fairly standard locations, this may work -the first time. 
Otherwise, you may have to tell SCons where to find some of -those libraries. There are quite a few options that you can use to tell SCons -where to look, as well as other things about the build process. To see a list -of options you can pass to SCons, type + export FFTW_DIR=/opt/cray/pe/lib64 - scons -h +This command would normally be done in your .bash_profile file so it gets +executed every time you log in. -(See also Section 5 below.) +If you have multiple versions of FFTW installed on your system, this variable +can be used to specify which version you want GalSim to use as this will be +the first location it will check during the installation process. -As an example, to specify where your TMV library is located, you can type - scons TMV_DIR= +iii) Installing FFTW with conda +------------------------------- -where `` would be the same as the `PREFIX` you specified when -installing TMV, i.e. The TMV library and include files are installed in -`/lib` and `/include`. Some important options that you may -need to set are: +If you use conda, FFTW can be installed with -* `TMV_DIR`: Explicitly give the TMV prefix + conda install fftw -* `FFTW_DIR`: Explicitly give the FFTW prefix +This will put it into the anaconda/lib directory on your system (within your +active environment if appropriate). GalSim knows to look here, so there is +nothing additional you need to do. -* `BOOST_DIR`: Explicitly give the Boost prefix -* `EXTRA_LIBS`: Additional libraries to send to the linker +iv) Installing FFTW with apt-get +-------------------------------- -* `EXTRA_INCLUDE_PATH`: Extra paths for header files (separated by : if more - than 1) +On Linux machines that use apt-get, FFTW can be installed with -* `EXTRA_FLAGS`: Extra flags to send to the compiler other than what is - automatically used. (e.g. -m64 to force 64 bit compilation) + apt-get install libfftw3-dev -Again, you can see the full list of options using `scons -h`. -Another common option is `CXX=`. 
So, to compile with `icpc` rather -than the default `g++`, type +v) Installing FFTW with fink +---------------------------- - scons CXX=icpc +If you use fink on a Mac, FFTW can be installed with -On El Capitan, Apple instituted a new security measure wherein system calls -lose some of the system environment variables, including DYLD_LIBRARY_PATH -among others. If your system is set up to use that environment variable to -resolve library locations at runtime, then this will cause problems when SCons -is trying to figure out if things are installed correctly. To override this -behavior, you can explicitly send this environment variable to SCons by writing + fink install fftw3 - scons DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH +(Make sure to use fftw3, not fftw, since fftw is version 2.) -and it will be able to re-set this value within the SCons processing. +This will put it into the /sw/lib directory on your system. GalSim knows to +look here, so there is nothing additional you need to do. -One nice feature of SCons is that once you have specified a parameter, it will -save that value for future builds in the file `gs_scons.conf`, so once you have -the build process working, for later builds you only need to type `scons`. It -can also be useful to edit this file directly -- mostly if you want to unset a -parameter and return to the default value, it can be easier to just delete the -line from this file, rather than explicitly set it back to the default value. -SCons caches the results of the various checks it does for the required -external libraries (TMV, Boost, etc.). This is usually very helpful, since -they do not generally change, so it makes later builds much faster. However, -sometimes (rarely) SCons can get confused and not realized that things on your -system have changed, which might cause problems for you. 
You can delete -everything scons knows about what it has tried to build previously with +vi) Installing FFTW with MacPorts +--------------------------------- - /bin/rm -rf .scon* +If you use MacPorts, FFTW can be installed with -This will force SCons to recheck and recompile everything from scratch. + port install fftw-3 -Once you have a successful build, you can install the GalSim library, Python -modules, and header files into standard locations (like `/usr/local` and your -Python site-packages directory) with +This will put it into the /opt/local/lib directory on your system. GalSim knows +to look here, so there is nothing additional you need to do. - scons install -or +Installing Eigen +================ - sudo scons install +GalSim uses Eigen for the C++-layer linear algebra calculations. It is a +header-only library, which means that nothing needs to be compiled to use it. +You can download the header files yourself, but if you do not, then we use +the pip-installable eigency module, which bundles the header files in their +installed python directory. So usually, this dependency should require no +work on your part. -If you want to install into a different location, the prefix for the library -and header files can be specified with `PREFIX=`, and the location -for the Python modules can be specified with `PYPREFIX=`. So the -command would be +However, it might become useful to install Eigen separately from eigency +e.g. if you want to upgrade to a newer version of Eigen than the one that is +bundled with eigency. (Eigen 3.2.8 is bundled with eigency 1.77.) Therefore, +this section describes several options for how to obtain and install Eigen. - scons install PREFIX= PYPREFIX= +We require Eigen version >= 3.0. Most tests have been done with Eigen 3.2.8 +or 3.3.4, but we have also tested on 3.0.4, so probably any 3.x version will +work. However, if you have trouble with another version, try upgrading to +3.2.8 or later. 
-Note: if you specify a specific directory for the Python modules with PYPREFIX, -this directory should be in the sys.path search path for the version of -Python you are using. You can check with +Note: Prior to version 2.0, GalSim used TMV for the linear algebra back end. +This is still an option if you prefer (e.g. it may be faster for some use +cases, since it can use an optimized BLAS library on your system), but to +use TMV, you need to use the SCons installation option described below. - python -c "import sys; print sys.path" -If your `PYPREFIX` directory is not there, then Python will not be able to find -the installed galsim module. You should therefore add this directory to your -PYTHONPATH environment variable. For example, if you use bash, then you -should add the line +i) Installing Eigen yourself +---------------------------- - export PYTHONPATH=$PYTHONPATH: +Eigen is available at the URL -where `` is the same directory you used above for `PYPREFIX`. + http://eigen.tuxfamily.org/index.php -The installed files can be removed with the command +As of this writing, version 3.3.4 is the current latest release, for which +the following commands should work to download and install it: - scons uninstall + wget http://bitbucket.org/eigen/eigen/get/3.3.4.tar.bz2 + tar xfj 3.3.4.tar.bz2 + sudo cp eigen-eigen-5a0156e40feb/Eigen /usr/local/include -Finally, to clean all compiled objects from the `GalSim` directory, you can use +In the final cp line, the MD5 hash (5a0156e40feb) will presumably change for +other versions, so use whatever directory tar expands into if you are using +a different version than 3.3.4. - scons -c +If you do not have sudo privileges, you can copy to a different directory such +as $HOME/include instead and leave off the sudo from the cp command. In this +case, make sure this directory is in your C_INCLUDE_PATH environment variable. -This is rather like a `make clean` command. 
+Finally, you can also skip the last command above and instead set EIGEN_DIR +as an environment variable to tell GalSim where the files are -If you are having trouble with installing, you may find some helpful hints at -the GalSim Installation FAQ page on the Wiki: -https://github.com/GalSim-developers/GalSim/wiki/Installation%20FAQ + export EIGEN_DIR=/some/path/to/eigen -You can ask also about your particular problem on stackoverflow.com. Some of -the GalSim developers have automatic alerts set up for the tag 'galsim'. So -yout can ask your question there, and there is a good chance that it will be -answered. You might also try searching that site to see if anyone else asked -about the same problem. +This should be the directory in which the Eigen subdirectory is found. E.g. -If you are still having trouble, please consider opening a new issue on the -GalSim Github page at https://github.com/GalSim-developers/GalSim/issues -explaining what your particular problem is, and hopefully someone can help -you figure out a solution. + export EIGEN_DIR=$HOME/eigen-eigen-5a0156e40feb +Probably, you should put this into your .bash_profile file so it always gets +set when you log in. -3. Running tests and installing example executables -=================================================== -You can run our test suite by typing +ii) Using an existing installation of Eigen +------------------------------------------- - scons tests +If Eigen is already installed on your system, there may be nothing to do. +If it is in a standard location like /usr/local/include or in some other +directory in your C_INCLUDE_PATH, then GalSim should find it without +any extra work on your part. -This should compile the test suite and run it. The tests of our C++ library -will always be run, but we use `pytest` for our Python test suite, so that -will only be run if `pytest` is present on your system. 
We do not require -this as a dependency, since you can still do everything with the GalSim library -without this. But it is required for a complete run of the test suite. +If it is in a non-standard location, and you do not want to add this path +to your C_INCLUDE_PATH, then you can instead set the EIGEN_DIR environment +variable to tell GalSim where to look -To install `pytest`, you can also use easy_install as described in Section 1 -above (see also https://docs.pytest.org/en/latest/). Many third party- -maintained Python distributions, such as the Enthought Python Distribution, -include `pytest`. + export EIGEN_DIR=/some/path/to/eigen -By default, the python tests will use the pytest plugins `pytest-xdist` (for -running tests in parallel) and `pytest-timeout` (to manage how much time each -test is allowed to run). These plugins are usually installable using pip: +For instance, if Eigen was installed into /usr/include/eigen3, then you +could use that with - pip install pytest-xdist pytest-timeout - -Sometimes the `--user` flag may be needed in the above command to make the -plugins discoverable. If you want to run the python tests without these -plugins (serially!), you can still do this via + export EIGEN_DIR=/usr/include/eigen3 - scons tests -j1 +This command would normally be done in your .bash_profile file so it gets +executed every time you log in. -Note: if your system does not have `pytest` installed, and you do not want to -install it, you can run all the Python tests with the script run_all_tests in -the `tests` directory. If this finishes without an error, then all the tests -have passed. +If you have multiple versions of Eigen installed on your system, this variable +can be used to specify which version you want GalSim to use as this will be +the first location it will check during the installation process. -4. 
Running example scripts -========================== +iii) Installing Eigen with conda +-------------------------------- -The `examples` directory has a series of demo scripts: +If you use conda, Eigen can be installed with - demo1.py, demo2.py, ... + conda install eigen -These can be considered a tutorial on getting up to speed with GalSim. Reading -through these in order will introduce you to how to use most of the features of -GalSim in Python. To run these scripts, type (e.g.): +This will put it into the anaconda/include directory on your system (within +your active environment if appropriate). GalSim knows to look here, so there +is nothing additional you need to do. - python demo1.py -You can also create executable versions of these scripts if you prefer by typing +iv) Installing Eigen with apt-get +--------------------------------- - scons examples +On Linux machines that use apt-get, Eigen can be installed with -This will put executable versions (with the first line `#!/bin/env python`) in -the `examples_bin` directory. (We do not include that first line by -default, since you might specify a different python to be used. Running -`scons examples` will put whatever python executable you specify after `#!`.) + apt-get install libeigen3-dev -There are also a corresponding set of config files: - demo1.yaml, demo2.yaml, ... +v) Installing Eigen with fink +----------------------------- -These files can be run using the executable `galsim`, and will produce the -same output images as the Python scripts: +If you use fink on a Mac, Eigen can be installed with - galsim demo1.yaml + fink install eigen -They are also well commented, and can be considered a parallel tutorial for -learning the config file usage of GalSim. +This will put it into the /sw/include directory on your system. GalSim knows +to look here, so there is nothing additional you need to do. -All demo scripts are designed to be run in the `GalSim/examples` directory. 
-Some of them access files in subdirectories of the `examples` directory, so they -would not work correctly from other locations. +vi) Installing Eigen with MacPorts +---------------------------------- -5. Platform-specific notes -========================== +If you use MacPorts, Eigen can be installed with -i) Linux --------- -The vast majority of Linux distributions provide installable packages for most -of our dependencies. In many cases, however, it is also necessary to install -"-devel" or "-dev" packages (e.g. `python-dev` or `libboost-dev` on Debian- -derivatives). However, as above we stress that you should make sure that the -version of Python that Boost is built against must be the same as that you -intend to use for running GalSim. + port install eigen -The solution may be to install Boost C++ manually. This can be done by following -the instructions of Section 1.v), above. +This will put it into the /opt/local/include directory on your system. GalSim +knows to look here, so there is nothing additional you need to do. -ii) Mac OSX 10.8 and earlier ----------------------------- -a) Use of Fink -- the `fink` (http://www.finkproject.org) package management -software is popular with Mac users. Once it is installed, you can get either -most or all of the prerequisites using it, depending on whether you want -to use GalSim with the fink version of Python (e.g. that in `/sw/bin/python`) or -the system Python (`/usr/bin/python`) or something else still. - -It is in general a good idea to update fink prior to installing any new modules: - - fink selfupdate - fink update-all - -If you are happy with running GalSim using the fink version of python 2.7, you -can install everything with the following command: - - fink install galsim - -and it should just work. However, there are some caveats that are worth knowing -about (assuming your fink installation is in `/sw`): - -1. This will install GalSim as a module of the python2.7 installed by fink. 
-This is not the default Python (usually `/usr/bin/python` or some other package, -such as EPD, if installed). Any Python scripts you write that use the galsim -module should therefore have `#!/sw/bin/python2.7` as the first line rather -than the usual `#!/usr/bin/env python`. Similarly, if you want to use galsim -in an interactive Python session, you should run `/sw/bin/python2.7` (simply -`python2.7` may also work) rather than just `python`. (Of course, you can -always change your `PATH` environment variable to make the fink Python the -system default if you wish...) - -2. The executable `galsim`, which parses YAML or JSON configuration files, -will be installed in `/sw/bin`. You should not need to do anything special -to use these, since `/sw/bin` should already be in your path if using fink. - -3. If you want to run through the example scripts (such as the demo tutorials -`demo1.py`, `demo2.py` etc. and the `.yaml` and `.json` config versions of the -same demos), you will still need to download the GalSim tarball. But you can -skip all the instructions above about installation and just use the fink -version. So `python2.7 demo1.py` (assuming `which python2.7` is the fink one) -and `galsim demo1.yaml` should run those scripts for you. - -4. If you want to work with GalSim as a developer, rather than just a user, -then you cannot use the fink-installed GalSim. However, the process above will -have installed all the prerequisites. So `fink uninstall galsim` will leave -you able to install GalSim using the master branch with: - - scons TMV_DIR=/sw PYTHON=/sw/bin/python2.7 BOOST_DIR=/sw/opt/boost-1_58 - -from within the repository base directory. - -To run the unit tests, you will also need pytest, which you can also get from -fink: - - fink install pytest-py27 - scons tests PYTEST=/sw/bin/pytest - -If you want to use the system Python, or some other version, then the fink -Python installations will not work. 
You will need to manually install -NumPy, PyFITS, PyYAML and pytest, for example using easy_install, with your -chosen Python. - -For the system Python, you can use fink for Boost, but you will want a -different package than the boost1.58.python27 that gets installed using -`fink install galsim` above: - - fink install scons fftw3 tmv0 boost1.58-systempython - pip install future - scons TMV_DIR=/sw BOOST_DIR=/sw/opt/boost-1_58 - -For other Python versions, the fink-installed Boost usually will not work, so -you can only use fink for SCons, FFTW and TMV. So you will probably need to -install Boost manually. This can be done by following the instructions of -Section 1.v), above. - -b) MacPorts -- this is another popular Mac package management project -(http://www.macports.org/) with similar functionality to fink. Neither TMV nor -GalSim are currently on the official MacPorts distribution list, so users cannot -find them by searching the MacPorts site. However, it is possible to install -both TMV and GalSim, plus the other dependencies of GalSim, using MacPorts -following the instructions below. - -It is in general a good idea to upgrade all modules, prior to installing any new -modules: - - sudo port selfupdate - sudo port upgrade outdated - -Below is a list of steps to take to install GalSim using MacPorts: - - i) Take the `Portfiles` from the GalSim repository: - https://github.com/GalSim-developers/GalSim/blob/master/devel/ports.tar.gz - (If you do not clone the repository, there is a "copy" button on the website - that you can use to download the file directly.) - ii) Place the file in your home directory. 
- iii) `tar xvzf ports.tar.gz` - iv) `cd ports` - v) `sudo portindex` - vi) `sudo port install python27` - vii) `sudo port select --set python python27` - viii) `sudo sh -c "echo file:///Users/username/ports >> - /opt/local/etc/macports/sources.conf"` - ix) `sudo port install galsim` - x) Add /opt/local/lib to DYLD_LIBRARY_PATH -Some users may find that the last step results in an inability to import the -GalSim module. In that case, you can clear that addition to DYLD_LIBRARY_PATH -and instead add /opt/local/lib to DYLD_FALLBACK_LIBRARY_PATH. +vii) Using eigency +------------------ -Notes on MacPorts with Mac OS X 10.8: -The use of `sudo` in the above commands may elicit an error message that says -"dyld: DYLD_ environment variables being ignored because main executable -(/usr/bin/sudo) is setuid or setgid". This is the result of a bug in Mac OS X -10.8, and will not prevent the installation of GalSim with the above steps from -being successful. - -Notes on MacPorts version of gcc with Mac OS X 10.5.8: -If you have installed a MacPorts version of gcc (e.g., "mp-gcc47"), it may not -link correctly with the other MacPorts installed modules, which are compiled in -the system gcc versions. To check what gcc versions are available to you, try -the command - - port select --list gcc - -then switch to the system gcc version (either 4.0 or 4.2) with - - sudo port select --set gcc gcc42 - -and compile GalSim with the system gcc. +Eigency is a pip-installable module that bundles the Eigen header files, so it +can also be used to install these files on your system. Indeed, as mentioned +above, we will use eigency automatically if Eigen is not found in one of the +above locations. So the above installations will take precedence, but +eigency should work as a fall-back. -c) Homebrew (http://mxcl.github.com/homebrew/) -- another package manager for -Max OSX. 
Currently GalSim is available on homebrew, so it (plus dependencies) -should be installable via +Note: At the time of this writing, installation of eigency depends on having +cython already installed. I thought I fixed this with PR #26, but it was +not quite complete. There is now an open PR #27, which I believe will +finish making pip install eigency work, even if you do not have cython +installed. But for now, you need to do - brew tap camphogg/science - brew install gal-sim + pip install cython + pip install eigency +(in that order) for it to work. -iii) Mac OSX 10.9 (Mavericks) ------------------------------ -Most of what applies above for earlier Mac OSX versions seems to apply for -GalSim on Mavericks too, although not all combinations have yet been tested. +Using Conda +=========== -However, it has been found that GalSim and its dependencies can be sensitive -(e.g. Issue #483) to the fact that under Mavericks the system `gcc` is NOT in -fact the Gnu Compiler Collection, but in fact Clang masquerading as such. This -can lead to problems when linking libraries, as described in the following -GalSim Wiki FAQ item: -https://github.com/GalSim-developers/GalSim/wiki/Installation-FAQ#wiki-what-should-i-do-about-undefined-symbols-for-architecture-x86_64-errors +If you use conda (normally via the Anaconda Python distribution), then all of +the prerequisites are available from the conda-forge channel, so you can use +that as follows (from within the main GalSim directory): -The best success seems to be achieved in Mavericks by *explicitly* specifying -`clang` and `clang++` as the compiler to GalSim and all its dependencies when -building (as in the example above). + conda create -y -n galsim + conda activate galsim + conda install -y -c conda-forge --file conda_requirements.txt + pip install -r requirements.txt + pip install . 
-iv) Docker ----------- +The first two lines are optional, but they let you keep the GalSim installation +separate from any other conda environments you might have. -Karen Ng has created a Docker file for containerizing GalSim. See her repo: +If your conda version is 4.3 or earlier, replace the above conda activate line +with - https://github.com/karenyyng/GalSim_dockerfile + source activate galsim -for instructions about how to either use her image or create your own. +which does the same thing. They just changed the name of this command to use +the conda executable instead of source. -6. More SCons options +Also, if you prefer to use the defaults channel, then (at least as of this +writing), it had all the items in conda_requirements.txt, except for pybind11. +So if you have conda-forge in your list of channels, but it comes after +defaults, then that should still work and pybind11 will be the only one that +will need the conda-forge channel. + + +Installing With SCons ===================== -Here is a fairly complete list of the options you can pass to SCons to control -the build process. The options are listed with their default value. You change -them simply by specifying a different value on the command line. +Prior to version 2.0, GalSim installation used SCons. This installation +mode is still supported, but is not recommended unless you have difficulties +with the setup.py installation. + +Note: Two options that are available with the SCons installation method, +but not the setup.py method, are (1) using TMV instead of Eigen for the linear +algebra back end, and (2) using Boost.Python instead of PyBind11 for the +wrapping the C++ code to be called from Python. If you need either of these +options, then you should use the SCons installation. + +See the file INSTALL_SCONS.md for complete details about this method of +installation. 
+ + +Running tests +============= + +You can run our test suite by typing + + python setup.py test + +This should run all the python-layer tests with pytest and also compile and +run the C++ test suite. + +There are a number of packages that are used by the tests, but which are not +required for GalSim installation and running. These should be installed +automatically by the above command, but you can install them manually via -For example: + pip install -r test_requirements.txt - scons CXX=icpc TMV_DIR=~ +(As usual, you may need to add either `sudo` or `--user`.) -(Unlike autotools, SCons correctly expands ~ to your home directory.) -You can list these options from the command line with +By default, the tests will run in parallel using the pytest plugins +`pytest-xdist` and `pytest-timeout` (to manage how much time each test is +allowed to run). If you want to run the python tests in serial instead, +you can do this via - scons -h + python setup.py test -j1 -### Basic flags about the C++ compilation (default values in parentheses) ### +You can also use this to modify how many jobs will be spawned for running the +tests. -* `CXX` (g++) specifies which C++ compiler to use. +**Note**: If your system does not have `pytest` installed, and you do not want +to install it, you can run all the Python tests with the script run_all_tests +in the `tests` directory. If this finishes without an error, then all the tests +have passed. However, note that this script runs more tests than our normal +test run using pytest, so it may take quite a while to finish. (The *all* in +the file name means run all the tests including the slow ones that we normally +skip.) -* `FLAGS` ('') specifies the basic flags to pass to the compiler. The default - behavior is to automatically choose good flags to use according to which - kind of compiler you are using. This option overrides that and lets you - specify exactly what flags to use. 
-* `EXTRA_FLAGS` ('') specifies some extra flags that you want to use in addition - to the defaults that SCons determines on its own. Unlike the above option, - this do not override the defaults, it just adds to them. +Running example scripts +======================= -* `LINK_FLAGS` ('') specifies some extra flags at the linking step to use in - addition to the defaults that SCons determines it needs on its own. +The `examples` directory has a series of demo scripts: + + demo1.py, demo2.py, ... -* `DEBUG` (True) specifies whether to keep the debugging assert statements in - the compiled library code. They are not much of a performance hit, so it is - generally worth keeping them in, but if you need to squeeze out every last - bit of performance, you can set this to False. +These can be considered a tutorial on getting up to speed with GalSim. Reading +through these in order will introduce you to how to use most of the features of +GalSim in Python. To run these scripts, type (e.g.): -* `EXTRA_DEBUG` (False) specifies whether to add a flag to keep the original - code information in the compiled library (-g3 for g++ compiler). This - increases the size of the compiled library, but makes debugging with things - like gdb easier. Probably end users will never need to use this. - -* `WARN` (False) specifies whether to add warning compiler flags such as - `-Wall`. + python demo1.py -* `PYTHON` (/usr/bin/env python) specifies which version of Python you are - planning to use GalSim with. If you choose not to use the default here, - then you need to remember to use the correct Python version +There are also a corresponding set of config files: -### Flags about where to install the library and modules ### + demo1.yaml, demo2.yaml, ... -* `PREFIX` (/usr/local) specifies where to install the library when running - `scons install`. - -* `PYPREFIX` ([your python dir]/site-packages) specifies where to install the - Python modules when running `scons install`. 
+These files can be run using the executable `galsim`, and will produce the +same output images as the Python scripts: -* `FINAL_PREFIX` (`PREFIX`) specifies the final installation prefix if different - from PREFIX. (This is only needed for things like fink, where they install - into a staging area first before copying over to the final location.) - -* `WITH_UPS` (False) specified whether to install the ups directory for use - with EUPS. - -### Flags that specify where to look for external libraries ### - -* `TMV_DIR` ('') specifies the location of TMV if it is not in a standard - location. This should be the same value as you used for PREFIX when - installing TMV. + galsim demo1.yaml -* `TMV_LINK` ('') specifies the location of the tmv-link file. Normally, this is - in `TMV_DIR/share`, but if not, you can specify the correct location here. +They are also well commented, and can be considered a parallel tutorial for +learning the config file usage of GalSim. -* `FFTW_DIR` ('') specifies the root location of FFTW. The header files should - be in `FFTW_DIR/include` and the library files in `FFTW_DIR/lib`. - -* `BOOST_DIR` ('') specifies the root location of BOOST The header files should - be in `BOOST_DIR/include/boost` and the library files in `BOOST_DIR/lib`. - -* `USE_BOOST` (False) specifies whether to use a local boost installation for - some optional boost header files. We bundle the boost.random implementation - from a specific boost version (1.48) to make sure "random" variable generation - is deterministic across machines and over time. To make it fully self- - contained, we edited them slightly to not include many of the complicated - workarounds boost has for specific compilers and such. However, those - workarounds can be reenabled by setting USE_BOOST=True if your system needs - them. 
- -* `EXTRA_INCLUDE_PATH` ('') specifies extra directories in which to search for - header files in addition to the standard locations such as `/usr/include` and - `/usr/local/include` and the ones derived from the above options. Sometimes - the above options do not quite work, so you may need to specify other - locations, which is what this option is for. These directories are specified - as `-I` flags to the compiler. If you are giving multiple directories, they - should be separated by colons. - -* `EXTRA_LIB_PATH` ('') specifies extra directories in which to search for - libraries in addition to the standard locations such as `/usr/lib` and - `/usr/local/lib`. These directories are specified as `-L` flags to the - linker. If you are giving multiple directories, they should be separated by - colons. To add the library `/blah/libfoo.a`, specify - `EXTRA_LIB_PATH=/blah/ EXTRA_LIBS=foo`. - -* `EXTRA_PATH` ('') specifies directories in which to search for executables - (notably the compiler, although you can also just give the full path in the - CXX parameter) in addition to the standard locations such as `/usr/bin` and - `/usr/local/bin`. If you are giving multiple directories, they should be - separated by colons. - -* `IMPORT_PATHS` (False) specifies whether to import extra path directories - from the environment variables: `PATH`, `C_INCLUDE_PATH`, `LD_LIBRARY_PATH` - and `LIBRARY_PATH`. If you have a complicated setup in which you use these - environment variables to control everything, this can be an easy way to let - SCons know about these locations. - -* `IMPORT_ENV` (True) specifies whether to import the entire environment from - the calling shell. The default is for SCons to use the same environment as - the shell from which it is called. However, sometimes it can be useful to - start with a clean environment and manually add paths for various things, in - which case you would want to set this to False. 
- -* `EXTRA_LIBS` ('') specifies libraries to use in addition to what SCons finds - on its own. This might be useful if you have a non-standard name for one of - the external libraries. e.g. If you want to use the Intel MKL library for the - FFTW library, SCons will not automatically try that, so you could add those - libraries here. If there is more than one, they should be quoted with spaces - between the different libraries. e.g. - `EXTRA_LIBS="mkl_intel mkl_intel_thread mkl_core"` - -* `IMPORT_PREFIX` (True) specifies whether to include the directories - `PREFIX/include`, `PREFIX/lib` and `PREFIX/bin` as part of the standard - path lists. Normally, you install everything in the same place, so it is - useful to search those locations for some of the prerequisite packages, so - the default is True. But occasionally, this might be inconvenient, so you - can turn this feature off. - -* `DYLD_LIBRARY_PATH` ('') Set the DYLD_LIBRARY_PATH inside of SCons. - Particularly useful on El Capitan (and later), since Apple strips out - DYLD_LIBRARY_PATH from the environment that SCons sees, so if you need it, - this option enables SCons to set it back in for you by doing - `scons DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH`. - -* `DYLD_FALLBACK_LIBRARY_PATH` ('') Set the DYLD_FALLBACK_LIBRARY_PATH inside - of SCons. cf. DYLD_LIBRARY_PATH for why this may be useful. - -* `LD_LIBRARY_PATH` ('') Set the LD_LIBRARY_PATH inside of SCons. - cf. DYLD_LIBRARY_PATH for why this may be useful. - -### Miscellaneous flags ### - -* `PYTEST` (pytest) specifies which version of pytest you want to use - for running the unit tests. If you specified a non-default Python, then - there is a possibility that the standard pytest executable in your path - will not work (since it might be for a different version of Python). In - that case, specify the correct pytest here. - -* `CACHE_LIB` (True) specifies whether to cache the results of the library - checks. 
While you are working one getting the prerequisites installed - properly, it can be useful to set this to False to force SCons to redo all of - its library checks each time. Once you have a successful build, you should - set it back to True so that later builds can skip those checks. - -* `WITH_PROF` (False) specifies whether to use the compiler flag `-pg` to - include profiling info for `gprof`. - -* `MEM_TEST` (False) specifies whether to test the code for memory leaks. - -* `TMV_DEBUG` (False) specifies whether to turn on extra (slower) debugging - statements within the TMV library. - -* `USE_UNKNOWN_VARS` (False) specifies whether to accept scons parameters other - than the ones listed here. Normally, another name would indicate a typo, so - we catch it and let you know. But if you want to use other scons options - that we did not list here, you would want to also set this to True. +All demo scripts are designed to be run in the `GalSim/examples` directory. +Some of them access files in subdirectories of the `examples` directory, so they +would not work correctly from other locations. diff --git a/INSTALL_SCONS.md b/INSTALL_SCONS.md new file mode 100644 index 00000000000..e4462cd36a8 --- /dev/null +++ b/INSTALL_SCONS.md @@ -0,0 +1,677 @@ +Installation Using SCons +======================== + +Prior to version 2.0, this was the only installation method for installing +GalSim. It is still supported, mostly in case some users have trouble with +the setup.py method, but not recommended for most users. + +Please see the instructions in INSTALL.md first to see if that method +will work for you. + + +Software required before building GalSim +======================================== + +Please note: Mac users who want to use fink can skip down to Section 5.ii and +use that to satisfy all dependencies before installing. 
+ +i) Python (2.7, 3.4, 3.5, or 3.6 series) +---------------------------------------- + +The interface to the GalSim code is via the Python package `galsim`, and its +associated modules. Therefore you must have Python installed on your system. +Python is free, and available from a number of sources online (see below). +Currently GalSim supports Python versions 2.7, 3.4, 3.5, and 3.6. It is likely +that other Python 3.x versions are compatible, but these are the only ones +being actively tested. + + +ii) SCons (http://www.scons.org) +-------------------------------- + +GalSim uses SCons to configure and run its installation procedures, and so SCons +needs to be installed in advance. Versions 2.0 and 2.1 of SCons get reasonable +testing with GalSim, but it should also work with 1.x versions. You can check +if it is installed, and if so which version, by typing + + scons --version + +See Section 5 for some more suggestions about installing this on your platform. + + +iii) FFTW (http://www.fftw.org) +------------------------------- + +These Fast Fourier Transform libraries must be installed, as GalSim will link +to them during the build. We require version 3 (or greater presumably), which +is often distributed as fftw3. See Section 5 for some suggestions about +installing this on your platform. + + +iv) TMV (https://github.com/rmjarvis/tmv/) (version >= 0.72) +----------------------------------------------------------------------- + +GalSim can use either Eigen or TMV for its linear algebra routines. See +the appropriate section in INSTALL.md if you want to use Eigen. To use +TMV (which was required prior to version 2.0), read on. + +You should download TMV from the site above and follow the instructions in its +INSTALL file for how to install it. Usually installing TMV just requires the +command + + scons install PREFIX= + +but there are additional options you might consider, so you should read the TMV +INSTALL file for full details. 
Also note, you may not need to specify the +installation directory if you are comfortable installing it into `/usr/local`. +However, if you are trying to install it into a system directory then you need +to use sudo scons install [PREFIX=]. + + +v) Boost C++ (http://www.boost.org) +----------------------------------- + +GalSim can use either PyBind11 or Boost for wrapping the C++ code to use in +Python. See the appropriate section in INSTALL.md if you want to use PyBind11. +To use Boost (which was required prior to version 2.0), read on. + +GalSim makes use of some of the Boost C++ libraries, and these parts of Boost +must be installed. Currently GalSim is regularly tested to ensure it works with +Boost version 1.61, but it is likely that most versions released within the +last several years will also work. It is particularly important that your installed +Boost library links to the same version of Python with which you will be using +GalSim and on which you have installed NumPy and PyFITS (see Section ii, above). +Boost can be downloaded from the above website, and must be installed per the +(rather limited) instructions there, which essentially amount to using a command + + ./bootstrap.sh + +(Additional `bootstrap.sh` options may be necessary to ensure Boost is built +against the correct version of Python; see below). + +followed by + + ./b2 link=shared + ./b2 --prefix= link=shared install + +If you are installing to a system directory, the second needs to be run as +root, of course: `sudo ./b2`... Also, you should be aware that if you are +running `b2` a second time, you should use `b2 -a` to tell boost to +recompile everything rather than use the existing libraries. + +The `link=shared` is necessary to ensure that they are built as shared +libraries; this is automatic on some platforms, but not all. 
+ +Note: if you do not want to install everything related to Boost (which takes a +while), you can restrict to Boost Python and math by using `--with-python` +`--with-math` on the `./b2` commands. Currently we are only using Boost Python +and parts of the math library so compiling and installing these two will likely +be sufficient for the foreseeable future. + +Once you have installed Boost, you can check that it links to the version of +Python that will be used for GalSim and on which you have installed NumPy and +PyFITS by typing + + ldd /libboost_python.so (Linux) + otool -L /libboost_python.dylib (OSX) + +(If the ldd command on Linux does not show the Python version, the command +`ls -l /libboost_python*` may show the version of +libboost_python.so linked to, for example, `libboost_python_py26.so.1.40.0`. +In such a case you can tell both the Python and Boost versions being used, 2.6 +and `1.40.0`, respectively, in this example.) On some Linux systems, +ldd will not indicate the Python library against which boost was +compiled; in this case, continue with the installation procedure and +any issues will be revealed at a later stage. + +If the Python library listed is the one you will be using, all is well. If not, +Boost can be forced to use a different version by specifying the following +options to the ./bootstrap.sh installation script (defaults in `[]` brackets): + +* `--with-python=PYTHON` specify the Python executable [python] + +* `--with-python-root=DIR` specify the root of the Python installation + [automatically detected, but some users have found + they have to force it to use a specific one because + it detected the wrong one] + + +Installing the GalSim Python package +==================================== + +Once you have installed all the dependencies described above, you are ready to +build GalSim. 
From the GalSim base directory (in which this file is found) type + + scons + +If everything above was installed in fairly standard locations, this may work +the first time. Otherwise, you may have to tell SCons where to find some of +those libraries. There are quite a few options that you can use to tell SCons +where to look, as well as other things about the build process. To see a list +of options you can pass to SCons, type + + scons -h + +(See also Section 5 below.) + +As an example, to specify where your TMV library is located, you can type + + scons TMV_DIR= + +where `` would be the same as the `PREFIX` you specified when +installing TMV, i.e. The TMV library and include files are installed in +`/lib` and `/include`. Some important options that you may +need to set are: + +* `FFTW_DIR`: Explicitly give the FFTW prefix + +* `USE_TMV`: Specify that you want to use TMV rather than Eigen. +* `TMV_DIR`: Explicitly give the TMV prefix +* `EIGEN_DIR`: Explicitly give the Eigen prefix + +* `USE_BOOST`: Specify that you want to use Boost rather than PyBind11. +* `BOOST_DIR`: Explicitly give the Boost prefix + +* `EXTRA_LIBS`: Additional libraries to send to the linker + +* `EXTRA_INCLUDE_PATH`: Extra paths for header files (separated by : if more + than 1) + +* `EXTRA_FLAGS`: Extra flags to send to the compiler other than what is + automatically used. (e.g. -m64 to force 64 bit compilation) + +Again, you can see the full list of options using `scons -h`. + +Another common option is `CXX=`. So, to compile with `icpc` rather +than the default `g++`, type + + scons CXX=icpc + +On El Capitan, Apple instituted a new security measure wherein system calls +lose some of the system environment variables, including DYLD_LIBRARY_PATH +among others. If your system is set up to use that environment variable to +resolve library locations at runtime, then this will cause problems when SCons +is trying to figure out if things are installed correctly. 
To override this
+behavior, you can explicitly send this environment variable to SCons by writing
+
+    scons DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH
+
+and it will be able to re-set this value within the SCons processing.
+
+One nice feature of SCons is that once you have specified a parameter, it will
+save that value for future builds in the file `gs_scons.conf`, so once you have
+the build process working, for later builds you only need to type `scons`.  It
+can also be useful to edit this file directly -- mostly if you want to unset a
+parameter and return to the default value, it can be easier to just delete the
+line from this file, rather than explicitly set it back to the default value.
+
+SCons caches the results of the various checks it does for the required
+external libraries (TMV, Boost, etc.).  This is usually very helpful, since
+they do not generally change, so it makes later builds much faster.  However,
+sometimes (rarely) SCons can get confused and not realize that things on your
+system have changed, which might cause problems for you.  You can delete
+everything scons knows about what it has tried to build previously with
+
+    /bin/rm -rf .scon*
+
+This will force SCons to recheck and recompile everything from scratch.
+
+Once you have a successful build, you can install the GalSim library, Python
+modules, and header files into standard locations (like `/usr/local` and your
+Python site-packages directory) with
+
+    scons install
+
+or
+
+    sudo scons install
+
+If you want to install into a different location, the prefix for the library
+and header files can be specified with `PREFIX=`, and the location
+for the Python modules can be specified with `PYPREFIX=`.  So the
+command would be
+
+    scons install PREFIX= PYPREFIX=
+
+Note: if you specify a specific directory for the Python modules with PYPREFIX,
+this directory should be in the sys.path search path for the version of
+Python you are using. 
You can check with
+
+    python -c "import sys; print(sys.path)"
+
+If your `PYPREFIX` directory is not there, then Python will not be able to find
+the installed galsim module.  You should therefore add this directory to your
+PYTHONPATH environment variable.  For example, if you use bash, then you
+should add the line
+
+    export PYTHONPATH=$PYTHONPATH:
+
+where `` is the same directory you used above for `PYPREFIX`.
+
+The installed files can be removed with the command
+
+    scons uninstall
+
+Finally, to clean all compiled objects from the `GalSim` directory, you can use
+
+    scons -c
+
+This is rather like a `make clean` command.
+
+If you are having trouble with installing, you may find some helpful hints at
+the GalSim Installation FAQ page on the Wiki:
+https://github.com/GalSim-developers/GalSim/wiki/Installation%20FAQ
+
+You can also ask about your particular problem on stackoverflow.com.  Some of
+the GalSim developers have automatic alerts set up for the tag 'galsim'.  So
+you can ask your question there, and there is a good chance that it will be
+answered.  You might also try searching that site to see if anyone else asked
+about the same problem.
+
+If you are still having trouble, please consider opening a new issue on the
+GalSim Github page at https://github.com/GalSim-developers/GalSim/issues
+explaining what your particular problem is, and hopefully someone can help
+you figure out a solution.
+
+
+Running tests and installing example executables
+================================================
+
+You can run our test suite by typing
+
+    scons tests
+
+This should compile the test suite and run it.  The tests of our C++ library
+will always be run, but we use `pytest` for our Python test suite, so that
+will only be run if `pytest` is present on your system.  We do not require
+this as a dependency, since you can still do everything with the GalSim library
+without this.  But it is required for a complete run of the test suite.
+ +To install `pytest`, you can also use easy_install as described in Section 1 +above (see also https://docs.pytest.org/en/latest/). Many third party- +maintained Python distributions, such as the Enthought Python Distribution, +include `pytest`. + +By default, the python tests will use the pytest plugins `pytest-xdist` (for +running tests in parallel) and `pytest-timeout` (to manage how much time each +test is allowed to run). These plugins are usually installable using pip: + + pip install pytest-xdist pytest-timeout + +Sometimes the `--user` flag may be needed in the above command to make the +plugins discoverable. If you want to run the python tests without these +plugins (serially!), you can still do this via + + scons tests -j1 + +Note: if your system does not have `pytest` installed, and you do not want to +install it, you can run all the Python tests with the script run_all_tests in +the `tests` directory. If this finishes without an error, then all the tests +have passed. + + +Running example scripts +======================= + +The `examples` directory has a series of demo scripts: + + demo1.py, demo2.py, ... + +These can be considered a tutorial on getting up to speed with GalSim. Reading +through these in order will introduce you to how to use most of the features of +GalSim in Python. To run these scripts, type (e.g.): + + python demo1.py + +You can also create executable versions of these scripts if you prefer by typing + + scons examples + +This will put executable versions (with the first line `#!/bin/env python`) in +the `examples_bin` directory. (We do not include that first line by +default, since you might specify a different python to be used. Running +`scons examples` will put whatever python executable you specify after `#!`.) + +There are also a corresponding set of config files: + + demo1.yaml, demo2.yaml, ... 
+ +These files can be run using the executable `galsim`, and will produce the +same output images as the Python scripts: + + galsim demo1.yaml + +They are also well commented, and can be considered a parallel tutorial for +learning the config file usage of GalSim. + +All demo scripts are designed to be run in the `GalSim/examples` directory. +Some of them access files in subdirectories of the `examples` directory, so they +would not work correctly from other locations. + + +Platform-specific notes +======================= + +i) Linux +-------- +The vast majority of Linux distributions provide installable packages for most +of our dependencies. In many cases, however, it is also necessary to install +"-devel" or "-dev" packages (e.g. `python-dev` or `libboost-dev` on Debian- +derivatives). However, as above we stress that you should make sure that the +version of Python that Boost is built against must be the same as that you +intend to use for running GalSim. + +The solution may be to install Boost C++ manually. This can be done by following +the instructions of Section 1.v), above. + +ii) Mac OSX +----------- +a) Use of Fink -- the `fink` (http://www.finkproject.org) package management +software is popular with Mac users. Once it is installed, you can get either +most or all of the prerequisites using it, depending on whether you want +to use GalSim with the fink version of Python (e.g. that in `/sw/bin/python`) or +the system Python (`/usr/bin/python`) or something else still. + +It is in general a good idea to update fink prior to installing any new modules: + + fink selfupdate + fink update-all + +If you are happy with running GalSim using the fink version of python 2.7, you +can install everything with the following command: + + fink install galsim + +and it should just work. However, there are some caveats that are worth knowing +about (assuming your fink installation is in `/sw`): + +1. This will install GalSim as a module of the python2.7 installed by fink. 
+This is not the default Python (usually `/usr/bin/python` or some other package, +such as EPD, if installed). Any Python scripts you write that use the galsim +module should therefore have `#!/sw/bin/python2.7` as the first line rather +than the usual `#!/usr/bin/env python`. Similarly, if you want to use galsim +in an interactive Python session, you should run `/sw/bin/python2.7` (simply +`python2.7` may also work) rather than just `python`. (Of course, you can +always change your `PATH` environment variable to make the fink Python the +system default if you wish...) + +2. The executable `galsim`, which parses YAML or JSON configuration files, +will be installed in `/sw/bin`. You should not need to do anything special +to use these, since `/sw/bin` should already be in your path if using fink. + +3. If you want to run through the example scripts (such as the demo tutorials +`demo1.py`, `demo2.py` etc. and the `.yaml` and `.json` config versions of the +same demos), you will still need to download the GalSim tarball. But you can +skip all the instructions above about installation and just use the fink +version. So `python2.7 demo1.py` (assuming `which python2.7` is the fink one) +and `galsim demo1.yaml` should run those scripts for you. + +4. If you want to work with GalSim as a developer, rather than just a user, +then you cannot use the fink-installed GalSim. However, the process above will +have installed all the prerequisites. So `fink uninstall galsim` will leave +you able to install GalSim using the master branch with: + + scons TMV_DIR=/sw PYTHON=/sw/bin/python2.7 BOOST_DIR=/sw/opt/boost-1_58 + +from within the repository base directory. + +To run the unit tests, you will also need pytest, which you can also get from +fink: + + fink install pytest-py27 + scons tests PYTEST=/sw/bin/pytest + +If you want to use the system Python, or some other version, then the fink +Python installations will not work. 
You will need to manually install +NumPy, PyFITS, PyYAML and pytest, for example using easy_install, with your +chosen Python. + +For the system Python, you can use fink for Boost, but you will want a +different package than the boost1.58.python27 that gets installed using +`fink install galsim` above: + + fink install scons fftw3 tmv0 boost1.58-systempython + pip install future + scons TMV_DIR=/sw BOOST_DIR=/sw/opt/boost-1_58 + +For other Python versions, the fink-installed Boost usually will not work, so +you can only use fink for SCons, FFTW and TMV. So you will probably need to +install Boost manually. This can be done by following the instructions of +Section 1.v), above. + +b) MacPorts -- this is another popular Mac package management project +(http://www.macports.org/) with similar functionality to fink. Neither TMV nor +GalSim are currently on the official MacPorts distribution list, so users cannot +find them by searching the MacPorts site. However, it is possible to install +both TMV and GalSim, plus the other dependencies of GalSim, using MacPorts +following the instructions below. + +It is in general a good idea to upgrade all modules, prior to installing any new +modules: + + sudo port selfupdate + sudo port upgrade outdated + +Below is a list of steps to take to install GalSim using MacPorts: + + i) Take the `Portfiles` from the GalSim repository: + https://github.com/GalSim-developers/GalSim/blob/master/devel/ports.tar.gz + (If you do not clone the repository, there is a "copy" button on the website + that you can use to download the file directly.) + ii) Place the file in your home directory. 
+ iii) `tar xvzf ports.tar.gz` + iv) `cd ports` + v) `sudo portindex` + vi) `sudo port install python27` + vii) `sudo port select --set python python27` + viii) `sudo sh -c "echo file:///Users/username/ports >> + /opt/local/etc/macports/sources.conf"` + ix) `sudo port install galsim` + x) Add /opt/local/lib to DYLD_LIBRARY_PATH + +Some users may find that the last step results in an inability to import the +GalSim module. In that case, you can clear that addition to DYLD_LIBRARY_PATH +and instead add /opt/local/lib to DYLD_FALLBACK_LIBRARY_PATH. + +c) Homebrew (http://mxcl.github.com/homebrew/) -- another package manager for +Max OSX. Currently GalSim is available on homebrew, so it (plus dependencies) +should be installable via + + brew tap camphogg/science + brew install gal-sim + + +iv) Docker +---------- + +Karen Ng has created a Docker file for containerizing GalSim. See her repo: + + https://github.com/karenyyng/GalSim_dockerfile + +for instructions about how to either use her image or create your own. + + +More SCons options +================== + +Here is a fairly complete list of the options you can pass to SCons to control +the build process. The options are listed with their default value. You change +them simply by specifying a different value on the command line. + +For example: + + scons CXX=icpc TMV_DIR=~ + +(Unlike autotools, SCons correctly expands ~ to your home directory.) +You can list these options from the command line with + + scons -h + +### Basic flags about the C++ compilation (default values in parentheses) ### + +* `CXX` (g++) specifies which C++ compiler to use. + +* `FLAGS` ('') specifies the basic flags to pass to the compiler. The default + behavior is to automatically choose good flags to use according to which + kind of compiler you are using. This option overrides that and lets you + specify exactly what flags to use. 
+
+* `EXTRA_FLAGS` ('') specifies some extra flags that you want to use in addition
+  to the defaults that SCons determines on its own.  Unlike the above option,
+  this does not override the defaults, it just adds to them.
+
+* `LINK_FLAGS` ('') specifies some extra flags at the linking step to use in
+  addition to the defaults that SCons determines it needs on its own.
+
+* `DEBUG` (True) specifies whether to keep the debugging assert statements in
+  the compiled library code.  They are not much of a performance hit, so it is
+  generally worth keeping them in, but if you need to squeeze out every last
+  bit of performance, you can set this to False.
+
+* `EXTRA_DEBUG` (False) specifies whether to add a flag to keep the original
+  code information in the compiled library (-g3 for g++ compiler).  This
+  increases the size of the compiled library, but makes debugging with things
+  like gdb easier.  Probably end users will never need to use this.
+
+* `WARN` (False) specifies whether to add warning compiler flags such as
+  `-Wall`.
+
+* `COVER` (False) specifies whether to add unit test coverage of the C++ layer.
+
+* `PYTHON` (/usr/bin/env python) specifies which version of Python you are
+  planning to use GalSim with.  If you choose not to use the default here,
+  then you need to remember to use the correct Python version.
+
+### Flags about where to install the library and modules ###
+
+* `PREFIX` (/usr/local) specifies where to install the library when running
+  `scons install`.
+
+* `PYPREFIX` ([your python dir]/site-packages) specifies where to install the
+  Python modules when running `scons install`.
+
+* `FINAL_PREFIX` (`PREFIX`) specifies the final installation prefix if different
+  from PREFIX.  (This is only needed for things like fink, where they install
+  into a staging area first before copying over to the final location.)
+
+* `WITH_UPS` (False) specifies whether to install the ups directory for use
+  with EUPS.
+
+### Flags that specify where to look for external libraries ###
+
+* `FFTW_DIR` ('') specifies the root location of FFTW.  The header files should
+  be in `FFTW_DIR/include` and the library files in `FFTW_DIR/lib`.
+
+* `EIGEN_DIR` ('') specifies the root location of the Eigen header files.
+  The Core include file for Eigen should be located at `EIGEN_DIR/Eigen/Core`.
+
+* `USE_TMV` (False) specifies to use TMV rather than Eigen for the linear
+  algebra code in the C++ layer.
+
+* `TMV_DIR` ('') specifies the location of TMV if it is not in a standard
+  location.  This should be the same value as you used for PREFIX when
+  installing TMV.
+
+* `TMV_LINK` ('') specifies the location of the tmv-link file.  Normally, this is
+  in `TMV_DIR/share`, but if not, you can specify the correct location here.
+
+* `USE_BOOST` (False) specifies whether to use Boost.Python for wrapping the
+  C++ code rather than PyBind11.  If this is set, it will also use your
+  Boost installation for some header files used by the random number
+  generator code.  We bundle the boost.random implementation from a specific
+  boost version (1.48) to make sure "random" variable generation is
+  deterministic across machines and over time.  To make it fully self-
+  contained, we edited them slightly to not include many of the complicated
+  workarounds boost has for specific compilers and such.  However, those
+  workarounds can be re-enabled by setting USE_BOOST=True if your system needs
+  them.
+
+* `BOOST_DIR` ('') specifies the root location of BOOST.  The header files should
+  be in `BOOST_DIR/include/boost` and the library files in `BOOST_DIR/lib`.
+
+* `EXTRA_INCLUDE_PATH` ('') specifies extra directories in which to search for
+  header files in addition to the standard locations such as `/usr/include` and
+  `/usr/local/include` and the ones derived from the above options.  Sometimes
+  the above options do not quite work, so you may need to specify other
+  locations, which is what this option is for. 
These directories are specified + as `-I` flags to the compiler. If you are giving multiple directories, they + should be separated by colons. + +* `EXTRA_LIB_PATH` ('') specifies extra directories in which to search for + libraries in addition to the standard locations such as `/usr/lib` and + `/usr/local/lib`. These directories are specified as `-L` flags to the + linker. If you are giving multiple directories, they should be separated by + colons. To add the library `/blah/libfoo.a`, specify + `EXTRA_LIB_PATH=/blah/ EXTRA_LIBS=foo`. + +* `EXTRA_PATH` ('') specifies directories in which to search for executables + (notably the compiler, although you can also just give the full path in the + CXX parameter) in addition to the standard locations such as `/usr/bin` and + `/usr/local/bin`. If you are giving multiple directories, they should be + separated by colons. + +* `IMPORT_PATHS` (False) specifies whether to import extra path directories + from the environment variables: `PATH`, `C_INCLUDE_PATH`, `LD_LIBRARY_PATH` + and `LIBRARY_PATH`. If you have a complicated setup in which you use these + environment variables to control everything, this can be an easy way to let + SCons know about these locations. + +* `IMPORT_ENV` (True) specifies whether to import the entire environment from + the calling shell. The default is for SCons to use the same environment as + the shell from which it is called. However, sometimes it can be useful to + start with a clean environment and manually add paths for various things, in + which case you would want to set this to False. + +* `EXTRA_LIBS` ('') specifies libraries to use in addition to what SCons finds + on its own. This might be useful if you have a non-standard name for one of + the external libraries. e.g. If you want to use the Intel MKL library for the + FFTW library, SCons will not automatically try that, so you could add those + libraries here. 
If there is more than one, they should be quoted with spaces
+  between the different libraries.  e.g.
+  `EXTRA_LIBS="mkl_intel mkl_intel_thread mkl_core"`
+
+* `IMPORT_PREFIX` (True) specifies whether to include the directories
+  `PREFIX/include`, `PREFIX/lib` and `PREFIX/bin` as part of the standard
+  path lists.  Normally, you install everything in the same place, so it is
+  useful to search those locations for some of the prerequisite packages, so
+  the default is True.  But occasionally, this might be inconvenient, so you
+  can turn this feature off.
+
+* `DYLD_LIBRARY_PATH` ('') Set the DYLD_LIBRARY_PATH inside of SCons.
+  Particularly useful on El Capitan (and later), since Apple strips out
+  DYLD_LIBRARY_PATH from the environment that SCons sees, so if you need it,
+  this option enables SCons to set it back in for you by doing
+  `scons DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH`.
+
+* `DYLD_FALLBACK_LIBRARY_PATH` ('') Set the DYLD_FALLBACK_LIBRARY_PATH inside
+  of SCons.  cf. DYLD_LIBRARY_PATH for why this may be useful.
+
+* `LD_LIBRARY_PATH` ('') Set the LD_LIBRARY_PATH inside of SCons.
+  cf. DYLD_LIBRARY_PATH for why this may be useful.
+
+### Miscellaneous flags ###
+
+* `PYTEST` (pytest) specifies which version of pytest you want to use
+  for running the unit tests.  If you specified a non-default Python, then
+  there is a possibility that the standard pytest executable in your path
+  will not work (since it might be for a different version of Python).  In
+  that case, specify the correct pytest here.
+
+* `CACHE_LIB` (True) specifies whether to cache the results of the library
+  checks.  While you are working on getting the prerequisites installed
+  properly, it can be useful to set this to False to force SCons to redo all of
+  its library checks each time.  Once you have a successful build, you should
+  set it back to True so that later builds can skip those checks.
+ +* `WITH_PROF` (False) specifies whether to use the compiler flag `-pg` to + include profiling info for `gprof`. + +* `MEM_TEST` (False) specifies whether to test the code for memory leaks. + +* `TMV_DEBUG` (False) specifies whether to turn on extra (slower) debugging + statements within the TMV library. + +* `USE_UNKNOWN_VARS` (False) specifies whether to accept scons parameters other + than the ones listed here. Normally, another name would indicate a typo, so + we catch it and let you know. But if you want to use other scons options + that we did not list here, you would want to also set this to True. diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 00000000000..b9125fd1bab --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,5 @@ +recursive-include galsim *.py +include *.md +include LICENSE +recursive-include share * +global-exclude .gitignore SCons* diff --git a/README.md b/README.md index 81d34df9fa1..83e9d01eb32 100644 --- a/README.md +++ b/README.md @@ -25,9 +25,24 @@ development. For details of algorithms and code validation, please see http://adsabs.harvard.edu/abs/2015A%26C....10..121R -Distribution +Installation ------------ +Normally, to install GalSim, you should just need to run + + pip install galsim + +Depending on your setup, you may need to add either sudo to the start +or --user to the end of this command as you normally do when pip installing +packages. + +See INSTALL.md for full details including one dependency (FFTW) that is not +pip installable, so you may need to install before running this command. + + +Source Distribution +------------------- + The current released version of GalSim is version 1.5. 
To get the code, you can grab the tarball (or zip file) from @@ -42,11 +57,6 @@ Or clone the repository with either of the following: git clone git@github.com:GalSim-developers/GalSim.git git clone https://github.com/GalSim-developers/GalSim.git -although after doing so, if you are not a developer, you should probably -checkout the latest release tag, rather than use the master branch: - - git checkout v1.5.0 - The code is also distributed via Fink, Macports, and Homebrew for Mac users. See INSTALL.md for more information. @@ -104,26 +114,15 @@ If none of these communication avenues seem appropriate, you can also contact us directly at the above email addresses. -Installation ------------- - -For installation instructions, please see the file `INSTALL.md` in the main -repository directory. - -There are tagged versions of the code corresponding to specific project -releases and development milestones. (For more info, see the "Tagged versions" -section below, and `devel/git.txt`) - - Getting started --------------- -* Install the code as in `INSTALL.md`. +* Install the code as above (see also INSTALL.md). * Optional, but recommended whenever you try a new version of the code: run the unit tests to make sure that there are no errors. You can do this by running - `scons tests`. If there are any issues, please open an Issue on our GitHub - page. + `python setup.py test`. If there are any issues, please open an Issue on our + GitHub page. * Optional: run `doxygen` to generate documentation, using `Doxyfile` in the main repository directory to specify all doxygen settings. Alternatively, @@ -195,6 +194,7 @@ As the project develops through further versions, and adds further capabilities to the software, more demo scripts may be added to `examples/` to illustrate what GalSim can do. 
+ Tagged versions --------------- @@ -288,20 +288,20 @@ Summary of planned future development We plan to add the following additional capabilities in future versions of GalSim: -* Easier installation -- removing the boost dependency in particular. We are - planning to have v2.0 be pip installable, rather than using SCons, which - will make it much easier to install for many systems. This requires ripping - out the Boost Python wrapping and replacing with either cffi or pybind11 - (probably the latter, but still TBD). This effort is proceeding in issue - #809, with changes being merged to branch "noboost". - * Wavelength-dependent photon shooting. Currently, the chromatic functionality is only available for FFT rendering, which is quite slow. For most use cases, photon shooting should be orders of magnitude faster, so this is - a near-term priority to get done. (cf. Issue #540.) + a near-term priority to get done. (cf. Issue #540) * Simulating more sophisticated detector defects and image artifacts. E.g. - cosmic rays, saturation, bleeding, ... + vignetting, fringing, cosmic rays, saturation, bleeding, ... (cf. Issues + #553, #828) + +* Proper modeling of extinction due to dust. (cf. Issues #541, #550) + +* Various speed improvements. (cf. Issues #205, #566, #875, #935) + +* Switch docs to Sphinx. (cf. Issue #160) There are many others as well. 
Please see diff --git a/SConstruct b/SConstruct index dc7bd16539a..4908565dabf 100644 --- a/SConstruct +++ b/SConstruct @@ -84,13 +84,15 @@ opts.Add(PathVariable('FINAL_PREFIX', '', PathVariable.PathAccept)) opts.Add(BoolVariable('WITH_UPS','Install ups/ directory for use with EUPS', False)) +opts.Add('FFTW_DIR','Explicitly give the fftw3 prefix','') +opts.Add('EIGEN_DIR','Explicitly give the Eigen prefix','') + +opts.Add(BoolVariable('USE_TMV','Use TMV for linear algebra, rather than Eigen',False)) opts.Add('TMV_DIR','Explicitly give the tmv prefix','') opts.Add('TMV_LINK','File that contains the linking instructions for TMV','') -opts.Add('FFTW_DIR','Explicitly give the fftw3 prefix','') + +opts.Add(BoolVariable('USE_BOOST','Use boost python for the wrapping, rather than pybind11',False)) opts.Add('BOOST_DIR','Explicitly give the boost prefix','') -opts.Add(BoolVariable('USE_BOOST', - 'Use the local boost installation for optional boost header files', - False)) opts.Add(PathVariable('EXTRA_INCLUDE_PATH', 'Extra paths for header files (separated by : if more than 1)', @@ -171,7 +173,6 @@ def ClearCache(): shutil.rmtree(".sconf_temp") def GetMacVersion(): - print('Mac version is',platform.mac_ver()[0]) ver = platform.mac_ver()[0].split('.') if len(ver) >= 2: return ver[:2] @@ -276,6 +277,7 @@ def ErrorExit(*args, **kwargs): if sys.platform.find('darwin') != -1: major, minor = GetMacVersion() if int(major) > 10 or int(minor) >= 11: + print('Mac version is %s.%s'%(major,minor)) print() print('Starting with El Capitan (OSX 10.11), Apple instituted a new policy called') print('"System Integrity Protection" (SIP) where they strip "dangerous" environment') @@ -392,7 +394,7 @@ def BasicCCFlags(env): else: env.Replace(CCFLAGS=['-O2']) sse_flags = ['-msse2', '-msse'] - env.Append(CCFLAGS=['-std=c++98','-fno-strict-aliasing']) + env.Append(CCFLAGS=['-fno-strict-aliasing']) # Unfortunately this next flag requires strict-aliasing, but allowing that # opens up a Pandora's box 
of bugs and warnings, so I don't want to do that. #env.Append(CCFLAGS=['-ftree-vectorize']) @@ -411,7 +413,6 @@ def BasicCCFlags(env): else: env.Replace(CCFLAGS=['-O2']) sse_flags = ['-msse2', '-msse'] - env.Append(CCFLAGS=['-std=c++98']) if env['WITH_PROF']: env.Append(CCFLAGS=['-pg']) env.Append(LINKFLAGS=['-pg']) @@ -421,7 +422,7 @@ def BasicCCFlags(env): env.Append(CCFLAGS=['-g3']) elif compiler == 'icpc': - env.Replace(CCFLAGS=['-O2','-std=c++98']) + env.Replace(CCFLAGS=['-O2']) sse_flags = ['-msse2', '-msse'] if version >= 10: env.Append(CCFLAGS=['-vec-report0']) @@ -532,19 +533,6 @@ def AddOpenMPFlag(env): flag = ['-mp','--exceptions'] ldflag = ['-mp'] xlib = ['pthread'] - elif compiler == 'cl': - #flag = ['/openmp'] - #ldflag = ['/openmp'] - #xlib = [] - # The Express edition, which is the one I have, doesn't come with - # the file omp.h, which we need. So I am unable to test TMV's - # OpenMP with cl. - # I believe the Professional edition has full OpenMP support, - # so if you have that, the above lines might work for you. - # Just uncomment those, and commend the below three lines. - print('No OpenMP support for cl') - env['WITH_OPENMP'] = False - return else: print('\nWARNING: No OpenMP support for compiler ',compiler,'\n') env['WITH_OPENMP'] = False @@ -717,7 +705,7 @@ def AddDepPaths(bin_paths,cpp_paths,lib_paths): """ - types = ['BOOST', 'TMV', 'FFTW'] + types = ['BOOST', 'TMV', 'EIGEN', 'FFTW'] for t in types: dirtag = t+'_DIR' @@ -727,9 +715,13 @@ def AddDepPaths(bin_paths,cpp_paths,lib_paths): print('WARNING: could not find specified %s = %s'%(dirtag,env[dirtag])) continue - AddPath(bin_paths, os.path.join(tdir, 'bin')) - AddPath(lib_paths, os.path.join(tdir, 'lib')) - AddPath(cpp_paths, os.path.join(tdir, 'include')) + if t == 'EIGEN': + # Eigen doesn't put its header files in an include subdirectory. 
+ AddPath(cpp_paths, tdir) + else: + AddPath(bin_paths, os.path.join(tdir, 'bin')) + AddPath(lib_paths, os.path.join(tdir, 'lib')) + AddPath(cpp_paths, os.path.join(tdir, 'include')) def AddExtraPaths(env): @@ -910,6 +902,15 @@ def TryRunResult(config,text,name): return ok +def CheckFlags(context,try_flags,source_file): + init_flags = context.env['CCFLAGS'] + context.env.PrependUnique(CCFLAGS=try_flags) + result = context.TryCompile(source_file,'.cpp') + if not result: + context.env.Replace(CCFLAGS=init_flags) + return result + + def CheckLibsSimple(config,try_libs,source_file,prepend=True): init_libs = [] if 'LIBS' in config.env._dict.keys(): @@ -1031,16 +1032,26 @@ int main() config.Message('Checking for correct FFTW linkage... ') if not config.TryCompile(fftw_source_file,'.cpp'): ErrorExit( - 'Error: fftw file failed to compile.', - 'Check that the correct location is specified for FFTW_DIR') + 'Error: fftw file failed to compile.') result = ( CheckLibsFull(config,[''],fftw_source_file) or CheckLibsFull(config,['fftw3'],fftw_source_file) ) + if not result: + # Try to get the correct library location from pyfftw3 + try: + import fftw3 + config.env.Append(LIBPATH=fftw3.lib.libdir) + result = CheckLibsFull(config,[fftw3.lib.libbase],fftw_source_file) + except ImportError: + pass if not result: ErrorExit( 'Error: fftw file failed to link correctly', - 'Check that the correct location is specified for FFTW_DIR') + 'You should either specify the location of fftw3 as FFTW_DIR=... 
' + 'or install pyfftw3 using: \n' + ' pip install pyfftw3\n' + 'which can often find it automatically.') config.Result(1) return 1 @@ -1090,6 +1101,76 @@ int main() return 0; } """ + tmv_version_file = """ +#include +#include "TMV.h" +int main() +{ std::cout< 10 or int(minor) >= 7) and '-latlas' not in tmv_link and + ('-lblas' in tmv_link or '-lcblas' in tmv_link)): + print('WARNING: The Apple BLAS library has been found not to be thread safe on') + print(' Mac OS versions 10.7+, even across multiple processes (i.e. not') + print(' just multiple threads in the same process.) The symptom is that') + print(' `scons tests` may hang when running pytest using multiple') + print(' processes.') + if xcode_version is None: + # If we couldn't run xcodebuild, then don't give any more information about this. + pass + elif xcode_version < '5.1': + print(' This seems to have been partially fixed with XCode 5.1, so we') + print(' recommend upgrading to the latest XCode version. However, even') + print(' with 5.1, some systems still seem to have problems.') + env['BAD_BLAS'] = True + else: + print(' This seems to have been partially fixed with XCode 5.1, so there') + print(' is a good chance you will not have any problems. But there are') + print(' still occasional systems that fail when using multithreading with') + print(' programs or modules that link to the BLAS library (such as GalSim).') + print(' If you do have problems, the solution is to recompile TMV with') + print(' the SCons option "WITH_BLAS=false".') + + # ParseFlags doesn't know about -fopenmp being a LINKFLAG, so it + # puts it into CCFLAGS instead. Move it over to LINKFLAGS before + # merging everything. 
+ tmv_link_dict = config.env.ParseFlags(tmv_link) + config.env.Append(LIBS=tmv_link_dict['LIBS']) + config.env.AppendUnique(LINKFLAGS=tmv_link_dict['LINKFLAGS']) + config.env.AppendUnique(LINKFLAGS=tmv_link_dict['CCFLAGS']) + config.env.AppendUnique(LIBPATH=tmv_link_dict['LIBPATH']) + + compiler = config.env['CXXTYPE'] + if compiler == 'g++' and '-openmp' in config.env['LINKFLAGS']: + config.env['LINKFLAGS'].remove('-openmp') + config.env.AppendUnique(LINKFLAGS='-fopenmp') + print('Checking for correct TMV linkage... (this may take a little while)') config.Message('Checking for correct TMV linkage... ') @@ -1517,6 +1598,87 @@ PyMODINIT_FUNC initcheck_tmv(void) return 1 +def CheckEigen(config): + eigen_source_file = """ +#include "Python.h" +#include "Eigen/Core" +#include "Eigen/Cholesky" + +static void useEigen() { + Eigen::MatrixXd S(10,10); + S.setConstant(4.); + S.diagonal().array() += 50.; + Eigen::MatrixXd m(10,3); + m.setConstant(2.); + S.llt().solveInPlace(m); +} + +static PyObject* run(PyObject* self, PyObject* args) +{ + useEigen(); + return Py_BuildValue("i", 23); +} + +static PyMethodDef Methods[] = { + {"run", run, METH_VARARGS, "return 23"}, + {NULL, NULL, 0, NULL} +}; + +#if PY_MAJOR_VERSION >= 3 + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "check_eigen", + NULL, + -1, + Methods, + NULL, + NULL, + NULL, + NULL +}; + +PyMODINIT_FUNC PyInit_check_eigen(void) + +#else + +PyMODINIT_FUNC initcheck_eigen(void) + +#endif +{ +#if PY_MAJOR_VERSION >= 3 + return PyModule_Create(&moduledef); +#else + Py_InitModule("check_eigen", Methods); +#endif +} +""" + config.Message('Checking if we can build module using Eigen... 
') + + result = config.TryCompile(eigen_source_file,'.cpp') + if not result: + ErrorExit('Unable to compile a module using eigen') + + result = CheckModuleLibs(config,[],eigen_source_file,'check_eigen') + if not result: + ErrorExit('Unable to build a python loadable module that uses eigen') + + config.Result(1) + + eigen_version_file = """ +#include +#include "Eigen/Core" +int main() { + std::cout< + +int check_pb_run() { return 23; } + +PYBIND11_MODULE(check_pb, check_pb) { + check_pb.def("run",&check_pb_run); +} +""" + result = (CheckFlags(config, '', pb_source_file) or + CheckFlags(config, '-std=c++14', pb_source_file) or + CheckFlags(config, '-std=c++11', pb_source_file)) + if not result: + ErrorExit("Unable to compile C++ source code using pybind11:\n" + python) + + result = CheckModuleLibs(config,[''],pb_source_file,'check_pb') + if not result: + ErrorExit("Unable to make a python module with pybind11:\n" + python) + + config.Result(result) + return result + + def CheckBoostPython(config): bp_source_file = """ @@ -1670,7 +1872,8 @@ BOOST_PYTHON_MODULE(check_bp) { """ config.Message('Checking if we can build against Boost.Python... ') - result = config.TryCompile(bp_source_file,'.cpp') + result = (CheckFlags(config, '-std=c++98', bp_source_file) or + CheckFlags(config, '', bp_source_file)) if not result: ErrorExit('Unable to compile a file with #include "boost/python.hpp"') @@ -1697,6 +1900,7 @@ BOOST_PYTHON_MODULE(check_bp) { config.Result(1) return 1 + # If the compiler is incompatible with the compiler that was used to build python, # then there can be problems with the exception passing between the C++ layer and the # python layer. 
We don't know any solution to this, but it's worth letting the user @@ -1712,16 +1916,26 @@ def CheckPythonExcept(config): #pragma GCC diagnostic ignored "-Wunused-local-typedefs" #endif #endif -#define BOOST_NO_CXX11_SMART_PTR -#include "boost/python.hpp" #include +#ifdef USE_BOOST +#define BOOST_NO_CXX11_SMART_PTR +#include "boost/python.hpp" +#else +#include +#endif void run_throw() { throw std::runtime_error("test error handling"); } +#ifdef USE_BOOST BOOST_PYTHON_MODULE(test_throw) { - boost::python::def("run",&run_throw); + boost::python::def("run", &run_throw); +} +#else +PYBIND11_MODULE(test_throw, test_throw) { + test_throw.def("run", &run_throw); } +#endif """ py_source_file = """ import test_throw @@ -1842,118 +2056,64 @@ def DoCppChecks(config): Check for some headers and libraries. """ - ##### - # Check for fftw3: - - # First do a simple check that the library and header are in the path. + # FFTW if not config.CheckHeader('fftw3.h',language='C++'): - ErrorExit( - 'fftw3.h not found', - 'You should specify the location of fftw3 as FFTW_DIR=...') - + # We have our own version of fftw3.h in case it's not easy to find this. + # (The library location is often accessible via pyfftw3 if it is installed somewhere.) + print('Using local fftw3.h file in GalSim/include/fftw3') + config.env.Append(CPPPATH='#include/fftw3') config.CheckFFTW() - ##### - # Check for boost: - config.CheckBoost() - - ##### - # Check for tmv: - - # First do a simple check that the library and header are in the path. - # We check the linking with the BLAS library below. 
- if not config.CheckHeader('TMV.h',language='C++'): - ErrorExit( - 'TMV.h not found', - 'You should specify the location of TMV as TMV_DIR=...') + # Boost + if config.env['USE_BOOST']: + config.env.AppendUnique(CPPDEFINES=['USE_BOOST']) + config.CheckBoost() - tmv_version_file = """ -#include -#include "TMV.h" -int main() -{ std::cout< 10 or int(minor) >= 7) and '-latlas' not in tmv_link and - ('-lblas' in tmv_link or '-lcblas' in tmv_link)): - print('WARNING: The Apple BLAS library has been found not to be thread safe on') - print(' Mac OS versions 10.7+, even across multiple processes (i.e. not') - print(' just multiple threads in the same process.) The symptom is that') - print(' `scons tests` may hang when running pytest using multiple') - print(' processes.') - if xcode_version is None: - # If we couldn't run xcodebuild, then don't give any more information about this. + # Eigen + else: + ok = config.CheckHeader('Eigen/Core',language='C++') + if not ok: + # Try to get the correct include directory from eigency + try: + import eigency + print('Trying eigency installation: ',eigency.get_includes()[2]) + config.env.Append(CPPPATH=eigency.get_includes()[2]) + ok = config.CheckHeader('Eigen/Core',language='C++') + except ImportError: pass - elif xcode_version < '5.1': - print(' This seems to have been partially fixed with XCode 5.1, so we') - print(' recommend upgrading to the latest XCode version. However, even') - print(' with 5.1, some systems still seem to have problems.') - env['BAD_BLAS'] = True - else: - print(' This seems to have been partially fixed with XCode 5.1, so there') - print(' is a good chance you will not have any problems. 
But there are') - print(' still occasional systems that fail when using multithreading with') - print(' programs or modules that link to the BLAS library (such as GalSim).') - print(' If you do have problems, the solution is to recompile TMV with') - print(' the SCons option "WITH_BLAS=false".') - - # ParseFlags doesn't know about -fopenmp being a LINKFLAG, so it - # puts it into CCFLAGS instead. Move it over to LINKFLAGS before - # merging everything. - tmv_link_dict = config.env.ParseFlags(tmv_link) - config.env.Append(LIBS=tmv_link_dict['LIBS']) - config.env.AppendUnique(LINKFLAGS=tmv_link_dict['LINKFLAGS']) - config.env.AppendUnique(LINKFLAGS=tmv_link_dict['CCFLAGS']) - config.env.AppendUnique(LIBPATH=tmv_link_dict['LIBPATH']) - - if compiler == 'g++' and '-openmp' in config.env['LINKFLAGS']: - config.env['LINKFLAGS'].remove('-openmp') - config.env.AppendUnique(LINKFLAGS='-fopenmp') + if not ok: + ErrorExit( + 'Eigen/Core not found', + 'You should either specify the location of Eigen as EIGEN_DIR=... 
' + 'or install eigency using: \n' + ' pip install git+git://github.com/wouterboomsma/eigency.git') - # Finally, do the tests for the TMV library linkage: - config.CheckTMV() def DoPyChecks(config): # These checks are only relevant for the pysrc compilation: config.CheckPython() - config.CheckPyTMV() + if config.env['USE_TMV']: + config.CheckPyTMV() + else: + config.CheckEigen() config.CheckNumPy() config.CheckPyFITS() config.CheckFuture() config.CheckCoord() - config.CheckBoostPython() + if config.env['USE_BOOST']: + config.CheckBoostPython() + else: + config.CheckPyBind11() config.CheckPythonExcept() @@ -2010,14 +2170,10 @@ def DoConfig(env): print('Debugging turned off') env.AppendUnique(CPPDEFINES=['NDEBUG']) else: - if env['TMV_DEBUG']: + if env['USE_TMV'] and env['TMV_DEBUG']: print('TMV Extra Debugging turned on') env.AppendUnique(CPPDEFINES=['TMV_EXTRA_DEBUG']) - if env['USE_BOOST']: - print('Using local boost header files') - env.AppendUnique(CPPDEFINES=['USE_BOOST']) - # Don't bother with checks if doing scons -c if not env.GetOption('clean'): # Sometimes when you are changing around things in other directories, SCons doesn't notice. @@ -2040,17 +2196,25 @@ def DoConfig(env): config = pyenv.Configure(custom_tests = { 'CheckPython' : CheckPython , 'CheckPyTMV' : CheckPyTMV , + 'CheckEigen' : CheckEigen , 'CheckNumPy' : CheckNumPy , 'CheckPyFITS' : CheckPyFITS , 'CheckFuture' : CheckFuture , 'CheckCoord' : CheckCoord , + 'CheckPyBind11' : CheckPyBind11 , 'CheckBoostPython' : CheckBoostPython , 'CheckPythonExcept' : CheckPythonExcept , }) DoPyChecks(config) pyenv = config.Finish() - env['final_messages'] = pyenv['final_messages'] + # Make sure any -std= compiler flags required for pysrc get propagated back to the + # main environment. 
+ for flag in pyenv['CCFLAGS']: + if 'std=' in flag: + env.AppendUnique(CCFLAGS=[flag]) + + env['final_messages'] = pyenv['final_messages'] env['pyenv'] = pyenv # Turn the cache back on now, since we always want it for the main compilation steps. diff --git a/bin/SConscript b/bin/SConscript index c631c97f272..d4c3def773b 100644 --- a/bin/SConscript +++ b/bin/SConscript @@ -7,7 +7,7 @@ RunUninstall = env['_RunUninstall'] install_subdir = 'bin' -scripts = [ 'galsim' , 'galsim_yaml', 'galsim_json', 'galsim_download_cosmos' ] +scripts = [ 'galsim' , 'galsim_download_cosmos' ] targets = [ env.ExecScript(script, script + '.py') for script in scripts ] AlwaysBuild(targets) diff --git a/bin/galsim.py b/bin/galsim.py index 7bb34ac7eb0..d5db452019b 100644 --- a/bin/galsim.py +++ b/bin/galsim.py @@ -1,274 +1,6 @@ -# Copyright (c) 2012-2017 by the GalSim developers team on GitHub -# https://github.com/GalSim-developers -# -# This file is part of GalSim: The modular galaxy image simulation toolkit. -# https://github.com/GalSim-developers/GalSim -# -# GalSim is free software: redistribution and use in source and binary forms, -# with or without modification, are permitted provided that the following -# conditions are met: -# -# 1. Redistributions of source code must retain the above copyright notice, this -# list of conditions, and the disclaimer given in the accompanying LICENSE -# file. -# 2. Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions, and the disclaimer given in the documentation -# and/or other materials provided with the distribution. -# -""" -The main driver program for making images of galaxies whose parameters are specified -in a configuration file. -""" -from __future__ import print_function +# Equivalent to python -m galsim ... 
+import runpy -import sys -import os -import logging -import pprint - -# The only wrinkle about letting this executable be called galsim is that we want to -# make sure that `import galsim` doesn't import itself. We want it to import the real -# galsim module of course. So the solution is to get rid of the current directory -# from python's default search path -temp = sys.path[0] -sys.path = sys.path[1:] -import galsim -# Now put it back in case anyone else relies on this feature. -sys.path = [temp] + sys.path - -def parse_args(): - """Handle the command line arguments using either argparse (if available) or optparse. - """ - - # Short description strings common to both parsing mechanisms - version_str = "GalSim Version %s"%galsim.version - description = "galsim: configuration file parser for %s. "%version_str - description += "See https://github.com/GalSim-developers/GalSim/wiki/Config-Documentation " - description += "for documentation about using this program." - epilog = "Works with both YAML and JSON markup formats." - - try: - import argparse - - # Build the parser and add arguments - parser = argparse.ArgumentParser(description=description, add_help=True, epilog=epilog) - parser.add_argument('config_file', type=str, nargs='?', help='the configuration file') - parser.add_argument( - 'variables', type=str, nargs='*', - help='additional variables or modifications to variables in the config file. ' + - 'e.g. 
galsim foo.yaml output.nproc=-1 gal.rotate="{type : Random}"') - parser.add_argument( - '-v', '--verbosity', type=int, action='store', default=1, choices=(0, 1, 2, 3), - help='integer verbosity level: min=0, max=3 [default=1]') - parser.add_argument( - '-l', '--log_file', type=str, action='store', default=None, - help='filename for storing logging output [default is to stream to stdout]') - parser.add_argument( - '-f', '--file_type', type=str, action='store', choices=('yaml','json'), - default=None, - help=('type of config_file: yaml or json are currently supported. ' + - '[default is to automatically determine the type from the extension]')) - parser.add_argument( - '-m', '--module', type=str, action='append', default=None, - help='python module to import before parsing config file') - parser.add_argument( - '-p', '--profile', action='store_const', default=False, const=True, - help='output profiling information at the end of the run') - parser.add_argument( - '-n', '--njobs', type=int, action='store', default=1, - help='set the total number of jobs that this run is a part of. ' + - 'Used in conjunction with -j (--job)') - parser.add_argument( - '-j', '--job', type=int, action='store', default=1, - help='set the job number for this particular run. Must be in [1,njobs]. 
' + - 'Used in conjunction with -n (--njobs)') - parser.add_argument( - '-x', '--except_abort', action='store_const', default=False, const=True, - help='abort the whole job whenever any file raises an exception rather than ' + - 'continuing on') - parser.add_argument( - '--version', action='store_const', default=False, const=True, - help='show the version of GalSim') - args = parser.parse_args() - - if args.config_file == None: - if args.version: - print(version_str) - else: - parser.print_help() - sys.exit() - elif args.version: - print(version_str) - - except ImportError: - # Use optparse instead - import optparse - - # Usage string not automatically generated for optparse, so generate it - usage = """usage: galsim [-h] [-v {0,1,2,3}] [-l LOG_FILE] [-f {yaml,json}] [-m MODULE] - [--version] config_file [variables ...]""" - # Build the parser - parser = optparse.OptionParser(usage=usage, epilog=epilog, description=description) - # optparse only allows string choices, so take verbosity as a string and make it int later - parser.add_option( - '-v', '--verbosity', type="choice", action='store', choices=('0', '1', '2', '3'), - default='1', help='integer verbosity level: min=0, max=3 [default=1]') - parser.add_option( - '-l', '--log_file', type=str, action='store', default=None, - help='filename for storing logging output [default is to stream to stdout]') - parser.add_option( - '-f', '--file_type', type="choice", action='store', choices=('yaml','json'), - default=None, - help=('type of config_file: yaml or json are currently supported. 
' + - '[default is to automatically determine the type from the extension]')) - parser.add_option( - '-m', '--module', type=str, action='append', default=None, - help='python module to import before parsing config file') - parser.add_option( - '-p', '--profile', action='store_const', default=False, const=True, - help='output profiling information at the end of the run') - parser.add_option( - '-n', '--njobs', type=int, action='store', default=1, - help='set the total number of jobs that this run is a part of. ' + - 'Used in conjunction with -j (--job)') - parser.add_option( - '-j', '--job', type=int, action='store', default=1, - help='set the job number for this particular run. Must be in [1,njobs]. ' + - 'Used in conjunction with -n (--njobs)') - parser.add_option( - '-x', '--except_abort', action='store_const', default=False, const=True, - help='abort the whole job whenever any file raises an exception rather than ' + - 'just reporting the exception and continuing on') - parser.add_option( - '--version', action='store_const', default=False, const=True, - help='show the version of GalSim') - (args, posargs) = parser.parse_args() - - # Remembering to convert to an integer type - args.verbosity = int(args.verbosity) - - # Store the positional arguments in the args object as well: - if len(posargs) == 0: - if args.version: - print(version_str) - else: - parser.print_help() - sys.exit() - else: - args.config_file = posargs[0] - args.variables = posargs[1:] - if args.version: - print(version_str) - - # Return the args - return args - -def ParseVariables(variables, logger): - new_params = {} - for v in variables: - logger.debug('Parsing additional variable: %s',v) - if '=' not in v: - raise ValueError('Improper variable specification. 
Use field.item=value.') - key, value = v.split('=',1) - # Try to evaluate the value string to allow people to input things like - # gal.rotate='{type : Rotate}' - # But if it fails (particularly with json), just assign the value as a string. - try: - try: - import yaml - value = yaml.load(value) - except ImportError: - # Don't require yaml. json usually works for these. - import json - value = json.loads(value) - except: - logger.debug('Unable to parse %s. Treating it as a string.'%value) - new_params[key] = value - - return new_params - - -def AddModules(config, modules): - if modules: - if 'modules' not in config: - config['modules'] = modules - else: - config['modules'].extend(modules) - -def main(): - args = parse_args() - - if args.njobs < 1: - raise ValueError("Invalid number of jobs %d"%args.njobs) - if args.job < 1: - raise ValueError("Invalid job number %d. Must be >= 1"%args.job) - if args.job > args.njobs: - raise ValueError("Invalid job number %d. Must be <= njobs (%d)"%(args.job,args.njobs)) - - # Parse the integer verbosity level from the command line args into a logging_level string - logging_levels = { 0: logging.CRITICAL, - 1: logging.WARNING, - 2: logging.INFO, - 3: logging.DEBUG } - logging_level = logging_levels[args.verbosity] - - # If requested, load the profiler - if args.profile: - import cProfile, pstats, io - pr = cProfile.Profile() - pr.enable() - - # Setup logging to go to sys.stdout or (if requested) to an output file - if args.log_file is None: - logging.basicConfig(format="%(message)s", level=logging_level, stream=sys.stdout) - else: - logging.basicConfig(format="%(message)s", level=logging_level, filename=args.log_file) - logger = logging.getLogger('galsim') - - logger.warn('Using config file %s', args.config_file) - all_config = galsim.config.ReadConfig(args.config_file, args.file_type, logger) - logger.debug('Successfully read in config file.') - - # Process each config document - for config in all_config: - - if 'root' not in 
config: - config['root'] = os.path.splitext(args.config_file)[0] - - # Parse the command-line variables: - new_params = ParseVariables(args.variables, logger) - - # Add modules to the config['modules'] list - AddModules(config, args.module) - - # Profiling doesn't work well with multiple processes. We'll need to separately - # enable profiling withing the workers and output when the process ends. Set - # config['profile'] = True to enable this. - if args.profile: - config['profile'] = True - - logger.debug("Process config dict: \n%s", pprint.pformat(config)) - - # Process the configuration - galsim.config.Process(config, logger, njobs=args.njobs, job=args.job, new_params=new_params, - except_abort=args.except_abort) - - if args.profile: - # cf. example code here: https://docs.python.org/2/library/profile.html - pr.disable() - try: - from StringIO import StringIO - except ImportError: - from io import StringIO - s = StringIO() - sortby = 'time' # Note: This is now called tottime, but time seems to be a valid - # alias for this that is backwards compatible to older versions - # of pstats. - ps = pstats.Stats(pr, stream=s).sort_stats(sortby).reverse_order() - ps.print_stats() - logger.error(s.getvalue()) - - -if __name__ == "__main__": - main() +if __name__ == '__main__': + runpy.run_module('galsim') diff --git a/bin/galsim_download_cosmos.py b/bin/galsim_download_cosmos.py index 2916f962cef..e055f7ae62b 100644 --- a/bin/galsim_download_cosmos.py +++ b/bin/galsim_download_cosmos.py @@ -1,466 +1,5 @@ -# Copyright (c) 2012-2017 by the GalSim developers team on GitHub -# https://github.com/GalSim-developers -# -# This file is part of GalSim: The modular galaxy image simulation toolkit. -# https://github.com/GalSim-developers/GalSim -# -# GalSim is free software: redistribution and use in source and binary forms, -# with or without modification, are permitted provided that the following -# conditions are met: -# -# 1. 
Redistributions of source code must retain the above copyright notice, this -# list of conditions, and the disclaimer given in the accompanying LICENSE -# file. -# 2. Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions, and the disclaimer given in the documentation -# and/or other materials provided with the distribution. -# -""" -A program to download the COSMOS RealGalaxy catalog for use with GalSim. -""" -from __future__ import print_function -from builtins import input - -import os, sys, tarfile, subprocess, shutil, json -try: - from urllib2 import urlopen -except: - from urllib.request import urlopen - -# Since this will be installed in the same directory as our galsim executable, -# we need to do the same trick about changing the path so it imports the real -# galsim module, not that executable. -temp = sys.path[0] -sys.path = sys.path[1:] import galsim -sys.path = [temp] + sys.path - -script_name = os.path.basename(__file__) - - -def parse_args(): - """Handle the command line arguments using either argparse (if available) or optparse. - """ - - # Another potential option we might want to add is to download the smaller training sample - # rather than the full 4 GB file. Right now, this just downloads the larger catalog. - - # Short description strings common to both parsing mechanisms - description = "This program will download the COSMOS RealGalaxy catalog and images\n" - description += "and place them in the GalSim share directory so they can be used as\n " - description += "the default files for the RealGalaxyCatalog class.\n" - description += "See https://github.com/GalSim-developers/GalSim/wiki/RealGalaxy%20Data\n" - description += "for more details about the files being downloaded." 
- epilog = "Note: The unpacked files total almost 6 GB in size!\n" - - try: - import argparse - - # Build the parser and add arguments - parser = argparse.ArgumentParser(description=description, epilog=epilog, add_help=True) - parser.add_argument( - '-v', '--verbosity', type=int, action='store', default=2, choices=(0, 1, 2, 3), - help='Integer verbosity level: min=0, max=3 [default=2]') - parser.add_argument( - '-f', '--force', action='store_const', default=False, const=True, - help='Force overwriting the current file if one exists') - parser.add_argument( - '-q', '--quiet', action='store_const', default=False, const=True, - help="Don't ask about re-downloading an existing file. (implied by verbosity=0)") - parser.add_argument( - '-u', '--unpack', action='store_const', default=False, const=True, - help='Re-unpack the tar file if not downloading') - parser.add_argument( - '--save', action='store_const', default=False, const=True, - help="Save the tarball after unpacking.") - parser.add_argument( - '-d', '--dir', action='store', default=None, - help="Install into an alternate directory and link from the share/galsim directory") - parser.add_argument( - '-s', '--sample', action='store', default='25.2', choices=('23.5', '25.2'), - help="Flux limit for sample to download; either 23.5 or 25.2") - parser.add_argument( - '--nolink', action='store_const', default=False, const=True, - help="Don't link to the alternate directory from share/galsim") - args = parser.parse_args() - - except ImportError: - # Use optparse instead - import optparse - - # Usage string not automatically generated for optparse, so generate it - usage = "usage: %s [-h] [-v {0,1,2,3}] [-f] [-q] [-u] [-s] [-d] [--nolink]"%script_name - # Build the parser - parser = optparse.OptionParser(usage=usage, description=description, epilog=epilog) - # optparse only allows string choices, so take verbosity as a string and make it int later - parser.add_option( - '-v', '--verbosity', type="choice", action='store', 
choices=('0', '1', '2', '3'), - default='2', help='Integer verbosity level: min=0, max=3 [default=2]') - parser.add_option( - '-f', '--force', action='store_const', default=False, const=True, - help='Force overwriting the current file if one exists') - parser.add_option( - '-q', '--quiet', action='store_const', default=False, const=True, - help="Don't ask about re-downloading an existing file. (implied by verbosity=0)") - parser.add_option( - '-u', '--unpack', action='store_const', default=False, const=True, - help='Re-unpack the tar file if not downloading') - parser.add_option( - '--save', action='store_const', default=False, const=True, - help="Save the tarball after unpacking.") - parser.add_option( - '-d', '--dir', action='store', default=None, - help="Install into an alternate directory and link from the share/galsim directory") - parser.add_option( - '-s', '--sample', type="choice", action='store', choices=('23.5', '25.2'), - default='25.2', help="Flux limit for sample to download; either 23.5 or 25.2") - parser.add_option( - '--nolink', action='store_const', default=False, const=True, - help="Don't link to the alternate directory from share/galsim") - (args, posargs) = parser.parse_args() - - # Remembering to convert to an integer type - args.verbosity = int(args.verbosity) - - if args.verbosity == 0: - args.quiet = True - - # Return the args - return args - -# Based on recipe 577058: http://code.activestate.com/recipes/577058/ -def query_yes_no(question, default="yes"): - """Ask a yes/no question via input() and return their answer. - - "question" is a string that is presented to the user. - "default" is the presumed answer if the user just hits . - It must be "yes" (the default), "no" or None (meaning - an answer is required of the user). - - The "answer" return value is one of "yes" or "no". 
- """ - valid = {"yes":"yes", "y":"yes", "ye":"yes", - "no":"no", "n":"no"} - if default == None: - prompt = " [y/n] " - elif default == "yes": - prompt = " [Y/n] " - elif default == "no": - prompt = " [y/N] " - else: - raise ValueError("invalid default answer: '%s'" % default) - - while 1: - sys.stdout.write(question + prompt) - choice = input().lower() - if default is not None and choice == '': - return default - elif choice in valid.keys(): - return valid[choice] - else: - sys.stdout.write("Please respond with 'yes' or 'no' "\ - "(or 'y' or 'n').\n") - -def ensure_dir(target): - d = os.path.dirname(target) - if not os.path.exists(d): - os.makedirs(d) - -def download(url, target, unpack_dir, args, logger): - logger.warning('Downloading from url:\n %s',url) - logger.warning('Target location is:\n %s',target) - logger.info('') - - # See how large the file to be downloaded is. - u = urlopen(url) - meta = u.info() - logger.debug("Meta information about url:\n%s",str(meta)) - file_size = int(meta.get("Content-Length")) - file_name = os.path.basename(url) - logger.info("Size of %s: %d MBytes" , file_name, file_size/1024**2) - - # Make sure the directory we want to put this file exists. - ensure_dir(target) - - # Check if the file already exists and if it is the right size - do_download = True - if os.path.isfile(target): - logger.info("") - existing_file_size = os.path.getsize(target) - if args.force: - logger.info("Target file already exists. Size = %d MBytes. Forced re-download.", - existing_file_size/1024**2) - elif file_size == existing_file_size: - if args.quiet: - logger.info("Target file already exists. Not re-downloading.") - do_download = False - else: - q = "Target file already exists. Overwrite?" - yn = query_yes_no(q, default='no') - if yn == 'no': - do_download = False - else: - logger.warning("Target file already exists, but it seems to be either incomplete, " - "corrupt, or obsolete") - if args.quiet: - logger.info("Size of existing file = %d MBytes. 
Re-downloading.", - existing_file_size/1024**2) - else: - q = "Size of existing file = %d MBytes. Re-download?"%(existing_file_size/1024**2) - yn = query_yes_no(q, default='yes') - if yn == 'no': - do_download = False - elif unpack_dir is not None and os.path.isdir(unpack_dir): - logger.info("") - - # Check that this is the current version. - meta_file = os.path.join(unpack_dir, 'meta.json') - if os.path.isfile(meta_file): - with open(meta_file) as fp: - saved_meta_dict = json.load(fp) - # Get rid of the unicode - saved_meta_dict = dict([ (str(k),str(v)) for k,v in saved_meta_dict.items()]) - logger.debug("current meta information is %s",saved_meta_dict) - meta_dict = dict(meta) - logger.debug("url's meta information is %s",meta_dict) - obsolete = False - for k in meta_dict: - if k == 'date': - continue # This one isn't expected to match. - elif k not in saved_meta_dict: - logger.debug("key %s is missing in saved meta information",k) - obsolete = True - elif meta_dict[k] != saved_meta_dict[k]: - logger.debug("key %s differs: %s != %s",k,meta_dict[k],saved_meta_dict[k]) - obsolete = True - else: - logger.debug("key %s matches",k) - else: - obsolete = True - - if obsolete: - if args.quiet or args.force: - logger.warning("The version currently on disk is obsolete. "+ - "Downloading new version.") - else: - q = "The version currently on disk is obsolete. Download new version?" - yn = query_yes_no(q, default='yes') - if yn == 'no': - do_download = False - elif args.force: - logger.info("Target file has already been downloaded and unpacked. "+ - "Forced re-download.") - elif args.quiet: - logger.info("Target file has already been downloaded and unpacked. "+ - "Not re-downloading.") - do_download = False - args.save = True # Don't delete it! - else: - q = "Target file has already been downloaded and unpacked. Re-download?" 
- yn = query_yes_no(q, default='no') - if yn == 'no': - do_download = False - args.save = True - - # The next bit is based on one of the answers here: (by PabloG) - # http://stackoverflow.com/questions/22676/how-do-i-download-a-file-over-http-using-python - # The progress feature in that answer is important here, since downloading such a large file - # will take a while. - if do_download: - logger.info("") - try: - with open(target, 'wb') as f: - file_size_dl = 0 - block_sz = 32 * 1024 - next_dot = file_size/100. # For verbosity==1, the next size for writing a dot. - while True: - buffer = u.read(block_sz) - if not buffer: - break - - file_size_dl += len(buffer) - f.write(buffer) - - # Status bar - if args.verbosity >= 2: - status = r"Downloading: %5d / %d MBytes [%3.2f%%]" % ( - file_size_dl/1024**2, file_size/1024**2, file_size_dl*100./file_size) - status = status + '\b'*len(status) - sys.stdout.write(status) - sys.stdout.flush() - elif args.verbosity >= 1 and file_size_dl > next_dot: - sys.stdout.write('.') - sys.stdout.flush() - next_dot += file_size/100. - logger.info("Download complete.") - except IOError as e: - # Try to give a reasonable suggestion for some common IOErrors. - logger.error("\n\nIOError: %s",str(e)) - if 'Permission denied' in str(e): - logger.error("Rerun using sudo %s",script_name) - logger.error("If this is not possible, you can download to an alternate location:") - logger.error(" %s -d dir_name --nolink\n",script_name) - elif 'Disk quota' in str(e) or 'No space' in str(e): - logger.error("You might need to download this in an alternate location and link:") - logger.error(" %s -d dir_name\n",script_name) - raise - - return do_download, target, meta - -def unpack(target, target_dir, unpack_dir, meta, args, logger): - logger.info("Unpacking the tarball...") - #with tarfile.open(target) as tar: - # The above line works on python 2.7+. But to make sure we work for 2.6, we use the - # following workaround. - # cf. 
http://stackoverflow.com/questions/6086603/statement-with-and-tarfile - from contextlib import closing - with closing(tarfile.open(target)) as tar: - if args.verbosity >= 3: - tar.list(verbose=True) - elif args.verbosity >= 2: - tar.list(verbose=False) - tar.extractall(target_dir) - - # Write the meta information to a file, meta.json to mark what version this all is. - meta_file = os.path.join(unpack_dir, 'meta.json') - with open(meta_file,'w') as fp: - json.dump(dict(meta), fp) - - logger.info("Extracted contents of tar file.") - -def unzip(target, args, logger): - logger.info("Unzipping file") - subprocess.call(["gunzip", target]) - logger.info("Done") - -def link_target(unpack_dir, link_dir, args, logger): - logger.debug("Linking to %s from %s", unpack_dir, link_dir) - if os.path.exists(link_dir): - if os.path.islink(link_dir): - # If it exists and is a link, we just remove it and relink without any fanfare. - logger.debug("Removing existing link") - os.remove(link_dir) - else: - # If it is not a link, we need to figure out what to do with it. - if os.path.isdir(link_dir): - # If it's a directory, probably want to keep it. - logger.warning("%s already exists and is a directory.",link_dir) - if args.force: - logger.warning("Removing the existing files to make the link.") - elif args.quiet: - logger.warning("Link cannot be made. (Use -f to force removal of existing dir.)") - return - else: - q = "Remove the existing files to make the link?" - yn = query_yes_no(q, default='no') - if yn == 'no': - return - shutil.rmtree(link_dir) - else: - # If it's not a directory, it's probably corrupt, so the default is to remove it. - logger.warning("%s already exists, but strangely isn't a directory.",link_dir) - if args.force or args.quiet: - logger.warning("Removing the existing file.") - else: - q = "Remove the existing file?" 
- yn = query_yes_no(q, default='yes') - if yn == 'no': - return - os.path.remove(link_dir) - os.symlink(unpack_dir, link_dir) - logger.info("Made link to %s from %s", unpack_dir, link_dir) - -def main(): - args = parse_args() - - # Parse the integer verbosity level from the command line args into a logging_level string - import logging - logging_levels = { 0: logging.CRITICAL, - 1: logging.WARNING, - 2: logging.INFO, - 3: logging.DEBUG } - logging_level = logging_levels[args.verbosity] - - # Setup logging to go to sys.stdout or (if requested) to an output file - logging.basicConfig(format="%(message)s", level=logging_level, stream=sys.stdout) - logger = logging.getLogger('galsim') - - # Give diagnostic about GalSim version - logger.debug("GalSim version: %s",galsim.__version__) - logger.debug("This download script is: %s",__file__) - logger.info("Type %s -h to see command line options.\n",script_name) - - # Some definitions: - # share_dir is the base galsim share directory, e.g. /usr/local/share/galsim/ - # target_dir is where we will put the downloaded file, usually == share_dir. - # unpack_dir is the directory that the tarball will unpack into. - # url is the url from which we will download the tarball. - # file_name is the name of the file to download, taken from the url. - # target is the full path of the downloaded tarball - - share_dir = galsim.meta_data.share_dir - if args.dir is not None: - target_dir = args.dir - link = not args.nolink - else: - target_dir = share_dir - link = False - - url = "http://great3.jb.man.ac.uk/leaderboard/data/public/COSMOS_%s_training_sample.tar.gz"%( - args.sample) - file_name = os.path.basename(url) - target = os.path.join(target_dir, file_name) - unpack_dir = target[:-len('.tar.gz')] - - # Download the tarball - new_download, target, meta = download(url, target, unpack_dir, args, logger) - - # Usually we unpack if we downloaded the tarball or if specified by the command line option. 
- do_unpack = new_download or args.unpack - - # If the unpack dir is missing, then need to unpack - if not os.path.exists(unpack_dir): - do_unpack = True - - # But of course if there is no tarball, we can't unpack it - if not os.path.isfile(target): - do_unpack = False - - # If we have a downloaded tar file, ask if it should be re-unpacked. - if not do_unpack and not args.quiet and os.path.isfile(target): - logger.info("") - q = "Tar file is already unpacked. Re-unpack?" - yn = query_yes_no(q, default='no') - if yn == 'yes': - do_unpack=True - - # Unpack the tarball - if do_unpack: - unpack(target, target_dir, unpack_dir, meta, args, logger) - - # Usually, we remove the tarball if we unpacked it and command line doesn't specify to save it. - do_remove = do_unpack and not args.save - - # But if we didn't unpack it, and they didn't say to save it, ask if we should remove it. - if os.path.isfile(target) and not do_remove and not args.save and not args.quiet: - logger.info("") - q = "Remove the tarball?" - yn = query_yes_no(q, default='no') - if yn == 'yes': - do_remove = True - - # Remove the tarball - if do_remove: - logger.info("Removing the tarball to save space") - os.remove(target) - - # If we are downloading to an alternate directory, we (usually) link to it from share/galsim - if link: - # Get the directory where this would normally have been unpacked. - link_dir = os.path.join(share_dir, file_name)[:-len('.tar.gz')] - link_target(unpack_dir, link_dir, args, logger) - -if __name__ == "__main__": - main() +if __name__ == '__main__': + galsim.download_cosmos.main() diff --git a/bin/galsim_json.py b/bin/galsim_json.py deleted file mode 100644 index 5f546749f9a..00000000000 --- a/bin/galsim_json.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (c) 2012-2017 by the GalSim developers team on GitHub -# https://github.com/GalSim-developers -# -# This file is part of GalSim: The modular galaxy image simulation toolkit. 
-# https://github.com/GalSim-developers/GalSim -# -# GalSim is free software: redistribution and use in source and binary forms, -# with or without modification, are permitted provided that the following -# conditions are met: -# -# 1. Redistributions of source code must retain the above copyright notice, this -# list of conditions, and the disclaimer given in the accompanying LICENSE -# file. -# 2. Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions, and the disclaimer given in the documentation -# and/or other materials provided with the distribution. -# - -# For backwards compatibility. -# `galsim_json` is equivalent to `galsim -f json`, although if the config -# file has an extension that starts with `.j`, the `-f json` part is -# unnecessary. - -from __future__ import print_function - -import sys -import subprocess -print('Note: galsim_json has been deprecated. Use galsim instead.') -print('Running galsim -f json',' '.join(sys.argv[1:])) -print() -subprocess.call( ['galsim','-f','json'] + sys.argv[1:] ) diff --git a/conda_requirements.txt b/conda_requirements.txt new file mode 100644 index 00000000000..3bb643b2cd9 --- /dev/null +++ b/conda_requirements.txt @@ -0,0 +1,15 @@ +# The requirements packages that can be installed with +# conda install -y -c conda-forge --file conda_requirements.txt +setuptools>=38 +numpy>=1.13 +future>=0.15 +astropy>=2.0 +pybind11>=2.2 +pip>=9.0 +gcc>=4.8 +fftw>=3.3 +eigen>=3.3 + +# Not technically required, but useful. +pyyaml>=3.12 +pandas>=0.20 diff --git a/devel/README b/devel/README new file mode 100644 index 00000000000..47f02165546 --- /dev/null +++ b/devel/README @@ -0,0 +1,245 @@ +Guildelines for GalSim developers: + +1. Style + +For C++, we roughly adhere to the LSST C++ style. At this point, the best bet for new +C++ developers would be to look at some of the existing code and try to make your code +have a similar style. 
+ +For Python, we mostly adhere to PEP8, although please pay attention to the first rule: +"A Foolish Consistency is the Hobgoblin of Little Minds". In other words, please do +break the "rules" if it improves readability. + +Biggish things to highlight/add/modify are... + +* 4 space indentation, rather than 2 space. + +* No tabs. Just spaces. + +* No using statements. Now all namespaces (especially std::) are explicit (equvalent will be + adopted in Python, i.e. no "import * from moduleX" or "from moduleY import Z"). + +* Use C++ std library when possible. e.g. MAX -> std::max, Assert -> assert, PI -> M_PI, etc. + +* Will be readable at 100 character width (this is a departure from LSST style, which specifies + 120 but is slightly annoying for laptop use). + +* We adhere to the Zen of Python; open python, type "import this", hit enter. + +* We use all lowercase letters for all Python packages. That's a bit of a Python convention, + and while it's mostly aimed at compatibility with case-insensitive filesystems, we think we + should stick with it anyway. + +* We will adopt the SBProfile capitalization style wherever sensible for code filenames, as it's by + far the most significant chunk of C/C++ we are currently using. This makes include/*.h files + capitalized. + +* Overall capitalization rules: + * File names are CamelCase** + * Classes (and structs) are CamelCase + * Free functions are CamelCase + * Member functions are camelCase + * Public variables (including function parameters and kwargs) are lower_case + * Private variables are _lower_case + * Local scope variables/functions can be whatever the author prefers. + * Note that when using camelCase or CamelCase, acronyms should still be capitalized, i.e. CCD and + PSF, not Ccd and Psf. + +* Python unit testing modules are placed in tests/, and called test_.py. + +* C++ unit tests are also in tests/, called test_*.cpp. + + +For vim users, Mike Jarvis has put the c.vim file in the devutils/ directory. 
If you put that +in .vim/ftplugin/ and add the line "filetype plugin on" in your .vimrc file, then you will +automatically get the formatting to match what is currently in SBProfile. We don't (yet) +have a corresponding file for emacs. (Sorry.) + +LSST Style Guides available here -- +http://dev.lsstcorp.org/trac/wiki/DocumentationStandards +http://dev.lsstcorp.org/trac/wiki/PythonCodeStandards +http://dev.lsstcorp.org/trac/wiki/C%2B%2BStandard + + +2. Workflow + +Prior to version 2.0, SCons was our only build option. It is still available, but we +don't recommend it for end users. It remains to be seen which build system is more convenient +for developers, so we have kept it around for the 2.x series. + +With setup.py, the work flow is as follows: + + 1. Edit code + 2. python setup.py install + 3. python test_whatever.py # Check if your code works on the unit tests for this module. + 4. If errors, goto 1 + 5. python setup.py test # Check that the new code didn't break any other tests. + 6. If errors, goto 1 + 7. git add -p # Add only the code changes that are relevant. (Not debugging, etc.) + 8. git commit -m "Summary of changes" + 9. git push or goto 1 if still more to do. + +With SCons, the build steps are slightly different. + + 2. scons install + 5. scons tests + +Note that SCons only remakes files that have changed, so it's not too time consuming to rebuild +each time. However, setup.py does not have that feature. So, with the setup.py workflow, we +highly recommend installing ccache. This keeps track of which cc commands have been run +previously and stores the results, which vastly speeds up builds using setup.py. + +You can download ccache from + +https://ccache.samba.org/ + + +3. Commits + +Please do your best to have each commit be atomic. + +That is, the commit should, as far as possible, be a single conceptual change in the code, +complete with whatever additional unit tests are necessary to test it. Ideally all tests +should pass for every commit. 
This makes cherry-picking and bisecting much easier. + +Sometimes when working on some change, you may notice something else you want to change as well. +Maybe a typo, or a semi-related change that occurs to you because of your work on something else. + +In any case, go ahead and make the change. But when committing, use `git add -p` to select +the lines that go together to make a single atomic change. Commit them, and then go back and +do it again to add the other conceptual change as a separate commit. + +This is also a useful command to let you notice any debugging lines that you might have left in +the code, which you don't want to include in the commit. + +Another useful git command to become familiar with is `git stash`. This temporarily stores any +local changes, so you can (for instance) run the unit tests on the current committed version +to make sure they all pass before adding more items as the next commit. To bring the stashed +changes back, just do `git stash pop`. + + +4. Array/pixel indexing: + +Numpy arrays in Python use the (matrix-style) indexing [y, x], whereas the SBProfile class and the +more arguably natural ordering is (x, y). PyFITS, and a number of other Numpy-dependent Python +libraries have also adopted the [y, x] paradigm, as well as a number of astronomers who do a lot in +Python. + +We will write our Python classes to accept arguments in the (x, y) order, particularly the +galsim.Image class. However, it will be possible to create an Image using a Numpy array, and also +to get a Numpy view into an image. + +This places the boundary between our classes and NumPy. Our classes would be (x,y) in both C++ and +Python, but we wouldn't make any effort to fit NumPy into that paradigm. 
+ +Jim gives a couple more reasons on why this is a good place to put the boundary: +- Square brackets will consistently be [y,x], and parentheses will consistently be (x,y), due to + the (usually annoying) fact that you can't overload operator[] with two argument in C++. + +- Even in Python, [y,x] is really only used for NumPy arrays - note that matplotlib's plot + function takes 1-d x and y arrays in that order, for instance, even though matplotlib expects + arrays used as images to be [y,x]. + +Jim gives a nice example of this functionality for what he has in mind for the Python API of the +galsim.Image class: + +>>> import galsim +>>> import numpy +>>> arr1 = numpy.arange(6).reshape(2,3) +>>> print arr1 +[[0 1 2] +[3 4 5]] +>>> im1 = galsim.ImageD(arr1, x0=10, y0=100) # im shares data with arr1 +>>> arr2 = im1.array +>>> assert(arr2 is arr1) +>>> print im1(12, 101) # (x, y); includes offsets passed in constructor +5 +>>> im2 = galsim.ImageD(x0=1, y0=2, w=3, h=4) +>>> arr3 = im2.array # arr3 shares data with m3 +>>> print arr3.shape # shape is (h, w) +(4, 3) +>>> arr4 = arr1.transpose() # arr4 is column-major +>>> im3 = galsim.ImageD(arr4) # can't do this +Traceback (most recent call last): + File "", line 1, in +ValueError: Cannot create image from noncontiguous array. + +This last point is important: Numpy arrays must be kept in c_contiguous storage order, i.e. row- +major. Some of numpy's array routines invisibly change arrays to Fortran-style (i.e. column-major) +storage, or disrupt contiguous storage altogether. While developing, keep track of this using + +>>> array.flags + +in particular ensure that + +>>> array.flags.c_contiguous == True + +Check out the np.copy() function in Numpy, or array.copy() method to see how to make a c_contiguous +array, also see np.ascontiguousarray() or the array.transpose() method. + +Finally, the FITS standard is to begin indexing all arrays at (1, 1), and this is the convention +SBProfile currently adopts. 
However, we think that our image objects will also carry around an +attribute for the coordinate origin, so this should not be too much of a headache at the interface +with Python/Numpy (famous last words). + + +5. Compiler warning flags + +One of the SCons defaults is WARN=false. This is recommended for end users, so we don't +saddle them with a bunch of warning messages if they use a new compiler that we haven't +tested on yet. + +However, developers should always run with WARN=true to help catch bugs. This tends to +catch a lot of things that cause portability issues as we use the code on different systems +as well as outright bugs in the coding that are otherwise missed. Not all of the things +that come up are bugs per se, but it catches enough things that really are bugs that we feel +it worthwhile to make that the default. Developers are expected to fix their code to get rid +of all these warnings before committing. Even if you know the warning is benign, please fix it. +We want all compilations to be warning-free on as many compilers as possible. + +Note: because SCons automatically caches all parameters you pass it, you will only need +to do `scons WARN=true` once, and then it will be set that way for all future scons +commands. + +The setup.py build always shows all these warnings, so you don't need to do anything special +to turn that on. + +Even if everyone does this, it is possible that you might come across warnings from someone +else's code. e.g. They may use a different compiler that warns about somewhat different things. +If you know how to fix the problem, go ahead and do so. If you don't, please email the +person responsible (or go to our GitHub page and comment on the commit that causes the problem) +to ask them to fix it. Basically the same thing you would do if code failed to compile for a +non-warning compiler error. + + +6. Branch names + +When starting work on an issue, create a branch with the same name as the issue. 
+In particular, include the # before the number. E.g. when working on Issue #999, +the branch you make should be called #999. + +git checkout -b "#999" + +Note the quotes around "#999". This is because bash uses the # symbol for comments, so +if you don't include the quotes, the command won't work. + +If you want, it is permissible to add extra text after the issue number. + +git checkout -b "#999-some_cool_new_stuff" + +We don't normally do this, but sometimes it is useful to help remember which issue is which +(and therefore which branch you want to switch to). + +Why do we do this? Especially given the annoying quotes thing? Because if you follow the +instructions in the file .git/hooks/commit_msg, then git will add the branch name to the +ends of your commit messages automatically. This in turn lets GitHub know that the commit +is connected with that issue, so it shows up in the issue thread. This is often quite useful. + +The instructions in commit-msg repeated here for convenience: + + Copy this file to .git/hooks/commit-msg to enable automatically appending the + branch name to commit messages (when on a branch). Ensure it remains executable. + Branches should usually be named according to the issue number they are for. + e.g. "#12" for a branch that works on issue 12. + Then commits will automatically be linked in the comments for that issue. + diff --git a/devel/credo.txt b/devel/credo.txt deleted file mode 100644 index 151fbd7592e..00000000000 --- a/devel/credo.txt +++ /dev/null @@ -1,232 +0,0 @@ -***The hopefully not-too-crippling dogma of GalSim development***, v0.1 - -1. Style: - -Use the LSST documents on C++, Python and Documentation overall as a guide for new code, -exert pragmatism with pre-existing code. Unless you have good reason for hating LSST style, -or the style suggested here, in which case share it! - -Biggish things to highlight/add/modify are... - 4 space indentation, rather than 2 space. - - No tabs. Just spaces. 
- - No using statements. Now all namespaces (especially std::) are explicit (equvalent will be - adopted in Python, i.e. no "import * from moduleX" or "from moduleY import Z"). - - Use C++ std library when possible. e.g. MAX -> std::max, Assert -> assert, PI -> M_PI, etc. - - Will be readable at 100 character width (this is a departure from LSST style, which specifies - 120 but is slightly annoying for laptop use). - - Python 2.7.x will be supported, not Python 3.x... - - We adhere to the Zen of Python; open python, type "import this", hit enter. - - We use all lowercase letters for all Python packages. That's a bit of a Python convention, - and while it's mostly aimed at compatibility with case-insensitive filesystems, we think we - should stick with it anyway. - - We will adopt the SBProfile capitalization style wherever sensible for code filenames, as it's by - far the most significant chunk of C/C++ we are currently using. This makes include/*.h files - capitalized. - - Overall capitalization rules: - * File names are CamelCase** - * Classes (and structs) are CamelCase - * Free functions are CamelCase - * Member functions are camelCase - * Public variables (including function parameters and kwargs) are lower_case - * Private variables are _lower_case - * Local scope variables/functions can be whatever the author prefers. - ** Note that when using camelCase or CamelCase, acronyms should still be capitalized, i.e. CCD and - PSF, not Ccd and Psf. - - Python unit testing modules will be placed in tests/, and called test_.py - - For executables that perform tests (e.g. in non-Python code units) these will be called - test_* to match the naming conventions of Python test modules. - - -For vim users, Mike Jarvis has put the c.vim file in the devutils/ directory. If you put that -in .vim/ftplugin/ and add the line "filetype plugin on" in your .vimrc file, then you will -automatically get the formatting to match what is currently in SBProfile. 
We don't (yet) -have a corresponding file for emacs. (Sorry.) - -LSST Style Guides available here -- -http://dev.lsstcorp.org/trac/wiki/DocumentationStandards -http://dev.lsstcorp.org/trac/wiki/PythonCodeStandards -http://dev.lsstcorp.org/trac/wiki/C%2B%2BStandard - -Broad reasons for choice of LSST style: These documents just seem to be a fairly sensible -source of code and documentation formatting guidance, although note that we have diverged in some -places. See also Peter Melchior's slides for sound advice in general, and on documentation: -dl.dropbox.com/u/26820102/talks/software_engineering_150410.pdf - - -2. Version control: -Git - -Broad reasons why: Modern/distributed. Slightly better general purpose/branching capabilities -than Hg, albeit at the cost of a mildly steeper learning curve. Neither is rocket science! - - -3. Repository hosting: -Github, with push/pull access to all those in the GalSim-developers organization (based on -the great3-code@princeton.edu mailing list). - -Broad reasons why: Code review features, wiki features, popularity within GalSim-developers, -project forking. - - -4. Documentation: DOxygen -Broad reasons why: Well-supported by many in GalSim-developers. - - -5. Builds: SCons -Broad reasons why: Seemingly greater experience among GalSim-developers. - -One of the SCons defaults is WARN=false. This is recommended for end users, so we don't -saddle them with a bunch of warning messages if they use a new compiler that we haven't -tested on yet. However, developers should always run with WARN=true to help catch -bugs. Even if you know the warning is benign, please fix it. We want all compilations -to be warning-free on as many compilers as possible. - -Note: because SCons automatically caches all parameters you pass it, you will only need -to do `scons WARN=true` once, and then it will be set that way for all future scons -commands. - - -6. 
Libraries: -FFTW, Numpy, Pyfits, TMV (+BLAS & LAPACK if tuned versions present), Boost.python, -Boost.shared_ptr, Boost.random (flexibility to other RNGs) - -Notes: will add more if really useful/necessary, but want to keep this list as short as -possible. Matplotlib plotting in Python not featured by default. - - -7. Array/pixel indexing: - -Numpy arrays in Python use the (matrix-style) indexing [y, x], whereas the SBProfile class and the -more arguably natural ordering is (x, y). PyFITS, and a number of other Numpy-dependent Python -libraries have also adopted the [y, x] paradigm, as well as a number of astronomers who do a lot in -Python. - -We will write our Python classes to accept arguments in the (x, y) order, particularly the -galsim.Image class. However, it will be possible to create an Image using a Numpy array, and also -to get a Numpy view into an image. - -This places the boundary between our classes and NumPy. Our classes would be (x,y) in both C++ and -Python, but we wouldn't make any effort to fit NumPy into that paradigm. - -Jim gives a couple more reasons on why this is a good place to put the boundary: -- Square brackets will consistently be [y,x], and parentheses will consistently be (x,y), due to - the (usually annoying) fact that you can't overload operator[] with two argument in C++. - -- Even in Python, [y,x] is really only used for NumPy arrays - note that matplotlib's plot - function takes 1-d x and y arrays in that order, for instance, even though matplotlib expects - arrays used as images to be [y,x]. 
- -Jim gives a nice example of this functionality for what he has in mind for the Python API of the -galsim.Image class: - ->>> import galsim ->>> import numpy ->>> arr1 = numpy.arange(6).reshape(2,3) ->>> print arr1 -[[0 1 2] -[3 4 5]] ->>> im1 = galsim.ImageD(arr1, x0=10, y0=100) # im shares data with arr1 ->>> arr2 = im1.array ->>> assert(arr2 is arr1) ->>> print im1(12, 101) # (x, y); includes offsets passed in constructor -5 ->>> im2 = galsim.ImageD(x0=1, y0=2, w=3, h=4) ->>> arr3 = im2.array # arr3 shares data with m3 ->>> print arr3.shape # shape is (h, w) -(4, 3) ->>> arr4 = arr1.transpose() # arr4 is column-major ->>> im3 = galsim.ImageD(arr4) # can't do this -Traceback (most recent call last): - File "", line 1, in -ValueError: Cannot create image from noncontiguous array. - -This last point is important: Numpy arrays must be kept in c_contiguous storage order, i.e. row- -major. Some of numpy's array routines invisibly change arrays to Fortran-style (i.e. column-major) -storage, or disrupt contiguous storage altogether. While developing, keep track of this using - ->>> array.flags - -in particular ensure that - ->>> array.flags.c_contiguous == True - -Check out the np.copy() function in Numpy, or array.copy() method to see how to make a c_contiguous -array, also see np.ascontiguousarray() or the array.transpose() method. - -Finally, the FITS standard is to begin indexing all arrays at (1, 1), and this is the convention -SBProfile currently adopts. However, we think that our image objects will also carry around an -attribute for the coordinate origin, so this should not be too much of a headache at the interface -with Python/Numpy (famous last words). - - -8. Compiler warning flags - -By default, SCons adds the flags -Wall -Werror to the list of compiler flags. This tends to -catch a lot of things that cause portability issues as we use the code on different systems -as well as outright bugs in the coding that are otherwise missed. 
Not all of the things -that come up are bugs per se, but it catches enough things that really are bugs that we feel -it worthwhile to make that the default. Developers are expected to fix their code to get rid -of all these warnings before committing. - -Even if everyone does this, it is possible that you might come across warnings from someone -else's code. e.g. They may use a different compiler that warns about somewhat different things. -If you know how to fix the problem, go ahead and do so. If you don't, please email the -person responsible (or go to our GitHub page and comment on the commit that causes the problem) -to ask them to fix it. Basically the same thing you would do if code failed to compile for a -non-warning compiler error. - -When we eventually release the code to the public, we will switch the default to not use -these warning flags in case some users have a different compiler that none of us tested on. -The code is regularly tested with various versions of g++, clang++ and icpc. Maybe more. -But if they use something different, we don't want the code to fail because it is a stickler -for some C++ detail that we didn't catch in our builds. Turning on and off the warnings is -done with the SCons option WARN. Use WARN=false to turn them off, and WARN=true to turn them -back on. But again, you shouldn't really have to do this. You should fix the warnings rather -than ignore them by turning them off. - - -9. Random number generators (RNG) - -First, we realize that it's unlikely that *any* library RNG would come with a guarantee that a -given seed will produce the same sequence in all past and future versions (unless it's software -that is never revised, like NR!). So if we want to GalSim to have this property (we do), then we -need to package some fixed version of something with GalSim. - -Gary has created a subdirectory GalSim/include/galsim/boost1_48_0.random which contains the needed -files (and subdirectory) from Boost.Random. 
If the compiler flag DIVERT_BOOST_RANDOM is defined, -then all of the include directives for Random.h are directed to this directory instead of to -boost/random. And I hacked all these Boost files so that their includes for any element of -Boost.Random are also sent to galsim/boost1_48_0.random. Hence a compilation will not reach the -local boost/random and there will be no name collisions... - -***as long as no module that includes our Random.h also explicitly includes the normal boost/random -files.*** - -In other words we need to have a rule that any use of RNG's accesses them ONLY via our Random.h -wrappers. This would be good practice in any case. - -Some of the hacked Boost.Random files refer to other parts of Boost. I have *not* diverted these to -private copies - they will come from the user's distribution so there will be no name or code -collisions if anyone uses other parts of Boost. I've confirmed that the Boost.Random 1.48.0 -routines work when the 1.46 versions of the other Boost libraries are included (that's the one that -fink likes). - -At the Python level, we want to be using this same RNG. This means we'll need to make a Python -wrapper for the C++ Random class, and use it, which means we need to avoid the temptation to use -numpy.random. Work on this wrapper will begin ASAP (we'll probably want to put some effort into -making the wrappers for the C++ RNG class very NumPy-friendly anyway). - -There remains the risk that future Boost releases will break our "frozen" Boost.Random. We'll need -to record that possibility for posterity. - diff --git a/examples/check_des b/examples/check_des index e9f4a1d8840..a49b990b00e 100755 --- a/examples/check_des +++ b/examples/check_des @@ -20,8 +20,7 @@ cd des -python=../../bin/installed_python # For python scripts -bin=../../bin # For galsim executable +python='/usr/bin/env python' # For python scripts if [ ! 
-d "output" ]; then mkdir output @@ -35,15 +34,15 @@ fi nfiles=1 time $python draw_psf.py last=$nfiles || exit -time $bin/galsim draw_psf.yaml output.nfiles=$nfiles || exit +time galsim draw_psf.yaml output.nfiles=$nfiles || exit # These don't have any check, but at least make sure they run to completion. -time $bin/galsim blend.yaml || exit -time $bin/galsim blendset.yaml || exit +time galsim blend.yaml || exit +time galsim blendset.yaml || exit # Using the real galaxies takes a long time, dominated by the pyfits I/O (which preload does # not help with). So use parametric for this test. -time $bin/galsim meds.yaml output.nfiles=$nfiles output.nobjects=1000 gal.items.0.gal_type=parametric || exit +time galsim meds.yaml output.nfiles=$nfiles output.nobjects=1000 gal.items.0.gal_type=parametric || exit echo 'Checking diffs:' diff --git a/examples/check_json b/examples/check_json index 8ceb12e6212..9b696b3e9cc 100755 --- a/examples/check_json +++ b/examples/check_json @@ -18,47 +18,46 @@ # and/or other materials provided with the distribution. 
# -python=../bin/installed_python # For python scripts -bin=../bin # For galsim executable +python='/usr/bin/env python' # For python scripts /bin/rm -rf output /bin/rm -rf output_json time $python demo1.py || exit -time $bin/galsim -v2 json/demo1.json || exit +time galsim -v2 json/demo1.json || exit time $python demo2.py || exit -time $bin/galsim -v2 json/demo2.json || exit +time galsim -v2 json/demo2.json || exit time $python demo3.py || exit -time $bin/galsim -v2 json/demo3.json || exit +time galsim -v2 json/demo3.json || exit time $python demo4.py || exit -time $bin/galsim -v2 json/demo4.json || exit +time galsim -v2 json/demo4.json || exit time $python demo5.py || exit -time $bin/galsim -v2 json/demo5.json || exit +time galsim -v2 json/demo5.json || exit time $python demo6.py || exit -time $bin/galsim -v2 json/demo6a.json || exit -time $bin/galsim -v2 json/demo6b.json || exit +time galsim -v2 json/demo6a.json || exit +time galsim -v2 json/demo6b.json || exit time $python demo7.py || exit -time $bin/galsim -v2 json/demo7.json || exit +time galsim -v2 json/demo7.json || exit time $python demo8.py || exit -time $bin/galsim -v2 json/demo8a.json || exit -time $bin/galsim -v2 json/demo8b.json || exit +time galsim -v2 json/demo8a.json || exit +time galsim -v2 json/demo8b.json || exit time $python demo9.py || exit -time $bin/galsim -v1 json/demo9.json output.skip='{"type":"List","items":[0,0,0,0,0,1]}' || exit -time $bin/galsim -v1 json/demo9.json output.noclobber=True || exit +time galsim -v1 json/demo9.json output.skip='{"type":"List","items":[0,0,0,0,0,1]}' || exit +time galsim -v1 json/demo9.json output.noclobber=True || exit time $python demo10.py || exit -time $bin/galsim -v2 json/demo10.json || exit +time galsim -v2 json/demo10.json || exit time $python demo11.py || exit -time $bin/galsim -v2 json/demo11.json || exit +time galsim -v2 json/demo11.json || exit echo 'Checking diffs: (No output means success)' diff --git a/examples/check_yaml b/examples/check_yaml 
index d0234113eb3..9b7ed5630f9 100755 --- a/examples/check_yaml +++ b/examples/check_yaml @@ -18,46 +18,45 @@ # and/or other materials provided with the distribution. # -python=../bin/installed_python # For python scripts -bin=../bin # For galsim executable +python='/usr/bin/env python' /bin/rm -rf output /bin/rm -rf output_yaml time $python demo1.py || exit -time $bin/galsim -v2 demo1.yaml || exit +time galsim -v2 demo1.yaml || exit time $python demo2.py || exit -time $bin/galsim -v2 demo2.yaml || exit +time galsim -v2 demo2.yaml || exit time $python demo3.py || exit -time $bin/galsim -v2 demo3.yaml || exit +time galsim -v2 demo3.yaml || exit time $python demo4.py || exit -time $bin/galsim -v2 demo4.yaml || exit +time galsim -v2 demo4.yaml || exit time $python demo5.py || exit -time $bin/galsim -v2 demo5.yaml || exit +time galsim -v2 demo5.yaml || exit time $python demo6.py || exit -time $bin/galsim -v2 demo6.yaml || exit +time galsim -v2 demo6.yaml || exit time $python demo7.py || exit -time $bin/galsim -v2 demo7.yaml || exit +time galsim -v2 demo7.yaml || exit time $python demo8.py || exit -time $bin/galsim -v2 demo8.yaml || exit +time galsim -v2 demo8.yaml || exit time $python demo9.py || exit -time $bin/galsim -v1 -n 3 -j 1 demo9.yaml || exit -time $bin/galsim -v1 -n 3 -j 2 demo9.yaml || exit -time $bin/galsim -v1 -n 3 -j 3 demo9.yaml || exit +time galsim -v1 -n 3 -j 1 demo9.yaml || exit +time galsim -v1 -n 3 -j 2 demo9.yaml || exit +time galsim -v1 -n 3 -j 3 demo9.yaml || exit time $python demo10.py || exit -time $bin/galsim -v2 demo10.yaml || exit +time galsim -v2 demo10.yaml || exit time $python demo11.py || exit -time $bin/galsim -v2 demo11.yaml || exit +time galsim -v2 demo11.yaml || exit echo 'Checking diffs: (No output means success)' diff --git a/examples/demo1.yaml b/examples/demo1.yaml index 0d6e3bfe8e5..5fa092cbffe 100644 --- a/examples/demo1.yaml +++ b/examples/demo1.yaml @@ -28,15 +28,27 @@ # writing the corresponding python code. 
# # The executable that reads these YAML files is called galsim, which should be installed -# in your PREFIX/bin directory (after doing `scons install`, that is, where PREFIX is either -# /usr/local or whatever other PREFIX you set when running scons). So to run this config -# file, you should be able to type simply: +# by either `pip install galsim` or `python setup.py install`. If you used the latter, +# the output should end with a line something along the lines of: # -# galsim demo1.yaml +# scripts installed into /usr/local/bin +# +# telling you which directory they were installed in. If that directory is not in your +# path, then there should also be a message telling you to add it to your $PATH +# environment variable. If you used `pip install galsim --user`, then it was probably +# installed into a directory called .local/bin in your home directory. You can have +# pip tell you where it is installing things by adding the `-v` option. +# +# In any case, you can confirm that `galsim` is in your path by typing # -# If you haven't run `scons install` for whatever reason, you can instead write: +# which galsim # -# ../bin/galsim demo1.yaml +# which should show you which executable will be used. (If nothing shows up, then `galsim` +# is not in your path.) +# +# Then to run this config file, you should be able to type simply: +# +# galsim demo1.yaml # # If you don't have PyYAML installed, you can use JSON files instead. The directory json has # JSON configuration files that are exactly equivalent to these YAML files. The YAML format diff --git a/galsim/__init__.py b/galsim/__init__.py index 3a348dca570..89cd1cd3402 100644 --- a/galsim/__init__.py +++ b/galsim/__init__.py @@ -76,13 +76,15 @@ lost profits, business interruption, or indirect special or consequential damages of any kind. 
""" +import re # The version is stored in _version.py as recommended here: # http://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package # We don't use setup.py, so it's not so important to do it this way, but if we ever switch... # And it does make it a bit easier to get the version number in SCons too. -from ._version import __version__, __version_info__ - +from ._version import __version__ +vi = re.split('\.|-',__version__) +__version_info__ = tuple([int(x) for x in vi if x.isdigit()]) # Define the current code version, in addition to the hidden attribute, to be consistent with # previous GalSim versions that indicated the version number in this way. @@ -181,4 +183,5 @@ from . import cdmodel from . import utilities from . import fft +from . import download_cosmos from . import zernike diff --git a/bin/galsim_yaml.py b/galsim/__main__.py similarity index 66% rename from bin/galsim_yaml.py rename to galsim/__main__.py index 81087b6527d..2de141f6052 100644 --- a/bin/galsim_yaml.py +++ b/galsim/__main__.py @@ -16,16 +16,5 @@ # and/or other materials provided with the distribution. # - -# For backwards compatibility. -# `galsim_yaml` is equivalent to `galsim -f yaml`, although in most cases, -# the `-f yaml` part is unnecessary. - -from __future__ import print_function - -import sys -import subprocess -print('Note: galsim_yaml has been deprecated. Use galsim instead.') -print('Running galsim -f yaml',' '.join(sys.argv[1:])) -print() -subprocess.call( ['galsim','-f','yaml'] + sys.argv[1:] ) +from .main import main +main() diff --git a/galsim/_version.py b/galsim/_version.py index 36db5392267..b2ea87f5c1d 100644 --- a/galsim/_version.py +++ b/galsim/_version.py @@ -15,5 +15,4 @@ # this list of conditions, and the disclaimer given in the documentation # and/or other materials provided with the distribution. 
# -__version__ = '2.0' -__version_info__ = tuple(map(int, __version__.split('.'))) +__version__ = '2.0.0-alpha' diff --git a/galsim/download_cosmos.py b/galsim/download_cosmos.py new file mode 100644 index 00000000000..322c740b13b --- /dev/null +++ b/galsim/download_cosmos.py @@ -0,0 +1,455 @@ +# Copyright (c) 2012-2017 by the GalSim developers team on GitHub +# https://github.com/GalSim-developers +# +# This file is part of GalSim: The modular galaxy image simulation toolkit. +# https://github.com/GalSim-developers/GalSim +# +# GalSim is free software: redistribution and use in source and binary forms, +# with or without modification, are permitted provided that the following +# conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions, and the disclaimer given in the accompanying LICENSE +# file. +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions, and the disclaimer given in the documentation +# and/or other materials provided with the distribution. +# +""" +A program to download the COSMOS RealGalaxy catalog for use with GalSim. +""" + +from __future__ import print_function +from builtins import input + +import os, sys, tarfile, subprocess, shutil, json +try: + from urllib2 import urlopen +except: + from urllib.request import urlopen + +script_name = 'galsim_download_cosmos' + +def parse_args(): + """Handle the command line arguments using either argparse (if available) or optparse. + """ + + # Another potential option we might want to add is to download the smaller training sample + # rather than the full 4 GB file. Right now, this just downloads the larger catalog. 
+ + # Short description strings common to both parsing mechanisms + description = "This program will download the COSMOS RealGalaxy catalog and images\n" + description += "and place them in the GalSim share directory so they can be used as\n " + description += "the default files for the RealGalaxyCatalog class.\n" + description += "See https://github.com/GalSim-developers/GalSim/wiki/RealGalaxy%20Data\n" + description += "for more details about the files being downloaded." + epilog = "Note: The unpacked files total almost 6 GB in size!\n" + + try: + import argparse + + # Build the parser and add arguments + parser = argparse.ArgumentParser(description=description, epilog=epilog, add_help=True) + parser.add_argument( + '-v', '--verbosity', type=int, action='store', default=2, choices=(0, 1, 2, 3), + help='Integer verbosity level: min=0, max=3 [default=2]') + parser.add_argument( + '-f', '--force', action='store_const', default=False, const=True, + help='Force overwriting the current file if one exists') + parser.add_argument( + '-q', '--quiet', action='store_const', default=False, const=True, + help="Don't ask about re-downloading an existing file. 
(implied by verbosity=0)") + parser.add_argument( + '-u', '--unpack', action='store_const', default=False, const=True, + help='Re-unpack the tar file if not downloading') + parser.add_argument( + '--save', action='store_const', default=False, const=True, + help="Save the tarball after unpacking.") + parser.add_argument( + '-d', '--dir', action='store', default=None, + help="Install into an alternate directory and link from the share/galsim directory") + parser.add_argument( + '-s', '--sample', action='store', default='25.2', choices=('23.5', '25.2'), + help="Flux limit for sample to download; either 23.5 or 25.2") + parser.add_argument( + '--nolink', action='store_const', default=False, const=True, + help="Don't link to the alternate directory from share/galsim") + args = parser.parse_args() + + except ImportError: + # Use optparse instead + import optparse + + # Usage string not automatically generated for optparse, so generate it + usage = "usage: %s [-h] [-v {0,1,2,3}] [-f] [-q] [-u] [-s] [-d] [--nolink]"%script_name + # Build the parser + parser = optparse.OptionParser(usage=usage, description=description, epilog=epilog) + # optparse only allows string choices, so take verbosity as a string and make it int later + parser.add_option( + '-v', '--verbosity', type="choice", action='store', choices=('0', '1', '2', '3'), + default='2', help='Integer verbosity level: min=0, max=3 [default=2]') + parser.add_option( + '-f', '--force', action='store_const', default=False, const=True, + help='Force overwriting the current file if one exists') + parser.add_option( + '-q', '--quiet', action='store_const', default=False, const=True, + help="Don't ask about re-downloading an existing file. 
(implied by verbosity=0)")
+    parser.add_option(
+        '-u', '--unpack', action='store_const', default=False, const=True,
+        help='Re-unpack the tar file if not downloading')
+    parser.add_option(
+        '--save', action='store_const', default=False, const=True,
+        help="Save the tarball after unpacking.")
+    parser.add_option(
+        '-d', '--dir', action='store', default=None,
+        help="Install into an alternate directory and link from the share/galsim directory")
+    parser.add_option(
+        '-s', '--sample', type="choice", action='store', choices=('23.5', '25.2'),
+        default='25.2', help="Flux limit for sample to download; either 23.5 or 25.2")
+    parser.add_option(
+        '--nolink', action='store_const', default=False, const=True,
+        help="Don't link to the alternate directory from share/galsim")
+    (args, posargs) = parser.parse_args()
+
+    # Remembering to convert to an integer type
+    args.verbosity = int(args.verbosity)
+
+    if args.verbosity == 0:
+        args.quiet = True
+
+    # Return the args
+    return args
+
+# Based on recipe 577058: http://code.activestate.com/recipes/577058/
+def query_yes_no(question, default="yes"):
+    """Ask a yes/no question via input() and return their answer.
+
+    "question" is a string that is presented to the user.
+    "default" is the presumed answer if the user just hits <Enter>.
+        It must be "yes" (the default), "no" or None (meaning
+        an answer is required of the user).
+
+    The "answer" return value is one of "yes" or "no".
+ """ + valid = {"yes":"yes", "y":"yes", "ye":"yes", + "no":"no", "n":"no"} + if default == None: + prompt = " [y/n] " + elif default == "yes": + prompt = " [Y/n] " + elif default == "no": + prompt = " [y/N] " + else: + raise ValueError("invalid default answer: '%s'" % default) + + while 1: + sys.stdout.write(question + prompt) + choice = input().lower() + if default is not None and choice == '': + return default + elif choice in valid.keys(): + return valid[choice] + else: + sys.stdout.write("Please respond with 'yes' or 'no' "\ + "(or 'y' or 'n').\n") + +def ensure_dir(target): + d = os.path.dirname(target) + if not os.path.exists(d): + os.makedirs(d) + +def download(url, target, unpack_dir, args, logger): + logger.warning('Downloading from url:\n %s',url) + logger.warning('Target location is:\n %s',target) + logger.info('') + + # See how large the file to be downloaded is. + u = urlopen(url) + meta = u.info() + logger.debug("Meta information about url:\n%s",str(meta)) + file_size = int(meta.get("Content-Length")) + file_name = os.path.basename(url) + logger.info("Size of %s: %d MBytes" , file_name, file_size/1024**2) + + # Make sure the directory we want to put this file exists. + ensure_dir(target) + + # Check if the file already exists and if it is the right size + do_download = True + if os.path.isfile(target): + logger.info("") + existing_file_size = os.path.getsize(target) + if args.force: + logger.info("Target file already exists. Size = %d MBytes. Forced re-download.", + existing_file_size/1024**2) + elif file_size == existing_file_size: + if args.quiet: + logger.info("Target file already exists. Not re-downloading.") + do_download = False + else: + q = "Target file already exists. Overwrite?" + yn = query_yes_no(q, default='no') + if yn == 'no': + do_download = False + else: + logger.warning("Target file already exists, but it seems to be either incomplete, " + "corrupt, or obsolete") + if args.quiet: + logger.info("Size of existing file = %d MBytes. 
Re-downloading.", + existing_file_size/1024**2) + else: + q = "Size of existing file = %d MBytes. Re-download?"%(existing_file_size/1024**2) + yn = query_yes_no(q, default='yes') + if yn == 'no': + do_download = False + elif unpack_dir is not None and os.path.isdir(unpack_dir): + logger.info("") + + # Check that this is the current version. + meta_file = os.path.join(unpack_dir, 'meta.json') + if os.path.isfile(meta_file): + with open(meta_file) as fp: + saved_meta_dict = json.load(fp) + # Get rid of the unicode + saved_meta_dict = dict([ (str(k),str(v)) for k,v in saved_meta_dict.items()]) + logger.debug("current meta information is %s",saved_meta_dict) + meta_dict = dict(meta) + logger.debug("url's meta information is %s",meta_dict) + obsolete = False + for k in meta_dict: + if k == 'date': + continue # This one isn't expected to match. + elif k not in saved_meta_dict: + logger.debug("key %s is missing in saved meta information",k) + obsolete = True + elif meta_dict[k] != saved_meta_dict[k]: + logger.debug("key %s differs: %s != %s",k,meta_dict[k],saved_meta_dict[k]) + obsolete = True + else: + logger.debug("key %s matches",k) + else: + obsolete = True + + if obsolete: + if args.quiet or args.force: + logger.warning("The version currently on disk is obsolete. "+ + "Downloading new version.") + else: + q = "The version currently on disk is obsolete. Download new version?" + yn = query_yes_no(q, default='yes') + if yn == 'no': + do_download = False + elif args.force: + logger.info("Target file has already been downloaded and unpacked. "+ + "Forced re-download.") + elif args.quiet: + logger.info("Target file has already been downloaded and unpacked. "+ + "Not re-downloading.") + do_download = False + args.save = True # Don't delete it! + else: + q = "Target file has already been downloaded and unpacked. Re-download?" 
+ yn = query_yes_no(q, default='no') + if yn == 'no': + do_download = False + args.save = True + + # The next bit is based on one of the answers here: (by PabloG) + # http://stackoverflow.com/questions/22676/how-do-i-download-a-file-over-http-using-python + # The progress feature in that answer is important here, since downloading such a large file + # will take a while. + if do_download: + logger.info("") + try: + with open(target, 'wb') as f: + file_size_dl = 0 + block_sz = 32 * 1024 + next_dot = file_size/100. # For verbosity==1, the next size for writing a dot. + while True: + buffer = u.read(block_sz) + if not buffer: + break + + file_size_dl += len(buffer) + f.write(buffer) + + # Status bar + if args.verbosity >= 2: + status = r"Downloading: %5d / %d MBytes [%3.2f%%]" % ( + file_size_dl/1024**2, file_size/1024**2, file_size_dl*100./file_size) + status = status + '\b'*len(status) + sys.stdout.write(status) + sys.stdout.flush() + elif args.verbosity >= 1 and file_size_dl > next_dot: + sys.stdout.write('.') + sys.stdout.flush() + next_dot += file_size/100. + logger.info("Download complete.") + except IOError as e: + # Try to give a reasonable suggestion for some common IOErrors. + logger.error("\n\nIOError: %s",str(e)) + if 'Permission denied' in str(e): + logger.error("Rerun using sudo %s",script_name) + logger.error("If this is not possible, you can download to an alternate location:") + logger.error(" %s -d dir_name --nolink\n",script_name) + elif 'Disk quota' in str(e) or 'No space' in str(e): + logger.error("You might need to download this in an alternate location and link:") + logger.error(" %s -d dir_name\n",script_name) + raise + + return do_download, target, meta + +def unpack(target, target_dir, unpack_dir, meta, args, logger): + logger.info("Unpacking the tarball...") + #with tarfile.open(target) as tar: + # The above line works on python 2.7+. But to make sure we work for 2.6, we use the + # following workaround. + # cf. 
http://stackoverflow.com/questions/6086603/statement-with-and-tarfile + from contextlib import closing + with closing(tarfile.open(target)) as tar: + if args.verbosity >= 3: + tar.list(verbose=True) + elif args.verbosity >= 2: + tar.list(verbose=False) + tar.extractall(target_dir) + + # Write the meta information to a file, meta.json to mark what version this all is. + meta_file = os.path.join(unpack_dir, 'meta.json') + with open(meta_file,'w') as fp: + json.dump(dict(meta), fp) + + logger.info("Extracted contents of tar file.") + +def unzip(target, args, logger): + logger.info("Unzipping file") + subprocess.call(["gunzip", target]) + logger.info("Done") + +def link_target(unpack_dir, link_dir, args, logger): + logger.debug("Linking to %s from %s", unpack_dir, link_dir) + if os.path.exists(link_dir): + if os.path.islink(link_dir): + # If it exists and is a link, we just remove it and relink without any fanfare. + logger.debug("Removing existing link") + os.remove(link_dir) + else: + # If it is not a link, we need to figure out what to do with it. + if os.path.isdir(link_dir): + # If it's a directory, probably want to keep it. + logger.warning("%s already exists and is a directory.",link_dir) + if args.force: + logger.warning("Removing the existing files to make the link.") + elif args.quiet: + logger.warning("Link cannot be made. (Use -f to force removal of existing dir.)") + return + else: + q = "Remove the existing files to make the link?" + yn = query_yes_no(q, default='no') + if yn == 'no': + return + shutil.rmtree(link_dir) + else: + # If it's not a directory, it's probably corrupt, so the default is to remove it. + logger.warning("%s already exists, but strangely isn't a directory.",link_dir) + if args.force or args.quiet: + logger.warning("Removing the existing file.") + else: + q = "Remove the existing file?" 
+ yn = query_yes_no(q, default='yes') + if yn == 'no': + return + os.path.remove(link_dir) + os.symlink(unpack_dir, link_dir) + logger.info("Made link to %s from %s", unpack_dir, link_dir) + +def main(): + from ._version import __version__ as version + from .meta_data import share_dir + + args = parse_args() + + # Parse the integer verbosity level from the command line args into a logging_level string + import logging + logging_levels = { 0: logging.CRITICAL, + 1: logging.WARNING, + 2: logging.INFO, + 3: logging.DEBUG } + logging_level = logging_levels[args.verbosity] + + # Setup logging to go to sys.stdout or (if requested) to an output file + logging.basicConfig(format="%(message)s", level=logging_level, stream=sys.stdout) + logger = logging.getLogger('galsim') + + # Give diagnostic about GalSim version + logger.debug("GalSim version: %s",version) + logger.debug("This download script is: %s",__file__) + logger.info("Type %s -h to see command line options.\n",script_name) + + # Some definitions: + # share_dir is the base galsim share directory, e.g. /usr/local/share/galsim/ + # target_dir is where we will put the downloaded file, usually == share_dir. + # unpack_dir is the directory that the tarball will unpack into. + # url is the url from which we will download the tarball. + # file_name is the name of the file to download, taken from the url. + # target is the full path of the downloaded tarball + + if args.dir is not None: + target_dir = args.dir + link = not args.nolink + else: + target_dir = share_dir + link = False + + url = "http://great3.jb.man.ac.uk/leaderboard/data/public/COSMOS_%s_training_sample.tar.gz"%( + args.sample) + file_name = os.path.basename(url) + target = os.path.join(target_dir, file_name) + unpack_dir = target[:-len('.tar.gz')] + + # Download the tarball + new_download, target, meta = download(url, target, unpack_dir, args, logger) + + # Usually we unpack if we downloaded the tarball or if specified by the command line option. 
+ do_unpack = new_download or args.unpack + + # If the unpack dir is missing, then need to unpack + if not os.path.exists(unpack_dir): + do_unpack = True + + # But of course if there is no tarball, we can't unpack it + if not os.path.isfile(target): + do_unpack = False + + # If we have a downloaded tar file, ask if it should be re-unpacked. + if not do_unpack and not args.quiet and os.path.isfile(target): + logger.info("") + q = "Tar file is already unpacked. Re-unpack?" + yn = query_yes_no(q, default='no') + if yn == 'yes': + do_unpack=True + + # Unpack the tarball + if do_unpack: + unpack(target, target_dir, unpack_dir, meta, args, logger) + + # Usually, we remove the tarball if we unpacked it and command line doesn't specify to save it. + do_remove = do_unpack and not args.save + + # But if we didn't unpack it, and they didn't say to save it, ask if we should remove it. + if os.path.isfile(target) and not do_remove and not args.save and not args.quiet: + logger.info("") + q = "Remove the tarball?" + yn = query_yes_no(q, default='no') + if yn == 'yes': + do_remove = True + + # Remove the tarball + if do_remove: + logger.info("Removing the tarball to save space") + os.remove(target) + + # If we are downloading to an alternate directory, we (usually) link to it from share/galsim + if link: + # Get the directory where this would normally have been unpacked. + link_dir = os.path.join(share_dir, file_name)[:-len('.tar.gz')] + link_target(unpack_dir, link_dir, args, logger) diff --git a/galsim/main.py b/galsim/main.py new file mode 100644 index 00000000000..209f6a34070 --- /dev/null +++ b/galsim/main.py @@ -0,0 +1,263 @@ +# Copyright (c) 2012-2017 by the GalSim developers team on GitHub +# https://github.com/GalSim-developers +# +# This file is part of GalSim: The modular galaxy image simulation toolkit. 
+# https://github.com/GalSim-developers/GalSim +# +# GalSim is free software: redistribution and use in source and binary forms, +# with or without modification, are permitted provided that the following +# conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions, and the disclaimer given in the accompanying LICENSE +# file. +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions, and the disclaimer given in the documentation +# and/or other materials provided with the distribution. +# +""" +The main driver program for making images of galaxies whose parameters are specified +in a configuration file. +""" + +from __future__ import print_function + +import sys +import os +import logging +import pprint + +def parse_args(): + """Handle the command line arguments using either argparse (if available) or optparse. + """ + from ._version import __version__ as version + + # Short description strings common to both parsing mechanisms + version_str = "GalSim Version %s"%version + description = "galsim: configuration file parser for %s. "%version_str + description += "See https://github.com/GalSim-developers/GalSim/wiki/Config-Documentation " + description += "for documentation about using this program." + epilog = "Works with both YAML and JSON markup formats." + + try: + import argparse + + # Build the parser and add arguments + parser = argparse.ArgumentParser(description=description, add_help=True, epilog=epilog) + parser.add_argument('config_file', type=str, nargs='?', help='the configuration file') + parser.add_argument( + 'variables', type=str, nargs='*', + help='additional variables or modifications to variables in the config file. ' + + 'e.g. 
galsim foo.yaml output.nproc=-1 gal.rotate="{type : Random}"') + parser.add_argument( + '-v', '--verbosity', type=int, action='store', default=1, choices=(0, 1, 2, 3), + help='integer verbosity level: min=0, max=3 [default=1]') + parser.add_argument( + '-l', '--log_file', type=str, action='store', default=None, + help='filename for storing logging output [default is to stream to stdout]') + parser.add_argument( + '-f', '--file_type', type=str, action='store', choices=('yaml','json'), + default=None, + help=('type of config_file: yaml or json are currently supported. ' + + '[default is to automatically determine the type from the extension]')) + parser.add_argument( + '-m', '--module', type=str, action='append', default=None, + help='python module to import before parsing config file') + parser.add_argument( + '-p', '--profile', action='store_const', default=False, const=True, + help='output profiling information at the end of the run') + parser.add_argument( + '-n', '--njobs', type=int, action='store', default=1, + help='set the total number of jobs that this run is a part of. ' + + 'Used in conjunction with -j (--job)') + parser.add_argument( + '-j', '--job', type=int, action='store', default=1, + help='set the job number for this particular run. Must be in [1,njobs]. 
' + + 'Used in conjunction with -n (--njobs)') + parser.add_argument( + '-x', '--except_abort', action='store_const', default=False, const=True, + help='abort the whole job whenever any file raises an exception rather than ' + + 'continuing on') + parser.add_argument( + '--version', action='store_const', default=False, const=True, + help='show the version of GalSim') + args = parser.parse_args() + + if args.config_file == None: + if args.version: + print(version_str) + else: + parser.print_help() + sys.exit() + elif args.version: + print(version_str) + + except ImportError: + # Use optparse instead + import optparse + + # Usage string not automatically generated for optparse, so generate it + usage = """usage: galsim [-h] [-v {0,1,2,3}] [-l LOG_FILE] [-f {yaml,json}] [-m MODULE] + [--version] config_file [variables ...]""" + # Build the parser + parser = optparse.OptionParser(usage=usage, epilog=epilog, description=description) + # optparse only allows string choices, so take verbosity as a string and make it int later + parser.add_option( + '-v', '--verbosity', type="choice", action='store', choices=('0', '1', '2', '3'), + default='1', help='integer verbosity level: min=0, max=3 [default=1]') + parser.add_option( + '-l', '--log_file', type=str, action='store', default=None, + help='filename for storing logging output [default is to stream to stdout]') + parser.add_option( + '-f', '--file_type', type="choice", action='store', choices=('yaml','json'), + default=None, + help=('type of config_file: yaml or json are currently supported. 
' + + '[default is to automatically determine the type from the extension]')) + parser.add_option( + '-m', '--module', type=str, action='append', default=None, + help='python module to import before parsing config file') + parser.add_option( + '-p', '--profile', action='store_const', default=False, const=True, + help='output profiling information at the end of the run') + parser.add_option( + '-n', '--njobs', type=int, action='store', default=1, + help='set the total number of jobs that this run is a part of. ' + + 'Used in conjunction with -j (--job)') + parser.add_option( + '-j', '--job', type=int, action='store', default=1, + help='set the job number for this particular run. Must be in [1,njobs]. ' + + 'Used in conjunction with -n (--njobs)') + parser.add_option( + '-x', '--except_abort', action='store_const', default=False, const=True, + help='abort the whole job whenever any file raises an exception rather than ' + + 'just reporting the exception and continuing on') + parser.add_option( + '--version', action='store_const', default=False, const=True, + help='show the version of GalSim') + (args, posargs) = parser.parse_args() + + # Remembering to convert to an integer type + args.verbosity = int(args.verbosity) + + # Store the positional arguments in the args object as well: + if len(posargs) == 0: + if args.version: + print(version_str) + else: + parser.print_help() + sys.exit() + else: + args.config_file = posargs[0] + args.variables = posargs[1:] + if args.version: + print(version_str) + + # Return the args + return args + +def ParseVariables(variables, logger): + new_params = {} + for v in variables: + logger.debug('Parsing additional variable: %s',v) + if '=' not in v: + raise ValueError('Improper variable specification. 
Use field.item=value.') + key, value = v.split('=',1) + # Try to evaluate the value string to allow people to input things like + # gal.rotate='{type : Rotate}' + # But if it fails (particularly with json), just assign the value as a string. + try: + try: + import yaml + value = yaml.load(value) + except ImportError: + # Don't require yaml. json usually works for these. + import json + value = json.loads(value) + except: + logger.debug('Unable to parse %s. Treating it as a string.'%value) + new_params[key] = value + + return new_params + + +def AddModules(config, modules): + if modules: + if 'modules' not in config: + config['modules'] = modules + else: + config['modules'].extend(modules) + +def main(): + from .config import ReadConfig, Process + + args = parse_args() + + if args.njobs < 1: + raise ValueError("Invalid number of jobs %d"%args.njobs) + if args.job < 1: + raise ValueError("Invalid job number %d. Must be >= 1"%args.job) + if args.job > args.njobs: + raise ValueError("Invalid job number %d. 
Must be <= njobs (%d)"%(args.job,args.njobs)) + + # Parse the integer verbosity level from the command line args into a logging_level string + logging_levels = { 0: logging.CRITICAL, + 1: logging.WARNING, + 2: logging.INFO, + 3: logging.DEBUG } + logging_level = logging_levels[args.verbosity] + + # If requested, load the profiler + if args.profile: + import cProfile, pstats, io + pr = cProfile.Profile() + pr.enable() + + # Setup logging to go to sys.stdout or (if requested) to an output file + if args.log_file is None: + logging.basicConfig(format="%(message)s", level=logging_level, stream=sys.stdout) + else: + logging.basicConfig(format="%(message)s", level=logging_level, filename=args.log_file) + logger = logging.getLogger('galsim') + + logger.warn('Using config file %s', args.config_file) + all_config = ReadConfig(args.config_file, args.file_type, logger) + logger.debug('Successfully read in config file.') + + # Process each config document + for config in all_config: + + if 'root' not in config: + config['root'] = os.path.splitext(args.config_file)[0] + + # Parse the command-line variables: + new_params = ParseVariables(args.variables, logger) + + # Add modules to the config['modules'] list + AddModules(config, args.module) + + # Profiling doesn't work well with multiple processes. We'll need to separately + # enable profiling withing the workers and output when the process ends. Set + # config['profile'] = True to enable this. + if args.profile: + config['profile'] = True + + logger.debug("Process config dict: \n%s", pprint.pformat(config)) + + # Process the configuration + Process(config, logger, njobs=args.njobs, job=args.job, new_params=new_params, + except_abort=args.except_abort) + + if args.profile: + # cf. 
example code here: https://docs.python.org/2/library/profile.html + pr.disable() + try: + from StringIO import StringIO + except ImportError: + from io import StringIO + s = StringIO() + sortby = 'time' # Note: This is now called tottime, but time seems to be a valid + # alias for this that is backwards compatible to older versions + # of pstats. + ps = pstats.Stats(pr, stream=s).sort_stats(sortby).reverse_order() + ps.print_stats() + logger.error(s.getvalue()) diff --git a/galsim/random.py b/galsim/random.py index cd25c6d4b9f..937fe78a1c9 100644 --- a/galsim/random.py +++ b/galsim/random.py @@ -164,7 +164,7 @@ def __copy__(self): def __getstate__(self): d = self.__dict__.copy() - d['rng_str'] = self._rng.serialize() + d['rng_str'] = self.serialize() d.pop('_rng') return d @@ -190,7 +190,7 @@ def raw(self): of random deviate for this class, just return the raw integer value that would have been used to generate this value. """ - return self._rng.raw() + return int(self._rng.raw()) def generate(self, array): """Generate many pseudo-random values, filling in the values of a numpy array. @@ -224,7 +224,7 @@ def __ne__(self, other): __hash__ = None def serialize(self): - return self._rng.serialize() + return str(self._rng.serialize()) def _seed_repr(self): s = self.serialize().split(' ') @@ -809,7 +809,7 @@ def __str__(self): def __eq__(self, other): if repr(self) != repr(other): return False - return (self._rng.serialize() == other._rng.serialize() and + return (self.serialize() == other.serialize() and self._function == other._function and self._xmin == other._xmin and self._xmax == other._xmax and @@ -819,7 +819,7 @@ def __eq__(self, other): # Functions aren't picklable, so for pickling, we reinitialize the DistDeviate using the # original function parameter, which may be a string or a file name. 
def __getinitargs__(self): - return (self._rng.serialize(), self._function, self._xmin, self._xmax, + return (self.serialize(), self._function, self._xmin, self._xmax, self._interpolant, self._npoints) diff --git a/galsim/share b/galsim/share new file mode 120000 index 00000000000..f2d0b20356b --- /dev/null +++ b/galsim/share @@ -0,0 +1 @@ +../share \ No newline at end of file diff --git a/galsim/vonkarman.py b/galsim/vonkarman.py index 795baa30674..5073e75eade 100644 --- a/galsim/vonkarman.py +++ b/galsim/vonkarman.py @@ -118,16 +118,16 @@ def __init__(self, lam, r0, L0=25.0, flux=1, scale_unit=arcsec, def _sbvk(self): sbvk = _galsim.SBVonKarman(self._lam, self._r0, self._L0, self._flux, self._scale, self._do_delta, self._gsparams._gsp) - self._delta_amp = sbvk.getDeltaAmplitude() + self._delta = sbvk.getDelta() if not self._suppress: - if self._delta_amp > self._gsparams.maxk_threshold: + if self._delta > self._gsparams.maxk_threshold: import warnings warnings.warn("VonKarman delta-function component is larger than maxk_threshold. " "Please see docstring for information about this component and how " "to toggle it.") if self._do_delta: sbvk = _galsim.SBVonKarman(self._lam, self._r0, self._L0, - self._flux-self._delta_amp, self._scale, + self._flux-self._delta, self._scale, self._do_delta, self._gsparams._gsp) return sbvk @@ -136,7 +136,7 @@ def _sbp(self): # Add in a delta function with appropriate amplitude if requested. if self._do_delta: sbvk = self._sbvk - sbdelta = _galsim.SBDeltaFunction(self._delta_amp, self._gsparams._gsp) + sbdelta = _galsim.SBDeltaFunction(self._delta, self._gsparams._gsp) return _galsim.SBAdd([sbvk, sbdelta], self._gsparams._gsp) else: return self._sbvk @@ -167,8 +167,8 @@ def _is_analytic_x(self): @property def delta_amplitude(self): - self._sbvk # This is where _delta_amp is calculated. - return self._delta_amp + self._sbvk # This is where _delta is calculated. 
+ return self._delta @property def half_light_radius(self): diff --git a/include/fftw3/fftw3.h b/include/fftw3/fftw3.h new file mode 100644 index 00000000000..6637303faca --- /dev/null +++ b/include/fftw3/fftw3.h @@ -0,0 +1,381 @@ +/* + * Copyright (c) 2003, 2007-8 Matteo Frigo + * Copyright (c) 2003, 2007-8 Massachusetts Institute of Technology + * + * The following statement of license applies *only* to this header file, + * and *not* to the other files distributed with FFTW or derived therefrom: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/***************************** NOTE TO USERS ********************************* + * + * THIS IS A HEADER FILE, NOT A MANUAL + * + * If you want to know how to use FFTW, please read the manual, + * online at http://www.fftw.org/doc/ and also included with FFTW. + * For a quick start, see the manual's tutorial section. + * + * (Reading header files to learn how to use a library is a habit + * stemming from code lacking a proper manual. Arguably, it's a + * *bad* habit in most cases, because header files can contain + * interfaces that are not part of the public, stable API.) + * + ****************************************************************************/ + +#ifndef FFTW3_H +#define FFTW3_H + +#include + +#ifdef __cplusplus +extern "C" +{ +#endif /* __cplusplus */ + +/* If is included, use the C99 complex type. Otherwise + define a type bit-compatible with C99 complex */ +#if !defined(FFTW_NO_Complex) && defined(_Complex_I) && defined(complex) && defined(I) +# define FFTW_DEFINE_COMPLEX(R, C) typedef R _Complex C +#else +# define FFTW_DEFINE_COMPLEX(R, C) typedef R C[2] +#endif + +#define FFTW_CONCAT(prefix, name) prefix ## name +#define FFTW_MANGLE_DOUBLE(name) FFTW_CONCAT(fftw_, name) +#define FFTW_MANGLE_FLOAT(name) FFTW_CONCAT(fftwf_, name) +#define FFTW_MANGLE_LONG_DOUBLE(name) FFTW_CONCAT(fftwl_, name) + +/* IMPORTANT: for Windows compilers, you should add a line + #define FFTW_DLL + here and in kernel/ifftw.h if you are compiling/using FFTW as a + DLL, in order to do the proper importing/exporting, or + alternatively compile with -DFFTW_DLL or the equivalent + command-line flag. This is not necessary under MinGW/Cygwin, where + libtool does the imports/exports automatically. 
*/ +#if defined(FFTW_DLL) && (defined(_WIN32) || defined(__WIN32__)) + /* annoying Windows syntax for shared-library declarations */ +# if defined(COMPILING_FFTW) /* defined in api.h when compiling FFTW */ +# define FFTW_EXTERN extern __declspec(dllexport) +# else /* user is calling FFTW; import symbol */ +# define FFTW_EXTERN extern __declspec(dllimport) +# endif +#else +# define FFTW_EXTERN extern +#endif + +enum fftw_r2r_kind_do_not_use_me { + FFTW_R2HC=0, FFTW_HC2R=1, FFTW_DHT=2, + FFTW_REDFT00=3, FFTW_REDFT01=4, FFTW_REDFT10=5, FFTW_REDFT11=6, + FFTW_RODFT00=7, FFTW_RODFT01=8, FFTW_RODFT10=9, FFTW_RODFT11=10 +}; + +struct fftw_iodim_do_not_use_me { + int n; /* dimension size */ + int is; /* input stride */ + int os; /* output stride */ +}; + +#include /* for ptrdiff_t */ +struct fftw_iodim64_do_not_use_me { + ptrdiff_t n; /* dimension size */ + ptrdiff_t is; /* input stride */ + ptrdiff_t os; /* output stride */ +}; + +/* + huge second-order macro that defines prototypes for all API + functions. 
We expand this macro for each supported precision + + X: name-mangling macro + R: real data type + C: complex data type +*/ + +#define FFTW_DEFINE_API(X, R, C) \ + \ +FFTW_DEFINE_COMPLEX(R, C); \ + \ +typedef struct X(plan_s) *X(plan); \ + \ +typedef struct fftw_iodim_do_not_use_me X(iodim); \ +typedef struct fftw_iodim64_do_not_use_me X(iodim64); \ + \ +typedef enum fftw_r2r_kind_do_not_use_me X(r2r_kind); \ + \ +FFTW_EXTERN void X(execute)(const X(plan) p); \ + \ +FFTW_EXTERN X(plan) X(plan_dft)(int rank, const int *n, \ + C *in, C *out, int sign, unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_dft_1d)(int n, C *in, C *out, int sign, \ + unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_dft_2d)(int n0, int n1, \ + C *in, C *out, int sign, unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_dft_3d)(int n0, int n1, int n2, \ + C *in, C *out, int sign, unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_many_dft)(int rank, const int *n, \ + int howmany, \ + C *in, const int *inembed, \ + int istride, int idist, \ + C *out, const int *onembed, \ + int ostride, int odist, \ + int sign, unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_guru_dft)(int rank, const X(iodim) *dims, \ + int howmany_rank, \ + const X(iodim) *howmany_dims, \ + C *in, C *out, \ + int sign, unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_guru_split_dft)(int rank, const X(iodim) *dims, \ + int howmany_rank, \ + const X(iodim) *howmany_dims, \ + R *ri, R *ii, R *ro, R *io, \ + unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_guru64_dft)(int rank, \ + const X(iodim64) *dims, \ + int howmany_rank, \ + const X(iodim64) *howmany_dims, \ + C *in, C *out, \ + int sign, unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_guru64_split_dft)(int rank, \ + const X(iodim64) *dims, \ + int howmany_rank, \ + const X(iodim64) *howmany_dims, \ + R *ri, R *ii, R *ro, R *io, \ + unsigned flags); \ + \ +FFTW_EXTERN void X(execute_dft)(const X(plan) p, C *in, C *out); \ +FFTW_EXTERN void X(execute_split_dft)(const X(plan) p, 
R *ri, R *ii, \ + R *ro, R *io); \ + \ +FFTW_EXTERN X(plan) X(plan_many_dft_r2c)(int rank, const int *n, \ + int howmany, \ + R *in, const int *inembed, \ + int istride, int idist, \ + C *out, const int *onembed, \ + int ostride, int odist, \ + unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_dft_r2c)(int rank, const int *n, \ + R *in, C *out, unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_dft_r2c_1d)(int n,R *in,C *out,unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_dft_r2c_2d)(int n0, int n1, \ + R *in, C *out, unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_dft_r2c_3d)(int n0, int n1, \ + int n2, \ + R *in, C *out, unsigned flags); \ + \ + \ +FFTW_EXTERN X(plan) X(plan_many_dft_c2r)(int rank, const int *n, \ + int howmany, \ + C *in, const int *inembed, \ + int istride, int idist, \ + R *out, const int *onembed, \ + int ostride, int odist, \ + unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_dft_c2r)(int rank, const int *n, \ + C *in, R *out, unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_dft_c2r_1d)(int n,C *in,R *out,unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_dft_c2r_2d)(int n0, int n1, \ + C *in, R *out, unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_dft_c2r_3d)(int n0, int n1, \ + int n2, \ + C *in, R *out, unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_guru_dft_r2c)(int rank, const X(iodim) *dims, \ + int howmany_rank, \ + const X(iodim) *howmany_dims, \ + R *in, C *out, \ + unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_guru_dft_c2r)(int rank, const X(iodim) *dims, \ + int howmany_rank, \ + const X(iodim) *howmany_dims, \ + C *in, R *out, \ + unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_guru_split_dft_r2c)( \ + int rank, const X(iodim) *dims, \ + int howmany_rank, \ + const X(iodim) *howmany_dims, \ + R *in, R *ro, R *io, \ + unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_guru_split_dft_c2r)( \ + int rank, const X(iodim) *dims, \ + int howmany_rank, \ + const X(iodim) *howmany_dims, \ + R *ri, R *ii, R *out, \ + unsigned flags); \ + 
\ +FFTW_EXTERN X(plan) X(plan_guru64_dft_r2c)(int rank, \ + const X(iodim64) *dims, \ + int howmany_rank, \ + const X(iodim64) *howmany_dims, \ + R *in, C *out, \ + unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_guru64_dft_c2r)(int rank, \ + const X(iodim64) *dims, \ + int howmany_rank, \ + const X(iodim64) *howmany_dims, \ + C *in, R *out, \ + unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_guru64_split_dft_r2c)( \ + int rank, const X(iodim64) *dims, \ + int howmany_rank, \ + const X(iodim64) *howmany_dims, \ + R *in, R *ro, R *io, \ + unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_guru64_split_dft_c2r)( \ + int rank, const X(iodim64) *dims, \ + int howmany_rank, \ + const X(iodim64) *howmany_dims, \ + R *ri, R *ii, R *out, \ + unsigned flags); \ + \ +FFTW_EXTERN void X(execute_dft_r2c)(const X(plan) p, R *in, C *out); \ +FFTW_EXTERN void X(execute_dft_c2r)(const X(plan) p, C *in, R *out); \ + \ +FFTW_EXTERN void X(execute_split_dft_r2c)(const X(plan) p, \ + R *in, R *ro, R *io); \ +FFTW_EXTERN void X(execute_split_dft_c2r)(const X(plan) p, \ + R *ri, R *ii, R *out); \ + \ +FFTW_EXTERN X(plan) X(plan_many_r2r)(int rank, const int *n, \ + int howmany, \ + R *in, const int *inembed, \ + int istride, int idist, \ + R *out, const int *onembed, \ + int ostride, int odist, \ + const X(r2r_kind) *kind, unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_r2r)(int rank, const int *n, R *in, R *out, \ + const X(r2r_kind) *kind, unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_r2r_1d)(int n, R *in, R *out, \ + X(r2r_kind) kind, unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_r2r_2d)(int n0, int n1, R *in, R *out, \ + X(r2r_kind) kind0, X(r2r_kind) kind1, \ + unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_r2r_3d)(int n0, int n1, int n2, \ + R *in, R *out, X(r2r_kind) kind0, \ + X(r2r_kind) kind1, X(r2r_kind) kind2, \ + unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_guru_r2r)(int rank, const X(iodim) *dims, \ + int howmany_rank, \ + const X(iodim) *howmany_dims, \ + R 
*in, R *out, \ + const X(r2r_kind) *kind, unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_guru64_r2r)(int rank, const X(iodim64) *dims, \ + int howmany_rank, \ + const X(iodim64) *howmany_dims, \ + R *in, R *out, \ + const X(r2r_kind) *kind, unsigned flags); \ + \ +FFTW_EXTERN void X(execute_r2r)(const X(plan) p, R *in, R *out); \ + \ +FFTW_EXTERN void X(destroy_plan)(X(plan) p); \ +FFTW_EXTERN void X(forget_wisdom)(void); \ +FFTW_EXTERN void X(cleanup)(void); \ + \ +FFTW_EXTERN void X(set_timelimit)(double); \ + \ +FFTW_EXTERN void X(plan_with_nthreads)(int nthreads); \ +FFTW_EXTERN int X(init_threads)(void); \ +FFTW_EXTERN void X(cleanup_threads)(void); \ + \ +FFTW_EXTERN void X(export_wisdom_to_file)(FILE *output_file); \ +FFTW_EXTERN char *X(export_wisdom_to_string)(void); \ +FFTW_EXTERN void X(export_wisdom)(void (*write_char)(char c, void *), \ + void *data); \ +FFTW_EXTERN int X(import_system_wisdom)(void); \ +FFTW_EXTERN int X(import_wisdom_from_file)(FILE *input_file); \ +FFTW_EXTERN int X(import_wisdom_from_string)(const char *input_string); \ +FFTW_EXTERN int X(import_wisdom)(int (*read_char)(void *), void *data); \ + \ +FFTW_EXTERN void X(fprint_plan)(const X(plan) p, FILE *output_file); \ +FFTW_EXTERN void X(print_plan)(const X(plan) p); \ + \ +FFTW_EXTERN void *X(malloc)(size_t n); \ +FFTW_EXTERN void X(free)(void *p); \ + \ +FFTW_EXTERN void X(flops)(const X(plan) p, \ + double *add, double *mul, double *fmas); \ +FFTW_EXTERN double X(estimate_cost)(const X(plan) p); \ + \ +FFTW_EXTERN const char X(version)[]; \ +FFTW_EXTERN const char X(cc)[]; \ +FFTW_EXTERN const char X(codelet_optim)[]; + + +/* end of FFTW_DEFINE_API macro */ + +FFTW_DEFINE_API(FFTW_MANGLE_DOUBLE, double, fftw_complex) +FFTW_DEFINE_API(FFTW_MANGLE_FLOAT, float, fftwf_complex) +FFTW_DEFINE_API(FFTW_MANGLE_LONG_DOUBLE, long double, fftwl_complex) + +#define FFTW_FORWARD (-1) +#define FFTW_BACKWARD (+1) + +#define FFTW_NO_TIMELIMIT (-1.0) + +/* documented flags */ +#define 
FFTW_MEASURE (0U) +#define FFTW_DESTROY_INPUT (1U << 0) +#define FFTW_UNALIGNED (1U << 1) +#define FFTW_CONSERVE_MEMORY (1U << 2) +#define FFTW_EXHAUSTIVE (1U << 3) /* NO_EXHAUSTIVE is default */ +#define FFTW_PRESERVE_INPUT (1U << 4) /* cancels FFTW_DESTROY_INPUT */ +#define FFTW_PATIENT (1U << 5) /* IMPATIENT is default */ +#define FFTW_ESTIMATE (1U << 6) + +/* undocumented beyond-guru flags */ +#define FFTW_ESTIMATE_PATIENT (1U << 7) +#define FFTW_BELIEVE_PCOST (1U << 8) +#define FFTW_NO_DFT_R2HC (1U << 9) +#define FFTW_NO_NONTHREADED (1U << 10) +#define FFTW_NO_BUFFERING (1U << 11) +#define FFTW_NO_INDIRECT_OP (1U << 12) +#define FFTW_ALLOW_LARGE_GENERIC (1U << 13) /* NO_LARGE_GENERIC is default */ +#define FFTW_NO_RANK_SPLITS (1U << 14) +#define FFTW_NO_VRANK_SPLITS (1U << 15) +#define FFTW_NO_VRECURSE (1U << 16) +#define FFTW_NO_SIMD (1U << 17) +#define FFTW_NO_SLOW (1U << 18) +#define FFTW_NO_FIXED_RADIX_LARGE_N (1U << 19) +#define FFTW_ALLOW_PRUNING (1U << 20) +#define FFTW_WISDOM_ONLY (1U << 21) + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ + +#endif /* FFTW3_H */ diff --git a/include/galsim/CorrelatedNoise.h b/include/galsim/CorrelatedNoise.h index 2864832075c..bb78b25739c 100644 --- a/include/galsim/CorrelatedNoise.h +++ b/include/galsim/CorrelatedNoise.h @@ -26,7 +26,6 @@ */ #include -#include "TMV_Sym.h" #include "Image.h" #include "SBProfile.h" @@ -41,26 +40,9 @@ namespace galsim { * written into. The rest are initialized and remain as zero. * * For an example of this function in use, see `galsim/correlatednoise.py`. - * - * Currently, this actually copies elements from an internal calculation of the covariance - * matrix (using Mike Jarvis' TMV library). It could, therefore, be calculated more - * efficiently by direct assignment. However, as this public member function is foreseen as - * being mainly for visualization/checking purposes, we go via the TMV intermediary to avoid - * code duplication. 
If, in future, it becomes critical to speed up this function this can be - * revisited. - */ - void calculateCovarianceMatrix(ImageView& cov, - const SBProfile& sbp, const Bounds& bounds, double dx); - - /** - * @brief Return, as a TMV SymMatrix, a noise covariance matrix between every element in an - * input Image with pixel scale dx. - * - * The TMV SymMatrix uses FortranStyle indexing (to match the FITS-compliant usage in Image) - * along with ColumnMajor ordering (the default), and Upper triangle storage. */ - tmv::SymMatrix calculateCovarianceSymMatrix( - const SBProfile& sbp, const Bounds& bounds, double dx); + void calculateCovarianceMatrix(ImageView& cov, const SBProfile& sbp, + const Bounds& bounds, double dx); } #endif diff --git a/include/galsim/FFT.h b/include/galsim/FFT.h index f9a6951c2df..721c527fb28 100644 --- a/include/galsim/FFT.h +++ b/include/galsim/FFT.h @@ -61,8 +61,7 @@ #include #include -#include "fftw3.h" -#include "TMV.h" +#include #include "Std.h" #include "Interpolant.h" @@ -149,16 +148,9 @@ namespace galsim { return *this; } - ~FFTW_Array() {} + ~FFTW_Array(); - void resize(size_t n) - { - if (_n != n) { - _n = n; - _array.resize(n); - _p = _array.get(); - } - } + void resize(size_t n); void fill(T val) { @@ -180,9 +172,6 @@ namespace galsim { private: size_t _n; - // fftw_malloc doesn't seem to actually guarantee 16 byte alignment, so we instead - // use TMV's AlignedArray class to handle the byte alignment for us. - tmv::AlignedArray _array; T* _p; }; diff --git a/include/galsim/IgnoreWarnings.h b/include/galsim/IgnoreWarnings.h index 5a93bfbb0bf..e98edbf5dd7 100644 --- a/include/galsim/IgnoreWarnings.h +++ b/include/galsim/IgnoreWarnings.h @@ -64,6 +64,10 @@ #pragma GCC diagnostic ignored "-Wlogical-op-parentheses" #endif +#if __has_warning("-Wshift-count-overflow") +#pragma GCC diagnostic ignored "-Wshift-count-overflow" +#endif + // And clang might need this even if it claims to be GNUC before 4.8. 
#if __has_warning("-Wunused-local-typedefs") #pragma GCC diagnostic ignored "-Wunused-local-typedefs" diff --git a/include/galsim/LRUCache.h b/include/galsim/LRUCache.h index 7bccc126f25..6b3882c59e2 100644 --- a/include/galsim/LRUCache.h +++ b/include/galsim/LRUCache.h @@ -183,8 +183,7 @@ namespace galsim { shared_ptr value(LRUCacheHelper::NewValue(key)); // Remove items from the cache as necessary. while (_entries.size() >= _nmax) { - bool erased = _cache.erase(_entries.back().first); - assert(erased); + _cache.erase(_entries.back().first); _entries.pop_back(); } // Add the new value to the front. diff --git a/include/galsim/Laguerre.h b/include/galsim/Laguerre.h index 426a6ab2936..612d28724fe 100644 --- a/include/galsim/Laguerre.h +++ b/include/galsim/Laguerre.h @@ -25,7 +25,19 @@ #include #include #include +#ifdef USE_TMV #include "TMV.h" +typedef tmv::Vector VectorXd; +typedef tmv::Matrix MatrixXd; +typedef tmv::Vector > VectorXcd; +typedef tmv::Matrix > MatrixXcd; +#else +#include "Eigen/Dense" +using Eigen::VectorXd; +using Eigen::MatrixXd; +using Eigen::VectorXcd; +using Eigen::MatrixXcd; +#endif #include "Std.h" @@ -196,7 +208,7 @@ namespace galsim { // ??? +=, -=, etc. private: - LVectorReference(tmv::Vector& v, PQIndex pq) : + LVectorReference(VectorXd& v, PQIndex pq) : _re(&v[pq.rIndex()]), _isign(pq.iSign()) {} double *_re; int _isign; // 0 if this is a real element, -1 if needs conjugation, else +1 @@ -204,19 +216,6 @@ namespace galsim { friend class LVector; }; - // A custom deleter to allow us to return views to the LVector as numpy arrays - // which will keep track of the Vector allocation. When the last LVector _or_ - // external view of _owner goes out of scope, then the tmv::Vector is destroyed. - class LVectorDeleter - { - public: - LVectorDeleter(shared_ptr > v) : _v(v) {} - - void operator()(double * p) const {} // the _v shared_ptr will delete for us! 
- - shared_ptr > _v; - }; - class LVector { public: @@ -227,7 +226,7 @@ namespace galsim { _v->setZero(); } - LVector(int order, const tmv::GenVector& v) : + LVector(int order, const VectorXd& v) : _order(order) { allocateMem(); @@ -235,19 +234,13 @@ namespace galsim { assert(v.size() == PQIndex::size(order)); } - LVector(int order, shared_ptr > v) : - _order(order), _v(v), - _owner(_v->ptr(), LVectorDeleter(_v)) - { assert(v->size() == PQIndex::size(order)); } - - LVector(const LVector& rhs) : _order(rhs._order), _v(rhs._v), _owner(rhs._owner) {} + LVector(const LVector& rhs) : _order(rhs._order), _v(rhs._v) {} LVector& operator=(const LVector& rhs) { if (_v.get()==rhs._v.get()) return *this; _order=rhs._order; _v = rhs._v; - _owner = rhs._owner; return *this; } @@ -276,7 +269,7 @@ namespace galsim { // by making a new copy of the vector first. If it is already the sole owner, // then nothing is done. (FYI: The term for this is "Copy on Write" semantics.) void take_ownership() - { if (!_v.unique()) { _v.reset(new tmv::Vector(*_v)); } } + { if (!_v.unique()) { _v.reset(new VectorXd(*_v)); } } void clear() { take_ownership(); _v->setZero(); } @@ -288,8 +281,8 @@ namespace galsim { int size() const { return _v->size(); } // Access the real-representation vector directly. - const tmv::Vector& rVector() const { return *_v; } - tmv::Vector& rVector() { take_ownership(); return *_v; } + const VectorXd& rVector() const { return *_v; } + VectorXd& rVector() { take_ownership(); return *_v; } // op[] with int returns real double operator[](int i) const { return (*_v)[i]; } @@ -363,7 +356,13 @@ namespace galsim { } // Inner product of the real values. 
- double dot(const LVector& rhs) const { return (*_v)*(*rhs._v); } + double dot(const LVector& rhs) const { +#ifdef USE_TMV + return (*_v)*(*rhs._v); +#else + return _v->dot(*rhs._v); +#endif + } // write to an ostream void write(std::ostream& os, int maxorder=-1) const; @@ -388,31 +387,31 @@ namespace galsim { // Create a matrix containing basis values at vector of input points. // Output matrix has m(i,j) = jth basis function at ith point - static shared_ptr > basis( - const tmv::ConstVectorView& x, const tmv::ConstVectorView& y, + static shared_ptr basis( + const VectorXd& x, const VectorXd& y, int order, double sigma=1.); // Create design matrix, including factors of 1/sigma stored in invsig - static shared_ptr > design( - const tmv::ConstVectorView& x, const tmv::ConstVectorView& y, - const tmv::ConstVectorView& invsig, int order, double sigma=1.); + static shared_ptr design( + const VectorXd& x, const VectorXd& y, + const VectorXd& invsig, int order, double sigma=1.); // ...or provide your own matrix static void design( - const tmv::ConstVectorView& x, const tmv::ConstVectorView& y, - const tmv::ConstVectorView& invsig, - tmv::MatrixView psi, int order, double sigma=1.); + const VectorXd& x, const VectorXd& y, + const VectorXd& invsig, + MatrixXd& psi, int order, double sigma=1.); static void basis( - const tmv::ConstVectorView& x, const tmv::ConstVectorView& y, - tmv::MatrixView psi, int order, double sigma=1.); + const VectorXd& x, const VectorXd& y, + MatrixXd& psi, int order, double sigma=1.); - static shared_ptr > > kBasis( - const tmv::ConstVectorView& kx, const tmv::ConstVectorView& ky, + static shared_ptr kBasis( + const VectorXd& kx, const VectorXd& ky, int order, double sigma); static void kBasis( - const tmv::ConstVectorView& kx, const tmv::ConstVectorView& ky, - tmv::MatrixView > psi_k, int order, double sigma); + const VectorXd& kx, const VectorXd& ky, + MatrixXcd& psi_k, int order, double sigma); // ?? 
Add routine to decompose a data vector into b's // ?? Add routines to evaluate summed basis at a set of x/k points @@ -428,189 +427,26 @@ namespace galsim { double flux(int maxP=-1) const; double apertureFlux(double R, int maxP=-1) const; -#if 0 - // Return reference to a matrix that generates ???realPsi transformations - // under infinitesimal point transforms (translate, dilate, shear). - // Returned matrix is at least as large as needed to go order x (order+2) - // The choices for generators: - enum GType { iX = 0, iY, iMu, iE1, iE2, iRot, nGen }; - static const tmv::ConstMatrixView Generator( - GType iparam, int orderOut, int orderIn); -#endif - - shared_ptr getOwner() const { return _owner; } - private: - // real vs fourier is set by the type of psi. - // For real, T = double - // For fourier, T = std::complex - template - static void mBasis( - const tmv::ConstVectorView& x, const tmv::ConstVectorView& y, - const tmv::ConstVectorView* invsig, - tmv::MatrixView psi, int order, double sigma=1.); void allocateMem() { int s = PQIndex::size(_order); - _v.reset(new tmv::Vector(s)); - _owner.reset(_v->ptr(), LVectorDeleter(_v)); + _v.reset(new VectorXd(s)); } int _order; - shared_ptr > _v; - shared_ptr _owner; + shared_ptr _v; }; std::ostream& operator<<(std::ostream& os, const LVector& lv); std::istream& operator>>(std::istream& is, LVector& lv); -#if 0 - // To allow iteration over all the generators: - inline LVector::GType& operator++(LVector::GType& g) { return g=LVector::GType(g+1); } -#endif - // This function finds the innermost radius at which the integrated flux // of the LVector's shape crosses the specified threshold, using the first // maxP monopole terms (or all, if maxP omitted) extern double fluxRadius(const LVector& lv, double threshold, int maxP=-1); - - // NB. The LTransform class is not currently used by anything in GalSim. - // Plus, there are not even any implemenations of the MakeLTransform functions below. 
-#if 0 - - //-------------------------------------------------------------- - // - // Next class is a transformation matrix for Laguerre vector. Internal - // storage is as a matrix over the real degrees of freedom. - // Interface gives you the (complex) matrix elements of pqIndex pairs. - - // Again this is a HANDLE, so it can be passed into - // subroutines without referencing. Copy/assignment create a new link; - // for fresh copy, use copy() method. - class LTransform - { - public: - LTransform(int orderOut, int orderIn) : - _orderIn(orderIn), _orderOut(orderOut), - _m(new tmv::Matrix(PQIndex::size(orderOut),PQIndex::size(orderIn),0.)) - {} - - // Build an LTransform from a tmv::Matrix for the real degrees of freedom. - // Matrix must have correct dimensions. - LTransform(int orderOut, int orderIn, const tmv::GenMatrix& m) : - _orderIn(orderIn), _orderOut(orderOut), - _m(new tmv::Matrix(m)) - { - assert(m.ncols() == PQIndex::size(orderIn)); - assert(m.nrows() == PQIndex::size(orderOut)); - } - - LTransform(int orderOut, int orderIn, shared_ptr > m) : - _orderIn(orderIn), _orderOut(orderOut), _m(m) - { - assert(m->ncols() == PQIndex::size(orderIn)); - assert(m->nrows() == PQIndex::size(orderOut)); - } - - LTransform(const LTransform& rhs) : - _orderIn(rhs._orderIn), _orderOut(rhs._orderOut), _m(rhs._m) {} - - LTransform& operator=(const LTransform& rhs) - { - if (_m.get()==rhs._m.get()) return *this; - _orderIn=rhs._orderIn; _orderOut=rhs._orderOut; _m = rhs._m; - return *this; - } - - ~LTransform() {} - - LTransform copy() const - { - LTransform fresh(_orderOut, _orderIn); - *(fresh._m) = *_m; - return fresh; - } - - int getOrderIn() const { return _orderIn; } - int getOrderOut() const { return _orderOut; } - int sizeIn() const { return _m->ncols(); } - int sizeOut() const { return _m->nrows(); } - - void resize(int orderOut, int orderIn) - { - if (_orderIn != orderIn || _orderOut != orderOut) { - _orderIn = orderIn; - _orderOut = orderOut; - _m.reset(new 
tmv::Matrix( - PQIndex::size(orderOut), PQIndex::size(orderIn), 0.)); - } else { - take_ownership(); - } - } - - // As above, we use take_ownership() to implement Copy on Write semantics. - void take_ownership() - { if (!_m.unique()) { _m.reset(new tmv::Matrix(*_m)); } } - - void clear() { take_ownership(); _m->setZero(); } - void identity() { take_ownership(); _m->setToIdentity(); } - - // Access the real-representation vector directly. - tmv::Matrix& rMatrix() { take_ownership(); return *_m; } - const tmv::Matrix& rMatrix() const { return *_m; } - - // Element read - std::complex operator()(PQIndex pq1, PQIndex pq2) const; - std::complex operator()(int p1, int q1, int p2, int q2) const - { return operator()(PQIndex(p1,q1),PQIndex(p2,q2)); } - - // Element write. Note that it is necessary to give two complex - // simultaneously to allow writing the real version of the matrix: - void set( - PQIndex pq1, PQIndex pq2, - std::complex Cpq1pq2, std::complex Cqp1pq2); - - // Operate on other Laguerre vectors/matrices - LVector operator*(const LVector rhs) const; - LTransform operator*(const LTransform rhs) const; - LTransform& operator*=(const LTransform rhs); - - private: - int _orderIn; - int _orderOut; - shared_ptr > _m; - }; - - // Here are the primary types of transformations: - // For the point transforms, set coordShift=false if we want - // to transform the FLUX on a fixed coordinate grid. Set true - // if want to describe the same flux on a transformed COORD system. 
- - // Shear: - LTransform MakeLTransform( - CppShear eta, int orderOut, int orderIn, bool coordShift=false); - - // Dilation: - LTransform MakeLTransform( - double mu, int orderOut, int orderIn, bool coordShift=false); - - // Translation: - LTransform MakeLTransform( - Position x0, int orderOut, int orderIn, bool coordShift=false); - - // Rotation: - LTransform RotationLTransform( - double theta, int orderOut, int orderIn, bool coordShift=false); - - // Convolution with PSF: - LTransform MakeLTransform( - const LVector psf, const double D, - const int orderOut, const int orderIn, const int orderStar); - -#endif // LTransform section - } #endif diff --git a/include/galsim/SBProfileImpl.h b/include/galsim/SBProfileImpl.h index d366ce55674..3d03197fb54 100644 --- a/include/galsim/SBProfileImpl.h +++ b/include/galsim/SBProfileImpl.h @@ -22,7 +22,6 @@ #include "SBProfile.h" #include "integ/Int.h" -#include "TMV.h" namespace galsim { diff --git a/include/galsim/SBShapeletImpl.h b/include/galsim/SBShapeletImpl.h index d0d44a26f96..6fea4f31ef9 100644 --- a/include/galsim/SBShapeletImpl.h +++ b/include/galsim/SBShapeletImpl.h @@ -73,14 +73,6 @@ namespace galsim { double kx0, double dkx, double dkxy, double ky0, double dky, double dkyx) const; - // The above functions just build a list of (x,y) values and then call these: - void fillXValue(tmv::MatrixView val, - const tmv::Matrix& x, - const tmv::Matrix& y) const; - void fillKValue(tmv::MatrixView > val, - const tmv::Matrix& kx, - const tmv::Matrix& ky) const; - std::string serialize() const; private: diff --git a/include/galsim/SBVonKarman.h b/include/galsim/SBVonKarman.h index 2f98db8baa3..8c9193c3f5a 100644 --- a/include/galsim/SBVonKarman.h +++ b/include/galsim/SBVonKarman.h @@ -62,7 +62,7 @@ namespace galsim { double getL0() const; double getScale() const; bool getDoDelta() const; - double getDeltaAmplitude() const; + double getDelta() const; double getHalfLightRadius() const; double structureFunction(double) const; 
diff --git a/include/galsim/SBVonKarmanImpl.h b/include/galsim/SBVonKarmanImpl.h index d6a1da4865d..a92f34fca86 100644 --- a/include/galsim/SBVonKarmanImpl.h +++ b/include/galsim/SBVonKarmanImpl.h @@ -45,7 +45,7 @@ namespace galsim { double stepK() const { return _stepk; } double maxK() const { return _maxk; } - double getDeltaAmplitude() const { return _deltaAmplitude; } + double getDelta() const { return _delta; } double getHalfLightRadius() const { return _hlr; } double kValue(double) const; @@ -66,8 +66,8 @@ namespace galsim { double _L053; // (r0/L0)^(-5/3) double _stepk; double _maxk; - double _deltaAmplitude; - double _deltaScale; // 1/(1-_deltaAmplitude) + double _delta; + double _deltaScale; // 1/(1-_delta) double _lam_arcsec; // _lam * ARCSEC2RAD / 2pi bool _doDelta; double _hlr; // half-light-radius @@ -102,7 +102,7 @@ namespace galsim { double maxK() const; double stepK() const; - double getDeltaAmplitude() const; + double getDelta() const; double getHalfLightRadius() const; Position centroid() const { return Position(0., 0.); } diff --git a/include/galsim/boost1_48_0/assert.hpp b/include/galsim/boost1_48_0/assert.hpp index 174f0846fd1..10d3919f230 100644 --- a/include/galsim/boost1_48_0/assert.hpp +++ b/include/galsim/boost1_48_0/assert.hpp @@ -34,7 +34,11 @@ #elif defined(BOOST_ENABLE_ASSERT_HANDLER) +#ifdef USE_BOOST #include +#else +#include "galsim/boost1_48_0/current_function.hpp" +#endif namespace boost { @@ -63,8 +67,6 @@ namespace boost #elif defined(BOOST_ENABLE_ASSERT_HANDLER) - #include - namespace boost { void assertion_failed_msg(char const * expr, char const * msg, @@ -80,7 +82,6 @@ namespace boost #define BOOST_ASSERT_HPP #include #include - #include // IDE's like Visual Studio perform better if output goes to std::cout or // some other stream, so allow user to configure output stream: diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000000..322e03d71b3 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,2 @@ 
+[build-system] +requires = ["setuptools>=38", "pybind11>=2.2"] diff --git a/pysrc/Bessel.cpp b/pysrc/Bessel.cpp index 5b3a55e7df7..1d2cbd554d5 100644 --- a/pysrc/Bessel.cpp +++ b/pysrc/Bessel.cpp @@ -17,29 +17,22 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "math/BesselRoots.h" #include "math/Bessel.h" -namespace bp = boost::python; - namespace galsim { namespace math { - void pyExportBessel() { - - bp::def("j0_root", &getBesselRoot0); - // In python, with switch from mostly matching the boost names for these to matching - // the names scipy.special uses. - bp::def("j0", &j0); - bp::def("j1", &j1); - bp::def("jv", &cyl_bessel_j); - bp::def("yv", &cyl_bessel_y); - bp::def("iv", &cyl_bessel_i); - bp::def("kv", &cyl_bessel_k); - + void pyExportBessel(PY_MODULE& _galsim) + { + GALSIM_DOT def("j0_root", &getBesselRoot0); + GALSIM_DOT def("j0", &j0); + GALSIM_DOT def("j1", &j1); + GALSIM_DOT def("jv", &cyl_bessel_j); + GALSIM_DOT def("yv", &cyl_bessel_y); + GALSIM_DOT def("iv", &cyl_bessel_i); + GALSIM_DOT def("kv", &cyl_bessel_k); } } // namespace math diff --git a/pysrc/Bounds.cpp b/pysrc/Bounds.cpp index 87d3a4d7bc7..a38845c521f 100644 --- a/pysrc/Bounds.cpp +++ b/pysrc/Bounds.cpp @@ -17,41 +17,37 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "Bounds.h" -namespace bp = boost::python; - namespace galsim { template - static void WrapPosition(const std::string& suffix) + static void WrapPosition(PY_MODULE& _galsim, const std::string& suffix) { - bp::class_< Position >(("Position" + suffix).c_str(), bp::no_init) - .def(bp::init()) + py::class_ >(GALSIM_COMMA ("Position" + suffix).c_str() BP_NOINIT) + .def(py::init()) .def_readonly("x", &Position::x) .def_readonly("y", &Position::y); } template - static void WrapBounds(const std::string& suffix) + static void WrapBounds(PY_MODULE& _galsim, const std::string& suffix) { - bp::class_< Bounds >(("Bounds" + suffix).c_str(), bp::no_init) - .def(bp::init()) - .add_property("xmin", &Bounds::getXMin) - .add_property("xmax", &Bounds::getXMax) - .add_property("ymin", &Bounds::getYMin) - .add_property("ymax", &Bounds::getYMax); + py::class_< Bounds >(GALSIM_COMMA ("Bounds" + suffix).c_str() BP_NOINIT) + .def(py::init()) + .def_property_readonly("xmin", &Bounds::getXMin) + .def_property_readonly("xmax", &Bounds::getXMax) + .def_property_readonly("ymin", &Bounds::getYMin) + .def_property_readonly("ymax", &Bounds::getYMax); } - void pyExportBounds() + void pyExportBounds(PY_MODULE& _galsim) { - WrapPosition("D"); - WrapPosition("I"); - WrapBounds("D"); - WrapBounds("I"); + WrapPosition(_galsim, "D"); + WrapPosition(_galsim, "I"); + WrapBounds(_galsim, "D"); + WrapBounds(_galsim, "I"); } } // namespace galsim diff --git a/pysrc/CDModel.cpp b/pysrc/CDModel.cpp index 9ea75799a86..2806583fce8 100644 --- a/pysrc/CDModel.cpp +++ b/pysrc/CDModel.cpp @@ -17,29 +17,25 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "CDModel.h" -namespace bp = boost::python; - namespace galsim { template - static void WrapTemplates() + static void WrapTemplates(PY_MODULE& _galsim) { typedef void (*ApplyCD_func)(ImageView& , const BaseImage& , const BaseImage& , const BaseImage& , const BaseImage& , const BaseImage& , const int , const double ); - bp::def("_ApplyCD", ApplyCD_func(&ApplyCD)); + GALSIM_DOT def("_ApplyCD", ApplyCD_func(&ApplyCD)); } - void pyExportCDModel() + void pyExportCDModel(PY_MODULE& _galsim) { - WrapTemplates(); - WrapTemplates(); + WrapTemplates(_galsim); + WrapTemplates(_galsim); } } // namespace galsim diff --git a/pysrc/CorrelatedNoise.cpp b/pysrc/CorrelatedNoise.cpp deleted file mode 100644 index e25301e0075..00000000000 --- a/pysrc/CorrelatedNoise.cpp +++ /dev/null @@ -1,35 +0,0 @@ -/* -*- c++ -*- - * Copyright (c) 2012-2017 by the GalSim developers team on GitHub - * https://github.com/GalSim-developers - * - * This file is part of GalSim: The modular galaxy image simulation toolkit. - * https://github.com/GalSim-developers/GalSim - * - * GalSim is free software: redistribution and use in source and binary forms, - * with or without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this - * list of conditions, and the disclaimer given in the accompanying LICENSE - * file. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions, and the disclaimer given in the documentation - * and/or other materials provided with the distribution. 
- */ - -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - -#include "Interpolant.h" -#include "CorrelatedNoise.h" - -namespace bp = boost::python; - -namespace galsim { - - void pyExportCorrelationFunction() - { - bp::def("_calculateCovarianceMatrix", calculateCovarianceMatrix); - } - -} // namespace galsim diff --git a/pysrc/HSM.cpp b/pysrc/HSM.cpp index 327ccaad13f..c0d9b646906 100644 --- a/pysrc/HSM.cpp +++ b/pysrc/HSM.cpp @@ -17,16 +17,9 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" - -#define BOOST_PYTHON_MAX_ARITY 22 // We have a function with 21 params here... -// c.f. www.boost.org/libs/python/doc/v2/configuration.html -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "hsm/PSFCorr.h" -namespace bp = boost::python; - namespace galsim { namespace hsm { @@ -69,28 +62,28 @@ namespace hsm { } template - static void WrapTemplates() { - typedef void (*FAM_func)(ShapeData&t, const BaseImage&, const BaseImage&, + static void WrapTemplates(PY_MODULE& _galsim) + { + typedef void (*FAM_func)(ShapeData&, const BaseImage&, const BaseImage&, double, double, Position, bool, const HSMParams&); - bp::def("_FindAdaptiveMomView", FAM_func(&FindAdaptiveMomView)); + GALSIM_DOT def("_FindAdaptiveMomView", FAM_func(&FindAdaptiveMomView)); typedef void (*ESH_func)(ShapeData&, const BaseImage&, const BaseImage&, const BaseImage&, float, const char *, const char*, double, double, double, Position, const HSMParams&); - bp::def("_EstimateShearView", ESH_func(&EstimateShearView)); + GALSIM_DOT def("_EstimateShearView", ESH_func(&EstimateShearView)); }; - void pyExportHSM() { - - bp::class_ pyHSMParams("HSMParams", bp::no_init); - pyHSMParams - .def(bp::init< + void pyExportHSM(PY_MODULE& _galsim) + { + py::class_(GALSIM_COMMA "HSMParams" BP_NOINIT) + .def(py::init< double, double, double, int, int, double, long, long, double, double, double, int, double, double, double>()); - bp::class_("ShapeData", 
"", bp::no_init) - .def("__init__", bp::make_constructor(&ShapeData_init, bp::default_call_policies())) + py::class_(GALSIM_COMMA "ShapeData" BP_NOINIT) + .def(PY_INIT(&ShapeData_init)) .def_readonly("image_bounds", &ShapeData::image_bounds) .def_readonly("moments_status", &ShapeData::moments_status) .def_readonly("observed_e1", &ShapeData::observed_e1) @@ -112,13 +105,12 @@ namespace hsm { .def_readonly("psf_sigma", &ShapeData::psf_sigma) .def_readonly("psf_e1", &ShapeData::psf_e1) .def_readonly("psf_e2", &ShapeData::psf_e2) - .def_readonly("error_message", &ShapeData::error_message) - ; + .def_readonly("error_message", &ShapeData::error_message); - WrapTemplates(); - WrapTemplates(); - WrapTemplates(); - WrapTemplates(); + WrapTemplates(_galsim); + WrapTemplates(_galsim); + WrapTemplates(_galsim); + WrapTemplates(_galsim); } } // namespace hsm diff --git a/pysrc/Image.cpp b/pysrc/Image.cpp index 653cc0edc24..4a2af958343 100644 --- a/pysrc/Image.cpp +++ b/pysrc/Image.cpp @@ -17,19 +17,15 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" // header that includes Python.h always needs to come first - +#include "PyBind11Helper.h" #include "Image.h" -namespace bp = boost::python; - // Note that docstrings are now added in galsim/image.py namespace galsim { template - static ImageView* MakeFromArray(size_t idata, int step, int stride, - const Bounds& bounds) + static ImageView* MakeFromArray( + size_t idata, int step, int stride, const Bounds& bounds) { T* data = reinterpret_cast(idata); shared_ptr owner; @@ -37,44 +33,44 @@ namespace galsim { } template - static void WrapImage(const std::string& suffix) + static void WrapImage(PY_MODULE& _galsim, const std::string& suffix) { - bp::class_< BaseImage, boost::noncopyable >(("BaseImage" + suffix).c_str(), bp::no_init); + py::class_ BP_NONCOPYABLE>( + GALSIM_COMMA ("BaseImage" + suffix).c_str() BP_NOINIT); typedef ImageView* (*Make_func)(size_t, int, int, const Bounds&); - bp::class_< ImageView, bp::bases< BaseImage > >(("ImageView" + suffix).c_str(), - bp::no_init) - .def("__init__", bp::make_constructor((Make_func)&MakeFromArray, - bp::default_call_policies())); + py::class_, BP_BASES(BaseImage)>( + GALSIM_COMMA ("ImageView" + suffix).c_str() BP_NOINIT) + .def(PY_INIT((Make_func)&MakeFromArray)); typedef void (*rfft_func_type)(const BaseImage&, ImageView >, bool, bool); typedef void (*irfft_func_type)(const BaseImage&, ImageView, bool, bool); typedef void (*cfft_func_type)(const BaseImage&, ImageView >, bool, bool, bool); - bp::def("rfft", rfft_func_type(&rfft)); - bp::def("irfft", irfft_func_type(&irfft)); - bp::def("cfft", cfft_func_type(&cfft)); + GALSIM_DOT def("rfft", rfft_func_type(&rfft)); + GALSIM_DOT def("irfft", irfft_func_type(&irfft)); + GALSIM_DOT def("cfft", cfft_func_type(&cfft)); typedef void (*wrap_func_type)(ImageView, const Bounds&, bool, bool); - bp::def("wrapImage", wrap_func_type(&wrapImage)); + GALSIM_DOT def("wrapImage", wrap_func_type(&wrapImage)); typedef void 
(*invert_func_type)(ImageView); - bp::def("invertImage", invert_func_type(&invertImage)); + GALSIM_DOT def("invertImage", invert_func_type(&invertImage)); } - void pyExportImage() + void pyExportImage(PY_MODULE& _galsim) { - WrapImage("US"); - WrapImage("UI"); - WrapImage("S"); - WrapImage("I"); - WrapImage("F"); - WrapImage("D"); - WrapImage >("CD"); - WrapImage >("CF"); + WrapImage(_galsim, "US"); + WrapImage(_galsim, "UI"); + WrapImage(_galsim, "S"); + WrapImage(_galsim, "I"); + WrapImage(_galsim, "F"); + WrapImage(_galsim, "D"); + WrapImage >(_galsim, "CD"); + WrapImage >(_galsim, "CF"); - bp::def("goodFFTSize", &goodFFTSize); + GALSIM_DOT def("goodFFTSize", &goodFFTSize); } } // namespace galsim diff --git a/pysrc/Integ.cpp b/pysrc/Integ.cpp index 9b066a0fb7c..975dc3c07cb 100644 --- a/pysrc/Integ.cpp +++ b/pysrc/Integ.cpp @@ -17,15 +17,10 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "integ/Int.h" - #include -namespace bp = boost::python; - namespace galsim { namespace integ { @@ -34,29 +29,29 @@ namespace integ { public std::unary_function { public: - PyFunc(const bp::object& func) : _func(func) {} + PyFunc(const py::object& func) : _func(func) {} double operator()(double x) const - { return bp::extract(_func(x)); } + { return PY_CAST(_func(x)); } private: - const bp::object& _func; + const py::object& _func; }; // Integrate a python function using int1d. 
- bp::tuple PyInt1d(const bp::object& func, double min, double max, + py::tuple PyInt1d(const py::object& func, double min, double max, double rel_err=DEFRELERR, double abs_err=DEFABSERR) { PyFunc pyfunc(func); try { double res = int1d(pyfunc, min, max, rel_err, abs_err); - return bp::make_tuple(true, res); + return py::make_tuple(true, res); } catch (IntFailure& e) { - return bp::make_tuple(false, e.what()); + return py::make_tuple(false, e.what()); } } - void pyExportInteg() { - - bp::def("PyInt1d", &PyInt1d); + void pyExportInteg(PY_MODULE& _galsim) + { + GALSIM_DOT def("PyInt1d", &PyInt1d); } diff --git a/pysrc/Interpolant.cpp b/pysrc/Interpolant.cpp index 936d9f87e20..eb07f0556b0 100644 --- a/pysrc/Interpolant.cpp +++ b/pysrc/Interpolant.cpp @@ -17,40 +17,37 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" +#include #include "Interpolant.h" -namespace bp = boost::python; - namespace galsim { - void pyExportInterpolant() + void pyExportInterpolant(PY_MODULE& _galsim) { - bp::class_("Interpolant", bp::no_init); + py::class_(GALSIM_COMMA "Interpolant" BP_NOINIT); - bp::class_ >("Delta", bp::no_init) - .def(bp::init()); + py::class_(GALSIM_COMMA "Delta" BP_NOINIT) + .def(py::init()); - bp::class_ >("Nearest", bp::no_init) - .def(bp::init()); + py::class_(GALSIM_COMMA "Nearest" BP_NOINIT) + .def(py::init()); - bp::class_ >("SincInterpolant", bp::no_init) - .def(bp::init()); + py::class_(GALSIM_COMMA "SincInterpolant" BP_NOINIT) + .def(py::init()); - bp::class_ >("Lanczos", bp::no_init) - .def(bp::init()) + py::class_(GALSIM_COMMA "Lanczos" BP_NOINIT) + .def(py::init()) .def("urange", &Lanczos::urange); - bp::class_ >("Linear", bp::no_init) - .def(bp::init()); + py::class_(GALSIM_COMMA "Linear" BP_NOINIT) + .def(py::init()); - bp::class_ >("Cubic", bp::no_init) - .def(bp::init()); + py::class_(GALSIM_COMMA "Cubic" BP_NOINIT) + .def(py::init()); - bp::class_ 
>("Quintic", bp::no_init) - .def(bp::init()); + py::class_(GALSIM_COMMA "Quintic" BP_NOINIT) + .def(py::init()); } } // namespace galsim diff --git a/pysrc/PhotonArray.cpp b/pysrc/PhotonArray.cpp index 78482fdb5cf..6036d7b202d 100644 --- a/pysrc/PhotonArray.cpp +++ b/pysrc/PhotonArray.cpp @@ -17,13 +17,9 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include // header that includes Python.h always needs to come first - +#include "PyBind11Helper.h" #include "PhotonArray.h" -namespace bp = boost::python; - namespace galsim { template @@ -47,11 +43,11 @@ namespace galsim { return new PhotonArray(N, x, y, flux, dxdz, dydz, wave, is_corr); } - void pyExportPhotonArray() + void pyExportPhotonArray(PY_MODULE& _galsim) { - bp::class_ pyPhotonArray("PhotonArray", bp::no_init); + py::class_ pyPhotonArray(GALSIM_COMMA "PhotonArray" BP_NOINIT); pyPhotonArray - .def("__init__", bp::make_constructor(&construct, bp::default_call_policies())) + .def(PY_INIT(&construct)) .def("convolve", &PhotonArray::convolve); WrapTemplates(pyPhotonArray); WrapTemplates(pyPhotonArray); diff --git a/pysrc/PyBind11Helper.h b/pysrc/PyBind11Helper.h new file mode 100644 index 00000000000..0a2fa15cb59 --- /dev/null +++ b/pysrc/PyBind11Helper.h @@ -0,0 +1,80 @@ +/* -*- c++ -*- + * Copyright (c) 2012-2017 by the GalSim developers team on GitHub + * https://github.com/GalSim-developers + * + * This file is part of GalSim: The modular galaxy image simulation toolkit. + * https://github.com/GalSim-developers/GalSim + * + * GalSim is free software: redistribution and use in source and binary forms, + * with or without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions, and the disclaimer given in the accompanying LICENSE + * file. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions, and the disclaimer given in the documentation + * and/or other materials provided with the distribution. + */ +#ifndef PyBind11Helper_H +#define PyBind11Helper_H + +#ifdef USE_BOOST + +#include "galsim/IgnoreWarnings.h" + +#define BOOST_PYTHON_MAX_ARITY 22 // We have a function with 21 params in HSM.cpp + // c.f. www.boost.org/libs/python/doc/v2/configuration.html + +#define BOOST_NO_CXX11_SMART_PTR +#include +#include +namespace py = boost::python; + +// Boost Python and PyBind11 work fairly similarly. There are a few differences though. +// In some cases, pybind11 simplified things, or changed how some things work. So these +// macros allow us to write code that works for either boost python or pybind11. + +// First some things where the boost equivalent of some pybind11 function is different: +#define PYBIND11_MODULE(x,y) BOOST_PYTHON_MODULE(x) +#define PY_MODULE py::scope +#define PY_CAST py::extract +#define PY_INIT(args...) "__init__", py::make_constructor(args, py::default_call_policies()) +#define def_property_readonly add_property + +// PyBind11 requires the module object to be written some places where boost python does not. +// Our module name is always _galsim, so where we would write _galsim. or _galsim, we write these +// instead so in boost python, the module name goes away. +#define GALSIM_DOT py:: +#define GALSIM_COMMA + +// Finally, there are somethings that are only needed for boost python. These are not required +// at all for pybind11. +#define BP_SCOPE(x) py::scope x; +#define BP_NOINIT , py::no_init +#define BP_NONCOPYABLE , boost::noncopyable +#define BP_BASES(T) py::bases + +#else + +#include +#include +#include +#include +namespace py = pybind11; + +#define PY_MODULE py::module +#define PY_CAST py::cast +#define PY_INIT(args...) py::init(args) + +#define GALSIM_DOT _galsim. 
+#define GALSIM_COMMA _galsim, + +#define BP_SCOPE(x) +#define BP_NOINIT +#define BP_NONCOPYABLE +#define BP_BASES(T) T + +#endif + +#endif diff --git a/pysrc/Random.cpp b/pysrc/Random.cpp index 007c4b6fec0..47cf87aee8e 100644 --- a/pysrc/Random.cpp +++ b/pysrc/Random.cpp @@ -17,13 +17,9 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "Random.h" -namespace bp = boost::python; - namespace galsim { void Generate(BaseDeviate& rng, size_t N, size_t idata) @@ -50,12 +46,12 @@ namespace galsim { rng.generateFromExpectation(N, data); } - void pyExportRandom() + void pyExportRandom(PY_MODULE& _galsim) { - bp::class_ ("BaseDeviateImpl", "", bp::no_init) - .def(bp::init()) - .def(bp::init()) - .def(bp::init()) + py::class_ (GALSIM_COMMA "BaseDeviateImpl" BP_NOINIT) + .def(py::init()) + .def(py::init()) + .def(py::init()) .def("seed", (void (BaseDeviate::*) (long) )&BaseDeviate::seed) .def("reset", (void (BaseDeviate::*) (const BaseDeviate&) )&BaseDeviate::reset) .def("clearCache", &BaseDeviate::clearCache) @@ -65,34 +61,41 @@ namespace galsim { .def("generate", &Generate) .def("add_generate", &AddGenerate); - bp::class_ >("UniformDeviateImpl", bp::no_init) - .def(bp::init()) + py::class_( + GALSIM_COMMA "UniformDeviateImpl" BP_NOINIT) + .def(py::init()) .def("generate1", &UniformDeviate::generate1); - bp::class_ >("GaussianDeviateImpl", bp::no_init) - .def(bp::init()) + py::class_( + GALSIM_COMMA "GaussianDeviateImpl" BP_NOINIT) + .def(py::init()) .def("generate1", &GaussianDeviate::generate1) .def("generate_from_variance", &GenerateFromVariance); - bp::class_ >("BinomialDeviateImpl", bp::no_init) - .def(bp::init()) + py::class_( + GALSIM_COMMA "BinomialDeviateImpl" BP_NOINIT) + .def(py::init()) .def("generate1", &BinomialDeviate::generate1); - bp::class_ >("PoissonDeviateImpl", bp::no_init) - .def(bp::init()) + py::class_( + GALSIM_COMMA 
"PoissonDeviateImpl" BP_NOINIT) + .def(py::init()) .def("generate1", &PoissonDeviate::generate1) .def("generate_from_expectation", &GenerateFromExpectation); - bp::class_ >("WeibullDeviateImpl", bp::no_init) - .def(bp::init()) + py::class_( + GALSIM_COMMA "WeibullDeviateImpl" BP_NOINIT) + .def(py::init()) .def("generate1", &WeibullDeviate::generate1); - bp::class_ >("GammaDeviateImpl", bp::no_init) - .def(bp::init()) + py::class_( + GALSIM_COMMA "GammaDeviateImpl" BP_NOINIT) + .def(py::init()) .def("generate1", &GammaDeviate::generate1); - bp::class_ >("Chi2DeviateImpl", bp::no_init) - .def(bp::init()) + py::class_( + GALSIM_COMMA "Chi2DeviateImpl" BP_NOINIT) + .def(py::init()) .def("generate1", &Chi2Deviate::generate1); } diff --git a/pysrc/RealGalaxy.cpp b/pysrc/RealGalaxy.cpp index 4663ebbdeb8..17dfdd2c1a9 100644 --- a/pysrc/RealGalaxy.cpp +++ b/pysrc/RealGalaxy.cpp @@ -17,13 +17,9 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "RealGalaxy.h" -namespace bp = boost::python; - namespace galsim { void CallComputeCRGCoefficients(size_t coef_data, size_t Sigma_data, @@ -39,8 +35,8 @@ namespace galsim { ComputeCRGCoefficients(coef, Sigma, w, kimgs, psf, nsed, nband, nkx, nky); }; - void pyExportRealGalaxy() { - bp::def("ComputeCRGCoefficients", &CallComputeCRGCoefficients); + void pyExportRealGalaxy(PY_MODULE& _galsim) { + GALSIM_DOT def("ComputeCRGCoefficients", &CallComputeCRGCoefficients); } } // namespace galsim diff --git a/pysrc/SBAdd.cpp b/pysrc/SBAdd.cpp index 187c803bcf9..78bc6a86d53 100644 --- a/pysrc/SBAdd.cpp +++ b/pysrc/SBAdd.cpp @@ -17,29 +17,30 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBAdd.h" -namespace bp = boost::python; - namespace galsim { - static SBAdd* construct(const bp::list& slist, GSParams gsparams) +#ifdef USE_BOOST + static SBAdd* construct(const py::object& iterable, GSParams gsparams) { + py::stl_input_iterator iter(iterable), end; std::list plist; - int n = len(slist); - for(int i=0; i(slist[i])); - } + for(; iter != end; ++iter) plist.push_back(*iter); + return new SBAdd(plist, gsparams); + } +#else + static SBAdd* construct(const std::list& plist, GSParams gsparams) + { return new SBAdd(plist, gsparams); } +#endif - void pyExportSBAdd() + void pyExportSBAdd(PY_MODULE& _galsim) { - bp::class_< SBAdd, bp::bases >("SBAdd", bp::no_init) - .def("__init__", bp::make_constructor(&construct, bp::default_call_policies())); + py::class_(GALSIM_COMMA "SBAdd" BP_NOINIT) + .def(PY_INIT(&construct)); } } // namespace galsim diff --git a/pysrc/SBAiry.cpp b/pysrc/SBAiry.cpp index 1feba0a0c80..27ec96aa2d8 100644 --- a/pysrc/SBAiry.cpp +++ b/pysrc/SBAiry.cpp @@ -17,19 +17,15 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBAiry.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBAiry() + void pyExportSBAiry(PY_MODULE& _galsim) { - bp::class_ >("SBAiry", bp::no_init) - .def(bp::init()); + py::class_(GALSIM_COMMA "SBAiry" BP_NOINIT) + .def(py::init()); } } // namespace galsim diff --git a/pysrc/SBBox.cpp b/pysrc/SBBox.cpp index a40820adfae..0c0acc89b0c 100644 --- a/pysrc/SBBox.cpp +++ b/pysrc/SBBox.cpp @@ -17,22 +17,17 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBBox.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBBox() + void pyExportSBBox(PY_MODULE& _galsim) { - bp::class_ >("SBBox", bp::no_init) - .def(bp::init()); - - bp::class_ >("SBTopHat", bp::no_init) - .def(bp::init()); + py::class_(GALSIM_COMMA "SBBox" BP_NOINIT) + .def(py::init()); + py::class_(GALSIM_COMMA "SBTopHat" BP_NOINIT) + .def(py::init()); } } // namespace galsim diff --git a/pysrc/SBConvolve.cpp b/pysrc/SBConvolve.cpp index 73f09eb4d0e..fd35d4fc9a7 100644 --- a/pysrc/SBConvolve.cpp +++ b/pysrc/SBConvolve.cpp @@ -17,35 +17,36 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBConvolve.h" -namespace bp = boost::python; - namespace galsim { - static SBConvolve* construct(const bp::list& slist, bool real_space, GSParams gsparams) +#ifdef USE_BOOST + static SBConvolve* construct( + const py::object& iterable, bool real_space, GSParams gsparams) { + py::stl_input_iterator iter(iterable), end; std::list plist; - int n = len(slist); - for(int i=0; i(slist[i])); - } + for(; iter != end; ++iter) plist.push_back(*iter); return new SBConvolve(plist, real_space, gsparams); } - - void pyExportSBConvolve() +#else + static SBConvolve* construct( + const std::list& plist, bool real_space, GSParams gsparams) { - bp::class_< SBConvolve, bp::bases >("SBConvolve", bp::no_init) - .def("__init__", bp::make_constructor(&construct, bp::default_call_policies())); - - bp::class_< SBAutoConvolve, bp::bases >("SBAutoConvolve", bp::no_init) - .def(bp::init()); + return new SBConvolve(plist, real_space, gsparams); + } +#endif - bp::class_< SBAutoCorrelate, bp::bases >("SBAutoCorrelate", bp::no_init) - .def(bp::init()); + void pyExportSBConvolve(PY_MODULE& _galsim) + { + py::class_(GALSIM_COMMA "SBConvolve" BP_NOINIT) + 
.def(PY_INIT(&construct)); + py::class_(GALSIM_COMMA "SBAutoConvolve" BP_NOINIT) + .def(py::init()); + py::class_(GALSIM_COMMA "SBAutoCorrelate" BP_NOINIT) + .def(py::init()); } } // namespace galsim diff --git a/pysrc/SBDeconvolve.cpp b/pysrc/SBDeconvolve.cpp index b0d73f01c37..706f1c168c7 100644 --- a/pysrc/SBDeconvolve.cpp +++ b/pysrc/SBDeconvolve.cpp @@ -17,19 +17,15 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBDeconvolve.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBDeconvolve() + void pyExportSBDeconvolve(PY_MODULE& _galsim) { - bp::class_< SBDeconvolve, bp::bases >("SBDeconvolve", bp::no_init) - .def(bp::init()); + py::class_(GALSIM_COMMA "SBDeconvolve" BP_NOINIT) + .def(py::init()); } } // namespace galsim diff --git a/pysrc/SBDeltaFunction.cpp b/pysrc/SBDeltaFunction.cpp index cdab9102f2b..8a990985c03 100644 --- a/pysrc/SBDeltaFunction.cpp +++ b/pysrc/SBDeltaFunction.cpp @@ -17,19 +17,15 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBDeltaFunction.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBDeltaFunction() + void pyExportSBDeltaFunction(PY_MODULE& _galsim) { - bp::class_ >("SBDeltaFunction", bp::no_init) - .def(bp::init()); + py::class_(GALSIM_COMMA "SBDeltaFunction" BP_NOINIT) + .def(py::init()); } } // namespace galsim diff --git a/pysrc/SBExponential.cpp b/pysrc/SBExponential.cpp index 8da77b47666..7ec40eebd00 100644 --- a/pysrc/SBExponential.cpp +++ b/pysrc/SBExponential.cpp @@ -17,19 +17,15 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBExponential.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBExponential() + void pyExportSBExponential(PY_MODULE& _galsim) { - bp::class_ >("SBExponential", bp::no_init) - .def(bp::init()); + py::class_(GALSIM_COMMA "SBExponential" BP_NOINIT) + .def(py::init()); } } // namespace galsim diff --git a/pysrc/SBFourierSqrt.cpp b/pysrc/SBFourierSqrt.cpp index 0012ca08bec..3e6109818b7 100644 --- a/pysrc/SBFourierSqrt.cpp +++ b/pysrc/SBFourierSqrt.cpp @@ -17,19 +17,15 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBFourierSqrt.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBFourierSqrt() + void pyExportSBFourierSqrt(PY_MODULE& _galsim) { - bp::class_< SBFourierSqrt, bp::bases >("SBFourierSqrt", bp::no_init) - .def(bp::init()); + py::class_(GALSIM_COMMA "SBFourierSqrt" BP_NOINIT) + .def(py::init()); } } // namespace galsim diff --git a/pysrc/SBGaussian.cpp b/pysrc/SBGaussian.cpp index e5f0117a731..e7fdf297496 100644 --- a/pysrc/SBGaussian.cpp +++ b/pysrc/SBGaussian.cpp @@ -17,19 +17,15 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBGaussian.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBGaussian() + void pyExportSBGaussian(PY_MODULE& _galsim) { - bp::class_ >("SBGaussian", bp::no_init) - .def(bp::init()); + py::class_(GALSIM_COMMA "SBGaussian" BP_NOINIT) + .def(py::init()); } } // namespace galsim diff --git a/pysrc/SBInclinedExponential.cpp b/pysrc/SBInclinedExponential.cpp index b41099ceb96..e978e77ede8 100644 --- a/pysrc/SBInclinedExponential.cpp +++ b/pysrc/SBInclinedExponential.cpp @@ -17,20 +17,16 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBInclinedExponential.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBInclinedExponential() + void pyExportSBInclinedExponential(PY_MODULE& _galsim) { - bp::class_ >( - "SBInclinedExponential", bp::no_init) - .def(bp::init()); + py::class_( + GALSIM_COMMA "SBInclinedExponential" BP_NOINIT) + .def(py::init()); } } // namespace galsim diff --git a/pysrc/SBInclinedSersic.cpp b/pysrc/SBInclinedSersic.cpp index 8d166492dae..ddc86f78005 100644 --- a/pysrc/SBInclinedSersic.cpp +++ b/pysrc/SBInclinedSersic.cpp @@ -17,19 +17,16 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBInclinedSersic.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBInclinedSersic() + void pyExportSBInclinedSersic(PY_MODULE& _galsim) { - bp::class_ >("SBInclinedSersic", bp::no_init) - .def(bp::init()); + py::class_( + GALSIM_COMMA "SBInclinedSersic" BP_NOINIT) + .def(py::init()); } } // namespace galsim diff --git a/pysrc/SBInterpolatedImage.cpp b/pysrc/SBInterpolatedImage.cpp index 7d286ef7ed4..3103fd79a40 100644 --- a/pysrc/SBInterpolatedImage.cpp +++ b/pysrc/SBInterpolatedImage.cpp @@ -17,40 +17,35 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBInterpolatedImage.h" -namespace bp = boost::python; - namespace galsim { template - static void WrapTemplates(W& wrapper) + static void WrapTemplates(PY_MODULE& _galsim, W& wrapper) { - wrapper - .def(bp::init &, const Bounds&, const Bounds&, - const Interpolant&, const Interpolant&, - double, double, GSParams>()); + wrapper.def(py::init &, const Bounds&, const Bounds&, + const Interpolant&, const Interpolant&, + double, double, GSParams>()); typedef double (*cscf_func_type)(const BaseImage&, double); - bp::def("CalculateSizeContainingFlux", cscf_func_type(&CalculateSizeContainingFlux)); + GALSIM_DOT def("CalculateSizeContainingFlux", cscf_func_type(&CalculateSizeContainingFlux)); } - void pyExportSBInterpolatedImage() + void pyExportSBInterpolatedImage(PY_MODULE& _galsim) { - bp::class_< SBInterpolatedImage, bp::bases > pySBInterpolatedImage( - "SBInterpolatedImage", bp::no_init); + py::class_ pySBInterpolatedImage( + GALSIM_COMMA "SBInterpolatedImage" BP_NOINIT); pySBInterpolatedImage .def("calculateMaxK", &SBInterpolatedImage::calculateMaxK); - WrapTemplates(pySBInterpolatedImage); - WrapTemplates(pySBInterpolatedImage); + WrapTemplates(_galsim, 
pySBInterpolatedImage); + WrapTemplates(_galsim, pySBInterpolatedImage); - bp::class_< SBInterpolatedKImage, bp::bases > pySBInterpolatedKImage( - "SBInterpolatedKImage", bp::no_init); + py::class_ pySBInterpolatedKImage( + GALSIM_COMMA "SBInterpolatedKImage" BP_NOINIT); pySBInterpolatedKImage - .def(bp::init > &, + .def(py::init > &, double, const Interpolant&, GSParams>()); } diff --git a/pysrc/SBKolmogorov.cpp b/pysrc/SBKolmogorov.cpp index 0b5403d89f6..25514954863 100644 --- a/pysrc/SBKolmogorov.cpp +++ b/pysrc/SBKolmogorov.cpp @@ -17,19 +17,15 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBKolmogorov.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBKolmogorov() + void pyExportSBKolmogorov(PY_MODULE& _galsim) { - bp::class_ >("SBKolmogorov", bp::no_init) - .def(bp::init()); + py::class_(GALSIM_COMMA "SBKolmogorov" BP_NOINIT) + .def(py::init()); } } // namespace galsim diff --git a/pysrc/SBMoffat.cpp b/pysrc/SBMoffat.cpp index 94d7ac91c6a..50c261a8459 100644 --- a/pysrc/SBMoffat.cpp +++ b/pysrc/SBMoffat.cpp @@ -17,22 +17,18 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBMoffat.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBMoffat() + void pyExportSBMoffat(PY_MODULE& _galsim) { - bp::class_ >("SBMoffat", bp::no_init) - .def(bp::init()) + py::class_(GALSIM_COMMA "SBMoffat" BP_NOINIT) + .def(py::init()) .def("getHalfLightRadius", &SBMoffat::getHalfLightRadius); - bp::def("MoffatCalculateSRFromHLR", &MoffatCalculateScaleRadiusFromHLR); + GALSIM_DOT def("MoffatCalculateSRFromHLR", &MoffatCalculateScaleRadiusFromHLR); } } // namespace galsim diff --git a/pysrc/SBProfile.cpp b/pysrc/SBProfile.cpp index 74823b77939..8dc44ccc0b7 100644 --- a/pysrc/SBProfile.cpp +++ b/pysrc/SBProfile.cpp @@ -17,41 +17,28 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#define BOOST_PYTHON_MAX_ARITY 20 // We have a function with 17 params here... - // c.f. www.boost.org/libs/python/doc/v2/configuration.html -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBProfile.h" #include "SBTransform.h" -namespace bp = boost::python; - namespace galsim { template - static void WrapTemplates(W& wrapper) { - // We don't need to wrap templates in a separate function, but it keeps us - // from having to repeat each of the lines below for each type. - // We also don't need to make 'W' a template parameter in this case, - // but it's easier to do that than write out the full class_ type. 
- wrapper - .def("draw", - (void (SBProfile::*)(ImageView, double) const)&SBProfile::draw); - wrapper - .def("drawK", - (void (SBProfile::*)(ImageView >, double) const) - &SBProfile::drawK); + static void WrapTemplates(W& wrapper) + { + wrapper.def("draw", (void (SBProfile::*)(ImageView, double) const)&SBProfile::draw); + wrapper.def("drawK", (void (SBProfile::*)(ImageView >, double) const) + &SBProfile::drawK); } - void pyExportSBProfile() + void pyExportSBProfile(PY_MODULE& _galsim) { - bp::class_ ("GSParams", bp::no_init) - .def(bp::init< + py::class_(GALSIM_COMMA "GSParams" BP_NOINIT) + .def(py::init< int, int, double, double, double, double, double, double, double, double, double, double, double, double, int, double>()); - bp::class_ pySBProfile("SBProfile", bp::no_init); + py::class_ pySBProfile(GALSIM_COMMA "SBProfile" BP_NOINIT); pySBProfile .def("xValue", &SBProfile::xValue) .def("kValue", &SBProfile::kValue) diff --git a/pysrc/SBSecondKick.cpp b/pysrc/SBSecondKick.cpp index 0b6468205fd..e7cb9d1dcee 100644 --- a/pysrc/SBSecondKick.cpp +++ b/pysrc/SBSecondKick.cpp @@ -17,22 +17,18 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBSecondKick.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBSecondKick() + void pyExportSBSecondKick(PY_MODULE& _galsim) { - bp::class_ >("SBSecondKick", bp::no_init) - .def(bp::init()) + py::class_(GALSIM_COMMA "SBSecondKick" BP_NOINIT) + .def(py::init()) .def("getDelta", &SBSecondKick::getDelta) .def("structureFunction", &SBSecondKick::structureFunction) ; - }; + } } // namespace galsim diff --git a/pysrc/SBSersic.cpp b/pysrc/SBSersic.cpp index 4476b968f6e..67bf388c31d 100644 --- a/pysrc/SBSersic.cpp +++ b/pysrc/SBSersic.cpp @@ -17,23 +17,19 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBSersic.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBSersic() + void pyExportSBSersic(PY_MODULE& _galsim) { - bp::class_ >("SBSersic", bp::no_init) - .def(bp::init()); + py::class_(GALSIM_COMMA "SBSersic" BP_NOINIT) + .def(py::init()); - bp::def("SersicTruncatedScale", &SersicTruncatedScale); - bp::def("SersicIntegratedFlux", &SersicIntegratedFlux); - bp::def("SersicHLR", &SersicHLR); + GALSIM_DOT def("SersicTruncatedScale", &SersicTruncatedScale); + GALSIM_DOT def("SersicIntegratedFlux", &SersicIntegratedFlux); + GALSIM_DOT def("SersicHLR", &SersicHLR); } } // namespace galsim diff --git a/pysrc/SBShapelet.cpp b/pysrc/SBShapelet.cpp index 5255d08d22f..1432088b101 100644 --- a/pysrc/SBShapelet.cpp +++ b/pysrc/SBShapelet.cpp @@ -17,13 +17,9 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBShapelet.h" -namespace bp = boost::python; - namespace galsim { static void fit(double sigma, int order, size_t idata, @@ -35,24 +31,26 @@ namespace galsim { double* data = reinterpret_cast(idata); int size = PQIndex::size(order); - tmv::VectorView v = tmv::VectorViewOf(data, size); - v = bvec.rVector(); + for (int i=0; i(idata); int size = PQIndex::size(order); - LVector bvec(order, tmv::VectorViewOf(data, size)); + VectorXd v(size); + for (int i=0; i >("SBShapelet", bp::no_init) - .def("__init__", bp::make_constructor(&construct, bp::default_call_policies())); + py::class_(GALSIM_COMMA "SBShapelet" BP_NOINIT) + .def(PY_INIT(&construct)); - bp::def("ShapeletFitImage", &fit); + GALSIM_DOT def("ShapeletFitImage", &fit); } } // namespace galsim diff --git a/pysrc/SBSpergel.cpp b/pysrc/SBSpergel.cpp index 5e6f2c1f05d..13cdb99d48b 100644 --- a/pysrc/SBSpergel.cpp +++ b/pysrc/SBSpergel.cpp @@ -17,23 +17,19 @@ * and/or other 
materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBSpergel.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBSpergel() + void pyExportSBSpergel(PY_MODULE& _galsim) { - bp::class_ >("SBSpergel",bp::no_init) - .def(bp::init()) + py::class_(GALSIM_COMMA "SBSpergel" BP_NOINIT) + .def(py::init()) .def("calculateIntegratedFlux", &SBSpergel::calculateIntegratedFlux) .def("calculateFluxRadius", &SBSpergel::calculateFluxRadius); - bp::def("SpergelCalculateHLR", &SpergelCalculateHLR); + GALSIM_DOT def("SpergelCalculateHLR", &SpergelCalculateHLR); } } // namespace galsim diff --git a/pysrc/SBTransform.cpp b/pysrc/SBTransform.cpp index 29cc71db63d..c55463944b2 100644 --- a/pysrc/SBTransform.cpp +++ b/pysrc/SBTransform.cpp @@ -17,19 +17,15 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBTransform.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBTransform() + void pyExportSBTransform(PY_MODULE& _galsim) { - bp::class_< SBTransform, bp::bases >("SBTransform", bp::no_init) - .def(bp::init(GALSIM_COMMA "SBTransform" BP_NOINIT) + .def(py::init, double, GSParams>()); } diff --git a/pysrc/SBVonKarman.cpp b/pysrc/SBVonKarman.cpp index 5dd81a714dc..f2190d1db7c 100644 --- a/pysrc/SBVonKarman.cpp +++ b/pysrc/SBVonKarman.cpp @@ -17,20 +17,16 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBVonKarman.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBVonKarman() + void pyExportSBVonKarman(PY_MODULE& _galsim) { - bp::class_ >("SBVonKarman", bp::no_init) - .def(bp::init()) - .def("getDeltaAmplitude", &SBVonKarman::getDeltaAmplitude) + py::class_(GALSIM_COMMA "SBVonKarman" BP_NOINIT) + .def(py::init()) + .def("getDelta", &SBVonKarman::getDelta) .def("getHalfLightRadius", &SBVonKarman::getHalfLightRadius) .def("structureFunction", &SBVonKarman::structureFunction) ; diff --git a/pysrc/Silicon.cpp b/pysrc/Silicon.cpp index 54520de1284..add5a60dc18 100644 --- a/pysrc/Silicon.cpp +++ b/pysrc/Silicon.cpp @@ -17,45 +17,37 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" // header that includes Python.h always needs to come first - +#include "PyBind11Helper.h" #include "Silicon.h" #include "Random.h" -namespace bp = boost::python; - namespace galsim { template static void WrapTemplates(W& wrapper) { typedef double (Silicon::*accumulate_fn)(const PhotonArray&, UniformDeviate, ImageView, Position); - wrapper - .def("accumulate", (accumulate_fn)&Silicon::accumulate); + wrapper.def("accumulate", (accumulate_fn)&Silicon::accumulate); } - - static Silicon* MakeSilicon(int NumVertices, double NumElect, int Nx, int Ny, int QDist, - double Nrecalc, double DiffStep, double PixelSize, - double SensorThickness, size_t idata, - const Table& treeRingTable, - const Position& treeRingCenter, - const Table& abs_length_table) + static Silicon* MakeSilicon( + int NumVertices, double NumElect, int Nx, int Ny, int QDist, + double Nrecalc, double DiffStep, double PixelSize, + double SensorThickness, size_t idata, + const Table& treeRingTable, + const Position& treeRingCenter, + const Table& abs_length_table) { double* data = reinterpret_cast(idata); - int NumPolys = Nx * Ny + 
2; - int Nv = 4 * NumVertices + 4; return new Silicon(NumVertices, NumElect, Nx, Ny, QDist, Nrecalc, DiffStep, PixelSize, SensorThickness, data, treeRingTable, treeRingCenter, abs_length_table); } - void pyExportSilicon() + void pyExportSilicon(PY_MODULE& _galsim) { - bp::class_ pySilicon("Silicon", bp::no_init); - pySilicon - .def("__init__", bp::make_constructor(&MakeSilicon, bp::default_call_policies())); + py::class_ pySilicon(GALSIM_COMMA "Silicon" BP_NOINIT); + pySilicon.def(PY_INIT(&MakeSilicon)); WrapTemplates(pySilicon); WrapTemplates(pySilicon); diff --git a/pysrc/Table.cpp b/pysrc/Table.cpp index d6014fef40b..1080178dbbc 100644 --- a/pysrc/Table.cpp +++ b/pysrc/Table.cpp @@ -17,16 +17,12 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" // header that includes Python.h always needs to come first - +#include "PyBind11Helper.h" #include "Table.h" -namespace bp = boost::python; - namespace galsim { - static Table* makeTable(size_t iargs, size_t ivals, int N, const char* interp_c) + static Table* MakeTable(size_t iargs, size_t ivals, int N, const char* interp_c) { const double* args = reinterpret_cast(iargs); const double* vals = reinterpret_cast(ivals); @@ -41,14 +37,14 @@ namespace galsim { return new Table(args, vals, N, i); } - static void interpMany(const Table& table, size_t iargs, size_t ivals, int N) + static void InterpMany(const Table& table, size_t iargs, size_t ivals, int N) { const double* args = reinterpret_cast(iargs); double* vals = reinterpret_cast(ivals); table.interpMany(args, vals, N); } - static Table2D* makeTable2D(size_t ix, size_t iy, size_t ivals, int Nx, int Ny, + static Table2D* MakeTable2D(size_t ix, size_t iy, size_t ivals, int Nx, int Ny, const char* interp_c) { const double* x = reinterpret_cast(ix); @@ -64,7 +60,7 @@ namespace galsim { return new Table2D(x, y, vals, Nx, Ny, i); } - static void interpMany2D(const Table2D& table2d, size_t ix, size_t 
iy, size_t ivals, int N) + static void InterpMany2D(const Table2D& table2d, size_t ix, size_t iy, size_t ivals, int N) { const double* x = reinterpret_cast(ix); const double* y = reinterpret_cast(iy); @@ -88,19 +84,17 @@ namespace galsim { table2d.gradientMany(x, y, dfdx, dfdy, N); } - void pyExportTable() + void pyExportTable(PY_MODULE& _galsim) { - bp::class_ pyTable("_LookupTable", bp::no_init); - pyTable - .def("__init__", bp::make_constructor(&makeTable, bp::default_call_policies())) + py::class_
(GALSIM_COMMA "_LookupTable" BP_NOINIT) + .def(PY_INIT(&MakeTable)) .def("interp", &Table::lookup) - .def("interpMany", &interpMany); + .def("interpMany", &InterpMany); - bp::class_ pyTable2D("_LookupTable2D", bp::no_init); - pyTable2D - .def("__init__", bp::make_constructor(&makeTable2D, bp::default_call_policies())) + py::class_(GALSIM_COMMA "_LookupTable2D" BP_NOINIT) + .def(PY_INIT(&MakeTable2D)) .def("interp", &Table2D::lookup) - .def("interpMany", &interpMany2D) + .def("interpMany", &InterpMany2D) .def("gradient", &Gradient) .def("gradientMany", &GradientMany); } diff --git a/pysrc/WCS.cpp b/pysrc/WCS.cpp index dc8617c8954..871055a0c19 100644 --- a/pysrc/WCS.cpp +++ b/pysrc/WCS.cpp @@ -17,13 +17,9 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "WCS.h" -namespace bp = boost::python; - namespace galsim { void CallApplyCD(int n, size_t x_data, size_t y_data, size_t cd_data) @@ -32,7 +28,7 @@ namespace galsim { double* yar = reinterpret_cast(y_data); const double* cdar = reinterpret_cast(cd_data); ApplyCD(n, xar, yar, cdar); - }; + } void CallApplyPV(int n, int m, size_t u_data, size_t v_data, size_t pv_data) { @@ -40,28 +36,29 @@ namespace galsim { double* var = reinterpret_cast(v_data); const double* pvar = reinterpret_cast(pv_data); ApplyPV(n, m, uar, var, pvar); - }; + } - bp::tuple CallInvertPV(double u, double v, size_t pv_data) + py::tuple CallInvertPV(double u, double v, size_t pv_data) { const double* pvar = reinterpret_cast(pv_data); InvertPV(u, v, pvar); - return bp::make_tuple(u,v); - }; + return py::make_tuple(u,v); + } - bp::tuple CallInvertAB(int m, double x, double y, size_t ab_data, size_t abp_data) + py::tuple CallInvertAB(int m, double x, double y, size_t ab_data, size_t abp_data) { const double* abar = reinterpret_cast(ab_data); const double* abpar = reinterpret_cast(abp_data); InvertAB(m, x, y, abar, abpar); - return 
bp::make_tuple(x,y); - }; + return py::make_tuple(x,y); + } - void pyExportWCS() { - bp::def("ApplyPV", &CallApplyPV); - bp::def("ApplyCD", &CallApplyCD); - bp::def("InvertPV", &CallInvertPV); - bp::def("InvertAB", &CallInvertAB); + void pyExportWCS(PY_MODULE& _galsim) + { + GALSIM_DOT def("ApplyPV", &CallApplyPV); + GALSIM_DOT def("ApplyCD", &CallApplyCD); + GALSIM_DOT def("InvertPV", &CallInvertPV); + GALSIM_DOT def("InvertAB", &CallInvertAB); } } // namespace galsim diff --git a/pysrc/module.cpp b/pysrc/module.cpp index 0001f28957b..a32ea0fe38f 100644 --- a/pysrc/module.cpp +++ b/pysrc/module.cpp @@ -17,89 +17,93 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" +#include "Python.h" +#include "PyBind11Helper.h" namespace galsim { - void pyExportBounds(); - void pyExportPhotonArray(); - void pyExportImage(); - void pyExportSBProfile(); - void pyExportSBAdd(); - void pyExportSBConvolve(); - void pyExportSBDeconvolve(); - void pyExportSBFourierSqrt(); - void pyExportSBTransform(); - void pyExportSBBox(); - void pyExportSBGaussian(); - void pyExportSBExponential(); - void pyExportSBSersic(); - void pyExportSBSpergel(); - void pyExportSBMoffat(); - void pyExportSBAiry(); - void pyExportSBShapelet(); - void pyExportSBInterpolatedImage(); - void pyExportSBKolmogorov(); - void pyExportSBInclinedExponential(); - void pyExportSBInclinedSersic(); - void pyExportSBDeltaFunction(); - void pyExportSBVonKarman(); - void pyExportSBSecondKick(); - void pyExportRandom(); - void pyExportTable(); - void pyExportInterpolant(); - void pyExportCDModel(); - void pyExportSilicon(); - void pyExportRealGalaxy(); - void pyExportWCS(); + void pyExportBounds(PY_MODULE&); + void pyExportPhotonArray(PY_MODULE&); + void pyExportImage(PY_MODULE&); + void pyExportSBProfile(PY_MODULE&); + void pyExportSBAdd(PY_MODULE&); + void pyExportSBConvolve(PY_MODULE&); + void pyExportSBDeconvolve(PY_MODULE&); + void 
pyExportSBFourierSqrt(PY_MODULE&); + void pyExportSBTransform(PY_MODULE&); + void pyExportSBBox(PY_MODULE&); + void pyExportSBGaussian(PY_MODULE&); + void pyExportSBDeltaFunction(PY_MODULE&); + void pyExportSBExponential(PY_MODULE&); + void pyExportSBSersic(PY_MODULE&); + void pyExportSBSpergel(PY_MODULE&); + void pyExportSBMoffat(PY_MODULE&); + void pyExportSBAiry(PY_MODULE&); + void pyExportSBShapelet(PY_MODULE&); + void pyExportSBInterpolatedImage(PY_MODULE&); + void pyExportSBKolmogorov(PY_MODULE&); + void pyExportSBInclinedExponential(PY_MODULE&); + void pyExportSBInclinedSersic(PY_MODULE&); + void pyExportSBVonKarman(PY_MODULE&); + void pyExportSBSecondKick(PY_MODULE&); + void pyExportRandom(PY_MODULE&); + void pyExportTable(PY_MODULE&); + void pyExportInterpolant(PY_MODULE&); + void pyExportCDModel(PY_MODULE&); + void pyExportSilicon(PY_MODULE&); + void pyExportRealGalaxy(PY_MODULE&); + void pyExportWCS(PY_MODULE&); namespace hsm { - void pyExportHSM(); - } // namespace hsm + void pyExportHSM(PY_MODULE&); + } namespace integ { - void pyExportInteg(); - } // namespace integ + void pyExportInteg(PY_MODULE&); + } namespace math { - void pyExportBessel(); - } // namespace integ + void pyExportBessel(PY_MODULE&); + } } // namespace galsim -BOOST_PYTHON_MODULE(_galsim) { - galsim::pyExportBounds(); - galsim::pyExportImage(); - galsim::pyExportPhotonArray(); - galsim::pyExportSBProfile(); - galsim::pyExportSBAdd(); - galsim::pyExportSBConvolve(); - galsim::pyExportSBDeconvolve(); - galsim::pyExportSBFourierSqrt(); - galsim::pyExportSBTransform(); - galsim::pyExportSBBox(); - galsim::pyExportSBGaussian(); - galsim::pyExportSBExponential(); - galsim::pyExportSBSersic(); - galsim::pyExportSBSpergel(); - galsim::pyExportSBMoffat(); - galsim::pyExportSBAiry(); - galsim::pyExportSBShapelet(); - galsim::pyExportSBInterpolatedImage(); - galsim::pyExportSBKolmogorov(); - galsim::pyExportSBInclinedExponential(); - galsim::pyExportSBInclinedSersic(); - 
galsim::pyExportSBDeltaFunction(); - galsim::pyExportSBVonKarman(); - galsim::pyExportSBSecondKick(); - galsim::pyExportRandom(); - galsim::pyExportInterpolant(); - galsim::pyExportCDModel(); - galsim::hsm::pyExportHSM(); - galsim::integ::pyExportInteg(); - galsim::pyExportTable(); - galsim::math::pyExportBessel(); - galsim::pyExportSilicon(); - galsim::pyExportRealGalaxy(); - galsim::pyExportWCS(); +PYBIND11_MODULE(_galsim, _galsim) +{ + BP_SCOPE(_galsim); + + galsim::pyExportBounds(_galsim); + galsim::pyExportPhotonArray(_galsim); + galsim::pyExportImage(_galsim); + galsim::pyExportSBProfile(_galsim); + galsim::pyExportSBAdd(_galsim); + galsim::pyExportSBConvolve(_galsim); + galsim::pyExportSBDeconvolve(_galsim); + galsim::pyExportSBFourierSqrt(_galsim); + galsim::pyExportSBTransform(_galsim); + galsim::pyExportSBBox(_galsim); + galsim::pyExportSBGaussian(_galsim); + galsim::pyExportSBDeltaFunction(_galsim); + galsim::pyExportSBExponential(_galsim); + galsim::pyExportSBSersic(_galsim); + galsim::pyExportSBSpergel(_galsim); + galsim::pyExportSBMoffat(_galsim); + galsim::pyExportSBAiry(_galsim); + galsim::pyExportSBShapelet(_galsim); + galsim::pyExportSBInterpolatedImage(_galsim); + galsim::pyExportSBKolmogorov(_galsim); + galsim::pyExportSBInclinedExponential(_galsim); + galsim::pyExportSBInclinedSersic(_galsim); + galsim::pyExportSBVonKarman(_galsim); + galsim::pyExportSBSecondKick(_galsim); + galsim::pyExportRandom(_galsim); + galsim::pyExportTable(_galsim); + galsim::pyExportInterpolant(_galsim); + galsim::pyExportCDModel(_galsim); + galsim::pyExportSilicon(_galsim); + galsim::pyExportRealGalaxy(_galsim); + galsim::pyExportWCS(_galsim); + + galsim::hsm::pyExportHSM(_galsim); + galsim::integ::pyExportInteg(_galsim); + galsim::math::pyExportBessel(_galsim); } diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 00000000000..0126b13338b --- /dev/null +++ b/requirements.txt @@ -0,0 +1,21 @@ +# I didn't try to figure out which versions of 
these are really required. These are the +# current versions at the time of writing this (Jan, 2018), and they are known to work. + +# These are in conda_requirements.txt. If using that, you may prefer to do +# conda install -c conda-forge --file conda_requirements.txt +# prior to running pip install -r requirements.txt +setuptools>=38 +#eigency>=1.78 # 1.77 still doesn't work. Oops. (My fault.) +numpy>=1.13 +future>=0.15 +astropy>=2.0 +pybind11>=2.2 +pip==9.0.3 # For now, pybind11 in conjunction with pip version 10.0 is broken. Use 9.0.3. + +# Not technically required, but useful. +pyyaml>=3.12 +pandas>=0.20 + +# This is not in conda. Let pip install these. +LSSTDESC.Coord>=1.0.5 +starlink-pyast>=3.9.0 # Also not required, but useful. diff --git a/setup.py b/setup.py new file mode 100644 index 00000000000..b36e4db921c --- /dev/null +++ b/setup.py @@ -0,0 +1,864 @@ +from __future__ import print_function +import sys,os,glob,re +import platform +import ctypes +import ctypes.util +import types +import subprocess +import re + +try: + from setuptools import setup, Extension, find_packages + from setuptools.command.build_ext import build_ext + from setuptools.command.build_clib import build_clib + from setuptools.command.install import install + from setuptools.command.install_scripts import install_scripts + from setuptools.command.easy_install import easy_install + from setuptools.command.test import test + import setuptools + print("Using setuptools version",setuptools.__version__) +except ImportError: + print() + print("****") + print(" Installation requires setuptools version >= 38.") + print(" Please upgrade or install with pip install -U setuptools") + print("****") + print() + raise + +print('Python version = ',sys.version) +py_version = "%d.%d"%sys.version_info[0:2] # we check things based on the major.minor version. 
+ +scripts = ['galsim', 'galsim_download_cosmos'] +scripts = [ os.path.join('bin',f) for f in scripts ] + +def all_files_from(dir, ext=''): + files = [] + for root, dirnames, filenames in os.walk(dir): + for filename in filenames: + if filename.endswith(ext): + files.append(os.path.join(root, filename)) + return files + +py_sources = all_files_from('pysrc', '.cpp') +cpp_sources = all_files_from('src', '.cpp') +test_sources = all_files_from('tests', '.cpp') +headers = all_files_from('include') +shared_data = all_files_from('share') + +# If we build with debug, undefine NDEBUG flag +undef_macros = [] +if "--debug" in sys.argv: + undef_macros+=['NDEBUG'] + +copt = { + 'gcc' : ['-O2','-msse2','-std=c++11','-fvisibility=hidden'], + 'icc' : ['-O2','-msse2','-vec-report0','-std=c++11'], + 'clang' : ['-O2','-msse2','-std=c++11','-Wno-shorten-64-to-32','-fvisibility=hidden', + '-stdlib=libc++'], + 'unknown' : [], +} + +if "--debug" in sys.argv: + copt['gcc'].append('-g') + copt['icc'].append('-g') + copt['clang'].append('-g') + +def get_compiler(cc): + """Try to figure out which kind of compiler this really is. + In particular, try to distinguish between clang and gcc, either of which may + be called cc or gcc. + """ + cmd = [cc,'--version'] + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + lines = p.stdout.readlines() + print('compiler version information: ') + for line in lines: + print(line.decode().strip()) + # Python3 needs this decode bit. + # Python2.7 doesn't need it, but it works fine. 
+ line = lines[0].decode(encoding='UTF-8') + if line.startswith('Configured'): + line = lines[1].decode(encoding='UTF-8') + + if 'clang' in line: + return 'clang' + elif 'gcc' in line: + return 'gcc' + elif 'GCC' in line: + return 'gcc' + elif 'clang' in cc: + return 'clang' + elif 'gcc' in cc or 'g++' in cc: + return 'gcc' + elif 'icc' in cc or 'icpc' in cc: + return 'icc' + else: + return 'unknown' + +# Check for the fftw3 library in some likely places +def find_fftw_lib(output=False): + try_libdirs = [] + lib_ext = '.so' + + # Start with the explicit FFTW_DIR, if present. + if 'FFTW_DIR' in os.environ: + try_libdirs.append(os.environ['FFTW_DIR']) + try_libdirs.append(os.path.join(os.environ['FFTW_DIR'],'lib')) + + # Try some standard locations where things get installed + if 'posix' in os.name.lower(): + try_libdirs.extend(['/usr/local/lib', '/usr/lib']) + if 'darwin' in platform.system().lower(): + try_libdirs.extend(['/usr/local/lib', '/usr/lib', '/sw/lib', '/opt/local/lib']) + lib_ext = '.dylib' + + # Check the directories in LD_LIBRARY_PATH. This doesn't work on OSX >= 10.11 + for path in ['LIBRARY_PATH', 'LD_LIBRARY_PATH', 'DYLD_LIBRARY_PATH']: + if path in os.environ: + for dir in os.environ[path].split(':'): + try_libdirs.append(dir) + + # The user's home directory is often a good place to check. + try_libdirs.append(os.path.join(os.path.expanduser("~"),"lib")) + + # If the above don't work, the fftw3 module may have the right directory. + try: + import fftw3 + try_libdirs.append(fftw3.lib.libdir) + except ImportError: + pass + + name = 'libfftw3' + lib_ext + if output: print("Looking for ",name) + tried_dirs = set() # Keep track, so we don't try the same thing twice. + for dir in try_libdirs: + if dir == '': continue # This messes things up if it's in there. 
+ if dir in tried_dirs: continue + else: tried_dirs.add(dir) + if not os.path.isdir(dir): continue + libpath = os.path.join(dir, name) + if not os.path.isfile(libpath): continue + if output: print(" ", dir, end='') + try: + lib = ctypes.cdll.LoadLibrary(libpath) + if output: print(" (yes)") + return libpath + except OSError as e: + if output: print(" (no)") + # Some places use lib64 rather than/in addition to lib. Try that as well. + if dir.endswith('lib') and os.path.isdir(dir + '64'): + dir += '64' + try: + libpath = os.path.join(dir, name) + if not os.path.isfile(libpath): continue + lib = ctypes.cdll.LoadLibrary(libpath) + if output: print(" ", dir, " (yes)") + return libpath + except OSError: + pass + try: + libpath = ctypes.util.find_library('fftw3') + if libpath == None: + raise OSError + if os.path.split(libpath)[0] == '': + # If the above doesn't return a real path, try this instead. + libpath = ctypes.util._findLib_gcc('fftw3') + if libpath == None: + raise OSError + libpath = os.path.realpath(libpath) + lib = ctypes.cdll.LoadLibrary(libpath) + except Exception as e: + if output: + print("Could not find fftw3 library. Make sure it is installed either in a standard ") + print("location such as /usr/local/lib, or the installation directory is either in ") + print("your LIBRARY_PATH or FFTW_DIR environment variable.") + raise + else: + dir, name = os.path.split(libpath) + if output: + if dir == '': dir = '[none]' + print(" ", dir, " (yes)") + return libpath + + +# Check for Eigen in some likely places +def find_eigen_dir(output=False): + import distutils.sysconfig + + try_dirs = [] + if 'EIGEN_DIR' in os.environ: + try_dirs.append(os.environ['EIGEN_DIR']) + try_dirs.append(os.path.join(os.environ['EIGEN_DIR'], 'include')) + # This is where conda will install it. 
+ try_dirs.append(distutils.sysconfig.get_config_var('INCLUDEDIR')) + if 'posix' in os.name.lower(): + try_dirs.extend(['/usr/local/include', '/usr/include']) + if 'darwin' in platform.system().lower(): + try_dirs.extend(['/usr/local/include', '/usr/include', '/sw/include', + '/opt/local/include']) + for path in ['C_INCLUDE_PATH']: + if path in os.environ: + for dir in os.environ[path].split(':'): + try_dirs.append(dir) + # eigency is a python package that bundles the Eigen header files, so if that's there, + # can use that. + try: + import eigency + try_dirs.append(eigency.get_includes()[2]) + except ImportError: + pass + + if output: print("Looking for Eigen:") + for dir in try_dirs: + if not os.path.isdir(dir): continue + if output: print(" ", dir, end='') + if os.path.isfile(os.path.join(dir, 'Eigen/Core')): + if output: print(" (yes)") + return dir + if os.path.isfile(os.path.join(dir, 'eigen3', 'Eigen/Core')): + dir = os.path.join(dir, 'eigen3') + if output: + # Only print this if the eigen3 addition was key to finding it. + print("\n ", dir, " (yes)") + return dir + if output: print(" (no)") + if output: + print("Could not find Eigen. Make sure it is installed either in a standard ") + print("location such as /usr/local/include, or the installation directory is either in ") + print("your C_INCLUDE_PATH or EIGEN_DIR environment variable.") + raise OSError("Could not find Eigen") + + +def try_compile(cpp_code, cc, cflags=[], lflags=[]): + """Check if compiling some code with the given compiler and flags works properly. 
+ """ + import tempfile + cpp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.cpp') + cpp_file.write(cpp_code.encode()) + cpp_file.close(); + os_file = tempfile.NamedTemporaryFile(delete=False, suffix='.os') + os_file.close() + exe_file = tempfile.NamedTemporaryFile(delete=False, suffix='.exe') + exe_file.close() + + # Compile + cmd = cc + ' ' + ' '.join(cflags + ['-c',cpp_file.name,'-o',os_file.name]) + #print('cmd = ',cmd) + try: + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) + lines = p.stdout.readlines() + #print('output = ',lines) + p.communicate() + except (IOError,OSError) as e: + p.returncode = 1 + if p.returncode != 0: + os.remove(cpp_file.name) + if os.path.exists(os_file.name): + os.remove(os_file.name) + return False + + # Link + cmd = cc + ' ' + ' '.join(lflags + [os_file.name,'-o',exe_file.name]) + #print('cmd = ',cmd) + try: + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) + lines = p.stdout.readlines() + #print('output = ',lines) + p.communicate() + except (IOError,OSError) as e: + p.returncode = 1 + + if p.returncode and cc.endswith('cc'): + # The linker needs to be a c++ linker, which isn't 'cc'. However, I couldn't figure + # out how to get setup.py to tell me the actual command to use for linking. All the + # executables available from build_ext.compiler.executables are 'cc', not 'c++'. + # I think this must be related to the bugs about not handling c++ correctly. + # http://bugs.python.org/issue9031 + # http://bugs.python.org/issue1222585 + # So just switch it manually and see if that works. 
+ cmd = 'c++ ' + ' '.join(lflags + [os_file.name,'-o',exe_file.name]) + #print('cmd = ',cmd) + try: + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) + lines = p.stdout.readlines() + #print('output = ',lines) + p.communicate() + except (IOError,OSError) as e: + p.returncode = 1 + + # Remove the temp files + os.remove(cpp_file.name) + os.remove(os_file.name) + if os.path.exists(exe_file.name): + os.remove(exe_file.name) + return p.returncode == 0 + + +def try_cpp(cc, cflags=[], lflags=[]): + """Check if compiling a simple bit of c++ code with the given compiler works properly. + """ + from textwrap import dedent + cpp_code = dedent(""" + #include + #include + int main() { + int n = 500; + std::vector x(n,0.); + for (int i=0; i + #include + #include + + int main(void) { + std::cout << std::tgamma(1.3) << std::endl; + return 0; + } + """) + return try_compile(cpp_code, cc, cflags, lflags) + + +def cpu_count(): + """Get the number of cpus + """ + try: + import psutil + return psutil.cpu_count() + except ImportError: + pass + + if hasattr(os, 'sysconf'): + if 'SC_NPROCESSORS_ONLN' in os.sysconf_names: + # Linux & Unix: + ncpus = os.sysconf('SC_NPROCESSORS_ONLN') + if isinstance(ncpus, int) and ncpus > 0: + return ncpus + else: # OSX: + p = subprocess.Popen(['sysctl -n hw.ncpu'],stdout=subprocess.PIPE,shell=True) + return int(p.stdout.read().strip()) + # Windows: + if 'NUMBER_OF_PROCESSORS' in os.environ: + ncpus = int(os.environ['NUMBER_OF_PROCESSORS']) + if ncpus > 0: + return ncpus + return 1 # Default + +def parallel_compile(self, sources, output_dir=None, macros=None, + include_dirs=None, debug=0, extra_preargs=None, + extra_postargs=None, depends=None): + """New compile function that we monkey patch into the existing compiler instance. 
+ """ + import multiprocessing.pool + + # Copied from the regular compile function + macros, objects, extra_postargs, pp_opts, build = \ + self._setup_compile(output_dir, macros, include_dirs, sources, + depends, extra_postargs) + cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) + + def _single_compile(obj): + try: + src, ext = build[obj] + except KeyError: + return + self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) + + # Set by fix_compiler + global glob_use_njobs + if glob_use_njobs == 1: + # This is equivalent to regular compile function + for obj in objects: + _single_compile(obj) + else: + # Use ThreadPool, rather than Pool, since the objects are picklable. + pool = multiprocessing.pool.ThreadPool(glob_use_njobs) + pool.map(_single_compile, objects) + pool.close() + pool.join() + + # Return *all* object filenames, not just the ones we just built. + return objects + + +def fix_compiler(compiler, njobs): + # Remove any -Wstrict-prototypes in the compiler flags (since invalid for C++) + try: + compiler.compiler_so.remove("-Wstrict-prototypes") + except (AttributeError, ValueError): + pass + + # Remove ccache if present so it isn't interpretted as the compiler + if compiler.compiler_so[0] == 'ccache': + del compiler.compiler_so[0] + + # Figure out what compiler it will use + #print('compiler = ',compiler.compiler) + cc = compiler.compiler_so[0] + cflags = compiler.compiler_so[1:] + comp_type = get_compiler(cc) + if cc == comp_type: + print('Using compiler %s'%(cc)) + else: + print('Using compiler %s, which is %s'%(cc,comp_type)) + + # Make sure the compiler works with a simple c++ code + if not try_cpp(cc, cflags): + print("There seems to be something wrong with the compiler or cflags") + print("%s %s"%(cc, ' '.join(cflags))) + raise OSError("Compiler does not work for compiling C++ code") + + # Check if we can use ccache to speed up repeated compilation. 
+ if try_cpp('ccache ' + cc, cflags): + print('Using ccache') + compiler.set_executable('compiler_so', ['ccache',cc] + cflags) + + if njobs > 1: + # Global variable for tracking the number of jobs to use. + # We can't pass this to parallel compile, since the signature is fixed. + # So if using parallel compile, set this value to use within parallel compile. + global glob_use_njobs + glob_use_njobs = njobs + compiler.compile = types.MethodType(parallel_compile, compiler) + + extra_cflags = copt[comp_type] + + success = try_cpp11(cc, cflags + extra_cflags) + if not success: + # In case libc++ doesn't work, try letting the system use the default stdlib + try: + extra_cflags.remove('-stdlib=libc++') + except (AttributeError, ValueError): + pass + else: + success = try_cpp11(cc, cflags + extra_cflags) + if not success: + print('The compiler %s with flags %s did not successfully compile C++11 code'% + (cc, ' '.join(extra_cflags))) + raise OSError("Compiler is not C++-11 compatible") + + # Return the extra cflags, since those will be added to the build step in a different place. + print('Using extra flags ',extra_cflags) + return extra_cflags + +def add_dirs(builder, output=False): + # We need to do most of this both for build_clib and build_ext, so separate it out here. + + # First some basic ones we always need. + builder.include_dirs.append('include') + builder.include_dirs.append('include/galsim') + + # Look for fftw3. 
+ fftw_lib = find_fftw_lib(output=output) + fftw_libpath, fftw_libname = os.path.split(fftw_lib) + if hasattr(builder, 'library_dirs'): + if fftw_libpath != '': + builder.library_dirs.append(fftw_libpath) + builder.libraries.append('galsim') # Make sure galsim comes before fftw3 + builder.libraries.append(os.path.split(fftw_lib)[1].split('.')[0][3:]) + fftw_include = os.path.join(os.path.split(fftw_libpath)[0], 'include') + if os.path.isfile(os.path.join(fftw_include, 'fftw3.h')): + print('Include directory for fftw3 is ',fftw_include) + # Usually, the fftw3.h file is in an associated include dir, but not always. + builder.include_dirs.append(fftw_include) + else: + # If not, we have our own copy of fftw3.h here. + print('Using local copy of fftw3.h') + builder.include_dirs.append('include/fftw3') + + # Look for Eigen/Core + eigen_dir = find_eigen_dir(output=output) + builder.include_dirs.append(eigen_dir) + + # Finally, add pybind11's include dir + import pybind11 + print('PyBind11 is version ',pybind11.__version__) + print('Looking for pybind11 header files: ') + for user in [True, False, None]: + if user is None: + # Last time through, raise an error. + print("Could not find pybind11 header files.") + print("They should have been in one of the following two locations:") + print(" ",pybind11.get_include(True)) + print(" ",pybind11.get_include(False)) + raise OSError("Could not find PyBind11") + + try_dir = pybind11.get_include(user=user) + print(' ',try_dir,end='') + if os.path.isfile(os.path.join(try_dir, 'pybind11/pybind11.h')): + print(' (yes)') + builder.include_dirs.append(try_dir) + break + else: + print(' (no)') + +def parse_njobs(njobs, task=None, command=None, maxn=4): + """Helper function to parse njobs, which may be None (use ncpu) or an int. + Returns an int value for njobs + """ + if njobs is None: + njobs = cpu_count() + if maxn != None and njobs > maxn: + # Usually 4 is plenty. 
Testing with too many jobs tends to lead to + # memory and timeout errors. The user can bump this up if they want. + njobs = maxn + if task is not None: + if njobs == 1: + print('Using a single process for %s.'%task) + else: + print('Using %d cpus for %s'%(njobs,task)) + print('To override, you may do python setup.py %s -jN'%command) + else: + njobs = int(njobs) + if task is not None: + if njobs == 1: + print('Using a single process for %s.'%task) + else: + print('Using %d cpus for %s'%(njobs,task)) + return njobs + + +# Make a subclass of build_ext so we can add to the -I list. +class my_build_clib(build_clib): + user_options = build_ext.user_options + [('njobs=', 'j', "Number of jobs to use for compiling")] + + def initialize_options(self): + build_clib.initialize_options(self) + self.njobs = None + + def finalize_options(self): + build_clib.finalize_options(self) + if self.njobs is None and 'glob_njobs' in globals(): + global glob_njobs + self.njobs = glob_njobs + add_dirs(self, output=True) # This happens first, so only output for this call. + + # Add any extra things based on the compiler being used.. + def build_libraries(self, libraries): + + build_ext = self.distribution.get_command_obj('build_ext') + njobs = parse_njobs(self.njobs, 'compiling', 'install') + + cflags = fix_compiler(self.compiler, njobs) + + # Add the appropriate extra flags for that compiler. + for (lib_name, build_info) in libraries: + build_info['cflags'] = build_info.get('cflags',[]) + cflags + + # Now run the normal build function. + build_clib.build_libraries(self, libraries) + + +# Make a subclass of build_ext so we can add to the -I list. 
+class my_build_ext(build_ext): + user_options = build_ext.user_options + [('njobs=', 'j', "Number of jobs to use for compiling")] + + def initialize_options(self): + build_ext.initialize_options(self) + self.njobs = None + + def finalize_options(self): + build_ext.finalize_options(self) + # I couldn't find an easy way to send the user option from my_install to my_buld_ext. + # So use a global variable. (UGH!) + if self.njobs is None and 'glob_njobs' in globals(): + global glob_njobs + self.njobs = glob_njobs + add_dirs(self) + + # Add any extra things based on the compiler being used.. + def build_extensions(self): + + njobs = parse_njobs(self.njobs, 'compiling', 'install') + cflags = fix_compiler(self.compiler, njobs) + + # Add the appropriate extra flags for that compiler. + for e in self.extensions: + e.extra_compile_args = cflags + for flag in cflags: + if 'stdlib' in flag: + e.extra_link_args.append(flag) + + # Now run the normal build function. + build_ext.build_extensions(self) + + +def make_meta_data(install_dir): + print('install_dir = ',install_dir) + meta_data_file = os.path.join('galsim','meta_data.py') + share_dir = os.path.join(install_dir,'galsim','share') + try: + f = open(meta_data_file,'w') + except IOError: + # Not sure if this is still relevant in setup.py world, but if user ran this under + # sudo and now is not using sudo, then the file might exist, but not be writable. + # However, it should still be removable, since the directory should be owned + # by the user. So remove it and then retry opening it. + os.remove(meta_data_file) + f = open(meta_data_file,'w') + + f.write('# This file is automatically generated by setup.py when building GalSim.\n') + f.write('# Do not edit. 
Any edits will be lost the next time setpu.py is run.\n') + f.write('\n') + f.write('install_dir = "%s"\n'%install_dir) + f.write('share_dir = "%s"\n'%share_dir) + f.close() + return meta_data_file + +class my_install(install): + user_options = install.user_options + [('njobs=', 'j', "Number of jobs to use for compiling")] + + def initialize_options(self): + install.initialize_options(self) + self.njobs = None + + def finalize_options(self): + install.finalize_options(self) + global glob_njobs + glob_njobs = self.njobs + + def run(self): + # Make the meta_data.py file based on the actual installation directory. + meta_data_file = make_meta_data(self.install_lib) + install.run(self) + +# AFAICT, setuptools doesn't provide any easy access to the final installation location of the +# executable scripts. This bit is just to save the value of script_dir so I can use it later. +# cf. http://stackoverflow.com/questions/12975540/correct-way-to-find-scripts-directory-from-setup-py-in-python-distutils/ +class my_easy_install(easy_install): # Used when installing via python setup.py install + # Match the call signature of the easy_install version. + def write_script(self, script_name, contents, mode="t", *ignored): + # Run the normal version + easy_install.write_script(self, script_name, contents, mode, *ignored) + # Save the script install directory in the distribution object. + # This is the same thing that is returned by the setup function. + self.distribution.script_install_dir = self.script_dir + +class my_install_scripts(install_scripts): # Used when pip installing. + def run(self): + install_scripts.run(self) + self.distribution.script_install_dir = self.install_dir + +class my_test(test): + # cf. 
https://pytest.readthedocs.io/en/2.7.3/goodpractises.html + user_options = [('njobs=', 'j', "Number of jobs to use in py.test")] + + def initialize_options(self): + test.initialize_options(self) + self.pytest_args = None + self.njobs = None + + def finalize_options(self): + test.finalize_options(self) + self.test_args = [] + self.test_suite = True + + def run_cpp_tests(self): + builder = self.distribution.get_command_obj('build_ext') + compiler = builder.compiler + ext = builder.extensions[0] + objects = compiler.compile(test_sources, + output_dir=builder.build_temp, + macros=ext.define_macros, + include_dirs=ext.include_dirs, + debug=builder.debug, + extra_postargs=ext.extra_compile_args, + depends=ext.depends) + + if ext.extra_objects: + objects.extend(ext.extra_objects) + extra_args = ext.extra_link_args or [] + + cflags = fix_compiler(compiler, False) + extra_args.extend(cflags) + + libraries = builder.get_libraries(ext) + libraries.append('galsim') + + library_dirs = ext.library_dirs + fftw_lib = find_fftw_lib() + fftw_libpath, fftw_libname = os.path.split(fftw_lib) + if fftw_libpath != '': + library_dirs.append(fftw_libpath) + libraries.append(fftw_libname.split('.')[0][3:]) + + exe_file = os.path.join(builder.build_temp,'cpp_test') + compiler.link_executable( + objects, 'cpp_test', + output_dir=builder.build_temp, + libraries=libraries, + library_dirs=library_dirs, + runtime_library_dirs=ext.runtime_library_dirs, + extra_postargs=extra_args, + debug=builder.debug, + target_lang='c++') + + p = subprocess.Popen([exe_file], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + lines = p.stdout.readlines() + p.communicate() + for line in lines: + print(line.decode().strip()) + if p.returncode != 0: + raise RuntimeError("C++ tests failed") + print("All C++ tests passed.") + + def run_tests(self): + + # Build and run the C++ tests + self.run_cpp_tests() + + njobs = parse_njobs(self.njobs, 'pytest', 'test') + pytest_args = ['-n=%d'%njobs, '--timeout=60'] + + 
original_dir = os.getcwd() + os.chdir('tests') + test_files = glob.glob('test*.py') + + if True: + import pytest + errno = pytest.main(pytest_args + test_files) + if errno != 0: + raise RuntimeError("Some Python tests failed") + else: + # Alternate method calls pytest executable. But the above code seems to work. + p = subprocess.Popen(['pytest'] + pytest_args + test_files) + p.communicate() + if p.returncode != 0: + raise RuntimeError("Some Python tests failed") + os.chdir(original_dir) + print("All python tests passed.") + + +lib=("galsim", {'sources' : cpp_sources, + 'depends' : headers, + 'include_dirs' : ['include', 'include/galsim'], + 'undef_macros' : undef_macros }) +ext=Extension("galsim._galsim", + py_sources, + undef_macros = undef_macros) + +build_dep = ['setuptools>=38', 'pybind11>=2.2'] +run_dep = ['numpy', 'future', 'astropy', 'LSSTDESC.Coord'] +test_dep = ['pytest', 'pytest-xdist', 'pytest-timeout', 'nose', + 'scipy', 'pyyaml', 'starlink-pyast', 'matplotlib'] +# Note: Even though we don't use nosetests, nose is required for some tests to work. +# cf. https://gist.github.com/dannygoldstein/e18866ebb9c39a2739f7b9f16440e2f5 + +# If Eigen doesn't exist in the normal places, add eigency ad a build dependency. +try: + find_eigen_dir() +except OSError: + print('Adding eigency to build_dep') + # Once 1.78 is out I *think* we can remove the cython dependency here. + build_dep += ['cython', 'eigency>=1.77'] + + +with open('README.md') as file: + long_description = file.read() + +# Read in the galsim version from galsim/_version.py +# cf. http://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package +version_file=os.path.join('galsim','_version.py') +verstrline = open(version_file, "rt").read() +VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]" +mo = re.search(VSRE, verstrline, re.M) +if mo: + galsim_version = mo.group(1) +else: + raise RuntimeError("Unable to find version string in %s." 
% (version_file,)) +print('GalSim version is %s'%(galsim_version)) + +# Write a Version.h file that has this information for people using the C++ library. +vi = re.split('\.|-',galsim_version) +version_info = tuple([int(x) for x in vi if x.isdigit()]) +if len(version_info) == 2: + version_info = version_info + (0,) +version_h_text = """ +// This file is auto-generated by SCons. Do not edit. +#define GALSIM_MAJOR %d +#define GALSIM_MINOR %d +#define GALSIM_REVISION %d + +#include +#include + +namespace galsim { + // Compiled versions of the above #define values. + extern int major_version(); + extern int minor_version(); + extern int revision(); + + // Returns string of the form "1.4.2" + extern std::string version(); + + // Checks if the compiled library version matches the #define values in this header file. + inline bool check_version() { + // Same code as version(), but inline, so we get the above values to compare + // to the values compiled into the library. + std::ostringstream oss; + oss << GALSIM_MAJOR << '.' << GALSIM_MINOR << '.' 
<< GALSIM_REVISION; + return oss.str() == version(); + } +} +"""%version_info +version_h_file = os.path.join('include', 'galsim', 'Version.h') +with open(version_h_file, 'w') as f: + f.write(version_h_text) + +dist = setup(name="GalSim", + version=galsim_version, + author="GalSim Developers (point of contact: Mike Jarvis)", + author_email="michael@jarvis.net", + description="The modular galaxy image simulation toolkit", + long_description=long_description, + license = "BSD License", + url="https://github.com/rmjarvis/GalSim", + download_url="https://github.com/GalSim-developers/GalSim/releases/tag/v%s.zip"%galsim_version, + packages=find_packages(), + package_data={'galsim' : shared_data}, + #include_package_data=True, + libraries=[lib], + ext_modules=[ext], + setup_requires=build_dep, + install_requires=build_dep + run_dep, + tests_require=test_dep, + cmdclass = {'build_ext': my_build_ext, + 'build_clib': my_build_clib, + 'install': my_install, + 'install_scripts': my_install_scripts, + 'easy_install': my_easy_install, + 'test': my_test, + }, + entry_points = {'console_scripts' : [ + 'galsim = galsim.__main__:main', + 'galsim_download_cosmos = galsim.download_cosmos:main' + ]}, + zip_safe=False, + ) + +# Check that the path includes the directory where the scripts are installed. +real_env_path = [os.path.realpath(d) for d in os.environ['PATH'].split(':')] +if hasattr(dist,'script_install_dir'): + print('scripts installed into ',dist.script_install_dir) + if (dist.script_install_dir not in os.environ['PATH'].split(':') and + os.path.realpath(dist.script_install_dir) not in real_env_path): + + print('\nWARNING: The GalSim executables were installed in a directory not in your PATH') + print(' If you want to use the executables, you should add the directory') + print('\n ',dist.script_install_dir,'\n') + print(' to your path. 
The current path is') + print('\n ',os.environ['PATH'],'\n') + print(' Alternatively, you can specify a different prefix with --prefix=PREFIX,') + print(' in which case the scripts will be installed in PREFIX/bin.') + print(' If you are installing via pip use --install-option="--prefix=PREFIX"') diff --git a/src/CorrelatedNoise.cpp b/src/CorrelatedNoise.cpp index 8944774db1b..60c689cf02b 100644 --- a/src/CorrelatedNoise.cpp +++ b/src/CorrelatedNoise.cpp @@ -32,48 +32,22 @@ namespace galsim { int idim = 1 + bounds.getXMax() - bounds.getXMin(); int jdim = 1 + bounds.getYMax() - bounds.getYMin(); int covdim = idim * jdim; - tmv::SymMatrix symcov = calculateCovarianceSymMatrix(sbp, bounds, dx); - - for (int i=1; i<=covdim; i++){ // note that the Image indices use the FITS convention and - // start from 1!! - for (int j=i; j<=covdim; j++){ - cov.setValue(i, j, symcov(i, j)); // fill in the upper triangle with the - // correct CorrFunc value - } - } - } - - tmv::SymMatrix calculateCovarianceSymMatrix( - const SBProfile& sbp, const Bounds& bounds, double dx) - { - // Calculate the required dimensions - int idim = 1 + bounds.getXMax() - bounds.getXMin(); - int jdim = 1 + bounds.getYMax() - bounds.getYMin(); - int covdim = idim * jdim; int k, ell; // k and l are indices that refer to image pixel separation vectors in the // correlation func. double x_k, y_ell; // physical vector separations in the correlation func, dx * k etc. - tmv::SymMatrix cov = tmv::SymMatrix< - double, tmv::FortranStyle|tmv::Upper>(covdim); - - for (int i=1; i<=covdim; i++){ // note that the Image indices use the FITS convention and - // start from 1!! 
- for (int j=i; j<=covdim; j++){ - + for (int i=1; i<=covdim; i++) { + for (int j=i; j<=covdim; j++) { k = ((j - 1) / jdim) - ((i - 1) / idim); // using integer division rules here ell = ((j - 1) % jdim) - ((i - 1) % idim); x_k = double(k) * dx; y_ell = double(ell) * dx; Position p = Position(x_k, y_ell); - cov(i, j) = sbp.xValue(p); // fill in the upper triangle with the correct value - + cov.setValue(i, j, sbp.xValue(p)); } } - return cov; } } diff --git a/src/FFT.cpp b/src/FFT.cpp index 0889dd0c3e5..15ec607cfa7 100644 --- a/src/FFT.cpp +++ b/src/FFT.cpp @@ -33,6 +33,26 @@ namespace galsim { + template + void FFTW_Array::resize(size_t n) + { + if (_n != n) { + _n = n; + // cf. BaseImage::allocateMem, which uses the same code. + char* mem = new char[_n * sizeof(T) + sizeof(char*) + 15]; + _p = reinterpret_cast( (uintptr_t)(mem + sizeof(char*) + 15) & ~(size_t) 0x0F ); + ((char**)_p)[-1] = mem; + } + } + + template + FFTW_Array::~FFTW_Array() + { + if (_p) { + delete [] ((char**)_p)[-1]; + } + } + KTable::KTable(int N, double dk, std::complex value) : _dk(dk), _invdk(1./dk) { if (N<=0) throw FFTError("KTable size <=0"); @@ -1113,5 +1133,7 @@ namespace galsim { return kt; } + template class FFTW_Array; + template class FFTW_Array >; } diff --git a/src/Laguerre.cpp b/src/Laguerre.cpp index 03ef177d873..f7d0870794c 100644 --- a/src/Laguerre.cpp +++ b/src/Laguerre.cpp @@ -27,6 +27,13 @@ #include "Solve.h" #include "math/Angle.h" +#ifdef USE_TMV +#define MatrixXT tmv::Matrix +#else +using Eigen::Dynamic; +#define MatrixXT Eigen::Matrix +#endif + namespace galsim { std::string LVector::repr() const @@ -74,131 +81,6 @@ namespace galsim { } } -#if 0 - // routines to retrieve and save complex elements of LTransform: - // ???? Check these ??? 
- std::complex LTransform::operator()(PQIndex pq1, PQIndex pq2) const - { - assert(pq1.pqValid() && !pq1.pastOrder(_orderOut)); - assert(pq2.pqValid() && !pq2.pastOrder(_orderIn)); - int r1index=pq1.rIndex(); - int r2index=pq2.rIndex(); - int i1index=(pq1.isReal()? r1index: r1index+1); - int i2index=(pq2.isReal()? r2index: r2index+1); - - double x = (*_m)(r1index,r2index) + pq1.iSign()*pq2.iSign()*(*_m)(i1index,i2index); - double y = pq1.iSign()*(*_m)(i1index,r2index) - pq2.iSign()*(*_m)(r1index,i2index); - - std::complex z(x,y); - if (pq2.isReal()) z *= 0.5; - - return z; - } - - void LTransform::set( - PQIndex pq1, PQIndex pq2, std::complex Cpq1pq2, std::complex Cqp1pq2) - { - assert(pq1.pqValid() && !pq1.pastOrder(_orderOut)); - assert(pq2.pqValid() && !pq2.pastOrder(_orderIn)); - - take_ownership(); - const double RoundoffTolerance=1.e-15; - std::complex Cpq1qp2; - - if (pq2.needsConjugation()) { - pq2 = pq2.swapPQ(); - std::complex tmp=conj(Cqp1pq2); - Cqp1pq2 = conj(Cpq1pq2); - Cpq1pq2 = tmp; - } - if (pq1.needsConjugation()) { - pq1 = pq1.swapPQ(); - std::complex tmp=Cqp1pq2; - Cqp1pq2 = Cpq1pq2; - Cpq1pq2 = tmp; - } - - int rIndex1 = pq1.rIndex(); - int rIndex2 = pq2.rIndex(); - int iIndex1 = rIndex1+1; - int iIndex2 = rIndex2+1; - - if (pq1.isReal()) { - if (Cpq1pq2!=Cqp1pq2) { - FormatAndThrow<>() - << "Invalid LTransform elements for p1=q1, " << Cpq1pq2 - << " != " << Cqp1pq2; - } - (*_m)(rIndex1,rIndex2) = Cpq1pq2.real() * (pq2.isReal()? 1. 
: 2.); - if (pq2.isReal()) { - if (std::abs(Cpq1pq2.imag()) > RoundoffTolerance) { - FormatAndThrow<>() - << "Nonzero imaginary LTransform elements for p1=q1, p2=q2: " - << Cpq1pq2; - } - } else { - (*_m)(rIndex1,iIndex2) = -2.*Cpq1pq2.imag(); - } - return; - } else if (pq2.isReal()) { - // Here we know p1!=q1: - if (norm(Cpq1pq2-conj(Cqp1pq2))>RoundoffTolerance) { - FormatAndThrow<>() - << "Inputs to LTransform.set are not conjugate for p2=q2: " - << Cpq1pq2 << " vs " << Cqp1pq2 ; - } - (*_m)(rIndex1, rIndex2) = Cpq1pq2.real(); - (*_m)(iIndex1, rIndex2) = Cpq1pq2.imag(); - } else { - // Neither pq is real: - std::complex z=Cpq1pq2 + Cqp1pq2; - (*_m)(rIndex1, rIndex2) = z.real(); - (*_m)(rIndex1, iIndex2) = -z.imag(); - z=Cpq1pq2 - Cqp1pq2; - (*_m)(iIndex1, rIndex2) = z.imag(); - (*_m)(iIndex1, iIndex2) = z.real(); - } - } - - LVector LTransform::operator*(const LVector rhs) const - { - if (_orderIn != rhs.getOrder()) - FormatAndThrow<>() - << "Order mismatch between LTransform [" << _orderIn - << "] and LVector [" << rhs.getOrder() - << "]"; - shared_ptr > out(new tmv::Vector(sizeOut())); - *out = (*_m) * rhs.rVector(); - return LVector(_orderOut, out); - } - - LTransform LTransform::operator*(const LTransform rhs) const - { - if (_orderIn != rhs.getOrderOut()) - FormatAndThrow<>() - << "Order mismatch between LTransform [" << _orderIn - << "] and LTransform [" << rhs.getOrderOut() - << "]"; - shared_ptr > out( - new tmv::Matrix(sizeOut(),rhs.sizeIn())); - *out = (*_m) * (*rhs._m); - return LTransform(_orderOut, rhs._orderIn, out); - } - - LTransform& LTransform::operator*=(const LTransform rhs) - { - take_ownership(); - if (_orderIn != rhs.getOrderOut()) - FormatAndThrow<>() - << "Order mismatch between LTransform [" << _orderIn - << "] and LTransform [" << rhs.getOrderOut() - << "]"; - (*_m) *= (*rhs._m); - _orderIn = rhs._orderOut; - return *this; - } -#endif - //---------------------------------------------------------------- 
//---------------------------------------------------------------- // Calculate Laguerre polynomials and wavefunctions: @@ -258,72 +140,89 @@ namespace galsim { } } - shared_ptr > LVector::basis( - const tmv::ConstVectorView& x, const tmv::ConstVectorView& y, + shared_ptr LVector::basis( + const VectorXd& x, const VectorXd& y, int order, double sigma) { assert(x.size()==y.size()); - shared_ptr > psi( - new tmv::Matrix(x.size(), PQIndex::size(order))); - basis(x, y, psi->view(), order, sigma); + shared_ptr psi(new MatrixXd(x.size(), PQIndex::size(order))); + basis(x, y, *psi, order, sigma); return psi; } + // Forward declaration. Implemented below. + template + void CalculateBasis( + const VectorXd& x, const VectorXd& y, const VectorXd* invsig, + MatrixXT& psi, + int order, double sigma); + void LVector::basis( - const tmv::ConstVectorView& x, const tmv::ConstVectorView& y, - tmv::MatrixView psi, int order, double sigma) + const VectorXd& x, const VectorXd& y, + MatrixXd& psi, int order, double sigma) { +#ifdef USE_TMV assert(y.size() == x.size() && psi.nrows() == x.size()); assert(psi.ncols()==PQIndex::size(order)); - mBasis(x, y, 0, psi, order, sigma); +#else + assert(y.size() == x.size() && psi.rows() == x.size()); + assert(psi.cols()==PQIndex::size(order)); +#endif + CalculateBasis(x, y, 0, psi, order, sigma); } - shared_ptr > LVector::design( - const tmv::ConstVectorView& x, const tmv::ConstVectorView& y, - const tmv::ConstVectorView& invsig, int order, double sigma) + shared_ptr LVector::design( + const VectorXd& x, const VectorXd& y, + const VectorXd& invsig, int order, double sigma) { - shared_ptr > psi( - new tmv::Matrix(x.size(), PQIndex::size(order))); - design(x, y, invsig, psi->view(), order, sigma); + shared_ptr psi(new MatrixXd(x.size(), PQIndex::size(order))); + design(x, y, invsig, *psi, order, sigma); return psi; } void LVector::design( - const tmv::ConstVectorView& x, const tmv::ConstVectorView& y, - const tmv::ConstVectorView& invsig, - 
tmv::MatrixView psi, int order, double sigma) + const VectorXd& x, const VectorXd& y, + const VectorXd& invsig, + MatrixXd& psi, int order, double sigma) { +#ifdef USE_TMV assert(y.size() == x.size() && psi.nrows() == x.size() && invsig.size() == x.size()); assert(psi.ncols()==PQIndex::size(order)); - mBasis(x, y, &invsig, psi, order, sigma); +#else + assert(y.size() == x.size() && psi.rows() == x.size() && invsig.size() == x.size()); + assert(psi.cols()==PQIndex::size(order)); +#endif + CalculateBasis(x, y, &invsig, psi, order, sigma); } - shared_ptr > > LVector::kBasis( - const tmv::ConstVectorView& kx, const tmv::ConstVectorView& ky, + shared_ptr LVector::kBasis( + const VectorXd& kx, const VectorXd& ky, int order, double sigma) { assert (ky.size() == kx.size()); - const int ndof=PQIndex::size(order); - const int npts = kx.size(); - shared_ptr > > psi_k( - new tmv::Matrix >(npts, ndof, 0.)); - kBasis(kx,ky,psi_k->view(),order,sigma); + shared_ptr psi_k(new MatrixXcd(kx.size(), PQIndex::size(order))); + kBasis(kx,ky,*psi_k,order,sigma); return psi_k; } void LVector::kBasis( - const tmv::ConstVectorView& kx, const tmv::ConstVectorView& ky, - tmv::MatrixView > psi_k, int order, double sigma) + const VectorXd& kx, const VectorXd& ky, + MatrixXcd& psi_k, int order, double sigma) { +#ifdef USE_TMV assert(ky.size() == kx.size() && psi_k.nrows() == kx.size()); assert(psi_k.ncols()==PQIndex::size(order)); - mBasis(kx, ky, 0, psi_k, order, sigma); +#else + assert(ky.size() == kx.size() && psi_k.rows() == kx.size()); + assert(psi_k.cols()==PQIndex::size(order)); +#endif + CalculateBasis(kx, ky, 0, psi_k, order, sigma); } // This helper class deals with the differences between the real and fourier calculations - // in mBasis. First the real-space values: + // in CalculateBasis. 
First the real-space values: template - struct mBasisHelper + struct BasisHelper { static double Asign(int ) { return 1.; } @@ -335,7 +234,7 @@ namespace galsim { // Now the fourier space version, marked by T being complex. template - struct mBasisHelper > + struct BasisHelper > { // The "sign" of the eigenvectors are 1, -I, -1, I, and then repeat. // The input m4 should be m%4. @@ -357,13 +256,16 @@ namespace galsim { }; template - void LVector::mBasis( - const tmv::ConstVectorView& x, const tmv::ConstVectorView& y, - const tmv::ConstVectorView* invsig, - tmv::MatrixView psi, int order, double sigma) + void CalculateBasis( + const VectorXd& x, const VectorXd& y, const VectorXd* invsig, + MatrixXT& psi, int order, double sigma) { assert (y.size()==x.size()); +#ifdef USE_TMV assert (psi.nrows()==x.size() && psi.ncols()==PQIndex::size(order)); +#else + assert (psi.rows()==x.size() && psi.cols()==PQIndex::size(order)); +#endif const int N=order; const int npts_full = x.size(); @@ -375,12 +277,14 @@ namespace galsim { const int BLOCKING_FACTOR=4096; const int max_npts = std::max(BLOCKING_FACTOR,npts_full); - tmv::DiagMatrix Rsq_full(max_npts); - tmv::Matrix A_full(max_npts,2); - tmv::Matrix tmp_full(max_npts,2); - tmv::DiagMatrix Lmq_full(max_npts); - tmv::DiagMatrix Lmqm1_full(max_npts); - tmv::DiagMatrix Lmqm2_full(max_npts); + VectorXd Rsq_full(max_npts); + MatrixXd A_full(max_npts,2); + MatrixXd tmp_full(max_npts,2); + VectorXd Lmq_full(max_npts); + VectorXd Lmqm1_full(max_npts); + VectorXd Lmqm2_full(max_npts); + + psi.setZero(); for (int ilo=0; ilo X = DiagMatrixViewOf(x.subVector(ilo,ihi)); - tmv::ConstDiagMatrixView Y = DiagMatrixViewOf(y.subVector(ilo,ihi)); +#ifdef USE_TMV + tmv::ConstVectorView X = x.subVector(ilo,ihi); + tmv::ConstVectorView Y = y.subVector(ilo,ihi); +#else + Eigen::VectorBlock X = x.segment(ilo,ihi-ilo); + Eigen::VectorBlock Y = y.segment(ilo,ihi-ilo); +#endif // Get the appropriate portion of our temporary matrices. 
- tmv::DiagMatrixView Rsq = Rsq_full.subDiagMatrix(0,npts); +#ifdef USE_TMV + tmv::VectorView Rsq = Rsq_full.subVector(0,npts); tmv::MatrixView A = A_full.rowRange(0,npts); tmv::MatrixView tmp = tmp_full.rowRange(0,npts); +#else + Eigen::VectorBlock Rsq = Rsq_full.segment(0,npts); + Eigen::Block A = A_full.topRows(npts); + Eigen::Block tmp = tmp_full.topRows(npts); +#endif // We need rsq values twice, so store them here. - Rsq = X*X; - Rsq += Y*Y; +#ifdef USE_TMV + Rsq = ElemProd(X,X); + Rsq += ElemProd(Y,Y); +#else + Rsq.array() = X.array() * X.array(); + Rsq.array() += Y.array() * Y.array(); +#endif // This matrix will keep track of real & imag parts // of prefactor * exp(-r^2/2) (x+iy)^m / sqrt(m!) // Build the Gaussian factor +#ifdef USE_TMV for (int i=0; i::applyPrefactor(A.col(0),sigma); +#else + for (int i=0; i::applyPrefactor(A.col(0),sigma); A.col(1).setZero(); // Put 1/sigma factor into every point if doing a design matrix: +#ifdef USE_TMV if (invsig) A.col(0) *= tmv::DiagMatrixViewOf(invsig->subVector(ilo,ihi)); +#else + if (invsig) A.col(0).array() *= invsig->segment(ilo,ihi-ilo).array(); +#endif // Assign the m=0 column first: - psi.col( PQIndex(0,0).rIndex(), ilo,ihi ) = A.col(0); +#ifdef USE_TMV + psi.col(PQIndex(0,0).rIndex(), ilo,ihi) = A.col(0); +#else + psi.col(PQIndex(0,0).rIndex()).segment(ilo,ihi-ilo) = A.col(0).cast(); +#endif // Then ascend m's at q=0: for (int m=1; m<=N; m++) { int rIndex = PQIndex(m,0).rIndex(); // Multiply by (X+iY)/sqrt(m), including a factor 2 first time through - tmp = Y * A; - A = X * A; +#ifdef USE_TMV + tmp = DiagMatrixViewOf(Y) * A; + A = DiagMatrixViewOf(X) * A; +#else + tmp = Y.asDiagonal() * A; + A = X.asDiagonal() * A; +#endif A.col(0) += tmp.col(1); A.col(1) -= tmp.col(0); A *= m==1 ? 2. 
: 1./sqrtn(m); - psi.subMatrix(ilo,ihi,rIndex,rIndex+2) = mBasisHelper::Asign(m%4) * A; +#ifdef USE_TMV + psi.subMatrix(ilo,ihi,rIndex,rIndex+2) = BasisHelper::Asign(m%4) * A; +#else + psi.block(ilo,rIndex,ihi-ilo,2) = BasisHelper::Asign(m%4) * A; +#endif } - // Make three DiagMatrix to hold Lmq's during recurrence calculations - shared_ptr > Lmq( - new tmv::DiagMatrixView(Lmq_full.subDiagMatrix(0,npts))); - shared_ptr > Lmqm1( - new tmv::DiagMatrixView(Lmqm1_full.subDiagMatrix(0,npts))); - shared_ptr > Lmqm2( - new tmv::DiagMatrixView(Lmqm2_full.subDiagMatrix(0,npts))); + // Make three Vectors to hold Lmq's during recurrence calculations +#ifdef USE_TMV + shared_ptr > Lmq( + new tmv::VectorView(Lmq_full.subVector(0,npts))); + shared_ptr > Lmqm1( + new tmv::VectorView(Lmqm1_full.subVector(0,npts))); + shared_ptr > Lmqm2( + new tmv::VectorView(Lmqm2_full.subVector(0,npts))); +#else + shared_ptr > Lmq( + new Eigen::VectorBlock(Lmq_full.segment(0,npts))); + shared_ptr > Lmqm1( + new Eigen::VectorBlock(Lmqm1_full.segment(0,npts))); + shared_ptr > Lmqm2( + new Eigen::VectorBlock(Lmqm2_full.segment(0,npts))); +#endif for (int m=0; m<=N; m++) { PQIndex pq(m,0); @@ -447,14 +397,31 @@ namespace galsim { const int q = pq.getQ(); const int iQ = pq.rIndex(); +#ifdef USE_TMV Lmqm1->setAllTo(1.); // This is Lm0. - *Lmq = Rsq - (p+q-1.); - *Lmq *= mBasisHelper::Lsign(1.) / (sqrtn(p)*sqrtn(q)); + *Lmq = Rsq; + Lmq->addToAll(-(p+q-1.)); +#else + Lmqm1->setConstant(1.); + Lmq->array() = Rsq.array() - (p+q-1.); +#endif + *Lmq *= BasisHelper::Lsign(1.) 
/ (sqrtn(p)*sqrtn(q)); if (m==0) { - psi.col(iQ,ilo,ihi) = (*Lmq) * psi.col(iQ0,ilo,ihi); +#ifdef USE_TMV + psi.col(iQ,ilo,ihi) = DiagMatrixViewOf(*Lmq) * psi.col(iQ0,ilo,ihi); +#else + psi.col(iQ).segment(ilo,ihi-ilo) = Lmq->asDiagonal() * + psi.col(iQ0).segment(ilo,ihi-ilo); +#endif } else { - psi.subMatrix(ilo,ihi,iQ,iQ+2) = (*Lmq) * psi.subMatrix(ilo,ihi,iQ0,iQ0+2); +#ifdef USE_TMV + psi.subMatrix(ilo,ihi,iQ,iQ+2) = DiagMatrixViewOf(*Lmq) * + psi.subMatrix(ilo,ihi,iQ0,iQ0+2); +#else + psi.block(ilo,iQ,ihi-ilo,2) = Lmq->asDiagonal() * + psi.block(ilo,iQ0,ihi-ilo,2); +#endif } } @@ -472,21 +439,37 @@ namespace galsim { Lmqm1.swap(Lmq); double invsqrtpq = 1./sqrtn(p)/sqrtn(q); - *Lmq = Rsq - (p+q-1.); - *Lmq *= mBasisHelper::Lsign(invsqrtpq) * *Lmqm1; +#ifdef USE_TMV + *Lmq = Rsq; + Lmq->addToAll(-(p+q-1.)); + *Lmq = BasisHelper::Lsign(invsqrtpq) * ElemProd(*Lmq, *Lmqm1); +#else + Lmq->array() = Rsq.array() - (p+q-1.); + Lmq->array() *= BasisHelper::Lsign(invsqrtpq) * Lmqm1->array(); +#endif *Lmq -= (sqrtn(p-1)*sqrtn(q-1)*invsqrtpq) * (*Lmqm2); if (m==0) { - psi.col(iQ,ilo,ihi) = (*Lmq) * psi.col(iQ0,ilo,ihi); +#ifdef USE_TMV + psi.col(iQ,ilo,ihi) = DiagMatrixViewOf(*Lmq) * psi.col(iQ0,ilo,ihi); +#else + psi.col(iQ).segment(ilo,ihi-ilo) = Lmq->asDiagonal() * + psi.col(iQ0).segment(ilo,ihi-ilo); +#endif } else { - psi.subMatrix(ilo,ihi,iQ,iQ+2) = (*Lmq) * psi.subMatrix(ilo,ihi,iQ0,iQ0+2); +#ifdef USE_TMV + psi.subMatrix(ilo,ihi,iQ,iQ+2) = DiagMatrixViewOf(*Lmq) * + psi.subMatrix(ilo,ihi,iQ0,iQ0+2); +#else + psi.block(ilo,iQ,ihi-ilo,2) = Lmq->asDiagonal() * + psi.block(ilo,iQ0,ihi-ilo,2); +#endif } } } } } - //--------------------------------------------------------------------------- //--------------------------------------------------------------------------- // Flux determinations @@ -502,7 +485,7 @@ namespace galsim { double LVector::apertureFlux(double R_, int maxP) const { - static shared_ptr > fp; + static shared_ptr fp; static double R=-1.; static double 
psize=-1; @@ -512,11 +495,11 @@ namespace galsim { if (maxP > getOrder()/2) maxP=getOrder()/2; if (!fp.get() || R_ != R || maxP>psize) { - fp.reset(new tmv::Vector(maxP)); + fp.reset(new VectorXd(maxP)); psize = maxP; R = R_; - tmv::Vector Lp(maxP+1); - tmv::Vector Qp(maxP+1); + VectorXd Lp(maxP+1); + VectorXd Qp(maxP+1); double x = R*R; double efact = std::exp(-0.5*x); Lp[0] = Qp[0]=1.; @@ -600,254 +583,6 @@ namespace galsim { << "," << std::setw(2) << getQ() ; } -#if 0 - // Transformation generators - these return a view into static quantities: - const tmv::ConstMatrixView LVector::Generator( - GType iparam, int orderOut, int orderIn) - { - static shared_ptr > gmu; - static shared_ptr > gx; - static shared_ptr > gy; - static shared_ptr > ge1; - static shared_ptr > ge2; - static shared_ptr > grot; - - const int sizeIn = PQIndex::size(orderIn); - const int sizeOut = PQIndex::size(orderOut); - - const int order = std::max(orderOut, orderIn); - if (iparam==iMu) { - if (!gmu.get() || gmu->nrows() zz(-1.,0.); - if (pq.isReal()) lt.set(pq,pq,zz, zz); - else lt.set(pq,pq,zz, 0.); - PQIndex pqprime(p+1, q+1); - if (!pqprime.pastOrder(order)) { - zz = std::complex(-sqrtn(p+1)*sqrtn(q+1), 0.); - if (pq.isReal()) lt.set(pq,pqprime,zz, zz); - else lt.set(pq,pqprime,zz, 0.); - } - if (q>0) { - pqprime.setPQ(p-1,q-1); - zz = std::complex(sqrtn(p)*sqrtn(q), 0.); - if (pq.isReal()) lt.set(pq,pqprime,zz, zz); - else lt.set(pq,pqprime,zz, 0.); - } - } - gmu.reset(new tmv::Matrix(lt.rMatrix())); - } - return gmu->subMatrix(0, sizeOut, 0, sizeIn); - } - if (iparam==iX) { - if (!gx.get() || gx->nrows() zz(-0.5*sqrtn(p+1),0.); - if (pq.isReal()) { - if (!pqprime.pastOrder(order)) lt.set(pq,pqprime,zz, zz); - if (p>0) { - zz = std::complex(0.5*sqrtn(p), 0.); - pqprime.setPQ(p-1,q); - lt.set(pq,pqprime,zz, zz); - } - } else { - if (!pqprime.pastOrder(order)) { - lt.set(pq,pqprime,zz, 0.); - pqprime.setPQ(p, q+1); - zz = std::complex(-0.5*sqrtn(q+1),0.); - if (pq.m()==1) { - 
lt.set(pq,pqprime, zz, zz); - } else { - lt.set(pq,pqprime, zz, 0.); - } - } - pqprime.setPQ(p-1,q); - zz = std::complex(0.5*sqrtn(p), 0.); - if (pq.m()==1) { - lt.set(pq,pqprime, zz, zz); - } else { - lt.set(pq,pqprime, zz, 0.); - } - if (q>0) { - pqprime.setPQ(p,q-1); - zz = std::complex(0.5*sqrtn(q), 0.); - lt.set(pq,pqprime, zz, 0.); - } - } - } - gx.reset(new tmv::Matrix(lt.rMatrix())); - } - return gx->subMatrix(0, sizeOut, 0, sizeIn); - } - - if (iparam==iY) { - if (!gy.get() || gy->nrows() zz(0.,-0.5*sqrtn(p+1)); - if (pq.isReal()) { - if (!pqprime.pastOrder(order)) lt.set(pq,pqprime,zz, zz); - if (p>0) { - zz = std::complex(0.,0.5*sqrtn(q)); - pqprime.setPQ(p,q-1); - lt.set(pq,pqprime,zz, zz); - } - } else { - if (!pqprime.pastOrder(order)) { - lt.set(pq,pqprime,zz, 0.); - pqprime.setPQ(p, q+1); - zz = std::complex(0.,0.5*sqrtn(q+1)); - if (pq.m()==1) { - lt.set(pq,pqprime, zz, conj(zz)); - } else { - lt.set(pq,pqprime, zz, 0.); - } - } - pqprime.setPQ(p-1,q); - zz = std::complex(0.,-0.5*sqrtn(p)); - if (pq.m()==1) { - lt.set(pq,pqprime, zz, conj(zz)); - } else { - lt.set(pq,pqprime, zz, 0.); - } - if (q>0) { - pqprime.setPQ(p,q-1); - zz = std::complex(0.,0.5*sqrtn(q)); - lt.set(pq,pqprime, zz, 0.); - } - } - } - gy.reset(new tmv::Matrix(lt.rMatrix())); - } - return gy->subMatrix(0, sizeOut, 0, sizeIn); - } - - if (iparam==iE1) { - if (!ge1.get() || ge1->nrows() zz(-0.25*sqrtn(p+1)*sqrtn(p+2),0.); - if (pq.isReal()) { - if (!pqprime.pastOrder(order)) lt.set(pq,pqprime,zz, zz); - if (p>1) { - zz = std::complex(0.25*sqrtn(p)*sqrtn(p-1),0.); - pqprime.setPQ(p-2,q); - lt.set(pq,pqprime,zz, zz); - } - } else { - if (!pqprime.pastOrder(order)) { - lt.set(pq,pqprime,zz, 0.); - pqprime.setPQ(p, q+2); - zz = std::complex(-0.25*sqrtn(q+1)*sqrtn(q+2),0.); - if (pq.m()==2) { - lt.set(pq,pqprime, zz, zz); - } else { - lt.set(pq,pqprime, zz, 0.); - } - } - if (p>1) { - pqprime.setPQ(p-2,q); - zz = std::complex(0.25*sqrtn(p)*sqrtn(p-1),0.); - if (pq.m()==2) { - 
lt.set(pq,pqprime, zz, zz); - } else { - lt.set(pq,pqprime, zz, 0.); - } - if (q>1) { - pqprime.setPQ(p,q-2); - zz = std::complex(0.25*sqrtn(q)*sqrtn(q-1),0.); - lt.set(pq,pqprime, zz, 0.); - } - } - } - } - ge1.reset(new tmv::Matrix(lt.rMatrix())); - } - return ge1->subMatrix(0, sizeOut, 0, sizeIn); - } - - if (iparam==iE2) { - if (!ge2.get() || ge2->nrows() zz(0., -0.25*sqrtn(p+1)*sqrtn(p+2)); - if (pq.isReal()) { - if (!pqprime.pastOrder(order)) lt.set(pq,pqprime,zz, zz); - if (p>1) { - zz = std::complex(0.,-0.25*sqrtn(p)*sqrtn(p-1)); - pqprime.setPQ(p-2,q); - lt.set(pq,pqprime,zz, zz); - } - } else { - if (!pqprime.pastOrder(order)) { - lt.set(pq,pqprime,zz, 0.); - pqprime.setPQ(p, q+2); - zz = std::complex(0.,0.25*sqrtn(q+1)*sqrtn(q+2)); - if (pq.m()==2) { - lt.set(pq,pqprime, zz, conj(zz)); - } else { - lt.set(pq,pqprime, zz, 0.); - } - } - if (p>1) { - pqprime.setPQ(p-2,q); - zz = std::complex(0.,-0.25*sqrtn(p)*sqrtn(p-1)); - if (pq.m()==2) { - lt.set(pq,pqprime, zz, conj(zz)); - } else { - lt.set(pq,pqprime, zz, 0.); - } - if (q>1) { - pqprime.setPQ(p,q-2); - zz = std::complex(0.,0.25*sqrtn(q)*sqrtn(q-1)); - lt.set(pq,pqprime, zz, 0.); - } - } - } - } - ge2.reset(new tmv::Matrix(lt.rMatrix())); - } - return ge2->subMatrix(0, sizeOut, 0, sizeIn); - } - - if (iparam==iRot) { - // Rotation is diagonal - could use a DiagMatrix perhaps - if (!grot.get() || grot->nrows()0) lt.set(pq,pq, std::complex(0.,-m), 0.); - } - grot.reset(new tmv::Matrix(lt.rMatrix())); - } - return grot->subMatrix(0, sizeOut, 0, sizeIn); - } else { - throw std::runtime_error("Unknown parameter for LVector::Generator()"); - } - } -#endif - // Function to solve for radius enclosing a specified flux. // Return negative radius if no root is apparent. class FRSolve diff --git a/src/RealGalaxy.cpp b/src/RealGalaxy.cpp index 5c01b0dd228..91b29817fc9 100644 --- a/src/RealGalaxy.cpp +++ b/src/RealGalaxy.cpp @@ -17,7 +17,12 @@ * and/or other materials provided with the distribution. 
*/ +#ifdef USE_TMV #include "TMV.h" +#else +#include "Eigen/Dense" +#endif + #include "RealGalaxy.h" namespace galsim @@ -76,10 +81,18 @@ namespace galsim Sigma[-iy, -ix] = np.conj(dx) */ +#ifdef USE_TMV + typedef tmv::Matrix > MatrixXcd; + typedef tmv::Vector > VectorXcd; +#else + using Eigen::MatrixXcd; + using Eigen::VectorXcd; + using Eigen::VectorXd; +#endif int npix = nkx * nky; int nsedsq = nsed * nsed; - tmv::Matrix > A(nband, nsed); - tmv::Vector > b(nband); + MatrixXcd A(nband, nsed); + VectorXcd b(nband); for (int ix=0; ix ww = tmv::DiagMatrixViewOf(w + iy*nkx + ix, nband, npix); tmv::ConstMatrixView > psf = @@ -95,29 +109,78 @@ namespace galsim tmv::VectorViewOf(kimgs + iy*nkx + ix, nband, npix); tmv::VectorView > x = tmv::VectorViewOf(coef + iy*nkx*nsed + ix*nsed, nsed, 1); - tmv::MatrixView > dx = - tmv::MatrixViewOf(Sigma + iy*nkx*nsedsq + ix*nsedsq, nsed, nsed, nsed, 1); + tmv::MatrixView > dxT = + tmv::MatrixViewOf(Sigma + iy*nkx*nsedsq + ix*nsedsq, nsed, nsed, 1, nsed); A = ww * psf; b = ww * kimg; try { x = b / A; - A.makeInverseATA(dx); + A.makeInverseATA(dxT); } catch (tmv::Singular) { A.divideUsing(tmv::QRP); x = b / A; - A.makeInverseATA(dx); + A.makeInverseATA(dxT); + } +#else + using Eigen::Dynamic; + using Eigen::InnerStride; + using Eigen::Stride; + using Eigen::Upper; + Eigen::Map > ww( + w+iy*nkx+ix, nband, InnerStride<>(npix)); + Eigen::Map > psf( + psf_eff_kimgs + iy*nkx + ix, nband, nsed, + Stride(npix, npix * nsed)); + Eigen::Map > kimg( + kimgs + iy*nkx + ix, nband, InnerStride<>(npix)); + Eigen::Map x(coef + iy*nkx*nsed + ix*nsed, nsed); + Eigen::Map dxT(Sigma + iy*nkx*nsedsq + ix*nsedsq, nsed, nsed); + + A = ww.asDiagonal() * psf; + b = ww.asDiagonal() * kimg; + Eigen::HouseholderQR qr = A.householderQr(); + Eigen::Diagonal Rdiag = qr.matrixQR().diagonal(); + if (Rdiag.array().abs().minCoeff() < 1.e-15*Rdiag.array().abs().maxCoeff()) { + // Then (nearly) signular. Use QRP instead. (This should be fairly rare.) 
+ Eigen::ColPivHouseholderQR qrp = A.colPivHouseholderQr(); + x = qrp.solve(b); + + // A = Q R Pt + // (AtA)^-1 = (PRtQtQRPt)^-1 = (PRtRPt)^-1 = P R^-1 Rt^-1 Pt + const int nzp = qrp.nonzeroPivots(); + Eigen::TriangularView, Upper> R = + qrp.matrixR().topLeftCorner(nzp,nzp).triangularView(); + dxT.setIdentity(); + R.adjoint().solveInPlace(dxT.topLeftCorner(nzp,nzp)); + R.solveInPlace(dxT.topLeftCorner(nzp,nzp)); + dxT = qrp.colsPermutation() * dxT * qrp.colsPermutation().transpose(); + } else { + x = qr.solve(b); + // A = Q R + // (AtA)^-1 = (RtQtQR)^-1 = (RtR)^-1 = R^-1 Rt^-1 + Eigen::TriangularView, Upper> R = + qr.matrixQR().topRows(nsed).triangularView(); + dxT.setIdentity(); + R.adjoint().solveInPlace(dxT); + R.solveInPlace(dxT); } +#endif + + if (ix > 0 && iy > 0) { int ix2 = nkx - ix; int iy2 = nky - iy; if (ix == ix2 && iy == iy2) continue; - tmv::VectorView > x2 = - tmv::VectorViewOf(coef + iy2*nkx*nsed + ix2*nsed, nsed, 1); - tmv::MatrixView > dx2 = - tmv::MatrixViewOf(Sigma + iy2*nkx*nsedsq + ix2*nsedsq, nsed, nsed, nsed, 1); - x2 = x.conjugate(); - dx2 = dx.conjugate(); +#ifdef USE_TMV + tmv::VectorViewOf(coef + iy2*nkx*nsed + ix2*nsed, nsed, 1) = x.conjugate(); + tmv::MatrixViewOf(Sigma + iy2*nkx*nsedsq + ix2*nsedsq, nsed, nsed, 1, nsed) = + dxT.conjugate(); +#else + Eigen::Map(coef + iy2*nkx*nsed + ix2*nsed, nsed) = x.conjugate(); + Eigen::Map(Sigma + iy2*nkx*nsedsq + ix2*nsedsq, nsed, nsed) = + dxT.conjugate(); +#endif } } } diff --git a/src/SBInterpolatedImage.cpp b/src/SBInterpolatedImage.cpp index 740d2b89426..4dd0324a791 100644 --- a/src/SBInterpolatedImage.cpp +++ b/src/SBInterpolatedImage.cpp @@ -105,8 +105,8 @@ namespace galsim { dbg<<"nonzero bounds = "<<_nonzero_bounds<(*_pimpl).getDoDelta(); } - double SBVonKarman::getDeltaAmplitude() const + double SBVonKarman::getDelta() const { assert(dynamic_cast(_pimpl.get())); - return static_cast(*_pimpl).getDeltaAmplitude(); + return static_cast(*_pimpl).getDelta(); } double 
SBVonKarman::getHalfLightRadius() const @@ -128,8 +128,8 @@ namespace galsim { const GSParamsPtr& gsparams) : _lam(lam), _L0(L0), _L0_invcuberoot(fast_pow(_L0, -1./3)), _L053(fast_pow(L0, 5./3)), - _deltaAmplitude(exp(-0.5*magic1*_L053)), - _deltaScale(1./(1.-_deltaAmplitude)), + _delta(exp(-0.5*magic1*_L053)), + _deltaScale(1./(1.-_delta)), _lam_arcsec(_lam * ARCSEC2RAD / (2.*M_PI)), _doDelta(doDelta), _gsparams(gsparams), _radial(Table::spline) @@ -139,13 +139,13 @@ namespace galsim { // note that kValue(0.0) = 1. double mkt = _gsparams->maxk_threshold; if (_doDelta) { - if (mkt < _deltaAmplitude) { + if (mkt < _delta) { // If the delta function amplitude is too large, then no matter how far out in k we // go, kValue never drops below that amplitude. // _maxk = std::numeric_limits::infinity(); _maxk = MOCK_INF; } else { - mkt = mkt*(1.-_deltaAmplitude)+_deltaAmplitude; + mkt = mkt*(1.-_delta)+_delta; } } if (_maxk != MOCK_INF) { @@ -157,7 +157,7 @@ namespace galsim { } dbg<<"_maxk = "<<_maxk<<" arcsec^-1\n"; dbg<<"SB(maxk) = "< +#include +#include +#ifdef USE_TMV #include "TMV.h" #include "TMV_SymBand.h" -#include "Table.h" -#include -#include +#endif -#include +#include "Table.h" namespace galsim { @@ -60,7 +61,7 @@ namespace galsim { ArgVec::ArgVec(const double* vec, int n): _vec(vec), _n(n) { - xdbg<<"Make ArgVec from: "< VectorXd; +typedef tmv::Matrix MatrixXd; +typedef tmv::VectorView MapVectorXd; +#else +#include "Eigen/Dense" +using Eigen::VectorXd; +using Eigen::MatrixXd; +typedef Eigen::Map MapVectorXd; +#endif namespace galsim { @@ -42,28 +52,46 @@ namespace galsim { } } - void setup_pow(tmv::VectorView& x, tmv::Matrix& xpow) + void setup_pow(MapVectorXd& x, MatrixXd& xpow) { +#ifdef USE_TMV xpow.col(0).setAllTo(1.); xpow.col(1) = x; for (int i=2; i pvu(pvar, m, m, m, 1, tmv::NonConj); - tmv::ConstMatrixView pvv(pvar + m*m, m, m, m, 1, tmv::NonConj); +#ifdef USE_TMV + tmv::ConstMatrixView pvuT(pvar, m, m, 1, m, tmv::NonConj); + tmv::ConstMatrixView 
pvvT(pvar + m*m, m, m, 1, m, tmv::NonConj); +#else + Eigen::Map pvuT(pvar, m, m); + Eigen::Map pvvT(pvar + m*m, m, m); +#endif while (n) { // Do this in blocks of at most 256 to avoid blowing up the memory usage when // this is run on a large image. It's also a bit faster this way, since there // are fewer cache misses. const int nn = n >= 256 ? 256 : n; - tmv::VectorView u(uar, nn, 1, tmv::NonConj); - tmv::VectorView v(var, nn, 1, tmv::NonConj); - tmv::Matrix upow(nn,m); - tmv::Matrix vpow(nn,m); +#ifdef USE_TMV + MapVectorXd u(uar, nn, 1, tmv::NonConj); + MapVectorXd v(var, nn, 1, tmv::NonConj); +#else + MapVectorXd u(uar, nn); + MapVectorXd v(var, nn); +#endif + MatrixXd upow(nn, m); + MatrixXd vpow(nn, m); + setup_pow(u, upow); setup_pow(v, vpow); @@ -77,13 +105,25 @@ namespace galsim { // above formulae. So we use the fact that // diag(AT . B) = sum_rows(A * B) - tmv::Vector ones(m, 1.); - tmv::Matrix temp = vpow * pvu.transpose(); +#ifdef USE_TMV + VectorXd ones(m, 1.); +#else + VectorXd ones = Eigen::VectorXd::Ones(m); +#endif + MatrixXd temp = vpow * pvuT; +#ifdef USE_TMV temp = ElemProd(upow, temp); +#else + temp.array() *= upow.array(); +#endif u = temp * ones; - temp = vpow * pvv.transpose(); + temp = vpow * pvvT; +#ifdef USE_TMV temp = ElemProd(upow, temp); +#else + temp.array() *= upow.array(); +#endif v = temp * ones; uar += nn; @@ -109,19 +149,30 @@ namespace galsim { double u0 = u; double v0 = v; - tmv::ConstMatrixView pvu(pvar, 4, 4, 4, 1, tmv::NonConj); - tmv::ConstMatrixView pvv(pvar + 16, 4, 4, 4, 1, tmv::NonConj); +#ifdef USE_TMV + tmv::ConstMatrixView pvuT(pvar, 4, 4, 1, 4, tmv::NonConj); + tmv::ConstMatrixView pvvT(pvar + 16, 4, 4, 1, 4, tmv::NonConj); + typedef tmv::SmallVector Vector4d; + typedef tmv::SmallMatrix Matrix2d; + typedef tmv::SmallVector Vector2d; +#else + Eigen::Map pvuT(pvar); + Eigen::Map pvvT(pvar + 16); + using Eigen::Vector4d; + using Eigen::Matrix2d; + using Eigen::Vector2d; +#endif // Some temporary vectors/matrices 
we'll use within the loop below. - tmv::SmallVector upow; - tmv::SmallVector vpow; - tmv::SmallVector pvu_vpow; - tmv::SmallVector pvv_vpow; - tmv::SmallVector dupow; - tmv::SmallVector dvpow; - tmv::SmallMatrix j1; - tmv::SmallVector diff; - tmv::SmallVector duv; + Vector4d upow; + Vector4d vpow; + Vector4d pvu_vpow; + Vector4d pvv_vpow; + Vector4d dupow; + Vector4d dvpow; + Matrix2d j1; + Vector2d diff; + Vector2d duv; double prev_err = -1.; for (int iter=0; iter& xpow) + void setup_pow(double x, VectorXd& xpow) { + xpow[0] = 1.; xpow[1] = x; for (int i=2; i abx(abar, m, m, m, 1, tmv::NonConj); - tmv::ConstMatrixView aby(abar + m*m, m, m, m, 1, tmv::NonConj); - dbg<<"abx = "< MatrixXd; +typedef tmv::Vector VectorXd; +#else +#include "Eigen/Dense" +using Eigen::MatrixXd; +using Eigen::VectorXd; +#endif -#include "FFT.h" +#include "hsm/PSFCorr.h" #include "math/Nan.h" +#include "FFT.h" namespace galsim { namespace hsm { @@ -69,7 +78,7 @@ namespace hsm { void find_mom_2( ConstImageView data, - tmv::Matrix& moments, int max_order, + MatrixXd& moments, int max_order, double& x0, double& y0, double& sigma, double convergence_threshold, int& num_iter, const HSMParams& hsmparams); @@ -262,7 +271,7 @@ namespace hsm { results.moments_status = 0; } else { dbg<<"About to get moments using find_mom_2"< moments(3,3); + MatrixXd moments(3,3); double sig = guess_sig; find_mom_2(masked_object_image_cview, moments, 2, results.moments_centroid.x, results.moments_centroid.y, sig, @@ -439,7 +448,7 @@ namespace hsm { */ void qho1d_wf_1(long nx, double xmin, double xstep, long Nmax, double sigma, - tmv::Matrix& psi) + MatrixXd& psi) { double beta, beta2__2, norm0; @@ -509,7 +518,7 @@ namespace hsm { */ void find_mom_1( ConstImageView data, - tmv::Matrix& moments, int max_order, + MatrixXd& moments, int max_order, double x0, double y0, double sigma) { @@ -520,14 +529,21 @@ namespace hsm { int ny = data.getNRow(); int sx = data.getStep(); int sy = data.getStride(); - tmv::Matrix 
psi_x(nx, max_order+1); - tmv::Matrix psi_y(ny, max_order+1); + MatrixXd psi_x(nx, max_order+1); + MatrixXd psi_y(ny, max_order+1); /* Compute wavefunctions */ qho1d_wf_1(nx, (double)xmin - x0, 1., max_order, sigma, psi_x); qho1d_wf_1(ny, (double)ymin - y0, 1., max_order, sigma, psi_y); +#ifdef USE_TMV tmv::ConstMatrixView mdata(data.getData(),nx,ny,sx,sy,tmv::NonConj); +#else + using Eigen::Dynamic; + using Eigen::Stride; + Eigen::Map > mdata( + data.getData(),nx,ny, Stride(sy,sx)); +#endif moments = psi_x.transpose() * mdata * psi_y; } @@ -553,7 +569,7 @@ namespace hsm { void find_mom_2( ConstImageView data, - tmv::Matrix& moments, int max_order, + MatrixXd& moments, int max_order, double& x0, double& y0, double& sigma, double convergence_threshold, int& num_iter, const HSMParams& hsmparams) { @@ -562,7 +578,7 @@ namespace hsm { double convergence_factor = 1; /* Ensure at least one iteration. */ num_iter = 0; - tmv::Matrix iter_moments(hsmparams.adapt_order+1,hsmparams.adapt_order+1); + MatrixXd iter_moments(hsmparams.adapt_order+1,hsmparams.adapt_order+1); #ifdef N_CHECKVAL if (convergence_threshold <= 0) { @@ -664,7 +680,7 @@ namespace hsm { double Inv2Minv_xx = 0.5/Minv_xx; // Will be useful later... /* Generate Minv_xx__x_x0__x_x0 array */ - tmv::Vector Minv_xx__x_x0__x_x0(xmax-xmin+1); + VectorXd Minv_xx__x_x0__x_x0(xmax-xmin+1); for(int x=xmin;x<=xmax;x++) Minv_xx__x_x0__x_x0[x-xmin] = Minv_xx*(x-x0)*(x-x0); /* Now let's initialize the outputs and then sum @@ -725,7 +741,11 @@ namespace hsm { const double* imageptr = data.getPtr(ix1,y); const int step = data.getStep(); double x_x0 = ix1 - x0; +#ifdef USE_TMV const double* mxxptr = Minv_xx__x_x0__x_x0.cptr() + ix1-xmin; +#else + const double* mxxptr = Minv_xx__x_x0__x_x0.data() + ix1-xmin; +#endif for(int x=ix1;x<=ix2;++x,x_x0+=1.,imageptr+=step) { /* Compute displacement from weight centroid, then * get elliptical radius and weight. 
@@ -916,9 +936,20 @@ namespace hsm { dbg<<"image3: "< mIm1(image1.getData(),nx1,ny1,sx1,sy1,tmv::NonConj); tmv::ConstMatrixView mIm2(image2.getData(),nx2,ny2,sx2,sy2,tmv::NonConj); tmv::MatrixView mIm3(image_out.getData(),nx3,ny3,sx3,sy3,tmv::NonConj); +#else + using Eigen::Dynamic; + using Eigen::Stride; + Eigen::Map > mIm1( + image1.getData(),nx1,ny1, Stride(sy1,sx1)); + Eigen::Map > mIm2( + image2.getData(),nx2,ny2, Stride(sy2,sx2)); + Eigen::Map > mIm3( + image_out.getData(),nx3,ny3, Stride(sy3,sx3)); +#endif dbg<<"mIm1 = "< m1(dim1,dim1,0.); - tmv::Matrix m2(dim1,dim1,0.); - tmv::Matrix mout(dim1,dim1,0.); - tmv::Vector Ax(dim4,0.); - tmv::Vector Bx(dim4,0.); + MatrixXd m1(dim1,dim1,0.); + MatrixXd m2(dim1,dim1,0.); + MatrixXd mout(dim1,dim1,0.); + VectorXd Ax(dim4,0.); + VectorXd Bx(dim4,0.); /* Build input maps */ for(int x=image1.getXMin();x<=image1.getXMax();x++) @@ -1075,8 +1122,13 @@ namespace hsm { image_out(i,j) += mout(i-out_xref,j-out_yref); #endif dbg<<"Done: mIm3 => "< moments(hsmparams.ksb_moments_max+1,hsmparams.ksb_moments_max+1); - tmv::Matrix psfmoms(hsmparams.ksb_moments_max+1,hsmparams.ksb_moments_max+1); + MatrixXd moments(hsmparams.ksb_moments_max+1,hsmparams.ksb_moments_max+1); + MatrixXd psfmoms(hsmparams.ksb_moments_max+1,hsmparams.ksb_moments_max+1); /* Determine the adaptive centroid and variance of the measured galaxy */ x0 = x0_gal; diff --git a/src/math/BesselK.cpp b/src/math/BesselK.cpp index af0362febfa..e5ccd1a6158 100644 --- a/src/math/BesselK.cpp +++ b/src/math/BesselK.cpp @@ -872,6 +872,7 @@ namespace math { s2 = 0.; if (inu == 0 && n == 1) { recurse = false; + s2 = 0; // Unused in this case, but saves a maybe-uninitialized warning. 
} else { s2 = s1 * (x + dnu + .5 - p1 / p2) / x; } diff --git a/src/math/Gamma.cpp b/src/math/Gamma.cpp index 23505e9f60b..430295ef69b 100644 --- a/src/math/Gamma.cpp +++ b/src/math/Gamma.cpp @@ -46,6 +46,7 @@ namespace math { // Defined in BesselJ.cpp double dcsevl(double x, const double* cs, int n); +#if not (__cplusplus >= 201103L) double tgamma(double x) { double g = dgamma(x); @@ -74,6 +75,7 @@ namespace math { #endif return g; } +#endif double gamma_p(double a, double x) { diff --git a/src/mmgr.cpp b/src/mmgr.cpp deleted file mode 100644 index 8019280da21..00000000000 --- a/src/mmgr.cpp +++ /dev/null @@ -1,1719 +0,0 @@ -/* -*- c++ -*- - * Copyright (c) 2012-2017 by the GalSim developers team on GitHub - * https://github.com/GalSim-developers - * - * This file is part of GalSim: The modular galaxy image simulation toolkit. - * https://github.com/GalSim-developers/GalSim - * - * GalSim is free software: redistribution and use in source and binary forms, - * with or without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this - * list of conditions, and the disclaimer given in the accompanying LICENSE - * file. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions, and the disclaimer given in the documentation - * and/or other materials provided with the distribution. - */ - -// --------------------------------------------------------------------------------------------------------------------------------- -// Copyright 2000, Paul Nettle. All rights reserved. -// -// You are free to use this source code in any commercial or non-commercial product. 
-// -// mmgr.cpp - Memory manager & tracking software -// -// The most recent version of this software can be found at: ftp://ftp.GraphicsPapers.com/pub/ProgrammingTools/MemoryManagers/ -// -// [NOTE: Best when viewed with 8-character tabs] -// -// --------------------------------------------------------------------------------------------------------------------------------- -// -// !!IMPORTANT!! -// -// This software is self-documented with periodic comments. Before you start using this software, perform a search for the string -// "-DOC-" to locate pertinent information about how to use this software. -// -// You are also encouraged to read the comment blocks throughout this source file. They will help you understand how this memory -// tracking software works, so you can better utilize it within your applications. -// -// NOTES: -// -// 1. This code purposely uses no external routines that allocate RAM (other than the raw allocation routines, such as malloc). We -// do this because we want this to be as self-contained as possible. As an example, we don't use assert, because when running -// under WIN32, the assert brings up a dialog box, which allocates RAM. Doing this in the middle of an allocation would be bad. -// -// 2. When trying to override new/delete under MFC (which has its own version of global new/delete) the linker will complain. In -// order to fix this error, use the compiler option: /FORCE, which will force it to build an executable even with linker errors. -// Be sure to check those errors each time you compile, otherwise, you may miss a valid linker error. -// -// 3. If you see something that looks odd to you or seems like a strange way of going about doing something, then consider that this -// code was carefully thought out. If something looks odd, then just assume I've got a good reason for doing it that way (an -// example is the use of the class MemStaticTimeTracker.) -// -// 4. 
With MFC applications, you will need to comment out any occurance of "#define new DEBUG_NEW" from all source files. -// -// 5. Include file dependencies are _very_important_ for getting the MMGR to integrate nicely into your application. Be careful if -// you're including standard includes from within your own project inclues; that will break this very specific dependency order. -// It should look like this: -// -// #include // Standard includes MUST come first -// #include // -// #include // -// -// #include "mmgr.h" // mmgr.h MUST come next -// -// #include "myfile1.h" // Project includes MUST come last -// #include "myfile2.h" // -// #include "myfile3.h" // -// -// --------------------------------------------------------------------------------------------------------------------------------- - -//#include "stdafx.h" -#include -#include -#include -#include -#include -#include -#include -#include - -#ifndef WIN32 -#include -#define LINE_END "\n" -#else -#define LINE_END "\r\n" -#endif - -#include "galsim/mmgr.h" - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- If you're like me, it's hard to gain trust in foreign code. This memory manager will try to INDUCE your code to crash (for -// very good reasons... like making bugs obvious as early as possible.) Some people may be inclined to remove this memory tracking -// software if it causes crashes that didn't exist previously. In reality, these new crashes are the BEST reason for using this -// software! -// -// Whether this software causes your application to crash, or if it reports errors, you need to be able to TRUST this software. To -// this end, you are given some very simple debugging tools. -// -// The quickest way to locate problems is to enable the STRESS_TEST macro (below.) 
This should catch 95% of the crashes before they -// occur by validating every allocation each time this memory manager performs an allocation function. If that doesn't work, keep -// reading... -// -// If you enable the TEST_MEMORY_MANAGER #define (below), this memory manager will log an entry in the memory.log file each time it -// enters and exits one of its primary allocation handling routines. Each call that succeeds should place an "ENTER" and an "EXIT" -// into the log. If the program crashes within the memory manager, it will log an "ENTER", but not an "EXIT". The log will also -// report the name of the routine. -// -// Just because this memory manager crashes does not mean that there is a bug here! First, an application could inadvertantly damage -// the heap, causing malloc(), realloc() or free() to crash. Also, an application could inadvertantly damage some of the memory used -// by this memory tracking software, causing it to crash in much the same way that a damaged heap would affect the standard -// allocation routines. -// -// In the event of a crash within this code, the first thing you'll want to do is to locate the actual line of code that is -// crashing. You can do this by adding log() entries throughout the routine that crashes, repeating this process until you narrow -// in on the offending line of code. If the crash happens in a standard C allocation routine (i.e. malloc, realloc or free) don't -// bother contacting me, your application has damaged the heap. You can help find the culprit in your code by enabling the -// STRESS_TEST macro (below.) -// -// If you truely suspect a bug in this memory manager (and you had better be sure about it! :) you can contact me at -// midnight@GraphicsPapers.com. 
Before you do, however, check for a newer version at: -// -// ftp://ftp.GraphicsPapers.com/pub/ProgrammingTools/MemoryManagers/ -// -// When using this debugging aid, make sure that you are NOT setting the alwaysLogAll variable on, otherwise the log could be -// cluttered and hard to read. -// --------------------------------------------------------------------------------------------------------------------------------- - -//#define TEST_MEMORY_MANAGER - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- Enable this sucker if you really want to stress-test your app's memory usage, or to help find hard-to-find bugs -// --------------------------------------------------------------------------------------------------------------------------------- - -//#define STRESS_TEST - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- Enable this sucker if you want to stress-test your app's error-handling. Set RANDOM_FAIL to the percentage of failures you -// want to test with (0 = none, >100 = all failures). 
-// --------------------------------------------------------------------------------------------------------------------------------- - -//#define RANDOM_FAILURE 100.0 - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- Locals -- modify these flags to suit your needs -// --------------------------------------------------------------------------------------------------------------------------------- - -#ifdef STRESS_TEST -static const size_t hashBits = 12; -static bool randomWipe = true; -static bool alwaysValidateAll = true; -static bool alwaysLogAll = true; -static bool alwaysWipeAll = true; -static bool cleanupLogOnFirstRun = true; -static const size_t paddingSize = 1024; // An extra 8K per allocation! -#else -static const size_t hashBits = 12; -static bool randomWipe = false; -static bool alwaysValidateAll = false; -static bool alwaysLogAll = false; -static bool alwaysWipeAll = true; -static bool cleanupLogOnFirstRun = true; -static const size_t paddingSize = 4; -#endif - -// --------------------------------------------------------------------------------------------------------------------------------- -// We define our own assert, because we don't want to bring up an assertion dialog, since that allocates RAM. Our new assert -// simply declares a forced breakpoint. -// --------------------------------------------------------------------------------------------------------------------------------- - -#ifdef WIN32 -#ifdef _DEBUG -#define m_assert(x) if ((x) == false) __asm { int 3 } -#else -#define m_assert(x) {} -#endif -#else // Linux uses assert, which we can use safely, since it doesn't bring up a dialog within the program. 
-#define m_assert assert -#endif - -// --------------------------------------------------------------------------------------------------------------------------------- -// Here, we turn off our macros because any place in this source file where the word 'new' or the word 'delete' (etc.) -// appear will be expanded by the macro. So to avoid problems using them within this source file, we'll just #undef them. -// --------------------------------------------------------------------------------------------------------------------------------- - -#undef new -#undef delete -#undef malloc -#undef calloc -#undef realloc -#undef free - -// --------------------------------------------------------------------------------------------------------------------------------- -// Defaults for the constants & statics in the MemoryManager class -// --------------------------------------------------------------------------------------------------------------------------------- - -const size_t m_alloc_unknown = 0; -const size_t m_alloc_new = 1; -const size_t m_alloc_new_array = 2; -const size_t m_alloc_malloc = 3; -const size_t m_alloc_calloc = 4; -const size_t m_alloc_realloc = 5; -const size_t m_alloc_delete = 6; -const size_t m_alloc_delete_array = 7; -const size_t m_alloc_free = 8; - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- Get to know these values. They represent the values that will be used to fill unused and deallocated RAM. 
-// --------------------------------------------------------------------------------------------------------------------------------- - -static size_t prefixPattern = 0xbaadf00d; // Fill pattern for bytes preceeding allocated blocks -static size_t postfixPattern = 0xdeadc0de; // Fill pattern for bytes following allocated blocks -static size_t unusedPattern = 0xfeedface; // Fill pattern for freshly allocated blocks -static size_t releasedPattern = 0xdeadbeef; // Fill pattern for deallocated blocks - -// --------------------------------------------------------------------------------------------------------------------------------- -// Other locals -// --------------------------------------------------------------------------------------------------------------------------------- - -static const size_t hashSize = 1 << hashBits; -static const char *allocationTypes[] = {"Unknown", - "new", "new[]", "malloc", "calloc", - "realloc", "delete", "delete[]", "free"}; -static sAllocUnit *hashTable[hashSize]; -static sAllocUnit *reservoir; -static size_t currentAllocationCount = 0; -static size_t breakOnAllocationCount = 0; -static sMStats stats; -static const char *sourceFile = "??"; -static const char *sourceFunc = "??"; -static size_t sourceLine = 0; -static bool staticDeinitTime = false; -static sAllocUnit **reservoirBuffer = NULL; -static size_t reservoirBufferSize = 0; - -// --------------------------------------------------------------------------------------------------------------------------------- -// Local functions only -// --------------------------------------------------------------------------------------------------------------------------------- - -static void doCleanupLogOnFirstRun() -{ - if (cleanupLogOnFirstRun) - { - unlink("memory.log"); - cleanupLogOnFirstRun = false; - } -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static void log(const char *format, ...) 
-{ - // Build the buffer - - static char buffer[2048]; - va_list ap; - va_start(ap, format); - vsprintf(buffer, format, ap); - va_end(ap); - - // Cleanup the log? - - if (cleanupLogOnFirstRun) doCleanupLogOnFirstRun(); - - // Open the log file - - FILE *fp = fopen("memory.log", "ab"); - - // If you hit this assert, then the memory logger is unable to log information to a file (can't open the file for some - // reason.) You can interrogate the variable 'buffer' to see what was supposed to be logged (but won't be.) - m_assert(fp); - - if (!fp) return; - - // Spit out the data to the log - - fprintf(fp, "%s" LINE_END, buffer); - fclose(fp); -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static const char *sourceFileStripper(const char *sourceFile) -{ - char *ptr = strrchr(sourceFile, '\\'); - if (ptr) return ptr + 1; - ptr = strrchr(sourceFile, '/'); - if (ptr) return ptr + 1; - return sourceFile; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static const char *ownerString(const char *sourceFile, const size_t sourceLine, const char *sourceFunc) -{ - static char str[90]; - memset(str, 0, sizeof(str)); - sprintf(str, "%s(%05zu)::%s", sourceFileStripper(sourceFile), sourceLine, sourceFunc); - return str; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static const char *insertCommas(size_t value) -{ - static char str[30]; - memset(str, 0, sizeof(str)); - - sprintf(str, "%zu", value); - if (strlen(str) > 3) - { - memmove(&str[strlen(str)-3], &str[strlen(str)-4], 4); - str[strlen(str) - 4] = ','; - } - if (strlen(str) > 7) - { - memmove(&str[strlen(str)-7], &str[strlen(str)-8], 8); - str[strlen(str) - 8] = ','; - } - if (strlen(str) > 11) - { - memmove(&str[strlen(str)-11], 
&str[strlen(str)-12], 12); - str[strlen(str) - 12] = ','; - } - - return str; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static const char *memorySizeString(size_t size) -{ - static char str[90]; - if (size > (1024*1024)) sprintf(str, "%10s (%7.2fM)", insertCommas(size), (float) size / (1024.0f * 1024.0f)); - else if (size > 1024) sprintf(str, "%10s (%7.2fK)", insertCommas(size), (float) size / 1024.0f); - else sprintf(str, "%10s bytes ", insertCommas(size)); - return str; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static sAllocUnit *findAllocUnit(const void *reportedAddress) -{ - // Just in case... - m_assert(reportedAddress != NULL); - - // Use the address to locate the hash index. Note that we shift off the lower four bits. This is because most allocated - // addresses will be on four-, eight- or even sixteen-byte boundaries. If we didn't do this, the hash index would not have - // very good coverage. - - size_t hashIndex = ((size_t) reportedAddress >> 4) & (hashSize - 1); - sAllocUnit *ptr = hashTable[hashIndex]; - while(ptr) - { - if (ptr->reportedAddress == reportedAddress) return ptr; - ptr = ptr->next; - } - - return NULL; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static size_t calculateActualSize(const size_t reportedSize) -{ - // We use DWORDS as our padding, and a long is guaranteed to be 4 bytes, but an int is not (ANSI defines an int as - // being the standard word size for a processor; on a 32-bit machine, that's 4 bytes, but on a 64-bit machine, it's - // 8 bytes, which means an int can actually be larger than a long.) 
- - return reportedSize + paddingSize * sizeof(long) * 2; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static size_t calculateReportedSize(const size_t actualSize) -{ - // We use DWORDS as our padding, and a long is guaranteed to be 4 bytes, but an int is not (ANSI defines an int as - // being the standard word size for a processor; on a 32-bit machine, that's 4 bytes, but on a 64-bit machine, it's - // 8 bytes, which means an int can actually be larger than a long.) - - return actualSize - paddingSize * sizeof(long) * 2; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static void *calculateReportedAddress(const void *actualAddress) -{ - // We allow this... - - if (!actualAddress) return NULL; - - // JUst account for the padding - - return (void *) ((char *) actualAddress + sizeof(long) * paddingSize); -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static void wipeWithPattern(sAllocUnit *allocUnit, size_t pattern, const size_t originalReportedSize = 0) -{ - // For a serious test run, we use wipes of random a random value. However, if this causes a crash, we don't want it to - // crash in a differnt place each time, so we specifically DO NOT call srand. If, by chance your program calls srand(), - // you may wish to disable that when running with a random wipe test. This will make any crashes more consistent so they - // can be tracked down easier. - - if (randomWipe) - { - pattern = ((rand() & 0xff) << 24) | ((rand() & 0xff) << 16) | ((rand() & 0xff) << 8) | (rand() & 0xff); - } - - // -DOC- We should wipe with 0's if we're not in debug mode, so we can help hide bugs if possible when we release the - // product. So uncomment the following line for releases. 
- // - // Note that the "alwaysWipeAll" should be turned on for this to have effect, otherwise it won't do much good. But we'll - // leave it this way (as an option) because this does slow things down. - // pattern = 0; - - // This part of the operation is optional - - if (alwaysWipeAll && allocUnit->reportedSize > originalReportedSize) - { - // Fill the bulk - - long *lptr = (long *) ((char *)allocUnit->reportedAddress + originalReportedSize); - int length = allocUnit->reportedSize - originalReportedSize; - int nlongs = length / sizeof(long); - int i; - for (i = 0; i < nlongs; i++, lptr++) - { - *lptr = pattern; - } - - // Fill the remainder - - size_t shiftCount = 0; - char *cptr = (char *) lptr; - for (i = 0; i < (length & 0x3); i++, cptr++, shiftCount += 8) - { - *cptr = (pattern & (0xff << shiftCount)) >> shiftCount; - } - } - - // Write in the prefix/postfix bytes - - long *pre = (long *) allocUnit->actualAddress; - long *post = (long *) ((char *)allocUnit->actualAddress + allocUnit->actualSize - paddingSize * sizeof(long)); - for (size_t i = 0; i < paddingSize; i++, pre++, post++) - { - *pre = prefixPattern; - *post = postfixPattern; - } -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static void resetGlobals() -{ - sourceFile = "??"; - sourceLine = 0; - sourceFunc = "??"; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static void dumpAllocations(FILE *fp) -{ - fprintf(fp, "Alloc. 
Addr Size Addr Size BreakOn BreakOn " LINE_END); - fprintf(fp, "Number Reported Reported Actual Actual Unused Method Dealloc Realloc Allocated by " LINE_END); - fprintf(fp, "------ ---------- ---------- ---------- ---------- ---------- -------- ------- ------- --------------------------------------------------- " LINE_END); - - - for (size_t i = 0; i < hashSize; i++) - { - sAllocUnit *ptr = hashTable[i]; - while(ptr) - { - fprintf(fp, "%06zu 0x%08zX 0x%08zX 0x%08zX 0x%08zX 0x%08zX %-8s %c %c %s" LINE_END, - ptr->allocationNumber, - (size_t) ptr->reportedAddress, - (size_t) ptr->reportedSize, - (size_t) ptr->actualAddress, - (size_t) ptr->actualSize, - m_calcUnused(ptr), - allocationTypes[ptr->allocationType], - ptr->breakOnDealloc ? 'Y':'N', - ptr->breakOnRealloc ? 'Y':'N', - ownerString(ptr->sourceFile, ptr->sourceLine, ptr->sourceFunc)); - ptr = ptr->next; - } - } -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static void dumpLeakReport() -{ - // Open the report file - - FILE *fp = fopen("memleaks.log", "w+b"); - - // If you hit this assert, then the memory report generator is unable to log information to a file (can't open the file for - // some reason.) - m_assert(fp); - if (!fp) return; - - // Any leaks? 
- - // Header - - static char timeString[25]; - memset(timeString, 0, sizeof(timeString)); - time_t t = time(NULL); - struct tm *tme = localtime(&t); - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, "| Memory leak report for: %02d/%02d/%04d %02d:%02d:%02d |" LINE_END, tme->tm_mon + 1, tme->tm_mday, tme->tm_year + 1900, tme->tm_hour, tme->tm_min, tme->tm_sec); - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, LINE_END); - fprintf(fp, LINE_END); - if (stats.totalAllocUnitCount) - { - fprintf(fp, "%zu memory leak%s found:" LINE_END, stats.totalAllocUnitCount, stats.totalAllocUnitCount == 1 ? "":"s"); - } - else - { - fprintf(fp, "Congratulations! No memory leaks found!" LINE_END); - - // We can finally free up our own memory allocations - - if (reservoirBuffer) - { - for (size_t i = 0; i < reservoirBufferSize; i++) - { - free(reservoirBuffer[i]); - } - free(reservoirBuffer); - reservoirBuffer = 0; - reservoirBufferSize = 0; - reservoir = NULL; - } - } - fprintf(fp, LINE_END); - - if (stats.totalAllocUnitCount) - { - dumpAllocations(fp); - } - - fclose(fp); -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// We use a static class to let us know when we're in the midst of static deinitialization -// --------------------------------------------------------------------------------------------------------------------------------- - -class MemStaticTimeTracker -{ -public: - // Don't do the leak report in the destructor, since other static variables might - // be deallocated after this one. 
- MemStaticTimeTracker() { doCleanupLogOnFirstRun(); } - ~MemStaticTimeTracker() { staticDeinitTime = true; dumpLeakReport(); } -}; -static MemStaticTimeTracker mstt; - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- Flags & options -- Call these routines to enable/disable the following options -// --------------------------------------------------------------------------------------------------------------------------------- - -bool &m_alwaysValidateAll() -{ - // Force a validation of all allocation units each time we enter this software - return alwaysValidateAll; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -bool &m_alwaysLogAll() -{ - // Force a log of every allocation & deallocation into memory.log - return alwaysLogAll; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -bool &m_alwaysWipeAll() -{ - // Force this software to always wipe memory with a pattern when it is being allocated/dallocated - return alwaysWipeAll; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -bool &m_randomeWipe() -{ - // Force this software to use a random pattern when wiping memory -- good for stress testing - return randomWipe; -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- Simply call this routine with the address of an allocated block of RAM, to cause it to force a breakpoint when it is -// reallocated. 
-// --------------------------------------------------------------------------------------------------------------------------------- - -bool &m_breakOnRealloc(void *reportedAddress) -{ - // Locate the existing allocation unit - - sAllocUnit *au = findAllocUnit(reportedAddress); - - // If you hit this assert, you tried to set a breakpoint on reallocation for an address that doesn't exist. Interrogate the - // stack frame or the variable 'au' to see which allocation this is. - m_assert(au != NULL); - - // If you hit this assert, you tried to set a breakpoint on reallocation for an address that wasn't allocated in a way that - // is compatible with reallocation. - m_assert(au->allocationType == m_alloc_malloc || - au->allocationType == m_alloc_calloc || - au->allocationType == m_alloc_realloc); - - return au->breakOnRealloc; -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- Simply call this routine with the address of an allocated block of RAM, to cause it to force a breakpoint when it is -// deallocated. -// --------------------------------------------------------------------------------------------------------------------------------- - -bool &m_breakOnDealloc(void *reportedAddress) -{ - // Locate the existing allocation unit - - sAllocUnit *au = findAllocUnit(reportedAddress); - - // If you hit this assert, you tried to set a breakpoint on deallocation for an address that doesn't exist. Interrogate the - // stack frame or the variable 'au' to see which allocation this is. 
- m_assert(au != NULL); - - return au->breakOnDealloc; -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- When tracking down a difficult bug, use this routine to force a breakpoint on a specific allocation count -// --------------------------------------------------------------------------------------------------------------------------------- - -void m_breakOnAllocation(size_t count) -{ - breakOnAllocationCount = count; -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// Used by the macros -// --------------------------------------------------------------------------------------------------------------------------------- - -void m_setOwner(const char *file, const size_t line, const char *func) -{ - sourceFile = file; - sourceLine = line; - sourceFunc = func; -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// Global new/new[] -// -// These are the standard new/new[] operators. They are merely interface functions that operate like normal new/new[], but use our -// memory tracking routines. 
-// --------------------------------------------------------------------------------------------------------------------------------- - -void *operator new(size_t reportedSize) throw(std::bad_alloc) -{ -#ifdef TEST_MEMORY_MANAGER - log("ENTER: new with size = %u",(size_t) reportedSize); -#endif - - // ANSI says: allocation requests of 0 bytes will still return a valid value - - if (reportedSize == 0) reportedSize = 1; - - // ANSI says: loop continuously because the error handler could possibly free up some memory - - for(;;) - { - // Try the allocation - - void *ptr = m_allocator(sourceFile, sourceLine, sourceFunc, m_alloc_new, reportedSize); - if (ptr) - { -#ifdef TEST_MEMORY_MANAGER - log("EXIT : new with ptr = %p",ptr); -#endif - return ptr; - } - - // There isn't a way to determine the new handler, except through setting it. So we'll just set it to NULL, then - // set it back again. - - std::new_handler nh = std::set_new_handler(0); - std::set_new_handler(nh); - - // If there is an error handler, call it - - if (nh) - { - (*nh)(); - } - - // Otherwise, throw the exception - - else - { -#ifdef TEST_MEMORY_MANAGER - log("EXIT : new with bad_alloc"); -#endif - throw std::bad_alloc(); - } - } -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -void *operator new[](size_t reportedSize) throw(std::bad_alloc) -{ -#ifdef TEST_MEMORY_MANAGER - log("ENTER: new[] with size = %u",(size_t) reportedSize); -#endif - - // The ANSI standard says that allocation requests of 0 bytes will still return a valid value - - if (reportedSize == 0) reportedSize = 1; - - // ANSI says: loop continuously because the error handler could possibly free up some memory - - for(;;) - { - // Try the allocation - - void *ptr = m_allocator(sourceFile, sourceLine, sourceFunc, m_alloc_new_array, reportedSize); - if (ptr) - { -#ifdef TEST_MEMORY_MANAGER - log("EXIT : new[] with ptr = %p",ptr); -#endif - return 
ptr; - } - - // There isn't a way to determine the new handler, except through setting it. So we'll just set it to NULL, then - // set it back again. - - std::new_handler nh = std::set_new_handler(0); - std::set_new_handler(nh); - - // If there is an error handler, call it - - if (nh) - { - (*nh)(); - } - - // Otherwise, throw the exception - - else - { -#ifdef TEST_MEMORY_MANAGER - log("EXIT : new[] with bad_alloc"); -#endif - throw std::bad_alloc(); - } - } -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// Other global new/new[] -// -// These are the standard new/new[] operators as used by Microsoft's memory tracker. We don't want them interfering with our memory -// tracking efforts. Like the previous versions, these are merely interface functions that operate like normal new/new[], but use -// our memory tracking routines. -// --------------------------------------------------------------------------------------------------------------------------------- - -void *operator new(size_t reportedSize, const char *sourceFile, int sourceLine) throw(std::bad_alloc) -{ -#ifdef TEST_MEMORY_MANAGER - log("ENTER: new with size = %u, file,line = %s, %d", - (size_t) reportedSize, sourceFile, sourceLine); -#endif - - // The ANSI standard says that allocation requests of 0 bytes will still return a valid value - - if (reportedSize == 0) reportedSize = 1; - - // ANSI says: loop continuously because the error handler could possibly free up some memory - - for(;;) - { - // Try the allocation - - void *ptr = m_allocator(sourceFile, sourceLine, "??", m_alloc_new, reportedSize); - if (ptr) - { -#ifdef TEST_MEMORY_MANAGER - log("EXIT : new with ptr = %p",ptr); -#endif - return ptr; - } - - // There isn't a way to determine the new handler, except through setting it. So we'll just set it to NULL, then - // set it back again. 
- - std::new_handler nh = std::set_new_handler(0); - std::set_new_handler(nh); - - // If there is an error handler, call it - - if (nh) - { - (*nh)(); - } - - // Otherwise, throw the exception - - else - { -#ifdef TEST_MEMORY_MANAGER - log("EXIT : new with bad_alloc"); -#endif - throw std::bad_alloc(); - } - } -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -void *operator new[](size_t reportedSize, const char *sourceFile, int sourceLine) throw(std::bad_alloc) -{ -#ifdef TEST_MEMORY_MANAGER - log("ENTER: new[] with size = %u, file,line = %s, %d", - (size_t) reportedSize, sourceFile, sourceLine); -#endif - - // The ANSI standard says that allocation requests of 0 bytes will still return a valid value - - if (reportedSize == 0) reportedSize = 1; - - // ANSI says: loop continuously because the error handler could possibly free up some memory - - for(;;) - { - // Try the allocation - - void *ptr = m_allocator(sourceFile, sourceLine, "??", m_alloc_new_array, reportedSize); - if (ptr) - { -#ifdef TEST_MEMORY_MANAGER - log("EXIT : new[] with ptr = %p",ptr); -#endif - return ptr; - } - - // There isn't a way to determine the new handler, except through setting it. So we'll just set it to NULL, then - // set it back again. - - std::new_handler nh = std::set_new_handler(0); - std::set_new_handler(nh); - - // If there is an error handler, call it - - if (nh) - { - (*nh)(); - } - - // Otherwise, throw the exception - - else - { -#ifdef TEST_MEMORY_MANAGER - log("EXIT : new[] with bad_alloc"); -#endif - throw std::bad_alloc(); - } - } -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// Global delete/delete[] -// -// These are the standard delete/delete[] operators. They are merely interface functions that operate like normal delete/delete[], -// but use our memory tracking routines. 
-// --------------------------------------------------------------------------------------------------------------------------------- - -void operator delete(void *reportedAddress) throw() -{ -#ifdef TEST_MEMORY_MANAGER - log("ENTER: delete for %p",reportedAddress); -#endif - - // ANSI says: delete & delete[] allow NULL pointers (they do nothing) - - if (!reportedAddress) return; - - m_deallocator(sourceFile, sourceLine, sourceFunc, m_alloc_delete, reportedAddress); - -#ifdef TEST_MEMORY_MANAGER - log("EXIT : delete for %p",reportedAddress); -#endif -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -void operator delete[](void *reportedAddress) throw() -{ -#ifdef TEST_MEMORY_MANAGER - log("ENTER: delete[] for %p",reportedAddress); -#endif - - // ANSI says: delete & delete[] allow NULL pointers (they do nothing) - - if (!reportedAddress) return; - - m_deallocator(sourceFile, sourceLine, sourceFunc, m_alloc_delete_array, reportedAddress); - -#ifdef TEST_MEMORY_MANAGER - log("EXIT : delete[] for %p",reportedAddress); -#endif -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// Allocate memory and track it -// --------------------------------------------------------------------------------------------------------------------------------- - -void *m_allocator(const char *sourceFile, const size_t sourceLine, const char *sourceFunc, const size_t allocationType, const size_t reportedSize) -{ - try - { -#ifdef TEST_MEMORY_MANAGER - log("ENTER: m_allocator()"); -#endif - - // Increase our allocation count - - currentAllocationCount++; - - // Log the request - - if (alwaysLogAll) log("%05d %-40s %8s : %s", currentAllocationCount, ownerString(sourceFile, sourceLine, sourceFunc), allocationTypes[allocationType], memorySizeString(reportedSize)); - - // If you hit this assert, you requested a 
breakpoint on a specific allocation count - m_assert(currentAllocationCount != breakOnAllocationCount); - - // If necessary, grow the reservoir of unused allocation units - - if (!reservoir) - { - // Allocate 256 reservoir elements - - reservoir = (sAllocUnit *) malloc(sizeof(sAllocUnit) * 256); - - // If you hit this assert, then the memory manager failed to allocate internal memory for tracking the - // allocations - m_assert(reservoir != NULL); - - // Danger Will Robinson! - - if (reservoir == NULL) throw "Unable to allocate RAM for internal memory tracking data"; - - // Build a linked-list of the elements in our reservoir - - memset(reservoir, 0, sizeof(sAllocUnit) * 256); - for (size_t i = 0; i < 256 - 1; i++) - { - reservoir[i].next = &reservoir[i+1]; - } - - // Add this address to our reservoirBuffer so we can free it later - - sAllocUnit **temp = (sAllocUnit **) realloc(reservoirBuffer, (reservoirBufferSize + 1) * sizeof(sAllocUnit *)); - m_assert(temp); - if (temp) - { - reservoirBuffer = temp; - reservoirBuffer[reservoirBufferSize++] = reservoir; - } - } - - // Logical flow says this should never happen... 
- m_assert(reservoir != NULL); - - // Grab a new allocaton unit from the front of the reservoir - - sAllocUnit *au = reservoir; - reservoir = au->next; - - // Populate it with some real data - - memset(au, 0, sizeof(sAllocUnit)); - au->actualSize = calculateActualSize(reportedSize); -#ifdef RANDOM_FAILURE - double a = rand(); - double b = RAND_MAX / 100.0 * RANDOM_FAILURE; - if (a > b) - { - au->actualAddress = malloc(au->actualSize); - } - else - { - log("!Random faiure!"); - au->actualAddress = NULL; - } -#else - au->actualAddress = malloc(au->actualSize); -#endif - au->reportedSize = reportedSize; - au->reportedAddress = calculateReportedAddress(au->actualAddress); - au->allocationType = allocationType; - au->sourceLine = sourceLine; - au->allocationNumber = currentAllocationCount; - if (sourceFile) strncpy(au->sourceFile, sourceFileStripper(sourceFile), sizeof(au->sourceFile) - 1); - else strcpy (au->sourceFile, "??"); - if (sourceFunc) strncpy(au->sourceFunc, sourceFunc, sizeof(au->sourceFunc) - 1); - else strcpy (au->sourceFunc, "??"); - - // We don't want to assert with random failures, because we want the application to deal with them. - -#ifndef RANDOM_FAILURE - // If you hit this assert, then the requested allocation simply failed (you're out of memory.) Interrogate the - // variable 'au' or the stack frame to see what you were trying to do. - m_assert(au->actualAddress != NULL); -#endif - - if (au->actualAddress == NULL) - { - throw "Request for allocation failed. Out of memory."; - } - - // If you hit this assert, then this allocation was made from a source that isn't setup to use this memory tracking - // software, use the stack frame to locate the source and include our H file. 
- m_assert(allocationType != m_alloc_unknown); - - // Insert the new allocation into the hash table - - size_t hashIndex = ((size_t) au->reportedAddress >> 4) & (hashSize - 1); - if (hashTable[hashIndex]) hashTable[hashIndex]->prev = au; - au->next = hashTable[hashIndex]; - au->prev = NULL; - hashTable[hashIndex] = au; - - // Account for the new allocatin unit in our stats - - stats.totalReportedMemory += au->reportedSize; - stats.totalActualMemory += au->actualSize; - stats.totalAllocUnitCount++; - if (stats.totalReportedMemory > stats.peakReportedMemory) stats.peakReportedMemory = stats.totalReportedMemory; - if (stats.totalActualMemory > stats.peakActualMemory) stats.peakActualMemory = stats.totalActualMemory; - if (stats.totalAllocUnitCount > stats.peakAllocUnitCount) stats.peakAllocUnitCount = stats.totalAllocUnitCount; - stats.accumulatedReportedMemory += au->reportedSize; - stats.accumulatedActualMemory += au->actualSize; - stats.accumulatedAllocUnitCount++; - - // Prepare the allocation unit for use (wipe it with recognizable garbage) - - wipeWithPattern(au, unusedPattern); - - // calloc() expects the reported memory address range to be filled with 0's - - if (allocationType == m_alloc_calloc) - { - memset(au->reportedAddress, 0, au->reportedSize); - } - - // Validate every single allocated unit in memory - - if (alwaysValidateAll) m_validateAllAllocUnits(); - - // Log the result - - if (alwaysLogAll) log(" OK: %010p (hash: %d)", au->reportedAddress, hashIndex); - - // Resetting the globals insures that if at some later time, somebody calls our memory manager from an unknown - // source (i.e. they didn't include our H file) then we won't think it was the last allocation. 
- - resetGlobals(); - - // Return the (reported) address of the new allocation unit - -#ifdef TEST_MEMORY_MANAGER - log("EXIT : m_allocator()"); -#endif - - return au->reportedAddress; - } - catch(const char *err) - { - // Deal with the errors - - log(err); - resetGlobals(); - -#ifdef TEST_MEMORY_MANAGER - log("EXIT : m_allocator()"); -#endif - - return NULL; - } -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// Reallocate memory and track it -// --------------------------------------------------------------------------------------------------------------------------------- - -void *m_reallocator(const char *sourceFile, const size_t sourceLine, const char *sourceFunc, const size_t reallocationType, const size_t reportedSize, void *reportedAddress) -{ - try - { -#ifdef TEST_MEMORY_MANAGER - log("ENTER: m_reallocator()"); -#endif - - // Calling realloc with a NULL should force same operations as a malloc - - if (!reportedAddress) - { - return m_allocator(sourceFile, sourceLine, sourceFunc, reallocationType, reportedSize); - } - - // Increase our allocation count - - currentAllocationCount++; - - // If you hit this assert, you requested a breakpoint on a specific allocation count - m_assert(currentAllocationCount != breakOnAllocationCount); - - // Log the request - - if (alwaysLogAll) log("%05d %-40s %8s(%010p): %s", currentAllocationCount, ownerString(sourceFile, sourceLine, sourceFunc), allocationTypes[reallocationType], reportedAddress, memorySizeString(reportedSize)); - - // Locate the existing allocation unit - - sAllocUnit *au = findAllocUnit(reportedAddress); - - // If you hit this assert, you tried to reallocate RAM that wasn't allocated by this memory manager. - m_assert(au != NULL); - if (au == NULL) throw "Request to reallocate RAM that was never allocated"; - - // If you hit this assert, then the allocation unit that is about to be reallocated is damaged. 
But you probably - // already know that from a previous assert you should have seen in validateAllocUnit() :) - m_assert(m_validateAllocUnit(au)); - - // If you hit this assert, then this reallocation was made from a source that isn't setup to use this memory - // tracking software, use the stack frame to locate the source and include our H file. - m_assert(reallocationType != m_alloc_unknown); - - // If you hit this assert, you were trying to reallocate RAM that was not allocated in a way that is compatible with - // realloc. In other words, you have a allocation/reallocation mismatch. - m_assert(au->allocationType == m_alloc_malloc || - au->allocationType == m_alloc_calloc || - au->allocationType == m_alloc_realloc); - - // If you hit this assert, then the "break on realloc" flag for this allocation unit is set (and will continue to be - // set until you specifically shut it off. Interrogate the 'au' variable to determine information about this - // allocation unit. - m_assert(au->breakOnRealloc == false); - - // Keep track of the original size - - size_t originalReportedSize = au->reportedSize; - - // Do the reallocation - - void *oldReportedAddress = reportedAddress; - size_t newActualSize = calculateActualSize(reportedSize); - void *newActualAddress = NULL; -#ifdef RANDOM_FAILURE - double a = rand(); - double b = RAND_MAX / 100.0 * RANDOM_FAILURE; - if (a > b) - { - newActualAddress = realloc(au->actualAddress, newActualSize); - } - else - { - log("!Random faiure!"); - } -#else - newActualAddress = realloc(au->actualAddress, newActualSize); -#endif - - // We don't want to assert with random failures, because we want the application to deal with them. - -#ifndef RANDOM_FAILURE - // If you hit this assert, then the requested allocation simply failed (you're out of memory) Interrogate the - // variable 'au' to see the original allocation. You can also query 'newActualSize' to see the amount of memory - // trying to be allocated. 
Finally, you can query 'reportedSize' to see how much memory was requested by the caller. - m_assert(newActualAddress); -#endif - - if (!newActualAddress) throw "Request for reallocation failed. Out of memory."; - - // Remove this allocation from our stats (we'll add the new reallocation again later) - - stats.totalReportedMemory -= au->reportedSize; - stats.totalActualMemory -= au->actualSize; - - // Update the allocation with the new information - - au->actualSize = newActualSize; - au->actualAddress = newActualAddress; - au->reportedSize = calculateReportedSize(newActualSize); - au->reportedAddress = calculateReportedAddress(newActualAddress); - au->allocationType = reallocationType; - au->sourceLine = sourceLine; - au->allocationNumber = currentAllocationCount; - if (sourceFile) strncpy(au->sourceFile, sourceFileStripper(sourceFile), sizeof(au->sourceFile) - 1); - else strcpy (au->sourceFile, "??"); - if (sourceFunc) strncpy(au->sourceFunc, sourceFunc, sizeof(au->sourceFunc) - 1); - else strcpy (au->sourceFunc, "??"); - - // The reallocation may cause the address to change, so we should relocate our allocation unit within the hash table - - size_t hashIndex = (size_t) -1; - if (oldReportedAddress != au->reportedAddress) - { - // Remove this allocation unit from the hash table - - { - size_t hashIndex2 = ((size_t) oldReportedAddress >> 4) & (hashSize - 1); - if (hashTable[hashIndex2] == au) - { - hashTable[hashIndex2] = hashTable[hashIndex2]->next; - } - else - { - if (au->prev) au->prev->next = au->next; - if (au->next) au->next->prev = au->prev; - } - } - - // Re-insert it back into the hash table - - hashIndex = ((size_t) au->reportedAddress >> 4) & (hashSize - 1); - if (hashTable[hashIndex]) hashTable[hashIndex]->prev = au; - au->next = hashTable[hashIndex]; - au->prev = NULL; - hashTable[hashIndex] = au; - } - - // Account for the new allocatin unit in our stats - - stats.totalReportedMemory += au->reportedSize; - stats.totalActualMemory += au->actualSize; 
- if (stats.totalReportedMemory > stats.peakReportedMemory) stats.peakReportedMemory = stats.totalReportedMemory; - if (stats.totalActualMemory > stats.peakActualMemory) stats.peakActualMemory = stats.totalActualMemory; - int deltaReportedSize = reportedSize - originalReportedSize; - if (deltaReportedSize > 0) - { - stats.accumulatedReportedMemory += deltaReportedSize; - stats.accumulatedActualMemory += deltaReportedSize; - } - - // Prepare the allocation unit for use (wipe it with recognizable garbage) - - wipeWithPattern(au, unusedPattern, originalReportedSize); - - // If you hit this assert, then something went wrong, because the allocation unit was properly validated PRIOR to - // the reallocation. This should not happen. - m_assert(m_validateAllocUnit(au)); - - // Validate every single allocated unit in memory - - if (alwaysValidateAll) m_validateAllAllocUnits(); - - // Log the result - - if (alwaysLogAll) log(" OK: %010p (hash: %d)", au->reportedAddress, hashIndex); - - // Resetting the globals insures that if at some later time, somebody calls our memory manager from an unknown - // source (i.e. they didn't include our H file) then we won't think it was the last allocation. 
- - resetGlobals(); - - // Return the (reported) address of the new allocation unit - -#ifdef TEST_MEMORY_MANAGER - log("EXIT : m_reallocator()"); -#endif - - return au->reportedAddress; - } - catch(const char *err) - { - // Deal with the errors - - log(err); - resetGlobals(); - -#ifdef TEST_MEMORY_MANAGER - log("EXIT : m_reallocator()"); -#endif - - return NULL; - } -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// Deallocate memory and track it -// --------------------------------------------------------------------------------------------------------------------------------- - -void m_deallocator(const char *sourceFile, const size_t sourceLine, const char *sourceFunc, const size_t deallocationType, const void *reportedAddress) -{ - try - { -#ifdef TEST_MEMORY_MANAGER - log("ENTER: m_deallocator()"); -#endif - - // Log the request - - if (alwaysLogAll) log(" %-40s %8s(%010p)", ownerString(sourceFile, sourceLine, sourceFunc), allocationTypes[deallocationType], reportedAddress); - - // Go get the allocation unit - - sAllocUnit *au = findAllocUnit(reportedAddress); - - // If you hit this assert, you tried to deallocate RAM that wasn't allocated by this memory manager. - m_assert(au != NULL); - if (au == NULL) throw "Request to deallocate RAM that was never allocated"; - - // If you hit this assert, then the allocation unit that is about to be deallocated is damaged. But you probably - // already know that from a previous assert you should have seen in validateAllocUnit() :) - m_assert(m_validateAllocUnit(au)); - - // If you hit this assert, then this deallocation was made from a source that isn't setup to use this memory - // tracking software, use the stack frame to locate the source and include our H file. 
- m_assert(deallocationType != m_alloc_unknown); - - // If you hit this assert, you were trying to deallocate RAM that was not allocated in a way that is compatible with - // the deallocation method requested. In other words, you have a allocation/deallocation mismatch. - if (au->allocationType == m_alloc_new && !(deallocationType == m_alloc_delete)) { - std::cout<<"alloc == new, but dealloc != delete for "<actualAddress)<allocationType == m_alloc_new_array && !(deallocationType == m_alloc_delete_array)) { - std::cout<<"alloc == new[], but dealloc != delete[] for "<actualAddress)<allocationType == m_alloc_new ) || - (deallocationType == m_alloc_delete_array && au->allocationType == m_alloc_new_array) || - (deallocationType == m_alloc_free && au->allocationType == m_alloc_malloc ) || - (deallocationType == m_alloc_free && au->allocationType == m_alloc_calloc ) || - (deallocationType == m_alloc_free && au->allocationType == m_alloc_realloc ) || - (deallocationType == m_alloc_unknown ) ); - - // If you hit this assert, then the "break on dealloc" flag for this allocation unit is set. Interrogate the 'au' - // variable to determine information about this allocation unit. - m_assert(au->breakOnDealloc == false); - - // Wipe the deallocated RAM with a new pattern. This doen't actually do us much good in debug mode under WIN32, - // because Microsoft's memory debugging & tracking utilities will wipe it right after we do. Oh well. 
- - wipeWithPattern(au, releasedPattern); - - // Do the deallocation - - free(au->actualAddress); - - // Remove this allocation unit from the hash table - - size_t hashIndex = ((size_t) au->reportedAddress >> 4) & (hashSize - 1); - if (hashTable[hashIndex] == au) - { - hashTable[hashIndex] = au->next; - } - else - { - if (au->prev) au->prev->next = au->next; - if (au->next) au->next->prev = au->prev; - } - - // Remove this allocation from our stats - - stats.totalReportedMemory -= au->reportedSize; - stats.totalActualMemory -= au->actualSize; - stats.totalAllocUnitCount--; - - // Add this allocation unit to the front of our reservoir of unused allocation units - - memset(au, 0, sizeof(sAllocUnit)); - au->next = reservoir; - reservoir = au; - - // Resetting the globals insures that if at some later time, somebody calls our memory manager from an unknown - // source (i.e. they didn't include our H file) then we won't think it was the last allocation. - - resetGlobals(); - - // Validate every single allocated unit in memory - - if (alwaysValidateAll) m_validateAllAllocUnits(); - - // If we're in the midst of static deinitialization time, track any pending memory leaks - - if (staticDeinitTime) dumpLeakReport(); - } - catch(const char *err) - { - // Deal with errors - - log(err); - resetGlobals(); - } - -#ifdef TEST_MEMORY_MANAGER - log("EXIT : m_deallocator()"); -#endif -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- The following utilitarian allow you to become proactive in tracking your own memory, or help you narrow in on those tough -// bugs. 
-// --------------------------------------------------------------------------------------------------------------------------------- - -bool m_validateAddress(const void *reportedAddress) -{ - // Just see if the address exists in our allocation routines - - return findAllocUnit(reportedAddress) != NULL; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -bool m_validateAllocUnit(const sAllocUnit *allocUnit) -{ - // Make sure the padding is untouched - - long *pre = (long *) allocUnit->actualAddress; - long *post = (long *) ((char *)allocUnit->actualAddress + allocUnit->actualSize - paddingSize * sizeof(long)); - bool errorFlag = false; - for (size_t i = 0; i < paddingSize; i++, pre++, post++) - { - if (*pre != (long) prefixPattern) - { - log("A memory allocation unit was corrupt because of an underrun:"); - m_dumpAllocUnit(allocUnit, " "); - errorFlag = true; - log("prefix is:"); - long *pre2 = (long *) allocUnit->actualAddress; - for (size_t i2=0; i2actualAddress + allocUnit->actualSize - paddingSize * sizeof(long)); - for (size_t i2=0; i2next; - } - } - - // Test for hash-table correctness - - if (allocCount != stats.totalAllocUnitCount) - { - log("Memory tracking hash table corrupt!"); - errors++; - } - - // If you hit this assert, then the internal memory (hash table) used by this memory tracking software is damaged! The - // best way to track this down is to use the alwaysLogAll flag in conjunction with STRESS_TEST macro to narrow in on the - // offending code. After running the application with these settings (and hitting this assert again), interrogate the - // memory.log file to find the previous successful operation. The corruption will have occurred between that point and this - // assertion. 
- m_assert(allocCount == stats.totalAllocUnitCount); - - // If you hit this assert, then you've probably already been notified that there was a problem with a allocation unit in a - // prior call to validateAllocUnit(), but this assert is here just to make sure you know about it. :) - m_assert(errors == 0); - - // Log any errors - - if (errors) log("While validting all allocation units, %d allocation unit(s) were found to have problems", errors); - - // Return the error status - - return errors != 0; -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- Unused RAM calculation routines. Use these to determine how much of your RAM is unused (in bytes) -// --------------------------------------------------------------------------------------------------------------------------------- - -size_t m_calcUnused(const sAllocUnit *allocUnit) -{ - const size_t *ptr = (const size_t *) allocUnit->reportedAddress; - size_t count = 0; - - for (size_t i = 0; i < allocUnit->reportedSize; i += sizeof(long), ptr++) - { - if (*ptr == unusedPattern) count += sizeof(long); - } - - return count; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -size_t m_calcAllUnused() -{ - // Just go through each allocation unit in the hash table and count the unused RAM - - size_t total = 0; - for (size_t i = 0; i < hashSize; i++) - { - sAllocUnit *ptr = hashTable[i]; - while(ptr) - { - total += m_calcUnused(ptr); - ptr = ptr->next; - } - } - - return total; -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- The following functions are for logging and statistics reporting. 
-// --------------------------------------------------------------------------------------------------------------------------------- - -void m_extra_log(const char *text) -{ - log("%s\n",text); -} - -void m_dumpAllocUnit(const sAllocUnit *allocUnit, const char *prefix) -{ - log("%sAddress (reported): %010p", prefix, allocUnit->reportedAddress); - log("%sAddress (actual) : %010p", prefix, allocUnit->actualAddress); - log("%sSize (reported) : 0x%08X (%s)", prefix, allocUnit->reportedSize, memorySizeString(allocUnit->reportedSize)); - log("%sSize (actual) : 0x%08X (%s)", prefix, allocUnit->actualSize, memorySizeString(allocUnit->actualSize)); - log("%sOwner : %s(%d)::%s", prefix, allocUnit->sourceFile, allocUnit->sourceLine, allocUnit->sourceFunc); - log("%sAllocation type : %s", prefix, allocationTypes[allocUnit->allocationType]); - log("%sAllocation number : %d", prefix, allocUnit->allocationNumber); -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -void m_dumpMemoryReport(const char *filename, const bool overwrite) -{ - // Open the report file - - FILE *fp = NULL; - - if (overwrite) fp = fopen(filename, "w+b"); - else fp = fopen(filename, "ab"); - - // If you hit this assert, then the memory report generator is unable to log information to a file (can't open the file for - // some reason.) 
- m_assert(fp); - if (!fp) return; - - // Header - - static char timeString[25]; - memset(timeString, 0, sizeof(timeString)); - time_t t = time(NULL); - struct tm *tme = localtime(&t); - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, "| Memory report for: %02d/%02d/%04d %02d:%02d:%02d |" LINE_END, tme->tm_mon + 1, tme->tm_mday, tme->tm_year + 1900, tme->tm_hour, tme->tm_min, tme->tm_sec); - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, LINE_END); - fprintf(fp, LINE_END); - - // Report summary - - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, "| T O T A L S |" LINE_END); - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, " Allocation unit count: %10s" LINE_END, insertCommas(stats.totalAllocUnitCount)); - fprintf(fp, " Reported to application: %s" LINE_END, memorySizeString(stats.totalReportedMemory)); - fprintf(fp, " Actual total memory in use: %s" LINE_END, memorySizeString(stats.totalActualMemory)); - fprintf(fp, " Memory tracking overhead: %s" LINE_END, memorySizeString(stats.totalActualMemory - stats.totalReportedMemory)); - fprintf(fp, LINE_END); - - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, "| P E A K S |" LINE_END); - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, " Allocation unit count: %10s" LINE_END, 
insertCommas(stats.peakAllocUnitCount)); - fprintf(fp, " Reported to application: %s" LINE_END, memorySizeString(stats.peakReportedMemory)); - fprintf(fp, " Actual: %s" LINE_END, memorySizeString(stats.peakActualMemory)); - fprintf(fp, " Memory tracking overhead: %s" LINE_END, memorySizeString(stats.peakActualMemory - stats.peakReportedMemory)); - fprintf(fp, LINE_END); - - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, "| A C C U M U L A T E D |" LINE_END); - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, " Allocation unit count: %s" LINE_END, memorySizeString(stats.accumulatedAllocUnitCount)); - fprintf(fp, " Reported to application: %s" LINE_END, memorySizeString(stats.accumulatedReportedMemory)); - fprintf(fp, " Actual: %s" LINE_END, memorySizeString(stats.accumulatedActualMemory)); - fprintf(fp, LINE_END); - - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, "| U N U S E D |" LINE_END); - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, " Memory allocated but not in use: %s" LINE_END, memorySizeString(m_calcAllUnused())); - fprintf(fp, LINE_END); - - dumpAllocations(fp); - - fclose(fp); -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -sMStats m_getMemoryStatistics() -{ - return stats; -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// mmgr.cpp - End of file -// 
--------------------------------------------------------------------------------------------------------------------------------- diff --git a/test_requirements.txt b/test_requirements.txt new file mode 100644 index 00000000000..cf91a082b96 --- /dev/null +++ b/test_requirements.txt @@ -0,0 +1,6 @@ +pytest>=3.4 +pytest-xdist>=1.19 +pytest-timeout>=1.2 +scipy>=1.0 +nose>=1.3 +matplotlib>=2.0 # Not needed by GalSim, but an implicit requirement of starlink diff --git a/tests/.coveragerc b/tests/.coveragerc index 4959a661b0d..cb662bed3f2 100644 --- a/tests/.coveragerc +++ b/tests/.coveragerc @@ -11,7 +11,13 @@ omit = *deprecated/* # This is a utility for tracking down OSErrors. Don't include in coverage. - fds_test.py + *fds_test.py + + # These files are used for the executables, galsim and galsim_download_cosmos. + # They don't get run via nosetests, so they don't really get covered. + *__main__.py + *main.py + *download_cosmos.py # Without this, coverage misses anything that is only run in multiprocessing mode. concurrency = multiprocessing diff --git a/tests/TestAll.cpp b/tests/TestAll.cpp index 636db85b0d8..95d36029918 100644 --- a/tests/TestAll.cpp +++ b/tests/TestAll.cpp @@ -22,6 +22,7 @@ #include #include "Test.h" +#include extern void TestImage(); extern void TestInteg(); @@ -30,10 +31,14 @@ extern void TestVersion(); int main() { try { + std::cout<<"Start C++ tests.\n"; // Run them all here: TestImage(); + std::cout<<"TestImage passed all tests.\n"; TestInteg(); + std::cout<<"TestInteg passed all tests.\n"; TestVersion(); + std::cout<<"TestVersion passed all tests.\n"; } catch (std::exception& e) { std::cerr< profile.max() / 2.)[0][-1] - np.testing.assert_equal(hwhm_index, dx_scale / 2, - err_msg="Kolmogorov PSF does not have the expected FWHM.") - t2 = time.time() - print 'time for %s = %.2f'%(funcname(),t2-t1) - -def test_atmos_flux_scaling(): - """Test flux scaling for AtmosphericPSF. 
- """ - import time - t1 = time.time() - # init with lam_over_r0 and flux only (should be ok given last tests) - obj = galsim.deprecated.AtmosphericPSF(lam_over_r0=test_lor0, flux=test_flux) - obj *= 2. - np.testing.assert_almost_equal( - obj.getFlux(), test_flux * 2., decimal=param_decimal, - err_msg="Flux param inconsistent after __imul__.") - obj = galsim.deprecated.AtmosphericPSF(lam_over_r0=test_lor0, flux=test_flux) - obj /= 2. - np.testing.assert_almost_equal( - obj.getFlux(), test_flux / 2., decimal=param_decimal, - err_msg="Flux param inconsistent after __idiv__.") - obj = galsim.deprecated.AtmosphericPSF(lam_over_r0=test_lor0, flux=test_flux) - obj2 = obj * 2. - # First test that original obj is unharmed... - np.testing.assert_almost_equal( - obj.getFlux(), test_flux, decimal=param_decimal, - err_msg="Flux param inconsistent after __rmul__ (original).") - # Then test new obj2 flux - np.testing.assert_almost_equal( - obj2.getFlux(), test_flux * 2., decimal=param_decimal, - err_msg="Flux param inconsistent after __rmul__ (result).") - obj = galsim.deprecated.AtmosphericPSF(lam_over_r0=test_lor0, flux=test_flux) - obj2 = 2. * obj - # First test that original obj is unharmed... - np.testing.assert_almost_equal( - obj.getFlux(), test_flux, decimal=param_decimal, - err_msg="Flux param inconsistent after __mul__ (original).") - # Then test new obj2 flux - np.testing.assert_almost_equal( - obj2.getFlux(), test_flux * 2., decimal=param_decimal, - err_msg="Flux param inconsistent after __mul__ (result).") - obj = galsim.deprecated.AtmosphericPSF(lam_over_r0=test_lor0, flux=test_flux) - obj2 = obj / 2. - # First test that original obj is unharmed... 
- np.testing.assert_almost_equal( - obj.getFlux(), test_flux, decimal=param_decimal, - err_msg="Flux param inconsistent after __div__ (original).") - # Then test new obj2 flux - np.testing.assert_almost_equal( - obj2.getFlux(), test_flux / 2., decimal=param_decimal, - err_msg="Flux param inconsistent after __div__ (result).") - t2 = time.time() - print 'time for %s = %.2f'%(funcname(),t2-t1) - - -if __name__ == "__main__": - test_AtmosphericPSF_flux() - test_AtmosphericPSF_properties() - test_AtmosphericPSF_fwhm() - test_atmos_flux_scaling() diff --git a/tests/deprecated/test_ellipse.py b/tests/deprecated/test_ellipse.py deleted file mode 100644 index a349f9dacf1..00000000000 --- a/tests/deprecated/test_ellipse.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright (c) 2012-2017 by the GalSim developers team on GitHub -# https://github.com/GalSim-developers -# -# This file is part of GalSim: The modular galaxy image simulation toolkit. -# https://github.com/GalSim-developers/GalSim -# -# GalSim is free software: redistribution and use in source and binary forms, -# with or without modification, are permitted provided that the following -# conditions are met: -# -# 1. Redistributions of source code must retain the above copyright notice, this -# list of conditions, and the disclaimer given in the accompanying LICENSE -# file. -# 2. Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions, and the disclaimer given in the documentation -# and/or other materials provided with the distribution. 
-# -import numpy as np -import os -import sys - -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - -from galsim import pyfits - -##### set up necessary info for tests -# a few shear values over which we will loop so we can check them all -# note: Rachel started with these q and beta, and then calculated all the other numbers in IDL using -# the standard formulae -q = [0.5, 0.3, 0.1, 0.7] -n_shear = len(q) -beta = [0.5*np.pi, 0.25*np.pi, 0.0*np.pi, np.pi/3.0] -g = [0.333333, 0.538462, 0.818182, 0.176471] -g1 = [-0.33333334, 0.0, 0.81818175, -0.088235296] -g2 = [0.0, 0.53846157, 0.0, 0.15282802] -e = [0.600000, 0.834862, 0.980198, 0.342282] -e1 = [-0.6000000, 0.0, 0.98019803, -0.17114094] -e2 = [0.0, 0.83486235, 0.0, 0.29642480] -eta = [0.693147, 1.20397, 2.30259, 0.356675] -eta1 = [-0.69314718, 0.0, 2.3025851, -0.17833748] -eta2 = [0.0, 1.2039728, 0.0, 0.30888958] -decimal = 5 - -# some ellipse properties over which we will loop - use the shear values above, and: -mu = [0.0, 0.5, -0.1] -n_mu = len(mu) -x_shift = [0.0, 1.7, -3.0] -y_shift = [-1.3, 0.0, 9.1] -n_shift = len(x_shift) - -def funcname(): - import inspect - return inspect.stack()[1][3] - -def all_ellipse_vals(test_ellipse, ind_shear, ind_mu, ind_shift, check_shear=1.0, check_mu=1.0, - check_shift = 1.0): - # this function tests that the various numbers stored in some Ellipse object are consistent with - # the tabulated values that we expect, given indices against which to test - vec = [test_ellipse.getS().g1, test_ellipse.getS().g2, test_ellipse.getMu(), - test_ellipse.getX0().x, test_ellipse.getX0().y] - test_vec = [check_shear*g1[ind_shear], check_shear*g2[ind_shear], check_mu*mu[ind_mu], - check_shift*x_shift[ind_shift], check_shift*y_shift[ind_shift]] - np.testing.assert_array_almost_equal(vec, test_vec, decimal=decimal, - err_msg = "Incorrectly initialized Ellipse") - -def 
test_ellipse_initialization(): - """Test that Ellipses can be initialized in a variety of ways and get the expected results.""" - import time - t1 = time.time() - # make an empty Ellipse and make sure everything is zero - e = galsim.deprecated.Ellipse() - vec = [e.getS().g1, e.getS().g2, e.getMu(), e.getX0().x, e.getX0().y] - vec_ideal = [0.0, 0.0, 0.0, 0.0, 0.0] - np.testing.assert_array_almost_equal(vec, vec_ideal, decimal = decimal, - err_msg = "Incorrectly initialized empty ellipse") - - # then loop over the ways we can initialize, with all things initialized and with only those - # that are non-zero initialized, using args, kwargs in various ways - for ind_shear in range(n_shear): - for ind_mu in range(n_mu): - for ind_shift in range(n_shift): - # initialize with all of shear, mu, shift - ## using a Shear, either as arg or kwarg - ## using a mu, either as arg or kwarg - ## using a shift, either as Position arg or kwargs - ## using the various ways of making a Shear passed through as kwargs - s = galsim.Shear(g1 = g1[ind_shear], g2 = g2[ind_shear]) - p = galsim.PositionD(x_shift[ind_shift], y_shift[ind_shift]) - e = galsim.deprecated.Ellipse(s, mu[ind_mu], p) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift) - e = galsim.deprecated.Ellipse(p, shear=s, mu=mu[ind_mu]) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift) - e = galsim.deprecated.Ellipse(s, mu[ind_mu], x_shift=p.x, y_shift=p.y) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift) - e = galsim.deprecated.Ellipse(shear=s, mu=mu[ind_mu], x_shift=p.x, y_shift=p.y) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift) - e = galsim.deprecated.Ellipse(q = q[ind_shear], - beta = beta[ind_shear]*galsim.radians, - mu=mu[ind_mu], x_shift = p.x, y_shift = p.y) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift) - - # now initialize with only 2 of the 3 and make sure the other is zero - e = galsim.deprecated.Ellipse(mu[ind_mu], p) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_shear=0.0) - e = 
galsim.deprecated.Ellipse(p, mu=mu[ind_mu]) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_shear=0.0) - e = galsim.deprecated.Ellipse(mu[ind_mu], x_shift = p.x, y_shift = p.y) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_shear=0.0) - e = galsim.deprecated.Ellipse(mu = mu[ind_mu], x_shift = p.x, y_shift = p.y) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_shear=0.0) - e = galsim.deprecated.Ellipse(s, p) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_mu=0.0) - e = galsim.deprecated.Ellipse(p, shear=s) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_mu=0.0) - e = galsim.deprecated.Ellipse(s, x_shift = p.x, y_shift = p.y) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_mu=0.0) - e = galsim.deprecated.Ellipse(shear=s, x_shift = p.x, y_shift = p.y) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_mu=0.0) - e = galsim.deprecated.Ellipse(s, mu[ind_mu]) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_shift=0.0) - e = galsim.deprecated.Ellipse(s, mu=mu[ind_mu]) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_shift=0.0) - e = galsim.deprecated.Ellipse(mu[ind_mu], shear=s) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_shift=0.0) - e = galsim.deprecated.Ellipse(shear=s, mu=mu[ind_mu]) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_shift=0.0) - - # now initialize with only 1 of the 3 and make sure the other is zero - e = galsim.deprecated.Ellipse(s) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_mu=0.0, check_shift=0.0) - e = galsim.deprecated.Ellipse(shear=s) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_mu=0.0, check_shift=0.0) - e = galsim.deprecated.Ellipse(eta1=eta1[ind_shear], eta2=eta2[ind_shear]) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_mu=0.0, check_shift=0.0) - e = galsim.deprecated.Ellipse(mu[ind_mu]) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_shear=0.0, check_shift=0.0) - e = 
galsim.deprecated.Ellipse(mu=mu[ind_mu]) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_shear=0.0, check_shift=0.0) - e = galsim.deprecated.Ellipse(p) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_mu=0.0, check_shear=0.0) - e = galsim.deprecated.Ellipse(x_shift = p.x, y_shift = p.y) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_mu=0.0, check_shear=0.0) - # check for some cases that should fail - s = galsim.Shear() - try: - np.testing.assert_raises(TypeError, galsim.deprecated.Ellipse, s, g2=0.3) - np.testing.assert_raises(TypeError, galsim.deprecated.Ellipse, shear=s, x_shift=1, g1=0.2) - np.testing.assert_raises(TypeError, galsim.deprecated.Ellipse, s, - shift=galsim.PositionD(), x_shift=0.1) - np.testing.assert_raises(TypeError, galsim.deprecated.Ellipse, s, s) - np.testing.assert_raises(TypeError, galsim.deprecated.Ellipse, g1=0.1, randomkwarg=0.7) - np.testing.assert_raises(TypeError, galsim.deprecated.Ellipse, shear=0.1) - except ImportError: - print 'The assert_raises tests require nose' - - t2 = time.time() - print 'time for %s = %.2f'%(funcname(),t2-t1) - -if __name__ == "__main__": - test_ellipse_initialization() diff --git a/tests/run_all_tests b/tests/run_all_tests index dd9a209e613..8c352cd3be6 100755 --- a/tests/run_all_tests +++ b/tests/run_all_tests @@ -1,5 +1,5 @@ #!/bin/bash -python=../bin/installed_python +python='/usr/bin/env python' for test in `ls test*.py` do echo $test diff --git a/tests/test_airy.py b/tests/test_airy.py index bd71b792d72..2a3303b9eac 100644 --- a/tests/test_airy.py +++ b/tests/test_airy.py @@ -21,18 +21,13 @@ import os import sys +import galsim from galsim_test_helpers import * path, filename = os.path.split(__file__) imgdir = os.path.join(path, "SBProfile_comparison_images") # Directory containing the reference # images. 
-try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - # These are the default GSParams used when unspecified. We'll check that specifying # these explicitly produces the same results. default_params = galsim.GSParams( diff --git a/tests/test_bandpass.py b/tests/test_bandpass.py index ecf60547bbb..e4872dbcbc2 100644 --- a/tests/test_bandpass.py +++ b/tests/test_bandpass.py @@ -19,16 +19,11 @@ from __future__ import print_function import os import numpy as np -from galsim_test_helpers import * import sys from astropy import units -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim +import galsim +from galsim_test_helpers import * datapath = os.path.join(galsim.meta_data.share_dir, "bandpasses") diff --git a/tests/test_bessel.py b/tests/test_bessel.py index 455411ae329..29af4bae84b 100644 --- a/tests/test_bessel.py +++ b/tests/test_bessel.py @@ -22,18 +22,9 @@ import numpy as np import warnings +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - import os - import sys - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - - @timer def test_j0(): """Test the bessel.j0 function""" diff --git a/tests/test_box.py b/tests/test_box.py index 42f7579da3b..d51650cd1f7 100644 --- a/tests/test_box.py +++ b/tests/test_box.py @@ -21,18 +21,13 @@ import os import sys +import galsim from galsim_test_helpers import * path, filename = os.path.split(__file__) imgdir = os.path.join(path, "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - # These are the default GSParams used when unspecified. 
We'll check that specifying # these explicitly produces the same results. default_params = galsim.GSParams( diff --git a/tests/test_calc.py b/tests/test_calc.py index 80e7b75078f..7a42b32e06d 100644 --- a/tests/test_calc.py +++ b/tests/test_calc.py @@ -19,14 +19,9 @@ from __future__ import print_function import numpy as np +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - @timer def test_hlr(): diff --git a/tests/test_catalog.py b/tests/test_catalog.py index 408a8e207c3..48a4858953d 100644 --- a/tests/test_catalog.py +++ b/tests/test_catalog.py @@ -21,15 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - @timer def test_basic_catalog(): diff --git a/tests/test_cdmodel.py b/tests/test_cdmodel.py index c6cf036d7b9..3f41f50b7b6 100644 --- a/tests/test_cdmodel.py +++ b/tests/test_cdmodel.py @@ -21,16 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim - from galsim.cdmodel import * -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - from galsim.cdmodel import * # Use a deterministic random number generator so we don't fail tests because of rare flukes in the # random numbers. 
@@ -66,13 +59,13 @@ def test_simplegeometry(): it.setValue(center,center+1,level) # set up models, images - cdr0 = PowerLawCD(2,shiftcoeff,0,0,0,0,0,0) + cdr0 = galsim.cdmodel.PowerLawCD(2,shiftcoeff,0,0,0,0,0,0) i0cdr0 = cdr0.applyForward(i0) - cdt0 = PowerLawCD(2,0,shiftcoeff,0,0,0,0,0) + cdt0 = galsim.cdmodel.PowerLawCD(2,0,shiftcoeff,0,0,0,0,0) i0cdt0 = cdt0.applyForward(i0) - cdrx = PowerLawCD(2,0,0,shiftcoeff,0,0,0,0) - cdtx = PowerLawCD(2,0,0,0,shiftcoeff,0,0,0) + cdrx = galsim.cdmodel.PowerLawCD(2,0,0,shiftcoeff,0,0,0,0) + cdtx = galsim.cdmodel.PowerLawCD(2,0,0,0,shiftcoeff,0,0,0) # these should do something ircdtx = cdtx.applyForward(ir) @@ -169,7 +162,8 @@ def test_simplegeometry(): # a model that should not change anything here u = galsim.UniformDeviate(rseed) - cdnull = PowerLawCD(2, 0, 0, shiftcoeff*u(), shiftcoeff*u(), shiftcoeff*u(), shiftcoeff*u(), 0) + cdnull = galsim.cdmodel.PowerLawCD( + 2, 0, 0, shiftcoeff*u(), shiftcoeff*u(), shiftcoeff*u(), shiftcoeff*u(), 0) i0cdnull = cdnull.applyForward(i0) # setting all pixels to 0 that we expect to be not 0... @@ -224,7 +218,7 @@ def test_fluxconservation(): image.addNoise(galsim.GaussianNoise(sigma=noise, rng=urng)) flat = galsim.Image(size, size, dtype=np.float64, init_value=1.) 
- cd = PowerLawCD( + cd = galsim.cdmodel.PowerLawCD( 2, shiftcoeff, 0.94 * shiftcoeff, shiftcoeff/2.4, shiftcoeff/5., shiftcoeff/3.7, shiftcoeff/1.8, alpha) imagecd = cd.applyForward(image) @@ -267,7 +261,7 @@ def test_forwardbackward(): # Define a consistent rng for repeatability urng = galsim.UniformDeviate(rseed) image.addNoise(galsim.GaussianNoise(sigma=noise, rng=urng)) - cd = PowerLawCD( + cd = galsim.cdmodel.PowerLawCD( 2, shiftcoeff * 0.0234, shiftcoeff * 0.05234, shiftcoeff * 0.01312, shiftcoeff * 0.00823, shiftcoeff * 0.07216, shiftcoeff * 0.01934, alpha) @@ -301,7 +295,8 @@ def test_gainratio(): gal2 = galsim.Gaussian(flux=0.5*galflux, sigma=galsigma) image2 = gal2.drawImage(scale=1.,dtype=np.float64) - cd = PowerLawCD(2, shiftcoeff, 1.389*shiftcoeff, shiftcoeff/7.23, 2.*shiftcoeff/2.4323, + cd = galsim.cdmodel.PowerLawCD( + 2, shiftcoeff, 1.389*shiftcoeff, shiftcoeff/7.23, 2.*shiftcoeff/2.4323, shiftcoeff/1.8934, shiftcoeff/3.1, alpha) image_cd = cd.applyForward(image) @@ -321,7 +316,7 @@ def test_exampleimage(): shiftcoeff = 1.e-7 #n, r0, t0, rx, tx, r, t, alpha - cd = PowerLawCD( + cd = galsim.cdmodel.PowerLawCD( 5, 2. * shiftcoeff, shiftcoeff, 1.25 * shiftcoeff, 1.25 * shiftcoeff, 0.75 * shiftcoeff, 0.5 * shiftcoeff, 0.3) # model used externally to bring cdtest1 to cdtest2 diff --git a/tests/test_celestial.py b/tests/test_celestial.py index 0165deb3d61..60b56011819 100644 --- a/tests/test_celestial.py +++ b/tests/test_celestial.py @@ -23,18 +23,12 @@ import math import coord +import galsim from galsim_test_helpers import * imgdir = os.path.join(".", "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - # We'll use these a lot, so just import them. 
from numpy import sin, cos, tan, arcsin, arccos, arctan, sqrt, pi diff --git a/tests/test_chromatic.py b/tests/test_chromatic.py index 27f1a98dc91..b58ec86064f 100644 --- a/tests/test_chromatic.py +++ b/tests/test_chromatic.py @@ -19,13 +19,9 @@ from __future__ import print_function import os import numpy as np + +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - import sys - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim bppath = os.path.join(galsim.meta_data.share_dir, "bandpasses") sedpath = os.path.join(galsim.meta_data.share_dir, "SEDs") diff --git a/tests/test_config_gsobject.py b/tests/test_config_gsobject.py index 7371aec6e79..b4fd21f20fd 100644 --- a/tests/test_config_gsobject.py +++ b/tests/test_config_gsobject.py @@ -21,15 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - @timer def test_gaussian(): diff --git a/tests/test_config_image.py b/tests/test_config_image.py index f1446cdf4ba..2730f0970d4 100644 --- a/tests/test_config_image.py +++ b/tests/test_config_image.py @@ -25,15 +25,9 @@ import re import warnings +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - @timer def test_single(): diff --git a/tests/test_config_noise.py b/tests/test_config_noise.py index 13ab0ac2d33..9d685133b6f 100644 --- a/tests/test_config_noise.py +++ b/tests/test_config_noise.py @@ -23,14 +23,9 @@ import logging import math +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim @timer def 
test_gaussian(): diff --git a/tests/test_config_output.py b/tests/test_config_output.py index 0a9e243f1bf..0a9b2b8720e 100644 --- a/tests/test_config_output.py +++ b/tests/test_config_output.py @@ -28,15 +28,9 @@ import re import glob +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - @timer def test_fits(): diff --git a/tests/test_config_value.py b/tests/test_config_value.py index 43b1dffe163..dc08984edfb 100644 --- a/tests/test_config_value.py +++ b/tests/test_config_value.py @@ -22,15 +22,9 @@ import sys import math +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - @timer def test_float_value(): diff --git a/tests/test_convolve.py b/tests/test_convolve.py index dd0bdf26323..aff04575a40 100644 --- a/tests/test_convolve.py +++ b/tests/test_convolve.py @@ -21,18 +21,12 @@ import os import sys +import galsim from galsim_test_helpers import * imgdir = os.path.join(".", "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - # These are the default GSParams used when unspecified. We'll check that specifying # these explicitly produces the same results. 
default_params = galsim.GSParams( diff --git a/tests/test_correlatednoise.py b/tests/test_correlatednoise.py index e01a46795a1..a46d96d934f 100644 --- a/tests/test_correlatednoise.py +++ b/tests/test_correlatednoise.py @@ -20,16 +20,9 @@ import time import numpy as np +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - import os - import sys - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # Use a deterministic random number generator so we don't fail tests because of rare flukes # in the random numbers. diff --git a/tests/test_deltafunction.py b/tests/test_deltafunction.py index 8f3167bf3f7..9d128612e70 100644 --- a/tests/test_deltafunction.py +++ b/tests/test_deltafunction.py @@ -21,13 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # These are the default GSParams used when unspecified. We'll check that specifying # these explicitly produces the same results. diff --git a/tests/test_deprecated.py b/tests/test_deprecated.py index d7ae2d762db..6f12cd434d0 100644 --- a/tests/test_deprecated.py +++ b/tests/test_deprecated.py @@ -21,14 +21,9 @@ import sys import numpy as np +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim def check_dep(f, *args, **kwargs): """Check that some function raises a GalSimDeprecationWarning as a warning, but not an error. 
diff --git a/tests/test_des.py b/tests/test_des.py index 5b4edacf703..b0b4103db4e 100644 --- a/tests/test_des.py +++ b/tests/test_des.py @@ -20,17 +20,10 @@ import numpy import os import sys + import galsim import galsim.des - from galsim_test_helpers import * - -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - from galsim._pyfits import pyfits @timer diff --git a/tests/test_detectors.py b/tests/test_detectors.py index 806f5e2df15..c9c92562edd 100644 --- a/tests/test_detectors.py +++ b/tests/test_detectors.py @@ -21,16 +21,9 @@ from __future__ import print_function import numpy as np import warnings -from galsim_test_helpers import * -try: - import galsim -except ImportError: - import os - import sys - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim +import galsim +from galsim_test_helpers import * @timer diff --git a/tests/test_draw.py b/tests/test_draw.py index 7af72b3dc59..33f824a3015 100644 --- a/tests/test_draw.py +++ b/tests/test_draw.py @@ -21,14 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # for flux normalization tests test_flux = 1.8 diff --git a/tests/test_exponential.py b/tests/test_exponential.py index e947bbdfe1d..b41e95bfabc 100644 --- a/tests/test_exponential.py +++ b/tests/test_exponential.py @@ -21,18 +21,13 @@ import os import sys +import galsim from galsim_test_helpers import * path, filename = os.path.split(__file__) imgdir = os.path.join(path, "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - # These are the default GSParams used when unspecified. 
We'll check that specifying # these explicitly produces the same results. default_params = galsim.GSParams( diff --git a/tests/test_fitsheader.py b/tests/test_fitsheader.py index 11e33832548..2bcb5ee799b 100644 --- a/tests/test_fitsheader.py +++ b/tests/test_fitsheader.py @@ -21,14 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # Get whatever version of pyfits or astropy we are using from galsim._pyfits import pyfits, pyfits_version diff --git a/tests/test_fouriersqrt.py b/tests/test_fouriersqrt.py index f3966b7c8b3..04900407320 100644 --- a/tests/test_fouriersqrt.py +++ b/tests/test_fouriersqrt.py @@ -21,17 +21,12 @@ import os import sys +import galsim from galsim_test_helpers import * imgdir = os.path.join(".", "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # These are the default GSParams used when unspecified. We'll check that specifying # these explicitly produces the same results. diff --git a/tests/test_gaussian.py b/tests/test_gaussian.py index ad4c014a84b..8ed938ba922 100644 --- a/tests/test_gaussian.py +++ b/tests/test_gaussian.py @@ -21,17 +21,13 @@ import os import sys +import galsim from galsim_test_helpers import * path, filename = os.path.split(__file__) imgdir = os.path.join(path, "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # These are the default GSParams used when unspecified. We'll check that specifying # these explicitly produces the same results. 
diff --git a/tests/test_hsm.py b/tests/test_hsm.py index 83d5e8e78e0..792054728a0 100644 --- a/tests/test_hsm.py +++ b/tests/test_hsm.py @@ -30,14 +30,9 @@ import numpy as np import math +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # define a range of input parameters for the Gaussians that we are testing gaussian_sig_values = [0.5, 1.0, 2.0] diff --git a/tests/test_image.py b/tests/test_image.py index a8ad498df68..cde93e899cb 100644 --- a/tests/test_image.py +++ b/tests/test_image.py @@ -47,17 +47,10 @@ import os import sys import numpy as np - -from galsim_test_helpers import * from distutils.version import LooseVersion -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - +import galsim +from galsim_test_helpers import * from galsim._pyfits import pyfits # Setup info for tests, not likely to change diff --git a/tests/test_inclined.py b/tests/test_inclined.py index bc60b752c86..a338c04c402 100644 --- a/tests/test_inclined.py +++ b/tests/test_inclined.py @@ -24,17 +24,11 @@ from copy import deepcopy import os import sys - -from galsim_test_helpers import * import numpy as np +import galsim +from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # Save images used in regression testing for manual inspection? 
save_profiles = False diff --git a/tests/test_integ.py b/tests/test_integ.py index b2d141c2e84..1f62fbfd37f 100644 --- a/tests/test_integ.py +++ b/tests/test_integ.py @@ -21,16 +21,9 @@ from __future__ import print_function import numpy as np +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - import os - import sys - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim test_sigma = 7. # test value of Gaussian sigma for integral tests test_rel_err = 1.e-7 # the relative accuracy at which to test diff --git a/tests/test_interpolatedimage.py b/tests/test_interpolatedimage.py index c10297b57c4..2bfcd554304 100644 --- a/tests/test_interpolatedimage.py +++ b/tests/test_interpolatedimage.py @@ -24,16 +24,11 @@ import os import sys +import galsim from galsim_test_helpers import * +from galsim._pyfits import pyfits path, filename = os.path.split(__file__) # Get the path to this file for use below... -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - -from galsim._pyfits import pyfits # For reference tests: TESTDIR=os.path.join(path, "interpolant_comparison_files") diff --git a/tests/test_kolmogorov.py b/tests/test_kolmogorov.py index d516d51ac81..62b08d7eb4c 100644 --- a/tests/test_kolmogorov.py +++ b/tests/test_kolmogorov.py @@ -21,17 +21,13 @@ import os import sys +import galsim from galsim_test_helpers import * path, filename = os.path.split(__file__) imgdir = os.path.join(path, "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # These are the default GSParams used when unspecified. We'll check that specifying # these explicitly produces the same results. 
diff --git a/tests/test_lensing.py b/tests/test_lensing.py index 0c3fded9a5a..78a36459de9 100644 --- a/tests/test_lensing.py +++ b/tests/test_lensing.py @@ -23,14 +23,9 @@ import sys import warnings +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim refdir = os.path.join(".", "lensing_reference_data") # Directory containing the reference diff --git a/tests/test_lsst.py b/tests/test_lsst.py index abf2a0163bb..05e69a7ad95 100644 --- a/tests/test_lsst.py +++ b/tests/test_lsst.py @@ -21,11 +21,9 @@ import numpy as np import warnings import os -import galsim import sys -from galsim_test_helpers import funcname -from galsim.celestial import CelestialCoord +import galsim from galsim_test_helpers import * have_lsst_stack = True @@ -33,8 +31,6 @@ try: from galsim.lsst import LsstCamera, LsstWCS except ImportError as ee: - #if __name__ == '__main__': - #raise # make sure that you are failing because the stack isn't there, # rather than because of some bug in lsst_wcs.py if "You cannot use the LSST module" in str(ee): @@ -204,7 +200,7 @@ def setUpClass(cls): cls.decPointing = -33.015167519966 cls.rotation = 27.0 - pointing = CelestialCoord(cls.raPointing*galsim.degrees, cls.decPointing*galsim.degrees) + pointing = galsim.CelestialCoord(cls.raPointing*galsim.degrees, cls.decPointing*galsim.degrees) cls.camera = LsstCamera(pointing, cls.rotation*galsim.degrees) @timer @@ -272,13 +268,13 @@ def palpyPupilCoords(star, pointing): for ra, dec, rotation in zip(ra_pointing_list, dec_pointing_list, rotation_angle_list): - pointing = CelestialCoord(ra*galsim.radians, dec*galsim.radians) + pointing = galsim.CelestialCoord(ra*galsim.radians, dec*galsim.radians) camera = LsstCamera(pointing, rotation*galsim.radians) dra_list = (rng.random_sample(100)-0.5)*0.5 ddec_list = (rng.random_sample(100)-0.5)*0.5 - star_list = 
np.array([CelestialCoord((ra+dra)*galsim.radians, + star_list = np.array([galsim.CelestialCoord((ra+dra)*galsim.radians, (dec+ddec)*galsim.radians) for dra, ddec in zip(dra_list, ddec_list)]) @@ -312,7 +308,7 @@ def test_pupil_coordinates_from_floats(self): raPointing = 113.0 decPointing = -25.6 rot = 82.1 - pointing = CelestialCoord(raPointing*galsim.degrees, decPointing*galsim.degrees) + pointing = galsim.CelestialCoord(raPointing*galsim.degrees, decPointing*galsim.degrees) camera = LsstCamera(pointing, rot*galsim.degrees) arcsec_per_radian = 180.0*3600.0/np.pi @@ -321,7 +317,7 @@ def test_pupil_coordinates_from_floats(self): decList = (rng.random_sample(100)-0.5)*20.0+decPointing pointingList = [] for rr, dd in zip(raList, decList): - pointingList.append(CelestialCoord(rr*galsim.degrees, dd*galsim.degrees)) + pointingList.append(galsim.CelestialCoord(rr*galsim.degrees, dd*galsim.degrees)) control_x, control_y = camera.pupilCoordsFromPoint(pointingList) test_x, test_y = camera.pupilCoordsFromFloat(np.radians(raList), np.radians(decList)) @@ -423,9 +419,9 @@ def test_rotation_angle_pupil_coordinate_convention(self): dec = 0.0 delta = 0.001 - pointing = CelestialCoord(ra*galsim.degrees, dec*galsim.degrees) - north = CelestialCoord(ra*galsim.degrees, (dec+delta)*galsim.degrees) - east = CelestialCoord((ra+delta)*galsim.degrees, dec*galsim.degrees) + pointing = galsim.CelestialCoord(ra*galsim.degrees, dec*galsim.degrees) + north = galsim.CelestialCoord(ra*galsim.degrees, (dec+delta)*galsim.degrees) + east = galsim.CelestialCoord((ra+delta)*galsim.degrees, dec*galsim.degrees) camera = LsstCamera(pointing, 0.0*galsim.degrees) x_0, y_0 = camera.pupilCoordsFromPoint(pointing) @@ -474,9 +470,9 @@ def test_rotation_angle_pixel_coordinate_convention(self): dec = 0.0 delta = 0.001 - pointing = CelestialCoord(ra*galsim.degrees, dec*galsim.degrees) - north = CelestialCoord(ra*galsim.degrees, (dec+delta)*galsim.degrees) - east = CelestialCoord((ra+delta)*galsim.degrees, 
dec*galsim.degrees) + pointing = galsim.CelestialCoord(ra*galsim.degrees, dec*galsim.degrees) + north = galsim.CelestialCoord(ra*galsim.degrees, (dec+delta)*galsim.degrees) + east = galsim.CelestialCoord((ra+delta)*galsim.degrees, dec*galsim.degrees) camera = LsstCamera(pointing, 0.0*galsim.degrees) x_0, y_0, name = camera.pixelCoordsFromPoint(pointing) @@ -532,7 +528,7 @@ def setUpClass(cls): cls.rotation = 27.0 * galsim.degrees cls.chip_name = 'R:0,1 S:1,2' - cls.pointing = CelestialCoord(cls.raPointing, cls.decPointing) + cls.pointing = galsim.CelestialCoord(cls.raPointing, cls.decPointing) cls.wcs = LsstWCS(cls.pointing, cls.rotation, cls.chip_name) @timer @@ -542,7 +538,7 @@ def test_constructor(self): when you specify a nonsense chip. """ - pointing = CelestialCoord(112.0*galsim.degrees, -39.0*galsim.degrees) + pointing = galsim.CelestialCoord(112.0*galsim.degrees, -39.0*galsim.degrees) rotation = 23.1*galsim.degrees wcs1 = LsstWCS(pointing, rotation, 'R:1,1 S:2,2') @@ -559,7 +555,7 @@ def test_attribute_exceptions(self): """ with self.assertRaises(AttributeError) as context: - self.wcs.pointing = CelestialCoord(22.0*galsim.degrees, -17.0*galsim.degrees) + self.wcs.pointing = galsim.CelestialCoord(22.0*galsim.degrees, -17.0*galsim.degrees) with self.assertRaises(AttributeError) as context: self.wcs.rotation_angle = 23.0*galsim.degrees @@ -606,10 +602,10 @@ def test_tan_wcs(self): [self.wcs._chip_name]*len(xPixList)) for rr1, dd1, rr2, dd2 in zip(raTest, decTest, wcsRa, wcsDec): - pp = CelestialCoord(rr1*galsim.radians, dd1*galsim.radians) + pp = galsim.CelestialCoord(rr1*galsim.radians, dd1*galsim.radians) dist = \ - pp.distanceTo(CelestialCoord(rr2*galsim.radians, dd2*galsim.radians))/galsim.arcsec + pp.distanceTo(galsim.CelestialCoord(rr2*galsim.radians, dd2*galsim.radians))/galsim.arcsec msg = 'error in tanWcs was %e arcsec' % dist self.assertLess(dist, 0.001, msg=msg) @@ -664,13 +660,13 @@ def test_tan_sip_wcs(self): for rrTest, ddTest, rrTan, ddTan, 
rrSip, ddSip in \ zip(raTest, decTest, tanWcsRa, tanWcsDec, tanSipWcsRa, tanSipWcsDec): - pp = CelestialCoord(rrTest*galsim.radians, ddTest*galsim.radians) + pp = galsim.CelestialCoord(rrTest*galsim.radians, ddTest*galsim.radians) distTan = \ - pp.distanceTo(CelestialCoord(rrTan*galsim.radians, ddTan*galsim.radians))/galsim.arcsec + pp.distanceTo(galsim.CelestialCoord(rrTan*galsim.radians, ddTan*galsim.radians))/galsim.arcsec distSip = \ - pp.distanceTo(CelestialCoord(rrSip*galsim.radians, ddSip*galsim.radians))/galsim.arcsec + pp.distanceTo(galsim.CelestialCoord(rrSip*galsim.radians, ddSip*galsim.radians))/galsim.arcsec msg = 'error in TAN WCS %e arcsec; error in TAN-SIP WCS %e arcsec' % (distTan, distSip) self.assertLess(distSip, 0.001, msg=msg) @@ -728,7 +724,7 @@ def test_eq(self): wcs1 = wcs1._newOrigin(new_origin) self.assertNotEqual(self.wcs, wcs1) - other_pointing = CelestialCoord(1.9*galsim.degrees, -34.0*galsim.degrees) + other_pointing = galsim.CelestialCoord(1.9*galsim.degrees, -34.0*galsim.degrees) wcs2 = LsstWCS(other_pointing, self.rotation, self.chip_name) self.assertNotEqual(self.wcs, wcs2) @@ -744,7 +740,7 @@ def test_copy(self): Test that copy() works """ - pointing = CelestialCoord(64.82*galsim.degrees, -16.73*galsim.degrees) + pointing = galsim.CelestialCoord(64.82*galsim.degrees, -16.73*galsim.degrees) rotation = 116.8*galsim.degrees chip_name = 'R:1,2 S:2,2' wcs0 = LsstWCS(pointing, rotation, chip_name) diff --git a/tests/test_metacal.py b/tests/test_metacal.py index 800addcf879..0720dcc206c 100644 --- a/tests/test_metacal.py +++ b/tests/test_metacal.py @@ -19,8 +19,8 @@ from __future__ import print_function import time import numpy as np -import galsim +import galsim from galsim_test_helpers import * VAR_NDECIMAL=4 diff --git a/tests/test_moffat.py b/tests/test_moffat.py index 5823bb0b90f..2040494a3b2 100644 --- a/tests/test_moffat.py +++ b/tests/test_moffat.py @@ -21,17 +21,13 @@ import os import sys +import galsim from galsim_test_helpers 
import * path, filename = os.path.split(__file__) imgdir = os.path.join(path, "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # These are the default GSParams used when unspecified. We'll check that specifying # these explicitly produces the same results. diff --git a/tests/test_noise.py b/tests/test_noise.py index 12e359e147d..dc31ce49fd4 100644 --- a/tests/test_noise.py +++ b/tests/test_noise.py @@ -21,15 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - testseed = 1000 precision = 10 diff --git a/tests/test_optics.py b/tests/test_optics.py index 36dd1bf816b..fd938233799 100644 --- a/tests/test_optics.py +++ b/tests/test_optics.py @@ -21,17 +21,11 @@ import os import sys +import galsim from galsim_test_helpers import * imgdir = os.path.join(".", "Optics_comparison_images") # Directory containing the reference images. 
-try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - testshape = (512, 512) # shape of image arrays for all tests diff --git a/tests/test_phase_psf.py b/tests/test_phase_psf.py index b7f49996b40..f0aff61c35f 100644 --- a/tests/test_phase_psf.py +++ b/tests/test_phase_psf.py @@ -19,16 +19,9 @@ from __future__ import print_function import os import numpy as np -from galsim_test_helpers import * - -try: - import galsim -except ImportError: - import sys - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim +import galsim +from galsim_test_helpers import * imgdir = os.path.join(".", "Optics_comparison_images") # Directory containing the reference images. diff --git a/tests/test_photon_array.py b/tests/test_photon_array.py index a183a240b47..bcb487e564c 100644 --- a/tests/test_photon_array.py +++ b/tests/test_photon_array.py @@ -28,15 +28,9 @@ except ImportError: no_astroplan = True +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - bppath = os.path.join(galsim.meta_data.share_dir, "bandpasses") sedpath = os.path.join(galsim.meta_data.share_dir, "SEDs") diff --git a/tests/test_pse.py b/tests/test_pse.py index 6e110414897..db990b1f696 100644 --- a/tests/test_pse.py +++ b/tests/test_pse.py @@ -21,14 +21,9 @@ import numpy as np import time +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim path, filename = os.path.split(__file__) datapath = os.path.abspath(os.path.join(path, "../examples/data/")) diff --git a/tests/test_random.py b/tests/test_random.py index 
ea68cf7a91d..56472b1297e 100644 --- a/tests/test_random.py +++ b/tests/test_random.py @@ -21,14 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # # Note: all tests below were generated using the python interface to the RNG. Eventually need tests diff --git a/tests/test_randwalk.py b/tests/test_randwalk.py index 2972de5c332..d31e8c8abac 100644 --- a/tests/test_randwalk.py +++ b/tests/test_randwalk.py @@ -20,16 +20,10 @@ import numpy as np import os import sys -import galsim +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - @timer def test_randwalk_defaults(): diff --git a/tests/test_real.py b/tests/test_real.py index 431db2410b2..5981a27b2fa 100644 --- a/tests/test_real.py +++ b/tests/test_real.py @@ -21,14 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim bppath = os.path.join(galsim.meta_data.share_dir, "bandpasses") sedpath = os.path.join(galsim.meta_data.share_dir, "SEDs") diff --git a/tests/test_scene.py b/tests/test_scene.py index 25e4d6f3c66..8fd6f2213c9 100644 --- a/tests/test_scene.py +++ b/tests/test_scene.py @@ -19,15 +19,11 @@ from __future__ import print_function import os import numpy as np -from galsim_test_helpers import * import sys -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim +import galsim +from galsim_test_helpers import * + path, filename = os.path.split(__file__) datapath = os.path.abspath(os.path.join(path, "../examples/data/")) 
diff --git a/tests/test_sed.py b/tests/test_sed.py index 970ba8cfd01..45ab1033614 100644 --- a/tests/test_sed.py +++ b/tests/test_sed.py @@ -19,17 +19,13 @@ from __future__ import print_function import os import numpy as np -from galsim_test_helpers import * import sys from astropy import units, constants import warnings -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim +import galsim +from galsim_test_helpers import * + bppath = os.path.join(galsim.meta_data.share_dir, "bandpasses") sedpath = os.path.join(galsim.meta_data.share_dir, "SEDs") diff --git a/tests/test_sensor.py b/tests/test_sensor.py index 48f8d50bdaa..2f04261debe 100644 --- a/tests/test_sensor.py +++ b/tests/test_sensor.py @@ -21,14 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim @timer def test_simple(): diff --git a/tests/test_sersic.py b/tests/test_sersic.py index 3fa0b366ca6..1f32e4e3310 100644 --- a/tests/test_sersic.py +++ b/tests/test_sersic.py @@ -21,17 +21,13 @@ import os import sys +import galsim from galsim_test_helpers import * path, filename = os.path.split(__file__) imgdir = os.path.join(path, "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # These are the default GSParams used when unspecified. We'll check that specifying # these explicitly produces the same results. 
diff --git a/tests/test_shapelet.py b/tests/test_shapelet.py index 265d9bf259b..46e01c0158d 100644 --- a/tests/test_shapelet.py +++ b/tests/test_shapelet.py @@ -21,18 +21,12 @@ import os import sys +import galsim from galsim_test_helpers import * imgdir = os.path.join(".", "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - # define a series of tests @timer diff --git a/tests/test_shear.py b/tests/test_shear.py index 4f1694bbcaf..06e3659f570 100644 --- a/tests/test_shear.py +++ b/tests/test_shear.py @@ -21,14 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # Below are a set of tests to make sure that we have achieved consistency in defining shears and # ellipses using different conventions. The underlying idea is that in test_base.py we already diff --git a/tests/test_spergel.py b/tests/test_spergel.py index 8fa3fe656e6..7e17d2c9723 100644 --- a/tests/test_spergel.py +++ b/tests/test_spergel.py @@ -21,17 +21,13 @@ import os import sys +import galsim from galsim_test_helpers import * path, filename = os.path.split(__file__) imgdir = os.path.join(path, "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # These are the default GSParams used when unspecified. We'll check that specifying # these explicitly produces the same results. 
diff --git a/tests/test_sum.py b/tests/test_sum.py index 5b5a85d64ae..156bda07d8f 100644 --- a/tests/test_sum.py +++ b/tests/test_sum.py @@ -21,17 +21,12 @@ import os import sys +import galsim from galsim_test_helpers import * imgdir = os.path.join(".", "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # These are the default GSParams used when unspecified. We'll check that specifying # these explicitly produces the same results. diff --git a/tests/test_table.py b/tests/test_table.py index b25415f5e97..6d4cf341fb3 100644 --- a/tests/test_table.py +++ b/tests/test_table.py @@ -27,15 +27,10 @@ import os import numpy as np +import galsim from galsim_test_helpers import * path, filename = os.path.split(__file__) # Get the path to this file for use below... -try: - import galsim -except ImportError: - import sys - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim TESTDIR=os.path.join(path, "table_comparison_files") diff --git a/tests/test_transforms.py b/tests/test_transforms.py index 54ac4866733..37fbc8847f9 100644 --- a/tests/test_transforms.py +++ b/tests/test_transforms.py @@ -21,17 +21,12 @@ import os import sys +import galsim from galsim_test_helpers import * imgdir = os.path.join(".", "SBProfile_comparison_images") # Directory containing the reference # images. 
-try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # for flux normalization tests test_flux = 1.8 diff --git a/tests/test_utilities.py b/tests/test_utilities.py index bc4d2a0d7a2..e1cb9a277a3 100644 --- a/tests/test_utilities.py +++ b/tests/test_utilities.py @@ -21,14 +21,8 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim -import galsim.utilities testshape = (512, 512) # shape of image arrays for all tests decimal = 6 # Last decimal place used for checking equality of float arrays, see diff --git a/tests/test_wcs.py b/tests/test_wcs.py index f604fd8a39f..65198601830 100644 --- a/tests/test_wcs.py +++ b/tests/test_wcs.py @@ -22,14 +22,9 @@ import sys import warnings +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # These positions will be used a few times below, so define them here. # One of the tests requires that the last pair are integers, so don't change that. diff --git a/tests/test_wfirst.py b/tests/test_wfirst.py index 0655673b36f..6d7b538b56e 100644 --- a/tests/test_wfirst.py +++ b/tests/test_wfirst.py @@ -21,18 +21,10 @@ from __future__ import print_function import numpy as np +import galsim +import galsim.wfirst from galsim_test_helpers import * -try: - import galsim - import galsim.wfirst -except ImportError: - import os - import sys - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - import galsim.wfirst @timer def skip_wfirst_wcs():