From 83b95d98b4f867b3ddde770afe467bc20cc77d93 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sat, 30 Dec 2017 13:09:14 -0500 Subject: [PATCH 001/111] First pass at implementing pybind11 wrapping (#809-pybind11) --- SConstruct | 96 +++++++++++++++++---- galsim/random.py | 10 +-- pysrc/Bessel.cpp | 27 +++--- pysrc/Bounds.cpp | 32 +++---- pysrc/CDModel.cpp | 16 ++-- pysrc/CorrelatedNoise.cpp | 35 -------- pysrc/HSM.cpp | 47 +++++----- pysrc/Image.cpp | 56 ++++++------ pysrc/Integ.cpp | 15 ++-- pysrc/Interpolant.cpp | 25 +++--- pysrc/PhotonArray.cpp | 18 ++-- pysrc/PyBind11Helper.h | 96 +++++++++++++++++++++ pysrc/Random.cpp | 31 ++++--- pysrc/RealGalaxy.cpp | 10 +-- pysrc/SBAdd.cpp | 29 ++++--- pysrc/SBAiry.cpp | 10 +-- pysrc/SBBox.cpp | 13 +-- pysrc/SBConvolve.cpp | 37 ++++---- pysrc/SBDeconvolve.cpp | 10 +-- pysrc/SBDeltaFunction.cpp | 10 +-- pysrc/SBExponential.cpp | 12 +-- pysrc/SBFourierSqrt.cpp | 10 +-- pysrc/SBGaussian.cpp | 10 +-- pysrc/SBInclinedExponential.cpp | 12 +-- pysrc/SBInclinedSersic.cpp | 11 +-- pysrc/SBInterpolatedImage.cpp | 31 +++---- pysrc/SBKolmogorov.cpp | 10 +-- pysrc/SBMoffat.cpp | 12 +-- pysrc/SBProfile.cpp | 31 ++----- pysrc/SBSersic.cpp | 16 ++-- pysrc/SBShapelet.cpp | 19 ++--- pysrc/SBSpergel.cpp | 12 +-- pysrc/SBTransform.cpp | 10 +-- pysrc/Silicon.cpp | 38 ++++----- pysrc/Table.cpp | 38 ++++----- pysrc/WCS.cpp | 33 ++++---- pysrc/module.cpp | 146 +++++++++++++++++--------------- 37 files changed, 541 insertions(+), 533 deletions(-) delete mode 100644 pysrc/CorrelatedNoise.cpp create mode 100644 pysrc/PyBind11Helper.h diff --git a/SConstruct b/SConstruct index dc7bd16539a..d2096d7bf4c 100644 --- a/SConstruct +++ b/SConstruct @@ -88,9 +88,7 @@ opts.Add('TMV_DIR','Explicitly give the tmv prefix','') opts.Add('TMV_LINK','File that contains the linking instructions for TMV','') opts.Add('FFTW_DIR','Explicitly give the fftw3 prefix','') opts.Add('BOOST_DIR','Explicitly give the boost prefix','') -opts.Add(BoolVariable('USE_BOOST', - 'Use 
the local boost installation for optional boost header files', - False)) +opts.Add(BoolVariable('USE_BOOST','Use boost python for the wrapping, rather than pybind11',False)) opts.Add(PathVariable('EXTRA_INCLUDE_PATH', 'Extra paths for header files (separated by : if more than 1)', @@ -392,7 +390,7 @@ def BasicCCFlags(env): else: env.Replace(CCFLAGS=['-O2']) sse_flags = ['-msse2', '-msse'] - env.Append(CCFLAGS=['-std=c++98','-fno-strict-aliasing']) + env.Append(CCFLAGS=['-fno-strict-aliasing']) # Unfortunately this next flag requires strict-aliasing, but allowing that # opens up a Pandora's box of bugs and warnings, so I don't want to do that. #env.Append(CCFLAGS=['-ftree-vectorize']) @@ -411,7 +409,6 @@ def BasicCCFlags(env): else: env.Replace(CCFLAGS=['-O2']) sse_flags = ['-msse2', '-msse'] - env.Append(CCFLAGS=['-std=c++98']) if env['WITH_PROF']: env.Append(CCFLAGS=['-pg']) env.Append(LINKFLAGS=['-pg']) @@ -421,7 +418,7 @@ def BasicCCFlags(env): env.Append(CCFLAGS=['-g3']) elif compiler == 'icpc': - env.Replace(CCFLAGS=['-O2','-std=c++98']) + env.Replace(CCFLAGS=['-O2']) sse_flags = ['-msse2', '-msse'] if version >= 10: env.Append(CCFLAGS=['-vec-report0']) @@ -910,6 +907,15 @@ def TryRunResult(config,text,name): return ok +def CheckFlags(context,try_flags,source_file): + init_flags = context.env['CCFLAGS'] + context.env.PrependUnique(CCFLAGS=try_flags) + result = context.TryCompile(source_file,'.cpp') + if not result: + context.env.Replace(CCFLAGS=init_flags) + return result + + def CheckLibsSimple(config,try_libs,source_file,prepend=True): init_libs = [] if 'LIBS' in config.env._dict.keys(): @@ -1647,6 +1653,48 @@ def CheckCoord(config): return 1 +def CheckPyBind11(config): + config.Message('Checking for pybind11... 
') + + result, output = TryScript(config,"import pybind11",python) + config.Result(result) + if not result: + ErrorExit("Unable to import pybind11 using the python executable:\n" + python) + + result, pybind11_ver = TryScript(config,"import pybind11; print(pybind11.__version__)",python) + print('pybind11 version is',pybind11_ver) + + config.Message('Checking if we can build against PyBind11... ') + + result, dir1 = TryScript(config,"import pybind11; print(pybind11.get_include())",python) + result, dir2 = TryScript(config,"import pybind11; print(pybind11.get_include(True))",python) + config.env.Append(CPPPATH=[dir1,dir2]) + + pb_source_file = """ +#include + +int check_pb_run() { return 23; } + +PYBIND11_PLUGIN(check_pb) { + pybind11::module m("check_pb"); + m.def("run",&check_pb_run); + return m.ptr(); +} +""" + result = (CheckFlags(config, '', pb_source_file) or + CheckFlags(config, '-std=c++14', pb_source_file) or + CheckFlags(config, '-std=c++11', pb_source_file)) + if not result: + ErrorExit("Unable to compile C++ source code using pybind11:\n" + python) + + result = CheckModuleLibs(config,[''],pb_source_file,'check_pb') + if not result: + ErrorExit("Unable to make a python module with pybind11:\n" + python) + + config.Result(result) + return result + + def CheckBoostPython(config): bp_source_file = """ @@ -1670,7 +1718,8 @@ BOOST_PYTHON_MODULE(check_bp) { """ config.Message('Checking if we can build against Boost.Python... 
') - result = config.TryCompile(bp_source_file,'.cpp') + result = (CheckFlags(config, '-std=c++98', bp_source_file) or + CheckFlags(config, '', bp_source_file)) if not result: ErrorExit('Unable to compile a file with #include "boost/python.hpp"') @@ -1694,9 +1743,11 @@ BOOST_PYTHON_MODULE(check_bp) { if not result: ErrorExit('Unable to build a python loadable module with Boost.Python') + config.env.AppendUnique(CPPDEFINES=['USE_BOOST']) config.Result(1) return 1 + # If the compiler is incompatible with the compiler that was used to build python, # then there can be problems with the exception passing between the C++ layer and the # python layer. We don't know any solution to this, but it's worth letting the user @@ -1712,16 +1763,28 @@ def CheckPythonExcept(config): #pragma GCC diagnostic ignored "-Wunused-local-typedefs" #endif #endif -#define BOOST_NO_CXX11_SMART_PTR -#include "boost/python.hpp" #include +#ifdef USE_BOOST +#define BOOST_NO_CXX11_SMART_PTR +#include "boost/python.hpp" +#else +#include +#endif void run_throw() { throw std::runtime_error("test error handling"); } +#ifdef USE_BOOST BOOST_PYTHON_MODULE(test_throw) { - boost::python::def("run",&run_throw); + boost::python::def("run", &run_throw); } +#else +PYBIND11_PLUGIN(test_throw) { + pybind11::module test_throw("test_throw"); + test_throw.def("run", &run_throw); + return test_throw.ptr(); +} +#endif """ py_source_file = """ import test_throw @@ -1855,7 +1918,8 @@ def DoCppChecks(config): ##### # Check for boost: - config.CheckBoost() + if config.env['USE_BOOST']: + config.CheckBoost() ##### # Check for tmv: @@ -1953,7 +2017,10 @@ def DoPyChecks(config): config.CheckPyFITS() config.CheckFuture() config.CheckCoord() - config.CheckBoostPython() + if config.env['USE_BOOST']: + config.CheckBoostPython() + else: + config.CheckPyBind11() config.CheckPythonExcept() @@ -2014,10 +2081,6 @@ def DoConfig(env): print('TMV Extra Debugging turned on') env.AppendUnique(CPPDEFINES=['TMV_EXTRA_DEBUG']) - if 
env['USE_BOOST']: - print('Using local boost header files') - env.AppendUnique(CPPDEFINES=['USE_BOOST']) - # Don't bother with checks if doing scons -c if not env.GetOption('clean'): # Sometimes when you are changing around things in other directories, SCons doesn't notice. @@ -2044,6 +2107,7 @@ def DoConfig(env): 'CheckPyFITS' : CheckPyFITS , 'CheckFuture' : CheckFuture , 'CheckCoord' : CheckCoord , + 'CheckPyBind11' : CheckPyBind11 , 'CheckBoostPython' : CheckBoostPython , 'CheckPythonExcept' : CheckPythonExcept , }) diff --git a/galsim/random.py b/galsim/random.py index c85c058e8e1..c7b74568cab 100644 --- a/galsim/random.py +++ b/galsim/random.py @@ -163,7 +163,7 @@ def __copy__(self): def __getstate__(self): d = self.__dict__.copy() - d['rng_str'] = self._rng.serialize() + d['rng_str'] = self.serialize() d.pop('_rng') return d @@ -189,7 +189,7 @@ def raw(self): of random deviate for this class, just return the raw integer value that would have been used to generate this value. """ - return self._rng.raw() + return int(self._rng.raw()) def generate(self, array): """Generate many pseudo-random values, filling in the values of a numpy array. @@ -223,7 +223,7 @@ def __ne__(self, other): __hash__ = None def serialize(self): - return self._rng.serialize() + return str(self._rng.serialize()) def _seed_repr(self): s = self.serialize().split(' ') @@ -819,7 +819,7 @@ def __str__(self): def __eq__(self, other): if repr(self) != repr(other): return False - return (self._rng.serialize() == other._rng.serialize() and + return (self.serialize() == other.serialize() and self._function == other._function and self._xmin == other._xmin and self._xmax == other._xmax and @@ -829,7 +829,7 @@ def __eq__(self, other): # Functions aren't picklable, so for pickling, we reinitialize the DistDeviate using the # original function parameter, which may be a string or a file name. 
def __getinitargs__(self): - return (self._rng.serialize(), self._function, self._xmin, self._xmax, + return (self.serialize(), self._function, self._xmin, self._xmax, self._interpolant, self._npoints) diff --git a/pysrc/Bessel.cpp b/pysrc/Bessel.cpp index 5b3a55e7df7..be3ef0ef39c 100644 --- a/pysrc/Bessel.cpp +++ b/pysrc/Bessel.cpp @@ -17,29 +17,22 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "math/BesselRoots.h" #include "math/Bessel.h" -namespace bp = boost::python; - namespace galsim { namespace math { - void pyExportBessel() { - - bp::def("j0_root", &getBesselRoot0); - // In python, with switch from mostly matching the boost names for these to matching - // the names scipy.special uses. - bp::def("j0", &j0); - bp::def("j1", &j1); - bp::def("jv", &cyl_bessel_j); - bp::def("yv", &cyl_bessel_y); - bp::def("iv", &cyl_bessel_i); - bp::def("kv", &cyl_bessel_k); - + void pyExportBessel(PYBIND11_MODULE& _galsim) + { + GALSIM_DOT def("j0_root", &getBesselRoot0); + GALSIM_DOT def("j0", &j0); + GALSIM_DOT def("j1", &j1); + GALSIM_DOT def("jv", &cyl_bessel_j); + GALSIM_DOT def("yv", &cyl_bessel_y); + GALSIM_DOT def("iv", &cyl_bessel_i); + GALSIM_DOT def("kv", &cyl_bessel_k); } } // namespace math diff --git a/pysrc/Bounds.cpp b/pysrc/Bounds.cpp index 87d3a4d7bc7..cf9345533db 100644 --- a/pysrc/Bounds.cpp +++ b/pysrc/Bounds.cpp @@ -17,41 +17,37 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "Bounds.h" -namespace bp = boost::python; - namespace galsim { template - static void WrapPosition(const std::string& suffix) + static void WrapPosition(PYBIND11_MODULE& _galsim, const std::string& suffix) { - bp::class_< Position >(("Position" + suffix).c_str(), bp::no_init) + bp::class_ >(GALSIM_COMMA ("Position" + suffix).c_str() BP_NOINIT) .def(bp::init()) .def_readonly("x", &Position::x) .def_readonly("y", &Position::y); } template - static void WrapBounds(const std::string& suffix) + static void WrapBounds(PYBIND11_MODULE& _galsim, const std::string& suffix) { - bp::class_< Bounds >(("Bounds" + suffix).c_str(), bp::no_init) + bp::class_< Bounds >(GALSIM_COMMA ("Bounds" + suffix).c_str() BP_NOINIT) .def(bp::init()) - .add_property("xmin", &Bounds::getXMin) - .add_property("xmax", &Bounds::getXMax) - .add_property("ymin", &Bounds::getYMin) - .add_property("ymax", &Bounds::getYMax); + .def_property_readonly("xmin", &Bounds::getXMin) + .def_property_readonly("xmax", &Bounds::getXMax) + .def_property_readonly("ymin", &Bounds::getYMin) + .def_property_readonly("ymax", &Bounds::getYMax); } - void pyExportBounds() + void pyExportBounds(PYBIND11_MODULE& _galsim) { - WrapPosition("D"); - WrapPosition("I"); - WrapBounds("D"); - WrapBounds("I"); + WrapPosition(_galsim, "D"); + WrapPosition(_galsim, "I"); + WrapBounds(_galsim, "D"); + WrapBounds(_galsim, "I"); } } // namespace galsim diff --git a/pysrc/CDModel.cpp b/pysrc/CDModel.cpp index 9ea75799a86..5b0b8a086c9 100644 --- a/pysrc/CDModel.cpp +++ b/pysrc/CDModel.cpp @@ -17,29 +17,25 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "CDModel.h" -namespace bp = boost::python; - namespace galsim { template - static void WrapTemplates() + static void WrapTemplates(PYBIND11_MODULE& _galsim) { typedef void (*ApplyCD_func)(ImageView& , const BaseImage& , const BaseImage& , const BaseImage& , const BaseImage& , const BaseImage& , const int , const double ); - bp::def("_ApplyCD", ApplyCD_func(&ApplyCD)); + GALSIM_DOT def("_ApplyCD", ApplyCD_func(&ApplyCD)); } - void pyExportCDModel() + void pyExportCDModel(PYBIND11_MODULE& _galsim) { - WrapTemplates(); - WrapTemplates(); + WrapTemplates(_galsim); + WrapTemplates(_galsim); } } // namespace galsim diff --git a/pysrc/CorrelatedNoise.cpp b/pysrc/CorrelatedNoise.cpp deleted file mode 100644 index e25301e0075..00000000000 --- a/pysrc/CorrelatedNoise.cpp +++ /dev/null @@ -1,35 +0,0 @@ -/* -*- c++ -*- - * Copyright (c) 2012-2017 by the GalSim developers team on GitHub - * https://github.com/GalSim-developers - * - * This file is part of GalSim: The modular galaxy image simulation toolkit. - * https://github.com/GalSim-developers/GalSim - * - * GalSim is free software: redistribution and use in source and binary forms, - * with or without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this - * list of conditions, and the disclaimer given in the accompanying LICENSE - * file. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions, and the disclaimer given in the documentation - * and/or other materials provided with the distribution. 
- */ - -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - -#include "Interpolant.h" -#include "CorrelatedNoise.h" - -namespace bp = boost::python; - -namespace galsim { - - void pyExportCorrelationFunction() - { - bp::def("_calculateCovarianceMatrix", calculateCovarianceMatrix); - } - -} // namespace galsim diff --git a/pysrc/HSM.cpp b/pysrc/HSM.cpp index 327ccaad13f..34329746cf7 100644 --- a/pysrc/HSM.cpp +++ b/pysrc/HSM.cpp @@ -17,20 +17,13 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" - -#define BOOST_PYTHON_MAX_ARITY 22 // We have a function with 21 params here... -// c.f. www.boost.org/libs/python/doc/v2/configuration.html -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "hsm/PSFCorr.h" -namespace bp = boost::python; - namespace galsim { namespace hsm { - static ShapeData* ShapeData_init( + static BP_CONSTRUCTOR(ShapeData_init, ShapeData, const galsim::Bounds& image_bounds, int moments_status, float observed_e1, float observed_e2, float moments_sigma, float moments_amp, @@ -42,7 +35,12 @@ namespace hsm { float resolution_factor, float psf_sigma, float psf_e1, float psf_e2, const char* error_message) { +#ifdef USE_BOOST ShapeData* data = new ShapeData(); +#else + PYBIND11_PLACEMENT_NEW ShapeData(); + ShapeData* data = &instance; +#endif data->image_bounds = image_bounds; data->moments_status = moments_status; data->observed_e1 = observed_e1; @@ -65,32 +63,34 @@ namespace hsm { data->psf_e1 = psf_e1; data->psf_e2 = psf_e2; data->error_message = error_message; +#ifdef USE_BOOST return data; +#endif } template - static void WrapTemplates() { + static void WrapTemplates(PYBIND11_MODULE& _galsim) + { typedef void (*FAM_func)(ShapeData&t, const BaseImage&, const BaseImage&, double, double, Position, bool, const HSMParams&); - bp::def("_FindAdaptiveMomView", FAM_func(&FindAdaptiveMomView)); + GALSIM_DOT def("_FindAdaptiveMomView", FAM_func(&FindAdaptiveMomView)); typedef void 
(*ESH_func)(ShapeData&, const BaseImage&, const BaseImage&, const BaseImage&, float, const char *, const char*, double, double, double, Position, const HSMParams&); - bp::def("_EstimateShearView", ESH_func(&EstimateShearView)); + GALSIM_DOT def("_EstimateShearView", ESH_func(&EstimateShearView)); }; - void pyExportHSM() { - - bp::class_ pyHSMParams("HSMParams", bp::no_init); - pyHSMParams + void pyExportHSM(PYBIND11_MODULE& _galsim) + { + bp::class_(GALSIM_COMMA "HSMParams" BP_NOINIT) .def(bp::init< double, double, double, int, int, double, long, long, double, double, double, int, double, double, double>()); - bp::class_("ShapeData", "", bp::no_init) - .def("__init__", bp::make_constructor(&ShapeData_init, bp::default_call_policies())) + bp::class_(GALSIM_COMMA "ShapeData" BP_NOINIT) + .def("__init__", BP_MAKE_CONSTRUCTOR(&ShapeData_init)) .def_readonly("image_bounds", &ShapeData::image_bounds) .def_readonly("moments_status", &ShapeData::moments_status) .def_readonly("observed_e1", &ShapeData::observed_e1) @@ -112,13 +112,12 @@ namespace hsm { .def_readonly("psf_sigma", &ShapeData::psf_sigma) .def_readonly("psf_e1", &ShapeData::psf_e1) .def_readonly("psf_e2", &ShapeData::psf_e2) - .def_readonly("error_message", &ShapeData::error_message) - ; + .def_readonly("error_message", &ShapeData::error_message); - WrapTemplates(); - WrapTemplates(); - WrapTemplates(); - WrapTemplates(); + WrapTemplates(_galsim); + WrapTemplates(_galsim); + WrapTemplates(_galsim); + WrapTemplates(_galsim); } } // namespace hsm diff --git a/pysrc/Image.cpp b/pysrc/Image.cpp index 653cc0edc24..9c9b58598f6 100644 --- a/pysrc/Image.cpp +++ b/pysrc/Image.cpp @@ -17,64 +17,60 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" // header that includes Python.h always needs to come first - +#include "PyBind11Helper.h" #include "Image.h" -namespace bp = boost::python; - // Note that docstrings are now added in galsim/image.py namespace galsim { template - static ImageView* MakeFromArray(size_t idata, int step, int stride, - const Bounds& bounds) + static BP_CONSTRUCTOR(MakeFromArray, ImageView, + size_t idata, int step, int stride, const Bounds& bounds) { T* data = reinterpret_cast(idata); shared_ptr owner; - return new ImageView(data, owner, step, stride, bounds); + PYBIND11_PLACEMENT_NEW ImageView(data, owner, step, stride, bounds); } template - static void WrapImage(const std::string& suffix) + static void WrapImage(PYBIND11_MODULE& _galsim, const std::string& suffix) { - bp::class_< BaseImage, boost::noncopyable >(("BaseImage" + suffix).c_str(), bp::no_init); + bp::class_ BOOST_NONCOPYABLE>( + GALSIM_COMMA ("BaseImage" + suffix).c_str() BP_NOINIT); - typedef ImageView* (*Make_func)(size_t, int, int, const Bounds&); - bp::class_< ImageView, bp::bases< BaseImage > >(("ImageView" + suffix).c_str(), - bp::no_init) - .def("__init__", bp::make_constructor((Make_func)&MakeFromArray, - bp::default_call_policies())); + typedef BP_CONSTRUCTOR((*Make_func), ImageView, size_t, int, int, const Bounds&); + bp::class_ BP_BASES(BaseImage)>( + GALSIM_COMMA ("ImageView" + suffix).c_str() BP_NOINIT) + .def("__init__", BP_MAKE_CONSTRUCTOR((Make_func)&MakeFromArray)); typedef void (*rfft_func_type)(const BaseImage&, ImageView >, bool, bool); typedef void (*irfft_func_type)(const BaseImage&, ImageView, bool, bool); typedef void (*cfft_func_type)(const BaseImage&, ImageView >, bool, bool, bool); - bp::def("rfft", rfft_func_type(&rfft)); - bp::def("irfft", irfft_func_type(&irfft)); - bp::def("cfft", cfft_func_type(&cfft)); + GALSIM_DOT def("rfft", rfft_func_type(&rfft)); + GALSIM_DOT def("irfft", irfft_func_type(&irfft)); + GALSIM_DOT 
def("cfft", cfft_func_type(&cfft)); typedef void (*wrap_func_type)(ImageView, const Bounds&, bool, bool); - bp::def("wrapImage", wrap_func_type(&wrapImage)); + GALSIM_DOT def("wrapImage", wrap_func_type(&wrapImage)); typedef void (*invert_func_type)(ImageView); - bp::def("invertImage", invert_func_type(&invertImage)); + GALSIM_DOT def("invertImage", invert_func_type(&invertImage)); } - void pyExportImage() + void pyExportImage(PYBIND11_MODULE& _galsim) { - WrapImage("US"); - WrapImage("UI"); - WrapImage("S"); - WrapImage("I"); - WrapImage("F"); - WrapImage("D"); - WrapImage >("CD"); - WrapImage >("CF"); + WrapImage(_galsim, "US"); + WrapImage(_galsim, "UI"); + WrapImage(_galsim, "S"); + WrapImage(_galsim, "I"); + WrapImage(_galsim, "F"); + WrapImage(_galsim, "D"); + WrapImage >(_galsim, "CD"); + WrapImage >(_galsim, "CF"); - bp::def("goodFFTSize", &goodFFTSize); + GALSIM_DOT def("goodFFTSize", &goodFFTSize); } } // namespace galsim diff --git a/pysrc/Integ.cpp b/pysrc/Integ.cpp index 9b066a0fb7c..6b9715e4dbf 100644 --- a/pysrc/Integ.cpp +++ b/pysrc/Integ.cpp @@ -17,15 +17,10 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "integ/Int.h" - #include -namespace bp = boost::python; - namespace galsim { namespace integ { @@ -36,7 +31,7 @@ namespace integ { public: PyFunc(const bp::object& func) : _func(func) {} double operator()(double x) const - { return bp::extract(_func(x)); } + { return CAST(_func(x)); } private: const bp::object& _func; }; @@ -54,9 +49,9 @@ namespace integ { } } - void pyExportInteg() { - - bp::def("PyInt1d", &PyInt1d); + void pyExportInteg(PYBIND11_MODULE& _galsim) + { + GALSIM_DOT def("PyInt1d", &PyInt1d); } diff --git a/pysrc/Interpolant.cpp b/pysrc/Interpolant.cpp index 936d9f87e20..f3d273d3a95 100644 --- a/pysrc/Interpolant.cpp +++ b/pysrc/Interpolant.cpp @@ -17,39 +17,36 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" +#include #include "Interpolant.h" -namespace bp = boost::python; - namespace galsim { - void pyExportInterpolant() + void pyExportInterpolant(PYBIND11_MODULE& _galsim) { - bp::class_("Interpolant", bp::no_init); + bp::class_(GALSIM_COMMA "Interpolant" BP_NOINIT); - bp::class_ >("Delta", bp::no_init) + bp::class_(GALSIM_COMMA "Delta" BP_NOINIT) .def(bp::init()); - bp::class_ >("Nearest", bp::no_init) + bp::class_(GALSIM_COMMA "Nearest" BP_NOINIT) .def(bp::init()); - bp::class_ >("SincInterpolant", bp::no_init) + bp::class_(GALSIM_COMMA "SincInterpolant" BP_NOINIT) .def(bp::init()); - bp::class_ >("Lanczos", bp::no_init) + bp::class_(GALSIM_COMMA "Lanczos" BP_NOINIT) .def(bp::init()) .def("urange", &Lanczos::urange); - bp::class_ >("Linear", bp::no_init) + bp::class_(GALSIM_COMMA "Linear" BP_NOINIT) .def(bp::init()); - bp::class_ >("Cubic", bp::no_init) + bp::class_(GALSIM_COMMA "Cubic" BP_NOINIT) .def(bp::init()); - bp::class_ >("Quintic", bp::no_init) + bp::class_(GALSIM_COMMA "Quintic" BP_NOINIT) .def(bp::init()); } diff --git 
a/pysrc/PhotonArray.cpp b/pysrc/PhotonArray.cpp index 78482fdb5cf..e357f5117fc 100644 --- a/pysrc/PhotonArray.cpp +++ b/pysrc/PhotonArray.cpp @@ -17,13 +17,9 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include // header that includes Python.h always needs to come first - +#include "PyBind11Helper.h" #include "PhotonArray.h" -namespace bp = boost::python; - namespace galsim { template @@ -35,8 +31,8 @@ namespace galsim { &PhotonArray::setFrom); } - static PhotonArray* construct(int N, size_t ix, size_t iy, size_t iflux, - size_t idxdz, size_t idydz, size_t iwave, bool is_corr) + static BP_CONSTRUCTOR(construct, PhotonArray, int N, size_t ix, size_t iy, size_t iflux, + size_t idxdz, size_t idydz, size_t iwave, bool is_corr) { double *x = reinterpret_cast(ix); double *y = reinterpret_cast(iy); @@ -44,14 +40,14 @@ namespace galsim { double *dxdz = reinterpret_cast(idxdz); double *dydz = reinterpret_cast(idydz); double *wave = reinterpret_cast(iwave); - return new PhotonArray(N, x, y, flux, dxdz, dydz, wave, is_corr); + PYBIND11_PLACEMENT_NEW PhotonArray(N, x, y, flux, dxdz, dydz, wave, is_corr); } - void pyExportPhotonArray() + void pyExportPhotonArray(PYBIND11_MODULE& _galsim) { - bp::class_ pyPhotonArray("PhotonArray", bp::no_init); + bp::class_ pyPhotonArray(GALSIM_COMMA "PhotonArray" BP_NOINIT); pyPhotonArray - .def("__init__", bp::make_constructor(&construct, bp::default_call_policies())) + .def("__init__", BP_MAKE_CONSTRUCTOR(&construct)) .def("convolve", &PhotonArray::convolve); WrapTemplates(pyPhotonArray); WrapTemplates(pyPhotonArray); diff --git a/pysrc/PyBind11Helper.h b/pysrc/PyBind11Helper.h new file mode 100644 index 00000000000..4552b025eb3 --- /dev/null +++ b/pysrc/PyBind11Helper.h @@ -0,0 +1,96 @@ +/* -*- c++ -*- + * Copyright (c) 2012-2017 by the GalSim developers team on GitHub + * https://github.com/GalSim-developers + * + * This file is part of GalSim: The modular galaxy image simulation 
toolkit. + * https://github.com/GalSim-developers/GalSim + * + * GalSim is free software: redistribution and use in source and binary forms, + * with or without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions, and the disclaimer given in the accompanying LICENSE + * file. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions, and the disclaimer given in the documentation + * and/or other materials provided with the distribution. + */ +#ifndef PyBind11Helper_H +#define PyBind11Helper_H + +#ifdef USE_BOOST + +#include "galsim/IgnoreWarnings.h" + +#define BOOST_PYTHON_MAX_ARITY 22 // We have a function with 21 params in HSM.cpp + // c.f. www.boost.org/libs/python/doc/v2/configuration.html + +#define BOOST_NO_CXX11_SMART_PTR +#include +#include +namespace bp = boost::python; + +#define PYBIND11_PLUGIN(x) BOOST_PYTHON_MODULE(x) +#define PYBIND11_MAKE_MODULE(x) bp::scope _galsim +#define PYBIND11_RETURN_PTR(x) + +#define TUPLE(args...) bp::tuple +#define MAKE_TUPLE bp::make_tuple + +#define GALSIM_DOT bp:: +#define GALSIM_COMMA +#define PYBIND11_MODULE bp::scope +#define BP_HANDLE bp::handle<> +#define BP_THROW bp::throw_error_already_set() +#define BP_NOINIT , bp::no_init +#define ENABLE_PICKLING .enable_pickling() +#define PYBIND11_CAST(x) x +#define BP_OTHER(T) bp::other() +#define ADD_PROPERTY(name, func) add_property(name, func) +#define BP_REGISTER(T) bp::register_ptr_to_python< boost::shared_ptr >() +#define BOOST_NONCOPYABLE , boost::noncopyable +#define BP_BASES(T) , bp::bases +#define BP_MAKE_CONSTRUCTOR(args...) bp::make_constructor(args, bp::default_call_policies()) +#define BP_CONSTRUCTOR(f,x,args...) 
x* f(args) +#define PYBIND11_PLACEMENT_NEW return new +#define CAST bp::extract +#define BP_COPY_CONST_REFERENCE bp::return_value_policy() +#define def_property_readonly add_property + +#else + +#include +#include +#include +#include +namespace bp = pybind11; + +#define PYBIND11_MAKE_MODULE(x) pybind11::module x(#x) +#define PYBIND11_RETURN_PTR(x) return x.ptr() + +#define TUPLE(args...) std::tuple +#define MAKE_TUPLE std::make_tuple + +#define GALSIM_DOT _galsim. +#define GALSIM_COMMA _galsim, +#define PYBIND11_MODULE pybind11::module +#define BP_HANDLE pybind11::handle +#define BP_THROW throw pybind11::error_already_set() +#define BP_NOINIT +#define ENABLE_PICKLING +#define PYBIND11_CAST(x) pybind11::cast(x) +#define BP_OTHER(T) T() +#define ADD_PROPERTY(name, func) def_property_readonly(name, func) +#define BP_REGISTER(T) +#define BOOST_NONCOPYABLE +#define BP_BASES(T) , T +#define BP_MAKE_CONSTRUCTOR(args...) args +#define BP_CONSTRUCTOR(f,x,args...) void f(x& instance, args) +#define PYBIND11_PLACEMENT_NEW new (&instance) +#define CAST pybind11::cast +#define BP_COPY_CONST_REFERENCE pybind11::return_value_policy::reference + +#endif + +#endif diff --git a/pysrc/Random.cpp b/pysrc/Random.cpp index 007c4b6fec0..d70ee865ae2 100644 --- a/pysrc/Random.cpp +++ b/pysrc/Random.cpp @@ -17,13 +17,9 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "Random.h" -namespace bp = boost::python; - namespace galsim { void Generate(BaseDeviate& rng, size_t N, size_t idata) @@ -50,9 +46,9 @@ namespace galsim { rng.generateFromExpectation(N, data); } - void pyExportRandom() + void pyExportRandom(PYBIND11_MODULE& _galsim) { - bp::class_ ("BaseDeviateImpl", "", bp::no_init) + bp::class_ (GALSIM_COMMA "BaseDeviateImpl" BP_NOINIT) .def(bp::init()) .def(bp::init()) .def(bp::init()) @@ -65,33 +61,40 @@ namespace galsim { .def("generate", &Generate) .def("add_generate", &AddGenerate); - bp::class_ >("UniformDeviateImpl", bp::no_init) + bp::class_( + GALSIM_COMMA "UniformDeviateImpl" BP_NOINIT) .def(bp::init()) .def("generate1", &UniformDeviate::generate1); - bp::class_ >("GaussianDeviateImpl", bp::no_init) + bp::class_( + GALSIM_COMMA "GaussianDeviateImpl" BP_NOINIT) .def(bp::init()) .def("generate1", &GaussianDeviate::generate1) .def("generate_from_variance", &GenerateFromVariance); - bp::class_ >("BinomialDeviateImpl", bp::no_init) + bp::class_( + GALSIM_COMMA "BinomialDeviateImpl" BP_NOINIT) .def(bp::init()) .def("generate1", &BinomialDeviate::generate1); - bp::class_ >("PoissonDeviateImpl", bp::no_init) + bp::class_( + GALSIM_COMMA "PoissonDeviateImpl" BP_NOINIT) .def(bp::init()) .def("generate1", &PoissonDeviate::generate1) .def("generate_from_expectation", &GenerateFromExpectation); - bp::class_ >("WeibullDeviateImpl", bp::no_init) + bp::class_( + GALSIM_COMMA "WeibullDeviateImpl" BP_NOINIT) .def(bp::init()) .def("generate1", &WeibullDeviate::generate1); - bp::class_ >("GammaDeviateImpl", bp::no_init) + bp::class_( + GALSIM_COMMA "GammaDeviateImpl" BP_NOINIT) .def(bp::init()) .def("generate1", &GammaDeviate::generate1); - bp::class_ >("Chi2DeviateImpl", bp::no_init) + bp::class_( + GALSIM_COMMA "Chi2DeviateImpl" BP_NOINIT) .def(bp::init()) .def("generate1", &Chi2Deviate::generate1); } diff --git 
a/pysrc/RealGalaxy.cpp b/pysrc/RealGalaxy.cpp index 4663ebbdeb8..d73986b4437 100644 --- a/pysrc/RealGalaxy.cpp +++ b/pysrc/RealGalaxy.cpp @@ -17,13 +17,9 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "RealGalaxy.h" -namespace bp = boost::python; - namespace galsim { void CallComputeCRGCoefficients(size_t coef_data, size_t Sigma_data, @@ -39,8 +35,8 @@ namespace galsim { ComputeCRGCoefficients(coef, Sigma, w, kimgs, psf, nsed, nband, nkx, nky); }; - void pyExportRealGalaxy() { - bp::def("ComputeCRGCoefficients", &CallComputeCRGCoefficients); + void pyExportRealGalaxy(PYBIND11_MODULE& _galsim) { + GALSIM_DOT def("ComputeCRGCoefficients", &CallComputeCRGCoefficients); } } // namespace galsim diff --git a/pysrc/SBAdd.cpp b/pysrc/SBAdd.cpp index 187c803bcf9..153aeaafd0a 100644 --- a/pysrc/SBAdd.cpp +++ b/pysrc/SBAdd.cpp @@ -17,29 +17,30 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBAdd.h" -namespace bp = boost::python; - namespace galsim { - static SBAdd* construct(const bp::list& slist, GSParams gsparams) +#ifdef USE_BOOST + static BP_CONSTRUCTOR(construct, SBAdd, const bp::object& iterable, GSParams gsparams) { + bp::stl_input_iterator iter(iterable), end; std::list plist; - int n = len(slist); - for(int i=0; i(slist[i])); - } - return new SBAdd(plist, gsparams); + for(; iter != end; ++iter) plist.push_back(*iter); + PYBIND11_PLACEMENT_NEW SBAdd(plist, gsparams); + } +#else + static BP_CONSTRUCTOR(construct, SBAdd, const std::list& plist, GSParams gsparams) + { + PYBIND11_PLACEMENT_NEW SBAdd(plist, gsparams); } +#endif - void pyExportSBAdd() + void pyExportSBAdd(PYBIND11_MODULE& _galsim) { - bp::class_< SBAdd, bp::bases >("SBAdd", bp::no_init) - .def("__init__", bp::make_constructor(&construct, bp::default_call_policies())); + bp::class_(GALSIM_COMMA "SBAdd" BP_NOINIT) + .def("__init__", BP_MAKE_CONSTRUCTOR(&construct)); } } // namespace galsim diff --git a/pysrc/SBAiry.cpp b/pysrc/SBAiry.cpp index 1feba0a0c80..72c21f05f26 100644 --- a/pysrc/SBAiry.cpp +++ b/pysrc/SBAiry.cpp @@ -17,18 +17,14 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBAiry.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBAiry() + void pyExportSBAiry(PYBIND11_MODULE& _galsim) { - bp::class_ >("SBAiry", bp::no_init) + bp::class_(GALSIM_COMMA "SBAiry" BP_NOINIT) .def(bp::init()); } diff --git a/pysrc/SBBox.cpp b/pysrc/SBBox.cpp index a40820adfae..f700493847e 100644 --- a/pysrc/SBBox.cpp +++ b/pysrc/SBBox.cpp @@ -17,21 +17,16 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBBox.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBBox() + void pyExportSBBox(PYBIND11_MODULE& _galsim) { - bp::class_ >("SBBox", bp::no_init) + bp::class_(GALSIM_COMMA "SBBox" BP_NOINIT) .def(bp::init()); - - bp::class_ >("SBTopHat", bp::no_init) + bp::class_(GALSIM_COMMA "SBTopHat" BP_NOINIT) .def(bp::init()); } diff --git a/pysrc/SBConvolve.cpp b/pysrc/SBConvolve.cpp index 73f09eb4d0e..10cb1dad372 100644 --- a/pysrc/SBConvolve.cpp +++ b/pysrc/SBConvolve.cpp @@ -17,34 +17,35 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBConvolve.h" -namespace bp = boost::python; - namespace galsim { - static SBConvolve* construct(const bp::list& slist, bool real_space, GSParams gsparams) +#ifdef USE_BOOST + static BP_CONSTRUCTOR(construct, SBConvolve, + const bp::object& iterable, bool real_space, GSParams gsparams) { + bp::stl_input_iterator iter(iterable), end; std::list plist; - int n = len(slist); - for(int i=0; i(slist[i])); - } - return new SBConvolve(plist, real_space, gsparams); + for(; iter != end; ++iter) plist.push_back(*iter); + PYBIND11_PLACEMENT_NEW SBConvolve(plist, real_space, gsparams); } - - void pyExportSBConvolve() +#else + static BP_CONSTRUCTOR(construct, SBConvolve, + const std::list& plist, bool real_space, GSParams gsparams) { - bp::class_< SBConvolve, bp::bases >("SBConvolve", bp::no_init) - .def("__init__", bp::make_constructor(&construct, bp::default_call_policies())); + PYBIND11_PLACEMENT_NEW SBConvolve(plist, real_space, gsparams); + } +#endif - bp::class_< SBAutoConvolve, bp::bases >("SBAutoConvolve", bp::no_init) + void pyExportSBConvolve(PYBIND11_MODULE& _galsim) + { + bp::class_(GALSIM_COMMA "SBConvolve" BP_NOINIT) + .def("__init__", BP_MAKE_CONSTRUCTOR( &construct)); + 
bp::class_(GALSIM_COMMA "SBAutoConvolve" BP_NOINIT) .def(bp::init()); - - bp::class_< SBAutoCorrelate, bp::bases >("SBAutoCorrelate", bp::no_init) + bp::class_(GALSIM_COMMA "SBAutoCorrelate" BP_NOINIT) .def(bp::init()); } diff --git a/pysrc/SBDeconvolve.cpp b/pysrc/SBDeconvolve.cpp index b0d73f01c37..e65ce52e0f3 100644 --- a/pysrc/SBDeconvolve.cpp +++ b/pysrc/SBDeconvolve.cpp @@ -17,18 +17,14 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBDeconvolve.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBDeconvolve() + void pyExportSBDeconvolve(PYBIND11_MODULE& _galsim) { - bp::class_< SBDeconvolve, bp::bases >("SBDeconvolve", bp::no_init) + bp::class_(GALSIM_COMMA "SBDeconvolve" BP_NOINIT) .def(bp::init()); } diff --git a/pysrc/SBDeltaFunction.cpp b/pysrc/SBDeltaFunction.cpp index cdab9102f2b..a0467d26caf 100644 --- a/pysrc/SBDeltaFunction.cpp +++ b/pysrc/SBDeltaFunction.cpp @@ -17,18 +17,14 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBDeltaFunction.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBDeltaFunction() + void pyExportSBDeltaFunction(PYBIND11_MODULE& _galsim) { - bp::class_ >("SBDeltaFunction", bp::no_init) + bp::class_(GALSIM_COMMA "SBDeltaFunction" BP_NOINIT) .def(bp::init()); } diff --git a/pysrc/SBExponential.cpp b/pysrc/SBExponential.cpp index 8da77b47666..3dc5a5a340e 100644 --- a/pysrc/SBExponential.cpp +++ b/pysrc/SBExponential.cpp @@ -17,19 +17,15 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBExponential.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBExponential() + void pyExportSBExponential(PYBIND11_MODULE& _galsim) { - bp::class_ >("SBExponential", bp::no_init) - .def(bp::init()); + bp::class_(GALSIM_COMMA "SBExponential" BP_NOINIT) + .def(bp::init()); } } // namespace galsim diff --git a/pysrc/SBFourierSqrt.cpp b/pysrc/SBFourierSqrt.cpp index 0012ca08bec..cb9c37c4978 100644 --- a/pysrc/SBFourierSqrt.cpp +++ b/pysrc/SBFourierSqrt.cpp @@ -17,18 +17,14 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBFourierSqrt.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBFourierSqrt() + void pyExportSBFourierSqrt(PYBIND11_MODULE& _galsim) { - bp::class_< SBFourierSqrt, bp::bases >("SBFourierSqrt", bp::no_init) + bp::class_(GALSIM_COMMA "SBFourierSqrt" BP_NOINIT) .def(bp::init()); } diff --git a/pysrc/SBGaussian.cpp b/pysrc/SBGaussian.cpp index e5f0117a731..04da1cd4ac3 100644 --- a/pysrc/SBGaussian.cpp +++ b/pysrc/SBGaussian.cpp @@ -17,18 +17,14 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBGaussian.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBGaussian() + void pyExportSBGaussian(PYBIND11_MODULE& _galsim) { - bp::class_ >("SBGaussian", bp::no_init) + bp::class_(GALSIM_COMMA "SBGaussian" BP_NOINIT) .def(bp::init()); } diff --git a/pysrc/SBInclinedExponential.cpp b/pysrc/SBInclinedExponential.cpp index b41099ceb96..8358baa7454 100644 --- a/pysrc/SBInclinedExponential.cpp +++ b/pysrc/SBInclinedExponential.cpp @@ -17,19 +17,15 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBInclinedExponential.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBInclinedExponential() + void pyExportSBInclinedExponential(PYBIND11_MODULE& _galsim) { - bp::class_ >( - "SBInclinedExponential", bp::no_init) + bp::class_( + GALSIM_COMMA "SBInclinedExponential" BP_NOINIT) .def(bp::init()); } diff --git a/pysrc/SBInclinedSersic.cpp b/pysrc/SBInclinedSersic.cpp index 8d166492dae..98ad663b32d 100644 --- a/pysrc/SBInclinedSersic.cpp +++ b/pysrc/SBInclinedSersic.cpp @@ -17,18 +17,15 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBInclinedSersic.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBInclinedSersic() + void pyExportSBInclinedSersic(PYBIND11_MODULE& _galsim) { - bp::class_ >("SBInclinedSersic", bp::no_init) + bp::class_( + GALSIM_COMMA "SBInclinedSersic" BP_NOINIT) .def(bp::init()); } diff --git a/pysrc/SBInterpolatedImage.cpp b/pysrc/SBInterpolatedImage.cpp index 7d286ef7ed4..6f86a9185e2 100644 --- a/pysrc/SBInterpolatedImage.cpp +++ b/pysrc/SBInterpolatedImage.cpp @@ -17,38 +17,33 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBInterpolatedImage.h" -namespace bp = boost::python; - namespace galsim { template - static void WrapTemplates(W& wrapper) + static void WrapTemplates(PYBIND11_MODULE& _galsim, W& wrapper) { - wrapper - .def(bp::init &, const Bounds&, const Bounds&, - const Interpolant&, const Interpolant&, - double, double, GSParams>()); + wrapper.def(bp::init &, const Bounds&, const Bounds&, + const Interpolant&, const Interpolant&, + double, double, GSParams>()); typedef double (*cscf_func_type)(const BaseImage&, double); - bp::def("CalculateSizeContainingFlux", cscf_func_type(&CalculateSizeContainingFlux)); + GALSIM_DOT def("CalculateSizeContainingFlux", cscf_func_type(&CalculateSizeContainingFlux)); } - void pyExportSBInterpolatedImage() + void pyExportSBInterpolatedImage(PYBIND11_MODULE& _galsim) { - bp::class_< SBInterpolatedImage, bp::bases > pySBInterpolatedImage( - "SBInterpolatedImage", bp::no_init); + bp::class_ pySBInterpolatedImage( + GALSIM_COMMA "SBInterpolatedImage" BP_NOINIT); pySBInterpolatedImage .def("calculateMaxK", &SBInterpolatedImage::calculateMaxK); - WrapTemplates(pySBInterpolatedImage); - WrapTemplates(pySBInterpolatedImage); + WrapTemplates(_galsim, pySBInterpolatedImage); + WrapTemplates(_galsim, pySBInterpolatedImage); - bp::class_< SBInterpolatedKImage, bp::bases > pySBInterpolatedKImage( - "SBInterpolatedKImage", bp::no_init); + bp::class_ pySBInterpolatedKImage( + GALSIM_COMMA "SBInterpolatedKImage" BP_NOINIT); pySBInterpolatedKImage .def(bp::init > &, double, const Interpolant&, GSParams>()); diff --git a/pysrc/SBKolmogorov.cpp b/pysrc/SBKolmogorov.cpp index 0b5403d89f6..49299919b70 100644 --- a/pysrc/SBKolmogorov.cpp +++ b/pysrc/SBKolmogorov.cpp @@ -17,18 +17,14 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBKolmogorov.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBKolmogorov() + void pyExportSBKolmogorov(PYBIND11_MODULE& _galsim) { - bp::class_ >("SBKolmogorov", bp::no_init) + bp::class_(GALSIM_COMMA "SBKolmogorov" BP_NOINIT) .def(bp::init()); } diff --git a/pysrc/SBMoffat.cpp b/pysrc/SBMoffat.cpp index 94d7ac91c6a..5ad40adfd0f 100644 --- a/pysrc/SBMoffat.cpp +++ b/pysrc/SBMoffat.cpp @@ -17,22 +17,18 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBMoffat.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBMoffat() + void pyExportSBMoffat(PYBIND11_MODULE& _galsim) { - bp::class_ >("SBMoffat", bp::no_init) + bp::class_(GALSIM_COMMA "SBMoffat" BP_NOINIT) .def(bp::init()) .def("getHalfLightRadius", &SBMoffat::getHalfLightRadius); - bp::def("MoffatCalculateSRFromHLR", &MoffatCalculateScaleRadiusFromHLR); + GALSIM_DOT def("MoffatCalculateSRFromHLR", &MoffatCalculateScaleRadiusFromHLR); } } // namespace galsim diff --git a/pysrc/SBProfile.cpp b/pysrc/SBProfile.cpp index 74823b77939..c8366f6ce10 100644 --- a/pysrc/SBProfile.cpp +++ b/pysrc/SBProfile.cpp @@ -17,41 +17,28 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#define BOOST_PYTHON_MAX_ARITY 20 // We have a function with 17 params here... - // c.f. www.boost.org/libs/python/doc/v2/configuration.html -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBProfile.h" #include "SBTransform.h" -namespace bp = boost::python; - namespace galsim { template - static void WrapTemplates(W& wrapper) { - // We don't need to wrap templates in a separate function, but it keeps us - // from having to repeat each of the lines below for each type. 
- // We also don't need to make 'W' a template parameter in this case, - // but it's easier to do that than write out the full class_ type. - wrapper - .def("draw", - (void (SBProfile::*)(ImageView, double) const)&SBProfile::draw); - wrapper - .def("drawK", - (void (SBProfile::*)(ImageView >, double) const) - &SBProfile::drawK); + static void WrapTemplates(W& wrapper) + { + wrapper.def("draw", (void (SBProfile::*)(ImageView, double) const)&SBProfile::draw); + wrapper.def("drawK", (void (SBProfile::*)(ImageView >, double) const) + &SBProfile::drawK); } - void pyExportSBProfile() + void pyExportSBProfile(PYBIND11_MODULE& _galsim) { - bp::class_ ("GSParams", bp::no_init) + bp::class_(GALSIM_COMMA "GSParams" BP_NOINIT) .def(bp::init< int, int, double, double, double, double, double, double, double, double, double, double, double, double, int, double>()); - bp::class_ pySBProfile("SBProfile", bp::no_init); + bp::class_ pySBProfile(GALSIM_COMMA "SBProfile" BP_NOINIT); pySBProfile .def("xValue", &SBProfile::xValue) .def("kValue", &SBProfile::kValue) diff --git a/pysrc/SBSersic.cpp b/pysrc/SBSersic.cpp index 4476b968f6e..73c5784bb58 100644 --- a/pysrc/SBSersic.cpp +++ b/pysrc/SBSersic.cpp @@ -17,23 +17,19 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBSersic.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBSersic() + void pyExportSBSersic(PYBIND11_MODULE& _galsim) { - bp::class_ >("SBSersic", bp::no_init) + bp::class_(GALSIM_COMMA "SBSersic" BP_NOINIT) .def(bp::init()); - bp::def("SersicTruncatedScale", &SersicTruncatedScale); - bp::def("SersicIntegratedFlux", &SersicIntegratedFlux); - bp::def("SersicHLR", &SersicHLR); + GALSIM_DOT def("SersicTruncatedScale", &SersicTruncatedScale); + GALSIM_DOT def("SersicIntegratedFlux", &SersicIntegratedFlux); + GALSIM_DOT def("SersicHLR", &SersicHLR); } } // namespace galsim diff --git a/pysrc/SBShapelet.cpp b/pysrc/SBShapelet.cpp index 5255d08d22f..797dfb122bd 100644 --- a/pysrc/SBShapelet.cpp +++ b/pysrc/SBShapelet.cpp @@ -17,13 +17,9 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBShapelet.h" -namespace bp = boost::python; - namespace galsim { static void fit(double sigma, int order, size_t idata, @@ -39,20 +35,21 @@ namespace galsim { v = bvec.rVector(); } - static SBShapelet* construct(double sigma, int order, size_t idata, GSParams gsparams) + static BP_CONSTRUCTOR(construct, SBShapelet, + double sigma, int order, size_t idata, GSParams gsparams) { double* data = reinterpret_cast(idata); int size = PQIndex::size(order); LVector bvec(order, tmv::VectorViewOf(data, size)); - return new SBShapelet(sigma, bvec, gsparams); + PYBIND11_PLACEMENT_NEW SBShapelet(sigma, bvec, gsparams); } - void pyExportSBShapelet() + void pyExportSBShapelet(PYBIND11_MODULE& _galsim) { - bp::class_ >("SBShapelet", bp::no_init) - .def("__init__", bp::make_constructor(&construct, bp::default_call_policies())); + bp::class_(GALSIM_COMMA "SBShapelet" BP_NOINIT) + .def("__init__", BP_MAKE_CONSTRUCTOR(&construct)); - bp::def("ShapeletFitImage", 
&fit); + GALSIM_DOT def("ShapeletFitImage", &fit); } } // namespace galsim diff --git a/pysrc/SBSpergel.cpp b/pysrc/SBSpergel.cpp index 5e6f2c1f05d..677add7a52b 100644 --- a/pysrc/SBSpergel.cpp +++ b/pysrc/SBSpergel.cpp @@ -17,23 +17,19 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBSpergel.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBSpergel() + void pyExportSBSpergel(PYBIND11_MODULE& _galsim) { - bp::class_ >("SBSpergel",bp::no_init) + bp::class_(GALSIM_COMMA "SBSpergel" BP_NOINIT) .def(bp::init()) .def("calculateIntegratedFlux", &SBSpergel::calculateIntegratedFlux) .def("calculateFluxRadius", &SBSpergel::calculateFluxRadius); - bp::def("SpergelCalculateHLR", &SpergelCalculateHLR); + GALSIM_DOT def("SpergelCalculateHLR", &SpergelCalculateHLR); } } // namespace galsim diff --git a/pysrc/SBTransform.cpp b/pysrc/SBTransform.cpp index 29cc71db63d..4c79da0eb7c 100644 --- a/pysrc/SBTransform.cpp +++ b/pysrc/SBTransform.cpp @@ -17,18 +17,14 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "SBTransform.h" -namespace bp = boost::python; - namespace galsim { - void pyExportSBTransform() + void pyExportSBTransform(PYBIND11_MODULE& _galsim) { - bp::class_< SBTransform, bp::bases >("SBTransform", bp::no_init) + bp::class_(GALSIM_COMMA "SBTransform" BP_NOINIT) .def(bp::init, double, GSParams>()); } diff --git a/pysrc/Silicon.cpp b/pysrc/Silicon.cpp index 54520de1284..0380e97083f 100644 --- a/pysrc/Silicon.cpp +++ b/pysrc/Silicon.cpp @@ -17,45 +17,37 @@ * and/or other materials provided with the distribution. 
*/ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" // header that includes Python.h always needs to come first - +#include "PyBind11Helper.h" #include "Silicon.h" #include "Random.h" -namespace bp = boost::python; - namespace galsim { template static void WrapTemplates(W& wrapper) { typedef double (Silicon::*accumulate_fn)(const PhotonArray&, UniformDeviate, ImageView, Position); - wrapper - .def("accumulate", (accumulate_fn)&Silicon::accumulate); + wrapper.def("accumulate", (accumulate_fn)&Silicon::accumulate); } - - static Silicon* MakeSilicon(int NumVertices, double NumElect, int Nx, int Ny, int QDist, - double Nrecalc, double DiffStep, double PixelSize, - double SensorThickness, size_t idata, - const Table& treeRingTable, - const Position& treeRingCenter, - const Table& abs_length_table) + static BP_CONSTRUCTOR(MakeSilicon, Silicon, + int NumVertices, double NumElect, int Nx, int Ny, int QDist, + double Nrecalc, double DiffStep, double PixelSize, + double SensorThickness, size_t idata, + const Table& treeRingTable, + const Position& treeRingCenter, + const Table& abs_length_table) { double* data = reinterpret_cast(idata); - int NumPolys = Nx * Ny + 2; - int Nv = 4 * NumVertices + 4; - return new Silicon(NumVertices, NumElect, Nx, Ny, QDist, - Nrecalc, DiffStep, PixelSize, SensorThickness, data, - treeRingTable, treeRingCenter, abs_length_table); + PYBIND11_PLACEMENT_NEW Silicon(NumVertices, NumElect, Nx, Ny, QDist, + Nrecalc, DiffStep, PixelSize, SensorThickness, data, + treeRingTable, treeRingCenter, abs_length_table); } - void pyExportSilicon() + void pyExportSilicon(PYBIND11_MODULE& _galsim) { - bp::class_ pySilicon("Silicon", bp::no_init); - pySilicon - .def("__init__", bp::make_constructor(&MakeSilicon, bp::default_call_policies())); + bp::class_ pySilicon(GALSIM_COMMA "Silicon" BP_NOINIT); + pySilicon.def("__init__", BP_MAKE_CONSTRUCTOR(&MakeSilicon)); WrapTemplates(pySilicon); WrapTemplates(pySilicon); diff --git a/pysrc/Table.cpp 
b/pysrc/Table.cpp index d6014fef40b..f6d922f59b0 100644 --- a/pysrc/Table.cpp +++ b/pysrc/Table.cpp @@ -17,16 +17,13 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" // header that includes Python.h always needs to come first - +#include "PyBind11Helper.h" #include "Table.h" -namespace bp = boost::python; - namespace galsim { - static Table* makeTable(size_t iargs, size_t ivals, int N, const char* interp_c) + static BP_CONSTRUCTOR(MakeTable, Table, + size_t iargs, size_t ivals, int N, const char* interp_c) { const double* args = reinterpret_cast(iargs); const double* vals = reinterpret_cast(ivals); @@ -38,18 +35,19 @@ namespace galsim { else if (interp == "ceil") i = Table::ceil; else if (interp == "nearest") i = Table::nearest; - return new Table(args, vals, N, i); + PYBIND11_PLACEMENT_NEW Table(args, vals, N, i); } - static void interpMany(const Table& table, size_t iargs, size_t ivals, int N) + static void InterpMany(const Table& table, size_t iargs, size_t ivals, int N) { const double* args = reinterpret_cast(iargs); double* vals = reinterpret_cast(ivals); table.interpMany(args, vals, N); } - static Table2D* makeTable2D(size_t ix, size_t iy, size_t ivals, int Nx, int Ny, - const char* interp_c) + static BP_CONSTRUCTOR(MakeTable2D, Table2D, + size_t ix, size_t iy, size_t ivals, int Nx, int Ny, + const char* interp_c) { const double* x = reinterpret_cast(ix); const double* y = reinterpret_cast(iy); @@ -61,10 +59,10 @@ namespace galsim { else if (interp == "ceil") i = Table2D::ceil; else if (interp == "nearest") i = Table2D::nearest; - return new Table2D(x, y, vals, Nx, Ny, i); + PYBIND11_PLACEMENT_NEW Table2D(x, y, vals, Nx, Ny, i); } - static void interpMany2D(const Table2D& table2d, size_t ix, size_t iy, size_t ivals, int N) + static void InterpMany2D(const Table2D& table2d, size_t ix, size_t iy, size_t ivals, int N) { const double* x = reinterpret_cast(ix); const double* y = 
reinterpret_cast(iy); @@ -88,19 +86,17 @@ namespace galsim { table2d.gradientMany(x, y, dfdx, dfdy, N); } - void pyExportTable() + void pyExportTable(PYBIND11_MODULE& _galsim) { - bp::class_ pyTable("_LookupTable", bp::no_init); - pyTable - .def("__init__", bp::make_constructor(&makeTable, bp::default_call_policies())) + bp::class_
(GALSIM_COMMA "_LookupTable" BP_NOINIT) + .def("__init__", BP_MAKE_CONSTRUCTOR(&MakeTable)) .def("interp", &Table::lookup) - .def("interpMany", &interpMany); + .def("interpMany", &InterpMany); - bp::class_ pyTable2D("_LookupTable2D", bp::no_init); - pyTable2D - .def("__init__", bp::make_constructor(&makeTable2D, bp::default_call_policies())) + bp::class_(GALSIM_COMMA "_LookupTable2D" BP_NOINIT) + .def("__init__", BP_MAKE_CONSTRUCTOR(&MakeTable2D)) .def("interp", &Table2D::lookup) - .def("interpMany", &interpMany2D) + .def("interpMany", &InterpMany2D) .def("gradient", &Gradient) .def("gradientMany", &GradientMany); } diff --git a/pysrc/WCS.cpp b/pysrc/WCS.cpp index dc8617c8954..15841a38527 100644 --- a/pysrc/WCS.cpp +++ b/pysrc/WCS.cpp @@ -17,13 +17,9 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" - +#include "PyBind11Helper.h" #include "WCS.h" -namespace bp = boost::python; - namespace galsim { void CallApplyCD(int n, size_t x_data, size_t y_data, size_t cd_data) @@ -32,7 +28,7 @@ namespace galsim { double* yar = reinterpret_cast(y_data); const double* cdar = reinterpret_cast(cd_data); ApplyCD(n, xar, yar, cdar); - }; + } void CallApplyPV(int n, int m, size_t u_data, size_t v_data, size_t pv_data) { @@ -40,28 +36,29 @@ namespace galsim { double* var = reinterpret_cast(v_data); const double* pvar = reinterpret_cast(pv_data); ApplyPV(n, m, uar, var, pvar); - }; + } - bp::tuple CallInvertPV(double u, double v, size_t pv_data) + TUPLE(double,double) CallInvertPV(double u, double v, size_t pv_data) { const double* pvar = reinterpret_cast(pv_data); InvertPV(u, v, pvar); - return bp::make_tuple(u,v); - }; + return MAKE_TUPLE(u,v); + } - bp::tuple CallInvertAB(int m, double x, double y, size_t ab_data, size_t abp_data) + TUPLE(double,double) CallInvertAB(int m, double x, double y, size_t ab_data, size_t abp_data) { const double* abar = reinterpret_cast(ab_data); const double* abpar = 
reinterpret_cast(abp_data); InvertAB(m, x, y, abar, abpar); - return bp::make_tuple(x,y); - }; + return MAKE_TUPLE(x,y); + } - void pyExportWCS() { - bp::def("ApplyPV", &CallApplyPV); - bp::def("ApplyCD", &CallApplyCD); - bp::def("InvertPV", &CallInvertPV); - bp::def("InvertAB", &CallInvertAB); + void pyExportWCS(PYBIND11_MODULE& _galsim) + { + GALSIM_DOT def("ApplyPV", &CallApplyPV); + GALSIM_DOT def("ApplyCD", &CallApplyCD); + GALSIM_DOT def("InvertPV", &CallInvertPV); + GALSIM_DOT def("InvertAB", &CallInvertAB); } } // namespace galsim diff --git a/pysrc/module.cpp b/pysrc/module.cpp index 08eef20c7b7..f8f2fbb8849 100644 --- a/pysrc/module.cpp +++ b/pysrc/module.cpp @@ -17,85 +17,91 @@ * and/or other materials provided with the distribution. */ -#include "galsim/IgnoreWarnings.h" -#include "boost/python.hpp" +#include "Python.h" +#include "PyBind11Helper.h" namespace galsim { - void pyExportBounds(); - void pyExportPhotonArray(); - void pyExportImage(); - void pyExportSBProfile(); - void pyExportSBAdd(); - void pyExportSBConvolve(); - void pyExportSBDeconvolve(); - void pyExportSBFourierSqrt(); - void pyExportSBTransform(); - void pyExportSBBox(); - void pyExportSBGaussian(); - void pyExportSBExponential(); - void pyExportSBSersic(); - void pyExportSBSpergel(); - void pyExportSBMoffat(); - void pyExportSBAiry(); - void pyExportSBShapelet(); - void pyExportSBInterpolatedImage(); - void pyExportSBKolmogorov(); - void pyExportSBInclinedExponential(); - void pyExportSBInclinedSersic(); - void pyExportSBDeltaFunction(); - void pyExportRandom(); - void pyExportTable(); - void pyExportInterpolant(); - void pyExportCDModel(); - void pyExportSilicon(); - void pyExportRealGalaxy(); - void pyExportWCS(); + void pyExportBounds(PYBIND11_MODULE&); + void pyExportPhotonArray(PYBIND11_MODULE&); + void pyExportImage(PYBIND11_MODULE&); + void pyExportSBProfile(PYBIND11_MODULE&); + void pyExportSBAdd(PYBIND11_MODULE&); + void pyExportSBConvolve(PYBIND11_MODULE&); + void 
pyExportSBDeconvolve(PYBIND11_MODULE&); + void pyExportSBFourierSqrt(PYBIND11_MODULE&); + void pyExportSBTransform(PYBIND11_MODULE&); + void pyExportSBBox(PYBIND11_MODULE&); + void pyExportSBGaussian(PYBIND11_MODULE&); + void pyExportSBDeltaFunction(PYBIND11_MODULE&); + void pyExportSBExponential(PYBIND11_MODULE&); + void pyExportSBSersic(PYBIND11_MODULE&); + void pyExportSBSpergel(PYBIND11_MODULE&); + void pyExportSBMoffat(PYBIND11_MODULE&); + void pyExportSBAiry(PYBIND11_MODULE&); + void pyExportSBShapelet(PYBIND11_MODULE&); + void pyExportSBInterpolatedImage(PYBIND11_MODULE&); + void pyExportSBKolmogorov(PYBIND11_MODULE&); + void pyExportSBInclinedExponential(PYBIND11_MODULE&); + void pyExportSBInclinedSersic(PYBIND11_MODULE&); + void pyExportRandom(PYBIND11_MODULE&); + void pyExportTable(PYBIND11_MODULE&); + void pyExportInterpolant(PYBIND11_MODULE&); + void pyExportCDModel(PYBIND11_MODULE&); + void pyExportSilicon(PYBIND11_MODULE&); + void pyExportRealGalaxy(PYBIND11_MODULE&); + void pyExportWCS(PYBIND11_MODULE&); namespace hsm { - void pyExportHSM(); - } // namespace hsm + void pyExportHSM(PYBIND11_MODULE&); + } namespace integ { - void pyExportInteg(); - } // namespace integ + void pyExportInteg(PYBIND11_MODULE&); + } namespace math { - void pyExportBessel(); - } // namespace integ + void pyExportBessel(PYBIND11_MODULE&); + } } // namespace galsim -BOOST_PYTHON_MODULE(_galsim) { - galsim::pyExportBounds(); - galsim::pyExportImage(); - galsim::pyExportPhotonArray(); - galsim::pyExportSBProfile(); - galsim::pyExportSBAdd(); - galsim::pyExportSBConvolve(); - galsim::pyExportSBDeconvolve(); - galsim::pyExportSBFourierSqrt(); - galsim::pyExportSBTransform(); - galsim::pyExportSBBox(); - galsim::pyExportSBGaussian(); - galsim::pyExportSBExponential(); - galsim::pyExportSBSersic(); - galsim::pyExportSBSpergel(); - galsim::pyExportSBMoffat(); - galsim::pyExportSBAiry(); - galsim::pyExportSBShapelet(); - galsim::pyExportSBInterpolatedImage(); - 
galsim::pyExportSBKolmogorov(); - galsim::pyExportSBInclinedExponential(); - galsim::pyExportSBInclinedSersic(); - galsim::pyExportSBDeltaFunction(); - galsim::pyExportRandom(); - galsim::pyExportInterpolant(); - galsim::pyExportCDModel(); - galsim::hsm::pyExportHSM(); - galsim::integ::pyExportInteg(); - galsim::pyExportTable(); - galsim::math::pyExportBessel(); - galsim::pyExportSilicon(); - galsim::pyExportRealGalaxy(); - galsim::pyExportWCS(); +PYBIND11_PLUGIN(_galsim) +{ + PYBIND11_MAKE_MODULE(_galsim); + + galsim::pyExportBounds(_galsim); + galsim::pyExportPhotonArray(_galsim); + galsim::pyExportImage(_galsim); + galsim::pyExportSBProfile(_galsim); + galsim::pyExportSBAdd(_galsim); + galsim::pyExportSBConvolve(_galsim); + galsim::pyExportSBDeconvolve(_galsim); + galsim::pyExportSBFourierSqrt(_galsim); + galsim::pyExportSBTransform(_galsim); + galsim::pyExportSBBox(_galsim); + galsim::pyExportSBGaussian(_galsim); + galsim::pyExportSBDeltaFunction(_galsim); + galsim::pyExportSBExponential(_galsim); + galsim::pyExportSBSersic(_galsim); + galsim::pyExportSBSpergel(_galsim); + galsim::pyExportSBMoffat(_galsim); + galsim::pyExportSBAiry(_galsim); + galsim::pyExportSBShapelet(_galsim); + galsim::pyExportSBInterpolatedImage(_galsim); + galsim::pyExportSBKolmogorov(_galsim); + galsim::pyExportSBInclinedExponential(_galsim); + galsim::pyExportSBInclinedSersic(_galsim); + galsim::pyExportRandom(_galsim); + galsim::pyExportTable(_galsim); + galsim::pyExportInterpolant(_galsim); + galsim::pyExportCDModel(_galsim); + galsim::pyExportSilicon(_galsim); + galsim::pyExportRealGalaxy(_galsim); + galsim::pyExportWCS(_galsim); + + galsim::hsm::pyExportHSM(_galsim); + galsim::integ::pyExportInteg(_galsim); + galsim::math::pyExportBessel(_galsim); + + PYBIND11_RETURN_PTR(_galsim); } From be6c2823a1b444094235bdd18bd4d3257fd0269c Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sun, 31 Dec 2017 14:57:05 -0500 Subject: [PATCH 002/111] Remove TMV from places that don't really need it 
(#809-pybind11) --- include/galsim/CorrelatedNoise.h | 22 ++-------------------- include/galsim/FFT.h | 17 +++-------------- include/galsim/SBProfileImpl.h | 1 - src/CorrelatedNoise.cpp | 32 +++----------------------------- src/FFT.cpp | 22 ++++++++++++++++++++++ src/SBTransform.cpp | 1 - 6 files changed, 30 insertions(+), 65 deletions(-) diff --git a/include/galsim/CorrelatedNoise.h b/include/galsim/CorrelatedNoise.h index 2864832075c..bb78b25739c 100644 --- a/include/galsim/CorrelatedNoise.h +++ b/include/galsim/CorrelatedNoise.h @@ -26,7 +26,6 @@ */ #include -#include "TMV_Sym.h" #include "Image.h" #include "SBProfile.h" @@ -41,26 +40,9 @@ namespace galsim { * written into. The rest are initialized and remain as zero. * * For an example of this function in use, see `galsim/correlatednoise.py`. - * - * Currently, this actually copies elements from an internal calculation of the covariance - * matrix (using Mike Jarvis' TMV library). It could, therefore, be calculated more - * efficiently by direct assignment. However, as this public member function is foreseen as - * being mainly for visualization/checking purposes, we go via the TMV intermediary to avoid - * code duplication. If, in future, it becomes critical to speed up this function this can be - * revisited. - */ - void calculateCovarianceMatrix(ImageView& cov, - const SBProfile& sbp, const Bounds& bounds, double dx); - - /** - * @brief Return, as a TMV SymMatrix, a noise covariance matrix between every element in an - * input Image with pixel scale dx. - * - * The TMV SymMatrix uses FortranStyle indexing (to match the FITS-compliant usage in Image) - * along with ColumnMajor ordering (the default), and Upper triangle storage. 
*/ - tmv::SymMatrix calculateCovarianceSymMatrix( - const SBProfile& sbp, const Bounds& bounds, double dx); + void calculateCovarianceMatrix(ImageView& cov, const SBProfile& sbp, + const Bounds& bounds, double dx); } #endif diff --git a/include/galsim/FFT.h b/include/galsim/FFT.h index f9a6951c2df..721c527fb28 100644 --- a/include/galsim/FFT.h +++ b/include/galsim/FFT.h @@ -61,8 +61,7 @@ #include #include -#include "fftw3.h" -#include "TMV.h" +#include #include "Std.h" #include "Interpolant.h" @@ -149,16 +148,9 @@ namespace galsim { return *this; } - ~FFTW_Array() {} + ~FFTW_Array(); - void resize(size_t n) - { - if (_n != n) { - _n = n; - _array.resize(n); - _p = _array.get(); - } - } + void resize(size_t n); void fill(T val) { @@ -180,9 +172,6 @@ namespace galsim { private: size_t _n; - // fftw_malloc doesn't seem to actually guarantee 16 byte alignment, so we instead - // use TMV's AlignedArray class to handle the byte alignment for us. - tmv::AlignedArray _array; T* _p; }; diff --git a/include/galsim/SBProfileImpl.h b/include/galsim/SBProfileImpl.h index d366ce55674..3d03197fb54 100644 --- a/include/galsim/SBProfileImpl.h +++ b/include/galsim/SBProfileImpl.h @@ -22,7 +22,6 @@ #include "SBProfile.h" #include "integ/Int.h" -#include "TMV.h" namespace galsim { diff --git a/src/CorrelatedNoise.cpp b/src/CorrelatedNoise.cpp index 8944774db1b..60c689cf02b 100644 --- a/src/CorrelatedNoise.cpp +++ b/src/CorrelatedNoise.cpp @@ -32,48 +32,22 @@ namespace galsim { int idim = 1 + bounds.getXMax() - bounds.getXMin(); int jdim = 1 + bounds.getYMax() - bounds.getYMin(); int covdim = idim * jdim; - tmv::SymMatrix symcov = calculateCovarianceSymMatrix(sbp, bounds, dx); - - for (int i=1; i<=covdim; i++){ // note that the Image indices use the FITS convention and - // start from 1!! 
- for (int j=i; j<=covdim; j++){ - cov.setValue(i, j, symcov(i, j)); // fill in the upper triangle with the - // correct CorrFunc value - } - } - } - - tmv::SymMatrix calculateCovarianceSymMatrix( - const SBProfile& sbp, const Bounds& bounds, double dx) - { - // Calculate the required dimensions - int idim = 1 + bounds.getXMax() - bounds.getXMin(); - int jdim = 1 + bounds.getYMax() - bounds.getYMin(); - int covdim = idim * jdim; int k, ell; // k and l are indices that refer to image pixel separation vectors in the // correlation func. double x_k, y_ell; // physical vector separations in the correlation func, dx * k etc. - tmv::SymMatrix cov = tmv::SymMatrix< - double, tmv::FortranStyle|tmv::Upper>(covdim); - - for (int i=1; i<=covdim; i++){ // note that the Image indices use the FITS convention and - // start from 1!! - for (int j=i; j<=covdim; j++){ - + for (int i=1; i<=covdim; i++) { + for (int j=i; j<=covdim; j++) { k = ((j - 1) / jdim) - ((i - 1) / idim); // using integer division rules here ell = ((j - 1) % jdim) - ((i - 1) % idim); x_k = double(k) * dx; y_ell = double(ell) * dx; Position p = Position(x_k, y_ell); - cov(i, j) = sbp.xValue(p); // fill in the upper triangle with the correct value - + cov.setValue(i, j, sbp.xValue(p)); } } - return cov; } } diff --git a/src/FFT.cpp b/src/FFT.cpp index 0889dd0c3e5..15ec607cfa7 100644 --- a/src/FFT.cpp +++ b/src/FFT.cpp @@ -33,6 +33,26 @@ namespace galsim { + template + void FFTW_Array::resize(size_t n) + { + if (_n != n) { + _n = n; + // cf. BaseImage::allocateMem, which uses the same code. 
+ char* mem = new char[_n * sizeof(T) + sizeof(char*) + 15]; + _p = reinterpret_cast( (uintptr_t)(mem + sizeof(char*) + 15) & ~(size_t) 0x0F ); + ((char**)_p)[-1] = mem; + } + } + + template + FFTW_Array::~FFTW_Array() + { + if (_p) { + delete [] ((char**)_p)[-1]; + } + } + KTable::KTable(int N, double dk, std::complex value) : _dk(dk), _invdk(1./dk) { if (N<=0) throw FFTError("KTable size <=0"); @@ -1113,5 +1133,7 @@ namespace galsim { return kt; } + template class FFTW_Array; + template class FFTW_Array >; } diff --git a/src/SBTransform.cpp b/src/SBTransform.cpp index 176c4a8b024..192678adf3f 100644 --- a/src/SBTransform.cpp +++ b/src/SBTransform.cpp @@ -19,7 +19,6 @@ //#define DEBUGLOGGING -#include "TMV.h" #include "SBTransform.h" #include "SBTransformImpl.h" #include "fmath/fmath.hpp" // Use their compiler checks for the right SSE include. From c4d82463da0c2b95b2ff9d6c944c282eb165697e Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 1 Jan 2018 19:04:37 -0500 Subject: [PATCH 003/111] Add check for Eigen in SConstruct (#809-pybind11) --- SConstruct | 95 +++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 91 insertions(+), 4 deletions(-) diff --git a/SConstruct b/SConstruct index d2096d7bf4c..dba0dadc3ac 100644 --- a/SConstruct +++ b/SConstruct @@ -85,6 +85,7 @@ opts.Add(PathVariable('FINAL_PREFIX', opts.Add(BoolVariable('WITH_UPS','Install ups/ directory for use with EUPS', False)) opts.Add('TMV_DIR','Explicitly give the tmv prefix','') +opts.Add('EIGEN_DIR','Explicitly give the Eigen prefix','') opts.Add('TMV_LINK','File that contains the linking instructions for TMV','') opts.Add('FFTW_DIR','Explicitly give the fftw3 prefix','') opts.Add('BOOST_DIR','Explicitly give the boost prefix','') @@ -714,7 +715,7 @@ def AddDepPaths(bin_paths,cpp_paths,lib_paths): """ - types = ['BOOST', 'TMV', 'FFTW'] + types = ['BOOST', 'TMV', 'EIGEN', 'FFTW'] for t in types: dirtag = t+'_DIR' @@ -724,9 +725,12 @@ def AddDepPaths(bin_paths,cpp_paths,lib_paths): 
print('WARNING: could not find specified %s = %s'%(dirtag,env[dirtag])) continue - AddPath(bin_paths, os.path.join(tdir, 'bin')) - AddPath(lib_paths, os.path.join(tdir, 'lib')) - AddPath(cpp_paths, os.path.join(tdir, 'include')) + if t == 'EIGEN': + AddPath(cpp_paths, tdir) + else: + AddPath(bin_paths, os.path.join(tdir, 'bin')) + AddPath(lib_paths, os.path.join(tdir, 'lib')) + AddPath(cpp_paths, os.path.join(tdir, 'include')) def AddExtraPaths(env): @@ -1523,6 +1527,87 @@ PyMODINIT_FUNC initcheck_tmv(void) return 1 +def CheckEigen(config): + eigen_source_file = """ +#include "Python.h" +#include "Eigen/Core" +#include "Eigen/Cholesky" + +static void useEigen() { + Eigen::MatrixXd S(10,10); + S.setConstant(4.); + S.diagonal().array() += 50.; + Eigen::MatrixXd m(10,3); + m.setConstant(2.); + S.llt().solveInPlace(m); +} + +static PyObject* run(PyObject* self, PyObject* args) +{ + useEigen(); + return Py_BuildValue("i", 23); +} + +static PyMethodDef Methods[] = { + {"run", run, METH_VARARGS, "return 23"}, + {NULL, NULL, 0, NULL} +}; + +#if PY_MAJOR_VERSION >= 3 + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "check_eigen", + NULL, + -1, + Methods, + NULL, + NULL, + NULL, + NULL +}; + +PyMODINIT_FUNC PyInit_check_eigen(void) + +#else + +PyMODINIT_FUNC initcheck_eigen(void) + +#endif +{ +#if PY_MAJOR_VERSION >= 3 + return PyModule_Create(&moduledef); +#else + Py_InitModule("check_eigen", Methods); +#endif +} +""" + config.Message('Checking if we can build module using Eigen... 
') + + result = config.TryCompile(eigen_source_file,'.cpp') + if not result: + ErrorExit('Unable to compile a module using eigen') + + result = CheckModuleLibs(config,[],eigen_source_file,'check_eigen') + if not result: + ErrorExit('Unable to build a python loadable module that uses eigen') + + config.Result(1) + + eigen_version_file = """ +#include +#include "Eigen/Core" +int main() { + std::cout< Date: Mon, 1 Jan 2018 19:05:32 -0500 Subject: [PATCH 004/111] Fix a stray use of installed boost header file (#809-pybind11) --- include/galsim/boost1_48_0/assert.hpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/include/galsim/boost1_48_0/assert.hpp b/include/galsim/boost1_48_0/assert.hpp index 174f0846fd1..10d3919f230 100644 --- a/include/galsim/boost1_48_0/assert.hpp +++ b/include/galsim/boost1_48_0/assert.hpp @@ -34,7 +34,11 @@ #elif defined(BOOST_ENABLE_ASSERT_HANDLER) +#ifdef USE_BOOST #include +#else +#include "galsim/boost1_48_0/current_function.hpp" +#endif namespace boost { @@ -63,8 +67,6 @@ namespace boost #elif defined(BOOST_ENABLE_ASSERT_HANDLER) - #include - namespace boost { void assertion_failed_msg(char const * expr, char const * msg, @@ -80,7 +82,6 @@ namespace boost #define BOOST_ASSERT_HPP #include #include - #include // IDE's like Visual Studio perform better if output goes to std::cout or // some other stream, so allow user to configure output stream: From 714ba525338dcc28025700a89406c42c029ebbec Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 1 Jan 2018 22:39:28 -0500 Subject: [PATCH 005/111] Use Eigen rather than TMV in WCS.cpp, RealGalaxy.cpp (#809-pybind11) --- src/RealGalaxy.cpp | 68 +++++++++++++--- src/WCS.cpp | 196 ++++++++++++++++++++++++++++++++++----------- 2 files changed, 204 insertions(+), 60 deletions(-) diff --git a/src/RealGalaxy.cpp b/src/RealGalaxy.cpp index 5c01b0dd228..82e38c8fddd 100644 --- a/src/RealGalaxy.cpp +++ b/src/RealGalaxy.cpp @@ -17,7 +17,12 @@ * and/or other materials provided with 
the distribution. */ +#ifdef USE_TMV #include "TMV.h" +#else +#include "Eigen/Dense" +#endif + #include "RealGalaxy.h" namespace galsim @@ -76,10 +81,18 @@ namespace galsim Sigma[-iy, -ix] = np.conj(dx) */ +#ifdef USE_TMV + typedef tmv::Matrix > MatrixXcd; + typedef tmv::Vector > VectorXcd; +#else + using Eigen::MatrixXcd; + using Eigen::VectorXcd; + using Eigen::VectorXd; +#endif int npix = nkx * nky; int nsedsq = nsed * nsed; - tmv::Matrix > A(nband, nsed); - tmv::Vector > b(nband); + MatrixXcd A(nband, nsed); + VectorXcd b(nband); for (int ix=0; ix ww = tmv::DiagMatrixViewOf(w + iy*nkx + ix, nband, npix); tmv::ConstMatrixView > psf = @@ -95,29 +109,59 @@ namespace galsim tmv::VectorViewOf(kimgs + iy*nkx + ix, nband, npix); tmv::VectorView > x = tmv::VectorViewOf(coef + iy*nkx*nsed + ix*nsed, nsed, 1); - tmv::MatrixView > dx = - tmv::MatrixViewOf(Sigma + iy*nkx*nsedsq + ix*nsedsq, nsed, nsed, nsed, 1); + tmv::MatrixView > dxT = + tmv::MatrixViewOf(Sigma + iy*nkx*nsedsq + ix*nsedsq, nsed, nsed, 1, nsed); A = ww * psf; b = ww * kimg; try { x = b / A; - A.makeInverseATA(dx); + A.makeInverseATA(dxT); } catch (tmv::Singular) { A.divideUsing(tmv::QRP); x = b / A; - A.makeInverseATA(dx); + A.makeInverseATA(dxT); } +#else + using Eigen::Dynamic; + using Eigen::InnerStride; + using Eigen::Stride; + using Eigen::Upper; + Eigen::Map > ww( + w+iy*nkx+ix, nband, InnerStride<>(npix)); + Eigen::Map > psf( + psf_eff_kimgs + iy*nkx + ix, nband, nsed, + Stride(npix, npix * nsed)); + Eigen::Map > kimg( + kimgs + iy*nkx + ix, nband, InnerStride<>(npix)); + Eigen::Map x(coef + iy*nkx*nsed + ix*nsed, nsed); + Eigen::Map dxT(Sigma + iy*nkx*nsedsq + ix*nsedsq, nsed, nsed); + + A = ww.asDiagonal() * psf; + b = ww.asDiagonal() * kimg; + Eigen::ColPivHouseholderQR qr = A.colPivHouseholderQr(); + x = qr.solve(b); + // (AtA)^-1 = (PtRtQtQRP)^-1 = (PtRtRP)^-1 = Pt R^-1 Rt^-1 P + dxT = qr.colsPermutation().transpose() * + qr.matrixR().triangularView().solve( + 
qr.matrixR().triangularView().transpose().solve( + MatrixXcd(qr.colsPermutation()))); +#endif + + if (ix > 0 && iy > 0) { int ix2 = nkx - ix; int iy2 = nky - iy; if (ix == ix2 && iy == iy2) continue; - tmv::VectorView > x2 = - tmv::VectorViewOf(coef + iy2*nkx*nsed + ix2*nsed, nsed, 1); - tmv::MatrixView > dx2 = - tmv::MatrixViewOf(Sigma + iy2*nkx*nsedsq + ix2*nsedsq, nsed, nsed, nsed, 1); - x2 = x.conjugate(); - dx2 = dx.conjugate(); +#ifdef USE_TMV + tmv::VectorViewOf(coef + iy2*nkx*nsed + ix2*nsed, nsed, 1) = x.conjugate(); + tmv::MatrixViewOf(Sigma + iy2*nkx*nsedsq + ix2*nsedsq, nsed, nsed, 1, nsed) = + dxT.conjugate(); +#else + Eigen::Map(coef + iy2*nkx*nsed + ix2*nsed, nsed) = x.conjugate(); + Eigen::Map(Sigma + iy2*nkx*nsedsq + ix2*nsedsq, nsed, nsed) = + dxT.conjugate(); +#endif } } } diff --git a/src/WCS.cpp b/src/WCS.cpp index 0b14843f3ec..085809c0f19 100644 --- a/src/WCS.cpp +++ b/src/WCS.cpp @@ -21,7 +21,17 @@ #include "Std.h" #include "WCS.h" +#ifdef USE_TMV #include "TMV.h" +typedef tmv::Vector VectorXd; +typedef tmv::Matrix MatrixXd; +typedef tmv::VectorView MapVectorXd; +#else +#include "Eigen/Dense" +using Eigen::VectorXd; +using Eigen::MatrixXd; +typedef Eigen::Map MapVectorXd; +#endif namespace galsim { @@ -42,28 +52,46 @@ namespace galsim { } } - void setup_pow(tmv::VectorView& x, tmv::Matrix& xpow) + void setup_pow(MapVectorXd& x, MatrixXd& xpow) { +#ifdef USE_TMV xpow.col(0).setAllTo(1.); xpow.col(1) = x; for (int i=2; i pvu(pvar, m, m, m, 1, tmv::NonConj); - tmv::ConstMatrixView pvv(pvar + m*m, m, m, m, 1, tmv::NonConj); +#ifdef USE_TMV + tmv::ConstMatrixView pvuT(pvar, m, m, 1, m, tmv::NonConj); + tmv::ConstMatrixView pvvT(pvar + m*m, m, m, 1, m, tmv::NonConj); +#else + Eigen::Map pvuT(pvar, m, m); + Eigen::Map pvvT(pvar + m*m, m, m); +#endif while (n) { // Do this in blocks of at most 256 to avoid blowing up the memory usage when // this is run on a large image. It's also a bit faster this way, since there // are fewer cache misses. 
const int nn = n >= 256 ? 256 : n; - tmv::VectorView u(uar, nn, 1, tmv::NonConj); - tmv::VectorView v(var, nn, 1, tmv::NonConj); - tmv::Matrix upow(nn,m); - tmv::Matrix vpow(nn,m); +#ifdef USE_TMV + MapVectorXd u(uar, nn, 1, tmv::NonConj); + MapVectorXd v(var, nn, 1, tmv::NonConj); +#else + MapVectorXd u(uar, nn); + MapVectorXd v(var, nn); +#endif + MatrixXd upow(nn, m); + MatrixXd vpow(nn, m); + setup_pow(u, upow); setup_pow(v, vpow); @@ -77,13 +105,25 @@ namespace galsim { // above formulae. So we use the fact that // diag(AT . B) = sum_rows(A * B) - tmv::Vector ones(m, 1.); - tmv::Matrix temp = vpow * pvu.transpose(); +#ifdef USE_TMV + VectorXd ones(m, 1.); +#else + VectorXd ones = Eigen::VectorXd::Ones(m); +#endif + MatrixXd temp = vpow * pvuT; +#ifdef USE_TMV temp = ElemProd(upow, temp); +#else + temp.array() *= upow.array(); +#endif u = temp * ones; - temp = vpow * pvv.transpose(); + temp = vpow * pvvT; +#ifdef USE_TMV temp = ElemProd(upow, temp); +#else + temp.array() *= upow.array(); +#endif v = temp * ones; uar += nn; @@ -109,19 +149,30 @@ namespace galsim { double u0 = u; double v0 = v; - tmv::ConstMatrixView pvu(pvar, 4, 4, 4, 1, tmv::NonConj); - tmv::ConstMatrixView pvv(pvar + 16, 4, 4, 4, 1, tmv::NonConj); +#ifdef USE_TMV + tmv::ConstMatrixView pvuT(pvar, 4, 4, 1, 4, tmv::NonConj); + tmv::ConstMatrixView pvvT(pvar + 16, 4, 4, 1, 4, tmv::NonConj); + typedef tmv::SmallVector Vector4d; + typedef tmv::SmallMatrix Matrix2d; + typedef tmv::SmallVector Vector2d; +#else + Eigen::Map pvuT(pvar); + Eigen::Map pvvT(pvar + 16); + using Eigen::Vector4d; + using Eigen::Matrix2d; + using Eigen::Vector2d; +#endif // Some temporary vectors/matrices we'll use within the loop below. 
- tmv::SmallVector upow; - tmv::SmallVector vpow; - tmv::SmallVector pvu_vpow; - tmv::SmallVector pvv_vpow; - tmv::SmallVector dupow; - tmv::SmallVector dvpow; - tmv::SmallMatrix j1; - tmv::SmallVector diff; - tmv::SmallVector duv; + Vector4d upow; + Vector4d vpow; + Vector4d pvu_vpow; + Vector4d pvv_vpow; + Vector4d dupow; + Vector4d dvpow; + Matrix2d j1; + Vector2d diff; + Vector2d duv; double prev_err = -1.; for (int iter=0; iter& xpow) + void setup_pow(double x, VectorXd& xpow) { + xpow[0] = 1.; xpow[1] = x; for (int i=2; i abx(abar, m, m, m, 1, tmv::NonConj); - tmv::ConstMatrixView aby(abar + m*m, m, m, m, 1, tmv::NonConj); - dbg<<"abx = "< +#include +#include +#ifdef USE_TMV #include "TMV.h" #include "TMV_SymBand.h" -#include "Table.h" -#include -#include +#endif -#include +#include "Table.h" namespace galsim { @@ -60,7 +61,7 @@ namespace galsim { ArgVec::ArgVec(const double* vec, int n): _vec(vec), _n(n) { - xdbg<<"Make ArgVec from: "< VectorXd; +typedef tmv::Matrix MatrixXd; +typedef tmv::Vector > VectorXcd; +typedef tmv::Matrix > MatrixXcd; +#else +#include "Eigen/Dense" +using Eigen::VectorXd; +using Eigen::MatrixXd; +using Eigen::VectorXcd; +using Eigen::MatrixXcd; +#endif #include "Std.h" @@ -196,7 +208,7 @@ namespace galsim { // ??? +=, -=, etc. private: - LVectorReference(tmv::Vector& v, PQIndex pq) : + LVectorReference(VectorXd& v, PQIndex pq) : _re(&v[pq.rIndex()]), _isign(pq.iSign()) {} double *_re; int _isign; // 0 if this is a real element, -1 if needs conjugation, else +1 @@ -204,19 +216,6 @@ namespace galsim { friend class LVector; }; - // A custom deleter to allow us to return views to the LVector as numpy arrays - // which will keep track of the Vector allocation. When the last LVector _or_ - // external view of _owner goes out of scope, then the tmv::Vector is destroyed. - class LVectorDeleter - { - public: - LVectorDeleter(shared_ptr > v) : _v(v) {} - - void operator()(double * p) const {} // the _v shared_ptr will delete for us! 
- - shared_ptr > _v; - }; - class LVector { public: @@ -227,7 +226,7 @@ namespace galsim { _v->setZero(); } - LVector(int order, const tmv::GenVector& v) : + LVector(int order, const VectorXd& v) : _order(order) { allocateMem(); @@ -235,19 +234,13 @@ namespace galsim { assert(v.size() == PQIndex::size(order)); } - LVector(int order, shared_ptr > v) : - _order(order), _v(v), - _owner(_v->ptr(), LVectorDeleter(_v)) - { assert(v->size() == PQIndex::size(order)); } - - LVector(const LVector& rhs) : _order(rhs._order), _v(rhs._v), _owner(rhs._owner) {} + LVector(const LVector& rhs) : _order(rhs._order), _v(rhs._v) {} LVector& operator=(const LVector& rhs) { if (_v.get()==rhs._v.get()) return *this; _order=rhs._order; _v = rhs._v; - _owner = rhs._owner; return *this; } @@ -276,7 +269,7 @@ namespace galsim { // by making a new copy of the vector first. If it is already the sole owner, // then nothing is done. (FYI: The term for this is "Copy on Write" semantics.) void take_ownership() - { if (!_v.unique()) { _v.reset(new tmv::Vector(*_v)); } } + { if (!_v.unique()) { _v.reset(new VectorXd(*_v)); } } void clear() { take_ownership(); _v->setZero(); } @@ -288,8 +281,8 @@ namespace galsim { int size() const { return _v->size(); } // Access the real-representation vector directly. - const tmv::Vector& rVector() const { return *_v; } - tmv::Vector& rVector() { take_ownership(); return *_v; } + const VectorXd& rVector() const { return *_v; } + VectorXd& rVector() { take_ownership(); return *_v; } // op[] with int returns real double operator[](int i) const { return (*_v)[i]; } @@ -363,7 +356,13 @@ namespace galsim { } // Inner product of the real values. 
- double dot(const LVector& rhs) const { return (*_v)*(*rhs._v); } + double dot(const LVector& rhs) const { +#ifdef USE_TMV + return (*_v)*(*rhs._v); +#else + return _v->dot(*rhs._v); +#endif + } // write to an ostream void write(std::ostream& os, int maxorder=-1) const; @@ -388,31 +387,31 @@ namespace galsim { // Create a matrix containing basis values at vector of input points. // Output matrix has m(i,j) = jth basis function at ith point - static shared_ptr > basis( - const tmv::ConstVectorView& x, const tmv::ConstVectorView& y, + static shared_ptr basis( + const VectorXd& x, const VectorXd& y, int order, double sigma=1.); // Create design matrix, including factors of 1/sigma stored in invsig - static shared_ptr > design( - const tmv::ConstVectorView& x, const tmv::ConstVectorView& y, - const tmv::ConstVectorView& invsig, int order, double sigma=1.); + static shared_ptr design( + const VectorXd& x, const VectorXd& y, + const VectorXd& invsig, int order, double sigma=1.); // ...or provide your own matrix static void design( - const tmv::ConstVectorView& x, const tmv::ConstVectorView& y, - const tmv::ConstVectorView& invsig, - tmv::MatrixView psi, int order, double sigma=1.); + const VectorXd& x, const VectorXd& y, + const VectorXd& invsig, + MatrixXd& psi, int order, double sigma=1.); static void basis( - const tmv::ConstVectorView& x, const tmv::ConstVectorView& y, - tmv::MatrixView psi, int order, double sigma=1.); + const VectorXd& x, const VectorXd& y, + MatrixXd& psi, int order, double sigma=1.); - static shared_ptr > > kBasis( - const tmv::ConstVectorView& kx, const tmv::ConstVectorView& ky, + static shared_ptr kBasis( + const VectorXd& kx, const VectorXd& ky, int order, double sigma); static void kBasis( - const tmv::ConstVectorView& kx, const tmv::ConstVectorView& ky, - tmv::MatrixView > psi_k, int order, double sigma); + const VectorXd& kx, const VectorXd& ky, + MatrixXcd& psi_k, int order, double sigma); // ?? 
Add routine to decompose a data vector into b's // ?? Add routines to evaluate summed basis at a set of x/k points @@ -428,189 +427,26 @@ namespace galsim { double flux(int maxP=-1) const; double apertureFlux(double R, int maxP=-1) const; -#if 0 - // Return reference to a matrix that generates ???realPsi transformations - // under infinitesimal point transforms (translate, dilate, shear). - // Returned matrix is at least as large as needed to go order x (order+2) - // The choices for generators: - enum GType { iX = 0, iY, iMu, iE1, iE2, iRot, nGen }; - static const tmv::ConstMatrixView Generator( - GType iparam, int orderOut, int orderIn); -#endif - - shared_ptr getOwner() const { return _owner; } - private: - // real vs fourier is set by the type of psi. - // For real, T = double - // For fourier, T = std::complex - template - static void mBasis( - const tmv::ConstVectorView& x, const tmv::ConstVectorView& y, - const tmv::ConstVectorView* invsig, - tmv::MatrixView psi, int order, double sigma=1.); void allocateMem() { int s = PQIndex::size(_order); - _v.reset(new tmv::Vector(s)); - _owner.reset(_v->ptr(), LVectorDeleter(_v)); + _v.reset(new VectorXd(s)); } int _order; - shared_ptr > _v; - shared_ptr _owner; + shared_ptr _v; }; std::ostream& operator<<(std::ostream& os, const LVector& lv); std::istream& operator>>(std::istream& is, LVector& lv); -#if 0 - // To allow iteration over all the generators: - inline LVector::GType& operator++(LVector::GType& g) { return g=LVector::GType(g+1); } -#endif - // This function finds the innermost radius at which the integrated flux // of the LVector's shape crosses the specified threshold, using the first // maxP monopole terms (or all, if maxP omitted) extern double fluxRadius(const LVector& lv, double threshold, int maxP=-1); - - // NB. The LTransform class is not currently used by anything in GalSim. - // Plus, there are not even any implemenations of the MakeLTransform functions below. 
-#if 0 - - //-------------------------------------------------------------- - // - // Next class is a transformation matrix for Laguerre vector. Internal - // storage is as a matrix over the real degrees of freedom. - // Interface gives you the (complex) matrix elements of pqIndex pairs. - - // Again this is a HANDLE, so it can be passed into - // subroutines without referencing. Copy/assignment create a new link; - // for fresh copy, use copy() method. - class LTransform - { - public: - LTransform(int orderOut, int orderIn) : - _orderIn(orderIn), _orderOut(orderOut), - _m(new tmv::Matrix(PQIndex::size(orderOut),PQIndex::size(orderIn),0.)) - {} - - // Build an LTransform from a tmv::Matrix for the real degrees of freedom. - // Matrix must have correct dimensions. - LTransform(int orderOut, int orderIn, const tmv::GenMatrix& m) : - _orderIn(orderIn), _orderOut(orderOut), - _m(new tmv::Matrix(m)) - { - assert(m.ncols() == PQIndex::size(orderIn)); - assert(m.nrows() == PQIndex::size(orderOut)); - } - - LTransform(int orderOut, int orderIn, shared_ptr > m) : - _orderIn(orderIn), _orderOut(orderOut), _m(m) - { - assert(m->ncols() == PQIndex::size(orderIn)); - assert(m->nrows() == PQIndex::size(orderOut)); - } - - LTransform(const LTransform& rhs) : - _orderIn(rhs._orderIn), _orderOut(rhs._orderOut), _m(rhs._m) {} - - LTransform& operator=(const LTransform& rhs) - { - if (_m.get()==rhs._m.get()) return *this; - _orderIn=rhs._orderIn; _orderOut=rhs._orderOut; _m = rhs._m; - return *this; - } - - ~LTransform() {} - - LTransform copy() const - { - LTransform fresh(_orderOut, _orderIn); - *(fresh._m) = *_m; - return fresh; - } - - int getOrderIn() const { return _orderIn; } - int getOrderOut() const { return _orderOut; } - int sizeIn() const { return _m->ncols(); } - int sizeOut() const { return _m->nrows(); } - - void resize(int orderOut, int orderIn) - { - if (_orderIn != orderIn || _orderOut != orderOut) { - _orderIn = orderIn; - _orderOut = orderOut; - _m.reset(new 
tmv::Matrix( - PQIndex::size(orderOut), PQIndex::size(orderIn), 0.)); - } else { - take_ownership(); - } - } - - // As above, we use take_ownership() to implement Copy on Write semantics. - void take_ownership() - { if (!_m.unique()) { _m.reset(new tmv::Matrix(*_m)); } } - - void clear() { take_ownership(); _m->setZero(); } - void identity() { take_ownership(); _m->setToIdentity(); } - - // Access the real-representation vector directly. - tmv::Matrix& rMatrix() { take_ownership(); return *_m; } - const tmv::Matrix& rMatrix() const { return *_m; } - - // Element read - std::complex operator()(PQIndex pq1, PQIndex pq2) const; - std::complex operator()(int p1, int q1, int p2, int q2) const - { return operator()(PQIndex(p1,q1),PQIndex(p2,q2)); } - - // Element write. Note that it is necessary to give two complex - // simultaneously to allow writing the real version of the matrix: - void set( - PQIndex pq1, PQIndex pq2, - std::complex Cpq1pq2, std::complex Cqp1pq2); - - // Operate on other Laguerre vectors/matrices - LVector operator*(const LVector rhs) const; - LTransform operator*(const LTransform rhs) const; - LTransform& operator*=(const LTransform rhs); - - private: - int _orderIn; - int _orderOut; - shared_ptr > _m; - }; - - // Here are the primary types of transformations: - // For the point transforms, set coordShift=false if we want - // to transform the FLUX on a fixed coordinate grid. Set true - // if want to describe the same flux on a transformed COORD system. 
- - // Shear: - LTransform MakeLTransform( - CppShear eta, int orderOut, int orderIn, bool coordShift=false); - - // Dilation: - LTransform MakeLTransform( - double mu, int orderOut, int orderIn, bool coordShift=false); - - // Translation: - LTransform MakeLTransform( - Position x0, int orderOut, int orderIn, bool coordShift=false); - - // Rotation: - LTransform RotationLTransform( - double theta, int orderOut, int orderIn, bool coordShift=false); - - // Convolution with PSF: - LTransform MakeLTransform( - const LVector psf, const double D, - const int orderOut, const int orderIn, const int orderStar); - -#endif // LTransform section - } #endif diff --git a/include/galsim/SBShapeletImpl.h b/include/galsim/SBShapeletImpl.h index d0d44a26f96..6fea4f31ef9 100644 --- a/include/galsim/SBShapeletImpl.h +++ b/include/galsim/SBShapeletImpl.h @@ -73,14 +73,6 @@ namespace galsim { double kx0, double dkx, double dkxy, double ky0, double dky, double dkyx) const; - // The above functions just build a list of (x,y) values and then call these: - void fillXValue(tmv::MatrixView val, - const tmv::Matrix& x, - const tmv::Matrix& y) const; - void fillKValue(tmv::MatrixView > val, - const tmv::Matrix& kx, - const tmv::Matrix& ky) const; - std::string serialize() const; private: diff --git a/pysrc/SBShapelet.cpp b/pysrc/SBShapelet.cpp index 797dfb122bd..e5c0e45b7ec 100644 --- a/pysrc/SBShapelet.cpp +++ b/pysrc/SBShapelet.cpp @@ -31,8 +31,7 @@ namespace galsim { double* data = reinterpret_cast(idata); int size = PQIndex::size(order); - tmv::VectorView v = tmv::VectorViewOf(data, size); - v = bvec.rVector(); + for (int i=0; i(idata); int size = PQIndex::size(order); - LVector bvec(order, tmv::VectorViewOf(data, size)); + VectorXd v(size); + for (int i=0; i +#else +using Eigen::Dynamic; +#define MatrixXT Eigen::Matrix +#endif + namespace galsim { std::string LVector::repr() const @@ -74,131 +81,6 @@ namespace galsim { } } -#if 0 - // routines to retrieve and save complex elements of 
LTransform: - // ???? Check these ??? - std::complex LTransform::operator()(PQIndex pq1, PQIndex pq2) const - { - assert(pq1.pqValid() && !pq1.pastOrder(_orderOut)); - assert(pq2.pqValid() && !pq2.pastOrder(_orderIn)); - int r1index=pq1.rIndex(); - int r2index=pq2.rIndex(); - int i1index=(pq1.isReal()? r1index: r1index+1); - int i2index=(pq2.isReal()? r2index: r2index+1); - - double x = (*_m)(r1index,r2index) + pq1.iSign()*pq2.iSign()*(*_m)(i1index,i2index); - double y = pq1.iSign()*(*_m)(i1index,r2index) - pq2.iSign()*(*_m)(r1index,i2index); - - std::complex z(x,y); - if (pq2.isReal()) z *= 0.5; - - return z; - } - - void LTransform::set( - PQIndex pq1, PQIndex pq2, std::complex Cpq1pq2, std::complex Cqp1pq2) - { - assert(pq1.pqValid() && !pq1.pastOrder(_orderOut)); - assert(pq2.pqValid() && !pq2.pastOrder(_orderIn)); - - take_ownership(); - const double RoundoffTolerance=1.e-15; - std::complex Cpq1qp2; - - if (pq2.needsConjugation()) { - pq2 = pq2.swapPQ(); - std::complex tmp=conj(Cqp1pq2); - Cqp1pq2 = conj(Cpq1pq2); - Cpq1pq2 = tmp; - } - if (pq1.needsConjugation()) { - pq1 = pq1.swapPQ(); - std::complex tmp=Cqp1pq2; - Cqp1pq2 = Cpq1pq2; - Cpq1pq2 = tmp; - } - - int rIndex1 = pq1.rIndex(); - int rIndex2 = pq2.rIndex(); - int iIndex1 = rIndex1+1; - int iIndex2 = rIndex2+1; - - if (pq1.isReal()) { - if (Cpq1pq2!=Cqp1pq2) { - FormatAndThrow<>() - << "Invalid LTransform elements for p1=q1, " << Cpq1pq2 - << " != " << Cqp1pq2; - } - (*_m)(rIndex1,rIndex2) = Cpq1pq2.real() * (pq2.isReal()? 1. 
: 2.); - if (pq2.isReal()) { - if (std::abs(Cpq1pq2.imag()) > RoundoffTolerance) { - FormatAndThrow<>() - << "Nonzero imaginary LTransform elements for p1=q1, p2=q2: " - << Cpq1pq2; - } - } else { - (*_m)(rIndex1,iIndex2) = -2.*Cpq1pq2.imag(); - } - return; - } else if (pq2.isReal()) { - // Here we know p1!=q1: - if (norm(Cpq1pq2-conj(Cqp1pq2))>RoundoffTolerance) { - FormatAndThrow<>() - << "Inputs to LTransform.set are not conjugate for p2=q2: " - << Cpq1pq2 << " vs " << Cqp1pq2 ; - } - (*_m)(rIndex1, rIndex2) = Cpq1pq2.real(); - (*_m)(iIndex1, rIndex2) = Cpq1pq2.imag(); - } else { - // Neither pq is real: - std::complex z=Cpq1pq2 + Cqp1pq2; - (*_m)(rIndex1, rIndex2) = z.real(); - (*_m)(rIndex1, iIndex2) = -z.imag(); - z=Cpq1pq2 - Cqp1pq2; - (*_m)(iIndex1, rIndex2) = z.imag(); - (*_m)(iIndex1, iIndex2) = z.real(); - } - } - - LVector LTransform::operator*(const LVector rhs) const - { - if (_orderIn != rhs.getOrder()) - FormatAndThrow<>() - << "Order mismatch between LTransform [" << _orderIn - << "] and LVector [" << rhs.getOrder() - << "]"; - shared_ptr > out(new tmv::Vector(sizeOut())); - *out = (*_m) * rhs.rVector(); - return LVector(_orderOut, out); - } - - LTransform LTransform::operator*(const LTransform rhs) const - { - if (_orderIn != rhs.getOrderOut()) - FormatAndThrow<>() - << "Order mismatch between LTransform [" << _orderIn - << "] and LTransform [" << rhs.getOrderOut() - << "]"; - shared_ptr > out( - new tmv::Matrix(sizeOut(),rhs.sizeIn())); - *out = (*_m) * (*rhs._m); - return LTransform(_orderOut, rhs._orderIn, out); - } - - LTransform& LTransform::operator*=(const LTransform rhs) - { - take_ownership(); - if (_orderIn != rhs.getOrderOut()) - FormatAndThrow<>() - << "Order mismatch between LTransform [" << _orderIn - << "] and LTransform [" << rhs.getOrderOut() - << "]"; - (*_m) *= (*rhs._m); - _orderIn = rhs._orderOut; - return *this; - } -#endif - //---------------------------------------------------------------- 
//---------------------------------------------------------------- // Calculate Laguerre polynomials and wavefunctions: @@ -258,72 +140,89 @@ namespace galsim { } } - shared_ptr > LVector::basis( - const tmv::ConstVectorView& x, const tmv::ConstVectorView& y, + shared_ptr LVector::basis( + const VectorXd& x, const VectorXd& y, int order, double sigma) { assert(x.size()==y.size()); - shared_ptr > psi( - new tmv::Matrix(x.size(), PQIndex::size(order))); - basis(x, y, psi->view(), order, sigma); + shared_ptr psi(new MatrixXd(x.size(), PQIndex::size(order))); + basis(x, y, *psi, order, sigma); return psi; } + // Forward declaration. Implemented below. + template + void CalculateBasis( + const VectorXd& x, const VectorXd& y, const VectorXd* invsig, + MatrixXT& psi, + int order, double sigma); + void LVector::basis( - const tmv::ConstVectorView& x, const tmv::ConstVectorView& y, - tmv::MatrixView psi, int order, double sigma) + const VectorXd& x, const VectorXd& y, + MatrixXd& psi, int order, double sigma) { +#ifdef USE_TMV assert(y.size() == x.size() && psi.nrows() == x.size()); assert(psi.ncols()==PQIndex::size(order)); - mBasis(x, y, 0, psi, order, sigma); +#else + assert(y.size() == x.size() && psi.rows() == x.size()); + assert(psi.cols()==PQIndex::size(order)); +#endif + CalculateBasis(x, y, 0, psi, order, sigma); } - shared_ptr > LVector::design( - const tmv::ConstVectorView& x, const tmv::ConstVectorView& y, - const tmv::ConstVectorView& invsig, int order, double sigma) + shared_ptr LVector::design( + const VectorXd& x, const VectorXd& y, + const VectorXd& invsig, int order, double sigma) { - shared_ptr > psi( - new tmv::Matrix(x.size(), PQIndex::size(order))); - design(x, y, invsig, psi->view(), order, sigma); + shared_ptr psi(new MatrixXd(x.size(), PQIndex::size(order))); + design(x, y, invsig, *psi, order, sigma); return psi; } void LVector::design( - const tmv::ConstVectorView& x, const tmv::ConstVectorView& y, - const tmv::ConstVectorView& invsig, - 
tmv::MatrixView psi, int order, double sigma) + const VectorXd& x, const VectorXd& y, + const VectorXd& invsig, + MatrixXd& psi, int order, double sigma) { +#ifdef USE_TMV assert(y.size() == x.size() && psi.nrows() == x.size() && invsig.size() == x.size()); assert(psi.ncols()==PQIndex::size(order)); - mBasis(x, y, &invsig, psi, order, sigma); +#else + assert(y.size() == x.size() && psi.rows() == x.size() && invsig.size() == x.size()); + assert(psi.cols()==PQIndex::size(order)); +#endif + CalculateBasis(x, y, &invsig, psi, order, sigma); } - shared_ptr > > LVector::kBasis( - const tmv::ConstVectorView& kx, const tmv::ConstVectorView& ky, + shared_ptr LVector::kBasis( + const VectorXd& kx, const VectorXd& ky, int order, double sigma) { assert (ky.size() == kx.size()); - const int ndof=PQIndex::size(order); - const int npts = kx.size(); - shared_ptr > > psi_k( - new tmv::Matrix >(npts, ndof, 0.)); - kBasis(kx,ky,psi_k->view(),order,sigma); + shared_ptr psi_k(new MatrixXcd(kx.size(), PQIndex::size(order))); + kBasis(kx,ky,*psi_k,order,sigma); return psi_k; } void LVector::kBasis( - const tmv::ConstVectorView& kx, const tmv::ConstVectorView& ky, - tmv::MatrixView > psi_k, int order, double sigma) + const VectorXd& kx, const VectorXd& ky, + MatrixXcd& psi_k, int order, double sigma) { +#ifdef USE_TMV assert(ky.size() == kx.size() && psi_k.nrows() == kx.size()); assert(psi_k.ncols()==PQIndex::size(order)); - mBasis(kx, ky, 0, psi_k, order, sigma); +#else + assert(ky.size() == kx.size() && psi_k.rows() == kx.size()); + assert(psi_k.cols()==PQIndex::size(order)); +#endif + CalculateBasis(kx, ky, 0, psi_k, order, sigma); } // This helper class deals with the differences between the real and fourier calculations - // in mBasis. First the real-space values: + // in CalculateBasis. 
First the real-space values: template - struct mBasisHelper + struct BasisHelper { static double Asign(int ) { return 1.; } @@ -335,7 +234,7 @@ namespace galsim { // Now the fourier space version, marked by T being complex. template - struct mBasisHelper > + struct BasisHelper > { // The "sign" of the eigenvectors are 1, -I, -1, I, and then repeat. // The input m4 should be m%4. @@ -357,13 +256,16 @@ namespace galsim { }; template - void LVector::mBasis( - const tmv::ConstVectorView& x, const tmv::ConstVectorView& y, - const tmv::ConstVectorView* invsig, - tmv::MatrixView psi, int order, double sigma) + void CalculateBasis( + const VectorXd& x, const VectorXd& y, const VectorXd* invsig, + MatrixXT& psi, int order, double sigma) { assert (y.size()==x.size()); +#ifdef USE_TMV assert (psi.nrows()==x.size() && psi.ncols()==PQIndex::size(order)); +#else + assert (psi.rows()==x.size() && psi.cols()==PQIndex::size(order)); +#endif const int N=order; const int npts_full = x.size(); @@ -375,12 +277,14 @@ namespace galsim { const int BLOCKING_FACTOR=4096; const int max_npts = std::max(BLOCKING_FACTOR,npts_full); - tmv::DiagMatrix Rsq_full(max_npts); - tmv::Matrix A_full(max_npts,2); - tmv::Matrix tmp_full(max_npts,2); - tmv::DiagMatrix Lmq_full(max_npts); - tmv::DiagMatrix Lmqm1_full(max_npts); - tmv::DiagMatrix Lmqm2_full(max_npts); + VectorXd Rsq_full(max_npts); + MatrixXd A_full(max_npts,2); + MatrixXd tmp_full(max_npts,2); + VectorXd Lmq_full(max_npts); + VectorXd Lmqm1_full(max_npts); + VectorXd Lmqm2_full(max_npts); + + psi.setZero(); for (int ilo=0; ilo X = DiagMatrixViewOf(x.subVector(ilo,ihi)); - tmv::ConstDiagMatrixView Y = DiagMatrixViewOf(y.subVector(ilo,ihi)); +#ifdef USE_TMV + tmv::ConstVectorView X = x.subVector(ilo,ihi); + tmv::ConstVectorView Y = y.subVector(ilo,ihi); +#else + Eigen::VectorBlock X = x.segment(ilo,ihi-ilo); + Eigen::VectorBlock Y = y.segment(ilo,ihi-ilo); +#endif // Get the appropriate portion of our temporary matrices. 
- tmv::DiagMatrixView Rsq = Rsq_full.subDiagMatrix(0,npts); +#ifdef USE_TMV + tmv::VectorView Rsq = Rsq_full.subVector(0,npts); tmv::MatrixView A = A_full.rowRange(0,npts); tmv::MatrixView tmp = tmp_full.rowRange(0,npts); +#else + Eigen::VectorBlock Rsq = Rsq_full.segment(0,npts); + Eigen::Block A = A_full.topRows(npts); + Eigen::Block tmp = tmp_full.topRows(npts); +#endif // We need rsq values twice, so store them here. - Rsq = X*X; - Rsq += Y*Y; +#ifdef USE_TMV + Rsq = ElemProd(X,X); + Rsq += ElemProd(Y,Y); +#else + Rsq.array() = X.array() * X.array(); + Rsq.array() += Y.array() * Y.array(); +#endif // This matrix will keep track of real & imag parts // of prefactor * exp(-r^2/2) (x+iy)^m / sqrt(m!) // Build the Gaussian factor +#ifdef USE_TMV for (int i=0; i::applyPrefactor(A.col(0),sigma); +#else + for (int i=0; i::applyPrefactor(A.col(0),sigma); A.col(1).setZero(); // Put 1/sigma factor into every point if doing a design matrix: +#ifdef USE_TMV if (invsig) A.col(0) *= tmv::DiagMatrixViewOf(invsig->subVector(ilo,ihi)); +#else + if (invsig) A.col(0).array() *= invsig->segment(ilo,ihi-ilo).array(); +#endif // Assign the m=0 column first: - psi.col( PQIndex(0,0).rIndex(), ilo,ihi ) = A.col(0); +#ifdef USE_TMV + psi.col(PQIndex(0,0).rIndex(), ilo,ihi) = A.col(0); +#else + psi.col(PQIndex(0,0).rIndex()).segment(ilo,ihi-ilo) = A.col(0).cast(); +#endif // Then ascend m's at q=0: for (int m=1; m<=N; m++) { int rIndex = PQIndex(m,0).rIndex(); // Multiply by (X+iY)/sqrt(m), including a factor 2 first time through - tmp = Y * A; - A = X * A; +#ifdef USE_TMV + tmp = DiagMatrixViewOf(Y) * A; + A = DiagMatrixViewOf(X) * A; +#else + tmp = Y.asDiagonal() * A; + A = X.asDiagonal() * A; +#endif A.col(0) += tmp.col(1); A.col(1) -= tmp.col(0); A *= m==1 ? 2. 
: 1./sqrtn(m); - psi.subMatrix(ilo,ihi,rIndex,rIndex+2) = mBasisHelper::Asign(m%4) * A; +#ifdef USE_TMV + psi.subMatrix(ilo,ihi,rIndex,rIndex+2) = BasisHelper::Asign(m%4) * A; +#else + psi.block(ilo,rIndex,ihi-ilo,2) = BasisHelper::Asign(m%4) * A; +#endif } - // Make three DiagMatrix to hold Lmq's during recurrence calculations - shared_ptr > Lmq( - new tmv::DiagMatrixView(Lmq_full.subDiagMatrix(0,npts))); - shared_ptr > Lmqm1( - new tmv::DiagMatrixView(Lmqm1_full.subDiagMatrix(0,npts))); - shared_ptr > Lmqm2( - new tmv::DiagMatrixView(Lmqm2_full.subDiagMatrix(0,npts))); + // Make three Vectors to hold Lmq's during recurrence calculations +#ifdef USE_TMV + shared_ptr > Lmq( + new tmv::VectorView(Lmq_full.subVector(0,npts))); + shared_ptr > Lmqm1( + new tmv::VectorView(Lmqm1_full.subVector(0,npts))); + shared_ptr > Lmqm2( + new tmv::VectorView(Lmqm2_full.subVector(0,npts))); +#else + shared_ptr > Lmq( + new Eigen::VectorBlock(Lmq_full.segment(0,npts))); + shared_ptr > Lmqm1( + new Eigen::VectorBlock(Lmqm1_full.segment(0,npts))); + shared_ptr > Lmqm2( + new Eigen::VectorBlock(Lmqm2_full.segment(0,npts))); +#endif for (int m=0; m<=N; m++) { PQIndex pq(m,0); @@ -447,14 +397,31 @@ namespace galsim { const int q = pq.getQ(); const int iQ = pq.rIndex(); +#ifdef USE_TMV Lmqm1->setAllTo(1.); // This is Lm0. - *Lmq = Rsq - (p+q-1.); - *Lmq *= mBasisHelper::Lsign(1.) / (sqrtn(p)*sqrtn(q)); + *Lmq = Rsq; + Lmq->addToAll(-(p+q-1.)); +#else + Lmqm1->setConstant(1.); + Lmq->array() = Rsq.array() - (p+q-1.); +#endif + *Lmq *= BasisHelper::Lsign(1.) 
/ (sqrtn(p)*sqrtn(q)); if (m==0) { - psi.col(iQ,ilo,ihi) = (*Lmq) * psi.col(iQ0,ilo,ihi); +#ifdef USE_TMV + psi.col(iQ,ilo,ihi) = DiagMatrixViewOf(*Lmq) * psi.col(iQ0,ilo,ihi); +#else + psi.col(iQ).segment(ilo,ihi-ilo) = Lmq->asDiagonal() * + psi.col(iQ0).segment(ilo,ihi-ilo); +#endif } else { - psi.subMatrix(ilo,ihi,iQ,iQ+2) = (*Lmq) * psi.subMatrix(ilo,ihi,iQ0,iQ0+2); +#ifdef USE_TMV + psi.subMatrix(ilo,ihi,iQ,iQ+2) = DiagMatrixViewOf(*Lmq) * + psi.subMatrix(ilo,ihi,iQ0,iQ0+2); +#else + psi.block(ilo,iQ,ihi-ilo,2) = Lmq->asDiagonal() * + psi.block(ilo,iQ0,ihi-ilo,2); +#endif } } @@ -472,21 +439,37 @@ namespace galsim { Lmqm1.swap(Lmq); double invsqrtpq = 1./sqrtn(p)/sqrtn(q); - *Lmq = Rsq - (p+q-1.); - *Lmq *= mBasisHelper::Lsign(invsqrtpq) * *Lmqm1; +#ifdef USE_TMV + *Lmq = Rsq; + Lmq->addToAll(-(p+q-1.)); + *Lmq = BasisHelper::Lsign(invsqrtpq) * ElemProd(*Lmq, *Lmqm1); +#else + Lmq->array() = Rsq.array() - (p+q-1.); + Lmq->array() *= BasisHelper::Lsign(invsqrtpq) * Lmqm1->array(); +#endif *Lmq -= (sqrtn(p-1)*sqrtn(q-1)*invsqrtpq) * (*Lmqm2); if (m==0) { - psi.col(iQ,ilo,ihi) = (*Lmq) * psi.col(iQ0,ilo,ihi); +#ifdef USE_TMV + psi.col(iQ,ilo,ihi) = DiagMatrixViewOf(*Lmq) * psi.col(iQ0,ilo,ihi); +#else + psi.col(iQ).segment(ilo,ihi-ilo) = Lmq->asDiagonal() * + psi.col(iQ0).segment(ilo,ihi-ilo); +#endif } else { - psi.subMatrix(ilo,ihi,iQ,iQ+2) = (*Lmq) * psi.subMatrix(ilo,ihi,iQ0,iQ0+2); +#ifdef USE_TMV + psi.subMatrix(ilo,ihi,iQ,iQ+2) = DiagMatrixViewOf(*Lmq) * + psi.subMatrix(ilo,ihi,iQ0,iQ0+2); +#else + psi.block(ilo,iQ,ihi-ilo,2) = Lmq->asDiagonal() * + psi.block(ilo,iQ0,ihi-ilo,2); +#endif } } } } } - //--------------------------------------------------------------------------- //--------------------------------------------------------------------------- // Flux determinations @@ -502,7 +485,7 @@ namespace galsim { double LVector::apertureFlux(double R_, int maxP) const { - static shared_ptr > fp; + static shared_ptr fp; static double R=-1.; static double 
psize=-1; @@ -512,11 +495,11 @@ namespace galsim { if (maxP > getOrder()/2) maxP=getOrder()/2; if (!fp.get() || R_ != R || maxP>psize) { - fp.reset(new tmv::Vector(maxP)); + fp.reset(new VectorXd(maxP)); psize = maxP; R = R_; - tmv::Vector Lp(maxP+1); - tmv::Vector Qp(maxP+1); + VectorXd Lp(maxP+1); + VectorXd Qp(maxP+1); double x = R*R; double efact = std::exp(-0.5*x); Lp[0] = Qp[0]=1.; @@ -600,254 +583,6 @@ namespace galsim { << "," << std::setw(2) << getQ() ; } -#if 0 - // Transformation generators - these return a view into static quantities: - const tmv::ConstMatrixView LVector::Generator( - GType iparam, int orderOut, int orderIn) - { - static shared_ptr > gmu; - static shared_ptr > gx; - static shared_ptr > gy; - static shared_ptr > ge1; - static shared_ptr > ge2; - static shared_ptr > grot; - - const int sizeIn = PQIndex::size(orderIn); - const int sizeOut = PQIndex::size(orderOut); - - const int order = std::max(orderOut, orderIn); - if (iparam==iMu) { - if (!gmu.get() || gmu->nrows() zz(-1.,0.); - if (pq.isReal()) lt.set(pq,pq,zz, zz); - else lt.set(pq,pq,zz, 0.); - PQIndex pqprime(p+1, q+1); - if (!pqprime.pastOrder(order)) { - zz = std::complex(-sqrtn(p+1)*sqrtn(q+1), 0.); - if (pq.isReal()) lt.set(pq,pqprime,zz, zz); - else lt.set(pq,pqprime,zz, 0.); - } - if (q>0) { - pqprime.setPQ(p-1,q-1); - zz = std::complex(sqrtn(p)*sqrtn(q), 0.); - if (pq.isReal()) lt.set(pq,pqprime,zz, zz); - else lt.set(pq,pqprime,zz, 0.); - } - } - gmu.reset(new tmv::Matrix(lt.rMatrix())); - } - return gmu->subMatrix(0, sizeOut, 0, sizeIn); - } - if (iparam==iX) { - if (!gx.get() || gx->nrows() zz(-0.5*sqrtn(p+1),0.); - if (pq.isReal()) { - if (!pqprime.pastOrder(order)) lt.set(pq,pqprime,zz, zz); - if (p>0) { - zz = std::complex(0.5*sqrtn(p), 0.); - pqprime.setPQ(p-1,q); - lt.set(pq,pqprime,zz, zz); - } - } else { - if (!pqprime.pastOrder(order)) { - lt.set(pq,pqprime,zz, 0.); - pqprime.setPQ(p, q+1); - zz = std::complex(-0.5*sqrtn(q+1),0.); - if (pq.m()==1) { - 
lt.set(pq,pqprime, zz, zz); - } else { - lt.set(pq,pqprime, zz, 0.); - } - } - pqprime.setPQ(p-1,q); - zz = std::complex(0.5*sqrtn(p), 0.); - if (pq.m()==1) { - lt.set(pq,pqprime, zz, zz); - } else { - lt.set(pq,pqprime, zz, 0.); - } - if (q>0) { - pqprime.setPQ(p,q-1); - zz = std::complex(0.5*sqrtn(q), 0.); - lt.set(pq,pqprime, zz, 0.); - } - } - } - gx.reset(new tmv::Matrix(lt.rMatrix())); - } - return gx->subMatrix(0, sizeOut, 0, sizeIn); - } - - if (iparam==iY) { - if (!gy.get() || gy->nrows() zz(0.,-0.5*sqrtn(p+1)); - if (pq.isReal()) { - if (!pqprime.pastOrder(order)) lt.set(pq,pqprime,zz, zz); - if (p>0) { - zz = std::complex(0.,0.5*sqrtn(q)); - pqprime.setPQ(p,q-1); - lt.set(pq,pqprime,zz, zz); - } - } else { - if (!pqprime.pastOrder(order)) { - lt.set(pq,pqprime,zz, 0.); - pqprime.setPQ(p, q+1); - zz = std::complex(0.,0.5*sqrtn(q+1)); - if (pq.m()==1) { - lt.set(pq,pqprime, zz, conj(zz)); - } else { - lt.set(pq,pqprime, zz, 0.); - } - } - pqprime.setPQ(p-1,q); - zz = std::complex(0.,-0.5*sqrtn(p)); - if (pq.m()==1) { - lt.set(pq,pqprime, zz, conj(zz)); - } else { - lt.set(pq,pqprime, zz, 0.); - } - if (q>0) { - pqprime.setPQ(p,q-1); - zz = std::complex(0.,0.5*sqrtn(q)); - lt.set(pq,pqprime, zz, 0.); - } - } - } - gy.reset(new tmv::Matrix(lt.rMatrix())); - } - return gy->subMatrix(0, sizeOut, 0, sizeIn); - } - - if (iparam==iE1) { - if (!ge1.get() || ge1->nrows() zz(-0.25*sqrtn(p+1)*sqrtn(p+2),0.); - if (pq.isReal()) { - if (!pqprime.pastOrder(order)) lt.set(pq,pqprime,zz, zz); - if (p>1) { - zz = std::complex(0.25*sqrtn(p)*sqrtn(p-1),0.); - pqprime.setPQ(p-2,q); - lt.set(pq,pqprime,zz, zz); - } - } else { - if (!pqprime.pastOrder(order)) { - lt.set(pq,pqprime,zz, 0.); - pqprime.setPQ(p, q+2); - zz = std::complex(-0.25*sqrtn(q+1)*sqrtn(q+2),0.); - if (pq.m()==2) { - lt.set(pq,pqprime, zz, zz); - } else { - lt.set(pq,pqprime, zz, 0.); - } - } - if (p>1) { - pqprime.setPQ(p-2,q); - zz = std::complex(0.25*sqrtn(p)*sqrtn(p-1),0.); - if (pq.m()==2) { - 
lt.set(pq,pqprime, zz, zz); - } else { - lt.set(pq,pqprime, zz, 0.); - } - if (q>1) { - pqprime.setPQ(p,q-2); - zz = std::complex(0.25*sqrtn(q)*sqrtn(q-1),0.); - lt.set(pq,pqprime, zz, 0.); - } - } - } - } - ge1.reset(new tmv::Matrix(lt.rMatrix())); - } - return ge1->subMatrix(0, sizeOut, 0, sizeIn); - } - - if (iparam==iE2) { - if (!ge2.get() || ge2->nrows() zz(0., -0.25*sqrtn(p+1)*sqrtn(p+2)); - if (pq.isReal()) { - if (!pqprime.pastOrder(order)) lt.set(pq,pqprime,zz, zz); - if (p>1) { - zz = std::complex(0.,-0.25*sqrtn(p)*sqrtn(p-1)); - pqprime.setPQ(p-2,q); - lt.set(pq,pqprime,zz, zz); - } - } else { - if (!pqprime.pastOrder(order)) { - lt.set(pq,pqprime,zz, 0.); - pqprime.setPQ(p, q+2); - zz = std::complex(0.,0.25*sqrtn(q+1)*sqrtn(q+2)); - if (pq.m()==2) { - lt.set(pq,pqprime, zz, conj(zz)); - } else { - lt.set(pq,pqprime, zz, 0.); - } - } - if (p>1) { - pqprime.setPQ(p-2,q); - zz = std::complex(0.,-0.25*sqrtn(p)*sqrtn(p-1)); - if (pq.m()==2) { - lt.set(pq,pqprime, zz, conj(zz)); - } else { - lt.set(pq,pqprime, zz, 0.); - } - if (q>1) { - pqprime.setPQ(p,q-2); - zz = std::complex(0.,0.25*sqrtn(q)*sqrtn(q-1)); - lt.set(pq,pqprime, zz, 0.); - } - } - } - } - ge2.reset(new tmv::Matrix(lt.rMatrix())); - } - return ge2->subMatrix(0, sizeOut, 0, sizeIn); - } - - if (iparam==iRot) { - // Rotation is diagonal - could use a DiagMatrix perhaps - if (!grot.get() || grot->nrows()0) lt.set(pq,pq, std::complex(0.,-m), 0.); - } - grot.reset(new tmv::Matrix(lt.rMatrix())); - } - return grot->subMatrix(0, sizeOut, 0, sizeIn); - } else { - throw std::runtime_error("Unknown parameter for LVector::Generator()"); - } - } -#endif - // Function to solve for radius enclosing a specified flux. // Return negative radius if no root is apparent. 
class FRSolve diff --git a/src/SBShapelet.cpp b/src/SBShapelet.cpp index 063caad6f74..1c8a2be794b 100644 --- a/src/SBShapelet.cpp +++ b/src/SBShapelet.cpp @@ -151,6 +151,33 @@ namespace galsim { const LVector& SBShapelet::SBShapeletImpl::getBVec() const { return _bvec; } LVector& SBShapelet::SBShapeletImpl::getBVec() { return _bvec; } + void FillXValue(const LVector& bvec, double sigma, + VectorXd& val, const VectorXd& x, const VectorXd& y) + { + dbg<<"order = "< MatrixXd; +typedef tmv::Vector VectorXd; +#else +#include "Eigen/Dense" +using Eigen::MatrixXd; +using Eigen::VectorXd; +#endif -#include "FFT.h" +#include "hsm/PSFCorr.h" #include "math/Nan.h" +#include "FFT.h" namespace galsim { namespace hsm { @@ -69,7 +78,7 @@ namespace hsm { void find_mom_2( ConstImageView data, - tmv::Matrix& moments, int max_order, + MatrixXd& moments, int max_order, double& x0, double& y0, double& sigma, double convergence_threshold, int& num_iter, const HSMParams& hsmparams); @@ -262,7 +271,7 @@ namespace hsm { results.moments_status = 0; } else { dbg<<"About to get moments using find_mom_2"< moments(3,3); + MatrixXd moments(3,3); double sig = guess_sig; find_mom_2(masked_object_image_cview, moments, 2, results.moments_centroid.x, results.moments_centroid.y, sig, @@ -439,7 +448,7 @@ namespace hsm { */ void qho1d_wf_1(long nx, double xmin, double xstep, long Nmax, double sigma, - tmv::Matrix& psi) + MatrixXd& psi) { double beta, beta2__2, norm0; @@ -509,7 +518,7 @@ namespace hsm { */ void find_mom_1( ConstImageView data, - tmv::Matrix& moments, int max_order, + MatrixXd& moments, int max_order, double x0, double y0, double sigma) { @@ -520,14 +529,21 @@ namespace hsm { int ny = data.getNRow(); int sx = data.getStep(); int sy = data.getStride(); - tmv::Matrix psi_x(nx, max_order+1); - tmv::Matrix psi_y(ny, max_order+1); + MatrixXd psi_x(nx, max_order+1); + MatrixXd psi_y(ny, max_order+1); /* Compute wavefunctions */ qho1d_wf_1(nx, (double)xmin - x0, 1., max_order, sigma, psi_x); 
qho1d_wf_1(ny, (double)ymin - y0, 1., max_order, sigma, psi_y); +#ifdef USE_TMV tmv::ConstMatrixView mdata(data.getData(),nx,ny,sx,sy,tmv::NonConj); +#else + using Eigen::Dynamic; + using Eigen::Stride; + Eigen::Map > mdata( + data.getData(),nx,ny, Stride(sy,sx)); +#endif moments = psi_x.transpose() * mdata * psi_y; } @@ -553,7 +569,7 @@ namespace hsm { void find_mom_2( ConstImageView data, - tmv::Matrix& moments, int max_order, + MatrixXd& moments, int max_order, double& x0, double& y0, double& sigma, double convergence_threshold, int& num_iter, const HSMParams& hsmparams) { @@ -562,7 +578,7 @@ namespace hsm { double convergence_factor = 1; /* Ensure at least one iteration. */ num_iter = 0; - tmv::Matrix iter_moments(hsmparams.adapt_order+1,hsmparams.adapt_order+1); + MatrixXd iter_moments(hsmparams.adapt_order+1,hsmparams.adapt_order+1); #ifdef N_CHECKVAL if (convergence_threshold <= 0) { @@ -664,7 +680,7 @@ namespace hsm { double Inv2Minv_xx = 0.5/Minv_xx; // Will be useful later... /* Generate Minv_xx__x_x0__x_x0 array */ - tmv::Vector Minv_xx__x_x0__x_x0(xmax-xmin+1); + VectorXd Minv_xx__x_x0__x_x0(xmax-xmin+1); for(int x=xmin;x<=xmax;x++) Minv_xx__x_x0__x_x0[x-xmin] = Minv_xx*(x-x0)*(x-x0); /* Now let's initialize the outputs and then sum @@ -725,7 +741,11 @@ namespace hsm { const double* imageptr = data.getPtr(ix1,y); const int step = data.getStep(); double x_x0 = ix1 - x0; +#ifdef USE_TMV const double* mxxptr = Minv_xx__x_x0__x_x0.cptr() + ix1-xmin; +#else + const double* mxxptr = Minv_xx__x_x0__x_x0.data() + ix1-xmin; +#endif for(int x=ix1;x<=ix2;++x,x_x0+=1.,imageptr+=step) { /* Compute displacement from weight centroid, then * get elliptical radius and weight. 
@@ -916,9 +936,20 @@ namespace hsm { dbg<<"image3: "< mIm1(image1.getData(),nx1,ny1,sx1,sy1,tmv::NonConj); tmv::ConstMatrixView mIm2(image2.getData(),nx2,ny2,sx2,sy2,tmv::NonConj); tmv::MatrixView mIm3(image_out.getData(),nx3,ny3,sx3,sy3,tmv::NonConj); +#else + using Eigen::Dynamic; + using Eigen::Stride; + Eigen::Map > mIm1( + image1.getData(),nx1,ny1, Stride(sy1,sx1)); + Eigen::Map > mIm2( + image2.getData(),nx2,ny2, Stride(sy2,sx2)); + Eigen::Map > mIm3( + image_out.getData(),nx3,ny3, Stride(sy3,sx3)); +#endif dbg<<"mIm1 = "< m1(dim1,dim1,0.); - tmv::Matrix m2(dim1,dim1,0.); - tmv::Matrix mout(dim1,dim1,0.); - tmv::Vector Ax(dim4,0.); - tmv::Vector Bx(dim4,0.); + MatrixXd m1(dim1,dim1,0.); + MatrixXd m2(dim1,dim1,0.); + MatrixXd mout(dim1,dim1,0.); + VectorXd Ax(dim4,0.); + VectorXd Bx(dim4,0.); /* Build input maps */ for(int x=image1.getXMin();x<=image1.getXMax();x++) @@ -1075,8 +1122,13 @@ namespace hsm { image_out(i,j) += mout(i-out_xref,j-out_yref); #endif dbg<<"Done: mIm3 => "< moments(hsmparams.ksb_moments_max+1,hsmparams.ksb_moments_max+1); - tmv::Matrix psfmoms(hsmparams.ksb_moments_max+1,hsmparams.ksb_moments_max+1); + MatrixXd moments(hsmparams.ksb_moments_max+1,hsmparams.ksb_moments_max+1); + MatrixXd psfmoms(hsmparams.ksb_moments_max+1,hsmparams.ksb_moments_max+1); /* Determine the adaptive centroid and variance of the measured galaxy */ x0 = x0_gal; From baf65faca10940afd7298fe79068c1da4ba007d9 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Tue, 2 Jan 2018 16:06:58 -0500 Subject: [PATCH 009/111] Make TMV optional in SConstruct (#809-pybind11) --- SConstruct | 209 ++++++++++++++++++++++++----------------------------- 1 file changed, 94 insertions(+), 115 deletions(-) diff --git a/SConstruct b/SConstruct index dba0dadc3ac..4a4bf4d01a3 100644 --- a/SConstruct +++ b/SConstruct @@ -84,12 +84,8 @@ opts.Add(PathVariable('FINAL_PREFIX', '', PathVariable.PathAccept)) opts.Add(BoolVariable('WITH_UPS','Install ups/ directory for use with EUPS', False)) 
-opts.Add('TMV_DIR','Explicitly give the tmv prefix','') -opts.Add('EIGEN_DIR','Explicitly give the Eigen prefix','') -opts.Add('TMV_LINK','File that contains the linking instructions for TMV','') opts.Add('FFTW_DIR','Explicitly give the fftw3 prefix','') -opts.Add('BOOST_DIR','Explicitly give the boost prefix','') -opts.Add(BoolVariable('USE_BOOST','Use boost python for the wrapping, rather than pybind11',False)) +opts.Add('EIGEN_DIR','Explicitly give the Eigen prefix','') opts.Add(PathVariable('EXTRA_INCLUDE_PATH', 'Extra paths for header files (separated by : if more than 1)', @@ -124,12 +120,19 @@ opts.Add(PathVariable('LD_LIBRARY_PATH', 'cf. DYLD_LIBRARY_PATH for why this may be useful.', '', PathVariable.PathAccept)) +opts.Add(BoolVariable('USE_TMV','Use TMV for linear algebra, rather than Eigen',False)) +opts.Add('TMV_DIR','Explicitly give the tmv prefix','') +opts.Add('TMV_LINK','File that contains the linking instructions for TMV','') +opts.Add(BoolVariable('TMV_DEBUG','Turn on extra debugging statements within TMV library',False)) + +opts.Add(BoolVariable('USE_BOOST','Use boost python for the wrapping, rather than pybind11',False)) +opts.Add('BOOST_DIR','Explicitly give the boost prefix','') + opts.Add('PYTEST','Name of pytest executable','') opts.Add(BoolVariable('CACHE_LIB','Cache the results of the library checks',True)) opts.Add(BoolVariable('WITH_PROF', 'Use the compiler flag -pg to include profiling info for gprof', False)) opts.Add(BoolVariable('MEM_TEST','Test for memory leaks', False)) -opts.Add(BoolVariable('TMV_DEBUG','Turn on extra debugging statements within TMV library',False)) # None of the code uses openmp yet. Re-enable this if we start using it. 
#opts.Add(BoolVariable('WITH_OPENMP','Look for openmp and use if found.', False)) opts.Add(BoolVariable('USE_UNKNOWN_VARS', @@ -530,19 +533,6 @@ def AddOpenMPFlag(env): flag = ['-mp','--exceptions'] ldflag = ['-mp'] xlib = ['pthread'] - elif compiler == 'cl': - #flag = ['/openmp'] - #ldflag = ['/openmp'] - #xlib = [] - # The Express edition, which is the one I have, doesn't come with - # the file omp.h, which we need. So I am unable to test TMV's - # OpenMP with cl. - # I believe the Professional edition has full OpenMP support, - # so if you have that, the above lines might work for you. - # Just uncomment those, and commend the below three lines. - print('No OpenMP support for cl') - env['WITH_OPENMP'] = False - return else: print('\nWARNING: No OpenMP support for compiler ',compiler,'\n') env['WITH_OPENMP'] = False @@ -726,6 +716,7 @@ def AddDepPaths(bin_paths,cpp_paths,lib_paths): continue if t == 'EIGEN': + # Eigen doesn't put its header files in an include subdirectory. AddPath(cpp_paths, tdir) else: AddPath(bin_paths, os.path.join(tdir, 'bin')) @@ -1100,6 +1091,75 @@ int main() return 0; } """ + tmv_version_file = """ +#include +#include "TMV.h" +int main() +{ std::cout< 10 or int(minor) >= 7) and '-latlas' not in tmv_link and + ('-lblas' in tmv_link or '-lcblas' in tmv_link)): + print('WARNING: The Apple BLAS library has been found not to be thread safe on') + print(' Mac OS versions 10.7+, even across multiple processes (i.e. not') + print(' just multiple threads in the same process.) The symptom is that') + print(' `scons tests` may hang when running pytest using multiple') + print(' processes.') + if xcode_version is None: + # If we couldn't run xcodebuild, then don't give any more information about this. + pass + elif xcode_version < '5.1': + print(' This seems to have been partially fixed with XCode 5.1, so we') + print(' recommend upgrading to the latest XCode version. 
However, even') + print(' with 5.1, some systems still seem to have problems.') + env['BAD_BLAS'] = True + else: + print(' This seems to have been partially fixed with XCode 5.1, so there') + print(' is a good chance you will not have any problems. But there are') + print(' still occasional systems that fail when using multithreading with') + print(' programs or modules that link to the BLAS library (such as GalSim).') + print(' If you do have problems, the solution is to recompile TMV with') + print(' the SCons option "WITH_BLAS=false".') + + # ParseFlags doesn't know about -fopenmp being a LINKFLAG, so it + # puts it into CCFLAGS instead. Move it over to LINKFLAGS before + # merging everything. + tmv_link_dict = config.env.ParseFlags(tmv_link) + config.env.Append(LIBS=tmv_link_dict['LIBS']) + config.env.AppendUnique(LINKFLAGS=tmv_link_dict['LINKFLAGS']) + config.env.AppendUnique(LINKFLAGS=tmv_link_dict['CCFLAGS']) + config.env.AppendUnique(LIBPATH=tmv_link_dict['LIBPATH']) + + compiler = config.env['CXXTYPE'] + if compiler == 'g++' and '-openmp' in config.env['LINKFLAGS']: + config.env['LINKFLAGS'].remove('-openmp') + config.env.AppendUnique(LINKFLAGS='-fopenmp') + print('Checking for correct TMV linkage... (this may take a little while)') config.Message('Checking for correct TMV linkage... ') @@ -1120,6 +1180,7 @@ int main() 'Error: TMV file failed to link correctly', 'Check that the correct location is specified for TMV_DIR') + config.env.AppendUnique(CPPDEFINES=['USE_TMV']) config.Result(1) return 1 @@ -1990,115 +2051,33 @@ def DoCppChecks(config): Check for some headers and libraries. """ - ##### - # Check for fftw3: - - # First do a simple check that the library and header are in the path. 
+ # FFTW if not config.CheckHeader('fftw3.h',language='C++'): ErrorExit( 'fftw3.h not found', 'You should specify the location of fftw3 as FFTW_DIR=...') - config.CheckFFTW() - ##### - # Check for boost: + # Boost if config.env['USE_BOOST']: config.CheckBoost() - ##### - # Check for tmv: - - # First do a simple check that the library and header are in the path. - # We check the linking with the BLAS library below. - if not config.CheckHeader('TMV.h',language='C++'): - ErrorExit( - 'TMV.h not found', - 'You should specify the location of TMV as TMV_DIR=...') - - tmv_version_file = """ -#include -#include "TMV.h" -int main() -{ std::cout< 10 or int(minor) >= 7) and '-latlas' not in tmv_link and - ('-lblas' in tmv_link or '-lcblas' in tmv_link)): - print('WARNING: The Apple BLAS library has been found not to be thread safe on') - print(' Mac OS versions 10.7+, even across multiple processes (i.e. not') - print(' just multiple threads in the same process.) The symptom is that') - print(' `scons tests` may hang when running pytest using multiple') - print(' processes.') - if xcode_version is None: - # If we couldn't run xcodebuild, then don't give any more information about this. - pass - elif xcode_version < '5.1': - print(' This seems to have been partially fixed with XCode 5.1, so we') - print(' recommend upgrading to the latest XCode version. However, even') - print(' with 5.1, some systems still seem to have problems.') - env['BAD_BLAS'] = True - else: - print(' This seems to have been partially fixed with XCode 5.1, so there') - print(' is a good chance you will not have any problems. 
But there are') - print(' still occasional systems that fail when using multithreading with') - print(' programs or modules that link to the BLAS library (such as GalSim).') - print(' If you do have problems, the solution is to recompile TMV with') - print(' the SCons option "WITH_BLAS=false".') - - # ParseFlags doesn't know about -fopenmp being a LINKFLAG, so it - # puts it into CCFLAGS instead. Move it over to LINKFLAGS before - # merging everything. - tmv_link_dict = config.env.ParseFlags(tmv_link) - config.env.Append(LIBS=tmv_link_dict['LIBS']) - config.env.AppendUnique(LINKFLAGS=tmv_link_dict['LINKFLAGS']) - config.env.AppendUnique(LINKFLAGS=tmv_link_dict['CCFLAGS']) - config.env.AppendUnique(LIBPATH=tmv_link_dict['LIBPATH']) - - if compiler == 'g++' and '-openmp' in config.env['LINKFLAGS']: - config.env['LINKFLAGS'].remove('-openmp') - config.env.AppendUnique(LINKFLAGS='-fopenmp') - - # Finally, do the tests for the TMV library linkage: - config.CheckTMV() + # TMV + if config.env['USE_TMV']: + if not config.CheckHeader('TMV.h',language='C++'): + ErrorExit( + 'TMV.h not found', + 'You should specify the location of TMV as TMV_DIR=...') + config.CheckTMV() def DoPyChecks(config): # These checks are only relevant for the pysrc compilation: config.CheckPython() - config.CheckPyTMV() - config.CheckEigen() + if config.env['USE_TMV']: + config.CheckPyTMV() + else: + config.CheckEigen() config.CheckNumPy() config.CheckPyFITS() config.CheckFuture() @@ -2163,7 +2142,7 @@ def DoConfig(env): print('Debugging turned off') env.AppendUnique(CPPDEFINES=['NDEBUG']) else: - if env['TMV_DEBUG']: + if env['USE_TMV'] and env['TMV_DEBUG']: print('TMV Extra Debugging turned on') env.AppendUnique(CPPDEFINES=['TMV_EXTRA_DEBUG']) From 2674501403b0230b1274dc7153dae9ff43213279 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Tue, 2 Jan 2018 16:59:19 -0500 Subject: [PATCH 010/111] Automatically get Eigen directory from eigency if possible (#809-pybind11) --- SConstruct | 22 
+++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/SConstruct b/SConstruct index 4a4bf4d01a3..8c0942da335 100644 --- a/SConstruct +++ b/SConstruct @@ -173,7 +173,6 @@ def ClearCache(): shutil.rmtree(".sconf_temp") def GetMacVersion(): - print('Mac version is',platform.mac_ver()[0]) ver = platform.mac_ver()[0].split('.') if len(ver) >= 2: return ver[:2] @@ -278,6 +277,7 @@ def ErrorExit(*args, **kwargs): if sys.platform.find('darwin') != -1: major, minor = GetMacVersion() if int(major) > 10 or int(minor) >= 11: + print('Mac version is %s.%s'%(major,minor)) print() print('Starting with El Capitan (OSX 10.11), Apple instituted a new policy called') print('"System Integrity Protection" (SIP) where they strip "dangerous" environment') @@ -1115,6 +1115,7 @@ int main() # if we can tell that this is what the TMV library is using. # Update: Even after 5.1, it still seems to have problems for some systems. major, minor = GetMacVersion() + print('Mac version is %s.%s'%(major,minor)) try: p = subprocess.Popen(['xcodebuild','-version'], stdout=subprocess.PIPE) xcode_version = p.stdout.readlines()[0].decode().split()[1] @@ -2070,6 +2071,25 @@ def DoCppChecks(config): 'You should specify the location of TMV as TMV_DIR=...') config.CheckTMV() + # Eigen + else: + ok = config.CheckHeader('Eigen/Core',language='C++') + if not ok: + # Try to get the correct include directory from eigency + try: + import eigency + config.env.Append(CPPPATH=eigency.get_includes()[2]) + ok = config.CheckHeader('Eigen/Core',language='C++') + except ImportError: + pass + if not ok: + ErrorExit( + 'Eigen/Core not found', + 'You should either specify the location of Eigen as EIGEN_DIR=... 
' + 'or install eigency using: \n' + ' pip install git+git://github.com/wouterboomsma/eigency.git') + + def DoPyChecks(config): # These checks are only relevant for the pysrc compilation: From 2948102cd95c0c2a162a75c1f750c2cff65dd60d Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Wed, 3 Jan 2018 01:25:08 -0500 Subject: [PATCH 011/111] Automatically get fftw3 library from pyfftw3 if possible (#809-pybind11) --- SConstruct | 24 ++- include/fftw3/fftw3.h | 381 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 399 insertions(+), 6 deletions(-) create mode 100644 include/fftw3/fftw3.h diff --git a/SConstruct b/SConstruct index 8c0942da335..4634230749b 100644 --- a/SConstruct +++ b/SConstruct @@ -1032,16 +1032,26 @@ int main() config.Message('Checking for correct FFTW linkage... ') if not config.TryCompile(fftw_source_file,'.cpp'): ErrorExit( - 'Error: fftw file failed to compile.', - 'Check that the correct location is specified for FFTW_DIR') + 'Error: fftw file failed to compile.') result = ( CheckLibsFull(config,[''],fftw_source_file) or CheckLibsFull(config,['fftw3'],fftw_source_file) ) + if not result: + # Try to get the correct library location from pyfftw3 + try: + import fftw3 + config.env.Append(LIBPATH=fftw3.lib.libdir) + result = CheckLibsFull(config,[fftw3.lib.libbase],fftw_source_file) + except ImportError: + pass if not result: ErrorExit( 'Error: fftw file failed to link correctly', - 'Check that the correct location is specified for FFTW_DIR') + 'You should either specify the location of fftw3 as FFTW_DIR=... ' + 'or install pyfftw3 using: \n' + ' pip install pyfftw3\n' + 'which can often find it automatically.') config.Result(1) return 1 @@ -2054,9 +2064,10 @@ def DoCppChecks(config): # FFTW if not config.CheckHeader('fftw3.h',language='C++'): - ErrorExit( - 'fftw3.h not found', - 'You should specify the location of fftw3 as FFTW_DIR=...') + # We have our own version of fftw3.h in case it's not easy to find this. 
+ # (The library location is often accessible via pyfftw3 if it is installed somewhere.) + print('Using local fftw3.h file in GalSim/include/fftw3') + config.env.Append(CPPPATH='#include/fftw3') config.CheckFFTW() # Boost @@ -2078,6 +2089,7 @@ def DoCppChecks(config): # Try to get the correct include directory from eigency try: import eigency + print('Trying eigency installation: ',eigency.get_includes()[2]) config.env.Append(CPPPATH=eigency.get_includes()[2]) ok = config.CheckHeader('Eigen/Core',language='C++') except ImportError: diff --git a/include/fftw3/fftw3.h b/include/fftw3/fftw3.h new file mode 100644 index 00000000000..6637303faca --- /dev/null +++ b/include/fftw3/fftw3.h @@ -0,0 +1,381 @@ +/* + * Copyright (c) 2003, 2007-8 Matteo Frigo + * Copyright (c) 2003, 2007-8 Massachusetts Institute of Technology + * + * The following statement of license applies *only* to this header file, + * and *not* to the other files distributed with FFTW or derived therefrom: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/***************************** NOTE TO USERS ********************************* + * + * THIS IS A HEADER FILE, NOT A MANUAL + * + * If you want to know how to use FFTW, please read the manual, + * online at http://www.fftw.org/doc/ and also included with FFTW. + * For a quick start, see the manual's tutorial section. + * + * (Reading header files to learn how to use a library is a habit + * stemming from code lacking a proper manual. Arguably, it's a + * *bad* habit in most cases, because header files can contain + * interfaces that are not part of the public, stable API.) + * + ****************************************************************************/ + +#ifndef FFTW3_H +#define FFTW3_H + +#include <stdio.h> + +#ifdef __cplusplus +extern "C" +{ +#endif /* __cplusplus */ + +/* If <complex.h> is included, use the C99 complex type.
Otherwise + define a type bit-compatible with C99 complex */ +#if !defined(FFTW_NO_Complex) && defined(_Complex_I) && defined(complex) && defined(I) +# define FFTW_DEFINE_COMPLEX(R, C) typedef R _Complex C +#else +# define FFTW_DEFINE_COMPLEX(R, C) typedef R C[2] +#endif + +#define FFTW_CONCAT(prefix, name) prefix ## name +#define FFTW_MANGLE_DOUBLE(name) FFTW_CONCAT(fftw_, name) +#define FFTW_MANGLE_FLOAT(name) FFTW_CONCAT(fftwf_, name) +#define FFTW_MANGLE_LONG_DOUBLE(name) FFTW_CONCAT(fftwl_, name) + +/* IMPORTANT: for Windows compilers, you should add a line + #define FFTW_DLL + here and in kernel/ifftw.h if you are compiling/using FFTW as a + DLL, in order to do the proper importing/exporting, or + alternatively compile with -DFFTW_DLL or the equivalent + command-line flag. This is not necessary under MinGW/Cygwin, where + libtool does the imports/exports automatically. */ +#if defined(FFTW_DLL) && (defined(_WIN32) || defined(__WIN32__)) + /* annoying Windows syntax for shared-library declarations */ +# if defined(COMPILING_FFTW) /* defined in api.h when compiling FFTW */ +# define FFTW_EXTERN extern __declspec(dllexport) +# else /* user is calling FFTW; import symbol */ +# define FFTW_EXTERN extern __declspec(dllimport) +# endif +#else +# define FFTW_EXTERN extern +#endif + +enum fftw_r2r_kind_do_not_use_me { + FFTW_R2HC=0, FFTW_HC2R=1, FFTW_DHT=2, + FFTW_REDFT00=3, FFTW_REDFT01=4, FFTW_REDFT10=5, FFTW_REDFT11=6, + FFTW_RODFT00=7, FFTW_RODFT01=8, FFTW_RODFT10=9, FFTW_RODFT11=10 +}; + +struct fftw_iodim_do_not_use_me { + int n; /* dimension size */ + int is; /* input stride */ + int os; /* output stride */ +}; + +#include <stddef.h> /* for ptrdiff_t */ +struct fftw_iodim64_do_not_use_me { + ptrdiff_t n; /* dimension size */ + ptrdiff_t is; /* input stride */ + ptrdiff_t os; /* output stride */ +}; + +/* + huge second-order macro that defines prototypes for all API + functions.
We expand this macro for each supported precision + + X: name-mangling macro + R: real data type + C: complex data type +*/ + +#define FFTW_DEFINE_API(X, R, C) \ + \ +FFTW_DEFINE_COMPLEX(R, C); \ + \ +typedef struct X(plan_s) *X(plan); \ + \ +typedef struct fftw_iodim_do_not_use_me X(iodim); \ +typedef struct fftw_iodim64_do_not_use_me X(iodim64); \ + \ +typedef enum fftw_r2r_kind_do_not_use_me X(r2r_kind); \ + \ +FFTW_EXTERN void X(execute)(const X(plan) p); \ + \ +FFTW_EXTERN X(plan) X(plan_dft)(int rank, const int *n, \ + C *in, C *out, int sign, unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_dft_1d)(int n, C *in, C *out, int sign, \ + unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_dft_2d)(int n0, int n1, \ + C *in, C *out, int sign, unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_dft_3d)(int n0, int n1, int n2, \ + C *in, C *out, int sign, unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_many_dft)(int rank, const int *n, \ + int howmany, \ + C *in, const int *inembed, \ + int istride, int idist, \ + C *out, const int *onembed, \ + int ostride, int odist, \ + int sign, unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_guru_dft)(int rank, const X(iodim) *dims, \ + int howmany_rank, \ + const X(iodim) *howmany_dims, \ + C *in, C *out, \ + int sign, unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_guru_split_dft)(int rank, const X(iodim) *dims, \ + int howmany_rank, \ + const X(iodim) *howmany_dims, \ + R *ri, R *ii, R *ro, R *io, \ + unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_guru64_dft)(int rank, \ + const X(iodim64) *dims, \ + int howmany_rank, \ + const X(iodim64) *howmany_dims, \ + C *in, C *out, \ + int sign, unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_guru64_split_dft)(int rank, \ + const X(iodim64) *dims, \ + int howmany_rank, \ + const X(iodim64) *howmany_dims, \ + R *ri, R *ii, R *ro, R *io, \ + unsigned flags); \ + \ +FFTW_EXTERN void X(execute_dft)(const X(plan) p, C *in, C *out); \ +FFTW_EXTERN void X(execute_split_dft)(const X(plan) p, 
R *ri, R *ii, \ + R *ro, R *io); \ + \ +FFTW_EXTERN X(plan) X(plan_many_dft_r2c)(int rank, const int *n, \ + int howmany, \ + R *in, const int *inembed, \ + int istride, int idist, \ + C *out, const int *onembed, \ + int ostride, int odist, \ + unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_dft_r2c)(int rank, const int *n, \ + R *in, C *out, unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_dft_r2c_1d)(int n,R *in,C *out,unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_dft_r2c_2d)(int n0, int n1, \ + R *in, C *out, unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_dft_r2c_3d)(int n0, int n1, \ + int n2, \ + R *in, C *out, unsigned flags); \ + \ + \ +FFTW_EXTERN X(plan) X(plan_many_dft_c2r)(int rank, const int *n, \ + int howmany, \ + C *in, const int *inembed, \ + int istride, int idist, \ + R *out, const int *onembed, \ + int ostride, int odist, \ + unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_dft_c2r)(int rank, const int *n, \ + C *in, R *out, unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_dft_c2r_1d)(int n,C *in,R *out,unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_dft_c2r_2d)(int n0, int n1, \ + C *in, R *out, unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_dft_c2r_3d)(int n0, int n1, \ + int n2, \ + C *in, R *out, unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_guru_dft_r2c)(int rank, const X(iodim) *dims, \ + int howmany_rank, \ + const X(iodim) *howmany_dims, \ + R *in, C *out, \ + unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_guru_dft_c2r)(int rank, const X(iodim) *dims, \ + int howmany_rank, \ + const X(iodim) *howmany_dims, \ + C *in, R *out, \ + unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_guru_split_dft_r2c)( \ + int rank, const X(iodim) *dims, \ + int howmany_rank, \ + const X(iodim) *howmany_dims, \ + R *in, R *ro, R *io, \ + unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_guru_split_dft_c2r)( \ + int rank, const X(iodim) *dims, \ + int howmany_rank, \ + const X(iodim) *howmany_dims, \ + R *ri, R *ii, R *out, \ + unsigned flags); \ + 
\ +FFTW_EXTERN X(plan) X(plan_guru64_dft_r2c)(int rank, \ + const X(iodim64) *dims, \ + int howmany_rank, \ + const X(iodim64) *howmany_dims, \ + R *in, C *out, \ + unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_guru64_dft_c2r)(int rank, \ + const X(iodim64) *dims, \ + int howmany_rank, \ + const X(iodim64) *howmany_dims, \ + C *in, R *out, \ + unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_guru64_split_dft_r2c)( \ + int rank, const X(iodim64) *dims, \ + int howmany_rank, \ + const X(iodim64) *howmany_dims, \ + R *in, R *ro, R *io, \ + unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_guru64_split_dft_c2r)( \ + int rank, const X(iodim64) *dims, \ + int howmany_rank, \ + const X(iodim64) *howmany_dims, \ + R *ri, R *ii, R *out, \ + unsigned flags); \ + \ +FFTW_EXTERN void X(execute_dft_r2c)(const X(plan) p, R *in, C *out); \ +FFTW_EXTERN void X(execute_dft_c2r)(const X(plan) p, C *in, R *out); \ + \ +FFTW_EXTERN void X(execute_split_dft_r2c)(const X(plan) p, \ + R *in, R *ro, R *io); \ +FFTW_EXTERN void X(execute_split_dft_c2r)(const X(plan) p, \ + R *ri, R *ii, R *out); \ + \ +FFTW_EXTERN X(plan) X(plan_many_r2r)(int rank, const int *n, \ + int howmany, \ + R *in, const int *inembed, \ + int istride, int idist, \ + R *out, const int *onembed, \ + int ostride, int odist, \ + const X(r2r_kind) *kind, unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_r2r)(int rank, const int *n, R *in, R *out, \ + const X(r2r_kind) *kind, unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_r2r_1d)(int n, R *in, R *out, \ + X(r2r_kind) kind, unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_r2r_2d)(int n0, int n1, R *in, R *out, \ + X(r2r_kind) kind0, X(r2r_kind) kind1, \ + unsigned flags); \ +FFTW_EXTERN X(plan) X(plan_r2r_3d)(int n0, int n1, int n2, \ + R *in, R *out, X(r2r_kind) kind0, \ + X(r2r_kind) kind1, X(r2r_kind) kind2, \ + unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_guru_r2r)(int rank, const X(iodim) *dims, \ + int howmany_rank, \ + const X(iodim) *howmany_dims, \ + R 
*in, R *out, \ + const X(r2r_kind) *kind, unsigned flags); \ + \ +FFTW_EXTERN X(plan) X(plan_guru64_r2r)(int rank, const X(iodim64) *dims, \ + int howmany_rank, \ + const X(iodim64) *howmany_dims, \ + R *in, R *out, \ + const X(r2r_kind) *kind, unsigned flags); \ + \ +FFTW_EXTERN void X(execute_r2r)(const X(plan) p, R *in, R *out); \ + \ +FFTW_EXTERN void X(destroy_plan)(X(plan) p); \ +FFTW_EXTERN void X(forget_wisdom)(void); \ +FFTW_EXTERN void X(cleanup)(void); \ + \ +FFTW_EXTERN void X(set_timelimit)(double); \ + \ +FFTW_EXTERN void X(plan_with_nthreads)(int nthreads); \ +FFTW_EXTERN int X(init_threads)(void); \ +FFTW_EXTERN void X(cleanup_threads)(void); \ + \ +FFTW_EXTERN void X(export_wisdom_to_file)(FILE *output_file); \ +FFTW_EXTERN char *X(export_wisdom_to_string)(void); \ +FFTW_EXTERN void X(export_wisdom)(void (*write_char)(char c, void *), \ + void *data); \ +FFTW_EXTERN int X(import_system_wisdom)(void); \ +FFTW_EXTERN int X(import_wisdom_from_file)(FILE *input_file); \ +FFTW_EXTERN int X(import_wisdom_from_string)(const char *input_string); \ +FFTW_EXTERN int X(import_wisdom)(int (*read_char)(void *), void *data); \ + \ +FFTW_EXTERN void X(fprint_plan)(const X(plan) p, FILE *output_file); \ +FFTW_EXTERN void X(print_plan)(const X(plan) p); \ + \ +FFTW_EXTERN void *X(malloc)(size_t n); \ +FFTW_EXTERN void X(free)(void *p); \ + \ +FFTW_EXTERN void X(flops)(const X(plan) p, \ + double *add, double *mul, double *fmas); \ +FFTW_EXTERN double X(estimate_cost)(const X(plan) p); \ + \ +FFTW_EXTERN const char X(version)[]; \ +FFTW_EXTERN const char X(cc)[]; \ +FFTW_EXTERN const char X(codelet_optim)[]; + + +/* end of FFTW_DEFINE_API macro */ + +FFTW_DEFINE_API(FFTW_MANGLE_DOUBLE, double, fftw_complex) +FFTW_DEFINE_API(FFTW_MANGLE_FLOAT, float, fftwf_complex) +FFTW_DEFINE_API(FFTW_MANGLE_LONG_DOUBLE, long double, fftwl_complex) + +#define FFTW_FORWARD (-1) +#define FFTW_BACKWARD (+1) + +#define FFTW_NO_TIMELIMIT (-1.0) + +/* documented flags */ +#define 
FFTW_MEASURE (0U) +#define FFTW_DESTROY_INPUT (1U << 0) +#define FFTW_UNALIGNED (1U << 1) +#define FFTW_CONSERVE_MEMORY (1U << 2) +#define FFTW_EXHAUSTIVE (1U << 3) /* NO_EXHAUSTIVE is default */ +#define FFTW_PRESERVE_INPUT (1U << 4) /* cancels FFTW_DESTROY_INPUT */ +#define FFTW_PATIENT (1U << 5) /* IMPATIENT is default */ +#define FFTW_ESTIMATE (1U << 6) + +/* undocumented beyond-guru flags */ +#define FFTW_ESTIMATE_PATIENT (1U << 7) +#define FFTW_BELIEVE_PCOST (1U << 8) +#define FFTW_NO_DFT_R2HC (1U << 9) +#define FFTW_NO_NONTHREADED (1U << 10) +#define FFTW_NO_BUFFERING (1U << 11) +#define FFTW_NO_INDIRECT_OP (1U << 12) +#define FFTW_ALLOW_LARGE_GENERIC (1U << 13) /* NO_LARGE_GENERIC is default */ +#define FFTW_NO_RANK_SPLITS (1U << 14) +#define FFTW_NO_VRANK_SPLITS (1U << 15) +#define FFTW_NO_VRECURSE (1U << 16) +#define FFTW_NO_SIMD (1U << 17) +#define FFTW_NO_SLOW (1U << 18) +#define FFTW_NO_FIXED_RADIX_LARGE_N (1U << 19) +#define FFTW_ALLOW_PRUNING (1U << 20) +#define FFTW_WISDOM_ONLY (1U << 21) + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ + +#endif /* FFTW3_H */ From d221e963ffb4ab9860f1e4444a84e48f2da47816 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Wed, 3 Jan 2018 01:25:54 -0500 Subject: [PATCH 012/111] Don't do tgamma, lgamma if already in scope (#809-pybind11) --- src/math/Gamma.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/math/Gamma.cpp b/src/math/Gamma.cpp index 23505e9f60b..8d4f9bb7947 100644 --- a/src/math/Gamma.cpp +++ b/src/math/Gamma.cpp @@ -46,6 +46,7 @@ namespace math { // Defined in BesselJ.cpp double dcsevl(double x, const double* cs, int n); +#if not __cplusplus >= 201103L double tgamma(double x) { double g = dgamma(x); @@ -74,6 +75,7 @@ namespace math { #endif return g; } +#endif double gamma_p(double a, double x) { From ed4d247546a4ea23c1e9952c9303c501d16a0030 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Wed, 3 Jan 2018 01:27:15 -0500 Subject: [PATCH 013/111] First pass at setup.py 
(#809-pybind11) --- .gitignore | 5 +- MANIFEST.in | 4 + galsim/share | 1 + setup.py | 204 +++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 213 insertions(+), 1 deletion(-) create mode 100644 MANIFEST.in create mode 120000 galsim/share create mode 100644 setup.py diff --git a/.gitignore b/.gitignore index 85f471a077f..02d802a3ef1 100644 --- a/.gitignore +++ b/.gitignore @@ -15,10 +15,13 @@ docs/doxygen_example_output/* tests/nosetests.xml examples/output/* devutils/sizeof_SIFD -gs.error +gs_error.txt *junk* debug.out tmp* examples_bin *~ .cache +*.egg* +build +dist diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 00000000000..8f68820da6b --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,4 @@ +graft galsim +include *.md +include LICENSE +global-exclude __pycache__ *.pyc .obj diff --git a/galsim/share b/galsim/share new file mode 120000 index 00000000000..4c6a23531fe --- /dev/null +++ b/galsim/share @@ -0,0 +1 @@ +../share/ \ No newline at end of file diff --git a/setup.py b/setup.py new file mode 100644 index 00000000000..f3d50fa22f1 --- /dev/null +++ b/setup.py @@ -0,0 +1,204 @@ +from __future__ import print_function +import sys,os,glob,re +import select + + +from setuptools import setup, Extension +from setuptools.command.build_ext import build_ext +from setuptools.command.install_scripts import install_scripts +from setuptools.command.easy_install import easy_install +import setuptools +print("Using setuptools version",setuptools.__version__) + +print('Python version = ',sys.version) +py_version = "%d.%d"%sys.version_info[0:2] # we check things based on the major.minor version. 
+ +scripts = ['galsim', 'galsim_download_cosmos'] +scripts = [ os.path.join('bin',f) for f in scripts ] + +def all_files_from(dir, ext=''): + files = [] + for root, dirnames, filenames in os.walk(dir): + for filename in filenames: + if filename.endswith(ext): + files.append(os.path.join(root, filename)) + return files + +sources = all_files_from('src', '.cpp') + all_files_from('pysrc', '.cpp') +headers = all_files_from('include') +shared_data = all_files_from('share') +print('sources = ',sources) +print('headers = ',headers) +print('shared = ',shared_data) + +# If we build with debug, undefine NDEBUG flag +undef_macros = [] +if "--debug" in sys.argv: + undef_macros+=['NDEBUG'] + +copt = { + 'gcc' : ['-O3','-ffast-math','-std=c++11'], + 'icc' : ['-O3','-std=c++11'], + 'clang' : ['-O3','-ffast-math','-std=c++11','-Wno-shorten-64-to-32'], + 'unknown' : [], +} + +if "--debug" in sys.argv: + copt['gcc'].append('-g') + copt['icc'].append('-g') + copt['clang'].append('-g') + +def get_compiler(cc): + """Try to figure out which kind of compiler this really is. + In particular, try to distinguish between clang and gcc, either of which may + be called cc or gcc. + """ + cmd = [cc,'--version'] + import subprocess + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + lines = p.stdout.readlines() + print('compiler version information: ') + for line in lines: + print(line.decode().strip()) + try: + # Python3 needs this decode bit. + # Python2.7 doesn't need it, but it works fine. + line = lines[0].decode(encoding='UTF-8') + if line.startswith('Configured'): + line = lines[1].decode(encoding='UTF-8') + except TypeError: + # Python2.6 throws a TypeError, so just use the lines as they are. 
+ line = lines[0] + if line.startswith('Configured'): + line = lines[1] + + if 'clang' in line: + return 'clang' + elif 'gcc' in line: + return 'gcc' + elif 'GCC' in line: + return 'gcc' + elif 'clang' in cc: + return 'clang' + elif 'gcc' in cc or 'g++' in cc: + return 'gcc' + elif 'icc' in cc or 'icpc' in cc: + return 'icc' + else: + return 'unknown' + +# Make a subclass of build_ext so we can add to the -I list. +class my_builder( build_ext ): + # Adding the libraries and include_dirs here rather than when declaring the Extension + # means that the setup_requires modules should already be installed, so pybind11, eigency, + # and fftw3 should all import properly. + def finalize_options(self): + print('finalize_options:') + build_ext.finalize_options(self) + self.include_dirs.append('include') + self.include_dirs.append('include/galsim') + import pybind11 + # Include both the standard location and the --user location, since it's hard to tell + # which one is the right choice. + self.include_dirs.append(pybind11.get_include(user=False)) + self.include_dirs.append(pybind11.get_include(user=True)) + import fftw3 + self.include_dirs.append('include/fftw3') + self.library_dirs.append(fftw3.lib.libdir) + fftw3_libname = fftw3.lib.libbase + if fftw3_libname.startswith('lib'): fftw3_libname = fftw3_libname[3:] + self.libraries.append(fftw3_libname) + import eigency + self.include_dirs.append(eigency.get_includes()[2]) + print('include_dirs = ',self.include_dirs) + print('library_dirs = ',self.library_dirs) + print('libraries = ',self.libraries) + + # Add any extra things based on the compiler being used.. 
+ def build_extensions(self): + # Figure out what compiler it will use + cc = self.compiler.executables['compiler_cxx'][0] + print('Using compiler %s'%(cc)) + # Figure out what compiler it will use + cc = self.compiler.executables['compiler_cxx'][0] + comp_type = get_compiler(cc) + if cc == comp_type: + print('Using compiler %s'%(cc)) + else: + print('Using compiler %s, which is %s'%(cc,comp_type)) + # Add the appropriate extra flags for that compiler. + for e in self.extensions: + e.extra_compile_args = copt[ comp_type ] + #e.extra_link_args = lopt[ comp_type ] + # Now run the normal build function. + build_ext.build_extensions(self) + +# AFAICT, setuptools doesn't provide any easy access to the final installation location of the +# executable scripts. This bit is just to save the value of script_dir so I can use it later. +# cf. http://stackoverflow.com/questions/12975540/correct-way-to-find-scripts-directory-from-setup-py-in-python-distutils/ +class my_easy_install( easy_install ): + # Match the call signature of the easy_install version. + def write_script(self, script_name, contents, mode="t", *ignored): + # Run the normal version + easy_install.write_script(self, script_name, contents, mode, *ignored) + # Save the script install directory in the distribution object. + # This is the same thing that is returned by the setup function. + self.distribution.script_install_dir = self.script_dir + +ext=Extension("galsim._galsim", + sources, + undef_macros = undef_macros) + +build_dep = ['pybind11', 'pyfftw3', 'eigency'] +run_dep = ['numpy', 'future', 'astropy', 'pyyaml', 'LSSTDESC.Coord', 'pandas'] + +with open('README.md') as file: + long_description = file.read() + +# Read in the galsim version from galsim/_version.py +# cf. 
http://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package +version_file=os.path.join('galsim','_version.py') +verstrline = open(version_file, "rt").read() +VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]" +mo = re.search(VSRE, verstrline, re.M) +if mo: + galsim_version = mo.group(1) +else: + raise RuntimeError("Unable to find version string in %s." % (version_file,)) +print('GalSim version is %s'%(galsim_version)) + +dist = setup(name="GalSim", + version=galsim_version, + author="GalSim Developers (point of contact: Mike Jarvis)", + author_email="michael@jarvis.net", + description="The modular galaxy image simulation toolkit", + long_description=long_description, + license = "BSD License", + url="https://github.com/rmjarvis/GalSim", + download_url="https://github.com/GalSim-developers/GalSim/releases/tag/v%s.zip"%galsim_version, + packages=['galsim'], + include_package_data=True, + ext_modules=[ext], + setup_requires=build_dep, + install_requires=build_dep + run_dep, + cmdclass = {'build_ext': my_builder, + 'easy_install': my_easy_install, + }, + scripts=scripts, + zip_safe=False, + ) + +# Check that the path includes the directory where the scripts are installed. +real_env_path = [os.path.realpath(d) for d in os.environ['PATH'].split(':')] +if (hasattr(dist,'script_install_dir') and + dist.script_install_dir not in os.environ['PATH'].split(':') and + os.path.realpath(dist.script_install_dir) not in real_env_path): + + print('\nWARNING: The GalSim executables were installed in a directory not in your PATH') + print(' If you want to use the executables, you should add the directory') + print('\n ',dist.script_install_dir,'\n') + print(' to your path. 
The current path is') + print('\n ',os.environ['PATH'],'\n') + print(' Alternatively, you can specify a different prefix with --prefix=PREFIX,') + print(' in which case the scripts will be installed in PREFIX/bin.') + print(' If you are installing via pip use --install-option="--prefix=PREFIX"') From a04d5825fea9756d4ac83c505982bc695c253ada Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Wed, 3 Jan 2018 08:21:24 -0500 Subject: [PATCH 014/111] Add requirements file (#809-pybind11) --- requirements.txt | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 requirements.txt diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 00000000000..a8c12c687b8 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,17 @@ +# I didn't try to figure out which versions of these are really required. These are the +# current versions at the time of writing this (Jan, 2018), and they are known to work. + +pybind11 >= 2.0.0 +pyfftw3 >= 0.2.1 + +numpy >= 1.13.3 +future >= 0.15.2 +astropy >= 2.0.3 +pyyaml >= 3.12 +LSSTDESC.Coord >= 1.0.5 +pandas >= 0.22.0 + +# The version of eigency (1.75) on pip doens't install the Eigen directory properly. +# cf. https://github.com/wouterboomsma/eigency/issues/17 +# They already fixed the problem on master, so this commit works until they release 1.76. 
+git+git://github.com/wouterboomsma/eigency.git@ed54a61e8143284e243f0a7ddbc5acb4c8bf58bd From 471225ee16a748d61b69fcb9c743815a56efb622 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Wed, 3 Jan 2018 10:21:49 -0500 Subject: [PATCH 015/111] Remove mmgr.cpp file (#809-pybind11) --- src/mmgr.cpp | 1719 -------------------------------------------------- 1 file changed, 1719 deletions(-) delete mode 100644 src/mmgr.cpp diff --git a/src/mmgr.cpp b/src/mmgr.cpp deleted file mode 100644 index 8019280da21..00000000000 --- a/src/mmgr.cpp +++ /dev/null @@ -1,1719 +0,0 @@ -/* -*- c++ -*- - * Copyright (c) 2012-2017 by the GalSim developers team on GitHub - * https://github.com/GalSim-developers - * - * This file is part of GalSim: The modular galaxy image simulation toolkit. - * https://github.com/GalSim-developers/GalSim - * - * GalSim is free software: redistribution and use in source and binary forms, - * with or without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this - * list of conditions, and the disclaimer given in the accompanying LICENSE - * file. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions, and the disclaimer given in the documentation - * and/or other materials provided with the distribution. - */ - -// --------------------------------------------------------------------------------------------------------------------------------- -// Copyright 2000, Paul Nettle. All rights reserved. -// -// You are free to use this source code in any commercial or non-commercial product. 
-// -// mmgr.cpp - Memory manager & tracking software -// -// The most recent version of this software can be found at: ftp://ftp.GraphicsPapers.com/pub/ProgrammingTools/MemoryManagers/ -// -// [NOTE: Best when viewed with 8-character tabs] -// -// --------------------------------------------------------------------------------------------------------------------------------- -// -// !!IMPORTANT!! -// -// This software is self-documented with periodic comments. Before you start using this software, perform a search for the string -// "-DOC-" to locate pertinent information about how to use this software. -// -// You are also encouraged to read the comment blocks throughout this source file. They will help you understand how this memory -// tracking software works, so you can better utilize it within your applications. -// -// NOTES: -// -// 1. This code purposely uses no external routines that allocate RAM (other than the raw allocation routines, such as malloc). We -// do this because we want this to be as self-contained as possible. As an example, we don't use assert, because when running -// under WIN32, the assert brings up a dialog box, which allocates RAM. Doing this in the middle of an allocation would be bad. -// -// 2. When trying to override new/delete under MFC (which has its own version of global new/delete) the linker will complain. In -// order to fix this error, use the compiler option: /FORCE, which will force it to build an executable even with linker errors. -// Be sure to check those errors each time you compile, otherwise, you may miss a valid linker error. -// -// 3. If you see something that looks odd to you or seems like a strange way of going about doing something, then consider that this -// code was carefully thought out. If something looks odd, then just assume I've got a good reason for doing it that way (an -// example is the use of the class MemStaticTimeTracker.) -// -// 4. 
With MFC applications, you will need to comment out any occurance of "#define new DEBUG_NEW" from all source files. -// -// 5. Include file dependencies are _very_important_ for getting the MMGR to integrate nicely into your application. Be careful if -// you're including standard includes from within your own project inclues; that will break this very specific dependency order. -// It should look like this: -// -// #include // Standard includes MUST come first -// #include // -// #include // -// -// #include "mmgr.h" // mmgr.h MUST come next -// -// #include "myfile1.h" // Project includes MUST come last -// #include "myfile2.h" // -// #include "myfile3.h" // -// -// --------------------------------------------------------------------------------------------------------------------------------- - -//#include "stdafx.h" -#include -#include -#include -#include -#include -#include -#include -#include - -#ifndef WIN32 -#include -#define LINE_END "\n" -#else -#define LINE_END "\r\n" -#endif - -#include "galsim/mmgr.h" - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- If you're like me, it's hard to gain trust in foreign code. This memory manager will try to INDUCE your code to crash (for -// very good reasons... like making bugs obvious as early as possible.) Some people may be inclined to remove this memory tracking -// software if it causes crashes that didn't exist previously. In reality, these new crashes are the BEST reason for using this -// software! -// -// Whether this software causes your application to crash, or if it reports errors, you need to be able to TRUST this software. To -// this end, you are given some very simple debugging tools. -// -// The quickest way to locate problems is to enable the STRESS_TEST macro (below.) 
This should catch 95% of the crashes before they -// occur by validating every allocation each time this memory manager performs an allocation function. If that doesn't work, keep -// reading... -// -// If you enable the TEST_MEMORY_MANAGER #define (below), this memory manager will log an entry in the memory.log file each time it -// enters and exits one of its primary allocation handling routines. Each call that succeeds should place an "ENTER" and an "EXIT" -// into the log. If the program crashes within the memory manager, it will log an "ENTER", but not an "EXIT". The log will also -// report the name of the routine. -// -// Just because this memory manager crashes does not mean that there is a bug here! First, an application could inadvertantly damage -// the heap, causing malloc(), realloc() or free() to crash. Also, an application could inadvertantly damage some of the memory used -// by this memory tracking software, causing it to crash in much the same way that a damaged heap would affect the standard -// allocation routines. -// -// In the event of a crash within this code, the first thing you'll want to do is to locate the actual line of code that is -// crashing. You can do this by adding log() entries throughout the routine that crashes, repeating this process until you narrow -// in on the offending line of code. If the crash happens in a standard C allocation routine (i.e. malloc, realloc or free) don't -// bother contacting me, your application has damaged the heap. You can help find the culprit in your code by enabling the -// STRESS_TEST macro (below.) -// -// If you truely suspect a bug in this memory manager (and you had better be sure about it! :) you can contact me at -// midnight@GraphicsPapers.com. 
Before you do, however, check for a newer version at: -// -// ftp://ftp.GraphicsPapers.com/pub/ProgrammingTools/MemoryManagers/ -// -// When using this debugging aid, make sure that you are NOT setting the alwaysLogAll variable on, otherwise the log could be -// cluttered and hard to read. -// --------------------------------------------------------------------------------------------------------------------------------- - -//#define TEST_MEMORY_MANAGER - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- Enable this sucker if you really want to stress-test your app's memory usage, or to help find hard-to-find bugs -// --------------------------------------------------------------------------------------------------------------------------------- - -//#define STRESS_TEST - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- Enable this sucker if you want to stress-test your app's error-handling. Set RANDOM_FAIL to the percentage of failures you -// want to test with (0 = none, >100 = all failures). 
-// --------------------------------------------------------------------------------------------------------------------------------- - -//#define RANDOM_FAILURE 100.0 - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- Locals -- modify these flags to suit your needs -// --------------------------------------------------------------------------------------------------------------------------------- - -#ifdef STRESS_TEST -static const size_t hashBits = 12; -static bool randomWipe = true; -static bool alwaysValidateAll = true; -static bool alwaysLogAll = true; -static bool alwaysWipeAll = true; -static bool cleanupLogOnFirstRun = true; -static const size_t paddingSize = 1024; // An extra 8K per allocation! -#else -static const size_t hashBits = 12; -static bool randomWipe = false; -static bool alwaysValidateAll = false; -static bool alwaysLogAll = false; -static bool alwaysWipeAll = true; -static bool cleanupLogOnFirstRun = true; -static const size_t paddingSize = 4; -#endif - -// --------------------------------------------------------------------------------------------------------------------------------- -// We define our own assert, because we don't want to bring up an assertion dialog, since that allocates RAM. Our new assert -// simply declares a forced breakpoint. -// --------------------------------------------------------------------------------------------------------------------------------- - -#ifdef WIN32 -#ifdef _DEBUG -#define m_assert(x) if ((x) == false) __asm { int 3 } -#else -#define m_assert(x) {} -#endif -#else // Linux uses assert, which we can use safely, since it doesn't bring up a dialog within the program. 
-#define m_assert assert -#endif - -// --------------------------------------------------------------------------------------------------------------------------------- -// Here, we turn off our macros because any place in this source file where the word 'new' or the word 'delete' (etc.) -// appear will be expanded by the macro. So to avoid problems using them within this source file, we'll just #undef them. -// --------------------------------------------------------------------------------------------------------------------------------- - -#undef new -#undef delete -#undef malloc -#undef calloc -#undef realloc -#undef free - -// --------------------------------------------------------------------------------------------------------------------------------- -// Defaults for the constants & statics in the MemoryManager class -// --------------------------------------------------------------------------------------------------------------------------------- - -const size_t m_alloc_unknown = 0; -const size_t m_alloc_new = 1; -const size_t m_alloc_new_array = 2; -const size_t m_alloc_malloc = 3; -const size_t m_alloc_calloc = 4; -const size_t m_alloc_realloc = 5; -const size_t m_alloc_delete = 6; -const size_t m_alloc_delete_array = 7; -const size_t m_alloc_free = 8; - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- Get to know these values. They represent the values that will be used to fill unused and deallocated RAM. 
-// --------------------------------------------------------------------------------------------------------------------------------- - -static size_t prefixPattern = 0xbaadf00d; // Fill pattern for bytes preceeding allocated blocks -static size_t postfixPattern = 0xdeadc0de; // Fill pattern for bytes following allocated blocks -static size_t unusedPattern = 0xfeedface; // Fill pattern for freshly allocated blocks -static size_t releasedPattern = 0xdeadbeef; // Fill pattern for deallocated blocks - -// --------------------------------------------------------------------------------------------------------------------------------- -// Other locals -// --------------------------------------------------------------------------------------------------------------------------------- - -static const size_t hashSize = 1 << hashBits; -static const char *allocationTypes[] = {"Unknown", - "new", "new[]", "malloc", "calloc", - "realloc", "delete", "delete[]", "free"}; -static sAllocUnit *hashTable[hashSize]; -static sAllocUnit *reservoir; -static size_t currentAllocationCount = 0; -static size_t breakOnAllocationCount = 0; -static sMStats stats; -static const char *sourceFile = "??"; -static const char *sourceFunc = "??"; -static size_t sourceLine = 0; -static bool staticDeinitTime = false; -static sAllocUnit **reservoirBuffer = NULL; -static size_t reservoirBufferSize = 0; - -// --------------------------------------------------------------------------------------------------------------------------------- -// Local functions only -// --------------------------------------------------------------------------------------------------------------------------------- - -static void doCleanupLogOnFirstRun() -{ - if (cleanupLogOnFirstRun) - { - unlink("memory.log"); - cleanupLogOnFirstRun = false; - } -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static void log(const char *format, ...) 
-{ - // Build the buffer - - static char buffer[2048]; - va_list ap; - va_start(ap, format); - vsprintf(buffer, format, ap); - va_end(ap); - - // Cleanup the log? - - if (cleanupLogOnFirstRun) doCleanupLogOnFirstRun(); - - // Open the log file - - FILE *fp = fopen("memory.log", "ab"); - - // If you hit this assert, then the memory logger is unable to log information to a file (can't open the file for some - // reason.) You can interrogate the variable 'buffer' to see what was supposed to be logged (but won't be.) - m_assert(fp); - - if (!fp) return; - - // Spit out the data to the log - - fprintf(fp, "%s" LINE_END, buffer); - fclose(fp); -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static const char *sourceFileStripper(const char *sourceFile) -{ - char *ptr = strrchr(sourceFile, '\\'); - if (ptr) return ptr + 1; - ptr = strrchr(sourceFile, '/'); - if (ptr) return ptr + 1; - return sourceFile; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static const char *ownerString(const char *sourceFile, const size_t sourceLine, const char *sourceFunc) -{ - static char str[90]; - memset(str, 0, sizeof(str)); - sprintf(str, "%s(%05zu)::%s", sourceFileStripper(sourceFile), sourceLine, sourceFunc); - return str; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static const char *insertCommas(size_t value) -{ - static char str[30]; - memset(str, 0, sizeof(str)); - - sprintf(str, "%zu", value); - if (strlen(str) > 3) - { - memmove(&str[strlen(str)-3], &str[strlen(str)-4], 4); - str[strlen(str) - 4] = ','; - } - if (strlen(str) > 7) - { - memmove(&str[strlen(str)-7], &str[strlen(str)-8], 8); - str[strlen(str) - 8] = ','; - } - if (strlen(str) > 11) - { - memmove(&str[strlen(str)-11], 
&str[strlen(str)-12], 12); - str[strlen(str) - 12] = ','; - } - - return str; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static const char *memorySizeString(size_t size) -{ - static char str[90]; - if (size > (1024*1024)) sprintf(str, "%10s (%7.2fM)", insertCommas(size), (float) size / (1024.0f * 1024.0f)); - else if (size > 1024) sprintf(str, "%10s (%7.2fK)", insertCommas(size), (float) size / 1024.0f); - else sprintf(str, "%10s bytes ", insertCommas(size)); - return str; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static sAllocUnit *findAllocUnit(const void *reportedAddress) -{ - // Just in case... - m_assert(reportedAddress != NULL); - - // Use the address to locate the hash index. Note that we shift off the lower four bits. This is because most allocated - // addresses will be on four-, eight- or even sixteen-byte boundaries. If we didn't do this, the hash index would not have - // very good coverage. - - size_t hashIndex = ((size_t) reportedAddress >> 4) & (hashSize - 1); - sAllocUnit *ptr = hashTable[hashIndex]; - while(ptr) - { - if (ptr->reportedAddress == reportedAddress) return ptr; - ptr = ptr->next; - } - - return NULL; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static size_t calculateActualSize(const size_t reportedSize) -{ - // We use DWORDS as our padding, and a long is guaranteed to be 4 bytes, but an int is not (ANSI defines an int as - // being the standard word size for a processor; on a 32-bit machine, that's 4 bytes, but on a 64-bit machine, it's - // 8 bytes, which means an int can actually be larger than a long.) 
- - return reportedSize + paddingSize * sizeof(long) * 2; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static size_t calculateReportedSize(const size_t actualSize) -{ - // We use DWORDS as our padding, and a long is guaranteed to be 4 bytes, but an int is not (ANSI defines an int as - // being the standard word size for a processor; on a 32-bit machine, that's 4 bytes, but on a 64-bit machine, it's - // 8 bytes, which means an int can actually be larger than a long.) - - return actualSize - paddingSize * sizeof(long) * 2; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static void *calculateReportedAddress(const void *actualAddress) -{ - // We allow this... - - if (!actualAddress) return NULL; - - // JUst account for the padding - - return (void *) ((char *) actualAddress + sizeof(long) * paddingSize); -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static void wipeWithPattern(sAllocUnit *allocUnit, size_t pattern, const size_t originalReportedSize = 0) -{ - // For a serious test run, we use wipes of random a random value. However, if this causes a crash, we don't want it to - // crash in a differnt place each time, so we specifically DO NOT call srand. If, by chance your program calls srand(), - // you may wish to disable that when running with a random wipe test. This will make any crashes more consistent so they - // can be tracked down easier. - - if (randomWipe) - { - pattern = ((rand() & 0xff) << 24) | ((rand() & 0xff) << 16) | ((rand() & 0xff) << 8) | (rand() & 0xff); - } - - // -DOC- We should wipe with 0's if we're not in debug mode, so we can help hide bugs if possible when we release the - // product. So uncomment the following line for releases. 
- // - // Note that the "alwaysWipeAll" should be turned on for this to have effect, otherwise it won't do much good. But we'll - // leave it this way (as an option) because this does slow things down. - // pattern = 0; - - // This part of the operation is optional - - if (alwaysWipeAll && allocUnit->reportedSize > originalReportedSize) - { - // Fill the bulk - - long *lptr = (long *) ((char *)allocUnit->reportedAddress + originalReportedSize); - int length = allocUnit->reportedSize - originalReportedSize; - int nlongs = length / sizeof(long); - int i; - for (i = 0; i < nlongs; i++, lptr++) - { - *lptr = pattern; - } - - // Fill the remainder - - size_t shiftCount = 0; - char *cptr = (char *) lptr; - for (i = 0; i < (length & 0x3); i++, cptr++, shiftCount += 8) - { - *cptr = (pattern & (0xff << shiftCount)) >> shiftCount; - } - } - - // Write in the prefix/postfix bytes - - long *pre = (long *) allocUnit->actualAddress; - long *post = (long *) ((char *)allocUnit->actualAddress + allocUnit->actualSize - paddingSize * sizeof(long)); - for (size_t i = 0; i < paddingSize; i++, pre++, post++) - { - *pre = prefixPattern; - *post = postfixPattern; - } -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static void resetGlobals() -{ - sourceFile = "??"; - sourceLine = 0; - sourceFunc = "??"; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static void dumpAllocations(FILE *fp) -{ - fprintf(fp, "Alloc. 
Addr Size Addr Size BreakOn BreakOn " LINE_END); - fprintf(fp, "Number Reported Reported Actual Actual Unused Method Dealloc Realloc Allocated by " LINE_END); - fprintf(fp, "------ ---------- ---------- ---------- ---------- ---------- -------- ------- ------- --------------------------------------------------- " LINE_END); - - - for (size_t i = 0; i < hashSize; i++) - { - sAllocUnit *ptr = hashTable[i]; - while(ptr) - { - fprintf(fp, "%06zu 0x%08zX 0x%08zX 0x%08zX 0x%08zX 0x%08zX %-8s %c %c %s" LINE_END, - ptr->allocationNumber, - (size_t) ptr->reportedAddress, - (size_t) ptr->reportedSize, - (size_t) ptr->actualAddress, - (size_t) ptr->actualSize, - m_calcUnused(ptr), - allocationTypes[ptr->allocationType], - ptr->breakOnDealloc ? 'Y':'N', - ptr->breakOnRealloc ? 'Y':'N', - ownerString(ptr->sourceFile, ptr->sourceLine, ptr->sourceFunc)); - ptr = ptr->next; - } - } -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -static void dumpLeakReport() -{ - // Open the report file - - FILE *fp = fopen("memleaks.log", "w+b"); - - // If you hit this assert, then the memory report generator is unable to log information to a file (can't open the file for - // some reason.) - m_assert(fp); - if (!fp) return; - - // Any leaks? 
- - // Header - - static char timeString[25]; - memset(timeString, 0, sizeof(timeString)); - time_t t = time(NULL); - struct tm *tme = localtime(&t); - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, "| Memory leak report for: %02d/%02d/%04d %02d:%02d:%02d |" LINE_END, tme->tm_mon + 1, tme->tm_mday, tme->tm_year + 1900, tme->tm_hour, tme->tm_min, tme->tm_sec); - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, LINE_END); - fprintf(fp, LINE_END); - if (stats.totalAllocUnitCount) - { - fprintf(fp, "%zu memory leak%s found:" LINE_END, stats.totalAllocUnitCount, stats.totalAllocUnitCount == 1 ? "":"s"); - } - else - { - fprintf(fp, "Congratulations! No memory leaks found!" LINE_END); - - // We can finally free up our own memory allocations - - if (reservoirBuffer) - { - for (size_t i = 0; i < reservoirBufferSize; i++) - { - free(reservoirBuffer[i]); - } - free(reservoirBuffer); - reservoirBuffer = 0; - reservoirBufferSize = 0; - reservoir = NULL; - } - } - fprintf(fp, LINE_END); - - if (stats.totalAllocUnitCount) - { - dumpAllocations(fp); - } - - fclose(fp); -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// We use a static class to let us know when we're in the midst of static deinitialization -// --------------------------------------------------------------------------------------------------------------------------------- - -class MemStaticTimeTracker -{ -public: - // Don't do the leak report in the destructor, since other static variables might - // be deallocated after this one. 
- MemStaticTimeTracker() { doCleanupLogOnFirstRun(); } - ~MemStaticTimeTracker() { staticDeinitTime = true; dumpLeakReport(); } -}; -static MemStaticTimeTracker mstt; - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- Flags & options -- Call these routines to enable/disable the following options -// --------------------------------------------------------------------------------------------------------------------------------- - -bool &m_alwaysValidateAll() -{ - // Force a validation of all allocation units each time we enter this software - return alwaysValidateAll; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -bool &m_alwaysLogAll() -{ - // Force a log of every allocation & deallocation into memory.log - return alwaysLogAll; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -bool &m_alwaysWipeAll() -{ - // Force this software to always wipe memory with a pattern when it is being allocated/dallocated - return alwaysWipeAll; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -bool &m_randomeWipe() -{ - // Force this software to use a random pattern when wiping memory -- good for stress testing - return randomWipe; -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- Simply call this routine with the address of an allocated block of RAM, to cause it to force a breakpoint when it is -// reallocated. 
-// --------------------------------------------------------------------------------------------------------------------------------- - -bool &m_breakOnRealloc(void *reportedAddress) -{ - // Locate the existing allocation unit - - sAllocUnit *au = findAllocUnit(reportedAddress); - - // If you hit this assert, you tried to set a breakpoint on reallocation for an address that doesn't exist. Interrogate the - // stack frame or the variable 'au' to see which allocation this is. - m_assert(au != NULL); - - // If you hit this assert, you tried to set a breakpoint on reallocation for an address that wasn't allocated in a way that - // is compatible with reallocation. - m_assert(au->allocationType == m_alloc_malloc || - au->allocationType == m_alloc_calloc || - au->allocationType == m_alloc_realloc); - - return au->breakOnRealloc; -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- Simply call this routine with the address of an allocated block of RAM, to cause it to force a breakpoint when it is -// deallocated. -// --------------------------------------------------------------------------------------------------------------------------------- - -bool &m_breakOnDealloc(void *reportedAddress) -{ - // Locate the existing allocation unit - - sAllocUnit *au = findAllocUnit(reportedAddress); - - // If you hit this assert, you tried to set a breakpoint on deallocation for an address that doesn't exist. Interrogate the - // stack frame or the variable 'au' to see which allocation this is. 
- m_assert(au != NULL); - - return au->breakOnDealloc; -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- When tracking down a difficult bug, use this routine to force a breakpoint on a specific allocation count -// --------------------------------------------------------------------------------------------------------------------------------- - -void m_breakOnAllocation(size_t count) -{ - breakOnAllocationCount = count; -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// Used by the macros -// --------------------------------------------------------------------------------------------------------------------------------- - -void m_setOwner(const char *file, const size_t line, const char *func) -{ - sourceFile = file; - sourceLine = line; - sourceFunc = func; -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// Global new/new[] -// -// These are the standard new/new[] operators. They are merely interface functions that operate like normal new/new[], but use our -// memory tracking routines. 
-// --------------------------------------------------------------------------------------------------------------------------------- - -void *operator new(size_t reportedSize) throw(std::bad_alloc) -{ -#ifdef TEST_MEMORY_MANAGER - log("ENTER: new with size = %u",(size_t) reportedSize); -#endif - - // ANSI says: allocation requests of 0 bytes will still return a valid value - - if (reportedSize == 0) reportedSize = 1; - - // ANSI says: loop continuously because the error handler could possibly free up some memory - - for(;;) - { - // Try the allocation - - void *ptr = m_allocator(sourceFile, sourceLine, sourceFunc, m_alloc_new, reportedSize); - if (ptr) - { -#ifdef TEST_MEMORY_MANAGER - log("EXIT : new with ptr = %p",ptr); -#endif - return ptr; - } - - // There isn't a way to determine the new handler, except through setting it. So we'll just set it to NULL, then - // set it back again. - - std::new_handler nh = std::set_new_handler(0); - std::set_new_handler(nh); - - // If there is an error handler, call it - - if (nh) - { - (*nh)(); - } - - // Otherwise, throw the exception - - else - { -#ifdef TEST_MEMORY_MANAGER - log("EXIT : new with bad_alloc"); -#endif - throw std::bad_alloc(); - } - } -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -void *operator new[](size_t reportedSize) throw(std::bad_alloc) -{ -#ifdef TEST_MEMORY_MANAGER - log("ENTER: new[] with size = %u",(size_t) reportedSize); -#endif - - // The ANSI standard says that allocation requests of 0 bytes will still return a valid value - - if (reportedSize == 0) reportedSize = 1; - - // ANSI says: loop continuously because the error handler could possibly free up some memory - - for(;;) - { - // Try the allocation - - void *ptr = m_allocator(sourceFile, sourceLine, sourceFunc, m_alloc_new_array, reportedSize); - if (ptr) - { -#ifdef TEST_MEMORY_MANAGER - log("EXIT : new[] with ptr = %p",ptr); -#endif - return 
ptr; - } - - // There isn't a way to determine the new handler, except through setting it. So we'll just set it to NULL, then - // set it back again. - - std::new_handler nh = std::set_new_handler(0); - std::set_new_handler(nh); - - // If there is an error handler, call it - - if (nh) - { - (*nh)(); - } - - // Otherwise, throw the exception - - else - { -#ifdef TEST_MEMORY_MANAGER - log("EXIT : new[] with bad_alloc"); -#endif - throw std::bad_alloc(); - } - } -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// Other global new/new[] -// -// These are the standard new/new[] operators as used by Microsoft's memory tracker. We don't want them interfering with our memory -// tracking efforts. Like the previous versions, these are merely interface functions that operate like normal new/new[], but use -// our memory tracking routines. -// --------------------------------------------------------------------------------------------------------------------------------- - -void *operator new(size_t reportedSize, const char *sourceFile, int sourceLine) throw(std::bad_alloc) -{ -#ifdef TEST_MEMORY_MANAGER - log("ENTER: new with size = %u, file,line = %s, %d", - (size_t) reportedSize, sourceFile, sourceLine); -#endif - - // The ANSI standard says that allocation requests of 0 bytes will still return a valid value - - if (reportedSize == 0) reportedSize = 1; - - // ANSI says: loop continuously because the error handler could possibly free up some memory - - for(;;) - { - // Try the allocation - - void *ptr = m_allocator(sourceFile, sourceLine, "??", m_alloc_new, reportedSize); - if (ptr) - { -#ifdef TEST_MEMORY_MANAGER - log("EXIT : new with ptr = %p",ptr); -#endif - return ptr; - } - - // There isn't a way to determine the new handler, except through setting it. So we'll just set it to NULL, then - // set it back again. 
- - std::new_handler nh = std::set_new_handler(0); - std::set_new_handler(nh); - - // If there is an error handler, call it - - if (nh) - { - (*nh)(); - } - - // Otherwise, throw the exception - - else - { -#ifdef TEST_MEMORY_MANAGER - log("EXIT : new with bad_alloc"); -#endif - throw std::bad_alloc(); - } - } -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -void *operator new[](size_t reportedSize, const char *sourceFile, int sourceLine) throw(std::bad_alloc) -{ -#ifdef TEST_MEMORY_MANAGER - log("ENTER: new[] with size = %u, file,line = %s, %d", - (size_t) reportedSize, sourceFile, sourceLine); -#endif - - // The ANSI standard says that allocation requests of 0 bytes will still return a valid value - - if (reportedSize == 0) reportedSize = 1; - - // ANSI says: loop continuously because the error handler could possibly free up some memory - - for(;;) - { - // Try the allocation - - void *ptr = m_allocator(sourceFile, sourceLine, "??", m_alloc_new_array, reportedSize); - if (ptr) - { -#ifdef TEST_MEMORY_MANAGER - log("EXIT : new[] with ptr = %p",ptr); -#endif - return ptr; - } - - // There isn't a way to determine the new handler, except through setting it. So we'll just set it to NULL, then - // set it back again. - - std::new_handler nh = std::set_new_handler(0); - std::set_new_handler(nh); - - // If there is an error handler, call it - - if (nh) - { - (*nh)(); - } - - // Otherwise, throw the exception - - else - { -#ifdef TEST_MEMORY_MANAGER - log("EXIT : new[] with bad_alloc"); -#endif - throw std::bad_alloc(); - } - } -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// Global delete/delete[] -// -// These are the standard delete/delete[] operators. They are merely interface functions that operate like normal delete/delete[], -// but use our memory tracking routines. 
-// --------------------------------------------------------------------------------------------------------------------------------- - -void operator delete(void *reportedAddress) throw() -{ -#ifdef TEST_MEMORY_MANAGER - log("ENTER: delete for %p",reportedAddress); -#endif - - // ANSI says: delete & delete[] allow NULL pointers (they do nothing) - - if (!reportedAddress) return; - - m_deallocator(sourceFile, sourceLine, sourceFunc, m_alloc_delete, reportedAddress); - -#ifdef TEST_MEMORY_MANAGER - log("EXIT : delete for %p",reportedAddress); -#endif -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -void operator delete[](void *reportedAddress) throw() -{ -#ifdef TEST_MEMORY_MANAGER - log("ENTER: delete[] for %p",reportedAddress); -#endif - - // ANSI says: delete & delete[] allow NULL pointers (they do nothing) - - if (!reportedAddress) return; - - m_deallocator(sourceFile, sourceLine, sourceFunc, m_alloc_delete_array, reportedAddress); - -#ifdef TEST_MEMORY_MANAGER - log("EXIT : delete[] for %p",reportedAddress); -#endif -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// Allocate memory and track it -// --------------------------------------------------------------------------------------------------------------------------------- - -void *m_allocator(const char *sourceFile, const size_t sourceLine, const char *sourceFunc, const size_t allocationType, const size_t reportedSize) -{ - try - { -#ifdef TEST_MEMORY_MANAGER - log("ENTER: m_allocator()"); -#endif - - // Increase our allocation count - - currentAllocationCount++; - - // Log the request - - if (alwaysLogAll) log("%05d %-40s %8s : %s", currentAllocationCount, ownerString(sourceFile, sourceLine, sourceFunc), allocationTypes[allocationType], memorySizeString(reportedSize)); - - // If you hit this assert, you requested a 
breakpoint on a specific allocation count - m_assert(currentAllocationCount != breakOnAllocationCount); - - // If necessary, grow the reservoir of unused allocation units - - if (!reservoir) - { - // Allocate 256 reservoir elements - - reservoir = (sAllocUnit *) malloc(sizeof(sAllocUnit) * 256); - - // If you hit this assert, then the memory manager failed to allocate internal memory for tracking the - // allocations - m_assert(reservoir != NULL); - - // Danger Will Robinson! - - if (reservoir == NULL) throw "Unable to allocate RAM for internal memory tracking data"; - - // Build a linked-list of the elements in our reservoir - - memset(reservoir, 0, sizeof(sAllocUnit) * 256); - for (size_t i = 0; i < 256 - 1; i++) - { - reservoir[i].next = &reservoir[i+1]; - } - - // Add this address to our reservoirBuffer so we can free it later - - sAllocUnit **temp = (sAllocUnit **) realloc(reservoirBuffer, (reservoirBufferSize + 1) * sizeof(sAllocUnit *)); - m_assert(temp); - if (temp) - { - reservoirBuffer = temp; - reservoirBuffer[reservoirBufferSize++] = reservoir; - } - } - - // Logical flow says this should never happen... 
- m_assert(reservoir != NULL); - - // Grab a new allocaton unit from the front of the reservoir - - sAllocUnit *au = reservoir; - reservoir = au->next; - - // Populate it with some real data - - memset(au, 0, sizeof(sAllocUnit)); - au->actualSize = calculateActualSize(reportedSize); -#ifdef RANDOM_FAILURE - double a = rand(); - double b = RAND_MAX / 100.0 * RANDOM_FAILURE; - if (a > b) - { - au->actualAddress = malloc(au->actualSize); - } - else - { - log("!Random faiure!"); - au->actualAddress = NULL; - } -#else - au->actualAddress = malloc(au->actualSize); -#endif - au->reportedSize = reportedSize; - au->reportedAddress = calculateReportedAddress(au->actualAddress); - au->allocationType = allocationType; - au->sourceLine = sourceLine; - au->allocationNumber = currentAllocationCount; - if (sourceFile) strncpy(au->sourceFile, sourceFileStripper(sourceFile), sizeof(au->sourceFile) - 1); - else strcpy (au->sourceFile, "??"); - if (sourceFunc) strncpy(au->sourceFunc, sourceFunc, sizeof(au->sourceFunc) - 1); - else strcpy (au->sourceFunc, "??"); - - // We don't want to assert with random failures, because we want the application to deal with them. - -#ifndef RANDOM_FAILURE - // If you hit this assert, then the requested allocation simply failed (you're out of memory.) Interrogate the - // variable 'au' or the stack frame to see what you were trying to do. - m_assert(au->actualAddress != NULL); -#endif - - if (au->actualAddress == NULL) - { - throw "Request for allocation failed. Out of memory."; - } - - // If you hit this assert, then this allocation was made from a source that isn't setup to use this memory tracking - // software, use the stack frame to locate the source and include our H file. 
- m_assert(allocationType != m_alloc_unknown); - - // Insert the new allocation into the hash table - - size_t hashIndex = ((size_t) au->reportedAddress >> 4) & (hashSize - 1); - if (hashTable[hashIndex]) hashTable[hashIndex]->prev = au; - au->next = hashTable[hashIndex]; - au->prev = NULL; - hashTable[hashIndex] = au; - - // Account for the new allocatin unit in our stats - - stats.totalReportedMemory += au->reportedSize; - stats.totalActualMemory += au->actualSize; - stats.totalAllocUnitCount++; - if (stats.totalReportedMemory > stats.peakReportedMemory) stats.peakReportedMemory = stats.totalReportedMemory; - if (stats.totalActualMemory > stats.peakActualMemory) stats.peakActualMemory = stats.totalActualMemory; - if (stats.totalAllocUnitCount > stats.peakAllocUnitCount) stats.peakAllocUnitCount = stats.totalAllocUnitCount; - stats.accumulatedReportedMemory += au->reportedSize; - stats.accumulatedActualMemory += au->actualSize; - stats.accumulatedAllocUnitCount++; - - // Prepare the allocation unit for use (wipe it with recognizable garbage) - - wipeWithPattern(au, unusedPattern); - - // calloc() expects the reported memory address range to be filled with 0's - - if (allocationType == m_alloc_calloc) - { - memset(au->reportedAddress, 0, au->reportedSize); - } - - // Validate every single allocated unit in memory - - if (alwaysValidateAll) m_validateAllAllocUnits(); - - // Log the result - - if (alwaysLogAll) log(" OK: %010p (hash: %d)", au->reportedAddress, hashIndex); - - // Resetting the globals insures that if at some later time, somebody calls our memory manager from an unknown - // source (i.e. they didn't include our H file) then we won't think it was the last allocation. 
- - resetGlobals(); - - // Return the (reported) address of the new allocation unit - -#ifdef TEST_MEMORY_MANAGER - log("EXIT : m_allocator()"); -#endif - - return au->reportedAddress; - } - catch(const char *err) - { - // Deal with the errors - - log(err); - resetGlobals(); - -#ifdef TEST_MEMORY_MANAGER - log("EXIT : m_allocator()"); -#endif - - return NULL; - } -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// Reallocate memory and track it -// --------------------------------------------------------------------------------------------------------------------------------- - -void *m_reallocator(const char *sourceFile, const size_t sourceLine, const char *sourceFunc, const size_t reallocationType, const size_t reportedSize, void *reportedAddress) -{ - try - { -#ifdef TEST_MEMORY_MANAGER - log("ENTER: m_reallocator()"); -#endif - - // Calling realloc with a NULL should force same operations as a malloc - - if (!reportedAddress) - { - return m_allocator(sourceFile, sourceLine, sourceFunc, reallocationType, reportedSize); - } - - // Increase our allocation count - - currentAllocationCount++; - - // If you hit this assert, you requested a breakpoint on a specific allocation count - m_assert(currentAllocationCount != breakOnAllocationCount); - - // Log the request - - if (alwaysLogAll) log("%05d %-40s %8s(%010p): %s", currentAllocationCount, ownerString(sourceFile, sourceLine, sourceFunc), allocationTypes[reallocationType], reportedAddress, memorySizeString(reportedSize)); - - // Locate the existing allocation unit - - sAllocUnit *au = findAllocUnit(reportedAddress); - - // If you hit this assert, you tried to reallocate RAM that wasn't allocated by this memory manager. - m_assert(au != NULL); - if (au == NULL) throw "Request to reallocate RAM that was never allocated"; - - // If you hit this assert, then the allocation unit that is about to be reallocated is damaged. 
But you probably - // already know that from a previous assert you should have seen in validateAllocUnit() :) - m_assert(m_validateAllocUnit(au)); - - // If you hit this assert, then this reallocation was made from a source that isn't setup to use this memory - // tracking software, use the stack frame to locate the source and include our H file. - m_assert(reallocationType != m_alloc_unknown); - - // If you hit this assert, you were trying to reallocate RAM that was not allocated in a way that is compatible with - // realloc. In other words, you have a allocation/reallocation mismatch. - m_assert(au->allocationType == m_alloc_malloc || - au->allocationType == m_alloc_calloc || - au->allocationType == m_alloc_realloc); - - // If you hit this assert, then the "break on realloc" flag for this allocation unit is set (and will continue to be - // set until you specifically shut it off. Interrogate the 'au' variable to determine information about this - // allocation unit. - m_assert(au->breakOnRealloc == false); - - // Keep track of the original size - - size_t originalReportedSize = au->reportedSize; - - // Do the reallocation - - void *oldReportedAddress = reportedAddress; - size_t newActualSize = calculateActualSize(reportedSize); - void *newActualAddress = NULL; -#ifdef RANDOM_FAILURE - double a = rand(); - double b = RAND_MAX / 100.0 * RANDOM_FAILURE; - if (a > b) - { - newActualAddress = realloc(au->actualAddress, newActualSize); - } - else - { - log("!Random faiure!"); - } -#else - newActualAddress = realloc(au->actualAddress, newActualSize); -#endif - - // We don't want to assert with random failures, because we want the application to deal with them. - -#ifndef RANDOM_FAILURE - // If you hit this assert, then the requested allocation simply failed (you're out of memory) Interrogate the - // variable 'au' to see the original allocation. You can also query 'newActualSize' to see the amount of memory - // trying to be allocated. 
Finally, you can query 'reportedSize' to see how much memory was requested by the caller. - m_assert(newActualAddress); -#endif - - if (!newActualAddress) throw "Request for reallocation failed. Out of memory."; - - // Remove this allocation from our stats (we'll add the new reallocation again later) - - stats.totalReportedMemory -= au->reportedSize; - stats.totalActualMemory -= au->actualSize; - - // Update the allocation with the new information - - au->actualSize = newActualSize; - au->actualAddress = newActualAddress; - au->reportedSize = calculateReportedSize(newActualSize); - au->reportedAddress = calculateReportedAddress(newActualAddress); - au->allocationType = reallocationType; - au->sourceLine = sourceLine; - au->allocationNumber = currentAllocationCount; - if (sourceFile) strncpy(au->sourceFile, sourceFileStripper(sourceFile), sizeof(au->sourceFile) - 1); - else strcpy (au->sourceFile, "??"); - if (sourceFunc) strncpy(au->sourceFunc, sourceFunc, sizeof(au->sourceFunc) - 1); - else strcpy (au->sourceFunc, "??"); - - // The reallocation may cause the address to change, so we should relocate our allocation unit within the hash table - - size_t hashIndex = (size_t) -1; - if (oldReportedAddress != au->reportedAddress) - { - // Remove this allocation unit from the hash table - - { - size_t hashIndex2 = ((size_t) oldReportedAddress >> 4) & (hashSize - 1); - if (hashTable[hashIndex2] == au) - { - hashTable[hashIndex2] = hashTable[hashIndex2]->next; - } - else - { - if (au->prev) au->prev->next = au->next; - if (au->next) au->next->prev = au->prev; - } - } - - // Re-insert it back into the hash table - - hashIndex = ((size_t) au->reportedAddress >> 4) & (hashSize - 1); - if (hashTable[hashIndex]) hashTable[hashIndex]->prev = au; - au->next = hashTable[hashIndex]; - au->prev = NULL; - hashTable[hashIndex] = au; - } - - // Account for the new allocatin unit in our stats - - stats.totalReportedMemory += au->reportedSize; - stats.totalActualMemory += au->actualSize; 
- if (stats.totalReportedMemory > stats.peakReportedMemory) stats.peakReportedMemory = stats.totalReportedMemory; - if (stats.totalActualMemory > stats.peakActualMemory) stats.peakActualMemory = stats.totalActualMemory; - int deltaReportedSize = reportedSize - originalReportedSize; - if (deltaReportedSize > 0) - { - stats.accumulatedReportedMemory += deltaReportedSize; - stats.accumulatedActualMemory += deltaReportedSize; - } - - // Prepare the allocation unit for use (wipe it with recognizable garbage) - - wipeWithPattern(au, unusedPattern, originalReportedSize); - - // If you hit this assert, then something went wrong, because the allocation unit was properly validated PRIOR to - // the reallocation. This should not happen. - m_assert(m_validateAllocUnit(au)); - - // Validate every single allocated unit in memory - - if (alwaysValidateAll) m_validateAllAllocUnits(); - - // Log the result - - if (alwaysLogAll) log(" OK: %010p (hash: %d)", au->reportedAddress, hashIndex); - - // Resetting the globals insures that if at some later time, somebody calls our memory manager from an unknown - // source (i.e. they didn't include our H file) then we won't think it was the last allocation. 
- - resetGlobals(); - - // Return the (reported) address of the new allocation unit - -#ifdef TEST_MEMORY_MANAGER - log("EXIT : m_reallocator()"); -#endif - - return au->reportedAddress; - } - catch(const char *err) - { - // Deal with the errors - - log(err); - resetGlobals(); - -#ifdef TEST_MEMORY_MANAGER - log("EXIT : m_reallocator()"); -#endif - - return NULL; - } -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// Deallocate memory and track it -// --------------------------------------------------------------------------------------------------------------------------------- - -void m_deallocator(const char *sourceFile, const size_t sourceLine, const char *sourceFunc, const size_t deallocationType, const void *reportedAddress) -{ - try - { -#ifdef TEST_MEMORY_MANAGER - log("ENTER: m_deallocator()"); -#endif - - // Log the request - - if (alwaysLogAll) log(" %-40s %8s(%010p)", ownerString(sourceFile, sourceLine, sourceFunc), allocationTypes[deallocationType], reportedAddress); - - // Go get the allocation unit - - sAllocUnit *au = findAllocUnit(reportedAddress); - - // If you hit this assert, you tried to deallocate RAM that wasn't allocated by this memory manager. - m_assert(au != NULL); - if (au == NULL) throw "Request to deallocate RAM that was never allocated"; - - // If you hit this assert, then the allocation unit that is about to be deallocated is damaged. But you probably - // already know that from a previous assert you should have seen in validateAllocUnit() :) - m_assert(m_validateAllocUnit(au)); - - // If you hit this assert, then this deallocation was made from a source that isn't setup to use this memory - // tracking software, use the stack frame to locate the source and include our H file. 
- m_assert(deallocationType != m_alloc_unknown); - - // If you hit this assert, you were trying to deallocate RAM that was not allocated in a way that is compatible with - // the deallocation method requested. In other words, you have a allocation/deallocation mismatch. - if (au->allocationType == m_alloc_new && !(deallocationType == m_alloc_delete)) { - std::cout<<"alloc == new, but dealloc != delete for "<actualAddress)<allocationType == m_alloc_new_array && !(deallocationType == m_alloc_delete_array)) { - std::cout<<"alloc == new[], but dealloc != delete[] for "<actualAddress)<allocationType == m_alloc_new ) || - (deallocationType == m_alloc_delete_array && au->allocationType == m_alloc_new_array) || - (deallocationType == m_alloc_free && au->allocationType == m_alloc_malloc ) || - (deallocationType == m_alloc_free && au->allocationType == m_alloc_calloc ) || - (deallocationType == m_alloc_free && au->allocationType == m_alloc_realloc ) || - (deallocationType == m_alloc_unknown ) ); - - // If you hit this assert, then the "break on dealloc" flag for this allocation unit is set. Interrogate the 'au' - // variable to determine information about this allocation unit. - m_assert(au->breakOnDealloc == false); - - // Wipe the deallocated RAM with a new pattern. This doen't actually do us much good in debug mode under WIN32, - // because Microsoft's memory debugging & tracking utilities will wipe it right after we do. Oh well. 
- - wipeWithPattern(au, releasedPattern); - - // Do the deallocation - - free(au->actualAddress); - - // Remove this allocation unit from the hash table - - size_t hashIndex = ((size_t) au->reportedAddress >> 4) & (hashSize - 1); - if (hashTable[hashIndex] == au) - { - hashTable[hashIndex] = au->next; - } - else - { - if (au->prev) au->prev->next = au->next; - if (au->next) au->next->prev = au->prev; - } - - // Remove this allocation from our stats - - stats.totalReportedMemory -= au->reportedSize; - stats.totalActualMemory -= au->actualSize; - stats.totalAllocUnitCount--; - - // Add this allocation unit to the front of our reservoir of unused allocation units - - memset(au, 0, sizeof(sAllocUnit)); - au->next = reservoir; - reservoir = au; - - // Resetting the globals insures that if at some later time, somebody calls our memory manager from an unknown - // source (i.e. they didn't include our H file) then we won't think it was the last allocation. - - resetGlobals(); - - // Validate every single allocated unit in memory - - if (alwaysValidateAll) m_validateAllAllocUnits(); - - // If we're in the midst of static deinitialization time, track any pending memory leaks - - if (staticDeinitTime) dumpLeakReport(); - } - catch(const char *err) - { - // Deal with errors - - log(err); - resetGlobals(); - } - -#ifdef TEST_MEMORY_MANAGER - log("EXIT : m_deallocator()"); -#endif -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- The following utilitarian allow you to become proactive in tracking your own memory, or help you narrow in on those tough -// bugs. 
-// --------------------------------------------------------------------------------------------------------------------------------- - -bool m_validateAddress(const void *reportedAddress) -{ - // Just see if the address exists in our allocation routines - - return findAllocUnit(reportedAddress) != NULL; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -bool m_validateAllocUnit(const sAllocUnit *allocUnit) -{ - // Make sure the padding is untouched - - long *pre = (long *) allocUnit->actualAddress; - long *post = (long *) ((char *)allocUnit->actualAddress + allocUnit->actualSize - paddingSize * sizeof(long)); - bool errorFlag = false; - for (size_t i = 0; i < paddingSize; i++, pre++, post++) - { - if (*pre != (long) prefixPattern) - { - log("A memory allocation unit was corrupt because of an underrun:"); - m_dumpAllocUnit(allocUnit, " "); - errorFlag = true; - log("prefix is:"); - long *pre2 = (long *) allocUnit->actualAddress; - for (size_t i2=0; i2actualAddress + allocUnit->actualSize - paddingSize * sizeof(long)); - for (size_t i2=0; i2next; - } - } - - // Test for hash-table correctness - - if (allocCount != stats.totalAllocUnitCount) - { - log("Memory tracking hash table corrupt!"); - errors++; - } - - // If you hit this assert, then the internal memory (hash table) used by this memory tracking software is damaged! The - // best way to track this down is to use the alwaysLogAll flag in conjunction with STRESS_TEST macro to narrow in on the - // offending code. After running the application with these settings (and hitting this assert again), interrogate the - // memory.log file to find the previous successful operation. The corruption will have occurred between that point and this - // assertion. 
- m_assert(allocCount == stats.totalAllocUnitCount); - - // If you hit this assert, then you've probably already been notified that there was a problem with a allocation unit in a - // prior call to validateAllocUnit(), but this assert is here just to make sure you know about it. :) - m_assert(errors == 0); - - // Log any errors - - if (errors) log("While validting all allocation units, %d allocation unit(s) were found to have problems", errors); - - // Return the error status - - return errors != 0; -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- Unused RAM calculation routines. Use these to determine how much of your RAM is unused (in bytes) -// --------------------------------------------------------------------------------------------------------------------------------- - -size_t m_calcUnused(const sAllocUnit *allocUnit) -{ - const size_t *ptr = (const size_t *) allocUnit->reportedAddress; - size_t count = 0; - - for (size_t i = 0; i < allocUnit->reportedSize; i += sizeof(long), ptr++) - { - if (*ptr == unusedPattern) count += sizeof(long); - } - - return count; -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -size_t m_calcAllUnused() -{ - // Just go through each allocation unit in the hash table and count the unused RAM - - size_t total = 0; - for (size_t i = 0; i < hashSize; i++) - { - sAllocUnit *ptr = hashTable[i]; - while(ptr) - { - total += m_calcUnused(ptr); - ptr = ptr->next; - } - } - - return total; -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// -DOC- The following functions are for logging and statistics reporting. 
-// --------------------------------------------------------------------------------------------------------------------------------- - -void m_extra_log(const char *text) -{ - log("%s\n",text); -} - -void m_dumpAllocUnit(const sAllocUnit *allocUnit, const char *prefix) -{ - log("%sAddress (reported): %010p", prefix, allocUnit->reportedAddress); - log("%sAddress (actual) : %010p", prefix, allocUnit->actualAddress); - log("%sSize (reported) : 0x%08X (%s)", prefix, allocUnit->reportedSize, memorySizeString(allocUnit->reportedSize)); - log("%sSize (actual) : 0x%08X (%s)", prefix, allocUnit->actualSize, memorySizeString(allocUnit->actualSize)); - log("%sOwner : %s(%d)::%s", prefix, allocUnit->sourceFile, allocUnit->sourceLine, allocUnit->sourceFunc); - log("%sAllocation type : %s", prefix, allocationTypes[allocUnit->allocationType]); - log("%sAllocation number : %d", prefix, allocUnit->allocationNumber); -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -void m_dumpMemoryReport(const char *filename, const bool overwrite) -{ - // Open the report file - - FILE *fp = NULL; - - if (overwrite) fp = fopen(filename, "w+b"); - else fp = fopen(filename, "ab"); - - // If you hit this assert, then the memory report generator is unable to log information to a file (can't open the file for - // some reason.) 
- m_assert(fp); - if (!fp) return; - - // Header - - static char timeString[25]; - memset(timeString, 0, sizeof(timeString)); - time_t t = time(NULL); - struct tm *tme = localtime(&t); - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, "| Memory report for: %02d/%02d/%04d %02d:%02d:%02d |" LINE_END, tme->tm_mon + 1, tme->tm_mday, tme->tm_year + 1900, tme->tm_hour, tme->tm_min, tme->tm_sec); - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, LINE_END); - fprintf(fp, LINE_END); - - // Report summary - - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, "| T O T A L S |" LINE_END); - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, " Allocation unit count: %10s" LINE_END, insertCommas(stats.totalAllocUnitCount)); - fprintf(fp, " Reported to application: %s" LINE_END, memorySizeString(stats.totalReportedMemory)); - fprintf(fp, " Actual total memory in use: %s" LINE_END, memorySizeString(stats.totalActualMemory)); - fprintf(fp, " Memory tracking overhead: %s" LINE_END, memorySizeString(stats.totalActualMemory - stats.totalReportedMemory)); - fprintf(fp, LINE_END); - - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, "| P E A K S |" LINE_END); - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, " Allocation unit count: %10s" LINE_END, 
insertCommas(stats.peakAllocUnitCount)); - fprintf(fp, " Reported to application: %s" LINE_END, memorySizeString(stats.peakReportedMemory)); - fprintf(fp, " Actual: %s" LINE_END, memorySizeString(stats.peakActualMemory)); - fprintf(fp, " Memory tracking overhead: %s" LINE_END, memorySizeString(stats.peakActualMemory - stats.peakReportedMemory)); - fprintf(fp, LINE_END); - - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, "| A C C U M U L A T E D |" LINE_END); - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, " Allocation unit count: %s" LINE_END, memorySizeString(stats.accumulatedAllocUnitCount)); - fprintf(fp, " Reported to application: %s" LINE_END, memorySizeString(stats.accumulatedReportedMemory)); - fprintf(fp, " Actual: %s" LINE_END, memorySizeString(stats.accumulatedActualMemory)); - fprintf(fp, LINE_END); - - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, "| U N U S E D |" LINE_END); - fprintf(fp, " ---------------------------------------------------------------------------------------------------------------------------------- " LINE_END); - fprintf(fp, " Memory allocated but not in use: %s" LINE_END, memorySizeString(m_calcAllUnused())); - fprintf(fp, LINE_END); - - dumpAllocations(fp); - - fclose(fp); -} - -// --------------------------------------------------------------------------------------------------------------------------------- - -sMStats m_getMemoryStatistics() -{ - return stats; -} - -// --------------------------------------------------------------------------------------------------------------------------------- -// mmgr.cpp - End of file -// 
--------------------------------------------------------------------------------------------------------------------------------- From 10960dcd6b1032697f6ad7df337a1679688dc346 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Wed, 3 Jan 2018 10:21:56 -0500 Subject: [PATCH 016/111] typo (#809-pybind11) --- pysrc/RealGalaxy.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pysrc/RealGalaxy.cpp b/pysrc/RealGalaxy.cpp index d73986b4437..f274864834d 100644 --- a/pysrc/RealGalaxy.cpp +++ b/pysrc/RealGalaxy.cpp @@ -17,7 +17,7 @@ * and/or other materials provided with the distribution. */ -#include "Pybind11Helper.h" +#include "PyBind11Helper.h" #include "RealGalaxy.h" namespace galsim { From ea38cdf0a30a616688b20872bf34c848d26c0dd9 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Wed, 3 Jan 2018 13:23:01 -0500 Subject: [PATCH 017/111] Make meta_data.py file using actual install_dir (#809-pybind11) --- setup.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/setup.py b/setup.py index f3d50fa22f1..d45704eeb39 100644 --- a/setup.py +++ b/setup.py @@ -116,6 +116,8 @@ def finalize_options(self): # Add any extra things based on the compiler being used.. def build_extensions(self): + print('Platform is ',self.plat_name) + # Figure out what compiler it will use cc = self.compiler.executables['compiler_cxx'][0] print('Using compiler %s'%(cc)) @@ -126,17 +128,45 @@ def build_extensions(self): print('Using compiler %s'%(cc)) else: print('Using compiler %s, which is %s'%(cc,comp_type)) + # Add the appropriate extra flags for that compiler. for e in self.extensions: e.extra_compile_args = copt[ comp_type ] #e.extra_link_args = lopt[ comp_type ] + # Now run the normal build function. 
        build_ext.build_extensions(self)
 
+
+def make_meta_data(install_dir):
+    print('install_dir = ',install_dir)
+    meta_data_file = os.path.join('galsim','meta_data.py')
+    share_dir = os.path.join(install_dir,'galsim','share')
+    try:
+        f = open(meta_data_file,'w')
+    except IOError:
+        # Not sure if this is still relevant in setup.py world, but if user ran this under
+        # sudo and now is not using sudo, then the file might exist, but not be writable.
+        # However, it should still be removable, since the directory should be owned
+        # by the user.  So remove it and then retry opening it.
+        os.remove(meta_data_file)
+        f = open(meta_data_file,'w')
+
+    f.write('# This file is automatically generated by setup.py when building GalSim.\n')
+    f.write('# Do not edit.  Any edits will be lost the next time setup.py is run.\n')
+    f.write('\n')
+    f.write('install_dir = "%s"\n'%install_dir)
+    f.write('share_dir = "%s"\n'%share_dir)
+    f.close()
+
 # AFAICT, setuptools doesn't provide any easy access to the final installation location of the
 # executable scripts.  This bit is just to save the value of script_dir so I can use it later.
 # cf. http://stackoverflow.com/questions/12975540/correct-way-to-find-scripts-directory-from-setup-py-in-python-distutils/
 class my_easy_install( easy_install ):
+    def finalize_options(self):
+        easy_install.finalize_options(self)
+        # Make the meta_data.py file based on the actual installation directory.
+        make_meta_data(self.install_dir)
+
     # Match the call signature of the easy_install version.
def write_script(self, script_name, contents, mode="t", *ignored): # Run the normal version From 93b7b1f2c43808b063e08d3293dea068ab80da0b Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Wed, 3 Jan 2018 13:24:07 -0500 Subject: [PATCH 018/111] Switch to using entry_points rather than scripts for the executables (#809-pybind11) --- bin/galsim_json.py | 31 -------------- examples/check_des | 11 +++-- examples/check_json | 31 +++++++------- examples/check_yaml | 29 +++++++------ galsim/__init__.py | 1 + bin/galsim_yaml.py => galsim/__main__.py | 15 +------ .../download_cosmos.py | 21 ++-------- bin/galsim.py => galsim/main.py | 25 ++++------- setup.py | 41 ++++++++++--------- 9 files changed, 70 insertions(+), 135 deletions(-) delete mode 100644 bin/galsim_json.py rename bin/galsim_yaml.py => galsim/__main__.py (66%) rename bin/galsim_download_cosmos.py => galsim/download_cosmos.py (97%) rename bin/galsim.py => galsim/main.py (93%) diff --git a/bin/galsim_json.py b/bin/galsim_json.py deleted file mode 100644 index 5f546749f9a..00000000000 --- a/bin/galsim_json.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (c) 2012-2017 by the GalSim developers team on GitHub -# https://github.com/GalSim-developers -# -# This file is part of GalSim: The modular galaxy image simulation toolkit. -# https://github.com/GalSim-developers/GalSim -# -# GalSim is free software: redistribution and use in source and binary forms, -# with or without modification, are permitted provided that the following -# conditions are met: -# -# 1. Redistributions of source code must retain the above copyright notice, this -# list of conditions, and the disclaimer given in the accompanying LICENSE -# file. -# 2. Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions, and the disclaimer given in the documentation -# and/or other materials provided with the distribution. -# - -# For backwards compatibility. 
-# `galsim_json` is equivalent to `galsim -f json`, although if the config -# file has an extension that starts with `.j`, the `-f json` part is -# unnecessary. - -from __future__ import print_function - -import sys -import subprocess -print('Note: galsim_json has been deprecated. Use galsim instead.') -print('Running galsim -f json',' '.join(sys.argv[1:])) -print() -subprocess.call( ['galsim','-f','json'] + sys.argv[1:] ) diff --git a/examples/check_des b/examples/check_des index e9f4a1d8840..a49b990b00e 100755 --- a/examples/check_des +++ b/examples/check_des @@ -20,8 +20,7 @@ cd des -python=../../bin/installed_python # For python scripts -bin=../../bin # For galsim executable +python='/usr/bin/env python' # For python scripts if [ ! -d "output" ]; then mkdir output @@ -35,15 +34,15 @@ fi nfiles=1 time $python draw_psf.py last=$nfiles || exit -time $bin/galsim draw_psf.yaml output.nfiles=$nfiles || exit +time galsim draw_psf.yaml output.nfiles=$nfiles || exit # These don't have any check, but at least make sure they run to completion. -time $bin/galsim blend.yaml || exit -time $bin/galsim blendset.yaml || exit +time galsim blend.yaml || exit +time galsim blendset.yaml || exit # Using the real galaxies takes a long time, dominated by the pyfits I/O (which preload does # not help with). So use parametric for this test. -time $bin/galsim meds.yaml output.nfiles=$nfiles output.nobjects=1000 gal.items.0.gal_type=parametric || exit +time galsim meds.yaml output.nfiles=$nfiles output.nobjects=1000 gal.items.0.gal_type=parametric || exit echo 'Checking diffs:' diff --git a/examples/check_json b/examples/check_json index 8ceb12e6212..9b696b3e9cc 100755 --- a/examples/check_json +++ b/examples/check_json @@ -18,47 +18,46 @@ # and/or other materials provided with the distribution. 
# -python=../bin/installed_python # For python scripts -bin=../bin # For galsim executable +python='/usr/bin/env python' # For python scripts /bin/rm -rf output /bin/rm -rf output_json time $python demo1.py || exit -time $bin/galsim -v2 json/demo1.json || exit +time galsim -v2 json/demo1.json || exit time $python demo2.py || exit -time $bin/galsim -v2 json/demo2.json || exit +time galsim -v2 json/demo2.json || exit time $python demo3.py || exit -time $bin/galsim -v2 json/demo3.json || exit +time galsim -v2 json/demo3.json || exit time $python demo4.py || exit -time $bin/galsim -v2 json/demo4.json || exit +time galsim -v2 json/demo4.json || exit time $python demo5.py || exit -time $bin/galsim -v2 json/demo5.json || exit +time galsim -v2 json/demo5.json || exit time $python demo6.py || exit -time $bin/galsim -v2 json/demo6a.json || exit -time $bin/galsim -v2 json/demo6b.json || exit +time galsim -v2 json/demo6a.json || exit +time galsim -v2 json/demo6b.json || exit time $python demo7.py || exit -time $bin/galsim -v2 json/demo7.json || exit +time galsim -v2 json/demo7.json || exit time $python demo8.py || exit -time $bin/galsim -v2 json/demo8a.json || exit -time $bin/galsim -v2 json/demo8b.json || exit +time galsim -v2 json/demo8a.json || exit +time galsim -v2 json/demo8b.json || exit time $python demo9.py || exit -time $bin/galsim -v1 json/demo9.json output.skip='{"type":"List","items":[0,0,0,0,0,1]}' || exit -time $bin/galsim -v1 json/demo9.json output.noclobber=True || exit +time galsim -v1 json/demo9.json output.skip='{"type":"List","items":[0,0,0,0,0,1]}' || exit +time galsim -v1 json/demo9.json output.noclobber=True || exit time $python demo10.py || exit -time $bin/galsim -v2 json/demo10.json || exit +time galsim -v2 json/demo10.json || exit time $python demo11.py || exit -time $bin/galsim -v2 json/demo11.json || exit +time galsim -v2 json/demo11.json || exit echo 'Checking diffs: (No output means success)' diff --git a/examples/check_yaml b/examples/check_yaml 
index d0234113eb3..9b7ed5630f9 100755 --- a/examples/check_yaml +++ b/examples/check_yaml @@ -18,46 +18,45 @@ # and/or other materials provided with the distribution. # -python=../bin/installed_python # For python scripts -bin=../bin # For galsim executable +python='/usr/bin/env python' /bin/rm -rf output /bin/rm -rf output_yaml time $python demo1.py || exit -time $bin/galsim -v2 demo1.yaml || exit +time galsim -v2 demo1.yaml || exit time $python demo2.py || exit -time $bin/galsim -v2 demo2.yaml || exit +time galsim -v2 demo2.yaml || exit time $python demo3.py || exit -time $bin/galsim -v2 demo3.yaml || exit +time galsim -v2 demo3.yaml || exit time $python demo4.py || exit -time $bin/galsim -v2 demo4.yaml || exit +time galsim -v2 demo4.yaml || exit time $python demo5.py || exit -time $bin/galsim -v2 demo5.yaml || exit +time galsim -v2 demo5.yaml || exit time $python demo6.py || exit -time $bin/galsim -v2 demo6.yaml || exit +time galsim -v2 demo6.yaml || exit time $python demo7.py || exit -time $bin/galsim -v2 demo7.yaml || exit +time galsim -v2 demo7.yaml || exit time $python demo8.py || exit -time $bin/galsim -v2 demo8.yaml || exit +time galsim -v2 demo8.yaml || exit time $python demo9.py || exit -time $bin/galsim -v1 -n 3 -j 1 demo9.yaml || exit -time $bin/galsim -v1 -n 3 -j 2 demo9.yaml || exit -time $bin/galsim -v1 -n 3 -j 3 demo9.yaml || exit +time galsim -v1 -n 3 -j 1 demo9.yaml || exit +time galsim -v1 -n 3 -j 2 demo9.yaml || exit +time galsim -v1 -n 3 -j 3 demo9.yaml || exit time $python demo10.py || exit -time $bin/galsim -v2 demo10.yaml || exit +time galsim -v2 demo10.yaml || exit time $python demo11.py || exit -time $bin/galsim -v2 demo11.yaml || exit +time galsim -v2 demo11.yaml || exit echo 'Checking diffs: (No output means success)' diff --git a/galsim/__init__.py b/galsim/__init__.py index efa65a19922..ef07babda98 100644 --- a/galsim/__init__.py +++ b/galsim/__init__.py @@ -179,3 +179,4 @@ from . import cdmodel from . import utilities from . 
import fft +from . import download_cosmos diff --git a/bin/galsim_yaml.py b/galsim/__main__.py similarity index 66% rename from bin/galsim_yaml.py rename to galsim/__main__.py index 81087b6527d..2de141f6052 100644 --- a/bin/galsim_yaml.py +++ b/galsim/__main__.py @@ -16,16 +16,5 @@ # and/or other materials provided with the distribution. # - -# For backwards compatibility. -# `galsim_yaml` is equivalent to `galsim -f yaml`, although in most cases, -# the `-f yaml` part is unnecessary. - -from __future__ import print_function - -import sys -import subprocess -print('Note: galsim_yaml has been deprecated. Use galsim instead.') -print('Running galsim -f yaml',' '.join(sys.argv[1:])) -print() -subprocess.call( ['galsim','-f','yaml'] + sys.argv[1:] ) +from .main import main +main() diff --git a/bin/galsim_download_cosmos.py b/galsim/download_cosmos.py similarity index 97% rename from bin/galsim_download_cosmos.py rename to galsim/download_cosmos.py index 2916f962cef..b4cda9d04c0 100644 --- a/bin/galsim_download_cosmos.py +++ b/galsim/download_cosmos.py @@ -28,17 +28,6 @@ except: from urllib.request import urlopen -# Since this will be installed in the same directory as our galsim executable, -# we need to do the same trick about changing the path so it imports the real -# galsim module, not that executable. -temp = sys.path[0] -sys.path = sys.path[1:] -import galsim -sys.path = [temp] + sys.path - -script_name = os.path.basename(__file__) - - def parse_args(): """Handle the command line arguments using either argparse (if available) or optparse. 
""" @@ -372,6 +361,9 @@ def link_target(unpack_dir, link_dir, args, logger): logger.info("Made link to %s from %s", unpack_dir, link_dir) def main(): + from ._version import __version__ as version + from .meta_data import share_dir + args = parse_args() # Parse the integer verbosity level from the command line args into a logging_level string @@ -387,7 +379,7 @@ def main(): logger = logging.getLogger('galsim') # Give diagnostic about GalSim version - logger.debug("GalSim version: %s",galsim.__version__) + logger.debug("GalSim version: %s",version) logger.debug("This download script is: %s",__file__) logger.info("Type %s -h to see command line options.\n",script_name) @@ -399,7 +391,6 @@ def main(): # file_name is the name of the file to download, taken from the url. # target is the full path of the downloaded tarball - share_dir = galsim.meta_data.share_dir if args.dir is not None: target_dir = args.dir link = not args.nolink @@ -460,7 +451,3 @@ def main(): # Get the directory where this would normally have been unpacked. link_dir = os.path.join(share_dir, file_name)[:-len('.tar.gz')] link_target(unpack_dir, link_dir, args, logger) - - -if __name__ == "__main__": - main() diff --git a/bin/galsim.py b/galsim/main.py similarity index 93% rename from bin/galsim.py rename to galsim/main.py index 7bb34ac7eb0..209f6a34070 100644 --- a/bin/galsim.py +++ b/galsim/main.py @@ -27,22 +27,13 @@ import logging import pprint -# The only wrinkle about letting this executable be called galsim is that we want to -# make sure that `import galsim` doesn't import itself. We want it to import the real -# galsim module of course. So the solution is to get rid of the current directory -# from python's default search path -temp = sys.path[0] -sys.path = sys.path[1:] -import galsim -# Now put it back in case anyone else relies on this feature. -sys.path = [temp] + sys.path - def parse_args(): """Handle the command line arguments using either argparse (if available) or optparse. 
""" + from ._version import __version__ as version # Short description strings common to both parsing mechanisms - version_str = "GalSim Version %s"%galsim.version + version_str = "GalSim Version %s"%version description = "galsim: configuration file parser for %s. "%version_str description += "See https://github.com/GalSim-developers/GalSim/wiki/Config-Documentation " description += "for documentation about using this program." @@ -197,6 +188,8 @@ def AddModules(config, modules): config['modules'].extend(modules) def main(): + from .config import ReadConfig, Process + args = parse_args() if args.njobs < 1: @@ -227,7 +220,7 @@ def main(): logger = logging.getLogger('galsim') logger.warn('Using config file %s', args.config_file) - all_config = galsim.config.ReadConfig(args.config_file, args.file_type, logger) + all_config = ReadConfig(args.config_file, args.file_type, logger) logger.debug('Successfully read in config file.') # Process each config document @@ -251,8 +244,8 @@ def main(): logger.debug("Process config dict: \n%s", pprint.pformat(config)) # Process the configuration - galsim.config.Process(config, logger, njobs=args.njobs, job=args.job, new_params=new_params, - except_abort=args.except_abort) + Process(config, logger, njobs=args.njobs, job=args.job, new_params=new_params, + except_abort=args.except_abort) if args.profile: # cf. 
example code here: https://docs.python.org/2/library/profile.html @@ -268,7 +261,3 @@ def main(): ps = pstats.Stats(pr, stream=s).sort_stats(sortby).reverse_order() ps.print_stats() logger.error(s.getvalue()) - - -if __name__ == "__main__": - main() diff --git a/setup.py b/setup.py index d45704eeb39..975f218fa1f 100644 --- a/setup.py +++ b/setup.py @@ -198,25 +198,28 @@ def write_script(self, script_name, contents, mode="t", *ignored): print('GalSim version is %s'%(galsim_version)) dist = setup(name="GalSim", - version=galsim_version, - author="GalSim Developers (point of contact: Mike Jarvis)", - author_email="michael@jarvis.net", - description="The modular galaxy image simulation toolkit", - long_description=long_description, - license = "BSD License", - url="https://github.com/rmjarvis/GalSim", - download_url="https://github.com/GalSim-developers/GalSim/releases/tag/v%s.zip"%galsim_version, - packages=['galsim'], - include_package_data=True, - ext_modules=[ext], - setup_requires=build_dep, - install_requires=build_dep + run_dep, - cmdclass = {'build_ext': my_builder, - 'easy_install': my_easy_install, - }, - scripts=scripts, - zip_safe=False, - ) + version=galsim_version, + author="GalSim Developers (point of contact: Mike Jarvis)", + author_email="michael@jarvis.net", + description="The modular galaxy image simulation toolkit", + long_description=long_description, + license = "BSD License", + url="https://github.com/rmjarvis/GalSim", + download_url="https://github.com/GalSim-developers/GalSim/releases/tag/v%s.zip"%galsim_version, + packages=['galsim'], + include_package_data=True, + ext_modules=[ext], + setup_requires=build_dep, + install_requires=build_dep + run_dep, + cmdclass = {'build_ext': my_builder, + 'easy_install': my_easy_install, + }, + entry_points = {'console_scripts' : [ + 'galsim = galsim.__main__:main', + 'galsim_download_cosmos = galsim.download_cosmos:main' + ]}, + zip_safe=False, + ) # Check that the path includes the directory where the 
scripts are installed. real_env_path = [os.path.realpath(d) for d in os.environ['PATH'].split(':')] From 1205657936fdfd75fc2ea58d2782cba7c390d700 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Wed, 3 Jan 2018 17:39:59 -0500 Subject: [PATCH 019/111] Add conda_requirements for those so inclined (#809-pybind11) --- conda_requirements.txt | 9 +++++++++ requirements.txt | 18 +++++++++++------- setup.py | 4 +++- 3 files changed, 23 insertions(+), 8 deletions(-) create mode 100644 conda_requirements.txt diff --git a/conda_requirements.txt b/conda_requirements.txt new file mode 100644 index 00000000000..547f215c261 --- /dev/null +++ b/conda_requirements.txt @@ -0,0 +1,9 @@ +# The requirements that can be installed with conda install -c conda-forge + +gcc >= 4.8 +numpy >= 1.13 +future >= 0.15 +astropy >= 2.0 +pyyaml >= 3.12 +pandas >= 0.20 +cython >= 0.26 diff --git a/requirements.txt b/requirements.txt index a8c12c687b8..a8d6e423c6f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,15 +1,19 @@ # I didn't try to figure out which versions of these are really required. These are the # current versions at the time of writing this (Jan, 2018), and they are known to work. -pybind11 >= 2.0.0 -pyfftw3 >= 0.2.1 - -numpy >= 1.13.3 -future >= 0.15.2 -astropy >= 2.0.3 +# These are in conda_requirements.txt. If using that, you may prefer to do +# conda install --file conda_requirements.txt +numpy >= 1.13 +future >= 0.15 +astropy >= 2.0 pyyaml >= 3.12 +pandas >= 0.20 +cython >= 0.26 + +# These are not in (at least some versions of) conda. Let pip install these. +pybind11 >= 2.0 +pyfftw3 >= 0.2.1 LSSTDESC.Coord >= 1.0.5 -pandas >= 0.22.0 # The version of eigency (1.75) on pip doens't install the Eigen directory properly. # cf. 
https://github.com/wouterboomsma/eigency/issues/17 diff --git a/setup.py b/setup.py index 975f218fa1f..9107cedfdbe 100644 --- a/setup.py +++ b/setup.py @@ -179,7 +179,9 @@ def write_script(self, script_name, contents, mode="t", *ignored): sources, undef_macros = undef_macros) -build_dep = ['pybind11', 'pyfftw3', 'eigency'] +# Note: We don't actually need cython, but eigency depends on it at build time, and their +# setup.py is broken such that if it's not already installed it fails catastrophically. +build_dep = ['pybind11', 'pyfftw3', 'cython', 'eigency'] run_dep = ['numpy', 'future', 'astropy', 'pyyaml', 'LSSTDESC.Coord', 'pandas'] with open('README.md') as file: From 38dd0db0182a72ea3ebb3477fb5070b7eaaadaf0 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Thu, 4 Jan 2018 13:36:44 -0500 Subject: [PATCH 020/111] Use a custom function to find fftw3 rather than pyfftw3, which doesn't work in python 3 (#809-pybind11) --- setup.py | 59 +++++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 52 insertions(+), 7 deletions(-) diff --git a/setup.py b/setup.py index 9107cedfdbe..a5721845e5e 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,7 @@ from __future__ import print_function import sys,os,glob,re -import select +import platform +import ctypes from setuptools import setup, Extension @@ -87,6 +88,48 @@ def get_compiler(cc): else: return 'unknown' +# Check for the fftw3 library in some likely places +def find_fftw_lib(): + try_libdirs = [] + lib_ext = '.so' + if 'FFTW_PATH' in os.environ: + try_libdirs.append(os.environ['FFTW_PATH']) + try_libdirs.append(os.path.join(os.environ['FFTW_PATH'],'lib')) + if 'posix' in os.name.lower(): + try_libdirs.extend(['/usr/local/lib', '/usr/lib']) + if 'darwin' in platform.system().lower(): + try_libdirs.extend(['/sw/lib', '/opt/local/lib']) + lib_ext = '.dylib' + for path in ['LIBRARY_PATH', 'LD_LIBRARY_PATH', 'DYLD_LIBRARY_PATH']: + if path in os.environ: + for dir in os.environ[path].split(':'): + 
try_libdirs.append(dir) + + name = 'libfftw3' + lib_ext + for dir in try_libdirs: + try: + libpath = os.path.join(dir, name) + lib = ctypes.cdll.LoadLibrary(libpath) + print("found %s at %s" %(name, libpath)) + return libpath + except OSError as e: + print("Did not find %s in %s" %(name, libpath)) + continue + print("Could not find %s in any of the normal locations"%name) + print("Trying ctypes.util.find_library") + try: + libpath = ctypes.util.find_library('fftw3') + if libpath == None: + raise OSError + lib = ctypes.cdll.LoadLibrary(libpath) + print("found %s at %s" %(name, libpath)) + return libpath + except Exception as e: + print("Could not find fftw3 library. Make sure it is installed either in a standard ") + print("location such as /usr/local/lib, or the installation directory is either in ") + print("your LIBRARY_PATH or FFTW_PATH environment variable.") + raise + # Make a subclass of build_ext so we can add to the -I list. class my_builder( build_ext ): # Adding the libraries and include_dirs here rather than when declaring the Extension @@ -97,17 +140,19 @@ def finalize_options(self): build_ext.finalize_options(self) self.include_dirs.append('include') self.include_dirs.append('include/galsim') + import pybind11 # Include both the standard location and the --user location, since it's hard to tell # which one is the right choice. 
self.include_dirs.append(pybind11.get_include(user=False)) self.include_dirs.append(pybind11.get_include(user=True)) - import fftw3 + self.include_dirs.append('include/fftw3') - self.library_dirs.append(fftw3.lib.libdir) - fftw3_libname = fftw3.lib.libbase - if fftw3_libname.startswith('lib'): fftw3_libname = fftw3_libname[3:] - self.libraries.append(fftw3_libname) + fftw_lib = find_fftw_lib() + fftw_libpath, fftw_libname = os.path.split(fftw_lib) + self.library_dirs.append(os.path.split(fftw_lib)[0]) + self.libraries.append(os.path.split(fftw_lib)[1].split('.')[0][3:]) + import eigency self.include_dirs.append(eigency.get_includes()[2]) print('include_dirs = ',self.include_dirs) @@ -181,7 +226,7 @@ def write_script(self, script_name, contents, mode="t", *ignored): # Note: We don't actually need cython, but eigency depends on it at build time, and their # setup.py is broken such that if it's not already installed it fails catastrophically. -build_dep = ['pybind11', 'pyfftw3', 'cython', 'eigency'] +build_dep = ['pybind11', 'cython', 'eigency'] run_dep = ['numpy', 'future', 'astropy', 'pyyaml', 'LSSTDESC.Coord', 'pandas'] with open('README.md') as file: From f5cd94de0f82a26983885a9514f7dddff8febf62 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Thu, 4 Jan 2018 13:43:13 -0500 Subject: [PATCH 021/111] Add conda_requirements.txt to use conda for whatever it can do. 
(#809-pybind11) --- MANIFEST.in | 2 +- conda_requirements.txt | 11 ++++++++--- requirements.txt | 16 ++++++++++------ setup.py | 9 +++++---- 4 files changed, 24 insertions(+), 14 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index 8f68820da6b..879d877969b 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,4 @@ -graft galsim +recursive-include galsim include *.md include LICENSE global-exclude __pycache__ *.pyc .obj diff --git a/conda_requirements.txt b/conda_requirements.txt index 547f215c261..c48dead7a45 100644 --- a/conda_requirements.txt +++ b/conda_requirements.txt @@ -1,9 +1,14 @@ -# The requirements that can be installed with conda install -c conda-forge - -gcc >= 4.8 +# The requirements packages that can be installed with +# conda install -y -c conda-forge --file conda_requirements.txt numpy >= 1.13 future >= 0.15 astropy >= 2.0 pyyaml >= 3.12 pandas >= 0.20 +pybind11 >= 2.0 +pip >= 9.0 +gcc >= 4.8 cython >= 0.26 +setuptools >= 38.2 +setuptools_scm >= 1.15.6 + diff --git a/requirements.txt b/requirements.txt index a8d6e423c6f..da5996e2757 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,20 +2,24 @@ # current versions at the time of writing this (Jan, 2018), and they are known to work. # These are in conda_requirements.txt. If using that, you may prefer to do -# conda install --file conda_requirements.txt +# conda install -c conda-forge --file conda_requirements.txt +# prior to running pip install -r requirements.txt numpy >= 1.13 future >= 0.15 astropy >= 2.0 pyyaml >= 3.12 pandas >= 0.20 +pybind11 >= 2.0 +pip >= 9.0 +gcc >= 4.8 cython >= 0.26 +setuptools >= 38.2 +setuptools_scm >= 1.15.6 -# These are not in (at least some versions of) conda. Let pip install these. -pybind11 >= 2.0 -pyfftw3 >= 0.2.1 +# These are not in conda. Let pip install these. LSSTDESC.Coord >= 1.0.5 # The version of eigency (1.75) on pip doens't install the Eigen directory properly. # cf. 
https://github.com/wouterboomsma/eigency/issues/17 -# They already fixed the problem on master, so this commit works until they release 1.76. -git+git://github.com/wouterboomsma/eigency.git@ed54a61e8143284e243f0a7ddbc5acb4c8bf58bd +# It also improperly depends on cython at build time. This commit fixes that issue. +git+git://github.com/rmjarvis/eigency.git@33d8d65417484318255dbb422e6ad49dda803f06 diff --git a/setup.py b/setup.py index a5721845e5e..fb9bee28b58 100644 --- a/setup.py +++ b/setup.py @@ -224,9 +224,9 @@ def write_script(self, script_name, contents, mode="t", *ignored): sources, undef_macros = undef_macros) -# Note: We don't actually need cython, but eigency depends on it at build time, and their -# setup.py is broken such that if it's not already installed it fails catastrophically. -build_dep = ['pybind11', 'cython', 'eigency'] +# Note: We don't actually need cython or setuptools_scm, but eigency depends on them at build time, +# and their setup.py is broken such that if they're not already installed it fails catastrophically. +build_dep = ['pybind11', 'setuptools_scm', 'cython', 'eigency'] run_dep = ['numpy', 'future', 'astropy', 'pyyaml', 'LSSTDESC.Coord', 'pandas'] with open('README.md') as file: @@ -254,7 +254,8 @@ def write_script(self, script_name, contents, mode="t", *ignored): url="https://github.com/rmjarvis/GalSim", download_url="https://github.com/GalSim-developers/GalSim/releases/tag/v%s.zip"%galsim_version, packages=['galsim'], - include_package_data=True, + #package_data={'galsim' : shared_data}, + #include_package_data=True, ext_modules=[ext], setup_requires=build_dep, install_requires=build_dep + run_dep, From eefc527a454324ee5f1648d489f05cff17c1db6f Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Thu, 4 Jan 2018 14:19:49 -0500 Subject: [PATCH 022/111] gcc is not a pip package. 
(#809-pybind11) --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index da5996e2757..586a3ad1f23 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,7 +11,6 @@ pyyaml >= 3.12 pandas >= 0.20 pybind11 >= 2.0 pip >= 9.0 -gcc >= 4.8 cython >= 0.26 setuptools >= 38.2 setuptools_scm >= 1.15.6 From d6451ab2b58029a31dd69b123a3828d02250e245 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Thu, 4 Jan 2018 17:03:32 -0500 Subject: [PATCH 023/111] Write Version.h file (#809-pybind11) --- setup.py | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/setup.py b/setup.py index fb9bee28b58..3bbe720f0a1 100644 --- a/setup.py +++ b/setup.py @@ -244,6 +244,42 @@ def write_script(self, script_name, contents, mode="t", *ignored): raise RuntimeError("Unable to find version string in %s." % (version_file,)) print('GalSim version is %s'%(galsim_version)) +# Write a Version.h file that has this information for people using the C++ library. +version_info = tuple(map(int, galsim_version.split('.'))) +if len(version_info) == 2: + version_info = version_info + (0,) +version_h_text = """ +// This file is auto-generated by SCons. Do not edit. +#define GALSIM_MAJOR %d +#define GALSIM_MINOR %d +#define GALSIM_REVISION %d + +#include +#include + +namespace galsim { + // Compiled versions of the above #define values. + extern int major_version(); + extern int minor_version(); + extern int revision(); + + // Returns string of the form "1.4.2" + extern std::string version(); + + // Checks if the compiled library version matches the #define values in this header file. + inline bool check_version() { + // Same code as version(), but inline, so we get the above values to compare + // to the values compiled into the library. + std::ostringstream oss; + oss << GALSIM_MAJOR << '.' << GALSIM_MINOR << '.' 
<< GALSIM_REVISION; + return oss.str() == version(); + } +} +"""%version_info +version_h_file = os.path.join('include', 'galsim', 'Version.h') +with open(version_h_file, 'w') as f: + f.write(version_h_text) + dist = setup(name="GalSim", version=galsim_version, author="GalSim Developers (point of contact: Mike Jarvis)", From e525e2ccb6e1ccc6876640bd354fdfb0cfe7aab7 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Thu, 4 Jan 2018 17:58:52 -0500 Subject: [PATCH 024/111] Fix share symlink, which wasn't working properly on linux --- galsim/share | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galsim/share b/galsim/share index 4c6a23531fe..f2d0b20356b 120000 --- a/galsim/share +++ b/galsim/share @@ -1 +1 @@ -../share/ \ No newline at end of file +../share \ No newline at end of file From 0c8e64eab441b4e95c9aec14c39ce6b8e51a2df0 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Thu, 4 Jan 2018 18:43:31 -0500 Subject: [PATCH 025/111] Switch to using package_data for share, since seems to be more reliable than manifest --- MANIFEST.in | 5 +++-- setup.py | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index 879d877969b..2b6eaaef2e9 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,5 @@ -recursive-include galsim +recursive-include galsim *.py include *.md include LICENSE -global-exclude __pycache__ *.pyc .obj +recursive-include share * +global-exclude __pycache__ *.pyc .obj .gitignore SCons* diff --git a/setup.py b/setup.py index 3bbe720f0a1..fc7c8526a54 100644 --- a/setup.py +++ b/setup.py @@ -222,6 +222,7 @@ def write_script(self, script_name, contents, mode="t", *ignored): ext=Extension("galsim._galsim", sources, + depends=headers, undef_macros = undef_macros) # Note: We don't actually need cython or setuptools_scm, but eigency depends on them at build time, @@ -290,7 +291,7 @@ def write_script(self, script_name, contents, mode="t", *ignored): url="https://github.com/rmjarvis/GalSim", 
download_url="https://github.com/GalSim-developers/GalSim/releases/tag/v%s.zip"%galsim_version, packages=['galsim'], - #package_data={'galsim' : shared_data}, + package_data={'galsim' : shared_data}, #include_package_data=True, ext_modules=[ext], setup_requires=build_dep, From e6c644df59b9cbcb393183df47e22652169719c8 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Fri, 5 Jan 2018 12:36:01 -0500 Subject: [PATCH 026/111] Fix gcc compiler warnings (#809-pybind11) --- include/galsim/LRUCache.h | 3 +- pysrc/Bessel.cpp | 2 +- pysrc/Bounds.cpp | 6 +-- pysrc/CDModel.cpp | 4 +- pysrc/HSM.cpp | 6 +-- pysrc/Image.cpp | 6 +-- pysrc/Integ.cpp | 2 +- pysrc/Interpolant.cpp | 2 +- pysrc/PhotonArray.cpp | 4 +- pysrc/PyBind11Helper.h | 19 ++++----- pysrc/Random.cpp | 2 +- pysrc/RealGalaxy.cpp | 2 +- pysrc/SBAdd.cpp | 6 +-- pysrc/SBAiry.cpp | 2 +- pysrc/SBBox.cpp | 2 +- pysrc/SBConvolve.cpp | 6 +-- pysrc/SBDeconvolve.cpp | 2 +- pysrc/SBDeltaFunction.cpp | 2 +- pysrc/SBExponential.cpp | 2 +- pysrc/SBFourierSqrt.cpp | 2 +- pysrc/SBGaussian.cpp | 2 +- pysrc/SBInclinedExponential.cpp | 2 +- pysrc/SBInclinedSersic.cpp | 2 +- pysrc/SBInterpolatedImage.cpp | 4 +- pysrc/SBKolmogorov.cpp | 2 +- pysrc/SBMoffat.cpp | 2 +- pysrc/SBProfile.cpp | 2 +- pysrc/SBSersic.cpp | 2 +- pysrc/SBShapelet.cpp | 4 +- pysrc/SBSpergel.cpp | 2 +- pysrc/SBTransform.cpp | 2 +- pysrc/Silicon.cpp | 4 +- pysrc/Table.cpp | 6 +-- pysrc/WCS.cpp | 10 ++--- pysrc/module.cpp | 70 ++++++++++++++++----------------- setup.py | 10 ++++- src/SBInterpolatedImage.cpp | 4 +- src/math/BesselK.cpp | 1 + src/math/BesselY.cpp | 1 + 39 files changed, 107 insertions(+), 107 deletions(-) diff --git a/include/galsim/LRUCache.h b/include/galsim/LRUCache.h index 17ceee9c744..2f2e75b9f22 100644 --- a/include/galsim/LRUCache.h +++ b/include/galsim/LRUCache.h @@ -183,8 +183,7 @@ namespace galsim { shared_ptr value(LRUCacheHelper::NewValue(key)); // Remove items from the cache as necessary. 
while (_entries.size() >= _nmax) { - bool erased = _cache.erase(_entries.back().first); - assert(erased); + _cache.erase(_entries.back().first); _entries.pop_back(); } // Add the new value to the front. diff --git a/pysrc/Bessel.cpp b/pysrc/Bessel.cpp index be3ef0ef39c..f13789a84f9 100644 --- a/pysrc/Bessel.cpp +++ b/pysrc/Bessel.cpp @@ -24,7 +24,7 @@ namespace galsim { namespace math { - void pyExportBessel(PYBIND11_MODULE& _galsim) + void pyExportBessel(PB11_MODULE& _galsim) { GALSIM_DOT def("j0_root", &getBesselRoot0); GALSIM_DOT def("j0", &j0); diff --git a/pysrc/Bounds.cpp b/pysrc/Bounds.cpp index cf9345533db..c728b5525d1 100644 --- a/pysrc/Bounds.cpp +++ b/pysrc/Bounds.cpp @@ -23,7 +23,7 @@ namespace galsim { template - static void WrapPosition(PYBIND11_MODULE& _galsim, const std::string& suffix) + static void WrapPosition(PB11_MODULE& _galsim, const std::string& suffix) { bp::class_ >(GALSIM_COMMA ("Position" + suffix).c_str() BP_NOINIT) .def(bp::init()) @@ -32,7 +32,7 @@ namespace galsim { } template - static void WrapBounds(PYBIND11_MODULE& _galsim, const std::string& suffix) + static void WrapBounds(PB11_MODULE& _galsim, const std::string& suffix) { bp::class_< Bounds >(GALSIM_COMMA ("Bounds" + suffix).c_str() BP_NOINIT) .def(bp::init()) @@ -42,7 +42,7 @@ namespace galsim { .def_property_readonly("ymax", &Bounds::getYMax); } - void pyExportBounds(PYBIND11_MODULE& _galsim) + void pyExportBounds(PB11_MODULE& _galsim) { WrapPosition(_galsim, "D"); WrapPosition(_galsim, "I"); diff --git a/pysrc/CDModel.cpp b/pysrc/CDModel.cpp index 5b0b8a086c9..f412c866c88 100644 --- a/pysrc/CDModel.cpp +++ b/pysrc/CDModel.cpp @@ -23,7 +23,7 @@ namespace galsim { template - static void WrapTemplates(PYBIND11_MODULE& _galsim) + static void WrapTemplates(PB11_MODULE& _galsim) { typedef void (*ApplyCD_func)(ImageView& , const BaseImage& , const BaseImage& , const BaseImage& , @@ -32,7 +32,7 @@ namespace galsim { GALSIM_DOT def("_ApplyCD", ApplyCD_func(&ApplyCD)); } - void 
pyExportCDModel(PYBIND11_MODULE& _galsim) + void pyExportCDModel(PB11_MODULE& _galsim) { WrapTemplates(_galsim); WrapTemplates(_galsim); diff --git a/pysrc/HSM.cpp b/pysrc/HSM.cpp index 34329746cf7..941f716e2aa 100644 --- a/pysrc/HSM.cpp +++ b/pysrc/HSM.cpp @@ -38,7 +38,7 @@ namespace hsm { #ifdef USE_BOOST ShapeData* data = new ShapeData(); #else - PYBIND11_PLACEMENT_NEW ShapeData(); + PB11_PLACEMENT_NEW ShapeData(); ShapeData* data = &instance; #endif data->image_bounds = image_bounds; @@ -69,7 +69,7 @@ namespace hsm { } template - static void WrapTemplates(PYBIND11_MODULE& _galsim) + static void WrapTemplates(PB11_MODULE& _galsim) { typedef void (*FAM_func)(ShapeData&t, const BaseImage&, const BaseImage&, double, double, Position, bool, const HSMParams&); @@ -82,7 +82,7 @@ namespace hsm { GALSIM_DOT def("_EstimateShearView", ESH_func(&EstimateShearView)); }; - void pyExportHSM(PYBIND11_MODULE& _galsim) + void pyExportHSM(PB11_MODULE& _galsim) { bp::class_(GALSIM_COMMA "HSMParams" BP_NOINIT) .def(bp::init< diff --git a/pysrc/Image.cpp b/pysrc/Image.cpp index 9c9b58598f6..f7e8b7d2cad 100644 --- a/pysrc/Image.cpp +++ b/pysrc/Image.cpp @@ -29,11 +29,11 @@ namespace galsim { { T* data = reinterpret_cast(idata); shared_ptr owner; - PYBIND11_PLACEMENT_NEW ImageView(data, owner, step, stride, bounds); + PB11_PLACEMENT_NEW ImageView(data, owner, step, stride, bounds); } template - static void WrapImage(PYBIND11_MODULE& _galsim, const std::string& suffix) + static void WrapImage(PB11_MODULE& _galsim, const std::string& suffix) { bp::class_ BOOST_NONCOPYABLE>( GALSIM_COMMA ("BaseImage" + suffix).c_str() BP_NOINIT); @@ -59,7 +59,7 @@ namespace galsim { GALSIM_DOT def("invertImage", invert_func_type(&invertImage)); } - void pyExportImage(PYBIND11_MODULE& _galsim) + void pyExportImage(PB11_MODULE& _galsim) { WrapImage(_galsim, "US"); WrapImage(_galsim, "UI"); diff --git a/pysrc/Integ.cpp b/pysrc/Integ.cpp index 6b9715e4dbf..9f9e2a22aee 100644 --- a/pysrc/Integ.cpp +++ 
b/pysrc/Integ.cpp @@ -49,7 +49,7 @@ namespace integ { } } - void pyExportInteg(PYBIND11_MODULE& _galsim) + void pyExportInteg(PB11_MODULE& _galsim) { GALSIM_DOT def("PyInt1d", &PyInt1d); diff --git a/pysrc/Interpolant.cpp b/pysrc/Interpolant.cpp index f3d273d3a95..914a531412d 100644 --- a/pysrc/Interpolant.cpp +++ b/pysrc/Interpolant.cpp @@ -23,7 +23,7 @@ namespace galsim { - void pyExportInterpolant(PYBIND11_MODULE& _galsim) + void pyExportInterpolant(PB11_MODULE& _galsim) { bp::class_(GALSIM_COMMA "Interpolant" BP_NOINIT); diff --git a/pysrc/PhotonArray.cpp b/pysrc/PhotonArray.cpp index e357f5117fc..a2a17ce6c92 100644 --- a/pysrc/PhotonArray.cpp +++ b/pysrc/PhotonArray.cpp @@ -40,10 +40,10 @@ namespace galsim { double *dxdz = reinterpret_cast(idxdz); double *dydz = reinterpret_cast(idydz); double *wave = reinterpret_cast(iwave); - PYBIND11_PLACEMENT_NEW PhotonArray(N, x, y, flux, dxdz, dydz, wave, is_corr); + PB11_PLACEMENT_NEW PhotonArray(N, x, y, flux, dxdz, dydz, wave, is_corr); } - void pyExportPhotonArray(PYBIND11_MODULE& _galsim) + void pyExportPhotonArray(PB11_MODULE& _galsim) { bp::class_ pyPhotonArray(GALSIM_COMMA "PhotonArray" BP_NOINIT); pyPhotonArray diff --git a/pysrc/PyBind11Helper.h b/pysrc/PyBind11Helper.h index 4552b025eb3..6aad554abed 100644 --- a/pysrc/PyBind11Helper.h +++ b/pysrc/PyBind11Helper.h @@ -31,21 +31,19 @@ #include namespace bp = boost::python; -#define PYBIND11_PLUGIN(x) BOOST_PYTHON_MODULE(x) -#define PYBIND11_MAKE_MODULE(x) bp::scope _galsim -#define PYBIND11_RETURN_PTR(x) +#define PB11_MAKE_MODULE(x) BOOST_PYTHON_MODULE(x) #define TUPLE(args...) 
bp::tuple #define MAKE_TUPLE bp::make_tuple #define GALSIM_DOT bp:: #define GALSIM_COMMA -#define PYBIND11_MODULE bp::scope +#define PB11_MODULE bp::scope #define BP_HANDLE bp::handle<> #define BP_THROW bp::throw_error_already_set() #define BP_NOINIT , bp::no_init #define ENABLE_PICKLING .enable_pickling() -#define PYBIND11_CAST(x) x +#define PB11_CAST(x) x #define BP_OTHER(T) bp::other() #define ADD_PROPERTY(name, func) add_property(name, func) #define BP_REGISTER(T) bp::register_ptr_to_python< boost::shared_ptr >() @@ -53,7 +51,7 @@ namespace bp = boost::python; #define BP_BASES(T) , bp::bases #define BP_MAKE_CONSTRUCTOR(args...) bp::make_constructor(args, bp::default_call_policies()) #define BP_CONSTRUCTOR(f,x,args...) x* f(args) -#define PYBIND11_PLACEMENT_NEW return new +#define PB11_PLACEMENT_NEW return new #define CAST bp::extract #define BP_COPY_CONST_REFERENCE bp::return_value_policy() #define def_property_readonly add_property @@ -66,20 +64,19 @@ namespace bp = boost::python; #include namespace bp = pybind11; -#define PYBIND11_MAKE_MODULE(x) pybind11::module x(#x) -#define PYBIND11_RETURN_PTR(x) return x.ptr() +#define PB11_MAKE_MODULE(x) PYBIND11_MODULE(x,x) #define TUPLE(args...) std::tuple #define MAKE_TUPLE std::make_tuple #define GALSIM_DOT _galsim. #define GALSIM_COMMA _galsim, -#define PYBIND11_MODULE pybind11::module +#define PB11_MODULE pybind11::module #define BP_HANDLE pybind11::handle #define BP_THROW throw pybind11::error_already_set() #define BP_NOINIT #define ENABLE_PICKLING -#define PYBIND11_CAST(x) pybind11::cast(x) +#define PB11_CAST(x) pybind11::cast(x) #define BP_OTHER(T) T() #define ADD_PROPERTY(name, func) def_property_readonly(name, func) #define BP_REGISTER(T) @@ -87,7 +84,7 @@ namespace bp = pybind11; #define BP_BASES(T) , T #define BP_MAKE_CONSTRUCTOR(args...) args #define BP_CONSTRUCTOR(f,x,args...) 
void f(x& instance, args) -#define PYBIND11_PLACEMENT_NEW new (&instance) +#define PB11_PLACEMENT_NEW new (&instance) #define CAST pybind11::cast #define BP_COPY_CONST_REFERENCE pybind11::return_value_policy::reference diff --git a/pysrc/Random.cpp b/pysrc/Random.cpp index d70ee865ae2..bf17db5b26f 100644 --- a/pysrc/Random.cpp +++ b/pysrc/Random.cpp @@ -46,7 +46,7 @@ namespace galsim { rng.generateFromExpectation(N, data); } - void pyExportRandom(PYBIND11_MODULE& _galsim) + void pyExportRandom(PB11_MODULE& _galsim) { bp::class_ (GALSIM_COMMA "BaseDeviateImpl" BP_NOINIT) .def(bp::init()) diff --git a/pysrc/RealGalaxy.cpp b/pysrc/RealGalaxy.cpp index f274864834d..ead57a9bf77 100644 --- a/pysrc/RealGalaxy.cpp +++ b/pysrc/RealGalaxy.cpp @@ -35,7 +35,7 @@ namespace galsim { ComputeCRGCoefficients(coef, Sigma, w, kimgs, psf, nsed, nband, nkx, nky); }; - void pyExportRealGalaxy(PYBIND11_MODULE& _galsim) { + void pyExportRealGalaxy(PB11_MODULE& _galsim) { GALSIM_DOT def("ComputeCRGCoefficients", &CallComputeCRGCoefficients); } diff --git a/pysrc/SBAdd.cpp b/pysrc/SBAdd.cpp index 153aeaafd0a..3153c23c911 100644 --- a/pysrc/SBAdd.cpp +++ b/pysrc/SBAdd.cpp @@ -28,16 +28,16 @@ namespace galsim { bp::stl_input_iterator iter(iterable), end; std::list plist; for(; iter != end; ++iter) plist.push_back(*iter); - PYBIND11_PLACEMENT_NEW SBAdd(plist, gsparams); + PB11_PLACEMENT_NEW SBAdd(plist, gsparams); } #else static BP_CONSTRUCTOR(construct, SBAdd, const std::list& plist, GSParams gsparams) { - PYBIND11_PLACEMENT_NEW SBAdd(plist, gsparams); + PB11_PLACEMENT_NEW SBAdd(plist, gsparams); } #endif - void pyExportSBAdd(PYBIND11_MODULE& _galsim) + void pyExportSBAdd(PB11_MODULE& _galsim) { bp::class_(GALSIM_COMMA "SBAdd" BP_NOINIT) .def("__init__", BP_MAKE_CONSTRUCTOR(&construct)); diff --git a/pysrc/SBAiry.cpp b/pysrc/SBAiry.cpp index 72c21f05f26..4025a035ba1 100644 --- a/pysrc/SBAiry.cpp +++ b/pysrc/SBAiry.cpp @@ -22,7 +22,7 @@ namespace galsim { - void pyExportSBAiry(PYBIND11_MODULE& 
_galsim) + void pyExportSBAiry(PB11_MODULE& _galsim) { bp::class_(GALSIM_COMMA "SBAiry" BP_NOINIT) .def(bp::init()); diff --git a/pysrc/SBBox.cpp b/pysrc/SBBox.cpp index f700493847e..1a4f37faa52 100644 --- a/pysrc/SBBox.cpp +++ b/pysrc/SBBox.cpp @@ -22,7 +22,7 @@ namespace galsim { - void pyExportSBBox(PYBIND11_MODULE& _galsim) + void pyExportSBBox(PB11_MODULE& _galsim) { bp::class_(GALSIM_COMMA "SBBox" BP_NOINIT) .def(bp::init()); diff --git a/pysrc/SBConvolve.cpp b/pysrc/SBConvolve.cpp index 10cb1dad372..46935f9a249 100644 --- a/pysrc/SBConvolve.cpp +++ b/pysrc/SBConvolve.cpp @@ -29,17 +29,17 @@ namespace galsim { bp::stl_input_iterator iter(iterable), end; std::list plist; for(; iter != end; ++iter) plist.push_back(*iter); - PYBIND11_PLACEMENT_NEW SBConvolve(plist, real_space, gsparams); + PB11_PLACEMENT_NEW SBConvolve(plist, real_space, gsparams); } #else static BP_CONSTRUCTOR(construct, SBConvolve, const std::list& plist, bool real_space, GSParams gsparams) { - PYBIND11_PLACEMENT_NEW SBConvolve(plist, real_space, gsparams); + PB11_PLACEMENT_NEW SBConvolve(plist, real_space, gsparams); } #endif - void pyExportSBConvolve(PYBIND11_MODULE& _galsim) + void pyExportSBConvolve(PB11_MODULE& _galsim) { bp::class_(GALSIM_COMMA "SBConvolve" BP_NOINIT) .def("__init__", BP_MAKE_CONSTRUCTOR( &construct)); diff --git a/pysrc/SBDeconvolve.cpp b/pysrc/SBDeconvolve.cpp index e65ce52e0f3..74729b76514 100644 --- a/pysrc/SBDeconvolve.cpp +++ b/pysrc/SBDeconvolve.cpp @@ -22,7 +22,7 @@ namespace galsim { - void pyExportSBDeconvolve(PYBIND11_MODULE& _galsim) + void pyExportSBDeconvolve(PB11_MODULE& _galsim) { bp::class_(GALSIM_COMMA "SBDeconvolve" BP_NOINIT) .def(bp::init()); diff --git a/pysrc/SBDeltaFunction.cpp b/pysrc/SBDeltaFunction.cpp index a0467d26caf..655d0b5d836 100644 --- a/pysrc/SBDeltaFunction.cpp +++ b/pysrc/SBDeltaFunction.cpp @@ -22,7 +22,7 @@ namespace galsim { - void pyExportSBDeltaFunction(PYBIND11_MODULE& _galsim) + void pyExportSBDeltaFunction(PB11_MODULE& 
_galsim) { bp::class_(GALSIM_COMMA "SBDeltaFunction" BP_NOINIT) .def(bp::init()); diff --git a/pysrc/SBExponential.cpp b/pysrc/SBExponential.cpp index 3dc5a5a340e..c40b081a1a0 100644 --- a/pysrc/SBExponential.cpp +++ b/pysrc/SBExponential.cpp @@ -22,7 +22,7 @@ namespace galsim { - void pyExportSBExponential(PYBIND11_MODULE& _galsim) + void pyExportSBExponential(PB11_MODULE& _galsim) { bp::class_(GALSIM_COMMA "SBExponential" BP_NOINIT) .def(bp::init()); diff --git a/pysrc/SBFourierSqrt.cpp b/pysrc/SBFourierSqrt.cpp index cb9c37c4978..df001b771be 100644 --- a/pysrc/SBFourierSqrt.cpp +++ b/pysrc/SBFourierSqrt.cpp @@ -22,7 +22,7 @@ namespace galsim { - void pyExportSBFourierSqrt(PYBIND11_MODULE& _galsim) + void pyExportSBFourierSqrt(PB11_MODULE& _galsim) { bp::class_(GALSIM_COMMA "SBFourierSqrt" BP_NOINIT) .def(bp::init()); diff --git a/pysrc/SBGaussian.cpp b/pysrc/SBGaussian.cpp index 04da1cd4ac3..11cccd321ee 100644 --- a/pysrc/SBGaussian.cpp +++ b/pysrc/SBGaussian.cpp @@ -22,7 +22,7 @@ namespace galsim { - void pyExportSBGaussian(PYBIND11_MODULE& _galsim) + void pyExportSBGaussian(PB11_MODULE& _galsim) { bp::class_(GALSIM_COMMA "SBGaussian" BP_NOINIT) .def(bp::init()); diff --git a/pysrc/SBInclinedExponential.cpp b/pysrc/SBInclinedExponential.cpp index 8358baa7454..459ce8eb8b9 100644 --- a/pysrc/SBInclinedExponential.cpp +++ b/pysrc/SBInclinedExponential.cpp @@ -22,7 +22,7 @@ namespace galsim { - void pyExportSBInclinedExponential(PYBIND11_MODULE& _galsim) + void pyExportSBInclinedExponential(PB11_MODULE& _galsim) { bp::class_( GALSIM_COMMA "SBInclinedExponential" BP_NOINIT) diff --git a/pysrc/SBInclinedSersic.cpp b/pysrc/SBInclinedSersic.cpp index 98ad663b32d..8476adb9501 100644 --- a/pysrc/SBInclinedSersic.cpp +++ b/pysrc/SBInclinedSersic.cpp @@ -22,7 +22,7 @@ namespace galsim { - void pyExportSBInclinedSersic(PYBIND11_MODULE& _galsim) + void pyExportSBInclinedSersic(PB11_MODULE& _galsim) { bp::class_( GALSIM_COMMA "SBInclinedSersic" BP_NOINIT) diff --git 
a/pysrc/SBInterpolatedImage.cpp b/pysrc/SBInterpolatedImage.cpp index 6f86a9185e2..ebb28f36ec1 100644 --- a/pysrc/SBInterpolatedImage.cpp +++ b/pysrc/SBInterpolatedImage.cpp @@ -23,7 +23,7 @@ namespace galsim { template - static void WrapTemplates(PYBIND11_MODULE& _galsim, W& wrapper) + static void WrapTemplates(PB11_MODULE& _galsim, W& wrapper) { wrapper.def(bp::init &, const Bounds&, const Bounds&, const Interpolant&, const Interpolant&, @@ -33,7 +33,7 @@ namespace galsim { GALSIM_DOT def("CalculateSizeContainingFlux", cscf_func_type(&CalculateSizeContainingFlux)); } - void pyExportSBInterpolatedImage(PYBIND11_MODULE& _galsim) + void pyExportSBInterpolatedImage(PB11_MODULE& _galsim) { bp::class_ pySBInterpolatedImage( GALSIM_COMMA "SBInterpolatedImage" BP_NOINIT); diff --git a/pysrc/SBKolmogorov.cpp b/pysrc/SBKolmogorov.cpp index 49299919b70..0dc7f6d7aea 100644 --- a/pysrc/SBKolmogorov.cpp +++ b/pysrc/SBKolmogorov.cpp @@ -22,7 +22,7 @@ namespace galsim { - void pyExportSBKolmogorov(PYBIND11_MODULE& _galsim) + void pyExportSBKolmogorov(PB11_MODULE& _galsim) { bp::class_(GALSIM_COMMA "SBKolmogorov" BP_NOINIT) .def(bp::init()); diff --git a/pysrc/SBMoffat.cpp b/pysrc/SBMoffat.cpp index 5ad40adfd0f..928121fa67c 100644 --- a/pysrc/SBMoffat.cpp +++ b/pysrc/SBMoffat.cpp @@ -22,7 +22,7 @@ namespace galsim { - void pyExportSBMoffat(PYBIND11_MODULE& _galsim) + void pyExportSBMoffat(PB11_MODULE& _galsim) { bp::class_(GALSIM_COMMA "SBMoffat" BP_NOINIT) .def(bp::init()) diff --git a/pysrc/SBProfile.cpp b/pysrc/SBProfile.cpp index c8366f6ce10..d0b8f84877a 100644 --- a/pysrc/SBProfile.cpp +++ b/pysrc/SBProfile.cpp @@ -31,7 +31,7 @@ namespace galsim { &SBProfile::drawK); } - void pyExportSBProfile(PYBIND11_MODULE& _galsim) + void pyExportSBProfile(PB11_MODULE& _galsim) { bp::class_(GALSIM_COMMA "GSParams" BP_NOINIT) .def(bp::init< diff --git a/pysrc/SBSersic.cpp b/pysrc/SBSersic.cpp index 73c5784bb58..60c43dd41aa 100644 --- a/pysrc/SBSersic.cpp +++ b/pysrc/SBSersic.cpp @@ -22,7 
+22,7 @@ namespace galsim { - void pyExportSBSersic(PYBIND11_MODULE& _galsim) + void pyExportSBSersic(PB11_MODULE& _galsim) { bp::class_(GALSIM_COMMA "SBSersic" BP_NOINIT) .def(bp::init()); diff --git a/pysrc/SBShapelet.cpp b/pysrc/SBShapelet.cpp index e5c0e45b7ec..a54703b8936 100644 --- a/pysrc/SBShapelet.cpp +++ b/pysrc/SBShapelet.cpp @@ -42,10 +42,10 @@ namespace galsim { VectorXd v(size); for (int i=0; i(GALSIM_COMMA "SBShapelet" BP_NOINIT) .def("__init__", BP_MAKE_CONSTRUCTOR(&construct)); diff --git a/pysrc/SBSpergel.cpp b/pysrc/SBSpergel.cpp index 677add7a52b..52dda21910d 100644 --- a/pysrc/SBSpergel.cpp +++ b/pysrc/SBSpergel.cpp @@ -22,7 +22,7 @@ namespace galsim { - void pyExportSBSpergel(PYBIND11_MODULE& _galsim) + void pyExportSBSpergel(PB11_MODULE& _galsim) { bp::class_(GALSIM_COMMA "SBSpergel" BP_NOINIT) .def(bp::init()) diff --git a/pysrc/SBTransform.cpp b/pysrc/SBTransform.cpp index 4c79da0eb7c..94075f301e2 100644 --- a/pysrc/SBTransform.cpp +++ b/pysrc/SBTransform.cpp @@ -22,7 +22,7 @@ namespace galsim { - void pyExportSBTransform(PYBIND11_MODULE& _galsim) + void pyExportSBTransform(PB11_MODULE& _galsim) { bp::class_(GALSIM_COMMA "SBTransform" BP_NOINIT) .def(bp::init(idata); - PYBIND11_PLACEMENT_NEW Silicon(NumVertices, NumElect, Nx, Ny, QDist, + PB11_PLACEMENT_NEW Silicon(NumVertices, NumElect, Nx, Ny, QDist, Nrecalc, DiffStep, PixelSize, SensorThickness, data, treeRingTable, treeRingCenter, abs_length_table); } - void pyExportSilicon(PYBIND11_MODULE& _galsim) + void pyExportSilicon(PB11_MODULE& _galsim) { bp::class_ pySilicon(GALSIM_COMMA "Silicon" BP_NOINIT); pySilicon.def("__init__", BP_MAKE_CONSTRUCTOR(&MakeSilicon)); diff --git a/pysrc/Table.cpp b/pysrc/Table.cpp index f6d922f59b0..94b82335d5d 100644 --- a/pysrc/Table.cpp +++ b/pysrc/Table.cpp @@ -35,7 +35,7 @@ namespace galsim { else if (interp == "ceil") i = Table::ceil; else if (interp == "nearest") i = Table::nearest; - PYBIND11_PLACEMENT_NEW Table(args, vals, N, i); + PB11_PLACEMENT_NEW 
Table(args, vals, N, i); } static void InterpMany(const Table& table, size_t iargs, size_t ivals, int N) @@ -59,7 +59,7 @@ namespace galsim { else if (interp == "ceil") i = Table2D::ceil; else if (interp == "nearest") i = Table2D::nearest; - PYBIND11_PLACEMENT_NEW Table2D(x, y, vals, Nx, Ny, i); + PB11_PLACEMENT_NEW Table2D(x, y, vals, Nx, Ny, i); } static void InterpMany2D(const Table2D& table2d, size_t ix, size_t iy, size_t ivals, int N) @@ -86,7 +86,7 @@ namespace galsim { table2d.gradientMany(x, y, dfdx, dfdy, N); } - void pyExportTable(PYBIND11_MODULE& _galsim) + void pyExportTable(PB11_MODULE& _galsim) { bp::class_
(GALSIM_COMMA "_LookupTable" BP_NOINIT) .def("__init__", BP_MAKE_CONSTRUCTOR(&MakeTable)) diff --git a/pysrc/WCS.cpp b/pysrc/WCS.cpp index 15841a38527..7cff4148d34 100644 --- a/pysrc/WCS.cpp +++ b/pysrc/WCS.cpp @@ -38,22 +38,22 @@ namespace galsim { ApplyPV(n, m, uar, var, pvar); } - TUPLE(double,double) CallInvertPV(double u, double v, size_t pv_data) + bp::tuple CallInvertPV(double u, double v, size_t pv_data) { const double* pvar = reinterpret_cast(pv_data); InvertPV(u, v, pvar); - return MAKE_TUPLE(u,v); + return bp::make_tuple(u,v); } - TUPLE(double,double) CallInvertAB(int m, double x, double y, size_t ab_data, size_t abp_data) + bp::tuple CallInvertAB(int m, double x, double y, size_t ab_data, size_t abp_data) { const double* abar = reinterpret_cast(ab_data); const double* abpar = reinterpret_cast(abp_data); InvertAB(m, x, y, abar, abpar); - return MAKE_TUPLE(x,y); + return bp::make_tuple(x,y); } - void pyExportWCS(PYBIND11_MODULE& _galsim) + void pyExportWCS(PB11_MODULE& _galsim) { GALSIM_DOT def("ApplyPV", &CallApplyPV); GALSIM_DOT def("ApplyCD", &CallApplyCD); diff --git a/pysrc/module.cpp b/pysrc/module.cpp index f8f2fbb8849..0ef229e0390 100644 --- a/pysrc/module.cpp +++ b/pysrc/module.cpp @@ -21,54 +21,52 @@ #include "PyBind11Helper.h" namespace galsim { - void pyExportBounds(PYBIND11_MODULE&); - void pyExportPhotonArray(PYBIND11_MODULE&); - void pyExportImage(PYBIND11_MODULE&); - void pyExportSBProfile(PYBIND11_MODULE&); - void pyExportSBAdd(PYBIND11_MODULE&); - void pyExportSBConvolve(PYBIND11_MODULE&); - void pyExportSBDeconvolve(PYBIND11_MODULE&); - void pyExportSBFourierSqrt(PYBIND11_MODULE&); - void pyExportSBTransform(PYBIND11_MODULE&); - void pyExportSBBox(PYBIND11_MODULE&); - void pyExportSBGaussian(PYBIND11_MODULE&); - void pyExportSBDeltaFunction(PYBIND11_MODULE&); - void pyExportSBExponential(PYBIND11_MODULE&); - void pyExportSBSersic(PYBIND11_MODULE&); - void pyExportSBSpergel(PYBIND11_MODULE&); - void pyExportSBMoffat(PYBIND11_MODULE&); - 
void pyExportSBAiry(PYBIND11_MODULE&); - void pyExportSBShapelet(PYBIND11_MODULE&); - void pyExportSBInterpolatedImage(PYBIND11_MODULE&); - void pyExportSBKolmogorov(PYBIND11_MODULE&); - void pyExportSBInclinedExponential(PYBIND11_MODULE&); - void pyExportSBInclinedSersic(PYBIND11_MODULE&); - void pyExportRandom(PYBIND11_MODULE&); - void pyExportTable(PYBIND11_MODULE&); - void pyExportInterpolant(PYBIND11_MODULE&); - void pyExportCDModel(PYBIND11_MODULE&); - void pyExportSilicon(PYBIND11_MODULE&); - void pyExportRealGalaxy(PYBIND11_MODULE&); - void pyExportWCS(PYBIND11_MODULE&); + void pyExportBounds(PB11_MODULE&); + void pyExportPhotonArray(PB11_MODULE&); + void pyExportImage(PB11_MODULE&); + void pyExportSBProfile(PB11_MODULE&); + void pyExportSBAdd(PB11_MODULE&); + void pyExportSBConvolve(PB11_MODULE&); + void pyExportSBDeconvolve(PB11_MODULE&); + void pyExportSBFourierSqrt(PB11_MODULE&); + void pyExportSBTransform(PB11_MODULE&); + void pyExportSBBox(PB11_MODULE&); + void pyExportSBGaussian(PB11_MODULE&); + void pyExportSBDeltaFunction(PB11_MODULE&); + void pyExportSBExponential(PB11_MODULE&); + void pyExportSBSersic(PB11_MODULE&); + void pyExportSBSpergel(PB11_MODULE&); + void pyExportSBMoffat(PB11_MODULE&); + void pyExportSBAiry(PB11_MODULE&); + void pyExportSBShapelet(PB11_MODULE&); + void pyExportSBInterpolatedImage(PB11_MODULE&); + void pyExportSBKolmogorov(PB11_MODULE&); + void pyExportSBInclinedExponential(PB11_MODULE&); + void pyExportSBInclinedSersic(PB11_MODULE&); + void pyExportRandom(PB11_MODULE&); + void pyExportTable(PB11_MODULE&); + void pyExportInterpolant(PB11_MODULE&); + void pyExportCDModel(PB11_MODULE&); + void pyExportSilicon(PB11_MODULE&); + void pyExportRealGalaxy(PB11_MODULE&); + void pyExportWCS(PB11_MODULE&); namespace hsm { - void pyExportHSM(PYBIND11_MODULE&); + void pyExportHSM(PB11_MODULE&); } namespace integ { - void pyExportInteg(PYBIND11_MODULE&); + void pyExportInteg(PB11_MODULE&); } namespace math { - void 
pyExportBessel(PYBIND11_MODULE&); + void pyExportBessel(PB11_MODULE&); } } // namespace galsim -PYBIND11_PLUGIN(_galsim) +PB11_MAKE_MODULE(_galsim) { - PYBIND11_MAKE_MODULE(_galsim); - galsim::pyExportBounds(_galsim); galsim::pyExportPhotonArray(_galsim); galsim::pyExportImage(_galsim); @@ -102,6 +100,4 @@ PYBIND11_PLUGIN(_galsim) galsim::hsm::pyExportHSM(_galsim); galsim::integ::pyExportInteg(_galsim); galsim::math::pyExportBessel(_galsim); - - PYBIND11_RETURN_PTR(_galsim); } diff --git a/setup.py b/setup.py index fc7c8526a54..7ced7525c5c 100644 --- a/setup.py +++ b/setup.py @@ -38,9 +38,9 @@ def all_files_from(dir, ext=''): undef_macros+=['NDEBUG'] copt = { - 'gcc' : ['-O3','-ffast-math','-std=c++11'], + 'gcc' : ['-O3','-ffast-math','-std=c++11','-fvisibility=hidden'], 'icc' : ['-O3','-std=c++11'], - 'clang' : ['-O3','-ffast-math','-std=c++11','-Wno-shorten-64-to-32'], + 'clang' : ['-O3','-ffast-math','-std=c++11','-Wno-shorten-64-to-32','-fvisibility=hidden'], 'unknown' : [], } @@ -161,6 +161,12 @@ def finalize_options(self): # Add any extra things based on the compiler being used.. 
def build_extensions(self): + # Remove any -Wstrict-prototypes in the compiler flags (since invalid for C++) + try: + self.compiler.compiler_so.remove("-Wstrict-prototypes") + except (AttributeError, ValueError): + pass + print('Platform is ',self.plat_name) # Figure out what compiler it will use diff --git a/src/SBInterpolatedImage.cpp b/src/SBInterpolatedImage.cpp index 740d2b89426..4dd0324a791 100644 --- a/src/SBInterpolatedImage.cpp +++ b/src/SBInterpolatedImage.cpp @@ -105,8 +105,8 @@ namespace galsim { dbg<<"nonzero bounds = "<<_nonzero_bounds< Date: Fri, 5 Jan 2018 13:42:22 -0500 Subject: [PATCH 028/111] Fix script_install_dir check for pip installation (#809-pybind11) --- setup.py | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/setup.py b/setup.py index f2e22a7ae33..7faa46aba1e 100644 --- a/setup.py +++ b/setup.py @@ -222,7 +222,7 @@ def run(self): # AFAICT, setuptools doesn't provide any easy access to the final installation location of the # executable scripts. This bit is just to save the value of script_dir so I can use it later. # cf. http://stackoverflow.com/questions/12975540/correct-way-to-find-scripts-directory-from-setup-py-in-python-distutils/ -class my_easy_install( easy_install ): +class my_easy_install(easy_install): # Used when installing via python setup.py install # Match the call signature of the easy_install version. def write_script(self, script_name, contents, mode="t", *ignored): # Run the normal version @@ -231,6 +231,11 @@ def write_script(self, script_name, contents, mode="t", *ignored): # This is the same thing that is returned by the setup function. self.distribution.script_install_dir = self.script_dir +class my_install_scripts(install_scripts): # Used when pip installing. 
+ def run(self): + install_scripts.run(self) + self.distribution.script_install_dir = self.install_dir + ext=Extension("galsim._galsim", sources, depends=headers, @@ -309,6 +314,7 @@ def write_script(self, script_name, contents, mode="t", *ignored): install_requires=build_dep + run_dep, cmdclass = {'build_ext': my_builder, 'install': my_install, + 'install_scripts': my_install_scripts, 'easy_install': my_easy_install, }, entry_points = {'console_scripts' : [ @@ -320,15 +326,17 @@ def write_script(self, script_name, contents, mode="t", *ignored): # Check that the path includes the directory where the scripts are installed. real_env_path = [os.path.realpath(d) for d in os.environ['PATH'].split(':')] -if (hasattr(dist,'script_install_dir') and - dist.script_install_dir not in os.environ['PATH'].split(':') and - os.path.realpath(dist.script_install_dir) not in real_env_path): - - print('\nWARNING: The GalSim executables were installed in a directory not in your PATH') - print(' If you want to use the executables, you should add the directory') - print('\n ',dist.script_install_dir,'\n') - print(' to your path. The current path is') - print('\n ',os.environ['PATH'],'\n') - print(' Alternatively, you can specify a different prefix with --prefix=PREFIX,') - print(' in which case the scripts will be installed in PREFIX/bin.') - print(' If you are installing via pip use --install-option="--prefix=PREFIX"') +if hasattr(dist,'script_install_dir'): + print('scripts installed into ',dist.script_install_dir) + if (dist.script_install_dir not in os.environ['PATH'].split(':') and + os.path.realpath(dist.script_install_dir) not in real_env_path): + + print('\nWARNING: The GalSim executables were installed in a directory not in your PATH') + print(' If you want to use the executables, you should add the directory') + print('\n ',dist.script_install_dir,'\n') + print(' to your path. 
The current path is') + print('\n ',os.environ['PATH'],'\n') + print(' Alternatively, you can specify a different prefix with --prefix=PREFIX,') + print(' in which case the scripts will be installed in PREFIX/bin.') + print(' If you are installing via pip use --install-option="--prefix=PREFIX"') + From a79281cb1cdff6e87d9a89870d7a1ef44df6886d Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Fri, 5 Jan 2018 22:42:29 -0500 Subject: [PATCH 029/111] Use -O2 to avoid numerical inaccuracies from over optimization (#809-pybind11) --- setup.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index 7faa46aba1e..e41a1c10ba9 100644 --- a/setup.py +++ b/setup.py @@ -39,9 +39,9 @@ def all_files_from(dir, ext=''): undef_macros+=['NDEBUG'] copt = { - 'gcc' : ['-O3','-ffast-math','-std=c++11','-fvisibility=hidden'], - 'icc' : ['-O3','-std=c++11'], - 'clang' : ['-O3','-ffast-math','-std=c++11','-Wno-shorten-64-to-32','-fvisibility=hidden'], + 'gcc' : ['-O2','-msse2','-std=c++11','-fvisibility=hidden'], + 'icc' : ['-O2','-msse2','-vec-report0','-std=c++11'], + 'clang' : ['-O2','-msse2','-ffast-math','-std=c++11','-Wno-shorten-64-to-32','-fvisibility=hidden'], 'unknown' : [], } From 497ed90dd3d69cdae60b22c0675f15693654a221 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Fri, 5 Jan 2018 22:44:13 -0500 Subject: [PATCH 030/111] Regularize how test suite imports galsim (#809-pybind11) --- tests/test_airy.py | 7 +---- tests/test_bandpass.py | 9 ++----- tests/test_bessel.py | 11 +------- tests/test_box.py | 7 +---- tests/test_calc.py | 7 +---- tests/test_catalog.py | 8 +----- tests/test_cdmodel.py | 29 +++++++++----------- tests/test_celestial.py | 8 +----- tests/test_chromatic.py | 8 ++---- tests/test_config_gsobject.py | 8 +----- tests/test_config_image.py | 8 +----- tests/test_config_noise.py | 7 +---- tests/test_config_output.py | 8 +----- tests/test_config_value.py | 8 +----- tests/test_convolve.py | 8 +----- tests/test_correlatednoise.py | 
9 +------ tests/test_deltafunction.py | 6 +---- tests/test_deprecated.py | 7 +---- tests/test_des.py | 9 +------ tests/test_detectors.py | 11 ++------ tests/test_draw.py | 7 +---- tests/test_exponential.py | 7 +---- tests/test_fitsheader.py | 7 +---- tests/test_fouriersqrt.py | 7 +---- tests/test_gaussian.py | 6 +---- tests/test_hsm.py | 7 +---- tests/test_image.py | 11 ++------ tests/test_inclined.py | 10 ++----- tests/test_integ.py | 9 +------ tests/test_interpolatedimage.py | 9 ++----- tests/test_kolmogorov.py | 6 +---- tests/test_lensing.py | 7 +---- tests/test_lsst.py | 48 +++++++++++++++------------------ tests/test_metacal.py | 2 +- tests/test_moffat.py | 6 +---- tests/test_noise.py | 8 +----- tests/test_optics.py | 8 +----- tests/test_phase_psf.py | 11 ++------ tests/test_photon_array.py | 7 +---- tests/test_pse.py | 7 +---- tests/test_random.py | 7 +---- tests/test_randwalk.py | 8 +----- tests/test_real.py | 7 +---- tests/test_scene.py | 10 +++---- tests/test_sed.py | 10 +++---- tests/test_sensor.py | 7 +---- tests/test_sersic.py | 6 +---- tests/test_shapelet.py | 8 +----- tests/test_shear.py | 7 +---- tests/test_spergel.py | 6 +---- tests/test_sum.py | 7 +---- tests/test_table.py | 7 +---- tests/test_transforms.py | 7 +---- tests/test_utilities.py | 8 +----- tests/test_wcs.py | 7 +---- tests/test_wfirst.py | 12 ++------- 56 files changed, 100 insertions(+), 397 deletions(-) diff --git a/tests/test_airy.py b/tests/test_airy.py index bd71b792d72..2a3303b9eac 100644 --- a/tests/test_airy.py +++ b/tests/test_airy.py @@ -21,18 +21,13 @@ import os import sys +import galsim from galsim_test_helpers import * path, filename = os.path.split(__file__) imgdir = os.path.join(path, "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - # These are the default GSParams used when unspecified. 
We'll check that specifying # these explicitly produces the same results. default_params = galsim.GSParams( diff --git a/tests/test_bandpass.py b/tests/test_bandpass.py index f18036d3a07..cbd3b6039cf 100644 --- a/tests/test_bandpass.py +++ b/tests/test_bandpass.py @@ -19,15 +19,10 @@ from __future__ import print_function import os import numpy as np -from galsim_test_helpers import * import sys -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim +import galsim +from galsim_test_helpers import * datapath = os.path.join(galsim.meta_data.share_dir, "bandpasses") diff --git a/tests/test_bessel.py b/tests/test_bessel.py index 455411ae329..29af4bae84b 100644 --- a/tests/test_bessel.py +++ b/tests/test_bessel.py @@ -22,18 +22,9 @@ import numpy as np import warnings +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - import os - import sys - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - - @timer def test_j0(): """Test the bessel.j0 function""" diff --git a/tests/test_box.py b/tests/test_box.py index 42f7579da3b..d51650cd1f7 100644 --- a/tests/test_box.py +++ b/tests/test_box.py @@ -21,18 +21,13 @@ import os import sys +import galsim from galsim_test_helpers import * path, filename = os.path.split(__file__) imgdir = os.path.join(path, "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - # These are the default GSParams used when unspecified. We'll check that specifying # these explicitly produces the same results. 
default_params = galsim.GSParams( diff --git a/tests/test_calc.py b/tests/test_calc.py index 80e7b75078f..7a42b32e06d 100644 --- a/tests/test_calc.py +++ b/tests/test_calc.py @@ -19,14 +19,9 @@ from __future__ import print_function import numpy as np +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - @timer def test_hlr(): diff --git a/tests/test_catalog.py b/tests/test_catalog.py index 408a8e207c3..48a4858953d 100644 --- a/tests/test_catalog.py +++ b/tests/test_catalog.py @@ -21,15 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - @timer def test_basic_catalog(): diff --git a/tests/test_cdmodel.py b/tests/test_cdmodel.py index c6cf036d7b9..3f41f50b7b6 100644 --- a/tests/test_cdmodel.py +++ b/tests/test_cdmodel.py @@ -21,16 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim - from galsim.cdmodel import * -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - from galsim.cdmodel import * # Use a deterministic random number generator so we don't fail tests because of rare flukes in the # random numbers. 
@@ -66,13 +59,13 @@ def test_simplegeometry(): it.setValue(center,center+1,level) # set up models, images - cdr0 = PowerLawCD(2,shiftcoeff,0,0,0,0,0,0) + cdr0 = galsim.cdmodel.PowerLawCD(2,shiftcoeff,0,0,0,0,0,0) i0cdr0 = cdr0.applyForward(i0) - cdt0 = PowerLawCD(2,0,shiftcoeff,0,0,0,0,0) + cdt0 = galsim.cdmodel.PowerLawCD(2,0,shiftcoeff,0,0,0,0,0) i0cdt0 = cdt0.applyForward(i0) - cdrx = PowerLawCD(2,0,0,shiftcoeff,0,0,0,0) - cdtx = PowerLawCD(2,0,0,0,shiftcoeff,0,0,0) + cdrx = galsim.cdmodel.PowerLawCD(2,0,0,shiftcoeff,0,0,0,0) + cdtx = galsim.cdmodel.PowerLawCD(2,0,0,0,shiftcoeff,0,0,0) # these should do something ircdtx = cdtx.applyForward(ir) @@ -169,7 +162,8 @@ def test_simplegeometry(): # a model that should not change anything here u = galsim.UniformDeviate(rseed) - cdnull = PowerLawCD(2, 0, 0, shiftcoeff*u(), shiftcoeff*u(), shiftcoeff*u(), shiftcoeff*u(), 0) + cdnull = galsim.cdmodel.PowerLawCD( + 2, 0, 0, shiftcoeff*u(), shiftcoeff*u(), shiftcoeff*u(), shiftcoeff*u(), 0) i0cdnull = cdnull.applyForward(i0) # setting all pixels to 0 that we expect to be not 0... @@ -224,7 +218,7 @@ def test_fluxconservation(): image.addNoise(galsim.GaussianNoise(sigma=noise, rng=urng)) flat = galsim.Image(size, size, dtype=np.float64, init_value=1.) 
- cd = PowerLawCD( + cd = galsim.cdmodel.PowerLawCD( 2, shiftcoeff, 0.94 * shiftcoeff, shiftcoeff/2.4, shiftcoeff/5., shiftcoeff/3.7, shiftcoeff/1.8, alpha) imagecd = cd.applyForward(image) @@ -267,7 +261,7 @@ def test_forwardbackward(): # Define a consistent rng for repeatability urng = galsim.UniformDeviate(rseed) image.addNoise(galsim.GaussianNoise(sigma=noise, rng=urng)) - cd = PowerLawCD( + cd = galsim.cdmodel.PowerLawCD( 2, shiftcoeff * 0.0234, shiftcoeff * 0.05234, shiftcoeff * 0.01312, shiftcoeff * 0.00823, shiftcoeff * 0.07216, shiftcoeff * 0.01934, alpha) @@ -301,7 +295,8 @@ def test_gainratio(): gal2 = galsim.Gaussian(flux=0.5*galflux, sigma=galsigma) image2 = gal2.drawImage(scale=1.,dtype=np.float64) - cd = PowerLawCD(2, shiftcoeff, 1.389*shiftcoeff, shiftcoeff/7.23, 2.*shiftcoeff/2.4323, + cd = galsim.cdmodel.PowerLawCD( + 2, shiftcoeff, 1.389*shiftcoeff, shiftcoeff/7.23, 2.*shiftcoeff/2.4323, shiftcoeff/1.8934, shiftcoeff/3.1, alpha) image_cd = cd.applyForward(image) @@ -321,7 +316,7 @@ def test_exampleimage(): shiftcoeff = 1.e-7 #n, r0, t0, rx, tx, r, t, alpha - cd = PowerLawCD( + cd = galsim.cdmodel.PowerLawCD( 5, 2. * shiftcoeff, shiftcoeff, 1.25 * shiftcoeff, 1.25 * shiftcoeff, 0.75 * shiftcoeff, 0.5 * shiftcoeff, 0.3) # model used externally to bring cdtest1 to cdtest2 diff --git a/tests/test_celestial.py b/tests/test_celestial.py index 0165deb3d61..60b56011819 100644 --- a/tests/test_celestial.py +++ b/tests/test_celestial.py @@ -23,18 +23,12 @@ import math import coord +import galsim from galsim_test_helpers import * imgdir = os.path.join(".", "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - # We'll use these a lot, so just import them. 
from numpy import sin, cos, tan, arcsin, arccos, arctan, sqrt, pi diff --git a/tests/test_chromatic.py b/tests/test_chromatic.py index 27f1a98dc91..b58ec86064f 100644 --- a/tests/test_chromatic.py +++ b/tests/test_chromatic.py @@ -19,13 +19,9 @@ from __future__ import print_function import os import numpy as np + +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - import sys - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim bppath = os.path.join(galsim.meta_data.share_dir, "bandpasses") sedpath = os.path.join(galsim.meta_data.share_dir, "SEDs") diff --git a/tests/test_config_gsobject.py b/tests/test_config_gsobject.py index 7371aec6e79..b4fd21f20fd 100644 --- a/tests/test_config_gsobject.py +++ b/tests/test_config_gsobject.py @@ -21,15 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - @timer def test_gaussian(): diff --git a/tests/test_config_image.py b/tests/test_config_image.py index f1446cdf4ba..2730f0970d4 100644 --- a/tests/test_config_image.py +++ b/tests/test_config_image.py @@ -25,15 +25,9 @@ import re import warnings +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - @timer def test_single(): diff --git a/tests/test_config_noise.py b/tests/test_config_noise.py index 13ab0ac2d33..9d685133b6f 100644 --- a/tests/test_config_noise.py +++ b/tests/test_config_noise.py @@ -23,14 +23,9 @@ import logging import math +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim @timer def 
test_gaussian(): diff --git a/tests/test_config_output.py b/tests/test_config_output.py index 0a9e243f1bf..0a9b2b8720e 100644 --- a/tests/test_config_output.py +++ b/tests/test_config_output.py @@ -28,15 +28,9 @@ import re import glob +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - @timer def test_fits(): diff --git a/tests/test_config_value.py b/tests/test_config_value.py index 43b1dffe163..dc08984edfb 100644 --- a/tests/test_config_value.py +++ b/tests/test_config_value.py @@ -22,15 +22,9 @@ import sys import math +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - @timer def test_float_value(): diff --git a/tests/test_convolve.py b/tests/test_convolve.py index ea04e09a9de..ab1bf6b0934 100644 --- a/tests/test_convolve.py +++ b/tests/test_convolve.py @@ -21,18 +21,12 @@ import os import sys +import galsim from galsim_test_helpers import * imgdir = os.path.join(".", "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - # These are the default GSParams used when unspecified. We'll check that specifying # these explicitly produces the same results. 
default_params = galsim.GSParams( diff --git a/tests/test_correlatednoise.py b/tests/test_correlatednoise.py index e01a46795a1..a46d96d934f 100644 --- a/tests/test_correlatednoise.py +++ b/tests/test_correlatednoise.py @@ -20,16 +20,9 @@ import time import numpy as np +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - import os - import sys - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # Use a deterministic random number generator so we don't fail tests because of rare flukes # in the random numbers. diff --git a/tests/test_deltafunction.py b/tests/test_deltafunction.py index 8f3167bf3f7..9d128612e70 100644 --- a/tests/test_deltafunction.py +++ b/tests/test_deltafunction.py @@ -21,13 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # These are the default GSParams used when unspecified. We'll check that specifying # these explicitly produces the same results. diff --git a/tests/test_deprecated.py b/tests/test_deprecated.py index d7ae2d762db..6f12cd434d0 100644 --- a/tests/test_deprecated.py +++ b/tests/test_deprecated.py @@ -21,14 +21,9 @@ import sys import numpy as np +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim def check_dep(f, *args, **kwargs): """Check that some function raises a GalSimDeprecationWarning as a warning, but not an error. 
diff --git a/tests/test_des.py b/tests/test_des.py index 5b4edacf703..b0b4103db4e 100644 --- a/tests/test_des.py +++ b/tests/test_des.py @@ -20,17 +20,10 @@ import numpy import os import sys + import galsim import galsim.des - from galsim_test_helpers import * - -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - from galsim._pyfits import pyfits @timer diff --git a/tests/test_detectors.py b/tests/test_detectors.py index 806f5e2df15..c9c92562edd 100644 --- a/tests/test_detectors.py +++ b/tests/test_detectors.py @@ -21,16 +21,9 @@ from __future__ import print_function import numpy as np import warnings -from galsim_test_helpers import * -try: - import galsim -except ImportError: - import os - import sys - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim +import galsim +from galsim_test_helpers import * @timer diff --git a/tests/test_draw.py b/tests/test_draw.py index 7af72b3dc59..33f824a3015 100644 --- a/tests/test_draw.py +++ b/tests/test_draw.py @@ -21,14 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # for flux normalization tests test_flux = 1.8 diff --git a/tests/test_exponential.py b/tests/test_exponential.py index e947bbdfe1d..b41e95bfabc 100644 --- a/tests/test_exponential.py +++ b/tests/test_exponential.py @@ -21,18 +21,13 @@ import os import sys +import galsim from galsim_test_helpers import * path, filename = os.path.split(__file__) imgdir = os.path.join(path, "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - # These are the default GSParams used when unspecified. 
We'll check that specifying # these explicitly produces the same results. default_params = galsim.GSParams( diff --git a/tests/test_fitsheader.py b/tests/test_fitsheader.py index 11e33832548..2bcb5ee799b 100644 --- a/tests/test_fitsheader.py +++ b/tests/test_fitsheader.py @@ -21,14 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # Get whatever version of pyfits or astropy we are using from galsim._pyfits import pyfits, pyfits_version diff --git a/tests/test_fouriersqrt.py b/tests/test_fouriersqrt.py index f3966b7c8b3..04900407320 100644 --- a/tests/test_fouriersqrt.py +++ b/tests/test_fouriersqrt.py @@ -21,17 +21,12 @@ import os import sys +import galsim from galsim_test_helpers import * imgdir = os.path.join(".", "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # These are the default GSParams used when unspecified. We'll check that specifying # these explicitly produces the same results. diff --git a/tests/test_gaussian.py b/tests/test_gaussian.py index ad4c014a84b..8ed938ba922 100644 --- a/tests/test_gaussian.py +++ b/tests/test_gaussian.py @@ -21,17 +21,13 @@ import os import sys +import galsim from galsim_test_helpers import * path, filename = os.path.split(__file__) imgdir = os.path.join(path, "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # These are the default GSParams used when unspecified. We'll check that specifying # these explicitly produces the same results. 
diff --git a/tests/test_hsm.py b/tests/test_hsm.py index 83d5e8e78e0..792054728a0 100644 --- a/tests/test_hsm.py +++ b/tests/test_hsm.py @@ -30,14 +30,9 @@ import numpy as np import math +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # define a range of input parameters for the Gaussians that we are testing gaussian_sig_values = [0.5, 1.0, 2.0] diff --git a/tests/test_image.py b/tests/test_image.py index a8ad498df68..cde93e899cb 100644 --- a/tests/test_image.py +++ b/tests/test_image.py @@ -47,17 +47,10 @@ import os import sys import numpy as np - -from galsim_test_helpers import * from distutils.version import LooseVersion -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - +import galsim +from galsim_test_helpers import * from galsim._pyfits import pyfits # Setup info for tests, not likely to change diff --git a/tests/test_inclined.py b/tests/test_inclined.py index bc60b752c86..a338c04c402 100644 --- a/tests/test_inclined.py +++ b/tests/test_inclined.py @@ -24,17 +24,11 @@ from copy import deepcopy import os import sys - -from galsim_test_helpers import * import numpy as np +import galsim +from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # Save images used in regression testing for manual inspection? 
save_profiles = False diff --git a/tests/test_integ.py b/tests/test_integ.py index b2d141c2e84..1f62fbfd37f 100644 --- a/tests/test_integ.py +++ b/tests/test_integ.py @@ -21,16 +21,9 @@ from __future__ import print_function import numpy as np +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - import os - import sys - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim test_sigma = 7. # test value of Gaussian sigma for integral tests test_rel_err = 1.e-7 # the relative accuracy at which to test diff --git a/tests/test_interpolatedimage.py b/tests/test_interpolatedimage.py index c9267a3dda6..a1b6685d881 100644 --- a/tests/test_interpolatedimage.py +++ b/tests/test_interpolatedimage.py @@ -24,16 +24,11 @@ import os import sys +import galsim from galsim_test_helpers import * +from galsim._pyfits import pyfits path, filename = os.path.split(__file__) # Get the path to this file for use below... -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - -from galsim._pyfits import pyfits # For reference tests: TESTDIR=os.path.join(path, "interpolant_comparison_files") diff --git a/tests/test_kolmogorov.py b/tests/test_kolmogorov.py index 8f14756e2c7..fdf8cb22ff2 100644 --- a/tests/test_kolmogorov.py +++ b/tests/test_kolmogorov.py @@ -21,17 +21,13 @@ import os import sys +import galsim from galsim_test_helpers import * path, filename = os.path.split(__file__) imgdir = os.path.join(path, "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # These are the default GSParams used when unspecified. We'll check that specifying # these explicitly produces the same results. 
diff --git a/tests/test_lensing.py b/tests/test_lensing.py index 8a2ddad8c74..8e3563bcdd8 100644 --- a/tests/test_lensing.py +++ b/tests/test_lensing.py @@ -23,14 +23,9 @@ import sys import warnings +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim refdir = os.path.join(".", "lensing_reference_data") # Directory containing the reference diff --git a/tests/test_lsst.py b/tests/test_lsst.py index abf2a0163bb..05e69a7ad95 100644 --- a/tests/test_lsst.py +++ b/tests/test_lsst.py @@ -21,11 +21,9 @@ import numpy as np import warnings import os -import galsim import sys -from galsim_test_helpers import funcname -from galsim.celestial import CelestialCoord +import galsim from galsim_test_helpers import * have_lsst_stack = True @@ -33,8 +31,6 @@ try: from galsim.lsst import LsstCamera, LsstWCS except ImportError as ee: - #if __name__ == '__main__': - #raise # make sure that you are failing because the stack isn't there, # rather than because of some bug in lsst_wcs.py if "You cannot use the LSST module" in str(ee): @@ -204,7 +200,7 @@ def setUpClass(cls): cls.decPointing = -33.015167519966 cls.rotation = 27.0 - pointing = CelestialCoord(cls.raPointing*galsim.degrees, cls.decPointing*galsim.degrees) + pointing = galsim.CelestialCoord(cls.raPointing*galsim.degrees, cls.decPointing*galsim.degrees) cls.camera = LsstCamera(pointing, cls.rotation*galsim.degrees) @timer @@ -272,13 +268,13 @@ def palpyPupilCoords(star, pointing): for ra, dec, rotation in zip(ra_pointing_list, dec_pointing_list, rotation_angle_list): - pointing = CelestialCoord(ra*galsim.radians, dec*galsim.radians) + pointing = galsim.CelestialCoord(ra*galsim.radians, dec*galsim.radians) camera = LsstCamera(pointing, rotation*galsim.radians) dra_list = (rng.random_sample(100)-0.5)*0.5 ddec_list = (rng.random_sample(100)-0.5)*0.5 - star_list = 
np.array([CelestialCoord((ra+dra)*galsim.radians, + star_list = np.array([galsim.CelestialCoord((ra+dra)*galsim.radians, (dec+ddec)*galsim.radians) for dra, ddec in zip(dra_list, ddec_list)]) @@ -312,7 +308,7 @@ def test_pupil_coordinates_from_floats(self): raPointing = 113.0 decPointing = -25.6 rot = 82.1 - pointing = CelestialCoord(raPointing*galsim.degrees, decPointing*galsim.degrees) + pointing = galsim.CelestialCoord(raPointing*galsim.degrees, decPointing*galsim.degrees) camera = LsstCamera(pointing, rot*galsim.degrees) arcsec_per_radian = 180.0*3600.0/np.pi @@ -321,7 +317,7 @@ def test_pupil_coordinates_from_floats(self): decList = (rng.random_sample(100)-0.5)*20.0+decPointing pointingList = [] for rr, dd in zip(raList, decList): - pointingList.append(CelestialCoord(rr*galsim.degrees, dd*galsim.degrees)) + pointingList.append(galsim.CelestialCoord(rr*galsim.degrees, dd*galsim.degrees)) control_x, control_y = camera.pupilCoordsFromPoint(pointingList) test_x, test_y = camera.pupilCoordsFromFloat(np.radians(raList), np.radians(decList)) @@ -423,9 +419,9 @@ def test_rotation_angle_pupil_coordinate_convention(self): dec = 0.0 delta = 0.001 - pointing = CelestialCoord(ra*galsim.degrees, dec*galsim.degrees) - north = CelestialCoord(ra*galsim.degrees, (dec+delta)*galsim.degrees) - east = CelestialCoord((ra+delta)*galsim.degrees, dec*galsim.degrees) + pointing = galsim.CelestialCoord(ra*galsim.degrees, dec*galsim.degrees) + north = galsim.CelestialCoord(ra*galsim.degrees, (dec+delta)*galsim.degrees) + east = galsim.CelestialCoord((ra+delta)*galsim.degrees, dec*galsim.degrees) camera = LsstCamera(pointing, 0.0*galsim.degrees) x_0, y_0 = camera.pupilCoordsFromPoint(pointing) @@ -474,9 +470,9 @@ def test_rotation_angle_pixel_coordinate_convention(self): dec = 0.0 delta = 0.001 - pointing = CelestialCoord(ra*galsim.degrees, dec*galsim.degrees) - north = CelestialCoord(ra*galsim.degrees, (dec+delta)*galsim.degrees) - east = CelestialCoord((ra+delta)*galsim.degrees, 
dec*galsim.degrees) + pointing = galsim.CelestialCoord(ra*galsim.degrees, dec*galsim.degrees) + north = galsim.CelestialCoord(ra*galsim.degrees, (dec+delta)*galsim.degrees) + east = galsim.CelestialCoord((ra+delta)*galsim.degrees, dec*galsim.degrees) camera = LsstCamera(pointing, 0.0*galsim.degrees) x_0, y_0, name = camera.pixelCoordsFromPoint(pointing) @@ -532,7 +528,7 @@ def setUpClass(cls): cls.rotation = 27.0 * galsim.degrees cls.chip_name = 'R:0,1 S:1,2' - cls.pointing = CelestialCoord(cls.raPointing, cls.decPointing) + cls.pointing = galsim.CelestialCoord(cls.raPointing, cls.decPointing) cls.wcs = LsstWCS(cls.pointing, cls.rotation, cls.chip_name) @timer @@ -542,7 +538,7 @@ def test_constructor(self): when you specify a nonsense chip. """ - pointing = CelestialCoord(112.0*galsim.degrees, -39.0*galsim.degrees) + pointing = galsim.CelestialCoord(112.0*galsim.degrees, -39.0*galsim.degrees) rotation = 23.1*galsim.degrees wcs1 = LsstWCS(pointing, rotation, 'R:1,1 S:2,2') @@ -559,7 +555,7 @@ def test_attribute_exceptions(self): """ with self.assertRaises(AttributeError) as context: - self.wcs.pointing = CelestialCoord(22.0*galsim.degrees, -17.0*galsim.degrees) + self.wcs.pointing = galsim.CelestialCoord(22.0*galsim.degrees, -17.0*galsim.degrees) with self.assertRaises(AttributeError) as context: self.wcs.rotation_angle = 23.0*galsim.degrees @@ -606,10 +602,10 @@ def test_tan_wcs(self): [self.wcs._chip_name]*len(xPixList)) for rr1, dd1, rr2, dd2 in zip(raTest, decTest, wcsRa, wcsDec): - pp = CelestialCoord(rr1*galsim.radians, dd1*galsim.radians) + pp = galsim.CelestialCoord(rr1*galsim.radians, dd1*galsim.radians) dist = \ - pp.distanceTo(CelestialCoord(rr2*galsim.radians, dd2*galsim.radians))/galsim.arcsec + pp.distanceTo(galsim.CelestialCoord(rr2*galsim.radians, dd2*galsim.radians))/galsim.arcsec msg = 'error in tanWcs was %e arcsec' % dist self.assertLess(dist, 0.001, msg=msg) @@ -664,13 +660,13 @@ def test_tan_sip_wcs(self): for rrTest, ddTest, rrTan, ddTan, 
rrSip, ddSip in \ zip(raTest, decTest, tanWcsRa, tanWcsDec, tanSipWcsRa, tanSipWcsDec): - pp = CelestialCoord(rrTest*galsim.radians, ddTest*galsim.radians) + pp = galsim.CelestialCoord(rrTest*galsim.radians, ddTest*galsim.radians) distTan = \ - pp.distanceTo(CelestialCoord(rrTan*galsim.radians, ddTan*galsim.radians))/galsim.arcsec + pp.distanceTo(galsim.CelestialCoord(rrTan*galsim.radians, ddTan*galsim.radians))/galsim.arcsec distSip = \ - pp.distanceTo(CelestialCoord(rrSip*galsim.radians, ddSip*galsim.radians))/galsim.arcsec + pp.distanceTo(galsim.CelestialCoord(rrSip*galsim.radians, ddSip*galsim.radians))/galsim.arcsec msg = 'error in TAN WCS %e arcsec; error in TAN-SIP WCS %e arcsec' % (distTan, distSip) self.assertLess(distSip, 0.001, msg=msg) @@ -728,7 +724,7 @@ def test_eq(self): wcs1 = wcs1._newOrigin(new_origin) self.assertNotEqual(self.wcs, wcs1) - other_pointing = CelestialCoord(1.9*galsim.degrees, -34.0*galsim.degrees) + other_pointing = galsim.CelestialCoord(1.9*galsim.degrees, -34.0*galsim.degrees) wcs2 = LsstWCS(other_pointing, self.rotation, self.chip_name) self.assertNotEqual(self.wcs, wcs2) @@ -744,7 +740,7 @@ def test_copy(self): Test that copy() works """ - pointing = CelestialCoord(64.82*galsim.degrees, -16.73*galsim.degrees) + pointing = galsim.CelestialCoord(64.82*galsim.degrees, -16.73*galsim.degrees) rotation = 116.8*galsim.degrees chip_name = 'R:1,2 S:2,2' wcs0 = LsstWCS(pointing, rotation, chip_name) diff --git a/tests/test_metacal.py b/tests/test_metacal.py index 800addcf879..0720dcc206c 100644 --- a/tests/test_metacal.py +++ b/tests/test_metacal.py @@ -19,8 +19,8 @@ from __future__ import print_function import time import numpy as np -import galsim +import galsim from galsim_test_helpers import * VAR_NDECIMAL=4 diff --git a/tests/test_moffat.py b/tests/test_moffat.py index 5823bb0b90f..2040494a3b2 100644 --- a/tests/test_moffat.py +++ b/tests/test_moffat.py @@ -21,17 +21,13 @@ import os import sys +import galsim from galsim_test_helpers 
import * path, filename = os.path.split(__file__) imgdir = os.path.join(path, "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # These are the default GSParams used when unspecified. We'll check that specifying # these explicitly produces the same results. diff --git a/tests/test_noise.py b/tests/test_noise.py index 12e359e147d..dc31ce49fd4 100644 --- a/tests/test_noise.py +++ b/tests/test_noise.py @@ -21,15 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - testseed = 1000 precision = 10 diff --git a/tests/test_optics.py b/tests/test_optics.py index 6953f9c82e8..58d333a6dc6 100644 --- a/tests/test_optics.py +++ b/tests/test_optics.py @@ -21,17 +21,11 @@ import os import sys +import galsim from galsim_test_helpers import * imgdir = os.path.join(".", "Optics_comparison_images") # Directory containing the reference images. 
-try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - testshape = (512, 512) # shape of image arrays for all tests diff --git a/tests/test_phase_psf.py b/tests/test_phase_psf.py index ace9d5b8327..64bf051ab84 100644 --- a/tests/test_phase_psf.py +++ b/tests/test_phase_psf.py @@ -19,16 +19,9 @@ from __future__ import print_function import os import numpy as np -from galsim_test_helpers import * - -try: - import galsim -except ImportError: - import sys - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim +import galsim +from galsim_test_helpers import * imgdir = os.path.join(".", "Optics_comparison_images") # Directory containing the reference images. diff --git a/tests/test_photon_array.py b/tests/test_photon_array.py index 576d66c704b..0b9209d3eb5 100644 --- a/tests/test_photon_array.py +++ b/tests/test_photon_array.py @@ -21,14 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim bppath = os.path.join(galsim.meta_data.share_dir, "bandpasses") sedpath = os.path.join(galsim.meta_data.share_dir, "SEDs") diff --git a/tests/test_pse.py b/tests/test_pse.py index 449706ea0a7..ebf253154ec 100644 --- a/tests/test_pse.py +++ b/tests/test_pse.py @@ -21,14 +21,9 @@ import numpy as np import time +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim path, filename = os.path.split(__file__) datapath = os.path.abspath(os.path.join(path, "../examples/data/")) diff --git a/tests/test_random.py b/tests/test_random.py index bb555f25213..1f12aec6436 100644 --- 
a/tests/test_random.py +++ b/tests/test_random.py @@ -21,14 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # # Note: all tests below were generated using the python interface to the RNG. Eventually need tests diff --git a/tests/test_randwalk.py b/tests/test_randwalk.py index 0bcfc3bca9c..d867e23cd0e 100644 --- a/tests/test_randwalk.py +++ b/tests/test_randwalk.py @@ -20,16 +20,10 @@ import numpy as np import os import sys -import galsim +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - @timer def test_randwalk_defaults(): diff --git a/tests/test_real.py b/tests/test_real.py index 431db2410b2..5981a27b2fa 100644 --- a/tests/test_real.py +++ b/tests/test_real.py @@ -21,14 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim bppath = os.path.join(galsim.meta_data.share_dir, "bandpasses") sedpath = os.path.join(galsim.meta_data.share_dir, "SEDs") diff --git a/tests/test_scene.py b/tests/test_scene.py index 25e4d6f3c66..8fd6f2213c9 100644 --- a/tests/test_scene.py +++ b/tests/test_scene.py @@ -19,15 +19,11 @@ from __future__ import print_function import os import numpy as np -from galsim_test_helpers import * import sys -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim +import galsim +from galsim_test_helpers import * + path, filename = os.path.split(__file__) datapath = os.path.abspath(os.path.join(path, "../examples/data/")) diff --git a/tests/test_sed.py 
b/tests/test_sed.py index 651b6bfca20..cc27d5d92ca 100644 --- a/tests/test_sed.py +++ b/tests/test_sed.py @@ -19,17 +19,13 @@ from __future__ import print_function import os import numpy as np -from galsim_test_helpers import * import sys from astropy import units from astropy import constants -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim +import galsim +from galsim_test_helpers import * + bppath = os.path.join(galsim.meta_data.share_dir, "bandpasses") sedpath = os.path.join(galsim.meta_data.share_dir, "SEDs") diff --git a/tests/test_sensor.py b/tests/test_sensor.py index 48f8d50bdaa..2f04261debe 100644 --- a/tests/test_sensor.py +++ b/tests/test_sensor.py @@ -21,14 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim @timer def test_simple(): diff --git a/tests/test_sersic.py b/tests/test_sersic.py index 3fa0b366ca6..1f32e4e3310 100644 --- a/tests/test_sersic.py +++ b/tests/test_sersic.py @@ -21,17 +21,13 @@ import os import sys +import galsim from galsim_test_helpers import * path, filename = os.path.split(__file__) imgdir = os.path.join(path, "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # These are the default GSParams used when unspecified. We'll check that specifying # these explicitly produces the same results. 
diff --git a/tests/test_shapelet.py b/tests/test_shapelet.py index 265d9bf259b..46e01c0158d 100644 --- a/tests/test_shapelet.py +++ b/tests/test_shapelet.py @@ -21,18 +21,12 @@ import os import sys +import galsim from galsim_test_helpers import * imgdir = os.path.join(".", "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - # define a series of tests @timer diff --git a/tests/test_shear.py b/tests/test_shear.py index 4f1694bbcaf..06e3659f570 100644 --- a/tests/test_shear.py +++ b/tests/test_shear.py @@ -21,14 +21,9 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # Below are a set of tests to make sure that we have achieved consistency in defining shears and # ellipses using different conventions. The underlying idea is that in test_base.py we already diff --git a/tests/test_spergel.py b/tests/test_spergel.py index 8fa3fe656e6..7e17d2c9723 100644 --- a/tests/test_spergel.py +++ b/tests/test_spergel.py @@ -21,17 +21,13 @@ import os import sys +import galsim from galsim_test_helpers import * path, filename = os.path.split(__file__) imgdir = os.path.join(path, "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # These are the default GSParams used when unspecified. We'll check that specifying # these explicitly produces the same results. 
diff --git a/tests/test_sum.py b/tests/test_sum.py index 5b5a85d64ae..156bda07d8f 100644 --- a/tests/test_sum.py +++ b/tests/test_sum.py @@ -21,17 +21,12 @@ import os import sys +import galsim from galsim_test_helpers import * imgdir = os.path.join(".", "SBProfile_comparison_images") # Directory containing the reference # images. -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # These are the default GSParams used when unspecified. We'll check that specifying # these explicitly produces the same results. diff --git a/tests/test_table.py b/tests/test_table.py index b25415f5e97..6d4cf341fb3 100644 --- a/tests/test_table.py +++ b/tests/test_table.py @@ -27,15 +27,10 @@ import os import numpy as np +import galsim from galsim_test_helpers import * path, filename = os.path.split(__file__) # Get the path to this file for use below... -try: - import galsim -except ImportError: - import sys - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim TESTDIR=os.path.join(path, "table_comparison_files") diff --git a/tests/test_transforms.py b/tests/test_transforms.py index 54ac4866733..37fbc8847f9 100644 --- a/tests/test_transforms.py +++ b/tests/test_transforms.py @@ -21,17 +21,12 @@ import os import sys +import galsim from galsim_test_helpers import * imgdir = os.path.join(".", "SBProfile_comparison_images") # Directory containing the reference # images. 
-try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # for flux normalization tests test_flux = 1.8 diff --git a/tests/test_utilities.py b/tests/test_utilities.py index 3a8354cd5f0..d1708d33072 100644 --- a/tests/test_utilities.py +++ b/tests/test_utilities.py @@ -21,14 +21,8 @@ import os import sys +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim -import galsim.utilities testshape = (512, 512) # shape of image arrays for all tests decimal = 6 # Last decimal place used for checking equality of float arrays, see diff --git a/tests/test_wcs.py b/tests/test_wcs.py index f604fd8a39f..65198601830 100644 --- a/tests/test_wcs.py +++ b/tests/test_wcs.py @@ -22,14 +22,9 @@ import sys import warnings +import galsim from galsim_test_helpers import * -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim # These positions will be used a few times below, so define them here. # One of the tests requires that the last pair are integers, so don't change that. 
diff --git a/tests/test_wfirst.py b/tests/test_wfirst.py index 0655673b36f..6d7b538b56e 100644 --- a/tests/test_wfirst.py +++ b/tests/test_wfirst.py @@ -21,18 +21,10 @@ from __future__ import print_function import numpy as np +import galsim +import galsim.wfirst from galsim_test_helpers import * -try: - import galsim - import galsim.wfirst -except ImportError: - import os - import sys - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - import galsim.wfirst @timer def skip_wfirst_wcs(): From 512e3f7f6fa2a286eb88c6567288eb57eb9c22e7 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Fri, 5 Jan 2018 23:48:02 -0500 Subject: [PATCH 031/111] Use find_packages to include package subdirectories (#809-pybind11) --- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index e41a1c10ba9..af8a0b84949 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ import ctypes -from setuptools import setup, Extension +from setuptools import setup, Extension, find_packages from setuptools.command.build_ext import build_ext from setuptools.command.install import install from setuptools.command.install_scripts import install_scripts @@ -306,7 +306,7 @@ def run(self): license = "BSD License", url="https://github.com/rmjarvis/GalSim", download_url="https://github.com/GalSim-developers/GalSim/releases/tag/v%s.zip"%galsim_version, - packages=['galsim'], + packages=find_packages(), package_data={'galsim' : shared_data}, #include_package_data=True, ext_modules=[ext], From 6ec3c63db1572c31277e39c1420f231db3702f06 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sat, 6 Jan 2018 12:26:09 -0500 Subject: [PATCH 032/111] Make compatible with pybind11 < v2.2 (#809-pybind11) --- pysrc/PyBind11Helper.h | 12 +++++++++++- pysrc/module.cpp | 4 ++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/pysrc/PyBind11Helper.h b/pysrc/PyBind11Helper.h index 6aad554abed..f96c05444e2 100644 
--- a/pysrc/PyBind11Helper.h +++ b/pysrc/PyBind11Helper.h @@ -32,6 +32,8 @@ namespace bp = boost::python; #define PB11_MAKE_MODULE(x) BOOST_PYTHON_MODULE(x) +#define PB11_START_MODULE(x) +#define PB11_END_MODULE(x) #define TUPLE(args...) bp::tuple #define MAKE_TUPLE bp::make_tuple @@ -64,7 +66,15 @@ namespace bp = boost::python; #include namespace bp = pybind11; -#define PB11_MAKE_MODULE(x) PYBIND11_MODULE(x,x) +#if PYBIND11_VERSION_MAJOR >= 3 || (PYBIND11_VERSION_MAJOR == 2 && PYBIND11_VERSION_MINOR >= 2) + #define PB11_MAKE_MODULE(x) PYBIND11_MODULE(x,x) + #define PB11_START_MODULE(x) + #define PB11_END_MODULE(x) +#else + #define PB11_MAKE_MODULE(x) PYBIND11_PLUGIN(x) + #define PB11_START_MODULE(x) pybind11::module x(#x) + #define PB11_END_MODULE(x) return x.ptr() +#endif #define TUPLE(args...) std::tuple #define MAKE_TUPLE std::make_tuple diff --git a/pysrc/module.cpp b/pysrc/module.cpp index 0ef229e0390..f1ef8a68b0a 100644 --- a/pysrc/module.cpp +++ b/pysrc/module.cpp @@ -67,6 +67,8 @@ namespace galsim { PB11_MAKE_MODULE(_galsim) { + PB11_START_MODULE(_galsim); + galsim::pyExportBounds(_galsim); galsim::pyExportPhotonArray(_galsim); galsim::pyExportImage(_galsim); @@ -100,4 +102,6 @@ PB11_MAKE_MODULE(_galsim) galsim::hsm::pyExportHSM(_galsim); galsim::integ::pyExportInteg(_galsim); galsim::math::pyExportBessel(_galsim); + + PB11_END_MODULE(_galsim); } From 7bdcff6c4c9d3fc7bcfee3e7a8eebe8654142798 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sat, 6 Jan 2018 12:26:27 -0500 Subject: [PATCH 033/111] Try to use ccache if possible (#809-pybind11) --- setup.py | 101 ++++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 88 insertions(+), 13 deletions(-) diff --git a/setup.py b/setup.py index af8a0b84949..35639fd13f1 100644 --- a/setup.py +++ b/setup.py @@ -29,9 +29,6 @@ def all_files_from(dir, ext=''): sources = all_files_from('src', '.cpp') + all_files_from('pysrc', '.cpp') headers = all_files_from('include') shared_data = all_files_from('share') 
-print('sources = ',sources) -print('headers = ',headers) -print('shared = ',shared_data) # If we build with debug, undefine NDEBUG flag undef_macros = [] @@ -131,13 +128,91 @@ def find_fftw_lib(): print("your LIBRARY_PATH or FFTW_PATH environment variable.") raise +def try_cc(cc, cflags=[], lflags=[]): + """Check if compiling a simple bit of c++ code with the given compiler works properly. + """ + import subprocess + import tempfile + from textwrap import dedent + cpp_code = dedent(""" + #include + #include + int main() { + int n = 500; + std::vector x(n,0.); + for (int i=0; i ',self.compiler.compiler_so) + # Add the appropriate extra flags for that compiler. for e in self.extensions: e.extra_compile_args = copt[ comp_type ] @@ -213,10 +290,8 @@ def make_meta_data(install_dir): class my_install(install): def run(self): - print('install_lib = ',self.install_lib) # Make the meta_data.py file based on the actual installation directory. meta_data_file = make_meta_data(self.install_lib) - print('made meta_data file ',os.path.abspath(meta_data_file)) install.run(self) # AFAICT, setuptools doesn't provide any easy access to the final installation location of the From a7dc3b1d103ccffcef2a0c57bfbdbf3652909638 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sat, 6 Jan 2018 12:40:08 -0500 Subject: [PATCH 034/111] Remove unnecessary excludes from manifest (#809-pybind11) --- MANIFEST.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MANIFEST.in b/MANIFEST.in index 2b6eaaef2e9..b9125fd1bab 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -2,4 +2,4 @@ recursive-include galsim *.py include *.md include LICENSE recursive-include share * -global-exclude __pycache__ *.pyc .obj .gitignore SCons* +global-exclude .gitignore SCons* From f1aa716c9941a99c5b82652a0956c6acb3d7da97 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sat, 6 Jan 2018 14:09:03 -0500 Subject: [PATCH 035/111] Avoid clang warning and inaccuracy from over optimizing (#809-pybind11) --- 
include/galsim/IgnoreWarnings.h | 4 ++++ setup.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/include/galsim/IgnoreWarnings.h b/include/galsim/IgnoreWarnings.h index 5a93bfbb0bf..e98edbf5dd7 100644 --- a/include/galsim/IgnoreWarnings.h +++ b/include/galsim/IgnoreWarnings.h @@ -64,6 +64,10 @@ #pragma GCC diagnostic ignored "-Wlogical-op-parentheses" #endif +#if __has_warning("-Wshift-count-overflow") +#pragma GCC diagnostic ignored "-Wshift-count-overflow" +#endif + // And clang might need this even if it claims to be GNUC before 4.8. #if __has_warning("-Wunused-local-typedefs") #pragma GCC diagnostic ignored "-Wunused-local-typedefs" diff --git a/setup.py b/setup.py index 35639fd13f1..e95553d5258 100644 --- a/setup.py +++ b/setup.py @@ -38,7 +38,7 @@ def all_files_from(dir, ext=''): copt = { 'gcc' : ['-O2','-msse2','-std=c++11','-fvisibility=hidden'], 'icc' : ['-O2','-msse2','-vec-report0','-std=c++11'], - 'clang' : ['-O2','-msse2','-ffast-math','-std=c++11','-Wno-shorten-64-to-32','-fvisibility=hidden'], + 'clang' : ['-O2','-msse2','-std=c++11','-Wno-shorten-64-to-32','-fvisibility=hidden'], 'unknown' : [], } From 34ffca1d17265232fdc434b5fefd1e1ef9620122 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sat, 6 Jan 2018 17:18:51 -0500 Subject: [PATCH 036/111] Use mutiple processes for compiling if possible (#809-pybind11) --- setup.py | 86 +++++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 79 insertions(+), 7 deletions(-) diff --git a/setup.py b/setup.py index e95553d5258..c517e90a209 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ import sys,os,glob,re import platform import ctypes - +import types from setuptools import setup, Extension, find_packages from setuptools.command.build_ext import build_ext @@ -206,6 +206,65 @@ def try_cc(cc, cflags=[], lflags=[]): os.remove(exe_file.name) return p.returncode == 0 +def cpu_count(): + """Get the number of cpus + """ + try: + import psutil + return psutil.cpu_count() + 
except ImportError: + pass + + if hasattr(os, 'sysconf'): + if 'SC_NPROCESSORS_ONLN' in os.sysconf_names: + # Linux & Unix: + ncpus = os.sysconf('SC_NPROCESSORS_ONLN') + if isinstance(ncpus, int) and ncpus > 0: + return ncpus + else: # OSX: + p = subprocess.Popen(['sysctl -n hw.ncpu'],stdout=subprocess.PIPE,shell=True) + return int(p.stdout.read().strip()) + # Windows: + if 'NUMBER_OF_PROCESSORS' in os.environ: + ncpus = int(os.environ['NUMBER_OF_PROCESSORS']) + if ncpus > 0: + return ncpus + return 1 # Default + +def parallel_compile(self, sources, output_dir=None, macros=None, + include_dirs=None, debug=0, extra_preargs=None, + extra_postargs=None, depends=None): + """New compile function that we monkey patch into the existing compiler instance. + """ + import multiprocessing.pool + + # Copied from the regular compile function + ncpu = cpu_count() + macros, objects, extra_postargs, pp_opts, build = \ + self._setup_compile(output_dir, macros, include_dirs, sources, + depends, extra_postargs) + cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) + + def _single_compile(obj): + try: + src, ext = build[obj] + except KeyError: + return + self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) + + if ncpu == 1: + # This is equivalent to regular compile function + for obj in objects: + _single_compile(obj) + else: + # This next bit is taken from here: + # https://stackoverflow.com/questions/11013851/speeding-up-build-process-with-distutils + # convert to list, imap is evaluated on-demand + list(multiprocessing.pool.ThreadPool(ncpu).imap(_single_compile,objects)) + + # Return *all* object filenames, not just the ones we just built. + return objects + # Make a subclass of build_ext so we can add to the -I list. class my_builder( build_ext ): @@ -252,20 +311,33 @@ def build_extensions(self): else: print('Using compiler %s, which is %s'%(cc,comp_type)) + # Add the appropriate extra flags for that compiler. 
+ print('Using extra args ',copt[comp_type]) + cflags += copt[comp_type] + # Check if we can use ccache to speed up repeated compilation. if try_cc('ccache ' + cc, cflags): print('Using ccache') - self.compiler.set_executable('compiler_so', ['ccache'] + self.compiler.compiler_so) + self.compiler.set_executable('compiler_so', ['ccache',cc] + cflags) #print('compiler_so => ',self.compiler.compiler_so) - # Add the appropriate extra flags for that compiler. - for e in self.extensions: - e.extra_compile_args = copt[ comp_type ] - #e.extra_link_args = lopt[ comp_type ] + # Try to compile in parallel + if self.parallel is None or self.parallel is True: + ncpu = cpu_count() + elif self.parallel: # is an integer + ncpu = self.parallel + else: + ncpu = 1 + if ncpu > 1: + print('Using %d cpus for compiling'%ncpu) + if self.parallel is None: + print('To override, you may do python setup.py build -j1') + self.compiler.compile = types.MethodType(parallel_compile, self.compiler) # Now run the normal build function. build_ext.build_extensions(self) + def make_meta_data(install_dir): print('install_dir = ',install_dir) meta_data_file = os.path.join('galsim','meta_data.py') @@ -372,7 +444,7 @@ def run(self): with open(version_h_file, 'w') as f: f.write(version_h_text) -dist = setup(name="GalSim", +dist = setup(name="GalSim", version=galsim_version, author="GalSim Developers (point of contact: Mike Jarvis)", author_email="michael@jarvis.net", From 6fb64da12ef47722a65f50d892034a9d4525321f Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 8 Jan 2018 10:20:17 -0500 Subject: [PATCH 037/111] Don't die if build_ext doesn't have parallel attribute. 
(#809-pybind11) --- setup.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/setup.py b/setup.py index c517e90a209..14f15056795 100644 --- a/setup.py +++ b/setup.py @@ -322,6 +322,10 @@ def build_extensions(self): #print('compiler_so => ',self.compiler.compiler_so) # Try to compile in parallel + if not hasattr(self, 'parallel'): + # This was new in distutils version 3.5. + # If user has older version, just set parallel to True and move on. + self.parallel = True if self.parallel is None or self.parallel is True: ncpu = cpu_count() elif self.parallel: # is an integer From 4a5dcb542b456b8eaa5fab5c327641cc28e2aaca Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 8 Jan 2018 10:48:08 -0500 Subject: [PATCH 038/111] Try adding the copt flags to the extensions (#809-pybind11) --- setup.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 14f15056795..ff5963f103d 100644 --- a/setup.py +++ b/setup.py @@ -313,7 +313,11 @@ def build_extensions(self): # Add the appropriate extra flags for that compiler. print('Using extra args ',copt[comp_type]) - cflags += copt[comp_type] + #cflags += copt[comp_type] + # It didn't work for Erin to add this to the end of cflags for some reason. Maybe related + # to the distutils version? Not sure. Anyway, this way should work. + for e in self.extensions: + e.extra_compile_args = copt[comp_type] # Check if we can use ccache to speed up repeated compilation. if try_cc('ccache ' + cc, cflags): From bf3804c52ba3c8cb95607a6345ebde0ceee0ca49 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Tue, 9 Jan 2018 09:53:42 -0500 Subject: [PATCH 039/111] Use env python now for run_all_tests.
(#809-pybind11) --- tests/run_all_tests | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/run_all_tests b/tests/run_all_tests index dd9a209e613..8c352cd3be6 100755 --- a/tests/run_all_tests +++ b/tests/run_all_tests @@ -1,5 +1,5 @@ #!/bin/bash -python=../bin/installed_python +python='/usr/bin/env python' for test in `ls test*.py` do echo $test From 3e4c90e4cc7510d373ae476e161842c08a1e10f7 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Tue, 9 Jan 2018 20:23:56 -0500 Subject: [PATCH 040/111] Make sure both env and pyenv get the USE* defines and the -std= flags (#809-pybind11) --- SConstruct | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/SConstruct b/SConstruct index 4634230749b..5c104c5cd74 100644 --- a/SConstruct +++ b/SConstruct @@ -1191,7 +1191,6 @@ int main() 'Error: TMV file failed to link correctly', 'Check that the correct location is specified for TMV_DIR') - config.env.AppendUnique(CPPDEFINES=['USE_TMV']) config.Result(1) return 1 @@ -1900,7 +1899,6 @@ BOOST_PYTHON_MODULE(check_bp) { if not result: ErrorExit('Unable to build a python loadable module with Boost.Python') - config.env.AppendUnique(CPPDEFINES=['USE_BOOST']) config.Result(1) return 1 @@ -2072,10 +2070,12 @@ def DoCppChecks(config): # Boost if config.env['USE_BOOST']: + config.env.AppendUnique(CPPDEFINES=['USE_BOOST']) config.CheckBoost() # TMV if config.env['USE_TMV']: + config.env.AppendUnique(CPPDEFINES=['USE_TMV']) if not config.CheckHeader('TMV.h',language='C++'): ErrorExit( 'TMV.h not found', @@ -2211,8 +2211,14 @@ def DoConfig(env): }) DoPyChecks(config) pyenv = config.Finish() - env['final_messages'] = pyenv['final_messages'] + # Make sure any -std= compiler flags required for pysrc get propagated back to the + # main environment. 
+ for flag in pyenv['CCFLAGS']: + if 'std=' in flag: + env.AppendUnique(CCFLAGS=[flag]) + + env['final_messages'] = pyenv['final_messages'] env['pyenv'] = pyenv # Turn the cache back on now, since we always want it for the main compilation steps. From 678bbb218609dfe235919abb2db5d88cbf907401 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Tue, 9 Jan 2018 20:24:16 -0500 Subject: [PATCH 041/111] Fix for when USE_BOOST=true (#809-pybind11) --- pysrc/PyBind11Helper.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pysrc/PyBind11Helper.h b/pysrc/PyBind11Helper.h index f96c05444e2..a4206a98e58 100644 --- a/pysrc/PyBind11Helper.h +++ b/pysrc/PyBind11Helper.h @@ -32,7 +32,7 @@ namespace bp = boost::python; #define PB11_MAKE_MODULE(x) BOOST_PYTHON_MODULE(x) -#define PB11_START_MODULE(x) +#define PB11_START_MODULE(x) bp::scope x; #define PB11_END_MODULE(x) #define TUPLE(args...) bp::tuple From deb944231bfd1cdf16274f075e1ed96f4f102c71 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Tue, 9 Jan 2018 20:24:36 -0500 Subject: [PATCH 042/111] Fix #if not directive for tgamma, lgamma (#809-pybind11) --- src/math/Gamma.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/math/Gamma.cpp b/src/math/Gamma.cpp index 8d4f9bb7947..430295ef69b 100644 --- a/src/math/Gamma.cpp +++ b/src/math/Gamma.cpp @@ -46,7 +46,7 @@ namespace math { // Defined in BesselJ.cpp double dcsevl(double x, const double* cs, int n); -#if not __cplusplus >= 201103L +#if not (__cplusplus >= 201103L) double tgamma(double x) { double g = dgamma(x); From 414be686bb23e43603ade15034c0690820630357 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Tue, 9 Jan 2018 20:45:32 -0500 Subject: [PATCH 043/111] Fix making executables when not using setup.py (#809-pybind11) --- bin/SConscript | 2 +- bin/galsim.py | 6 ++++++ bin/galsim_download_cosmos.py | 5 +++++ galsim/download_cosmos.py | 2 ++ 4 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 bin/galsim.py create mode 100644 
bin/galsim_download_cosmos.py diff --git a/bin/SConscript b/bin/SConscript index c631c97f272..d4c3def773b 100644 --- a/bin/SConscript +++ b/bin/SConscript @@ -7,7 +7,7 @@ RunUninstall = env['_RunUninstall'] install_subdir = 'bin' -scripts = [ 'galsim' , 'galsim_yaml', 'galsim_json', 'galsim_download_cosmos' ] +scripts = [ 'galsim' , 'galsim_download_cosmos' ] targets = [ env.ExecScript(script, script + '.py') for script in scripts ] AlwaysBuild(targets) diff --git a/bin/galsim.py b/bin/galsim.py new file mode 100644 index 00000000000..d5db452019b --- /dev/null +++ b/bin/galsim.py @@ -0,0 +1,6 @@ + +# Equivalent to python -m galsim ... +import runpy + +if __name__ == '__main__': + runpy.run_module('galsim') diff --git a/bin/galsim_download_cosmos.py b/bin/galsim_download_cosmos.py new file mode 100644 index 00000000000..e055f7ae62b --- /dev/null +++ b/bin/galsim_download_cosmos.py @@ -0,0 +1,5 @@ + +import galsim + +if __name__ == '__main__': + galsim.download_cosmos.main() diff --git a/galsim/download_cosmos.py b/galsim/download_cosmos.py index b4cda9d04c0..322c740b13b 100644 --- a/galsim/download_cosmos.py +++ b/galsim/download_cosmos.py @@ -28,6 +28,8 @@ except: from urllib.request import urlopen +script_name = 'galsim_download_cosmos' + def parse_args(): """Handle the command line arguments using either argparse (if available) or optparse. 
""" From 892baadc5bcd6716e0f45a3e7b1c55426e1ad7cd Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Wed, 10 Jan 2018 21:38:43 -0500 Subject: [PATCH 044/111] Fix Eigen version of CRGCoefficients for nband > nsed (#809-pybind11) --- src/RealGalaxy.cpp | 33 ++++++++++++++++++++++++++------- 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/src/RealGalaxy.cpp b/src/RealGalaxy.cpp index 82e38c8fddd..91b29817fc9 100644 --- a/src/RealGalaxy.cpp +++ b/src/RealGalaxy.cpp @@ -139,13 +139,32 @@ namespace galsim A = ww.asDiagonal() * psf; b = ww.asDiagonal() * kimg; - Eigen::ColPivHouseholderQR qr = A.colPivHouseholderQr(); - x = qr.solve(b); - // (AtA)^-1 = (PtRtQtQRP)^-1 = (PtRtRP)^-1 = Pt R^-1 Rt^-1 P - dxT = qr.colsPermutation().transpose() * - qr.matrixR().triangularView().solve( - qr.matrixR().triangularView().transpose().solve( - MatrixXcd(qr.colsPermutation()))); + Eigen::HouseholderQR qr = A.householderQr(); + Eigen::Diagonal Rdiag = qr.matrixQR().diagonal(); + if (Rdiag.array().abs().minCoeff() < 1.e-15*Rdiag.array().abs().maxCoeff()) { + // Then (nearly) signular. Use QRP instead. (This should be fairly rare.) 
+ Eigen::ColPivHouseholderQR qrp = A.colPivHouseholderQr(); + x = qrp.solve(b); + + // A = Q R Pt + // (AtA)^-1 = (PRtQtQRPt)^-1 = (PRtRPt)^-1 = P R^-1 Rt^-1 Pt + const int nzp = qrp.nonzeroPivots(); + Eigen::TriangularView, Upper> R = + qrp.matrixR().topLeftCorner(nzp,nzp).triangularView(); + dxT.setIdentity(); + R.adjoint().solveInPlace(dxT.topLeftCorner(nzp,nzp)); + R.solveInPlace(dxT.topLeftCorner(nzp,nzp)); + dxT = qrp.colsPermutation() * dxT * qrp.colsPermutation().transpose(); + } else { + x = qr.solve(b); + // A = Q R + // (AtA)^-1 = (RtQtQR)^-1 = (RtR)^-1 = R^-1 Rt^-1 + Eigen::TriangularView, Upper> R = + qr.matrixQR().topRows(nsed).triangularView(); + dxT.setIdentity(); + R.adjoint().solveInPlace(dxT); + R.solveInPlace(dxT); + } #endif From bcba0449e4fe9dd310c8e51785f2bda8991ea609 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Thu, 11 Jan 2018 00:03:50 -0500 Subject: [PATCH 045/111] Switch over completely to pybind11 v2.2 syntax (#809-pybind11) --- SConstruct | 10 +++------- conda_requirements.txt | 2 +- pysrc/HSM.cpp | 11 ++--------- pysrc/Image.cpp | 2 +- pysrc/PhotonArray.cpp | 2 +- pysrc/PyBind11Helper.h | 24 +++++++++--------------- pysrc/SBAdd.cpp | 2 +- pysrc/SBConvolve.cpp | 2 +- pysrc/SBShapelet.cpp | 2 +- pysrc/Silicon.cpp | 2 +- pysrc/Table.cpp | 4 ++-- requirements.txt | 2 +- 12 files changed, 24 insertions(+), 41 deletions(-) diff --git a/SConstruct b/SConstruct index 5c104c5cd74..710a13f9e7f 100644 --- a/SConstruct +++ b/SConstruct @@ -1831,10 +1831,8 @@ def CheckPyBind11(config): int check_pb_run() { return 23; } -PYBIND11_PLUGIN(check_pb) { - pybind11::module m("check_pb"); - m.def("run",&check_pb_run); - return m.ptr(); +PYBIND11_MODULE(check_pb, check_pb) { + check_pb.def("run",&check_pb_run); } """ result = (CheckFlags(config, '', pb_source_file) or @@ -1934,10 +1932,8 @@ BOOST_PYTHON_MODULE(test_throw) { boost::python::def("run", &run_throw); } #else -PYBIND11_PLUGIN(test_throw) { - pybind11::module test_throw("test_throw"); 
+PYBIND11_MODULE(test_throw, test_throw) { test_throw.def("run", &run_throw); - return test_throw.ptr(); } #endif """ diff --git a/conda_requirements.txt b/conda_requirements.txt index c48dead7a45..5a7e0de4256 100644 --- a/conda_requirements.txt +++ b/conda_requirements.txt @@ -5,7 +5,7 @@ future >= 0.15 astropy >= 2.0 pyyaml >= 3.12 pandas >= 0.20 -pybind11 >= 2.0 +pybind11 >= 2.2 pip >= 9.0 gcc >= 4.8 cython >= 0.26 diff --git a/pysrc/HSM.cpp b/pysrc/HSM.cpp index 941f716e2aa..b372d93c978 100644 --- a/pysrc/HSM.cpp +++ b/pysrc/HSM.cpp @@ -35,12 +35,7 @@ namespace hsm { float resolution_factor, float psf_sigma, float psf_e1, float psf_e2, const char* error_message) { -#ifdef USE_BOOST ShapeData* data = new ShapeData(); -#else - PB11_PLACEMENT_NEW ShapeData(); - ShapeData* data = &instance; -#endif data->image_bounds = image_bounds; data->moments_status = moments_status; data->observed_e1 = observed_e1; @@ -63,15 +58,13 @@ namespace hsm { data->psf_e1 = psf_e1; data->psf_e2 = psf_e2; data->error_message = error_message; -#ifdef USE_BOOST return data; -#endif } template static void WrapTemplates(PB11_MODULE& _galsim) { - typedef void (*FAM_func)(ShapeData&t, const BaseImage&, const BaseImage&, + typedef void (*FAM_func)(ShapeData&, const BaseImage&, const BaseImage&, double, double, Position, bool, const HSMParams&); GALSIM_DOT def("_FindAdaptiveMomView", FAM_func(&FindAdaptiveMomView)); @@ -90,7 +83,7 @@ namespace hsm { int, double, double, double>()); bp::class_(GALSIM_COMMA "ShapeData" BP_NOINIT) - .def("__init__", BP_MAKE_CONSTRUCTOR(&ShapeData_init)) + .def(BP_MAKE_CONSTRUCTOR(&ShapeData_init)) .def_readonly("image_bounds", &ShapeData::image_bounds) .def_readonly("moments_status", &ShapeData::moments_status) .def_readonly("observed_e1", &ShapeData::observed_e1) diff --git a/pysrc/Image.cpp b/pysrc/Image.cpp index f7e8b7d2cad..73e895d4864 100644 --- a/pysrc/Image.cpp +++ b/pysrc/Image.cpp @@ -41,7 +41,7 @@ namespace galsim { typedef BP_CONSTRUCTOR((*Make_func), 
ImageView, size_t, int, int, const Bounds&); bp::class_ BP_BASES(BaseImage)>( GALSIM_COMMA ("ImageView" + suffix).c_str() BP_NOINIT) - .def("__init__", BP_MAKE_CONSTRUCTOR((Make_func)&MakeFromArray)); + .def(BP_MAKE_CONSTRUCTOR((Make_func)&MakeFromArray)); typedef void (*rfft_func_type)(const BaseImage&, ImageView >, bool, bool); diff --git a/pysrc/PhotonArray.cpp b/pysrc/PhotonArray.cpp index a2a17ce6c92..1e2ebf60346 100644 --- a/pysrc/PhotonArray.cpp +++ b/pysrc/PhotonArray.cpp @@ -47,7 +47,7 @@ namespace galsim { { bp::class_ pyPhotonArray(GALSIM_COMMA "PhotonArray" BP_NOINIT); pyPhotonArray - .def("__init__", BP_MAKE_CONSTRUCTOR(&construct)) + .def(BP_MAKE_CONSTRUCTOR(&construct)) .def("convolve", &PhotonArray::convolve); WrapTemplates(pyPhotonArray); WrapTemplates(pyPhotonArray); diff --git a/pysrc/PyBind11Helper.h b/pysrc/PyBind11Helper.h index a4206a98e58..43129a847aa 100644 --- a/pysrc/PyBind11Helper.h +++ b/pysrc/PyBind11Helper.h @@ -34,6 +34,8 @@ namespace bp = boost::python; #define PB11_MAKE_MODULE(x) BOOST_PYTHON_MODULE(x) #define PB11_START_MODULE(x) bp::scope x; #define PB11_END_MODULE(x) +#define BP_CONSTRUCTOR(f,x,args...) x* f(args) +#define PB11_PLACEMENT_NEW return new #define TUPLE(args...) bp::tuple #define MAKE_TUPLE bp::make_tuple @@ -51,9 +53,7 @@ namespace bp = boost::python; #define BP_REGISTER(T) bp::register_ptr_to_python< boost::shared_ptr >() #define BOOST_NONCOPYABLE , boost::noncopyable #define BP_BASES(T) , bp::bases -#define BP_MAKE_CONSTRUCTOR(args...) bp::make_constructor(args, bp::default_call_policies()) -#define BP_CONSTRUCTOR(f,x,args...) x* f(args) -#define PB11_PLACEMENT_NEW return new +#define BP_MAKE_CONSTRUCTOR(args...) 
"__init__", bp::make_constructor(args, bp::default_call_policies()) #define CAST bp::extract #define BP_COPY_CONST_REFERENCE bp::return_value_policy() #define def_property_readonly add_property @@ -66,15 +66,11 @@ namespace bp = boost::python; #include namespace bp = pybind11; -#if PYBIND11_VERSION_MAJOR >= 3 || (PYBIND11_VERSION_MAJOR == 2 && PYBIND11_VERSION_MINOR >= 2) - #define PB11_MAKE_MODULE(x) PYBIND11_MODULE(x,x) - #define PB11_START_MODULE(x) - #define PB11_END_MODULE(x) -#else - #define PB11_MAKE_MODULE(x) PYBIND11_PLUGIN(x) - #define PB11_START_MODULE(x) pybind11::module x(#x) - #define PB11_END_MODULE(x) return x.ptr() -#endif +#define PB11_MAKE_MODULE(x) PYBIND11_MODULE(x,x) +#define PB11_START_MODULE(x) +#define PB11_END_MODULE(x) +#define BP_CONSTRUCTOR(f,x,args...) x* f(args) +#define PB11_PLACEMENT_NEW return new #define TUPLE(args...) std::tuple #define MAKE_TUPLE std::make_tuple @@ -92,9 +88,7 @@ namespace bp = pybind11; #define BP_REGISTER(T) #define BOOST_NONCOPYABLE #define BP_BASES(T) , T -#define BP_MAKE_CONSTRUCTOR(args...) args -#define BP_CONSTRUCTOR(f,x,args...) void f(x& instance, args) -#define PB11_PLACEMENT_NEW new (&instance) +#define BP_MAKE_CONSTRUCTOR(args...) 
bp::init(args) #define CAST pybind11::cast #define BP_COPY_CONST_REFERENCE pybind11::return_value_policy::reference diff --git a/pysrc/SBAdd.cpp b/pysrc/SBAdd.cpp index 3153c23c911..c4739f39d04 100644 --- a/pysrc/SBAdd.cpp +++ b/pysrc/SBAdd.cpp @@ -40,7 +40,7 @@ namespace galsim { void pyExportSBAdd(PB11_MODULE& _galsim) { bp::class_(GALSIM_COMMA "SBAdd" BP_NOINIT) - .def("__init__", BP_MAKE_CONSTRUCTOR(&construct)); + .def(BP_MAKE_CONSTRUCTOR(&construct)); } } // namespace galsim diff --git a/pysrc/SBConvolve.cpp b/pysrc/SBConvolve.cpp index 46935f9a249..03db1060d96 100644 --- a/pysrc/SBConvolve.cpp +++ b/pysrc/SBConvolve.cpp @@ -42,7 +42,7 @@ namespace galsim { void pyExportSBConvolve(PB11_MODULE& _galsim) { bp::class_(GALSIM_COMMA "SBConvolve" BP_NOINIT) - .def("__init__", BP_MAKE_CONSTRUCTOR( &construct)); + .def(BP_MAKE_CONSTRUCTOR( &construct)); bp::class_(GALSIM_COMMA "SBAutoConvolve" BP_NOINIT) .def(bp::init()); bp::class_(GALSIM_COMMA "SBAutoCorrelate" BP_NOINIT) diff --git a/pysrc/SBShapelet.cpp b/pysrc/SBShapelet.cpp index a54703b8936..cce78071b7e 100644 --- a/pysrc/SBShapelet.cpp +++ b/pysrc/SBShapelet.cpp @@ -48,7 +48,7 @@ namespace galsim { void pyExportSBShapelet(PB11_MODULE& _galsim) { bp::class_(GALSIM_COMMA "SBShapelet" BP_NOINIT) - .def("__init__", BP_MAKE_CONSTRUCTOR(&construct)); + .def(BP_MAKE_CONSTRUCTOR(&construct)); GALSIM_DOT def("ShapeletFitImage", &fit); } diff --git a/pysrc/Silicon.cpp b/pysrc/Silicon.cpp index aa456349d1b..73ff5f347aa 100644 --- a/pysrc/Silicon.cpp +++ b/pysrc/Silicon.cpp @@ -47,7 +47,7 @@ namespace galsim { void pyExportSilicon(PB11_MODULE& _galsim) { bp::class_ pySilicon(GALSIM_COMMA "Silicon" BP_NOINIT); - pySilicon.def("__init__", BP_MAKE_CONSTRUCTOR(&MakeSilicon)); + pySilicon.def(BP_MAKE_CONSTRUCTOR(&MakeSilicon)); WrapTemplates(pySilicon); WrapTemplates(pySilicon); diff --git a/pysrc/Table.cpp b/pysrc/Table.cpp index 94b82335d5d..5991a4e5f9d 100644 --- a/pysrc/Table.cpp +++ b/pysrc/Table.cpp @@ -89,12 +89,12 @@ 
namespace galsim { void pyExportTable(PB11_MODULE& _galsim) { bp::class_
(GALSIM_COMMA "_LookupTable" BP_NOINIT) - .def("__init__", BP_MAKE_CONSTRUCTOR(&MakeTable)) + .def(BP_MAKE_CONSTRUCTOR(&MakeTable)) .def("interp", &Table::lookup) .def("interpMany", &InterpMany); bp::class_(GALSIM_COMMA "_LookupTable2D" BP_NOINIT) - .def("__init__", BP_MAKE_CONSTRUCTOR(&MakeTable2D)) + .def(BP_MAKE_CONSTRUCTOR(&MakeTable2D)) .def("interp", &Table2D::lookup) .def("interpMany", &InterpMany2D) .def("gradient", &Gradient) diff --git a/requirements.txt b/requirements.txt index 586a3ad1f23..730bbc37706 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,7 @@ future >= 0.15 astropy >= 2.0 pyyaml >= 3.12 pandas >= 0.20 -pybind11 >= 2.0 +pybind11 >= 2.2 pip >= 9.0 cython >= 0.26 setuptools >= 38.2 From f6534ac054f24820dfac7784c7811dc9593685e3 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Thu, 11 Jan 2018 17:35:30 -0500 Subject: [PATCH 046/111] Clean up the PyBind11Helper macros to remove unnecessary ones and give better names to some (#809-pybind11) --- pysrc/Bessel.cpp | 2 +- pysrc/Bounds.cpp | 14 +++--- pysrc/CDModel.cpp | 4 +- pysrc/HSM.cpp | 14 +++--- pysrc/Image.cpp | 18 ++++---- pysrc/Integ.cpp | 14 +++--- pysrc/Interpolant.cpp | 32 +++++++------- pysrc/PhotonArray.cpp | 12 +++--- pysrc/PyBind11Helper.h | 75 +++++++++++++-------------------- pysrc/Random.cpp | 38 ++++++++--------- pysrc/RealGalaxy.cpp | 2 +- pysrc/SBAdd.cpp | 16 +++---- pysrc/SBAiry.cpp | 6 +-- pysrc/SBBox.cpp | 10 ++--- pysrc/SBConvolve.cpp | 28 ++++++------ pysrc/SBDeconvolve.cpp | 6 +-- pysrc/SBDeltaFunction.cpp | 6 +-- pysrc/SBExponential.cpp | 6 +-- pysrc/SBFourierSqrt.cpp | 6 +-- pysrc/SBGaussian.cpp | 6 +-- pysrc/SBInclinedExponential.cpp | 6 +-- pysrc/SBInclinedSersic.cpp | 6 +-- pysrc/SBInterpolatedImage.cpp | 12 +++--- pysrc/SBKolmogorov.cpp | 6 +-- pysrc/SBMoffat.cpp | 6 +-- pysrc/SBProfile.cpp | 8 ++-- pysrc/SBSersic.cpp | 6 +-- pysrc/SBShapelet.cpp | 12 +++--- pysrc/SBSpergel.cpp | 6 +-- pysrc/SBTransform.cpp | 6 +-- pysrc/Silicon.cpp | 26 ++++++------ 
pysrc/Table.cpp | 22 +++++----- pysrc/WCS.cpp | 10 ++--- pysrc/module.cpp | 70 +++++++++++++++--------------- 34 files changed, 248 insertions(+), 269 deletions(-) diff --git a/pysrc/Bessel.cpp b/pysrc/Bessel.cpp index f13789a84f9..1d2cbd554d5 100644 --- a/pysrc/Bessel.cpp +++ b/pysrc/Bessel.cpp @@ -24,7 +24,7 @@ namespace galsim { namespace math { - void pyExportBessel(PB11_MODULE& _galsim) + void pyExportBessel(PY_MODULE& _galsim) { GALSIM_DOT def("j0_root", &getBesselRoot0); GALSIM_DOT def("j0", &j0); diff --git a/pysrc/Bounds.cpp b/pysrc/Bounds.cpp index c728b5525d1..a38845c521f 100644 --- a/pysrc/Bounds.cpp +++ b/pysrc/Bounds.cpp @@ -23,26 +23,26 @@ namespace galsim { template - static void WrapPosition(PB11_MODULE& _galsim, const std::string& suffix) + static void WrapPosition(PY_MODULE& _galsim, const std::string& suffix) { - bp::class_ >(GALSIM_COMMA ("Position" + suffix).c_str() BP_NOINIT) - .def(bp::init()) + py::class_ >(GALSIM_COMMA ("Position" + suffix).c_str() BP_NOINIT) + .def(py::init()) .def_readonly("x", &Position::x) .def_readonly("y", &Position::y); } template - static void WrapBounds(PB11_MODULE& _galsim, const std::string& suffix) + static void WrapBounds(PY_MODULE& _galsim, const std::string& suffix) { - bp::class_< Bounds >(GALSIM_COMMA ("Bounds" + suffix).c_str() BP_NOINIT) - .def(bp::init()) + py::class_< Bounds >(GALSIM_COMMA ("Bounds" + suffix).c_str() BP_NOINIT) + .def(py::init()) .def_property_readonly("xmin", &Bounds::getXMin) .def_property_readonly("xmax", &Bounds::getXMax) .def_property_readonly("ymin", &Bounds::getYMin) .def_property_readonly("ymax", &Bounds::getYMax); } - void pyExportBounds(PB11_MODULE& _galsim) + void pyExportBounds(PY_MODULE& _galsim) { WrapPosition(_galsim, "D"); WrapPosition(_galsim, "I"); diff --git a/pysrc/CDModel.cpp b/pysrc/CDModel.cpp index f412c866c88..2806583fce8 100644 --- a/pysrc/CDModel.cpp +++ b/pysrc/CDModel.cpp @@ -23,7 +23,7 @@ namespace galsim { template - static void WrapTemplates(PB11_MODULE& 
_galsim) + static void WrapTemplates(PY_MODULE& _galsim) { typedef void (*ApplyCD_func)(ImageView& , const BaseImage& , const BaseImage& , const BaseImage& , @@ -32,7 +32,7 @@ namespace galsim { GALSIM_DOT def("_ApplyCD", ApplyCD_func(&ApplyCD)); } - void pyExportCDModel(PB11_MODULE& _galsim) + void pyExportCDModel(PY_MODULE& _galsim) { WrapTemplates(_galsim); WrapTemplates(_galsim); diff --git a/pysrc/HSM.cpp b/pysrc/HSM.cpp index b372d93c978..c0d9b646906 100644 --- a/pysrc/HSM.cpp +++ b/pysrc/HSM.cpp @@ -23,7 +23,7 @@ namespace galsim { namespace hsm { - static BP_CONSTRUCTOR(ShapeData_init, ShapeData, + static ShapeData* ShapeData_init( const galsim::Bounds& image_bounds, int moments_status, float observed_e1, float observed_e2, float moments_sigma, float moments_amp, @@ -62,7 +62,7 @@ namespace hsm { } template - static void WrapTemplates(PB11_MODULE& _galsim) + static void WrapTemplates(PY_MODULE& _galsim) { typedef void (*FAM_func)(ShapeData&, const BaseImage&, const BaseImage&, double, double, Position, bool, const HSMParams&); @@ -75,15 +75,15 @@ namespace hsm { GALSIM_DOT def("_EstimateShearView", ESH_func(&EstimateShearView)); }; - void pyExportHSM(PB11_MODULE& _galsim) + void pyExportHSM(PY_MODULE& _galsim) { - bp::class_(GALSIM_COMMA "HSMParams" BP_NOINIT) - .def(bp::init< + py::class_(GALSIM_COMMA "HSMParams" BP_NOINIT) + .def(py::init< double, double, double, int, int, double, long, long, double, double, double, int, double, double, double>()); - bp::class_(GALSIM_COMMA "ShapeData" BP_NOINIT) - .def(BP_MAKE_CONSTRUCTOR(&ShapeData_init)) + py::class_(GALSIM_COMMA "ShapeData" BP_NOINIT) + .def(PY_INIT(&ShapeData_init)) .def_readonly("image_bounds", &ShapeData::image_bounds) .def_readonly("moments_status", &ShapeData::moments_status) .def_readonly("observed_e1", &ShapeData::observed_e1) diff --git a/pysrc/Image.cpp b/pysrc/Image.cpp index 73e895d4864..4a2af958343 100644 --- a/pysrc/Image.cpp +++ b/pysrc/Image.cpp @@ -24,24 +24,24 @@ namespace galsim { 
template - static BP_CONSTRUCTOR(MakeFromArray, ImageView, - size_t idata, int step, int stride, const Bounds& bounds) + static ImageView* MakeFromArray( + size_t idata, int step, int stride, const Bounds& bounds) { T* data = reinterpret_cast(idata); shared_ptr owner; - PB11_PLACEMENT_NEW ImageView(data, owner, step, stride, bounds); + return new ImageView(data, owner, step, stride, bounds); } template - static void WrapImage(PB11_MODULE& _galsim, const std::string& suffix) + static void WrapImage(PY_MODULE& _galsim, const std::string& suffix) { - bp::class_ BOOST_NONCOPYABLE>( + py::class_ BP_NONCOPYABLE>( GALSIM_COMMA ("BaseImage" + suffix).c_str() BP_NOINIT); - typedef BP_CONSTRUCTOR((*Make_func), ImageView, size_t, int, int, const Bounds&); - bp::class_ BP_BASES(BaseImage)>( + typedef ImageView* (*Make_func)(size_t, int, int, const Bounds&); + py::class_, BP_BASES(BaseImage)>( GALSIM_COMMA ("ImageView" + suffix).c_str() BP_NOINIT) - .def(BP_MAKE_CONSTRUCTOR((Make_func)&MakeFromArray)); + .def(PY_INIT((Make_func)&MakeFromArray)); typedef void (*rfft_func_type)(const BaseImage&, ImageView >, bool, bool); @@ -59,7 +59,7 @@ namespace galsim { GALSIM_DOT def("invertImage", invert_func_type(&invertImage)); } - void pyExportImage(PB11_MODULE& _galsim) + void pyExportImage(PY_MODULE& _galsim) { WrapImage(_galsim, "US"); WrapImage(_galsim, "UI"); diff --git a/pysrc/Integ.cpp b/pysrc/Integ.cpp index 9f9e2a22aee..975dc3c07cb 100644 --- a/pysrc/Integ.cpp +++ b/pysrc/Integ.cpp @@ -29,27 +29,27 @@ namespace integ { public std::unary_function { public: - PyFunc(const bp::object& func) : _func(func) {} + PyFunc(const py::object& func) : _func(func) {} double operator()(double x) const - { return CAST(_func(x)); } + { return PY_CAST(_func(x)); } private: - const bp::object& _func; + const py::object& _func; }; // Integrate a python function using int1d. 
- bp::tuple PyInt1d(const bp::object& func, double min, double max, + py::tuple PyInt1d(const py::object& func, double min, double max, double rel_err=DEFRELERR, double abs_err=DEFABSERR) { PyFunc pyfunc(func); try { double res = int1d(pyfunc, min, max, rel_err, abs_err); - return bp::make_tuple(true, res); + return py::make_tuple(true, res); } catch (IntFailure& e) { - return bp::make_tuple(false, e.what()); + return py::make_tuple(false, e.what()); } } - void pyExportInteg(PB11_MODULE& _galsim) + void pyExportInteg(PY_MODULE& _galsim) { GALSIM_DOT def("PyInt1d", &PyInt1d); diff --git a/pysrc/Interpolant.cpp b/pysrc/Interpolant.cpp index 914a531412d..eb07f0556b0 100644 --- a/pysrc/Interpolant.cpp +++ b/pysrc/Interpolant.cpp @@ -23,31 +23,31 @@ namespace galsim { - void pyExportInterpolant(PB11_MODULE& _galsim) + void pyExportInterpolant(PY_MODULE& _galsim) { - bp::class_(GALSIM_COMMA "Interpolant" BP_NOINIT); + py::class_(GALSIM_COMMA "Interpolant" BP_NOINIT); - bp::class_(GALSIM_COMMA "Delta" BP_NOINIT) - .def(bp::init()); + py::class_(GALSIM_COMMA "Delta" BP_NOINIT) + .def(py::init()); - bp::class_(GALSIM_COMMA "Nearest" BP_NOINIT) - .def(bp::init()); + py::class_(GALSIM_COMMA "Nearest" BP_NOINIT) + .def(py::init()); - bp::class_(GALSIM_COMMA "SincInterpolant" BP_NOINIT) - .def(bp::init()); + py::class_(GALSIM_COMMA "SincInterpolant" BP_NOINIT) + .def(py::init()); - bp::class_(GALSIM_COMMA "Lanczos" BP_NOINIT) - .def(bp::init()) + py::class_(GALSIM_COMMA "Lanczos" BP_NOINIT) + .def(py::init()) .def("urange", &Lanczos::urange); - bp::class_(GALSIM_COMMA "Linear" BP_NOINIT) - .def(bp::init()); + py::class_(GALSIM_COMMA "Linear" BP_NOINIT) + .def(py::init()); - bp::class_(GALSIM_COMMA "Cubic" BP_NOINIT) - .def(bp::init()); + py::class_(GALSIM_COMMA "Cubic" BP_NOINIT) + .def(py::init()); - bp::class_(GALSIM_COMMA "Quintic" BP_NOINIT) - .def(bp::init()); + py::class_(GALSIM_COMMA "Quintic" BP_NOINIT) + .def(py::init()); } } // namespace galsim diff --git 
a/pysrc/PhotonArray.cpp b/pysrc/PhotonArray.cpp index 1e2ebf60346..6036d7b202d 100644 --- a/pysrc/PhotonArray.cpp +++ b/pysrc/PhotonArray.cpp @@ -31,8 +31,8 @@ namespace galsim { &PhotonArray::setFrom); } - static BP_CONSTRUCTOR(construct, PhotonArray, int N, size_t ix, size_t iy, size_t iflux, - size_t idxdz, size_t idydz, size_t iwave, bool is_corr) + static PhotonArray* construct(int N, size_t ix, size_t iy, size_t iflux, + size_t idxdz, size_t idydz, size_t iwave, bool is_corr) { double *x = reinterpret_cast(ix); double *y = reinterpret_cast(iy); @@ -40,14 +40,14 @@ namespace galsim { double *dxdz = reinterpret_cast(idxdz); double *dydz = reinterpret_cast(idydz); double *wave = reinterpret_cast(iwave); - PB11_PLACEMENT_NEW PhotonArray(N, x, y, flux, dxdz, dydz, wave, is_corr); + return new PhotonArray(N, x, y, flux, dxdz, dydz, wave, is_corr); } - void pyExportPhotonArray(PB11_MODULE& _galsim) + void pyExportPhotonArray(PY_MODULE& _galsim) { - bp::class_ pyPhotonArray(GALSIM_COMMA "PhotonArray" BP_NOINIT); + py::class_ pyPhotonArray(GALSIM_COMMA "PhotonArray" BP_NOINIT); pyPhotonArray - .def(BP_MAKE_CONSTRUCTOR(&construct)) + .def(PY_INIT(&construct)) .def("convolve", &PhotonArray::convolve); WrapTemplates(pyPhotonArray); WrapTemplates(pyPhotonArray); diff --git a/pysrc/PyBind11Helper.h b/pysrc/PyBind11Helper.h index 43129a847aa..8c8d73fcbe8 100644 --- a/pysrc/PyBind11Helper.h +++ b/pysrc/PyBind11Helper.h @@ -29,34 +29,31 @@ #define BOOST_NO_CXX11_SMART_PTR #include #include -namespace bp = boost::python; +namespace py = boost::python; -#define PB11_MAKE_MODULE(x) BOOST_PYTHON_MODULE(x) -#define PB11_START_MODULE(x) bp::scope x; -#define PB11_END_MODULE(x) -#define BP_CONSTRUCTOR(f,x,args...) x* f(args) -#define PB11_PLACEMENT_NEW return new +// Boost Python and PyBind11 work fairly similarly. There are a few differences though. +// In some cases, pybind11 simplified things, or changed how some things work. 
So these +// macros allow us to write code that works for either boost python or pybind11. -#define TUPLE(args...) bp::tuple -#define MAKE_TUPLE bp::make_tuple +// First some things where the boost equivalent of some pybind11 function is different: +#define PYBIND11_MODULE(x,y) BOOST_PYTHON_MODULE(x) +#define PY_MODULE py::scope +#define PY_CAST py::extract +#define PY_INIT(args...) "__init__", py::make_constructor(args, py::default_call_policies()) +#define def_property_readonly add_property -#define GALSIM_DOT bp:: +// PyBind11 requires the module object to be written in some places where boost python does not. +// Our module name is always _galsim, so where we would write _galsim. or _galsim, we write these +// instead so in boost python, the module name goes away. +#define GALSIM_DOT py:: #define GALSIM_COMMA -#define PB11_MODULE bp::scope -#define BP_HANDLE bp::handle<> -#define BP_THROW bp::throw_error_already_set() -#define BP_NOINIT , bp::no_init -#define ENABLE_PICKLING .enable_pickling() -#define PB11_CAST(x) x -#define BP_OTHER(T) bp::other() -#define ADD_PROPERTY(name, func) add_property(name, func) -#define BP_REGISTER(T) bp::register_ptr_to_python< boost::shared_ptr >() -#define BOOST_NONCOPYABLE , boost::noncopyable -#define BP_BASES(T) , bp::bases -#define BP_MAKE_CONSTRUCTOR(args...) "__init__", bp::make_constructor(args, bp::default_call_policies()) -#define CAST bp::extract -#define BP_COPY_CONST_REFERENCE bp::return_value_policy() -#define def_property_readonly add_property + +// Finally, there are some things that are only needed for boost python. These are not required +// at all for pybind11. 
+#define BP_SCOPE(x) py::scope x; +#define BP_NOINIT , py::no_init +#define BP_NONCOPYABLE , boost::noncopyable +#define BP_BASES(T) py::bases #else @@ -64,33 +61,19 @@ namespace bp = boost::python; #include #include #include -namespace bp = pybind11; +namespace py = pybind11; -#define PB11_MAKE_MODULE(x) PYBIND11_MODULE(x,x) -#define PB11_START_MODULE(x) -#define PB11_END_MODULE(x) -#define BP_CONSTRUCTOR(f,x,args...) x* f(args) -#define PB11_PLACEMENT_NEW return new - -#define TUPLE(args...) std::tuple -#define MAKE_TUPLE std::make_tuple +#define PY_MODULE py::module +#define PY_CAST py::cast +#define PY_INIT(args...) py::init(args) #define GALSIM_DOT _galsim. #define GALSIM_COMMA _galsim, -#define PB11_MODULE pybind11::module -#define BP_HANDLE pybind11::handle -#define BP_THROW throw pybind11::error_already_set() + +#define BP_SCOPE(x) #define BP_NOINIT -#define ENABLE_PICKLING -#define PB11_CAST(x) pybind11::cast(x) -#define BP_OTHER(T) T() -#define ADD_PROPERTY(name, func) def_property_readonly(name, func) -#define BP_REGISTER(T) -#define BOOST_NONCOPYABLE -#define BP_BASES(T) , T -#define BP_MAKE_CONSTRUCTOR(args...) 
bp::init(args) -#define CAST pybind11::cast -#define BP_COPY_CONST_REFERENCE pybind11::return_value_policy::reference +#define BP_NONCOPYABLE +#define BP_BASES(T) T #endif diff --git a/pysrc/Random.cpp b/pysrc/Random.cpp index bf17db5b26f..47cf87aee8e 100644 --- a/pysrc/Random.cpp +++ b/pysrc/Random.cpp @@ -46,12 +46,12 @@ namespace galsim { rng.generateFromExpectation(N, data); } - void pyExportRandom(PB11_MODULE& _galsim) + void pyExportRandom(PY_MODULE& _galsim) { - bp::class_ (GALSIM_COMMA "BaseDeviateImpl" BP_NOINIT) - .def(bp::init()) - .def(bp::init()) - .def(bp::init()) + py::class_ (GALSIM_COMMA "BaseDeviateImpl" BP_NOINIT) + .def(py::init()) + .def(py::init()) + .def(py::init()) .def("seed", (void (BaseDeviate::*) (long) )&BaseDeviate::seed) .def("reset", (void (BaseDeviate::*) (const BaseDeviate&) )&BaseDeviate::reset) .def("clearCache", &BaseDeviate::clearCache) @@ -61,41 +61,41 @@ namespace galsim { .def("generate", &Generate) .def("add_generate", &AddGenerate); - bp::class_( + py::class_( GALSIM_COMMA "UniformDeviateImpl" BP_NOINIT) - .def(bp::init()) + .def(py::init()) .def("generate1", &UniformDeviate::generate1); - bp::class_( + py::class_( GALSIM_COMMA "GaussianDeviateImpl" BP_NOINIT) - .def(bp::init()) + .def(py::init()) .def("generate1", &GaussianDeviate::generate1) .def("generate_from_variance", &GenerateFromVariance); - bp::class_( + py::class_( GALSIM_COMMA "BinomialDeviateImpl" BP_NOINIT) - .def(bp::init()) + .def(py::init()) .def("generate1", &BinomialDeviate::generate1); - bp::class_( + py::class_( GALSIM_COMMA "PoissonDeviateImpl" BP_NOINIT) - .def(bp::init()) + .def(py::init()) .def("generate1", &PoissonDeviate::generate1) .def("generate_from_expectation", &GenerateFromExpectation); - bp::class_( + py::class_( GALSIM_COMMA "WeibullDeviateImpl" BP_NOINIT) - .def(bp::init()) + .def(py::init()) .def("generate1", &WeibullDeviate::generate1); - bp::class_( + py::class_( GALSIM_COMMA "GammaDeviateImpl" BP_NOINIT) - .def(bp::init()) + 
.def(py::init()) .def("generate1", &GammaDeviate::generate1); - bp::class_( + py::class_( GALSIM_COMMA "Chi2DeviateImpl" BP_NOINIT) - .def(bp::init()) + .def(py::init()) .def("generate1", &Chi2Deviate::generate1); } diff --git a/pysrc/RealGalaxy.cpp b/pysrc/RealGalaxy.cpp index ead57a9bf77..17dfdd2c1a9 100644 --- a/pysrc/RealGalaxy.cpp +++ b/pysrc/RealGalaxy.cpp @@ -35,7 +35,7 @@ namespace galsim { ComputeCRGCoefficients(coef, Sigma, w, kimgs, psf, nsed, nband, nkx, nky); }; - void pyExportRealGalaxy(PB11_MODULE& _galsim) { + void pyExportRealGalaxy(PY_MODULE& _galsim) { GALSIM_DOT def("ComputeCRGCoefficients", &CallComputeCRGCoefficients); } diff --git a/pysrc/SBAdd.cpp b/pysrc/SBAdd.cpp index c4739f39d04..78bc6a86d53 100644 --- a/pysrc/SBAdd.cpp +++ b/pysrc/SBAdd.cpp @@ -23,24 +23,24 @@ namespace galsim { #ifdef USE_BOOST - static BP_CONSTRUCTOR(construct, SBAdd, const bp::object& iterable, GSParams gsparams) + static SBAdd* construct(const py::object& iterable, GSParams gsparams) { - bp::stl_input_iterator iter(iterable), end; + py::stl_input_iterator iter(iterable), end; std::list plist; for(; iter != end; ++iter) plist.push_back(*iter); - PB11_PLACEMENT_NEW SBAdd(plist, gsparams); + return new SBAdd(plist, gsparams); } #else - static BP_CONSTRUCTOR(construct, SBAdd, const std::list& plist, GSParams gsparams) + static SBAdd* construct(const std::list& plist, GSParams gsparams) { - PB11_PLACEMENT_NEW SBAdd(plist, gsparams); + return new SBAdd(plist, gsparams); } #endif - void pyExportSBAdd(PB11_MODULE& _galsim) + void pyExportSBAdd(PY_MODULE& _galsim) { - bp::class_(GALSIM_COMMA "SBAdd" BP_NOINIT) - .def(BP_MAKE_CONSTRUCTOR(&construct)); + py::class_(GALSIM_COMMA "SBAdd" BP_NOINIT) + .def(PY_INIT(&construct)); } } // namespace galsim diff --git a/pysrc/SBAiry.cpp b/pysrc/SBAiry.cpp index 4025a035ba1..27ec96aa2d8 100644 --- a/pysrc/SBAiry.cpp +++ b/pysrc/SBAiry.cpp @@ -22,10 +22,10 @@ namespace galsim { - void pyExportSBAiry(PB11_MODULE& _galsim) + void 
pyExportSBAiry(PY_MODULE& _galsim) { - bp::class_(GALSIM_COMMA "SBAiry" BP_NOINIT) - .def(bp::init()); + py::class_(GALSIM_COMMA "SBAiry" BP_NOINIT) + .def(py::init()); } } // namespace galsim diff --git a/pysrc/SBBox.cpp b/pysrc/SBBox.cpp index 1a4f37faa52..0c0acc89b0c 100644 --- a/pysrc/SBBox.cpp +++ b/pysrc/SBBox.cpp @@ -22,12 +22,12 @@ namespace galsim { - void pyExportSBBox(PB11_MODULE& _galsim) + void pyExportSBBox(PY_MODULE& _galsim) { - bp::class_(GALSIM_COMMA "SBBox" BP_NOINIT) - .def(bp::init()); - bp::class_(GALSIM_COMMA "SBTopHat" BP_NOINIT) - .def(bp::init()); + py::class_(GALSIM_COMMA "SBBox" BP_NOINIT) + .def(py::init()); + py::class_(GALSIM_COMMA "SBTopHat" BP_NOINIT) + .def(py::init()); } } // namespace galsim diff --git a/pysrc/SBConvolve.cpp b/pysrc/SBConvolve.cpp index 03db1060d96..fd35d4fc9a7 100644 --- a/pysrc/SBConvolve.cpp +++ b/pysrc/SBConvolve.cpp @@ -23,30 +23,30 @@ namespace galsim { #ifdef USE_BOOST - static BP_CONSTRUCTOR(construct, SBConvolve, - const bp::object& iterable, bool real_space, GSParams gsparams) + static SBConvolve* construct( + const py::object& iterable, bool real_space, GSParams gsparams) { - bp::stl_input_iterator iter(iterable), end; + py::stl_input_iterator iter(iterable), end; std::list plist; for(; iter != end; ++iter) plist.push_back(*iter); - PB11_PLACEMENT_NEW SBConvolve(plist, real_space, gsparams); + return new SBConvolve(plist, real_space, gsparams); } #else - static BP_CONSTRUCTOR(construct, SBConvolve, - const std::list& plist, bool real_space, GSParams gsparams) + static SBConvolve* construct( + const std::list& plist, bool real_space, GSParams gsparams) { - PB11_PLACEMENT_NEW SBConvolve(plist, real_space, gsparams); + return new SBConvolve(plist, real_space, gsparams); } #endif - void pyExportSBConvolve(PB11_MODULE& _galsim) + void pyExportSBConvolve(PY_MODULE& _galsim) { - bp::class_(GALSIM_COMMA "SBConvolve" BP_NOINIT) - .def(BP_MAKE_CONSTRUCTOR( &construct)); - bp::class_(GALSIM_COMMA "SBAutoConvolve" 
BP_NOINIT) - .def(bp::init()); - bp::class_(GALSIM_COMMA "SBAutoCorrelate" BP_NOINIT) - .def(bp::init()); + py::class_(GALSIM_COMMA "SBConvolve" BP_NOINIT) + .def(PY_INIT(&construct)); + py::class_(GALSIM_COMMA "SBAutoConvolve" BP_NOINIT) + .def(py::init()); + py::class_(GALSIM_COMMA "SBAutoCorrelate" BP_NOINIT) + .def(py::init()); } } // namespace galsim diff --git a/pysrc/SBDeconvolve.cpp b/pysrc/SBDeconvolve.cpp index 74729b76514..706f1c168c7 100644 --- a/pysrc/SBDeconvolve.cpp +++ b/pysrc/SBDeconvolve.cpp @@ -22,10 +22,10 @@ namespace galsim { - void pyExportSBDeconvolve(PB11_MODULE& _galsim) + void pyExportSBDeconvolve(PY_MODULE& _galsim) { - bp::class_(GALSIM_COMMA "SBDeconvolve" BP_NOINIT) - .def(bp::init()); + py::class_(GALSIM_COMMA "SBDeconvolve" BP_NOINIT) + .def(py::init()); } } // namespace galsim diff --git a/pysrc/SBDeltaFunction.cpp b/pysrc/SBDeltaFunction.cpp index 655d0b5d836..8a990985c03 100644 --- a/pysrc/SBDeltaFunction.cpp +++ b/pysrc/SBDeltaFunction.cpp @@ -22,10 +22,10 @@ namespace galsim { - void pyExportSBDeltaFunction(PB11_MODULE& _galsim) + void pyExportSBDeltaFunction(PY_MODULE& _galsim) { - bp::class_(GALSIM_COMMA "SBDeltaFunction" BP_NOINIT) - .def(bp::init()); + py::class_(GALSIM_COMMA "SBDeltaFunction" BP_NOINIT) + .def(py::init()); } } // namespace galsim diff --git a/pysrc/SBExponential.cpp b/pysrc/SBExponential.cpp index c40b081a1a0..7ec40eebd00 100644 --- a/pysrc/SBExponential.cpp +++ b/pysrc/SBExponential.cpp @@ -22,10 +22,10 @@ namespace galsim { - void pyExportSBExponential(PB11_MODULE& _galsim) + void pyExportSBExponential(PY_MODULE& _galsim) { - bp::class_(GALSIM_COMMA "SBExponential" BP_NOINIT) - .def(bp::init()); + py::class_(GALSIM_COMMA "SBExponential" BP_NOINIT) + .def(py::init()); } } // namespace galsim diff --git a/pysrc/SBFourierSqrt.cpp b/pysrc/SBFourierSqrt.cpp index df001b771be..3e6109818b7 100644 --- a/pysrc/SBFourierSqrt.cpp +++ b/pysrc/SBFourierSqrt.cpp @@ -22,10 +22,10 @@ namespace galsim { - void 
pyExportSBFourierSqrt(PB11_MODULE& _galsim) + void pyExportSBFourierSqrt(PY_MODULE& _galsim) { - bp::class_(GALSIM_COMMA "SBFourierSqrt" BP_NOINIT) - .def(bp::init()); + py::class_(GALSIM_COMMA "SBFourierSqrt" BP_NOINIT) + .def(py::init()); } } // namespace galsim diff --git a/pysrc/SBGaussian.cpp b/pysrc/SBGaussian.cpp index 11cccd321ee..e7fdf297496 100644 --- a/pysrc/SBGaussian.cpp +++ b/pysrc/SBGaussian.cpp @@ -22,10 +22,10 @@ namespace galsim { - void pyExportSBGaussian(PB11_MODULE& _galsim) + void pyExportSBGaussian(PY_MODULE& _galsim) { - bp::class_(GALSIM_COMMA "SBGaussian" BP_NOINIT) - .def(bp::init()); + py::class_(GALSIM_COMMA "SBGaussian" BP_NOINIT) + .def(py::init()); } } // namespace galsim diff --git a/pysrc/SBInclinedExponential.cpp b/pysrc/SBInclinedExponential.cpp index 459ce8eb8b9..e978e77ede8 100644 --- a/pysrc/SBInclinedExponential.cpp +++ b/pysrc/SBInclinedExponential.cpp @@ -22,11 +22,11 @@ namespace galsim { - void pyExportSBInclinedExponential(PB11_MODULE& _galsim) + void pyExportSBInclinedExponential(PY_MODULE& _galsim) { - bp::class_( + py::class_( GALSIM_COMMA "SBInclinedExponential" BP_NOINIT) - .def(bp::init()); + .def(py::init()); } } // namespace galsim diff --git a/pysrc/SBInclinedSersic.cpp b/pysrc/SBInclinedSersic.cpp index 8476adb9501..ddc86f78005 100644 --- a/pysrc/SBInclinedSersic.cpp +++ b/pysrc/SBInclinedSersic.cpp @@ -22,11 +22,11 @@ namespace galsim { - void pyExportSBInclinedSersic(PB11_MODULE& _galsim) + void pyExportSBInclinedSersic(PY_MODULE& _galsim) { - bp::class_( + py::class_( GALSIM_COMMA "SBInclinedSersic" BP_NOINIT) - .def(bp::init()); + .def(py::init()); } } // namespace galsim diff --git a/pysrc/SBInterpolatedImage.cpp b/pysrc/SBInterpolatedImage.cpp index ebb28f36ec1..3103fd79a40 100644 --- a/pysrc/SBInterpolatedImage.cpp +++ b/pysrc/SBInterpolatedImage.cpp @@ -23,9 +23,9 @@ namespace galsim { template - static void WrapTemplates(PB11_MODULE& _galsim, W& wrapper) + static void WrapTemplates(PY_MODULE& _galsim, 
W& wrapper) { - wrapper.def(bp::init &, const Bounds&, const Bounds&, + wrapper.def(py::init &, const Bounds&, const Bounds&, const Interpolant&, const Interpolant&, double, double, GSParams>()); @@ -33,19 +33,19 @@ namespace galsim { GALSIM_DOT def("CalculateSizeContainingFlux", cscf_func_type(&CalculateSizeContainingFlux)); } - void pyExportSBInterpolatedImage(PB11_MODULE& _galsim) + void pyExportSBInterpolatedImage(PY_MODULE& _galsim) { - bp::class_ pySBInterpolatedImage( + py::class_ pySBInterpolatedImage( GALSIM_COMMA "SBInterpolatedImage" BP_NOINIT); pySBInterpolatedImage .def("calculateMaxK", &SBInterpolatedImage::calculateMaxK); WrapTemplates(_galsim, pySBInterpolatedImage); WrapTemplates(_galsim, pySBInterpolatedImage); - bp::class_ pySBInterpolatedKImage( + py::class_ pySBInterpolatedKImage( GALSIM_COMMA "SBInterpolatedKImage" BP_NOINIT); pySBInterpolatedKImage - .def(bp::init > &, + .def(py::init > &, double, const Interpolant&, GSParams>()); } diff --git a/pysrc/SBKolmogorov.cpp b/pysrc/SBKolmogorov.cpp index 0dc7f6d7aea..25514954863 100644 --- a/pysrc/SBKolmogorov.cpp +++ b/pysrc/SBKolmogorov.cpp @@ -22,10 +22,10 @@ namespace galsim { - void pyExportSBKolmogorov(PB11_MODULE& _galsim) + void pyExportSBKolmogorov(PY_MODULE& _galsim) { - bp::class_(GALSIM_COMMA "SBKolmogorov" BP_NOINIT) - .def(bp::init()); + py::class_(GALSIM_COMMA "SBKolmogorov" BP_NOINIT) + .def(py::init()); } } // namespace galsim diff --git a/pysrc/SBMoffat.cpp b/pysrc/SBMoffat.cpp index 928121fa67c..50c261a8459 100644 --- a/pysrc/SBMoffat.cpp +++ b/pysrc/SBMoffat.cpp @@ -22,10 +22,10 @@ namespace galsim { - void pyExportSBMoffat(PB11_MODULE& _galsim) + void pyExportSBMoffat(PY_MODULE& _galsim) { - bp::class_(GALSIM_COMMA "SBMoffat" BP_NOINIT) - .def(bp::init()) + py::class_(GALSIM_COMMA "SBMoffat" BP_NOINIT) + .def(py::init()) .def("getHalfLightRadius", &SBMoffat::getHalfLightRadius); GALSIM_DOT def("MoffatCalculateSRFromHLR", &MoffatCalculateScaleRadiusFromHLR); diff --git 
a/pysrc/SBProfile.cpp b/pysrc/SBProfile.cpp index d0b8f84877a..8dc44ccc0b7 100644 --- a/pysrc/SBProfile.cpp +++ b/pysrc/SBProfile.cpp @@ -31,14 +31,14 @@ namespace galsim { &SBProfile::drawK); } - void pyExportSBProfile(PB11_MODULE& _galsim) + void pyExportSBProfile(PY_MODULE& _galsim) { - bp::class_(GALSIM_COMMA "GSParams" BP_NOINIT) - .def(bp::init< + py::class_(GALSIM_COMMA "GSParams" BP_NOINIT) + .def(py::init< int, int, double, double, double, double, double, double, double, double, double, double, double, double, int, double>()); - bp::class_ pySBProfile(GALSIM_COMMA "SBProfile" BP_NOINIT); + py::class_ pySBProfile(GALSIM_COMMA "SBProfile" BP_NOINIT); pySBProfile .def("xValue", &SBProfile::xValue) .def("kValue", &SBProfile::kValue) diff --git a/pysrc/SBSersic.cpp b/pysrc/SBSersic.cpp index 60c43dd41aa..67bf388c31d 100644 --- a/pysrc/SBSersic.cpp +++ b/pysrc/SBSersic.cpp @@ -22,10 +22,10 @@ namespace galsim { - void pyExportSBSersic(PB11_MODULE& _galsim) + void pyExportSBSersic(PY_MODULE& _galsim) { - bp::class_(GALSIM_COMMA "SBSersic" BP_NOINIT) - .def(bp::init()); + py::class_(GALSIM_COMMA "SBSersic" BP_NOINIT) + .def(py::init()); GALSIM_DOT def("SersicTruncatedScale", &SersicTruncatedScale); GALSIM_DOT def("SersicIntegratedFlux", &SersicIntegratedFlux); diff --git a/pysrc/SBShapelet.cpp b/pysrc/SBShapelet.cpp index cce78071b7e..1432088b101 100644 --- a/pysrc/SBShapelet.cpp +++ b/pysrc/SBShapelet.cpp @@ -34,21 +34,21 @@ namespace galsim { for (int i=0; i(idata); int size = PQIndex::size(order); VectorXd v(size); for (int i=0; i(GALSIM_COMMA "SBShapelet" BP_NOINIT) - .def(BP_MAKE_CONSTRUCTOR(&construct)); + py::class_(GALSIM_COMMA "SBShapelet" BP_NOINIT) + .def(PY_INIT(&construct)); GALSIM_DOT def("ShapeletFitImage", &fit); } diff --git a/pysrc/SBSpergel.cpp b/pysrc/SBSpergel.cpp index 52dda21910d..13cdb99d48b 100644 --- a/pysrc/SBSpergel.cpp +++ b/pysrc/SBSpergel.cpp @@ -22,10 +22,10 @@ namespace galsim { - void pyExportSBSpergel(PB11_MODULE& _galsim) + void 
pyExportSBSpergel(PY_MODULE& _galsim) { - bp::class_(GALSIM_COMMA "SBSpergel" BP_NOINIT) - .def(bp::init()) + py::class_(GALSIM_COMMA "SBSpergel" BP_NOINIT) + .def(py::init()) .def("calculateIntegratedFlux", &SBSpergel::calculateIntegratedFlux) .def("calculateFluxRadius", &SBSpergel::calculateFluxRadius); diff --git a/pysrc/SBTransform.cpp b/pysrc/SBTransform.cpp index 94075f301e2..c55463944b2 100644 --- a/pysrc/SBTransform.cpp +++ b/pysrc/SBTransform.cpp @@ -22,10 +22,10 @@ namespace galsim { - void pyExportSBTransform(PB11_MODULE& _galsim) + void pyExportSBTransform(PY_MODULE& _galsim) { - bp::class_(GALSIM_COMMA "SBTransform" BP_NOINIT) - .def(bp::init(GALSIM_COMMA "SBTransform" BP_NOINIT) + .def(py::init, double, GSParams>()); } diff --git a/pysrc/Silicon.cpp b/pysrc/Silicon.cpp index 73ff5f347aa..add5a60dc18 100644 --- a/pysrc/Silicon.cpp +++ b/pysrc/Silicon.cpp @@ -30,24 +30,24 @@ namespace galsim { wrapper.def("accumulate", (accumulate_fn)&Silicon::accumulate); } - static BP_CONSTRUCTOR(MakeSilicon, Silicon, - int NumVertices, double NumElect, int Nx, int Ny, int QDist, - double Nrecalc, double DiffStep, double PixelSize, - double SensorThickness, size_t idata, - const Table& treeRingTable, - const Position& treeRingCenter, - const Table& abs_length_table) + static Silicon* MakeSilicon( + int NumVertices, double NumElect, int Nx, int Ny, int QDist, + double Nrecalc, double DiffStep, double PixelSize, + double SensorThickness, size_t idata, + const Table& treeRingTable, + const Position& treeRingCenter, + const Table& abs_length_table) { double* data = reinterpret_cast(idata); - PB11_PLACEMENT_NEW Silicon(NumVertices, NumElect, Nx, Ny, QDist, - Nrecalc, DiffStep, PixelSize, SensorThickness, data, - treeRingTable, treeRingCenter, abs_length_table); + return new Silicon(NumVertices, NumElect, Nx, Ny, QDist, + Nrecalc, DiffStep, PixelSize, SensorThickness, data, + treeRingTable, treeRingCenter, abs_length_table); } - void pyExportSilicon(PB11_MODULE& _galsim) + 
void pyExportSilicon(PY_MODULE& _galsim) { - bp::class_ pySilicon(GALSIM_COMMA "Silicon" BP_NOINIT); - pySilicon.def(BP_MAKE_CONSTRUCTOR(&MakeSilicon)); + py::class_ pySilicon(GALSIM_COMMA "Silicon" BP_NOINIT); + pySilicon.def(PY_INIT(&MakeSilicon)); WrapTemplates(pySilicon); WrapTemplates(pySilicon); diff --git a/pysrc/Table.cpp b/pysrc/Table.cpp index 5991a4e5f9d..1080178dbbc 100644 --- a/pysrc/Table.cpp +++ b/pysrc/Table.cpp @@ -22,8 +22,7 @@ namespace galsim { - static BP_CONSTRUCTOR(MakeTable, Table, - size_t iargs, size_t ivals, int N, const char* interp_c) + static Table* MakeTable(size_t iargs, size_t ivals, int N, const char* interp_c) { const double* args = reinterpret_cast(iargs); const double* vals = reinterpret_cast(ivals); @@ -35,7 +34,7 @@ namespace galsim { else if (interp == "ceil") i = Table::ceil; else if (interp == "nearest") i = Table::nearest; - PB11_PLACEMENT_NEW Table(args, vals, N, i); + return new Table(args, vals, N, i); } static void InterpMany(const Table& table, size_t iargs, size_t ivals, int N) @@ -45,9 +44,8 @@ namespace galsim { table.interpMany(args, vals, N); } - static BP_CONSTRUCTOR(MakeTable2D, Table2D, - size_t ix, size_t iy, size_t ivals, int Nx, int Ny, - const char* interp_c) + static Table2D* MakeTable2D(size_t ix, size_t iy, size_t ivals, int Nx, int Ny, + const char* interp_c) { const double* x = reinterpret_cast(ix); const double* y = reinterpret_cast(iy); @@ -59,7 +57,7 @@ namespace galsim { else if (interp == "ceil") i = Table2D::ceil; else if (interp == "nearest") i = Table2D::nearest; - PB11_PLACEMENT_NEW Table2D(x, y, vals, Nx, Ny, i); + return new Table2D(x, y, vals, Nx, Ny, i); } static void InterpMany2D(const Table2D& table2d, size_t ix, size_t iy, size_t ivals, int N) @@ -86,15 +84,15 @@ namespace galsim { table2d.gradientMany(x, y, dfdx, dfdy, N); } - void pyExportTable(PB11_MODULE& _galsim) + void pyExportTable(PY_MODULE& _galsim) { - bp::class_
(GALSIM_COMMA "_LookupTable" BP_NOINIT) - .def(BP_MAKE_CONSTRUCTOR(&MakeTable)) + py::class_
(GALSIM_COMMA "_LookupTable" BP_NOINIT) + .def(PY_INIT(&MakeTable)) .def("interp", &Table::lookup) .def("interpMany", &InterpMany); - bp::class_(GALSIM_COMMA "_LookupTable2D" BP_NOINIT) - .def(BP_MAKE_CONSTRUCTOR(&MakeTable2D)) + py::class_(GALSIM_COMMA "_LookupTable2D" BP_NOINIT) + .def(PY_INIT(&MakeTable2D)) .def("interp", &Table2D::lookup) .def("interpMany", &InterpMany2D) .def("gradient", &Gradient) diff --git a/pysrc/WCS.cpp b/pysrc/WCS.cpp index 7cff4148d34..871055a0c19 100644 --- a/pysrc/WCS.cpp +++ b/pysrc/WCS.cpp @@ -38,22 +38,22 @@ namespace galsim { ApplyPV(n, m, uar, var, pvar); } - bp::tuple CallInvertPV(double u, double v, size_t pv_data) + py::tuple CallInvertPV(double u, double v, size_t pv_data) { const double* pvar = reinterpret_cast(pv_data); InvertPV(u, v, pvar); - return bp::make_tuple(u,v); + return py::make_tuple(u,v); } - bp::tuple CallInvertAB(int m, double x, double y, size_t ab_data, size_t abp_data) + py::tuple CallInvertAB(int m, double x, double y, size_t ab_data, size_t abp_data) { const double* abar = reinterpret_cast(ab_data); const double* abpar = reinterpret_cast(abp_data); InvertAB(m, x, y, abar, abpar); - return bp::make_tuple(x,y); + return py::make_tuple(x,y); } - void pyExportWCS(PB11_MODULE& _galsim) + void pyExportWCS(PY_MODULE& _galsim) { GALSIM_DOT def("ApplyPV", &CallApplyPV); GALSIM_DOT def("ApplyCD", &CallApplyCD); diff --git a/pysrc/module.cpp b/pysrc/module.cpp index f1ef8a68b0a..716cc8daf3a 100644 --- a/pysrc/module.cpp +++ b/pysrc/module.cpp @@ -21,53 +21,53 @@ #include "PyBind11Helper.h" namespace galsim { - void pyExportBounds(PB11_MODULE&); - void pyExportPhotonArray(PB11_MODULE&); - void pyExportImage(PB11_MODULE&); - void pyExportSBProfile(PB11_MODULE&); - void pyExportSBAdd(PB11_MODULE&); - void pyExportSBConvolve(PB11_MODULE&); - void pyExportSBDeconvolve(PB11_MODULE&); - void pyExportSBFourierSqrt(PB11_MODULE&); - void pyExportSBTransform(PB11_MODULE&); - void pyExportSBBox(PB11_MODULE&); - void 
pyExportSBGaussian(PB11_MODULE&); - void pyExportSBDeltaFunction(PB11_MODULE&); - void pyExportSBExponential(PB11_MODULE&); - void pyExportSBSersic(PB11_MODULE&); - void pyExportSBSpergel(PB11_MODULE&); - void pyExportSBMoffat(PB11_MODULE&); - void pyExportSBAiry(PB11_MODULE&); - void pyExportSBShapelet(PB11_MODULE&); - void pyExportSBInterpolatedImage(PB11_MODULE&); - void pyExportSBKolmogorov(PB11_MODULE&); - void pyExportSBInclinedExponential(PB11_MODULE&); - void pyExportSBInclinedSersic(PB11_MODULE&); - void pyExportRandom(PB11_MODULE&); - void pyExportTable(PB11_MODULE&); - void pyExportInterpolant(PB11_MODULE&); - void pyExportCDModel(PB11_MODULE&); - void pyExportSilicon(PB11_MODULE&); - void pyExportRealGalaxy(PB11_MODULE&); - void pyExportWCS(PB11_MODULE&); + void pyExportBounds(PY_MODULE&); + void pyExportPhotonArray(PY_MODULE&); + void pyExportImage(PY_MODULE&); + void pyExportSBProfile(PY_MODULE&); + void pyExportSBAdd(PY_MODULE&); + void pyExportSBConvolve(PY_MODULE&); + void pyExportSBDeconvolve(PY_MODULE&); + void pyExportSBFourierSqrt(PY_MODULE&); + void pyExportSBTransform(PY_MODULE&); + void pyExportSBBox(PY_MODULE&); + void pyExportSBGaussian(PY_MODULE&); + void pyExportSBDeltaFunction(PY_MODULE&); + void pyExportSBExponential(PY_MODULE&); + void pyExportSBSersic(PY_MODULE&); + void pyExportSBSpergel(PY_MODULE&); + void pyExportSBMoffat(PY_MODULE&); + void pyExportSBAiry(PY_MODULE&); + void pyExportSBShapelet(PY_MODULE&); + void pyExportSBInterpolatedImage(PY_MODULE&); + void pyExportSBKolmogorov(PY_MODULE&); + void pyExportSBInclinedExponential(PY_MODULE&); + void pyExportSBInclinedSersic(PY_MODULE&); + void pyExportRandom(PY_MODULE&); + void pyExportTable(PY_MODULE&); + void pyExportInterpolant(PY_MODULE&); + void pyExportCDModel(PY_MODULE&); + void pyExportSilicon(PY_MODULE&); + void pyExportRealGalaxy(PY_MODULE&); + void pyExportWCS(PY_MODULE&); namespace hsm { - void pyExportHSM(PB11_MODULE&); + void pyExportHSM(PY_MODULE&); } namespace 
integ { - void pyExportInteg(PB11_MODULE&); + void pyExportInteg(PY_MODULE&); } namespace math { - void pyExportBessel(PB11_MODULE&); + void pyExportBessel(PY_MODULE&); } } // namespace galsim -PB11_MAKE_MODULE(_galsim) +PYBIND11_MODULE(_galsim, _galsim) { - PB11_START_MODULE(_galsim); + BP_SCOPE(_galsim); galsim::pyExportBounds(_galsim); galsim::pyExportPhotonArray(_galsim); @@ -102,6 +102,4 @@ PB11_MAKE_MODULE(_galsim) galsim::hsm::pyExportHSM(_galsim); galsim::integ::pyExportInteg(_galsim); galsim::math::pyExportBessel(_galsim); - - PB11_END_MODULE(_galsim); } From f417a0c7de998e2956620db99303b91c75df45e9 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Thu, 11 Jan 2018 18:21:51 -0500 Subject: [PATCH 047/111] Make sure setup.py knows about the pybind11>=2.2 requirement (#809-pybind11) --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index ff5963f103d..dab6f09bf79 100644 --- a/setup.py +++ b/setup.py @@ -398,7 +398,7 @@ def run(self): # Note: We don't actually need cython or setuptools_scm, but eigency depends on them at build time, # and their setup.py is broken such that if they're not already installed it fails catastrophically. 
-build_dep = ['pybind11', 'setuptools_scm', 'cython', 'eigency'] +build_dep = ['pybind11>=2.2', 'setuptools_scm', 'cython', 'eigency>=1.76'] run_dep = ['numpy', 'future', 'astropy', 'pyyaml', 'LSSTDESC.Coord', 'pandas'] with open('README.md') as file: From 163e5138987754b6a454a4f9f5afc1ea1f264d8d Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sun, 14 Jan 2018 16:44:01 -0500 Subject: [PATCH 048/111] Remove some old deprecated test files (#809-pybind11) --- tests/deprecated/test_atmosphere.py | 174 ---------------------------- tests/deprecated/test_ellipse.py | 165 -------------------------- 2 files changed, 339 deletions(-) delete mode 100644 tests/deprecated/test_atmosphere.py delete mode 100644 tests/deprecated/test_ellipse.py diff --git a/tests/deprecated/test_atmosphere.py b/tests/deprecated/test_atmosphere.py deleted file mode 100644 index 444bb1b9983..00000000000 --- a/tests/deprecated/test_atmosphere.py +++ /dev/null @@ -1,174 +0,0 @@ -# Copyright (c) 2012-2017 by the GalSim developers team on GitHub -# https://github.com/GalSim-developers -# -# This file is part of GalSim: The modular galaxy image simulation toolkit. -# https://github.com/GalSim-developers/GalSim -# -# GalSim is free software: redistribution and use in source and binary forms, -# with or without modification, are permitted provided that the following -# conditions are met: -# -# 1. Redistributions of source code must retain the above copyright notice, this -# list of conditions, and the disclaimer given in the accompanying LICENSE -# file. -# 2. Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions, and the disclaimer given in the documentation -# and/or other materials provided with the distribution. 
-# -import os -import sys - -import numpy as np -import galsim.deprecated.atmosphere - -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - -imgdir = os.path.join(".", "SBProfile_comparison_images") # Directory containing the reference - # images. - -# AtmosphericPSF / Kolmogorov params and reference values -test_fwhm = 1.9 -test_lor0 = 1.9 -test_oversampling = 1.7 - -atmos_ref_fwhm_from_lor0 = test_lor0 * 0.976 -atmos_ref_lor0_from_fwhm = test_fwhm / 0.976 - -# for flux normalization tests -test_flux = 1.9 - -# decimal point to go to for parameter value comparisons -param_decimal = 12 - -def funcname(): - import inspect - return inspect.stack()[1][3] - - -def test_AtmosphericPSF_properties(): - """Test some basic properties of a known Atmospheric PSF. - """ - import time - t1 = time.time() - apsf = galsim.deprecated.AtmosphericPSF(lam_over_r0=1.5) - # Check that we are centered on (0, 0) - cen = galsim._galsim.PositionD(0, 0) - np.testing.assert_array_almost_equal( - [apsf.centroid().x, apsf.centroid().y], [cen.x, cen.y], 10, - err_msg="Atmospheric PSF not centered on (0, 0)") - # Check Fourier properties - np.testing.assert_almost_equal(apsf.maxK(), 5.8341564391716183, 9, - err_msg="Atmospheric PSF .maxk() does not return known value.") - np.testing.assert_almost_equal(apsf.stepK(), 1.0275679547331542, 9, - err_msg="Atmospheric PSF .stepk() does not return known value.") - np.testing.assert_almost_equal(apsf.kValue(cen), 1+0j, 4, - err_msg="Atmospheric PSF k value at (0, 0) is not 1+0j.") - t2 = time.time() - print 'time for %s = %.2f'%(funcname(),t2-t1) - -def test_AtmosphericPSF_flux(): - """Test that the flux of the atmospheric PSF is normalized to unity. 
- """ - import time - t1 = time.time() - lors = np.linspace(0.5, 2., 5) # Different lambda_over_r0 values - for lor in lors: - apsf = galsim.deprecated.AtmosphericPSF(lam_over_r0=lor) - print 'apsf.getFlux = ',apsf.getFlux() - np.testing.assert_almost_equal(apsf.getFlux(), 1., 6, - err_msg="Flux of atmospheric PSF (ImageViewD) is not 1.") - # .draw() throws a warning if it doesn't get a float. This includes np.float64. Convert to - # have the test pass. - dx = float(lor / 10.) - img = galsim.ImageF(256,256) - img_array = apsf.draw(image=img, dx=dx).array - np.testing.assert_almost_equal(img_array.sum(), 1., 3, - err_msg="Flux of atmospheric PSF (image array) is not 1.") - t2 = time.time() - print 'time for %s = %.2f'%(funcname(),t2-t1) - -def test_AtmosphericPSF_fwhm(): - """Test that the FWHM of the atmospheric PSF corresponds to the one expected from the - lambda / r0 input.""" - import time - t1 = time.time() - lors = np.linspace(0.5, 2., 5) # Different lambda_over_r0 values - for lor in lors: - apsf = galsim.deprecated.AtmosphericPSF(lam_over_r0=lor) - # .draw() throws a warning if it doesn't get a float. This includes np.float64. Convert to - # have the test pass. - dx_scale = 10 - dx = float(lor / dx_scale) - # Need use_true_center=False, since we want the maximum to actually be drawn in one - # of the pixels, rather than between the central 4 pixels. - psf_array = apsf.draw(dx=dx, use_true_center=False).array - nx, ny = psf_array.shape - profile = psf_array[nx / 2, ny / 2:] - # Now get the last array index where the profile value exceeds half the peak value as a - # rough estimator of the HWHM. - hwhm_index = np.where(profile > profile.max() / 2.)[0][-1] - np.testing.assert_equal(hwhm_index, dx_scale / 2, - err_msg="Kolmogorov PSF does not have the expected FWHM.") - t2 = time.time() - print 'time for %s = %.2f'%(funcname(),t2-t1) - -def test_atmos_flux_scaling(): - """Test flux scaling for AtmosphericPSF. 
- """ - import time - t1 = time.time() - # init with lam_over_r0 and flux only (should be ok given last tests) - obj = galsim.deprecated.AtmosphericPSF(lam_over_r0=test_lor0, flux=test_flux) - obj *= 2. - np.testing.assert_almost_equal( - obj.getFlux(), test_flux * 2., decimal=param_decimal, - err_msg="Flux param inconsistent after __imul__.") - obj = galsim.deprecated.AtmosphericPSF(lam_over_r0=test_lor0, flux=test_flux) - obj /= 2. - np.testing.assert_almost_equal( - obj.getFlux(), test_flux / 2., decimal=param_decimal, - err_msg="Flux param inconsistent after __idiv__.") - obj = galsim.deprecated.AtmosphericPSF(lam_over_r0=test_lor0, flux=test_flux) - obj2 = obj * 2. - # First test that original obj is unharmed... - np.testing.assert_almost_equal( - obj.getFlux(), test_flux, decimal=param_decimal, - err_msg="Flux param inconsistent after __rmul__ (original).") - # Then test new obj2 flux - np.testing.assert_almost_equal( - obj2.getFlux(), test_flux * 2., decimal=param_decimal, - err_msg="Flux param inconsistent after __rmul__ (result).") - obj = galsim.deprecated.AtmosphericPSF(lam_over_r0=test_lor0, flux=test_flux) - obj2 = 2. * obj - # First test that original obj is unharmed... - np.testing.assert_almost_equal( - obj.getFlux(), test_flux, decimal=param_decimal, - err_msg="Flux param inconsistent after __mul__ (original).") - # Then test new obj2 flux - np.testing.assert_almost_equal( - obj2.getFlux(), test_flux * 2., decimal=param_decimal, - err_msg="Flux param inconsistent after __mul__ (result).") - obj = galsim.deprecated.AtmosphericPSF(lam_over_r0=test_lor0, flux=test_flux) - obj2 = obj / 2. - # First test that original obj is unharmed... 
- np.testing.assert_almost_equal( - obj.getFlux(), test_flux, decimal=param_decimal, - err_msg="Flux param inconsistent after __div__ (original).") - # Then test new obj2 flux - np.testing.assert_almost_equal( - obj2.getFlux(), test_flux / 2., decimal=param_decimal, - err_msg="Flux param inconsistent after __div__ (result).") - t2 = time.time() - print 'time for %s = %.2f'%(funcname(),t2-t1) - - -if __name__ == "__main__": - test_AtmosphericPSF_flux() - test_AtmosphericPSF_properties() - test_AtmosphericPSF_fwhm() - test_atmos_flux_scaling() diff --git a/tests/deprecated/test_ellipse.py b/tests/deprecated/test_ellipse.py deleted file mode 100644 index a349f9dacf1..00000000000 --- a/tests/deprecated/test_ellipse.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright (c) 2012-2017 by the GalSim developers team on GitHub -# https://github.com/GalSim-developers -# -# This file is part of GalSim: The modular galaxy image simulation toolkit. -# https://github.com/GalSim-developers/GalSim -# -# GalSim is free software: redistribution and use in source and binary forms, -# with or without modification, are permitted provided that the following -# conditions are met: -# -# 1. Redistributions of source code must retain the above copyright notice, this -# list of conditions, and the disclaimer given in the accompanying LICENSE -# file. -# 2. Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions, and the disclaimer given in the documentation -# and/or other materials provided with the distribution. 
-# -import numpy as np -import os -import sys - -try: - import galsim -except ImportError: - path, filename = os.path.split(__file__) - sys.path.append(os.path.abspath(os.path.join(path, ".."))) - import galsim - -from galsim import pyfits - -##### set up necessary info for tests -# a few shear values over which we will loop so we can check them all -# note: Rachel started with these q and beta, and then calculated all the other numbers in IDL using -# the standard formulae -q = [0.5, 0.3, 0.1, 0.7] -n_shear = len(q) -beta = [0.5*np.pi, 0.25*np.pi, 0.0*np.pi, np.pi/3.0] -g = [0.333333, 0.538462, 0.818182, 0.176471] -g1 = [-0.33333334, 0.0, 0.81818175, -0.088235296] -g2 = [0.0, 0.53846157, 0.0, 0.15282802] -e = [0.600000, 0.834862, 0.980198, 0.342282] -e1 = [-0.6000000, 0.0, 0.98019803, -0.17114094] -e2 = [0.0, 0.83486235, 0.0, 0.29642480] -eta = [0.693147, 1.20397, 2.30259, 0.356675] -eta1 = [-0.69314718, 0.0, 2.3025851, -0.17833748] -eta2 = [0.0, 1.2039728, 0.0, 0.30888958] -decimal = 5 - -# some ellipse properties over which we will loop - use the shear values above, and: -mu = [0.0, 0.5, -0.1] -n_mu = len(mu) -x_shift = [0.0, 1.7, -3.0] -y_shift = [-1.3, 0.0, 9.1] -n_shift = len(x_shift) - -def funcname(): - import inspect - return inspect.stack()[1][3] - -def all_ellipse_vals(test_ellipse, ind_shear, ind_mu, ind_shift, check_shear=1.0, check_mu=1.0, - check_shift = 1.0): - # this function tests that the various numbers stored in some Ellipse object are consistent with - # the tabulated values that we expect, given indices against which to test - vec = [test_ellipse.getS().g1, test_ellipse.getS().g2, test_ellipse.getMu(), - test_ellipse.getX0().x, test_ellipse.getX0().y] - test_vec = [check_shear*g1[ind_shear], check_shear*g2[ind_shear], check_mu*mu[ind_mu], - check_shift*x_shift[ind_shift], check_shift*y_shift[ind_shift]] - np.testing.assert_array_almost_equal(vec, test_vec, decimal=decimal, - err_msg = "Incorrectly initialized Ellipse") - -def 
test_ellipse_initialization(): - """Test that Ellipses can be initialized in a variety of ways and get the expected results.""" - import time - t1 = time.time() - # make an empty Ellipse and make sure everything is zero - e = galsim.deprecated.Ellipse() - vec = [e.getS().g1, e.getS().g2, e.getMu(), e.getX0().x, e.getX0().y] - vec_ideal = [0.0, 0.0, 0.0, 0.0, 0.0] - np.testing.assert_array_almost_equal(vec, vec_ideal, decimal = decimal, - err_msg = "Incorrectly initialized empty ellipse") - - # then loop over the ways we can initialize, with all things initialized and with only those - # that are non-zero initialized, using args, kwargs in various ways - for ind_shear in range(n_shear): - for ind_mu in range(n_mu): - for ind_shift in range(n_shift): - # initialize with all of shear, mu, shift - ## using a Shear, either as arg or kwarg - ## using a mu, either as arg or kwarg - ## using a shift, either as Position arg or kwargs - ## using the various ways of making a Shear passed through as kwargs - s = galsim.Shear(g1 = g1[ind_shear], g2 = g2[ind_shear]) - p = galsim.PositionD(x_shift[ind_shift], y_shift[ind_shift]) - e = galsim.deprecated.Ellipse(s, mu[ind_mu], p) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift) - e = galsim.deprecated.Ellipse(p, shear=s, mu=mu[ind_mu]) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift) - e = galsim.deprecated.Ellipse(s, mu[ind_mu], x_shift=p.x, y_shift=p.y) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift) - e = galsim.deprecated.Ellipse(shear=s, mu=mu[ind_mu], x_shift=p.x, y_shift=p.y) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift) - e = galsim.deprecated.Ellipse(q = q[ind_shear], - beta = beta[ind_shear]*galsim.radians, - mu=mu[ind_mu], x_shift = p.x, y_shift = p.y) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift) - - # now initialize with only 2 of the 3 and make sure the other is zero - e = galsim.deprecated.Ellipse(mu[ind_mu], p) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_shear=0.0) - e = 
galsim.deprecated.Ellipse(p, mu=mu[ind_mu]) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_shear=0.0) - e = galsim.deprecated.Ellipse(mu[ind_mu], x_shift = p.x, y_shift = p.y) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_shear=0.0) - e = galsim.deprecated.Ellipse(mu = mu[ind_mu], x_shift = p.x, y_shift = p.y) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_shear=0.0) - e = galsim.deprecated.Ellipse(s, p) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_mu=0.0) - e = galsim.deprecated.Ellipse(p, shear=s) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_mu=0.0) - e = galsim.deprecated.Ellipse(s, x_shift = p.x, y_shift = p.y) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_mu=0.0) - e = galsim.deprecated.Ellipse(shear=s, x_shift = p.x, y_shift = p.y) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_mu=0.0) - e = galsim.deprecated.Ellipse(s, mu[ind_mu]) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_shift=0.0) - e = galsim.deprecated.Ellipse(s, mu=mu[ind_mu]) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_shift=0.0) - e = galsim.deprecated.Ellipse(mu[ind_mu], shear=s) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_shift=0.0) - e = galsim.deprecated.Ellipse(shear=s, mu=mu[ind_mu]) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_shift=0.0) - - # now initialize with only 1 of the 3 and make sure the other is zero - e = galsim.deprecated.Ellipse(s) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_mu=0.0, check_shift=0.0) - e = galsim.deprecated.Ellipse(shear=s) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_mu=0.0, check_shift=0.0) - e = galsim.deprecated.Ellipse(eta1=eta1[ind_shear], eta2=eta2[ind_shear]) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_mu=0.0, check_shift=0.0) - e = galsim.deprecated.Ellipse(mu[ind_mu]) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_shear=0.0, check_shift=0.0) - e = 
galsim.deprecated.Ellipse(mu=mu[ind_mu]) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_shear=0.0, check_shift=0.0) - e = galsim.deprecated.Ellipse(p) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_mu=0.0, check_shear=0.0) - e = galsim.deprecated.Ellipse(x_shift = p.x, y_shift = p.y) - all_ellipse_vals(e, ind_shear, ind_mu, ind_shift, check_mu=0.0, check_shear=0.0) - # check for some cases that should fail - s = galsim.Shear() - try: - np.testing.assert_raises(TypeError, galsim.deprecated.Ellipse, s, g2=0.3) - np.testing.assert_raises(TypeError, galsim.deprecated.Ellipse, shear=s, x_shift=1, g1=0.2) - np.testing.assert_raises(TypeError, galsim.deprecated.Ellipse, s, - shift=galsim.PositionD(), x_shift=0.1) - np.testing.assert_raises(TypeError, galsim.deprecated.Ellipse, s, s) - np.testing.assert_raises(TypeError, galsim.deprecated.Ellipse, g1=0.1, randomkwarg=0.7) - np.testing.assert_raises(TypeError, galsim.deprecated.Ellipse, shear=0.1) - except ImportError: - print 'The assert_raises tests require nose' - - t2 = time.time() - print 'time for %s = %.2f'%(funcname(),t2-t1) - -if __name__ == "__main__": - test_ellipse_initialization() From fb882711303bf724eceeff2683a7db095ddf5a53 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sun, 14 Jan 2018 17:22:01 -0500 Subject: [PATCH 049/111] Make python setup.py test run the unit tests (#809-pybind11) --- setup.py | 38 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index dab6f09bf79..82cc4f12e33 100644 --- a/setup.py +++ b/setup.py @@ -9,6 +9,7 @@ from setuptools.command.install import install from setuptools.command.install_scripts import install_scripts from setuptools.command.easy_install import easy_install +from setuptools.command.test import test import setuptools print("Using setuptools version",setuptools.__version__) @@ -391,6 +392,36 @@ def run(self): install_scripts.run(self) self.distribution.script_install_dir = 
self.install_dir +class my_test(test): + # cf. https://pytest.readthedocs.io/en/2.7.3/goodpractises.html + user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")] + + def initialize_options(self): + test.initialize_options(self) + self.pytest_args = None + + def finalize_options(self): + test.finalize_options(self) + self.test_args = [] + self.test_suite = True + + def run_tests(self): + #import here, cause outside the eggs aren't loaded + import pytest + ncpu = cpu_count() + if self.pytest_args is None: + self.pytest_args = ['-n=%d'%ncpu, '--timeout=60'] + else: + self.pytest_args = self.pytest_args.split() + print('Using pytest args: ',self.pytest_args,' (can update with -a pytest_args)') + original_dir = os.getcwd() + os.chdir('tests') + test_files = glob.glob('test*.py') + errno = pytest.main(self.pytest_args + test_files) + if errno != 0: + sys.exit(errno) + os.chdir(original_dir) + ext=Extension("galsim._galsim", sources, depends=headers, @@ -398,8 +429,9 @@ def run(self): # Note: We don't actually need cython or setuptools_scm, but eigency depends on them at build time, # and their setup.py is broken such that if they're not already installed it fails catastrophically. 
-build_dep = ['pybind11>=2.2', 'setuptools_scm', 'cython', 'eigency>=1.76'] -run_dep = ['numpy', 'future', 'astropy', 'pyyaml', 'LSSTDESC.Coord', 'pandas'] +build_dep = ['pybind11>=2.2', 'setuptools_scm', 'cython', 'eigency'] +run_dep = ['numpy', 'future', 'astropy', 'pyyaml', 'LSSTDESC.Coord', 'pandas', 'starlink-pyast'] +test_dep = ['pytest', 'pytest-xdist', 'pytest-timeout', 'scipy'] with open('README.md') as file: long_description = file.read() @@ -467,10 +499,12 @@ def run(self): ext_modules=[ext], setup_requires=build_dep, install_requires=build_dep + run_dep, + tests_require=test_dep, cmdclass = {'build_ext': my_builder, 'install': my_install, 'install_scripts': my_install_scripts, 'easy_install': my_easy_install, + 'test': my_test, }, entry_points = {'console_scripts' : [ 'galsim = galsim.__main__:main', From 1c22aebe8abc1437d619c54ab71d64f71786befd Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Wed, 17 Jan 2018 14:03:37 -0500 Subject: [PATCH 050/111] Break up the sources into a library and extension (#809-pybind11) --- setup.py | 150 +++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 96 insertions(+), 54 deletions(-) diff --git a/setup.py b/setup.py index 82cc4f12e33..b4cba6e5357 100644 --- a/setup.py +++ b/setup.py @@ -6,6 +6,7 @@ from setuptools import setup, Extension, find_packages from setuptools.command.build_ext import build_ext +from setuptools.command.build_clib import build_clib from setuptools.command.install import install from setuptools.command.install_scripts import install_scripts from setuptools.command.easy_install import easy_install @@ -27,7 +28,8 @@ def all_files_from(dir, ext=''): files.append(os.path.join(root, filename)) return files -sources = all_files_from('src', '.cpp') + all_files_from('pysrc', '.cpp') +py_sources = all_files_from('pysrc', '.cpp') +cpp_sources = all_files_from('src', '.cpp') headers = all_files_from('include') shared_data = all_files_from('share') @@ -267,8 +269,80 @@ def 
_single_compile(obj): return objects +def fix_compiler(compiler, parallel): + # Remove any -Wstrict-prototypes in the compiler flags (since invalid for C++) + try: + compiler.compiler_so.remove("-Wstrict-prototypes") + except (AttributeError, ValueError): + pass + + # Figure out what compiler it will use + #print('compiler = ',compiler.compiler) + cc = compiler.compiler_so[0] + cflags = compiler.compiler_so[1:] + comp_type = get_compiler(cc) + if cc == comp_type: + print('Using compiler %s'%(cc)) + else: + print('Using compiler %s, which is %s'%(cc,comp_type)) + + # Check if we can use ccache to speed up repeated compilation. + if try_cc('ccache ' + cc, cflags): + print('Using ccache') + compiler.set_executable('compiler_so', ['ccache',cc] + cflags) + + if parallel is None or parallel is True: + ncpu = cpu_count() + elif parallel: # is an integer + ncpu = parallel + else: + ncpu = 1 + if ncpu > 1: + print('Using %d cpus for compiling'%ncpu) + if parallel is None: + print('To override, you may do python setup.py build -j1') + compiler.compile = types.MethodType(parallel_compile, compiler) + + extra_cflags = copt[comp_type] + print('Using extra flags ',extra_cflags) + + # Return the extra cflags, since those will be added to the build step in a different place. + return extra_cflags + +# Make a subclass of build_ext so we can add to the -I list. +class my_build_clib(build_clib): + # Adding the libraries and include_dirs here rather than when declaring the Extension + # means that the setup_requires modules should already be installed, so pybind11, eigency, + # and fftw3 should all import properly. + def finalize_options(self): + build_clib.finalize_options(self) + self.include_dirs.append('include') + self.include_dirs.append('include/galsim') + self.include_dirs.append('include/fftw3') + + import eigency + self.include_dirs.append(eigency.get_includes()[2]) + + # Add any extra things based on the compiler being used.. 
+ def build_libraries(self, libraries): + + # They didn't put the parallel option into build_clib like they did with build_ext, so + # look for the parallel option there instead. + build_ext = self.distribution.get_command_obj('build_ext') + parallel = getattr(build_ext, 'parallel', True) + + cflags = fix_compiler(self.compiler, parallel) + + # Add the appropriate extra flags for that compiler. + for (lib_name, build_info) in libraries: + build_info['cflags'] = build_info.get('cflags',[]) + cflags + + # Now run the normal build function. + build_clib.build_libraries(self, libraries) + + # Make a subclass of build_ext so we can add to the -I list. -class my_builder( build_ext ): +class my_build_ext(build_ext): # Adding the libraries and include_dirs here rather than when declaring the Extension # means that the setup_requires modules should already be installed, so pybind11, eigency, # and fftw3 should all import properly. @@ -276,6 +350,10 @@ def finalize_options(self): build_ext.finalize_options(self) self.include_dirs.append('include') self.include_dirs.append('include/galsim') + self.include_dirs.append('include/fftw3') + + import eigency + self.include_dirs.append(eigency.get_includes()[2]) import pybind11 # Include both the standard location and the --user location, since it's hard to tell @@ -283,65 +361,23 @@ def finalize_options(self): self.include_dirs.append(pybind11.get_include(user=False)) self.include_dirs.append(pybind11.get_include(user=True)) - self.include_dirs.append('include/fftw3') fftw_lib = find_fftw_lib() fftw_libpath, fftw_libname = os.path.split(fftw_lib) self.library_dirs.append(os.path.split(fftw_lib)[0]) self.libraries.append(os.path.split(fftw_lib)[1].split('.')[0][3:]) - import eigency - self.include_dirs.append(eigency.get_includes()[2]) - # Add any extra things based on the compiler being used.. 
def build_extensions(self): - # Remove any -Wstrict-prototypes in the compiler flags (since invalid for C++) - try: - self.compiler.compiler_so.remove("-Wstrict-prototypes") - except (AttributeError, ValueError): - pass - - print('Platform is ',self.plat_name) - - # Figure out what compiler it will use - #print('compiler_so = ',self.compiler.compiler_so) - cc = self.compiler.compiler_so[0] - cflags = self.compiler.compiler_so[1:] - comp_type = get_compiler(cc) - if cc == comp_type: - print('Using compiler %s'%(cc)) - else: - print('Using compiler %s, which is %s'%(cc,comp_type)) + + # The -jN option was new in distutils version 3.5. + # If user has older version, just set parallel to True and move on. + parallel = getattr(self, 'parallel', True) + + cflags = fix_compiler(self.compiler, parallel) # Add the appropriate extra flags for that compiler. - print('Using extra args ',copt[comp_type]) - #cflags += copt[comp_type] - # It didn't work for Erin to add this to the end of cflags for some reason. Maybe related - # to the distutils version? Not sure. Anyway, this way should work. for e in self.extensions: - e.extra_compile_args = copt[comp_type] - - # Check if we can use ccache to speed up repeated compilation. - if try_cc('ccache ' + cc, cflags): - print('Using ccache') - self.compiler.set_executable('compiler_so', ['ccache',cc] + cflags) - #print('compiler_so => ',self.compiler.compiler_so) - - # Try to compile in parallel - if not hasattr('self', 'parallel'): - # This was new in distutils version 3.5. - # If user has older version, just set parallel to True and move on. 
- self.parallel = True - if self.parallel is None or self.parallel is True: - ncpu = cpu_count() - elif self.parallel: # is an integer - ncpu = self.parallel - else: - ncpu = 1 - if ncpu > 1: - print('Using %d cpus for compiling'%ncpu) - if self.parallel is None: - print('To override, you may do python setup.py build -j1') - self.compiler.compile = types.MethodType(parallel_compile, self.compiler) + e.extra_compile_args = cflags # Now run the normal build function. build_ext.build_extensions(self) @@ -422,9 +458,13 @@ def run_tests(self): sys.exit(errno) os.chdir(original_dir) + +lib=("galsim", {'sources' : cpp_sources, + 'depends' : headers, + 'include_dirs' : ['include', 'include/galsim'], + 'undef_macros' : undef_macros }) ext=Extension("galsim._galsim", - sources, - depends=headers, + py_sources, undef_macros = undef_macros) # Note: We don't actually need cython or setuptools_scm, but eigency depends on them at build time, @@ -496,11 +536,13 @@ def run_tests(self): packages=find_packages(), package_data={'galsim' : shared_data}, #include_package_data=True, + libraries=[lib], ext_modules=[ext], setup_requires=build_dep, install_requires=build_dep + run_dep, tests_require=test_dep, - cmdclass = {'build_ext': my_builder, + cmdclass = {'build_ext': my_build_ext, + 'build_clib': my_build_clib, 'install': my_install, 'install_scripts': my_install_scripts, 'easy_install': my_easy_install, From 688a2634a1825e188187be4b49fbf2bf8c1670f1 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sat, 10 Feb 2018 14:05:53 -0800 Subject: [PATCH 051/111] Don't require eigency. 
Just look for Eigen in various places (#809-pybind11) --- setup.py | 122 ++++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 85 insertions(+), 37 deletions(-) diff --git a/setup.py b/setup.py index b4cba6e5357..8802c5d3d56 100644 --- a/setup.py +++ b/setup.py @@ -93,18 +93,24 @@ def get_compiler(cc): def find_fftw_lib(): try_libdirs = [] lib_ext = '.so' - if 'FFTW_PATH' in os.environ: - try_libdirs.append(os.environ['FFTW_PATH']) - try_libdirs.append(os.path.join(os.environ['FFTW_PATH'],'lib')) + if 'FFTW_DIR' in os.environ: + try_libdirs.append(os.environ['FFTW_DIR']) + try_libdirs.append(os.path.join(os.environ['FFTW_DIR'],'lib')) if 'posix' in os.name.lower(): try_libdirs.extend(['/usr/local/lib', '/usr/lib']) if 'darwin' in platform.system().lower(): - try_libdirs.extend(['/sw/lib', '/opt/local/lib']) + try_libdirs.extend(['/usr/local/lib', '/usr/lib', '/sw/lib', '/opt/local/lib']) lib_ext = '.dylib' for path in ['LIBRARY_PATH', 'LD_LIBRARY_PATH', 'DYLD_LIBRARY_PATH']: if path in os.environ: for dir in os.environ[path].split(':'): try_libdirs.append(dir) + # If the above don't work, the fftw3 module may have the right directory. + try: + import fftw3 + try_libdirs.append(fftw3.lib.libdir) + except ImportError: + pass name = 'libfftw3' + lib_ext for dir in try_libdirs: @@ -128,9 +134,46 @@ def find_fftw_lib(): except Exception as e: print("Could not find fftw3 library. 
Make sure it is installed either in a standard ") print("location such as /usr/local/lib, or the installation directory is either in ") - print("your LIBRARY_PATH or FFTW_PATH environment variable.") + print("your LIBRARY_PATH or FFTW_DIR environment variable.") raise +# Check for Eigen in some likely places +def find_eigen_dir(): + try_dirs = [] + if 'EIGEN_DIR' in os.environ: + try_dirs.append(os.environ['EIGEN_DIR']) + try_dirs.append(os.path.join(os.environ['EIGEN_DIR'])) + if 'posix' in os.name.lower(): + try_dirs.extend(['/usr/local/include', '/usr/include']) + if 'darwin' in platform.system().lower(): + try_dirs.extend(['/usr/local/include', '/usr/include', '/sw/include', + '/opt/local/include']) + for path in ['C_INCLUDE_PATH']: + if path in os.environ: + for dir in os.environ[path].split(':'): + try_dirs.append(dir) + # eigency is a python package that bundles the Eigen header files, so if that's there, + # can use that. + try: + import eigency + try_dirs.append(eigency.get_includes()[2]) + except ImportError: + pass + + for dir in try_dirs: + if os.path.isfile(os.path.join(dir, 'Eigen/Core')): + print("found Eigen at", dir) + return dir + if os.path.isfile(os.path.join(dir, 'eigen3', 'Eigen/Core')): + dir = os.path.join(dir, 'eigen3') + print("found Eigen at", dir) + return dir + print("Could not find Eigen. Make sure it is installed either in a standard ") + print("location such as /usr/local/include, or the installation directory is either in ") + print("your C_INCLUDE_PATH or EIGEN_DIR environment variable.") + raise OSError("Could not find Eigen") + + def try_cc(cc, cflags=[], lflags=[]): """Check if compiling a simple bit of c++ code with the given compiler works properly. """ @@ -309,19 +352,45 @@ def fix_compiler(compiler, parallel): # Return the extra cflags, since those will be added to the build step in a different place. 
return extra_cflags +def add_dirs(builder): + # We need to do most of this both for build_clib and build_ext, so separate it out here. + + # First some basic ones we always need. + builder.include_dirs.append('include') + builder.include_dirs.append('include/galsim') + + # Look for fftw3. + fftw_lib = find_fftw_lib() + fftw_libpath, fftw_libname = os.path.split(fftw_lib) + if hasattr(builder, 'library_dirs'): + builder.library_dirs.append(os.path.split(fftw_lib)[0]) + builder.libraries.append(os.path.split(fftw_lib)[1].split('.')[0][3:]) + fftw_include = os.path.join(os.path.split(fftw_libpath)[0], 'include') + if os.path.isfile(os.path.join(fftw_include, 'fftw3.h')): + # Usually, the fftw3.h file is in an associated include dir, but not always. + builder.include_dirs.append(fftw_include) + else: + # If not, we have our own copy of fftw3.h here. + builder.include_dirs.append('include/fftw3') + + # Look for Eigen/Core + eigen_dir = find_eigen_dir() + builder.include_dirs.append(eigen_dir) + + # Finally, add pybind11's include dir + import pybind11 + # Include both the standard location and the --user location, since it's hard to tell + # which one is the right choice. + builder.include_dirs.append(pybind11.get_include(user=False)) + builder.include_dirs.append(pybind11.get_include(user=True)) + + + # Make a subclass of build_ext so we can add to the -I list. class my_build_clib(build_clib): - # Adding the libraries and include_dirs here rather than when declaring the Extension - # means that the setup_requires modules should already be installed, so pybind11, eigency, - # and fftw3 should all import properly. def finalize_options(self): build_clib.finalize_options(self) - self.include_dirs.append('include') - self.include_dirs.append('include/galsim') - self.include_dirs.append('include/fftw3') - - import eigency - self.include_dirs.append(eigency.get_includes()[2]) + add_dirs(self) # Add any extra things based on the compiler being used.. 
def build_libraries(self, libraries): @@ -343,28 +412,9 @@ def build_libraries(self, libraries): # Make a subclass of build_ext so we can add to the -I list. class my_build_ext(build_ext): - # Adding the libraries and include_dirs here rather than when declaring the Extension - # means that the setup_requires modules should already be installed, so pybind11, eigency, - # and fftw3 should all import properly. def finalize_options(self): build_ext.finalize_options(self) - self.include_dirs.append('include') - self.include_dirs.append('include/galsim') - self.include_dirs.append('include/fftw3') - - import eigency - self.include_dirs.append(eigency.get_includes()[2]) - - import pybind11 - # Include both the standard location and the --user location, since it's hard to tell - # which one is the right choice. - self.include_dirs.append(pybind11.get_include(user=False)) - self.include_dirs.append(pybind11.get_include(user=True)) - - fftw_lib = find_fftw_lib() - fftw_libpath, fftw_libname = os.path.split(fftw_lib) - self.library_dirs.append(os.path.split(fftw_lib)[0]) - self.libraries.append(os.path.split(fftw_lib)[1].split('.')[0][3:]) + add_dirs(self) # Add any extra things based on the compiler being used.. def build_extensions(self): @@ -467,9 +517,7 @@ def run_tests(self): py_sources, undef_macros = undef_macros) -# Note: We don't actually need cython or setuptools_scm, but eigency depends on them at build time, -# and their setup.py is broken such that if they're not already installed it fails catastrophically. 
-build_dep = ['pybind11>=2.2', 'setuptools_scm', 'cython', 'eigency'] +build_dep = ['pybind11>=2.2'] run_dep = ['numpy', 'future', 'astropy', 'pyyaml', 'LSSTDESC.Coord', 'pandas', 'starlink-pyast'] test_dep = ['pytest', 'pytest-xdist', 'pytest-timeout', 'scipy'] From c8fea5e52ecf718e9299e5b67f037dfe949a919a Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sat, 10 Feb 2018 14:18:16 -0800 Subject: [PATCH 052/111] Add C++ unit tests to python setup.py test (#809-pybind11) --- setup.py | 59 ++++++++++++++++++++++++++++++++++++++++++++++- tests/TestAll.cpp | 5 ++++ 2 files changed, 63 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 8802c5d3d56..942e73b22ba 100644 --- a/setup.py +++ b/setup.py @@ -30,6 +30,7 @@ def all_files_from(dir, ext=''): py_sources = all_files_from('pysrc', '.cpp') cpp_sources = all_files_from('src', '.cpp') +test_sources = all_files_from('tests', '.cpp') headers = all_files_from('include') shared_data = all_files_from('share') @@ -491,9 +492,59 @@ def finalize_options(self): self.test_args = [] self.test_suite = True + def run_cpp_tests(self): + import subprocess + + builder = self.distribution.get_command_obj('build_ext') + compiler = builder.compiler + ext = builder.extensions[0] + objects = compiler.compile(test_sources, + output_dir=builder.build_temp, + macros=ext.define_macros, + include_dirs=ext.include_dirs, + debug=builder.debug, + extra_postargs=ext.extra_compile_args, + depends=ext.depends) + + if ext.extra_objects: + objects.extend(ext.extra_objects) + extra_args = ext.extra_link_args or [] + + libraries = builder.get_libraries(ext) + library_dirs = ext.library_dirs + fftw_lib = find_fftw_lib() + fftw_libpath, fftw_libname = os.path.split(fftw_lib) + library_dirs.append(os.path.split(fftw_lib)[0]) + libraries.append(os.path.split(fftw_lib)[1].split('.')[0][3:]) + libraries.append('galsim') + + exe_file = os.path.join(builder.build_temp,'cpp_test') + compiler.link_executable( + objects, 'cpp_test', + 
output_dir=builder.build_temp, + libraries=libraries, + library_dirs=library_dirs, + runtime_library_dirs=ext.runtime_library_dirs, + extra_postargs=extra_args, + debug=builder.debug, + target_lang='c++') + + p = subprocess.Popen([exe_file], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + lines = p.stdout.readlines() + p.communicate() + for line in lines: + print(line.decode().strip()) + if p.returncode == 0: + print("All C++ tests passed.") + else: + raise RuntimeError("C++ tests failed") + def run_tests(self): - #import here, cause outside the eggs aren't loaded import pytest + + # Build and run the C++ tests + self.run_cpp_tests() + ncpu = cpu_count() if self.pytest_args is None: self.pytest_args = ['-n=%d'%ncpu, '--timeout=60'] @@ -504,10 +555,16 @@ def run_tests(self): os.chdir('tests') test_files = glob.glob('test*.py') errno = pytest.main(self.pytest_args + test_files) + print('pytest ',self.pytest_args,test_files) + errno = 0 if errno != 0: sys.exit(errno) os.chdir(original_dir) + print("Note: There might be some TypeError's after this. It seems to be a bug in some") + print(" versions of Python's multiprocessing module. ") + print(" They are harmless and can be ignored.\n") + lib=("galsim", {'sources' : cpp_sources, 'depends' : headers, diff --git a/tests/TestAll.cpp b/tests/TestAll.cpp index 636db85b0d8..95d36029918 100644 --- a/tests/TestAll.cpp +++ b/tests/TestAll.cpp @@ -22,6 +22,7 @@ #include #include "Test.h" +#include extern void TestImage(); extern void TestInteg(); @@ -30,10 +31,14 @@ extern void TestVersion(); int main() { try { + std::cout<<"Start C++ tests.\n"; // Run them all here: TestImage(); + std::cout<<"TestImage passed all tests.\n"; TestInteg(); + std::cout<<"TestInteg passed all tests.\n"; TestVersion(); + std::cout<<"TestVersion passed all tests.\n"; } catch (std::exception& e) { std::cerr< Date: Fri, 16 Feb 2018 17:41:27 -0500 Subject: [PATCH 053/111] Don't use eigency for Eigen. 
(#809-pybind11) --- conda_requirements.txt | 4 ---- requirements.txt | 10 +--------- 2 files changed, 1 insertion(+), 13 deletions(-) diff --git a/conda_requirements.txt b/conda_requirements.txt index 5a7e0de4256..dc78286d933 100644 --- a/conda_requirements.txt +++ b/conda_requirements.txt @@ -8,7 +8,3 @@ pandas >= 0.20 pybind11 >= 2.2 pip >= 9.0 gcc >= 4.8 -cython >= 0.26 -setuptools >= 38.2 -setuptools_scm >= 1.15.6 - diff --git a/requirements.txt b/requirements.txt index 730bbc37706..0f55316089b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,14 +11,6 @@ pyyaml >= 3.12 pandas >= 0.20 pybind11 >= 2.2 pip >= 9.0 -cython >= 0.26 -setuptools >= 38.2 -setuptools_scm >= 1.15.6 -# These are not in conda. Let pip install these. +# This is not in conda. Let pip install these. LSSTDESC.Coord >= 1.0.5 - -# The version of eigency (1.75) on pip doens't install the Eigen directory properly. -# cf. https://github.com/wouterboomsma/eigency/issues/17 -# It also improperly depends on cython at build time. This commit fixes that issue. -git+git://github.com/rmjarvis/eigency.git@33d8d65417484318255dbb422e6ad49dda803f06 From 5518d382956b34b2389590e2357f1b496a4e1099 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Fri, 16 Feb 2018 17:41:53 -0500 Subject: [PATCH 054/111] Switch order of pybind11 include directories to have user path take precedence (#809-pybind11) --- setup.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 942e73b22ba..d82ff53b054 100644 --- a/setup.py +++ b/setup.py @@ -380,11 +380,12 @@ def add_dirs(builder): # Finally, add pybind11's include dir import pybind11 + print('PyBind11 is version ',pybind11.__version__) # Include both the standard location and the --user location, since it's hard to tell # which one is the right choice. 
- builder.include_dirs.append(pybind11.get_include(user=False)) builder.include_dirs.append(pybind11.get_include(user=True)) - + builder.include_dirs.append(pybind11.get_include(user=False)) + print('Include files for pybind11 are ',builder.include_dirs[-2:]) # Make a subclass of build_ext so we can add to the -I list. From 3e64dd503cae89321d710be30e90d4073a0c10a0 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Fri, 16 Feb 2018 18:03:33 -0500 Subject: [PATCH 055/111] Rework old INSTALL.md file to be about SCons installation. (#809-pybind11) --- INSTALL.md => INSTALL_SCONS.md | 349 +++++---------------------------- SConstruct | 16 +- 2 files changed, 60 insertions(+), 305 deletions(-) rename INSTALL.md => INSTALL_SCONS.md (67%) diff --git a/INSTALL.md b/INSTALL_SCONS.md similarity index 67% rename from INSTALL.md rename to INSTALL_SCONS.md index 3c9ca3c3353..bfdb3a71521 100644 --- a/INSTALL.md +++ b/INSTALL_SCONS.md @@ -1,58 +1,13 @@ -Installation Instructions -========================= +Installation Using SCons +======================== -System requirements: GalSim currently only supports Linux and Mac OSX. +Prior to version 2.0, this was the only installation method for installing +GalSim. It is still supported, mostly in case some users have trouble with +the setup.py method, but not recommended for most users. -Table of Contents: +Please see the instructions in INSTALL.md first to see if that method +will work for you. -0) [Overall summary](#0-overall-summary) - -1) [Software required before building GalSim](#1-software-required-before-building-galsim) - -2) [Installing the GalSim Python package](#2-installing-the-galsim-python-package) - -3) [Running tests and installing example executables](#3-running-tests-and-installing-example-executables) - -4) [Running example scripts](#4-running-example-scripts) - -5) [Platform-specific notes](#5-platform-specific-notes) - -6) [More SCons options](#6-more-scons-options) - - -0. 
Overall summary -================== - -While the sections below detail how to install GalSim including its required and -optional dependencies, this section gives a brief summary. A minimal -installation of GalSim requires the following dependencies. This dependency list -includes a canonical version number that is known to work. In most cases, other -recent versions will also work: - -- Python (2.7, 3.4, 3.5, 3.6) -- SCons (2.1.0) -- NumPy (1.11) -- LSSTDESC.Coord (1.0.4) -- Astropy (1.1.1) -- Future (0.16.0) -- FFTW (3.3) -- TMV (0.73) -- Boost (1.61) - -A few optional dependencies provide additional functionality, but GalSim can -otherwise be compiled and used without them. Basic WCS functionality is native -to GalSim, but for users with more complicated WCS needs, we recommend -installing starlink-pyast. Thee Astropy WCS package is also supported, but note -that it requires scipy as an additional dependency. To use yaml for config -parsing, the pyyaml module is needed. Faster text file parsing for reading in -bandpasses and SEDs can be enabled if you have the pandas module (but the code -will work, albeit more slowly, without this module). - -The sections below give a lot more details about how to obtain these -dependencies; many are available from sources like pip or easy_install, rather -than having to be installed from source. Third party packages like Anaconda -often include many of these dependencies automatically. GalSim and all of its -dependencies can be installed via fink, for users with Macs. 1. Software required before building GalSim =========================================== @@ -60,189 +15,15 @@ dependencies can be installed via fink, for users with Macs. Please note: Mac users who want to use fink can skip down to Section 5.ii and use that to satisfy all dependencies before installing. 
-i) Python (2.7, 3.4, 3.5, or 3.6 series), with some additional modules installed --------------------------------------------------------------------------------- +i) Python (2.7, 3.4, 3.5, or 3.6 series) +---------------------------------------- The interface to the GalSim code is via the Python package `galsim`, and its associated modules. Therefore you must have Python installed on your system. Python is free, and available from a number of sources online (see below). Currently GalSim supports Python versions 2.7, 3.4, 3.5, and 3.6. It is likely -that other Python 3.x versions are compatible, but these two are the only ones -actively tested. - -Many systems have a version of Python pre-installed. To check whether you -already have a compatible version, type - - python --version - -at the terminal prompt. If you get a "Command not found" error, or the reported -version is not one of the supported versions, you should read the "Getting -Python and required modules" section below. - -It may be that there is or soon will be more than one version of Python -installed on your operating system, in which case please see the "Making sure -you are using the right Python" Section below. - -### Getting Python and required modules ### - -For a list of places to download Python, see http://www.python.org/download/. - -The GalSim package also requires - -* the numerical Python module NumPy (http://www.numpy.org). Currently GalSim is - regularly tested to ensure it works with NumPy 1.11.2, but other versions will - likely work. - -* the astronomical FITS file input/output module PyFITS available - either as a standalone package: - http://www.stsci.edu/institute/software_hardware/pyfits - or as part of the astropy library: - http://www.astropy.org/ - The latter is preferred, since this is now where all future development of - this package is happening. 
Currently GalSim is regularly tested to ensure - it works with astropy version 1.1.1, but it is likely that most recent - versions will also work. - -* the future module, which is used to ease compatibility between Python 2 - and Python 3. Currently GalSim is regularly tested to ensure - it works with version 0.16.0 of this module, but other versions may work. - -* the PyYAML package for parsing YAML files (http://pyyaml.org/wiki/PyYAML) - Note: PyYAML is only technically required if you are using the `galsim` - executable for parsing YAML config files. Users who will only use GalSim - in Python (or use only JSON config files) may skip this dependency. - Currently GalSim is regularly tested to ensure it works with version 3.12 - of this package, but other versions may work. - -* the LSSTDESC.Coord module (https://github.com/LSSTDESC/Coord), which is - used for angles and coordinates. It is a faster alternative to the - astropy.coordinates module for the use cases that we need. - -* Optional dependency: PyAst WCS package. This is a really nice front end - for the Starlink AST astrometry code. It seems to support pretty much - every WCS encoding there is. (At least every one we tried.) Their - preferred installation method is via pip: - pip install starlink-pyast - For more information, see their website: - https://pypi.python.org/pypi/starlink-pyast/ - With this installed, you can use the galsim.PyAstWCS class, which in - turn means that galsim.FitsWCS will pretty much always work. - -* Optional dependency: Astropy WCS package. We already mentioned astropy - above for astropy.io.fits. Another package we can use from astropy is - their WCS package, astropy.wcs. They cannot read all that many WCS types - (compared to PyAst at least), but hopefully the functionality will include - in time. Unfortunately, this package has scipy as a dependency, which - is kind of a gargantuan package. 
But if you are willing to install that - too, then you can use the galsim.AstropyWCS class. - -* Optional dependency: Astropy Units package. This is now required for - GalSim chromatic functionality, but can be omitted if you are not using - this part of GalSim. - -* Optional dependency: Pandas. This has a very fast function for reading ASCII - tables. If this is not available (e.g. when reading in Bandpass or SED - files) then we fall back to the (much) slower numpy loadtxt function. - -These should installed onto your Python system so that they can be imported by: - - >>> import numpy - >>> import astropy.io.fits [ Either this (preferred)... ] - >>> import pyfits [ ... or this. ] - >>> import future - >>> import yaml - >>> import coord - >>> import starlink.Ast [ if planning to use PyAstWCS class ] - >>> import astropy.wcs [ if planning to use AstropyWCS class ] - >>> import pandas [ for faster ASCII table input ] - -within Python. You can test this by loading up the Python interpreter for the -version of Python you will be using with the GalSim toolkit. This is usually -achieved by typing `python` or `/path/to/executable/bin/python` if your desired -Python is not the system default, and typing the `import` commands above. If -you get no warning message, things are OK. - -If you do not have these modules, follow the links above or alternatively try -`easy_install` (or equivalently `/path/to/executable/bin/easy_install` if your -desired Python is not the default). - -As an example, if using the default system Python, connected to the internet -and with root/admin privileges simply type - - easy_install numpy - easy_install pyfits - easy_install future - easy_install pyyaml - -at the prompt. If not using an admin account, prefix the commands above with -`sudo` and enter your admin password when prompted. The required modules should -then be installed. 
- -See http://packages.python.org/distribute/easy_install.html#using-easy-install -for more details about the extremely useful `easy_install` feature. - -Another option for installing these packages is pip. See pypi.python.org for -details about getting this installed if you do not already have it on your -system. Then - - pip install numpy - pip install astropy - pip install future - pip install pyyaml - pip install lsstdesc.coord - pip install starlink-pyast - pip install scipy - -### Third party-maintained Python packages ### - -There are a number of third party-maintained packages which bundle Python with -many of the numerical and scientific libraries that are commonly used, and -many of these are free for non-commercial or academic use. - -One good example of such a package, which includes all of the Python -dependencies required by GalSim (NumPy, PyFITS, PyYAML as well as SCons and -pytest; see Section 2 below) was the Enthought Python Distribution (EPD; see -https://enthought.com/products/canopy/academic/ for the academic download -instructions). - -The new Enthought "Canopy" package, a successor to EPD, provides the same -functionality. However, it has been found that Canopy on Mac OSX can give -problems building against Boost.Python, another GalSim dependency. A solution -to these issues is described here: -https://github.com/GalSim-developers/GalSim/wiki/Installation-FAQ#wiki-canopy - -Other re-packaged Python downloads can be found at -http://www.python.org/download/. - -### Making sure you are using the right Python ### - -Some users will find they have a few versions of Python around their operating -system (determined, for example, using `locate python` at the prompt). A common -way this will happen if there is already an older build (e.g. Python 2.4.X) -being used by the operating system and then you install a newer version from -one of the sources described above. 
- -It will be important to make sure that the version of Python for which NumPy, -PyFITS and PyYAML etc. are installed is also the one being used for GalSim, -and that this is the one *you* want to use GalSim from! Knowing which installed -version of Python will be used is also important for the installation of the -Boost libraries (see Section 1.v, below). - -To check which Python is your default you can identify the location of the -executable by, for example, typing - - which python - -at the prompt. This will tell you the location of the executable, something like - /path/to/executable/bin/python - -If this is not the Python you want, please edit your startup scripts (e.g. -`.profile` or `.bashrc`), and be sure to specify where your desired Python -version resides when installing the Boost C++ libraries (see Section 1.v). - -See Section 5 of this document for some suggestions about getting Python, Boost -and all the other dependencies all working well together on your specific -system. +that other Python 3.x versions are compatible, but these are the only ones +being actively tested. ii) SCons (http://www.scons.org) @@ -267,12 +48,16 @@ is often distributed as fftw3. See Section 5 for some suggestions about installing this on your platform. -iv) TMV (https://github.com/rmjarvis/tmv/) (version >= 0.72 required) +iv) TMV (https://github.com/rmjarvis/tmv/) (version >= 0.72) ----------------------------------------------------------------------- -GalSim uses the TMV library for its linear algebra routines. You should -download it from the site above and follow the instructions in its INSTALL -file for how to install it. Usually installing TMV just requires the command +GalSim can use either Eigen or TMV for its linear algebra routines. See +the appropriate section in INSTALL.md if you want to use Eigen. To use +TMV (which was required prior to version 2.0), read on. 
+ +You should download TMV from the site above and follow the instructions in its +INSTALL file for how to install it. Usually installing TMV just requires the +command scons install PREFIX= @@ -282,19 +67,14 @@ installation directory if you are comfortable installing it into `/usr/local`. However, if you are trying to install it into a system directory then you need to use sudo scons install [PREFIX=]. -Note: On Mac OS 10.7, the Apple BLAS library has problems when run using -multiple processes. So if you have such a system, we recommend getting a -different BLAS library, such as ATLAS (and making sure TMV finds it instead -of the system BLAS) or compiling TMV with no BLAS library at all (using -the SCons option `WITH_BLAS=false`). Otherwise, Galsim programs may hang -when run with multiple processes. e.g. `scons tests` by default uses -multiple processes, and multiple people reported problems using the Apple -system BLAS on OS 10.7. - v) Boost C++ (http://www.boost.org) ----------------------------------- +GalSim can use either PyBind11 or Boost for wrapping the C++ code to use in +Python. See the appropriate section in INSTALL.md if you want to use PyBind11. +To use Boost (which was required prior to version 2.0), read on. + GalSim makes use of some of the Boost C++ libraries, and these parts of Boost must be installed. Currently GalSim is regularly tested to ensure it works with Boost version 1.61, but it is likely that most versions released within the @@ -360,8 +140,7 @@ options to the ./bootstrap.sh installation script (defaults in `[]` brackets): ======================================= Once you have installed all the dependencies described above, you are ready to -build GalSim. From the GalSim base directory (in which this INSTALL.md file is -found) type +build GalSim. From the GalSim base directory (in which this file is found) type scons @@ -384,10 +163,13 @@ installing TMV, i.e. The TMV library and include files are installed in `/lib` and `/include`. 
Some important options that you may need to set are: -* `TMV_DIR`: Explicitly give the TMV prefix - * `FFTW_DIR`: Explicitly give the FFTW prefix +* `USE_TMV`: Specify that you want to use TMV rather than Eigen. +* `TMV_DIR`: Explicitly give the TMV prefix +* `EIGEN_DIR`: Explicitly give the Eigen prefix + +# `USE_BOOST`: Specify that you want to use Boost rather than PyBind11. * `BOOST_DIR`: Explicitly give the Boost prefix * `EXTRA_LIBS`: Additional libraries to send to the linker @@ -582,8 +364,8 @@ intend to use for running GalSim. The solution may be to install Boost C++ manually. This can be done by following the instructions of Section 1.v), above. -ii) Mac OSX 10.8 and earlier ----------------------------- +ii) Mac OSX +----------- a) Use of Fink -- the `fink` (http://www.finkproject.org) package management software is popular with Mac users. Once it is installed, you can get either most or all of the prerequisites using it, depending on whether you want @@ -691,27 +473,6 @@ Some users may find that the last step results in an inability to import the GalSim module. In that case, you can clear that addition to DYLD_LIBRARY_PATH and instead add /opt/local/lib to DYLD_FALLBACK_LIBRARY_PATH. -Notes on MacPorts with Mac OS X 10.8: -The use of `sudo` in the above commands may elicit an error message that says -"dyld: DYLD_ environment variables being ignored because main executable -(/usr/bin/sudo) is setuid or setgid". This is the result of a bug in Mac OS X -10.8, and will not prevent the installation of GalSim with the above steps from -being successful. - -Notes on MacPorts version of gcc with Mac OS X 10.5.8: -If you have installed a MacPorts version of gcc (e.g., "mp-gcc47"), it may not -link correctly with the other MacPorts installed modules, which are compiled in -the system gcc versions. 
To check what gcc versions are available to you, try -the command - - port select --list gcc - -then switch to the system gcc version (either 4.0 or 4.2) with - - sudo port select --set gcc gcc42 - -and compile GalSim with the system gcc. - c) Homebrew (http://mxcl.github.com/homebrew/) -- another package manager for Max OSX. Currently GalSim is available on homebrew, so it (plus dependencies) should be installable via @@ -720,23 +481,6 @@ should be installable via brew install gal-sim -iii) Mac OSX 10.9 (Mavericks) ------------------------------ - -Most of what applies above for earlier Mac OSX versions seems to apply for -GalSim on Mavericks too, although not all combinations have yet been tested. - -However, it has been found that GalSim and its dependencies can be sensitive -(e.g. Issue #483) to the fact that under Mavericks the system `gcc` is NOT in -fact the Gnu Compiler Collection, but in fact Clang masquerading as such. This -can lead to problems when linking libraries, as described in the following -GalSim Wiki FAQ item: -https://github.com/GalSim-developers/GalSim/wiki/Installation-FAQ#wiki-what-should-i-do-about-undefined-symbols-for-architecture-x86_64-errors - -The best success seems to be achieved in Mavericks by *explicitly* specifying -`clang` and `clang++` as the compiler to GalSim and all its dependencies when -building (as in the example above). - iv) Docker ---------- @@ -746,6 +490,7 @@ Karen Ng has created a Docker file for containerizing GalSim. See her repo: for instructions about how to either use her image or create your own. + 6. More SCons options ===================== @@ -791,6 +536,8 @@ You can list these options from the command line with * `WARN` (False) specifies whether to add warning compiler flags such as `-Wall`. +* `COVER` (False) specifies whether to add unit test coverage of the C++ layer. + * `PYTHON` (/usr/bin/env python) specifies which version of Python you are planning to use GalSim with. 
If you choose not to use the default here, then you need to remember to use the correct Python version @@ -812,6 +559,15 @@ You can list these options from the command line with ### Flags that specify where to look for external libraries ### +* `FFTW_DIR` ('') specifies the root location of FFTW. The header files should + be in `FFTW_DIR/include` and the library files in `FFTW_DIR/lib`. + +* `EIGEN_DIR` ('') specifies the root location of the Eigen header files. + The Core include file for Eigen should located at `EIGEN_DIR/Eigen/Core`. + +* `USE_TMV` (False) specifies to use TMV rather than Eigen for the linear + algebra code in the C++ layer. + * `TMV_DIR` ('') specifies the location of TMV if it is not in a standard location. This should be the same value as you used for PREFIX when installing TMV. @@ -819,21 +575,20 @@ You can list these options from the command line with * `TMV_LINK` ('') specifies the location of the tmv-link file. Normally, this is in `TMV_DIR/share`, but if not, you can specify the correct location here. -* `FFTW_DIR` ('') specifies the root location of FFTW. The header files should - be in `FFTW_DIR/include` and the library files in `FFTW_DIR/lib`. - -* `BOOST_DIR` ('') specifies the root location of BOOST The header files should - be in `BOOST_DIR/include/boost` and the library files in `BOOST_DIR/lib`. - -* `USE_BOOST` (False) specifies whether to use a local boost installation for - some optional boost header files. We bundle the boost.random implementation - from a specific boost version (1.48) to make sure "random" variable generation - is deterministic across machines and over time. To make it fully self- +* `USE_BOOST` (False) specifies whether to use Boost.Python for wrapping the + C++ code rather than PyBind11. If this is set, it will also use your + Boost installation for some header files used by the random number + generator code. 
We bundle the boost.random implementation from a specific + boost version (1.48) to make sure "random" variable generation is + deterministic across machines and over time. To make it fully self- contained, we edited them slightly to not include many of the complicated workarounds boost has for specific compilers and such. However, those workarounds can be reenabled by setting USE_BOOST=True if your system needs them. +* `BOOST_DIR` ('') specifies the root location of BOOST The header files should + be in `BOOST_DIR/include/boost` and the library files in `BOOST_DIR/lib`. + * `EXTRA_INCLUDE_PATH` ('') specifies extra directories in which to search for header files in addition to the standard locations such as `/usr/include` and `/usr/local/include` and the ones derived from the above options. Sometimes diff --git a/SConstruct b/SConstruct index 710a13f9e7f..4908565dabf 100644 --- a/SConstruct +++ b/SConstruct @@ -87,6 +87,13 @@ opts.Add(BoolVariable('WITH_UPS','Install ups/ directory for use with EUPS', Fal opts.Add('FFTW_DIR','Explicitly give the fftw3 prefix','') opts.Add('EIGEN_DIR','Explicitly give the Eigen prefix','') +opts.Add(BoolVariable('USE_TMV','Use TMV for linear algebra, rather than Eigen',False)) +opts.Add('TMV_DIR','Explicitly give the tmv prefix','') +opts.Add('TMV_LINK','File that contains the linking instructions for TMV','') + +opts.Add(BoolVariable('USE_BOOST','Use boost python for the wrapping, rather than pybind11',False)) +opts.Add('BOOST_DIR','Explicitly give the boost prefix','') + opts.Add(PathVariable('EXTRA_INCLUDE_PATH', 'Extra paths for header files (separated by : if more than 1)', '', PathVariable.PathAccept)) @@ -120,19 +127,12 @@ opts.Add(PathVariable('LD_LIBRARY_PATH', 'cf. 
DYLD_LIBRARY_PATH for why this may be useful.', '', PathVariable.PathAccept)) -opts.Add(BoolVariable('USE_TMV','Use TMV for linear algebra, rather than Eigen',False)) -opts.Add('TMV_DIR','Explicitly give the tmv prefix','') -opts.Add('TMV_LINK','File that contains the linking instructions for TMV','') -opts.Add(BoolVariable('TMV_DEBUG','Turn on extra debugging statements within TMV library',False)) - -opts.Add(BoolVariable('USE_BOOST','Use boost python for the wrapping, rather than pybind11',False)) -opts.Add('BOOST_DIR','Explicitly give the boost prefix','') - opts.Add('PYTEST','Name of pytest executable','') opts.Add(BoolVariable('CACHE_LIB','Cache the results of the library checks',True)) opts.Add(BoolVariable('WITH_PROF', 'Use the compiler flag -pg to include profiling info for gprof', False)) opts.Add(BoolVariable('MEM_TEST','Test for memory leaks', False)) +opts.Add(BoolVariable('TMV_DEBUG','Turn on extra debugging statements within TMV library',False)) # None of the code uses openmp yet. Re-enable this if we start using it. #opts.Add(BoolVariable('WITH_OPENMP','Look for openmp and use if found.', False)) opts.Add(BoolVariable('USE_UNKNOWN_VARS', From a3ef511f05cbeb09dadfedf92e496d4d6b1e85eb Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Fri, 16 Feb 2018 18:04:05 -0500 Subject: [PATCH 056/111] Describe new installation method in INSTALL.md (#809-pybind11) --- INSTALL.md | 428 +++++++++++++++++++++++++++++++++++++++++++++++ requirements.txt | 1 + setup.py | 3 +- 3 files changed, 431 insertions(+), 1 deletion(-) create mode 100644 INSTALL.md diff --git a/INSTALL.md b/INSTALL.md new file mode 100644 index 00000000000..20a61f6cb30 --- /dev/null +++ b/INSTALL.md @@ -0,0 +1,428 @@ +Installation Instructions +========================= + +System requirements: GalSim currently only supports Linux and Mac OSX. 
+ +Table of Contents: + +1) [Overall summary](#1-overall-summary) + +2) [Installing Eigen](#2-installing-eigen) + +3) [Installing FFTW](#3-installing-fftw) + +4) [Using Conda](#4-using-conda) + +5) [Installing With SCons](#5-installing-with-scons) + +6) [Running tests](#6-running-tests) + +7) [Running example scripts](#7-running-example-scripts) + + +1. Overall summary +================== + +GalSim is a python module that has much of its implementation in C++ for +improved computational efficiency. GalSim supports both Python 2 and +Python 3. It is regularly tested on Python versions (2.7, 3.4, 3.5, 3.6). + +The usual way to install GalSim is now (starting with version 2.0) simply + + pip install galsim + +or by cloning the repo and doing + + python setup.py install + +Note that you may need to use sudo with the above commands if they are +installing into system directories. If you do not have write privileges for +the directory that the above commands would install into, you can use the +--user flag to install into a local directory. (Normally something like +$HOME/Library/Python/2.7 or $HOME/.local, depending on your system and which +brand of Python you are using.) + +Either of these installation methods will automatically install most of the +required dependencies for you if you do not have them already installed on your +machine. There are two exceptions, however. Eigen and FFTW are not directly +pip installable, so if the above installation fails, you may need to install +these separately. See the sections 2 and 3 below for more details about how to +do this. + +The other dependencies should all be installed automatically, but they +are listed here for completeness along with versions that are known to work. 
+In most cases, other recent versions will also work: + +- NumPy (1.14.0) +- Future (0.16.0) +- Astropy (2.0.3) +- PyBind11 (2.2.1) +- LSSTDESC.Coord (1.0.5) + +There are a few others modules are not technically required, but we let pip +install them along with GalSim, because they either add useful functionality +or efficiency to GalSim. These are listed in the requirements.txt file that +pip uses to determine what else to install. But if you install with +`python setup.py install`, then these will not be installed. + +- Starlink (3.10.0) (Improved WCS functionality) +- PyYaml (3.12) (Reads YAML config files) +- Pandas (0.20) (Faster reading of ASCII input files) + +If you would like to install all the above pip-installable dependencies +separately from installing GalSim, the easiest way is to use the command + + pip install -r requirements.txt + +in the GalSim directory. + + +2. Installing Eigen +=================== + +GalSim uses Eigen for the C++-layer linear algebra calculations. It is a +header-only library, which means that nothing needs to be compiled to use them. +You just need to download the header files and make sure GalSim can find them. + +We require Eigen version >= 3.0. Most tests have been done with Eigen 3.3.4, +but we have also used 3.2.8 and 3.0.4, so probably any 3.x version will work. +However, if you have trouble with another version, try upgrading to 3.3.4 or +later. + +Note: Prior to version 2.0, GalSim used TMV for the linear algebra back end. +This is still an option if you prefer (e.g. it may be faster for some use +cases, since it can use an optimized BLAS library on your system), but to +use TMV, you need to use the SCons installation option described below. 
+ + +i) Installing it yourself +------------------------- + +Eigen is available at the URL + + http://eigen.tuxfamily.org/index.php + +As of this writing, version 3.3.4 is the current latest release, for which +the following commands should work to download and install it: + + wget http://bitbucket.org/eigen/eigen/get/3.3.4.tar.bz2 + tar xfj 3.3.4.tar.bz2 + sudo cp eigen-eigen-5a0156e40feb/Eigen /usr/local/include + +In the final cp line, the MD5 hash (5a0156e40feb) will presumably change for +other versions, so use whatever directory tar expands into if you are using +a different version than 3.3.4. + +If you do not have sudo privileges, you can copy to a different directory such +as $HOME/include instead and leave off the sudo from the cp command. In this +case, make sure this directory is in your C_INCLUDE_PATH environment variable. + +Finally, you can also skip the last command above and instead set EIGEN_DIR +as an environment variabe to tell GalSim where the files are + + export EIGEN_DIR=/some/path/to/eigen + +This should be the directory in which the Eigen subdirectory is found. E.g. + + export EIGEN_DIR=$HOME/eigen-eigen-5a0156e40feb + +Probably, you should put this into your .bash_profile file so it always gets +set when you log in. + + +ii) Using an existing installation +---------------------------------- + +If Eigen is already installed on your system, there may be nothing to do. +If it is in a standard location like /usr/local/include or in some other +directory in your C_INCLUDE_PATH, then GalSim should find it without +any extra work on your part. 
+ +If it is in a non-standard location, and you do not want to add this path +to your C_INCLUDE_PATH, then you can instead set the EIGEN_DIR environment +variable to tell GalSim where to look + + export EIGEN_DIR=/some/path/to/eigen + +For instance, if Eigen was installed into /usr/include/eigen3, then you +could use that with + + export EIGEN_DIR=/usr/include/eigen3 + +This command would normally be done in your .bash_profile file so it gets +executed every time you log in. + +If you have multiple versions of Eigen installed on your system, this variable +can be used to specify which version you want GalSim to use as this will be +the first location it will check during the installation process. + + +iii) Using conda +---------------- + +If you use conda, Eigen can be install with + + conda install eigen + +This will put it into the anaconda/include directory on your system (within +your active environment if appropriate). GalSim knows to look here, so there +is nothing dditional you need to do. + + +iv) Using fink +-------------- + +If you use fink on a Mac, Eigen can be installed with + + fink install eigen + +This will put it into the /sw/include directory on your system. GalSim knows +to look here, so there is nothing dditional you need to do. + + +v) Using MacPorts +----------------- + +If you use MacPorts, Eigen can be installed with + + port install eigen + +This will put it into the /opt/local/include directory on your system. GalSim +knows to look here, so there is nothing dditional you need to do. + + +vi) Using eigency +----------------- + +Eigency is a pip-installable module that bundles the Eigen header files, so it +can also be used to install these files on your system. Unfortunately, at least +as of version 1.75, there are errors in their setup.py file, so the pip version +does not actually work for this purpose. 
+ +However, Mike Jarvis has a fork that fixes these errors, which you can pip +install manually using the command + + pip install git+git://github.com/rmjarvis/eigency.git@33d8d65417484 + + +3. Installing FFTW +================== + +GalSim uses FFTW (The Fastest Fourier Transform in the West) for performing +fast fourier transforms. + +We require FFTW version >= 3.0. Most tests have been done with FFTW 3.3.7, +so if you have trouble with an earlier version, try upgrading to 3.3.7 or later. + + +i) Installing it yourself +------------------------- + +FFTW is available at the URL + + http://www.fftw.org/download.html + +As of this writing, version 3.3.7 is the current latest release, for which +the following commands should work to download and install it: + + wget http://www.fftw.org/fftw-3.3.7.tar.gz + tar xfz fftw-3.3.7.tar.gz + cd fftw-3.3.7 + ./configure + make + sudo make install + +If you want to install into a different directory (e.g. because you do not +have sudo privileges on your machine), then specify the alternate directory +with the --prefix flag to configure. E.g. + + ./congigure --prefix=$HOME + +which will install the library into $HOME/lib and the header file into +$HOME/include. In this case, leave of the sudo from the last line. +Also, you should make sure these directories are in your LD_LIBRARY_PATh +and C_INCLUDE_PATH environment variables, respectively. + +Alternatively, if you do not want to modify your LD_LIBRARY_PATH and/or +C_INCLUDE_PATH, you can instead set an environment variabe to tell GalSim +where the files are + + export FFTW_DIR=/path/to/fftw/prefix + +E.g. in the above case where prefix is $HOME, you would do + + export FFTW_DIR=$HOME + +Probably, you should put this into your .bash_profile file so it always gets +set when you log in. + + +ii) Using an existing installation +---------------------------------- + +If FFTW is already installed on your system, there may be nothing to do. 
+If it is in a standard location like /usr/local/lib or in some other +directory in your LD_LIBRARY_PATH, then GalSim should find it without +any extra work on your part. + +If it is in a non-standard location, and you do not want to add this path +to your LD_LIBRARY_PATH, then you can instead set the FFTW_DIR environment +variable to tell GalSim where to look + + export FFTW_DIR=/some/path/to/fftw + +For instance, if libfftw3.so is located in /opt/cray/pe/lib64, you could use +that with + + export FFTW_DIR=/opt/cray/pe/lib64 + +This command would normally be done in your .bash_profile file so it gets +executed every time you log in. + +If you have multiple versions of FFTW installed on your system, this variable +can be used to specify which version you want GalSim to use as this will be +the first location it will check during the installation process. + + +iii) Using conda +---------------- + +If you use conda, FFTW can be install with + + conda install fftw + +This will put it into the anaconda/lib directory on your system (within your +active environment if appropriate). GalSim knows to look here, so there is +nothing dditional you need to do. + + +iv) Using fink +-------------- + +If you use fink on a Mac, FFTW can be installed with + + fink install fftw3 + +(Make sure to use fftw3, not fftw, since fftw is version 2.) + +This will put it into the /sw/lib directory on your system. GalSim knows to +look here, so there is nothing dditional you need to do. + + +v) Using MacPorts +----------------- + +If you use MacPorts, FFTW can be installed with + + port install fftw-3 + +This will put it into the /opt/loca/lib directory on your system. GalSim knows +to look here, so there is nothing dditional you need to do. + + +4. 
Using Conda +============== + +If you use conda (normally via the Anaconda Python distribution), then all of +the prerequisites are available from the conda-forge channel, so you can use +that as follows (from within the main GalSim directory): + + conda create -y -n galsim + conda activate galsim + conda install -y -c conda-forge --file conda_requirements.txt + pip install . + +The first two lines are optional, but they let you keep the GalSim installation +separate from any other conda environments you might have. + +If your conda version is 4.3 or earlier, replace the above conda activate line +with + + source activate galsim + +which does the same thing. They just changed the name of this command to use +the conda executable instead of source. + +Also, if you prefer to use the defaults channel, then (at least as of this +writing), it had all the items in conda_requirements.txt, except for pybind11. +So if you have conda-forge in your list of channels, but it comes after +defaults, then that should still work and pybind11 will be the only one that +will need the conda-forge channel. + + +5. Installing With SCons +======================== + +Prior to version 2.0, GalSim installation used SCons. This installation +mode is still supported, but is not recommended unless you have difficulties +with the setup.py installation. + +Note: Two options that are available with the SCons installation method, +but not the setup.py method, are (1) using TMV instead of Eigen for the linear +algebra back end, and (2) using Boost.Python instead of PyBind11 for the +wrapping the C++ code to be called from Python. If you need either of these +options, then you should use the SCons installation. + +See the file INSTALL_SCONS.md for complete details about this method of +installation. + + +6. 
Running tests
+================
+
+You can run our test suite by typing
+
+    python setup.py test
+
+This should run all the python-layer tests with pytest and also compile and
+run the C++ test suite.
+
+By default, the python tests will use the pytest plugins `pytest-xdist` (for
+running tests in parallel) and `pytest-timeout` (to manage how much time each
+test is allowed to run). These plugins are usually installable using pip:
+
+    pip install pytest-xdist pytest-timeout
+
+Sometimes the `--user` flag may be needed in the above command to make the
+plugins discoverable. If you want to run the python tests without these
+plugins (serially!), you can still do this via
+
+    python setup.py test -j1
+
+Note: if your system does not have `pytest` installed, and you do not want to
+install it, you can run all the Python tests with the script run_all_tests in
+the `tests` directory. If this finishes without an error, then all the tests
+have passed. However, note that this script runs more tests than our normal
+test run using pytest, so it may take quite a while to finish. (The *all* in
+the file name means run all the tests including the slow ones that we normally
+skip.)
+
+
+7. Running example scripts
+==========================
+
+The `examples` directory has a series of demo scripts:
+
+    demo1.py, demo2.py, ...
+
+These can be considered a tutorial on getting up to speed with GalSim. Reading
+through these in order will introduce you to how to use most of the features of
+GalSim in Python. To run these scripts, type (e.g.):
+
+    python demo1.py
+
+There are also a corresponding set of config files:
+
+    demo1.yaml, demo2.yaml, ...
+
+These files can be run using the executable `galsim`, and will produce the
+same output images as the Python scripts:
+
+    galsim demo1.yaml
+
+They are also well commented, and can be considered a parallel tutorial for
+learning the config file usage of GalSim. 
+ +All demo scripts are designed to be run in the `GalSim/examples` directory. +Some of them access files in subdirectories of the `examples` directory, so they +would not work correctly from other locations. diff --git a/requirements.txt b/requirements.txt index 0f55316089b..c5b8a3d074e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,3 +14,4 @@ pip >= 9.0 # This is not in conda. Let pip install these. LSSTDESC.Coord >= 1.0.5 +starlink-pyast >= 3.9.0 diff --git a/setup.py b/setup.py index d82ff53b054..7f6d7e9a55f 100644 --- a/setup.py +++ b/setup.py @@ -576,7 +576,8 @@ def run_tests(self): undef_macros = undef_macros) build_dep = ['pybind11>=2.2'] -run_dep = ['numpy', 'future', 'astropy', 'pyyaml', 'LSSTDESC.Coord', 'pandas', 'starlink-pyast'] +run_dep = ['numpy', 'future', 'astropy', 'LSSTDESC.Coord', # Required. + 'pyyaml', 'pandas', 'starlink-pyast'] # Not technically required, but useful. test_dep = ['pytest', 'pytest-xdist', 'pytest-timeout', 'scipy'] with open('README.md') as file: From acf6c7f3550e7e7763472b87c518a20a5b845533 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Fri, 16 Feb 2018 18:15:40 -0500 Subject: [PATCH 057/111] Update CHANGELOG (#809-pybind11) --- CHANGELOG.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4742e2126cf..ade742af661 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,9 @@ Dependency Changes repo so people could more easily use this functionality without requiring all of GalSim as a dependency. (#809b) - Removed dependency on boost. -- Added dependency on (pybind11 or cffi...) +- Removed dependency on TMV. +- Added dependency on pybind11. +- Added dependency on Eigen. API Changes @@ -43,6 +45,8 @@ API Changes InclinedSersic has been changed to disk_half_light_radius, since it does not really correspond to the realized half-light radius of the inclined profile (unless the inclination angle is 0 degrees). 
(#809f) +- Removed galsim_yaml and galsim_json scripts, which were essentially just + aliases for galsim -f yaml and galsim -f json respectively. (#809f) Bug Fixes From 94a7e5dd8ffb1d40ef01ef8518486ce9930a151a Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 19 Feb 2018 15:15:09 -0500 Subject: [PATCH 058/111] Go back to using eigency for Eigen now that 1.77 is released. (#809-pybind11) --- CHANGELOG.md | 9 +- INSTALL.md | 258 +++++++++++++++++++++-------------------- conda_requirements.txt | 2 + requirements.txt | 1 + setup.py | 54 ++++++--- 5 files changed, 174 insertions(+), 150 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ade742af661..c5226052ba4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,10 +12,11 @@ Dependency Changes GalSim as the Angle and CelestialCoord classes. We moved it to a separate repo so people could more easily use this functionality without requiring all of GalSim as a dependency. (#809b) -- Removed dependency on boost. -- Removed dependency on TMV. -- Added dependency on pybind11. -- Added dependency on Eigen. +- Removed dependency on boost. (#809) +- Removed dependency on TMV. (#809) +- Added dependency on pybind11. (#809) +- Added dependency on Eigen. (#809) +- FFTW is now the only dependency that pip cannot handle automatically. (#809) API Changes diff --git a/INSTALL.md b/INSTALL.md index 20a61f6cb30..a3b093a9887 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -7,9 +7,9 @@ Table of Contents: 1) [Overall summary](#1-overall-summary) -2) [Installing Eigen](#2-installing-eigen) +2) [Installing FFTW](#2-installing-fftw) -3) [Installing FFTW](#3-installing-fftw) +3) [Installing Eigen](#3-installing-eigen) 4) [Using Conda](#4-using-conda) @@ -44,15 +44,15 @@ brand of Python you are using.) Either of these installation methods will automatically install most of the required dependencies for you if you do not have them already installed on your -machine. There are two exceptions, however. 
Eigen and FFTW are not directly -pip installable, so if the above installation fails, you may need to install -these separately. See the sections 2 and 3 below for more details about how to -do this. +machine. There is one exception, however. FFTW is not directly pip +installable, so if the above installation fails, you may need to install +it separately. See the sections 2 below for more details about how to do this. The other dependencies should all be installed automatically, but they are listed here for completeness along with versions that are known to work. In most cases, other recent versions will also work: +- Eigen (3.2.8) (via eigency 1.77) - NumPy (1.14.0) - Future (0.16.0) - Astropy (2.0.3) @@ -77,54 +77,53 @@ separately from installing GalSim, the easiest way is to use the command in the GalSim directory. -2. Installing Eigen -=================== - -GalSim uses Eigen for the C++-layer linear algebra calculations. It is a -header-only library, which means that nothing needs to be compiled to use them. -You just need to download the header files and make sure GalSim can find them. +2. Installing FFTW +================== -We require Eigen version >= 3.0. Most tests have been done with Eigen 3.3.4, -but we have also used 3.2.8 and 3.0.4, so probably any 3.x version will work. -However, if you have trouble with another version, try upgrading to 3.3.4 or -later. +GalSim uses FFTW (The Fastest Fourier Transform in the West) for performing +fast fourier transforms. -Note: Prior to version 2.0, GalSim used TMV for the linear algebra back end. -This is still an option if you prefer (e.g. it may be faster for some use -cases, since it can use an optimized BLAS library on your system), but to -use TMV, you need to use the SCons installation option described below. +We require FFTW version >= 3.0. Most tests have been done with FFTW 3.3.7, +so if you have trouble with an earlier version, try upgrading to 3.3.7 or later. 
i) Installing it yourself ------------------------- -Eigen is available at the URL +FFTW is available at the URL - http://eigen.tuxfamily.org/index.php + http://www.fftw.org/download.html -As of this writing, version 3.3.4 is the current latest release, for which +As of this writing, version 3.3.7 is the current latest release, for which the following commands should work to download and install it: - wget http://bitbucket.org/eigen/eigen/get/3.3.4.tar.bz2 - tar xfj 3.3.4.tar.bz2 - sudo cp eigen-eigen-5a0156e40feb/Eigen /usr/local/include + wget http://www.fftw.org/fftw-3.3.7.tar.gz + tar xfz fftw-3.3.7.tar.gz + cd fftw-3.3.7 + ./configure + make + sudo make install -In the final cp line, the MD5 hash (5a0156e40feb) will presumably change for -other versions, so use whatever directory tar expands into if you are using -a different version than 3.3.4. +If you want to install into a different directory (e.g. because you do not +have sudo privileges on your machine), then specify the alternate directory +with the --prefix flag to configure. E.g. -If you do not have sudo privileges, you can copy to a different directory such -as $HOME/include instead and leave off the sudo from the cp command. In this -case, make sure this directory is in your C_INCLUDE_PATH environment variable. + ./congigure --prefix=$HOME -Finally, you can also skip the last command above and instead set EIGEN_DIR -as an environment variabe to tell GalSim where the files are +which will install the library into $HOME/lib and the header file into +$HOME/include. In this case, leave of the sudo from the last line. +Also, you should make sure these directories are in your LD_LIBRARY_PATh +and C_INCLUDE_PATH environment variables, respectively. 
- export EIGEN_DIR=/some/path/to/eigen +Alternatively, if you do not want to modify your LD_LIBRARY_PATH and/or +C_INCLUDE_PATH, you can instead set an environment variabe to tell GalSim +where the files are -This should be the directory in which the Eigen subdirectory is found. E.g. + export FFTW_DIR=/path/to/fftw/prefix - export EIGEN_DIR=$HOME/eigen-eigen-5a0156e40feb +E.g. in the above case where prefix is $HOME, you would do + + export FFTW_DIR=$HOME Probably, you should put this into your .bash_profile file so it always gets set when you log in. @@ -133,26 +132,26 @@ set when you log in. ii) Using an existing installation ---------------------------------- -If Eigen is already installed on your system, there may be nothing to do. -If it is in a standard location like /usr/local/include or in some other -directory in your C_INCLUDE_PATH, then GalSim should find it without +If FFTW is already installed on your system, there may be nothing to do. +If it is in a standard location like /usr/local/lib or in some other +directory in your LD_LIBRARY_PATH, then GalSim should find it without any extra work on your part. If it is in a non-standard location, and you do not want to add this path -to your C_INCLUDE_PATH, then you can instead set the EIGEN_DIR environment +to your LD_LIBRARY_PATH, then you can instead set the FFTW_DIR environment variable to tell GalSim where to look - export EIGEN_DIR=/some/path/to/eigen + export FFTW_DIR=/some/path/to/fftw -For instance, if Eigen was installed into /usr/include/eigen3, then you -could use that with +For instance, if libfftw3.so is located in /opt/cray/pe/lib64, you could use +that with - export EIGEN_DIR=/usr/include/eigen3 + export FFTW_DIR=/opt/cray/pe/lib64 This command would normally be done in your .bash_profile file so it gets executed every time you log in. 
-If you have multiple versions of Eigen installed on your system, this variable +If you have multiple versions of FFTW installed on your system, this variable can be used to specify which version you want GalSim to use as this will be the first location it will check during the installation process. @@ -160,98 +159,95 @@ the first location it will check during the installation process. iii) Using conda ---------------- -If you use conda, Eigen can be install with +If you use conda, FFTW can be install with - conda install eigen + conda install fftw -This will put it into the anaconda/include directory on your system (within -your active environment if appropriate). GalSim knows to look here, so there -is nothing dditional you need to do. +This will put it into the anaconda/lib directory on your system (within your +active environment if appropriate). GalSim knows to look here, so there is +nothing dditional you need to do. iv) Using fink -------------- -If you use fink on a Mac, Eigen can be installed with - - fink install eigen - -This will put it into the /sw/include directory on your system. GalSim knows -to look here, so there is nothing dditional you need to do. - - -v) Using MacPorts ------------------ +If you use fink on a Mac, FFTW can be installed with -If you use MacPorts, Eigen can be installed with + fink install fftw3 - port install eigen +(Make sure to use fftw3, not fftw, since fftw is version 2.) -This will put it into the /opt/local/include directory on your system. GalSim -knows to look here, so there is nothing dditional you need to do. +This will put it into the /sw/lib directory on your system. GalSim knows to +look here, so there is nothing dditional you need to do. -vi) Using eigency +v) Using MacPorts ----------------- -Eigency is a pip-installable module that bundles the Eigen header files, so it -can also be used to install these files on your system. 
Unfortunately, at least -as of version 1.75, there are errors in their setup.py file, so the pip version -does not actually work for this purpose. +If you use MacPorts, FFTW can be installed with -However, Mike Jarvis has a fork that fixes these errors, which you can pip -install manually using the command + port install fftw-3 - pip install git+git://github.com/rmjarvis/eigency.git@33d8d65417484 +This will put it into the /opt/loca/lib directory on your system. GalSim knows +to look here, so there is nothing dditional you need to do. -3. Installing FFTW -================== +3. Installing Eigen +=================== -GalSim uses FFTW (The Fastest Fourier Transform in the West) for performing -fast fourier transforms. +GalSim uses Eigen for the C++-layer linear algebra calculations. It is a +header-only library, which means that nothing needs to be compiled to use them. +You can download the header files yourself, but if you do not, then we use +the pip-installable eigency module, which bundles the header files in their +installed python directory. So usually, this dependency should require no +work on your part. + +However, it might become useful to install Eigen separately from eigency +e.g. if you want to upgrade to a newer version of Eigen than the one that is +bundled with eigency. (Eigen 3.2.8 is bundled with eigency 1.77.) Therefore, +this section describes several options for how to obtain and install Eigen. + +We require Eigen version >= 3.0. Most tests have been done with Eigen 3.2.8 +or 3.3.4, but we have also 3.0.4, so probably any 3.x version will work. +However, if you have trouble with another version, try upgrading to 3.2.8 or +later. -We require FFTW version >= 3.0. Most tests have been done with FFTW 3.3.7, -so if you have trouble with an earlier version, try upgrading to 3.3.7 or later. +Note: Prior to version 2.0, GalSim used TMV for the linear algebra back end. +This is still an option if you prefer (e.g. 
it may be faster for some use +cases, since it can use an optimized BLAS library on your system), but to +use TMV, you need to use the SCons installation option described below. i) Installing it yourself ------------------------- -FFTW is available at the URL +Eigen is available at the URL - http://www.fftw.org/download.html + http://eigen.tuxfamily.org/index.php -As of this writing, version 3.3.7 is the current latest release, for which +As of this writing, version 3.3.4 is the current latest release, for which the following commands should work to download and install it: - wget http://www.fftw.org/fftw-3.3.7.tar.gz - tar xfz fftw-3.3.7.tar.gz - cd fftw-3.3.7 - ./configure - make - sudo make install - -If you want to install into a different directory (e.g. because you do not -have sudo privileges on your machine), then specify the alternate directory -with the --prefix flag to configure. E.g. + wget http://bitbucket.org/eigen/eigen/get/3.3.4.tar.bz2 + tar xfj 3.3.4.tar.bz2 + sudo cp eigen-eigen-5a0156e40feb/Eigen /usr/local/include - ./congigure --prefix=$HOME +In the final cp line, the MD5 hash (5a0156e40feb) will presumably change for +other versions, so use whatever directory tar expands into if you are using +a different version than 3.3.4. -which will install the library into $HOME/lib and the header file into -$HOME/include. In this case, leave of the sudo from the last line. -Also, you should make sure these directories are in your LD_LIBRARY_PATh -and C_INCLUDE_PATH environment variables, respectively. +If you do not have sudo privileges, you can copy to a different directory such +as $HOME/include instead and leave off the sudo from the cp command. In this +case, make sure this directory is in your C_INCLUDE_PATH environment variable. 
-Alternatively, if you do not want to modify your LD_LIBRARY_PATH and/or -C_INCLUDE_PATH, you can instead set an environment variabe to tell GalSim -where the files are +Finally, you can also skip the last command above and instead set EIGEN_DIR +as an environment variabe to tell GalSim where the files are - export FFTW_DIR=/path/to/fftw/prefix + export EIGEN_DIR=/some/path/to/eigen -E.g. in the above case where prefix is $HOME, you would do +This should be the directory in which the Eigen subdirectory is found. E.g. - export FFTW_DIR=$HOME + export EIGEN_DIR=$HOME/eigen-eigen-5a0156e40feb Probably, you should put this into your .bash_profile file so it always gets set when you log in. @@ -260,26 +256,26 @@ set when you log in. ii) Using an existing installation ---------------------------------- -If FFTW is already installed on your system, there may be nothing to do. -If it is in a standard location like /usr/local/lib or in some other -directory in your LD_LIBRARY_PATH, then GalSim should find it without +If Eigen is already installed on your system, there may be nothing to do. +If it is in a standard location like /usr/local/include or in some other +directory in your C_INCLUDE_PATH, then GalSim should find it without any extra work on your part. If it is in a non-standard location, and you do not want to add this path -to your LD_LIBRARY_PATH, then you can instead set the FFTW_DIR environment +to your C_INCLUDE_PATH, then you can instead set the EIGEN_DIR environment variable to tell GalSim where to look - export FFTW_DIR=/some/path/to/fftw + export EIGEN_DIR=/some/path/to/eigen -For instance, if libfftw3.so is located in /opt/cray/pe/lib64, you could use -that with +For instance, if Eigen was installed into /usr/include/eigen3, then you +could use that with - export FFTW_DIR=/opt/cray/pe/lib64 + export EIGEN_DIR=/usr/include/eigen3 This command would normally be done in your .bash_profile file so it gets executed every time you log in. 
-If you have multiple versions of FFTW installed on your system, this variable +If you have multiple versions of Eigen installed on your system, this variable can be used to specify which version you want GalSim to use as this will be the first location it will check during the installation process. @@ -287,37 +283,45 @@ the first location it will check during the installation process. iii) Using conda ---------------- -If you use conda, FFTW can be install with +If you use conda, Eigen can be install with - conda install fftw + conda install eigen -This will put it into the anaconda/lib directory on your system (within your -active environment if appropriate). GalSim knows to look here, so there is -nothing dditional you need to do. +This will put it into the anaconda/include directory on your system (within +your active environment if appropriate). GalSim knows to look here, so there +is nothing dditional you need to do. iv) Using fink -------------- -If you use fink on a Mac, FFTW can be installed with - - fink install fftw3 +If you use fink on a Mac, Eigen can be installed with -(Make sure to use fftw3, not fftw, since fftw is version 2.) + fink install eigen -This will put it into the /sw/lib directory on your system. GalSim knows to -look here, so there is nothing dditional you need to do. +This will put it into the /sw/include directory on your system. GalSim knows +to look here, so there is nothing dditional you need to do. v) Using MacPorts ----------------- -If you use MacPorts, FFTW can be installed with +If you use MacPorts, Eigen can be installed with - port install fftw-3 + port install eigen -This will put it into the /opt/loca/lib directory on your system. GalSim knows -to look here, so there is nothing dditional you need to do. +This will put it into the /opt/local/include directory on your system. GalSim +knows to look here, so there is nothing dditional you need to do. 
+ + +vi) Using eigency +----------------- + +Eigency is a pip-installable module that bundles the Eigen header files, so it +can also be used to install these files on your system. Indeed, as mentioned +above, we will use eigency automatically if Eigen is not found in one of the +above locations. So the above installations will take precendence, but +eigency should work as a fall-back. 4. Using Conda diff --git a/conda_requirements.txt b/conda_requirements.txt index dc78286d933..72c74af64f3 100644 --- a/conda_requirements.txt +++ b/conda_requirements.txt @@ -8,3 +8,5 @@ pandas >= 0.20 pybind11 >= 2.2 pip >= 9.0 gcc >= 4.8 +fftw >= 3.3 +eigen >= 3.3 diff --git a/requirements.txt b/requirements.txt index c5b8a3d074e..9338408ee06 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,6 +4,7 @@ # These are in conda_requirements.txt. If using that, you may prefer to do # conda install -c conda-forge --file conda_requirements.txt # prior to running pip install -r requirements.txt +eigency >= 1.77 numpy >= 1.13 future >= 0.15 astropy >= 2.0 diff --git a/setup.py b/setup.py index 7f6d7e9a55f..ba972225d7b 100644 --- a/setup.py +++ b/setup.py @@ -91,7 +91,7 @@ def get_compiler(cc): return 'unknown' # Check for the fftw3 library in some likely places -def find_fftw_lib(): +def find_fftw_lib(output=False): try_libdirs = [] lib_ext = '.so' if 'FFTW_DIR' in os.environ: @@ -118,28 +118,33 @@ def find_fftw_lib(): try: libpath = os.path.join(dir, name) lib = ctypes.cdll.LoadLibrary(libpath) - print("found %s at %s" %(name, libpath)) + if output: + print("found %s at %s" %(name, libpath)) return libpath except OSError as e: - print("Did not find %s in %s" %(name, libpath)) + if output: + print("Did not find %s in %s" %(name, libpath)) continue - print("Could not find %s in any of the normal locations"%name) - print("Trying ctypes.util.find_library") + if output: + print("Could not find %s in any of the normal locations"%name) + print("Trying ctypes.util.find_library") try: 
libpath = ctypes.util.find_library('fftw3') if libpath == None: raise OSError lib = ctypes.cdll.LoadLibrary(libpath) - print("found %s at %s" %(name, libpath)) + if output: + print("found %s at %s" %(name, libpath)) return libpath except Exception as e: - print("Could not find fftw3 library. Make sure it is installed either in a standard ") - print("location such as /usr/local/lib, or the installation directory is either in ") - print("your LIBRARY_PATH or FFTW_DIR environment variable.") + if output: + print("Could not find fftw3 library. Make sure it is installed either in a standard ") + print("location such as /usr/local/lib, or the installation directory is either in ") + print("your LIBRARY_PATH or FFTW_DIR environment variable.") raise # Check for Eigen in some likely places -def find_eigen_dir(): +def find_eigen_dir(output=False): try_dirs = [] if 'EIGEN_DIR' in os.environ: try_dirs.append(os.environ['EIGEN_DIR']) @@ -163,15 +168,18 @@ def find_eigen_dir(): for dir in try_dirs: if os.path.isfile(os.path.join(dir, 'Eigen/Core')): - print("found Eigen at", dir) + if output: + print("found Eigen at", dir) return dir if os.path.isfile(os.path.join(dir, 'eigen3', 'Eigen/Core')): dir = os.path.join(dir, 'eigen3') - print("found Eigen at", dir) + if output: + print("found Eigen at", dir) return dir - print("Could not find Eigen. Make sure it is installed either in a standard ") - print("location such as /usr/local/include, or the installation directory is either in ") - print("your C_INCLUDE_PATH or EIGEN_DIR environment variable.") + if output: + print("Could not find Eigen. 
Make sure it is installed either in a standard ") + print("location such as /usr/local/include, or the installation directory is either in ") + print("your C_INCLUDE_PATH or EIGEN_DIR environment variable.") raise OSError("Could not find Eigen") @@ -353,7 +361,7 @@ def fix_compiler(compiler, parallel): # Return the extra cflags, since those will be added to the build step in a different place. return extra_cflags -def add_dirs(builder): +def add_dirs(builder, output=False): # We need to do most of this both for build_clib and build_ext, so separate it out here. # First some basic ones we always need. @@ -361,7 +369,7 @@ def add_dirs(builder): builder.include_dirs.append('include/galsim') # Look for fftw3. - fftw_lib = find_fftw_lib() + fftw_lib = find_fftw_lib(output=output) fftw_libpath, fftw_libname = os.path.split(fftw_lib) if hasattr(builder, 'library_dirs'): builder.library_dirs.append(os.path.split(fftw_lib)[0]) @@ -375,7 +383,7 @@ def add_dirs(builder): builder.include_dirs.append('include/fftw3') # Look for Eigen/Core - eigen_dir = find_eigen_dir() + eigen_dir = find_eigen_dir(output=output) builder.include_dirs.append(eigen_dir) # Finally, add pybind11's include dir @@ -416,7 +424,7 @@ def build_libraries(self, libraries): class my_build_ext(build_ext): def finalize_options(self): build_ext.finalize_options(self) - add_dirs(self) + add_dirs(self, output=True) # Add any extra things based on the compiler being used.. def build_extensions(self): @@ -580,6 +588,14 @@ def run_tests(self): 'pyyaml', 'pandas', 'starlink-pyast'] # Not technically required, but useful. test_dep = ['pytest', 'pytest-xdist', 'pytest-timeout', 'scipy'] +# If Eigen doesn't exist in the normal places, add eigency ad a build dependency. 
+try: + find_eigen_dir() +except OSError: + print('Adding eigency to build_dep') + build_dep += ['eigency>=1.77'] + + with open('README.md') as file: long_description = file.read() From d053cb089302c4847d4e4af98333ebd72b4dc924 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 19 Feb 2018 17:04:26 -0500 Subject: [PATCH 059/111] Don't require pyyaml, pandas or starlink in setup.py. But leave in requirements.txt (#809-pybind11) --- conda_requirements.txt | 6 ++++-- requirements.txt | 8 +++++--- setup.py | 3 +-- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/conda_requirements.txt b/conda_requirements.txt index 72c74af64f3..e0e1c9bb5fa 100644 --- a/conda_requirements.txt +++ b/conda_requirements.txt @@ -3,10 +3,12 @@ numpy >= 1.13 future >= 0.15 astropy >= 2.0 -pyyaml >= 3.12 -pandas >= 0.20 pybind11 >= 2.2 pip >= 9.0 gcc >= 4.8 fftw >= 3.3 eigen >= 3.3 + +# Not technically required, but useful. +pyyaml >= 3.12 +pandas >= 0.20 diff --git a/requirements.txt b/requirements.txt index 9338408ee06..d2f018466c9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,11 +8,13 @@ eigency >= 1.77 numpy >= 1.13 future >= 0.15 astropy >= 2.0 -pyyaml >= 3.12 -pandas >= 0.20 pybind11 >= 2.2 pip >= 9.0 +# Not technically required, but useful. +pyyaml >= 3.12 +pandas >= 0.20 + # This is not in conda. Let pip install these. LSSTDESC.Coord >= 1.0.5 -starlink-pyast >= 3.9.0 +starlink-pyast >= 3.9.0 # Also not required, but useful. diff --git a/setup.py b/setup.py index ba972225d7b..9e90b1ad305 100644 --- a/setup.py +++ b/setup.py @@ -584,8 +584,7 @@ def run_tests(self): undef_macros = undef_macros) build_dep = ['pybind11>=2.2'] -run_dep = ['numpy', 'future', 'astropy', 'LSSTDESC.Coord', # Required. - 'pyyaml', 'pandas', 'starlink-pyast'] # Not technically required, but useful. 
+run_dep = ['numpy', 'future', 'astropy', 'LSSTDESC.Coord'] test_dep = ['pytest', 'pytest-xdist', 'pytest-timeout', 'scipy'] # If Eigen doesn't exist in the normal places, add eigency ad a build dependency. From d3bca56ddc203aeb1780cc9a631b1aab2d5ad117 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 19 Feb 2018 17:34:01 -0500 Subject: [PATCH 060/111] Nicer output when lookign for FFTW and Eigen directories (#809-pybind11) --- setup.py | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/setup.py b/setup.py index 9e90b1ad305..c030f2d3600 100644 --- a/setup.py +++ b/setup.py @@ -114,27 +114,30 @@ def find_fftw_lib(output=False): pass name = 'libfftw3' + lib_ext + if output: + print("Looking for ",name) for dir in try_libdirs: + if output: + print(" ", dir, end='') try: libpath = os.path.join(dir, name) lib = ctypes.cdll.LoadLibrary(libpath) if output: - print("found %s at %s" %(name, libpath)) + print(" (yes)") return libpath except OSError as e: if output: - print("Did not find %s in %s" %(name, libpath)) + print(" (no)") continue - if output: - print("Could not find %s in any of the normal locations"%name) - print("Trying ctypes.util.find_library") try: libpath = ctypes.util.find_library('fftw3') if libpath == None: raise OSError + if output: + print(" ", os.path.split(libpath)[0], end='') lib = ctypes.cdll.LoadLibrary(libpath) if output: - print("found %s at %s" %(name, libpath)) + print(" (yes)") return libpath except Exception as e: if output: @@ -145,10 +148,14 @@ def find_fftw_lib(output=False): # Check for Eigen in some likely places def find_eigen_dir(output=False): + import distutils.sysconfig + try_dirs = [] if 'EIGEN_DIR' in os.environ: try_dirs.append(os.environ['EIGEN_DIR']) try_dirs.append(os.path.join(os.environ['EIGEN_DIR'])) + # This is where conda will install it. 
+ try_dirs.append(distutils.sysconfig.get_config_var('INCLUDEDIR')) if 'posix' in os.name.lower(): try_dirs.extend(['/usr/local/include', '/usr/include']) if 'darwin' in platform.system().lower(): @@ -166,16 +173,23 @@ def find_eigen_dir(output=False): except ImportError: pass + if output: + print("Looking for Eigen:") for dir in try_dirs: + if output: + print(" ", dir, end='') if os.path.isfile(os.path.join(dir, 'Eigen/Core')): if output: - print("found Eigen at", dir) + print(" (yes)") return dir if os.path.isfile(os.path.join(dir, 'eigen3', 'Eigen/Core')): dir = os.path.join(dir, 'eigen3') if output: - print("found Eigen at", dir) + # Only print this if the eigen3 addition was key to finding it. + print("\n ", dir, " (yes)") return dir + if output: + print(" (no)") if output: print("Could not find Eigen. Make sure it is installed either in a standard ") print("location such as /usr/local/include, or the installation directory is either in ") From f30351f9bc4a029bd63689020a3e8d583467fc1a Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 19 Feb 2018 16:16:23 -0500 Subject: [PATCH 061/111] Minor cleanup. (#809-pybind11) --- setup.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/setup.py b/setup.py index c030f2d3600..3ca7a852eba 100644 --- a/setup.py +++ b/setup.py @@ -414,7 +414,7 @@ def add_dirs(builder, output=False): class my_build_clib(build_clib): def finalize_options(self): build_clib.finalize_options(self) - add_dirs(self) + add_dirs(self, output=True) # This happens first, so only output for this call. # Add any extra things based on the compiler being used.. def build_libraries(self, libraries): @@ -438,7 +438,7 @@ def build_libraries(self, libraries): class my_build_ext(build_ext): def finalize_options(self): build_ext.finalize_options(self) - add_dirs(self, output=True) + add_dirs(self) # Add any extra things based on the compiler being used.. 
def build_extensions(self): @@ -573,21 +573,18 @@ def run_tests(self): self.pytest_args = ['-n=%d'%ncpu, '--timeout=60'] else: self.pytest_args = self.pytest_args.split() - print('Using pytest args: ',self.pytest_args,' (can update with -a pytest_args)') + + #print('Using pytest args: ',self.pytest_args,' (can update with -a pytest_args)') original_dir = os.getcwd() os.chdir('tests') test_files = glob.glob('test*.py') + errno = pytest.main(self.pytest_args + test_files) - print('pytest ',self.pytest_args,test_files) errno = 0 if errno != 0: sys.exit(errno) os.chdir(original_dir) - print("Note: There might be some TypeError's after this. It seems to be a bug in some") - print(" versions of Python's multiprocessing module. ") - print(" They are harmless and can be ignored.\n") - lib=("galsim", {'sources' : cpp_sources, 'depends' : headers, @@ -675,7 +672,7 @@ def run_tests(self): libraries=[lib], ext_modules=[ext], setup_requires=build_dep, - install_requires=build_dep + run_dep, + install_requires=run_dep, tests_require=test_dep, cmdclass = {'build_ext': my_build_ext, 'build_clib': my_build_clib, From 4a466eb296267fcebc66aafb62af811b850903aa Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 19 Feb 2018 16:38:49 -0500 Subject: [PATCH 062/111] Cleanup README (#809-pybind11) --- README.md | 58 +++++++++++++++++++++++++++---------------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/README.md b/README.md index 81d34df9fa1..83e9d01eb32 100644 --- a/README.md +++ b/README.md @@ -25,9 +25,24 @@ development. For details of algorithms and code validation, please see http://adsabs.harvard.edu/abs/2015A%26C....10..121R -Distribution +Installation ------------ +Normally, to install GalSim, you should just need to run + + pip install galsim + +Depending on your setup, you may need to add either sudo to the start +or --user to the end of this command as you normally do when pip installing +packages. 
+ +See INSTALL.md for full details including one dependency (FFTW) that is not +pip installable, so you may need to install before running this command. + + +Source Distribution +------------------- + The current released version of GalSim is version 1.5. To get the code, you can grab the tarball (or zip file) from @@ -42,11 +57,6 @@ Or clone the repository with either of the following: git clone git@github.com:GalSim-developers/GalSim.git git clone https://github.com/GalSim-developers/GalSim.git -although after doing so, if you are not a developer, you should probably -checkout the latest release tag, rather than use the master branch: - - git checkout v1.5.0 - The code is also distributed via Fink, Macports, and Homebrew for Mac users. See INSTALL.md for more information. @@ -104,26 +114,15 @@ If none of these communication avenues seem appropriate, you can also contact us directly at the above email addresses. -Installation ------------- - -For installation instructions, please see the file `INSTALL.md` in the main -repository directory. - -There are tagged versions of the code corresponding to specific project -releases and development milestones. (For more info, see the "Tagged versions" -section below, and `devel/git.txt`) - - Getting started --------------- -* Install the code as in `INSTALL.md`. +* Install the code as above (see also INSTALL.md). * Optional, but recommended whenever you try a new version of the code: run the unit tests to make sure that there are no errors. You can do this by running - `scons tests`. If there are any issues, please open an Issue on our GitHub - page. + `python setup.py test`. If there are any issues, please open an Issue on our + GitHub page. * Optional: run `doxygen` to generate documentation, using `Doxyfile` in the main repository directory to specify all doxygen settings. 
Alternatively, @@ -195,6 +194,7 @@ As the project develops through further versions, and adds further capabilities to the software, more demo scripts may be added to `examples/` to illustrate what GalSim can do. + Tagged versions --------------- @@ -288,20 +288,20 @@ Summary of planned future development We plan to add the following additional capabilities in future versions of GalSim: -* Easier installation -- removing the boost dependency in particular. We are - planning to have v2.0 be pip installable, rather than using SCons, which - will make it much easier to install for many systems. This requires ripping - out the Boost Python wrapping and replacing with either cffi or pybind11 - (probably the latter, but still TBD). This effort is proceeding in issue - #809, with changes being merged to branch "noboost". - * Wavelength-dependent photon shooting. Currently, the chromatic functionality is only available for FFT rendering, which is quite slow. For most use cases, photon shooting should be orders of magnitude faster, so this is - a near-term priority to get done. (cf. Issue #540.) + a near-term priority to get done. (cf. Issue #540) * Simulating more sophisticated detector defects and image artifacts. E.g. - cosmic rays, saturation, bleeding, ... + vignetting, fringing, cosmic rays, saturation, bleeding, ... (cf. Issues + #553, #828) + +* Proper modeling of extinction due to dust. (cf. Issues #541, #550) + +* Various speed improvements. (cf. Issues #205, #566, #875, #935) + +* Switch docs to Sphinx. (cf. Issue #160) There are many others as well. Please see From 5e3ff5d05288514823f1468c7bc1508670b35fbc Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 19 Feb 2018 18:56:57 -0500 Subject: [PATCH 063/111] Add setuptools>=38 as a build_dep. 
--- conda_requirements.txt | 21 +++++++++++---------- pyproject.toml | 2 ++ requirements.txt | 21 +++++++++++---------- setup.py | 32 +++++++++++++++++++++----------- 4 files changed, 45 insertions(+), 31 deletions(-) create mode 100644 pyproject.toml diff --git a/conda_requirements.txt b/conda_requirements.txt index e0e1c9bb5fa..3bb643b2cd9 100644 --- a/conda_requirements.txt +++ b/conda_requirements.txt @@ -1,14 +1,15 @@ # The requirements packages that can be installed with # conda install -y -c conda-forge --file conda_requirements.txt -numpy >= 1.13 -future >= 0.15 -astropy >= 2.0 -pybind11 >= 2.2 -pip >= 9.0 -gcc >= 4.8 -fftw >= 3.3 -eigen >= 3.3 +setuptools>=38 +numpy>=1.13 +future>=0.15 +astropy>=2.0 +pybind11>=2.2 +pip>=9.0 +gcc>=4.8 +fftw>=3.3 +eigen>=3.3 # Not technically required, but useful. -pyyaml >= 3.12 -pandas >= 0.20 +pyyaml>=3.12 +pandas>=0.20 diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000000..322e03d71b3 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,2 @@ +[build-system] +requires = ["setuptools>=38", "pybind11>=2.2"] diff --git a/requirements.txt b/requirements.txt index d2f018466c9..04d97053e93 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,17 +4,18 @@ # These are in conda_requirements.txt. If using that, you may prefer to do # conda install -c conda-forge --file conda_requirements.txt # prior to running pip install -r requirements.txt -eigency >= 1.77 -numpy >= 1.13 -future >= 0.15 -astropy >= 2.0 -pybind11 >= 2.2 -pip >= 9.0 +setuptools>=38 +eigency>=1.77 +numpy>=1.13 +future>=0.15 +astropy>=2.0 +pybind11>=2.2 +pip>=9.0 # Not technically required, but useful. -pyyaml >= 3.12 -pandas >= 0.20 +pyyaml>=3.12 +pandas>=0.20 # This is not in conda. Let pip install these. -LSSTDESC.Coord >= 1.0.5 -starlink-pyast >= 3.9.0 # Also not required, but useful. +LSSTDESC.Coord>=1.0.5 +starlink-pyast>=3.9.0 # Also not required, but useful. 
diff --git a/setup.py b/setup.py index 3ca7a852eba..871ee3980cf 100644 --- a/setup.py +++ b/setup.py @@ -4,15 +4,24 @@ import ctypes import types -from setuptools import setup, Extension, find_packages -from setuptools.command.build_ext import build_ext -from setuptools.command.build_clib import build_clib -from setuptools.command.install import install -from setuptools.command.install_scripts import install_scripts -from setuptools.command.easy_install import easy_install -from setuptools.command.test import test -import setuptools -print("Using setuptools version",setuptools.__version__) +try: + from setuptools import setup, Extension, find_packages + from setuptools.command.build_ext import build_ext + from setuptools.command.build_clib import build_clib + from setuptools.command.install import install + from setuptools.command.install_scripts import install_scripts + from setuptools.command.easy_install import easy_install + from setuptools.command.test import test + import setuptools + print("Using setuptools version",setuptools.__version__) +except ImportError: + print() + print("****") + print(" Installation requires setuptools version >= 38.") + print(" Please upgrade or install with pip install -U setuptools") + print("****") + print() + raise print('Python version = ',sys.version) py_version = "%d.%d"%sys.version_info[0:2] # we check things based on the major.minor version. @@ -403,6 +412,7 @@ def add_dirs(builder, output=False): # Finally, add pybind11's include dir import pybind11 print('PyBind11 is version ',pybind11.__version__) + #print(pybind11.__file__) # Include both the standard location and the --user location, since it's hard to tell # which one is the right choice. 
builder.include_dirs.append(pybind11.get_include(user=True)) @@ -594,7 +604,7 @@ def run_tests(self): py_sources, undef_macros = undef_macros) -build_dep = ['pybind11>=2.2'] +build_dep = ['setuptools>=38', 'pybind11>=2.2'] run_dep = ['numpy', 'future', 'astropy', 'LSSTDESC.Coord'] test_dep = ['pytest', 'pytest-xdist', 'pytest-timeout', 'scipy'] @@ -672,7 +682,7 @@ def run_tests(self): libraries=[lib], ext_modules=[ext], setup_requires=build_dep, - install_requires=run_dep, + install_requires=build_dep + run_dep, tests_require=test_dep, cmdclass = {'build_ext': my_build_ext, 'build_clib': my_build_clib, From 089214d86ae0f8b7e81dc90ec312b6d7cdd499c1 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 19 Feb 2018 21:12:32 -0500 Subject: [PATCH 064/111] Don't add '' to library_dirs (#809-pybind11) --- setup.py | 49 +++++++++++++++++++++++++------------------------ 1 file changed, 25 insertions(+), 24 deletions(-) diff --git a/setup.py b/setup.py index 871ee3980cf..2fe39853669 100644 --- a/setup.py +++ b/setup.py @@ -2,6 +2,7 @@ import sys,os,glob,re import platform import ctypes +import ctypes.util import types try: @@ -123,37 +124,39 @@ def find_fftw_lib(output=False): pass name = 'libfftw3' + lib_ext - if output: - print("Looking for ",name) + if output: print("Looking for ",name) + tried_dirs = set() # Keep track, so we don't try the same thing twice. for dir in try_libdirs: - if output: - print(" ", dir, end='') + if dir == '': continue # This messes things up if it's in there. 
+ if dir in tried_dirs: continue + else: tried_dirs.add(dir) + if output: print(" ", dir, end='') try: libpath = os.path.join(dir, name) lib = ctypes.cdll.LoadLibrary(libpath) - if output: - print(" (yes)") + if output: print(" (yes)") return libpath except OSError as e: - if output: - print(" (no)") + if output: print(" (no)") continue try: libpath = ctypes.util.find_library('fftw3') if libpath == None: raise OSError - if output: - print(" ", os.path.split(libpath)[0], end='') lib = ctypes.cdll.LoadLibrary(libpath) - if output: - print(" (yes)") - return libpath except Exception as e: if output: print("Could not find fftw3 library. Make sure it is installed either in a standard ") print("location such as /usr/local/lib, or the installation directory is either in ") print("your LIBRARY_PATH or FFTW_DIR environment variable.") raise + else: + dir, name = os.path.split(libpath) + if output: + if dir == '': dir = '[none]' + print(" ", dir, " (yes)") + return libpath + # Check for Eigen in some likely places def find_eigen_dir(output=False): @@ -182,14 +185,11 @@ def find_eigen_dir(output=False): except ImportError: pass - if output: - print("Looking for Eigen:") + if output: print("Looking for Eigen:") for dir in try_dirs: - if output: - print(" ", dir, end='') + if output: print(" ", dir, end='') if os.path.isfile(os.path.join(dir, 'Eigen/Core')): - if output: - print(" (yes)") + if output: print(" (yes)") return dir if os.path.isfile(os.path.join(dir, 'eigen3', 'Eigen/Core')): dir = os.path.join(dir, 'eigen3') @@ -197,8 +197,7 @@ def find_eigen_dir(output=False): # Only print this if the eigen3 addition was key to finding it. print("\n ", dir, " (yes)") return dir - if output: - print(" (no)") + if output: print(" (no)") if output: print("Could not find Eigen. 
Make sure it is installed either in a standard ") print("location such as /usr/local/include, or the installation directory is either in ") @@ -395,7 +394,8 @@ def add_dirs(builder, output=False): fftw_lib = find_fftw_lib(output=output) fftw_libpath, fftw_libname = os.path.split(fftw_lib) if hasattr(builder, 'library_dirs'): - builder.library_dirs.append(os.path.split(fftw_lib)[0]) + if fftw_libpath != '': + builder.library_dirs.append(fftw_libpath) builder.libraries.append(os.path.split(fftw_lib)[1].split('.')[0][3:]) fftw_include = os.path.join(os.path.split(fftw_libpath)[0], 'include') if os.path.isfile(os.path.join(fftw_include, 'fftw3.h')): @@ -547,8 +547,9 @@ def run_cpp_tests(self): library_dirs = ext.library_dirs fftw_lib = find_fftw_lib() fftw_libpath, fftw_libname = os.path.split(fftw_lib) - library_dirs.append(os.path.split(fftw_lib)[0]) - libraries.append(os.path.split(fftw_lib)[1].split('.')[0][3:]) + if fftw_libpath != '': + library_dirs.append(fftw_libpath) + libraries.append(fftw_libname.split('.')[0][3:]) libraries.append('galsim') exe_file = os.path.join(builder.build_temp,'cpp_test') From af845299775510105343cfc22fe0993119dc4457 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 19 Feb 2018 22:16:14 -0500 Subject: [PATCH 065/111] Fix ThreadPool usage --- setup.py | 36 +++++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/setup.py b/setup.py index 2fe39853669..ba4f165c9a0 100644 --- a/setup.py +++ b/setup.py @@ -4,6 +4,7 @@ import ctypes import ctypes.util import types +import subprocess try: from setuptools import setup, Extension, find_packages @@ -67,7 +68,6 @@ def get_compiler(cc): be called cc or gcc. 
""" cmd = [cc,'--version'] - import subprocess p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) lines = p.stdout.readlines() print('compiler version information: ') @@ -208,7 +208,6 @@ def find_eigen_dir(output=False): def try_cc(cc, cflags=[], lflags=[]): """Check if compiling a simple bit of c++ code with the given compiler works properly. """ - import subprocess import tempfile from textwrap import dedent cpp_code = dedent(""" @@ -334,10 +333,11 @@ def _single_compile(obj): for obj in objects: _single_compile(obj) else: - # This next bit is taken from here: - # https://stackoverflow.com/questions/11013851/speeding-up-build-process-with-distutils - # convert to list, imap is evaluated on-demand - list(multiprocessing.pool.ThreadPool(ncpu).imap(_single_compile,objects)) + # Use ThreadPool, rather than Pool, since the objects are picklable. + pool = multiprocessing.pool.ThreadPool(ncpu) + pool.map(_single_compile, objects) + pool.close() + pool.join() # Return *all* object filenames, not just the ones we just built. return objects @@ -526,8 +526,6 @@ def finalize_options(self): self.test_suite = True def run_cpp_tests(self): - import subprocess - builder = self.distribution.get_command_obj('build_ext') compiler = builder.compiler ext = builder.extensions[0] @@ -574,12 +572,13 @@ def run_cpp_tests(self): raise RuntimeError("C++ tests failed") def run_tests(self): - import pytest # Build and run the C++ tests self.run_cpp_tests() ncpu = cpu_count() + # PyTest sometimes has issues with a large number of processes. Limit to 8. 
+ if ncpu > 8: ncpu = 8 if self.pytest_args is None: self.pytest_args = ['-n=%d'%ncpu, '--timeout=60'] else: @@ -590,10 +589,21 @@ def run_tests(self): os.chdir('tests') test_files = glob.glob('test*.py') - errno = pytest.main(self.pytest_args + test_files) - errno = 0 - if errno != 0: - sys.exit(errno) + if True: + import pytest + errno = pytest.main(self.pytest_args + test_files) + errno = 0 + if errno != 0: + sys.exit(errno) + else: + # Alternate method calls pytest executable. But the above code seems to work. + p = subprocess.Popen(['pytest'] + self.pytest_args + test_files) + p.communicate() + if p.returncode == 0: + print("All python tests passed.") + else: + raise RuntimeError("Some Python tests failed") + os.chdir(original_dir) From e0cb9e01d527e6afc25b908f9a298a18344d2870 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 19 Feb 2018 22:33:46 -0500 Subject: [PATCH 066/111] Check for the pybind11 header files --- setup.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/setup.py b/setup.py index ba4f165c9a0..4c125e49141 100644 --- a/setup.py +++ b/setup.py @@ -412,12 +412,20 @@ def add_dirs(builder, output=False): # Finally, add pybind11's include dir import pybind11 print('PyBind11 is version ',pybind11.__version__) - #print(pybind11.__file__) - # Include both the standard location and the --user location, since it's hard to tell - # which one is the right choice. - builder.include_dirs.append(pybind11.get_include(user=True)) - builder.include_dirs.append(pybind11.get_include(user=False)) - print('Include files for pybind11 are ',builder.include_dirs[-2:]) + for user in [True, False, None]: + if user is None: + # Last time through, raise an error. 
+ print("Could not find pybind11 header files.") + print("They should have been in one of the following two locations:") + print(" ",pybind11.get_include(True)) + print(" ",pybind11.get_include(False)) + raise OSError("Could not find PyBind11") + + try_dir = pybind11.get_include(user=user) + if os.path.isfile(os.path.join(try_dir, 'pybind11/pybind11.h')): + print('Include files for pybind11 are in',try_dir) + builder.include_dirs.append(try_dir) + break # Make a subclass of build_ext so we can add to the -I list. From 97a81c6690cf1eb05706fe04e2234be556e6a9a8 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 19 Feb 2018 22:34:00 -0500 Subject: [PATCH 067/111] Make it easier to change the number of jobs for pytest --- setup.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/setup.py b/setup.py index 4c125e49141..6792fb29185 100644 --- a/setup.py +++ b/setup.py @@ -522,11 +522,13 @@ def run(self): class my_test(test): # cf. https://pytest.readthedocs.io/en/2.7.3/goodpractises.html - user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")] + user_options = [('pytest-args=', 'a', "Arguments to pass to py.test"), + ('njobs=', 'j', "Number of jobs to use in py.test")] def initialize_options(self): test.initialize_options(self) self.pytest_args = None + self.njobs = None def finalize_options(self): test.finalize_options(self) @@ -584,11 +586,14 @@ def run_tests(self): # Build and run the C++ tests self.run_cpp_tests() - ncpu = cpu_count() - # PyTest sometimes has issues with a large number of processes. Limit to 8. 
- if ncpu > 8: ncpu = 8 if self.pytest_args is None: - self.pytest_args = ['-n=%d'%ncpu, '--timeout=60'] + if self.njobs is None: + self.njobs = cpu_count() + else: + self.njobs = int(self.njobs) + print('Using %d processes for pytest.'%self.njobs) + print('To change this use python setup.py test -jN') + self.pytest_args = ['-n=%d'%self.njobs, '--timeout=60'] else: self.pytest_args = self.pytest_args.split() From 7340089fa6e480302872b1a82be9343a61f392bd Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 19 Feb 2018 23:20:24 -0500 Subject: [PATCH 068/111] Rework credo.txt into a developer's README (#809-pybind11) --- devel/README | 236 ++++++++++++++++++++++++++++++++++++++++++++++++ devel/credo.txt | 232 ----------------------------------------------- 2 files changed, 236 insertions(+), 232 deletions(-) create mode 100644 devel/README delete mode 100644 devel/credo.txt diff --git a/devel/README b/devel/README new file mode 100644 index 00000000000..d158f002bf7 --- /dev/null +++ b/devel/README @@ -0,0 +1,236 @@ +Guildelines for GalSim developers: + +1. Style + +For C++, we roughly adhere to the LSST C++ style. At this point, the best bet for new +C++ developers would be to look at some of the existing code and try to make your code +have a similar style. + +For Python, we mostly adhere to PEP8, although please pay attention to the first rule: +"A Foolish Consistency is the Hobgoblin of Little Minds". In other words, please do +break the "rules" if it improves readability. + +Biggish things to highlight/add/modify are... + +* 4 space indentation, rather than 2 space. + +* No tabs. Just spaces. + +* No using statements. Now all namespaces (especially std::) are explicit (equvalent will be + adopted in Python, i.e. no "import * from moduleX" or "from moduleY import Z"). + +* Use C++ std library when possible. e.g. MAX -> std::max, Assert -> assert, PI -> M_PI, etc. 
+ +* Will be readable at 100 character width (this is a departure from LSST style, which specifies + 120 but is slightly annoying for laptop use). + +* We adhere to the Zen of Python; open python, type "import this", hit enter. + +* We use all lowercase letters for all Python packages. That's a bit of a Python convention, + and while it's mostly aimed at compatibility with case-insensitive filesystems, we think we + should stick with it anyway. + +* We will adopt the SBProfile capitalization style wherever sensible for code filenames, as it's by + far the most significant chunk of C/C++ we are currently using. This makes include/*.h files + capitalized. + +* Overall capitalization rules: + * File names are CamelCase** + * Classes (and structs) are CamelCase + * Free functions are CamelCase + * Member functions are camelCase + * Public variables (including function parameters and kwargs) are lower_case + * Private variables are _lower_case + * Local scope variables/functions can be whatever the author prefers. + * Note that when using camelCase or CamelCase, acronyms should still be capitalized, i.e. CCD and + PSF, not Ccd and Psf. + +* Python unit testing modules are placed in tests/, and called test_.py. + +* C++ unit tests are also in tests/, called test_*.cpp. + + +For vim users, Mike Jarvis has put the c.vim file in the devutils/ directory. If you put that +in .vim/ftplugin/ and add the line "filetype plugin on" in your .vimrc file, then you will +automatically get the formatting to match what is currently in SBProfile. We don't (yet) +have a corresponding file for emacs. (Sorry.) + +LSST Style Guides available here -- +http://dev.lsstcorp.org/trac/wiki/DocumentationStandards +http://dev.lsstcorp.org/trac/wiki/PythonCodeStandards +http://dev.lsstcorp.org/trac/wiki/C%2B%2BStandard + + +2. Workflow + +Prior to version 2.0, SCons was our only build option. It is still available, but we +don't recommend it for end users. 
It remains to be seen which build system is more convenient +for developers, so we have kept it around for the 2.x series. + +With setup.py, the work flow is as follows: + + 1. Edit code + 2. python setup.py install + 3. python test_whatever.py # Check if your code works on the unit tests for this module. + 4. If errors, goto 1 + 5. python setup.py test # Check that the new code didn't break any other tests. + 6. If errors, goto 1 + 7. git add -p # Add only the code changes that are relevant. (Not debugging, etc.) + 8. git commit -m "Summary of changes" + 9. git push or goto 1 if still more to do. + +With SCons, the build steps are slightly different. + + 2. scons install + 5. scons tests + + +3. Commits + +Please do your best to have each commit be atomic. + +That is, the commit should, as far as possible, be a single conceptual change in the code, +complete with whatever additional unit tests are necessary to test it. Ideally all tests +should pass for every commit. This makes cherry-picking and bisecting much easier. + +Sometimes when working on some change, you may notice something else you want to change as well. +Maybe a typo, or a semi-related change that occurs to you because of your work on something else. + +In any case, go ahead and make the change. But when committing, use `git add -p` to select +the lines that go together to make a single atomic change. Commit them, and then go back and +do it again to add the other conceptual change as a separate commit. + +This is also a useful command to let you notice any debugging lines that you might have left in +the code, which you don't want to include in the commit. + +Another useful git command to become familiar with is `git stash`. This temporarily stores any +local changes, so you can (for instance) run the unit tests on the current committed version +to make sure they all pass before adding more items as the next commit. To bring the stashed +changes back, just do `git stash pop`. + + +4. 
Array/pixel indexing: + +Numpy arrays in Python use the (matrix-style) indexing [y, x], whereas the SBProfile class and the +more arguably natural ordering is (x, y). PyFITS, and a number of other Numpy-dependent Python +libraries have also adopted the [y, x] paradigm, as well as a number of astronomers who do a lot in +Python. + +We will write our Python classes to accept arguments in the (x, y) order, particularly the +galsim.Image class. However, it will be possible to create an Image using a Numpy array, and also +to get a Numpy view into an image. + +This places the boundary between our classes and NumPy. Our classes would be (x,y) in both C++ and +Python, but we wouldn't make any effort to fit NumPy into that paradigm. + +Jim gives a couple more reasons on why this is a good place to put the boundary: +- Square brackets will consistently be [y,x], and parentheses will consistently be (x,y), due to + the (usually annoying) fact that you can't overload operator[] with two argument in C++. + +- Even in Python, [y,x] is really only used for NumPy arrays - note that matplotlib's plot + function takes 1-d x and y arrays in that order, for instance, even though matplotlib expects + arrays used as images to be [y,x]. 
+ +Jim gives a nice example of this functionality for what he has in mind for the Python API of the +galsim.Image class: + +>>> import galsim +>>> import numpy +>>> arr1 = numpy.arange(6).reshape(2,3) +>>> print arr1 +[[0 1 2] +[3 4 5]] +>>> im1 = galsim.ImageD(arr1, x0=10, y0=100) # im shares data with arr1 +>>> arr2 = im1.array +>>> assert(arr2 is arr1) +>>> print im1(12, 101) # (x, y); includes offsets passed in constructor +5 +>>> im2 = galsim.ImageD(x0=1, y0=2, w=3, h=4) +>>> arr3 = im2.array # arr3 shares data with m3 +>>> print arr3.shape # shape is (h, w) +(4, 3) +>>> arr4 = arr1.transpose() # arr4 is column-major +>>> im3 = galsim.ImageD(arr4) # can't do this +Traceback (most recent call last): + File "", line 1, in +ValueError: Cannot create image from noncontiguous array. + +This last point is important: Numpy arrays must be kept in c_contiguous storage order, i.e. row- +major. Some of numpy's array routines invisibly change arrays to Fortran-style (i.e. column-major) +storage, or disrupt contiguous storage altogether. While developing, keep track of this using + +>>> array.flags + +in particular ensure that + +>>> array.flags.c_contiguous == True + +Check out the np.copy() function in Numpy, or array.copy() method to see how to make a c_contiguous +array, also see np.ascontiguousarray() or the array.transpose() method. + +Finally, the FITS standard is to begin indexing all arrays at (1, 1), and this is the convention +SBProfile currently adopts. However, we think that our image objects will also carry around an +attribute for the coordinate origin, so this should not be too much of a headache at the interface +with Python/Numpy (famous last words). + + +5. Compiler warning flags + +One of the SCons defaults is WARN=false. This is recommended for end users, so we don't +saddle them with a bunch of warning messages if they use a new compiler that we haven't +tested on yet. + +However, developers should always run with WARN=true to help catch bugs. 
This tends to +catch a lot of things that cause portability issues as we use the code on different systems +as well as outright bugs in the coding that are otherwise missed. Not all of the things +that come up are bugs per se, but it catches enough things that really are bugs that we feel +it worthwhile to make that the default. Developers are expected to fix their code to get rid +of all these warnings before committing. Even if you know the warning is benign, please fix it. +We want all compilations to be warning-free on as many compilers as possible. + +Note: because SCons automatically caches all parameters you pass it, you will only need +to do `scons WARN=true` once, and then it will be set that way for all future scons +commands. + +The setup.py build always shows all these warnings, so you don't need to do anything special +to turn that on. + +Even if everyone does this, it is possible that you might come across warnings from someone +else's code. e.g. They may use a different compiler that warns about somewhat different things. +If you know how to fix the problem, go ahead and do so. If you don't, please email the +person responsible (or go to our GitHub page and comment on the commit that causes the problem) +to ask them to fix it. Basically the same thing you would do if code failed to compile for a +non-warning compiler error. + + +6. Branch names + +When starting work on an issue, create a branch with the same name as the issue. +In particular, include the # before the number. E.g. when working on Issue #999, +the branch you make should be called #999. + +git checkout -b "#999" + +Note the quotes around "#999". This is because bash uses the # symbol for comments, so +if you don't include the quotes, the command won't work. + +If you want, it is permissible to add extra text after the issue number. 
+ +git checkout -b "#999-some_cool_new_stuff" + +We don't normally do this, but sometimes it is useful to help remember which issue is which +(and therefore which branch you want to switch to). + +Why do we do this? Especially given the annoying quotes thing? Because if you follow the +instructions in the file .git/hooks/commit_msg, then git will add the branch name to the +ends of your commit messages automatically. This in turn lets GitHub know that the commit +is connected with that issue, so it shows up in the issue thread. This is often quite useful. + +The instructions in commit-msg repeated here for convenience: + + Copy this file to .git/hooks/commit-msg to enable automatically appending the + branch name to commit messages (when on a branch). Ensure it remains executable. + Branches should usually be named according to the issue number they are for. + e.g. "#12" for a branch that works on issue 12. + Then commits will automatically be linked in the comments for that issue. + diff --git a/devel/credo.txt b/devel/credo.txt deleted file mode 100644 index 151fbd7592e..00000000000 --- a/devel/credo.txt +++ /dev/null @@ -1,232 +0,0 @@ -***The hopefully not-too-crippling dogma of GalSim development***, v0.1 - -1. Style: - -Use the LSST documents on C++, Python and Documentation overall as a guide for new code, -exert pragmatism with pre-existing code. Unless you have good reason for hating LSST style, -or the style suggested here, in which case share it! - -Biggish things to highlight/add/modify are... - 4 space indentation, rather than 2 space. - - No tabs. Just spaces. - - No using statements. Now all namespaces (especially std::) are explicit (equvalent will be - adopted in Python, i.e. no "import * from moduleX" or "from moduleY import Z"). - - Use C++ std library when possible. e.g. MAX -> std::max, Assert -> assert, PI -> M_PI, etc. 
- - Will be readable at 100 character width (this is a departure from LSST style, which specifies - 120 but is slightly annoying for laptop use). - - Python 2.7.x will be supported, not Python 3.x... - - We adhere to the Zen of Python; open python, type "import this", hit enter. - - We use all lowercase letters for all Python packages. That's a bit of a Python convention, - and while it's mostly aimed at compatibility with case-insensitive filesystems, we think we - should stick with it anyway. - - We will adopt the SBProfile capitalization style wherever sensible for code filenames, as it's by - far the most significant chunk of C/C++ we are currently using. This makes include/*.h files - capitalized. - - Overall capitalization rules: - * File names are CamelCase** - * Classes (and structs) are CamelCase - * Free functions are CamelCase - * Member functions are camelCase - * Public variables (including function parameters and kwargs) are lower_case - * Private variables are _lower_case - * Local scope variables/functions can be whatever the author prefers. - ** Note that when using camelCase or CamelCase, acronyms should still be capitalized, i.e. CCD and - PSF, not Ccd and Psf. - - Python unit testing modules will be placed in tests/, and called test_.py - - For executables that perform tests (e.g. in non-Python code units) these will be called - test_* to match the naming conventions of Python test modules. - - -For vim users, Mike Jarvis has put the c.vim file in the devutils/ directory. If you put that -in .vim/ftplugin/ and add the line "filetype plugin on" in your .vimrc file, then you will -automatically get the formatting to match what is currently in SBProfile. We don't (yet) -have a corresponding file for emacs. (Sorry.) 
- -LSST Style Guides available here -- -http://dev.lsstcorp.org/trac/wiki/DocumentationStandards -http://dev.lsstcorp.org/trac/wiki/PythonCodeStandards -http://dev.lsstcorp.org/trac/wiki/C%2B%2BStandard - -Broad reasons for choice of LSST style: These documents just seem to be a fairly sensible -source of code and documentation formatting guidance, although note that we have diverged in some -places. See also Peter Melchior's slides for sound advice in general, and on documentation: -dl.dropbox.com/u/26820102/talks/software_engineering_150410.pdf - - -2. Version control: -Git - -Broad reasons why: Modern/distributed. Slightly better general purpose/branching capabilities -than Hg, albeit at the cost of a mildly steeper learning curve. Neither is rocket science! - - -3. Repository hosting: -Github, with push/pull access to all those in the GalSim-developers organization (based on -the great3-code@princeton.edu mailing list). - -Broad reasons why: Code review features, wiki features, popularity within GalSim-developers, -project forking. - - -4. Documentation: DOxygen -Broad reasons why: Well-supported by many in GalSim-developers. - - -5. Builds: SCons -Broad reasons why: Seemingly greater experience among GalSim-developers. - -One of the SCons defaults is WARN=false. This is recommended for end users, so we don't -saddle them with a bunch of warning messages if they use a new compiler that we haven't -tested on yet. However, developers should always run with WARN=true to help catch -bugs. Even if you know the warning is benign, please fix it. We want all compilations -to be warning-free on as many compilers as possible. - -Note: because SCons automatically caches all parameters you pass it, you will only need -to do `scons WARN=true` once, and then it will be set that way for all future scons -commands. - - -6. 
Libraries: -FFTW, Numpy, Pyfits, TMV (+BLAS & LAPACK if tuned versions present), Boost.python, -Boost.shared_ptr, Boost.random (flexibility to other RNGs) - -Notes: will add more if really useful/necessary, but want to keep this list as short as -possible. Matplotlib plotting in Python not featured by default. - - -7. Array/pixel indexing: - -Numpy arrays in Python use the (matrix-style) indexing [y, x], whereas the SBProfile class and the -more arguably natural ordering is (x, y). PyFITS, and a number of other Numpy-dependent Python -libraries have also adopted the [y, x] paradigm, as well as a number of astronomers who do a lot in -Python. - -We will write our Python classes to accept arguments in the (x, y) order, particularly the -galsim.Image class. However, it will be possible to create an Image using a Numpy array, and also -to get a Numpy view into an image. - -This places the boundary between our classes and NumPy. Our classes would be (x,y) in both C++ and -Python, but we wouldn't make any effort to fit NumPy into that paradigm. - -Jim gives a couple more reasons on why this is a good place to put the boundary: -- Square brackets will consistently be [y,x], and parentheses will consistently be (x,y), due to - the (usually annoying) fact that you can't overload operator[] with two argument in C++. - -- Even in Python, [y,x] is really only used for NumPy arrays - note that matplotlib's plot - function takes 1-d x and y arrays in that order, for instance, even though matplotlib expects - arrays used as images to be [y,x]. 
- -Jim gives a nice example of this functionality for what he has in mind for the Python API of the -galsim.Image class: - ->>> import galsim ->>> import numpy ->>> arr1 = numpy.arange(6).reshape(2,3) ->>> print arr1 -[[0 1 2] -[3 4 5]] ->>> im1 = galsim.ImageD(arr1, x0=10, y0=100) # im shares data with arr1 ->>> arr2 = im1.array ->>> assert(arr2 is arr1) ->>> print im1(12, 101) # (x, y); includes offsets passed in constructor -5 ->>> im2 = galsim.ImageD(x0=1, y0=2, w=3, h=4) ->>> arr3 = im2.array # arr3 shares data with m3 ->>> print arr3.shape # shape is (h, w) -(4, 3) ->>> arr4 = arr1.transpose() # arr4 is column-major ->>> im3 = galsim.ImageD(arr4) # can't do this -Traceback (most recent call last): - File "", line 1, in -ValueError: Cannot create image from noncontiguous array. - -This last point is important: Numpy arrays must be kept in c_contiguous storage order, i.e. row- -major. Some of numpy's array routines invisibly change arrays to Fortran-style (i.e. column-major) -storage, or disrupt contiguous storage altogether. While developing, keep track of this using - ->>> array.flags - -in particular ensure that - ->>> array.flags.c_contiguous == True - -Check out the np.copy() function in Numpy, or array.copy() method to see how to make a c_contiguous -array, also see np.ascontiguousarray() or the array.transpose() method. - -Finally, the FITS standard is to begin indexing all arrays at (1, 1), and this is the convention -SBProfile currently adopts. However, we think that our image objects will also carry around an -attribute for the coordinate origin, so this should not be too much of a headache at the interface -with Python/Numpy (famous last words). - - -8. Compiler warning flags - -By default, SCons adds the flags -Wall -Werror to the list of compiler flags. This tends to -catch a lot of things that cause portability issues as we use the code on different systems -as well as outright bugs in the coding that are otherwise missed. 
Not all of the things -that come up are bugs per se, but it catches enough things that really are bugs that we feel -it worthwhile to make that the default. Developers are expected to fix their code to get rid -of all these warnings before committing. - -Even if everyone does this, it is possible that you might come across warnings from someone -else's code. e.g. They may use a different compiler that warns about somewhat different things. -If you know how to fix the problem, go ahead and do so. If you don't, please email the -person responsible (or go to our GitHub page and comment on the commit that causes the problem) -to ask them to fix it. Basically the same thing you would do if code failed to compile for a -non-warning compiler error. - -When we eventually release the code to the public, we will switch the default to not use -these warning flags in case some users have a different compiler that none of us tested on. -The code is regularly tested with various versions of g++, clang++ and icpc. Maybe more. -But if they use something different, we don't want the code to fail because it is a stickler -for some C++ detail that we didn't catch in our builds. Turning on and off the warnings is -done with the SCons option WARN. Use WARN=false to turn them off, and WARN=true to turn them -back on. But again, you shouldn't really have to do this. You should fix the warnings rather -than ignore them by turning them off. - - -9. Random number generators (RNG) - -First, we realize that it's unlikely that *any* library RNG would come with a guarantee that a -given seed will produce the same sequence in all past and future versions (unless it's software -that is never revised, like NR!). So if we want to GalSim to have this property (we do), then we -need to package some fixed version of something with GalSim. - -Gary has created a subdirectory GalSim/include/galsim/boost1_48_0.random which contains the needed -files (and subdirectory) from Boost.Random. 
If the compiler flag DIVERT_BOOST_RANDOM is defined, -then all of the include directives for Random.h are directed to this directory instead of to -boost/random. And I hacked all these Boost files so that their includes for any element of -Boost.Random are also sent to galsim/boost1_48_0.random. Hence a compilation will not reach the -local boost/random and there will be no name collisions... - -***as long as no module that includes our Random.h also explicitly includes the normal boost/random -files.*** - -In other words we need to have a rule that any use of RNG's accesses them ONLY via our Random.h -wrappers. This would be good practice in any case. - -Some of the hacked Boost.Random files refer to other parts of Boost. I have *not* diverted these to -private copies - they will come from the user's distribution so there will be no name or code -collisions if anyone uses other parts of Boost. I've confirmed that the Boost.Random 1.48.0 -routines work when the 1.46 versions of the other Boost libraries are included (that's the one that -fink likes). - -At the Python level, we want to be using this same RNG. This means we'll need to make a Python -wrapper for the C++ Random class, and use it, which means we need to avoid the temptation to use -numpy.random. Work on this wrapper will begin ASAP (we'll probably want to put some effort into -making the wrappers for the C++ RNG class very NumPy-friendly anyway). - -There remains the risk that future Boost releases will break our "frozen" Boost.Random. We'll need -to record that possibility for posterity. 
- From 31b58713899cf7769972b59cc950cfee6fe434c9 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Tue, 20 Feb 2018 11:22:33 -0500 Subject: [PATCH 069/111] Update instructions about best way to install prereqs (#809-pybind11) --- INSTALL.md | 46 +++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index a3b093a9887..b954153ec58 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -31,26 +31,33 @@ The usual way to install GalSim is now (starting with version 2.0) simply pip install galsim -or by cloning the repo and doing - +which will install the latest official release of GalSim. + +Note that you may need to use sudo with the above command if you are installing +into system directories. If you do not have write privileges for the directory +it is trying to install into, you can use the --user flag to install into a +local directory instead. (Normally something like $HOME/Library/Python/2.7 +or $HOME/.local, depending on your system.) + +If you would rather install from source (e.g. to work on a development branch), +you can do + python setup.py install -Note that you may need to use sudo with the above commands if they are -installing into system directories. If you do not have write privileges for -the directory that the above commands would install into, you can use the ---user flag to install into a local directory. (Normally something like -$HOME/Library/Python/2.7 or $HOME/.local, depending on your system and which -brand of Python you are using.) +(again possibly with either sudo or --user). This sometimes does not properly +install all of the dependencies properly, so you might need to first run + + pip install -r requirements.txt -Either of these installation methods will automatically install most of the -required dependencies for you if you do not have them already installed on your -machine. There is one exception, however. 
FFTW is not directly pip -installable, so if the above installation fails, you may need to install -it separately. See the sections 2 below for more details about how to do this. +Either of these installation methods should handle most of the required +dependencies for you if you do not have them already installed on your machine. +There is one exception, however. FFTW is not directly pip installable, so if +the above installation fails, you may need to install it separately. See +sections 2 below for more details about how to do this. -The other dependencies should all be installed automatically, but they -are listed here for completeness along with versions that are known to work. -In most cases, other recent versions will also work: +The other dependencies should all be installed by pip, but we list them here +for completeness along with versions that are known to work. In most cases, +other recent versions will also work: - Eigen (3.2.8) (via eigency 1.77) - NumPy (1.14.0) @@ -69,13 +76,6 @@ pip uses to determine what else to install. But if you install with - PyYaml (3.12) (Reads YAML config files) - Pandas (0.20) (Faster reading of ASCII input files) -If you would like to install all the above pip-installable dependencies -separately from installing GalSim, the easiest way is to use the command - - pip install -r requirements.txt - -in the GalSim directory. - 2. Installing FFTW ================== From c7e55a4cf3406eb19bae6802b1ccd4da60aa3454 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Tue, 20 Feb 2018 11:23:00 -0500 Subject: [PATCH 070/111] Describe setup.py installation in demo1.yaml (#809-pybind11) --- examples/demo1.yaml | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/examples/demo1.yaml b/examples/demo1.yaml index 0d6e3bfe8e5..5fa092cbffe 100644 --- a/examples/demo1.yaml +++ b/examples/demo1.yaml @@ -28,15 +28,27 @@ # writing the corresponding python code. 
# # The executable that reads these YAML files is called galsim, which should be installed -# in your PREFIX/bin directory (after doing `scons install`, that is, where PREFIX is either -# /usr/local or whatever other PREFIX you set when running scons). So to run this config -# file, you should be able to type simply: +# by either `pip install galsim` or `python setup.py install`. If you used the latter, +# the output should end with a line something along the lines of: # -# galsim demo1.yaml +# scripts installed into /usr/local/bin +# +# telling you which directory they were installed in. If that directory is not in your +# path, then there should also be a message telling you to add it to your $PATH +# environment variable. If you used `pip install galsim --user`, then it was probably +# installed into a directory called .local/bin in your home directory. You can have +# pip tell you where it is installing things by adding the `-v` option. +# +# In any case, you can confirm that `galsim` is in your path by typing # -# If you haven't run `scons install` for whatever reason, you can instead write: +# which galsim # -# ../bin/galsim demo1.yaml +# which should show you which executable will be used. (If nothing shows up, then `galsim` +# is not in your path.) +# +# Then to run this config file, you should be able to type simply: +# +# galsim demo1.yaml # # If you don't have PyYAML installed, you can use JSON files instead. The directory json has # JSON configuration files that are exactly equivalent to these YAML files. 
The YAML format From 655b2e6d797c0341476e4766db25ca446ea5d84c Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 26 Feb 2018 15:58:21 -0500 Subject: [PATCH 071/111] Ignore .pytest_cache (#809-pybind11) --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 02d802a3ef1..9c932eec0ae 100644 --- a/.gitignore +++ b/.gitignore @@ -25,3 +25,4 @@ examples_bin *.egg* build dist +.pytest_cache From 80865e90d7e32c85160be8862906bd1c49fd331e Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 26 Feb 2018 16:21:19 -0500 Subject: [PATCH 072/111] First try at setup.py travis runs (#809-pybind11) --- .travis.yml | 76 +++++++++++++------------------------------ test_requirements.txt | 4 +++ 2 files changed, 26 insertions(+), 54 deletions(-) create mode 100644 test_requirements.txt diff --git a/.travis.yml b/.travis.yml index 5759b707ef3..e2f4f8c8d82 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,18 +2,14 @@ branches: only: - master - noboost + - "#809-pybind11" language: python python: - #- 2.6 # disabled until Travis fixes their issue #6732 - # https://github.com/travis-ci/travis-ci/issues/6732 - 2.7 - 3.4 - 3.5 - #- 3.6 # disabled until Travis fixes their issue #4990 - # https://github.com/travis-ci/travis-ci/issues/4990 - # i.e. has python 3.6 pre-installed. Otherwise it can time out from having to install - # all the python 3.6 stuff from scratch. + - 3.6 compiler: - g++ @@ -21,12 +17,10 @@ compiler: before_install: - export PATH=$(echo $PATH | tr ':' "\n" | sed '/\/opt\/python/d' | tr "\n" ":" | sed "s|::|:|g") - sudo apt-get -qq update - - sudo apt-get install -y python-dev libfftw3-dev scons libblas-dev liblapack-dev gfortran libav-tools + - sudo apt-get install -y python-dev libfftw3-dev libblas-dev liblapack-dev gfortran libav-tools # List current contents of directories that should be being cached. 
- ls -l $HOME - - if test -d $HOME/tmv-0.73; then ls -l $HOME/tmv-0.73; fi - - if test -d $HOME/boost_1_61_0; then ls -l $HOME/boost_1_61_0; fi - if test -d $HOME/des_data; then ls -l $HOME/des_data; fi # Add ~/bin and ~/lib, etc. to the appropriate paths where scons install will put things. @@ -34,34 +28,6 @@ before_install: - export PATH=$HOME/bin:$PATH - export LD_LIBRARY_PATH=$HOME/lib:$LD_LIBRARY_PATH - # Fix a directory name in 3.x installations so boost can find it. - - if test -d $PYHOME/include/python${TRAVIS_PYTHON_VERSION}m; then ln -s $PYHOME/include/python${TRAVIS_PYTHON_VERSION}m $PYHOME/include/python${TRAVIS_PYTHON_VERSION}; fi - - # To get coverage of the WcsToolsWCS class: - #- sudo add-apt-repository "deb http://archive.ubuntu.com/ubuntu $(lsb_release -sc) universe" - #- sudo apt-get -qq update - #- sudo apt-get install -y wcstools - # Hm. This didn't work, and I can't figure out why. I get the following error: - # Reading package lists... Done - # Building dependency tree - # Reading state information... Done - # E: Unable to locate package wcstools - # Perhaps someone with more familiarity with apt-get can figure this out, but for now, we'll - # live with lack of coverage of WcsToolsWCS. - - # Only get TMV if not cached - - pushd $HOME - - if ! test -d tmv-0.73 || ! test -f tmv-0.73/SConstruct; then wget https://github.com/rmjarvis/tmv/archive/v0.73.tar.gz && tar -xf v0.73.tar.gz ; else echo Using cached TMV; fi - # But always install it to /usr/local - - cd tmv-0.73 && sudo scons install - - popd - - # Only get Boost if not cached - - pushd $HOME - - if ! test -d boost_1_61_0 || ! 
test -f boost_1_61_0/bootstrap.sh; then wget https://sourceforge.net/projects/boost/files/boost/1.61.0/boost_1_61_0.tar.bz2 --no-check-certificate && tar --bzip2 -xf boost_1_61_0.tar.bz2 && cd boost_1_61_0 && ./bootstrap.sh --with-python=python$TRAVIS_PYTHON_VERSION && ./b2 link=shared && cd ..; else echo Using cached Boost; fi - - cd boost_1_61_0 && sudo ./b2 -d0 link=shared install - - popd - # Get the des data needed for the check_des test. - if ! test -d $HOME/des_data || ! test -f $HOME/des_data/DECam_00154912_01.fits.fz; then wget http://www.sas.upenn.edu/~mjarvis/des_data.tar.gz && tar xfz des_data.tar.gz -C $HOME --wildcards *_01*; fi - ln -s $HOME/des_data examples/des/ @@ -69,33 +35,35 @@ before_install: cache: pip: true directories: - - $HOME/tmv-0.73 - - $HOME/boost_1_61_0 - $HOME/des_data install: # Travis doesn't always have the most up-to-date nupy already installed, so use -U - - travis_wait 30 pip install -U numpy + - pip install -U numpy + + # Install the requirements + - pip install -r requirements.txt + + # Also some things just required for tests + # (This includes scipy, which can take a while to install. So tell Travis to be patient.) + - travis_wait 30 pip install -r test_requirements.txt + # Note: matplotlib is only required because starlink has an `import matplotlib` in their # code, despite that not being a dependency. - # Allow 30 minutes for this one, since sometimes scipy in particular can take a long time - # if it's not in the cache yet. - - travis_wait 30 pip install numpy astropy future lsstdesc.coord pyyaml starlink-pyast nose codecov coveralls matplotlib==1.5.0 scipy pandas coverage - - if [[ $TRAVIS_PYTHON_VERSION == 2.6 ]]; then pip install simplejson ordereddict; fi + - pip install matplotlib + + # Finally, a few things for the code coverage + - pip nose codecov coveralls coverage script: - # This lets scons work even on Python 3 builds - # cf. 
https://github.com/travis-ci/travis-ci/issues/5961 - - source $HOME/virtualenv/python2.7/bin/activate - # But now we need to manually set the python, since it's not the default in this virtualenv. - - scons PREFIX=$HOME PYTHON=$PYHOME/bin/python PYPREFIX=$PYHOME/lib/python${TRAVIS_PYTHON_VERSION}/site-packages BOOST_DIR=$PYHOME && scons install - - if test -f gs.error; then cat gs.error; fi - # Go back to the regular python environment for the tests - - source $PYHOME/bin/activate + # Install GalSim + - python setup.py install + # If galsim_download_cosmos.py changed, then run it. - - if git --no-pager diff $TRAVIS_COMMIT_RANGE --name-only | grep -Fxq 'bin/galsim_download_cosmos.py'; then galsim_download_cosmos -s 23.5 -v1; fi + - if git --no-pager diff $TRAVIS_COMMIT_RANGE --name-only | grep -Fxq 'galsim/download_cosmos.py'; then galsim_download_cosmos -s 23.5 -v1; fi + - cd tests - # Use this rather than scons tests, so we can get the coverage options. + # Use this rather than setup.py test, so we can get the coverage options. - "nosetests test*.py --with-coverage --cover-package=galsim --with-doctest --cover-erase" # Without cover-erase, this will append to the .coverage file - "nosetests run_examples.py --with-coverage --cover-package=galsim --with-doctest" diff --git a/test_requirements.txt b/test_requirements.txt new file mode 100644 index 00000000000..56217908b3b --- /dev/null +++ b/test_requirements.txt @@ -0,0 +1,4 @@ +pytest>=3.4 +pytest-xdist>=1.19 +pytest-timeout>=1.2 +scipy>=1.0 From 37381c48b11737839bbfb408913dd516e02ed786 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 26 Feb 2018 21:33:54 -0500 Subject: [PATCH 073/111] Use ccache on Travis. 
And suggest usage in devel/README --- .travis.yml | 1 + devel/README | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/.travis.yml b/.travis.yml index e2f4f8c8d82..3e97ef5f9db 100644 --- a/.travis.yml +++ b/.travis.yml @@ -33,6 +33,7 @@ before_install: - ln -s $HOME/des_data examples/des/ cache: + ccache: true pip: true directories: - $HOME/des_data diff --git a/devel/README b/devel/README index d158f002bf7..47f02165546 100644 --- a/devel/README +++ b/devel/README @@ -84,6 +84,15 @@ With SCons, the build steps are slightly different. 2. scons install 5. scons tests +Note that SCons only remakes files that have changed, so it's not too time consuming to rebuild +each time. However, setup.py does not have that feature. So, with the setup.py workflow, we +highly recommend installing ccache. This keeps track of which cc commands have been run +previously and stores the results, which vastly speeds up builds using setup.py. + +You can download ccache from + +https://ccache.samba.org/ + 3. Commits From 23cead611c381c3010c4aeed1054982277a24a1e Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 26 Feb 2018 18:50:54 -0500 Subject: [PATCH 074/111] Use apt-get for eigen (#809-pybind11) --- .travis.yml | 2 +- requirements.txt | 2 +- setup.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 3e97ef5f9db..51f9240880e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,7 +17,7 @@ compiler: before_install: - export PATH=$(echo $PATH | tr ':' "\n" | sed '/\/opt\/python/d' | tr "\n" ":" | sed "s|::|:|g") - sudo apt-get -qq update - - sudo apt-get install -y python-dev libfftw3-dev libblas-dev liblapack-dev gfortran libav-tools + - sudo apt-get install -y python-dev libfftw3-dev libav-tools libeigen3-dev # List current contents of directories that should be being cached. 
- ls -l $HOME diff --git a/requirements.txt b/requirements.txt index 04d97053e93..8b3c4039852 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,7 +5,7 @@ # conda install -c conda-forge --file conda_requirements.txt # prior to running pip install -r requirements.txt setuptools>=38 -eigency>=1.77 +#eigency>=1.78 # 1.77 still doesn't work. Oops. (My fault.) numpy>=1.13 future>=0.15 astropy>=2.0 diff --git a/setup.py b/setup.py index 6792fb29185..ec51de3cc90 100644 --- a/setup.py +++ b/setup.py @@ -637,7 +637,7 @@ def run_tests(self): find_eigen_dir() except OSError: print('Adding eigency to build_dep') - build_dep += ['eigency>=1.77'] + build_dep += ['eigency>=1.78'] with open('README.md') as file: From 1dfec5b571fc2d77040012766708cce8d6861055 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 26 Feb 2018 20:00:27 -0500 Subject: [PATCH 075/111] Report include directory for fftw3 --- .travis.yml | 2 ++ setup.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/.travis.yml b/.travis.yml index 51f9240880e..3f458305acf 100644 --- a/.travis.yml +++ b/.travis.yml @@ -19,6 +19,8 @@ before_install: - sudo apt-get -qq update - sudo apt-get install -y python-dev libfftw3-dev libav-tools libeigen3-dev + - dpkg -L libfftw3-dev + # List current contents of directories that should be being cached. - ls -l $HOME - if test -d $HOME/des_data; then ls -l $HOME/des_data; fi diff --git a/setup.py b/setup.py index ec51de3cc90..08106e7191a 100644 --- a/setup.py +++ b/setup.py @@ -399,10 +399,12 @@ def add_dirs(builder, output=False): builder.libraries.append(os.path.split(fftw_lib)[1].split('.')[0][3:]) fftw_include = os.path.join(os.path.split(fftw_libpath)[0], 'include') if os.path.isfile(os.path.join(fftw_include, 'fftw3.h')): + print('Include directory for fftw3 is ',fftw_include) # Usually, the fftw3.h file is in an associated include dir, but not always. builder.include_dirs.append(fftw_include) else: # If not, we have our own copy of fftw3.h here. 
+ print('Using local copy of fftw3.h') builder.include_dirs.append('include/fftw3') # Look for Eigen/Core From 9bd307b1e4bccd4f64aac800c939504a02eae201 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 26 Feb 2018 19:28:23 -0500 Subject: [PATCH 076/111] Try a different path finder for fftw3 on unix --- .travis.yml | 3 +++ setup.py | 21 +++++++++++++++++++-- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 3f458305acf..5727b03c118 100644 --- a/.travis.yml +++ b/.travis.yml @@ -61,8 +61,11 @@ install: script: # Install GalSim - python setup.py install + - "ldd build/lib.*/galsim/_galsim.so" + - "ldd build/temp.*/libgalsim.a" # If galsim_download_cosmos.py changed, then run it. + - echo $TRAVIS_COMMIT_RANGE - if git --no-pager diff $TRAVIS_COMMIT_RANGE --name-only | grep -Fxq 'galsim/download_cosmos.py'; then galsim_download_cosmos -s 23.5 -v1; fi - cd tests diff --git a/setup.py b/setup.py index 08106e7191a..2c0cfc1e110 100644 --- a/setup.py +++ b/setup.py @@ -130,6 +130,7 @@ def find_fftw_lib(output=False): if dir == '': continue # This messes things up if it's in there. if dir in tried_dirs: continue else: tried_dirs.add(dir) + if not os.path.isdir(dir): continue if output: print(" ", dir, end='') try: libpath = os.path.join(dir, name) @@ -138,11 +139,26 @@ def find_fftw_lib(output=False): return libpath except OSError as e: if output: print(" (no)") - continue + # Some places use lib64 rather than/in addition to lib. Try that as well. + if dir.endswith('lib') and os.path.isdir(dir + '64'): + dir += '64' + try: + libpath = os.path.join(dir, name) + lib = ctypes.cdll.LoadLibrary(libpath) + if output: print(" ", dir, " (yes)") + return libpath + except OSError: + pass try: libpath = ctypes.util.find_library('fftw3') if libpath == None: raise OSError + if os.path.split(libpath)[0] == '': + # If the above doesn't return a real path, try this instead. 
+ libpath = ctypes.util._findLib_gcc('fftw3') + if libpath == None: + raise OSError + libpath = os.path.realpath(libpath) lib = ctypes.cdll.LoadLibrary(libpath) except Exception as e: if output: @@ -187,6 +203,7 @@ def find_eigen_dir(output=False): if output: print("Looking for Eigen:") for dir in try_dirs: + if not os.path.isdir(dir): continue if output: print(" ", dir, end='') if os.path.isfile(os.path.join(dir, 'Eigen/Core')): if output: print(" (yes)") @@ -639,7 +656,7 @@ def run_tests(self): find_eigen_dir() except OSError: print('Adding eigency to build_dep') - build_dep += ['eigency>=1.78'] + build_dep += ['eigency>=1.77'] with open('README.md') as file: From 72d1d497064b0ee4cb30c28bd5999d6712f971dc Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 26 Feb 2018 21:20:10 -0500 Subject: [PATCH 077/111] Make sure galsim comes before fftw3 in libraries list --- setup.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 2c0cfc1e110..a2ddecac2da 100644 --- a/setup.py +++ b/setup.py @@ -413,6 +413,7 @@ def add_dirs(builder, output=False): if hasattr(builder, 'library_dirs'): if fftw_libpath != '': builder.library_dirs.append(fftw_libpath) + builder.libraries.append('galsim') # Make sure galsim comes before fftw3 builder.libraries.append(os.path.split(fftw_lib)[1].split('.')[0][3:]) fftw_include = os.path.join(os.path.split(fftw_libpath)[0], 'include') if os.path.isfile(os.path.join(fftw_include, 'fftw3.h')): @@ -570,6 +571,8 @@ def run_cpp_tests(self): objects.extend(ext.extra_objects) extra_args = ext.extra_link_args or [] + libraries.append('galsim') + libraries = builder.get_libraries(ext) library_dirs = ext.library_dirs fftw_lib = find_fftw_lib() @@ -577,7 +580,6 @@ def run_cpp_tests(self): if fftw_libpath != '': library_dirs.append(fftw_libpath) libraries.append(fftw_libname.split('.')[0][3:]) - libraries.append('galsim') exe_file = os.path.join(builder.build_temp,'cpp_test') compiler.link_executable( From 
f6534d77682a9b44a4110fed087932d9ccf42373 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 26 Feb 2018 16:57:45 -0500 Subject: [PATCH 078/111] List pip installed versions. (#809-pybind11) --- .travis.yml | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/.travis.yml b/.travis.yml index 5727b03c118..16ff2bb70ef 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,7 +2,6 @@ branches: only: - master - noboost - - "#809-pybind11" language: python python: @@ -19,17 +18,10 @@ before_install: - sudo apt-get -qq update - sudo apt-get install -y python-dev libfftw3-dev libav-tools libeigen3-dev - - dpkg -L libfftw3-dev - # List current contents of directories that should be being cached. - ls -l $HOME - if test -d $HOME/des_data; then ls -l $HOME/des_data; fi - # Add ~/bin and ~/lib, etc. to the appropriate paths where scons install will put things. - - export PYHOME=$HOME/virtualenv/python${TRAVIS_PYTHON_VERSION} - - export PATH=$HOME/bin:$PATH - - export LD_LIBRARY_PATH=$HOME/lib:$LD_LIBRARY_PATH - # Get the des data needed for the check_des test. - if ! test -d $HOME/des_data || ! test -f $HOME/des_data/DECam_00154912_01.fits.fz; then wget http://www.sas.upenn.edu/~mjarvis/des_data.tar.gz && tar xfz des_data.tar.gz -C $HOME --wildcards *_01*; fi - ln -s $HOME/des_data examples/des/ @@ -41,34 +33,34 @@ cache: - $HOME/des_data install: - # Travis doesn't always have the most up-to-date nupy already installed, so use -U - - pip install -U numpy - # Install the requirements - - pip install -r requirements.txt + # Use -U to make sure we get the latest versions of everything so we notice any + # incompatibilities as soon as possible. + - pip install -U -r requirements.txt # Also some things just required for tests # (This includes scipy, which can take a while to install. So tell Travis to be patient.) 
- - travis_wait 30 pip install -r test_requirements.txt + - travis_wait 30 pip install -U -r test_requirements.txt # Note: matplotlib is only required because starlink has an `import matplotlib` in their # code, despite that not being a dependency. - pip install matplotlib # Finally, a few things for the code coverage - - pip nose codecov coveralls coverage + - pip install nose codecov coveralls coverage + + - pip list script: # Install GalSim - python setup.py install - - "ldd build/lib.*/galsim/_galsim.so" - - "ldd build/temp.*/libgalsim.a" # If galsim_download_cosmos.py changed, then run it. - echo $TRAVIS_COMMIT_RANGE - if git --no-pager diff $TRAVIS_COMMIT_RANGE --name-only | grep -Fxq 'galsim/download_cosmos.py'; then galsim_download_cosmos -s 23.5 -v1; fi - cd tests + # Use this rather than setup.py test, so we can get the coverage options. - "nosetests test*.py --with-coverage --cover-package=galsim --with-doctest --cover-erase" # Without cover-erase, this will append to the .coverage file From a7b98d97e81bea4cf15afcaf41aefbdd140ab1e2 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 26 Feb 2018 23:24:07 -0500 Subject: [PATCH 079/111] Mention apt-get option in INSTALL.md --- INSTALL.md | 33 +++++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index b954153ec58..19ad77b6460 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -168,8 +168,16 @@ active environment if appropriate). GalSim knows to look here, so there is nothing dditional you need to do. -iv) Using fink --------------- +iv) Using apt-get +----------------- + +On Linux machines that use apt-get, FFTW can be installed with + + apt-get install libfftw3-dev + + +v) Using fink +------------- If you use fink on a Mac, FFTW can be installed with @@ -181,8 +189,8 @@ This will put it into the /sw/lib directory on your system. GalSim knows to look here, so there is nothing dditional you need to do. 
-v) Using MacPorts ------------------ +vi) Using MacPorts +------------------ If you use MacPorts, FFTW can be installed with @@ -292,8 +300,16 @@ your active environment if appropriate). GalSim knows to look here, so there is nothing dditional you need to do. -iv) Using fink --------------- +iv) Using apt-get +----------------- + +On Linux machines that use apt-get, Eigen can be installed with + + apt-get install libeigen3-dev + + +v) Using fink +------------- If you use fink on a Mac, Eigen can be installed with @@ -303,8 +319,8 @@ This will put it into the /sw/include directory on your system. GalSim knows to look here, so there is nothing dditional you need to do. -v) Using MacPorts ------------------ +vi) Using MacPorts +------------------ If you use MacPorts, Eigen can be installed with @@ -334,6 +350,7 @@ that as follows (from within the main GalSim directory): conda create -y -n galsim conda activate galsim conda install -y -c conda-forge --file conda_requirements.txt + pip install -r requirements.txt pip install . 
The first two lines are optional, but they let you keep the GalSim installation From af1856295fbb5a18d661c7101b477e4438306a85 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 26 Feb 2018 23:24:44 -0500 Subject: [PATCH 080/111] Fix error in libraries for test --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index a2ddecac2da..4636e89f579 100644 --- a/setup.py +++ b/setup.py @@ -571,9 +571,9 @@ def run_cpp_tests(self): objects.extend(ext.extra_objects) extra_args = ext.extra_link_args or [] + libraries = builder.get_libraries(ext) libraries.append('galsim') - libraries = builder.get_libraries(ext) library_dirs = ext.library_dirs fftw_lib = find_fftw_lib() fftw_libpath, fftw_libname = os.path.split(fftw_lib) From 49baef422cf7faf99f67ce82429ed5bbbbf04cfb Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 26 Feb 2018 23:26:07 -0500 Subject: [PATCH 081/111] Name this version 2.0.0-alpha --- galsim/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galsim/_version.py b/galsim/_version.py index 36db5392267..7b2483af1e6 100644 --- a/galsim/_version.py +++ b/galsim/_version.py @@ -15,5 +15,5 @@ # this list of conditions, and the disclaimer given in the documentation # and/or other materials provided with the distribution. 
# -__version__ = '2.0' +__version__ = '2.0.0-alpha' __version_info__ = tuple(map(int, __version__.split('.'))) From 587b4086a530b48f4019fa94939ac4b08d14f020 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Tue, 27 Feb 2018 00:01:10 -0500 Subject: [PATCH 082/111] Handle semantic versions with words after the release number (#809-pybind11) --- galsim/__init__.py | 6 ++++-- galsim/_version.py | 1 - setup.py | 4 +++- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/galsim/__init__.py b/galsim/__init__.py index 2e475d296db..86c40ce50ce 100644 --- a/galsim/__init__.py +++ b/galsim/__init__.py @@ -76,13 +76,15 @@ lost profits, business interruption, or indirect special or consequential damages of any kind. """ +import re # The version is stored in _version.py as recommended here: # http://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package # We don't use setup.py, so it's not so important to do it this way, but if we ever switch... # And it does make it a bit easier to get the version number in SCons too. -from ._version import __version__, __version_info__ - +from ._version import __version__ +vi = re.split('\.|-',__version__) +__version_info__ = tuple([int(x) for x in vi if x.isdigit()]) # Define the current code version, in addition to the hidden attribute, to be consistent with # previous GalSim versions that indicated the version number in this way. diff --git a/galsim/_version.py b/galsim/_version.py index 7b2483af1e6..b2ea87f5c1d 100644 --- a/galsim/_version.py +++ b/galsim/_version.py @@ -16,4 +16,3 @@ # and/or other materials provided with the distribution. 
# __version__ = '2.0.0-alpha' -__version_info__ = tuple(map(int, __version__.split('.'))) diff --git a/setup.py b/setup.py index 4636e89f579..7b19c5ef4df 100644 --- a/setup.py +++ b/setup.py @@ -5,6 +5,7 @@ import ctypes.util import types import subprocess +import re try: from setuptools import setup, Extension, find_packages @@ -677,7 +678,8 @@ def run_tests(self): print('GalSim version is %s'%(galsim_version)) # Write a Version.h file that has this information for people using the C++ library. -version_info = tuple(map(int, galsim_version.split('.'))) +vi = re.split('\.|-',galsim_version) +version_info = tuple([int(x) for x in vi if x.isdigit()]) if len(version_info) == 2: version_info = version_info + (0,) version_h_text = """ From 9aa560306065e69c5e90b005b3179314b09667f4 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Tue, 27 Feb 2018 00:01:43 -0500 Subject: [PATCH 083/111] Max of 4 jobs for test by default (#809-pybind11) --- setup.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/setup.py b/setup.py index 7b19c5ef4df..35c306b262a 100644 --- a/setup.py +++ b/setup.py @@ -611,6 +611,10 @@ def run_tests(self): if self.pytest_args is None: if self.njobs is None: self.njobs = cpu_count() + if self.njobs > 4: + # Usually 4 is plenty. Testing with too many jobs tends to lead to + # memory and timeout errors. The user can bump this up if they want. 
+ self.njobs = 4 else: self.njobs = int(self.njobs) print('Using %d processes for pytest.'%self.njobs) From 1c8b43869654d535d1031c48566f97fa3795550b Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Tue, 27 Feb 2018 01:59:26 -0500 Subject: [PATCH 084/111] Try the user's home directory for possible fftw3 installation dir --- setup.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/setup.py b/setup.py index 35c306b262a..951ae8fbff3 100644 --- a/setup.py +++ b/setup.py @@ -105,18 +105,28 @@ def get_compiler(cc): def find_fftw_lib(output=False): try_libdirs = [] lib_ext = '.so' + + # Start with the explicit FFTW_DIR, if present. if 'FFTW_DIR' in os.environ: try_libdirs.append(os.environ['FFTW_DIR']) try_libdirs.append(os.path.join(os.environ['FFTW_DIR'],'lib')) + + # Try some standard locations where things get installed if 'posix' in os.name.lower(): try_libdirs.extend(['/usr/local/lib', '/usr/lib']) if 'darwin' in platform.system().lower(): try_libdirs.extend(['/usr/local/lib', '/usr/lib', '/sw/lib', '/opt/local/lib']) lib_ext = '.dylib' + + # Check the directories in LD_LIBRARY_PATH. This doesn't work on OSX >= 10.11 for path in ['LIBRARY_PATH', 'LD_LIBRARY_PATH', 'DYLD_LIBRARY_PATH']: if path in os.environ: for dir in os.environ[path].split(':'): try_libdirs.append(dir) + + # The user's home directory is often a good place to check. + try_libdirs.append(os.path.join(os.path.expanduser("~"),"lib")) + # If the above don't work, the fftw3 module may have the right directory. 
try: import fftw3 From 684150230af824c180c24660633338a8ecaed160 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Tue, 27 Feb 2018 02:00:05 -0500 Subject: [PATCH 085/111] Add enable-shared to fftw3 instructions --- INSTALL.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index 19ad77b6460..38f4133cda6 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -100,7 +100,7 @@ the following commands should work to download and install it: wget http://www.fftw.org/fftw-3.3.7.tar.gz tar xfz fftw-3.3.7.tar.gz cd fftw-3.3.7 - ./configure + ./configure --enable-shared make sudo make install @@ -108,7 +108,7 @@ If you want to install into a different directory (e.g. because you do not have sudo privileges on your machine), then specify the alternate directory with the --prefix flag to configure. E.g. - ./congigure --prefix=$HOME + ./configure --enable-shared --prefix=$HOME which will install the library into $HOME/lib and the header file into $HOME/include. In this case, leave of the sudo from the last line. @@ -138,7 +138,8 @@ directory in your LD_LIBRARY_PATH, then GalSim should find it without any extra work on your part. 
If it is in a non-standard location, and you do not want to add this path -to your LD_LIBRARY_PATH, then you can instead set the FFTW_DIR environment +to your LD_LIBRARY_PATH (or you are on a modern Mac that hides such system +variable from setup.py), then you can instead set the FFTW_DIR environment variable to tell GalSim where to look export FFTW_DIR=/some/path/to/fftw From bf3851ed015214cfa5efdaf313ff2aaaea2438c0 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Tue, 27 Feb 2018 02:00:31 -0500 Subject: [PATCH 086/111] Add cython if we need to use eigency --- INSTALL.md | 11 +++++++++++ setup.py | 3 ++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/INSTALL.md b/INSTALL.md index 38f4133cda6..2da0f06afb3 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -340,6 +340,17 @@ above, we will use eigency automatically if Eigen is not found in one of the above locations. So the above installations will take precendence, but eigency should work as a fall-back. +Note: At the time of this writing, installation of eigency depends on having +cython already installed. I thought I fixed this with PR #26, but it was +not quite complete. There is now an open PR #27, which I believe will +finish making pip install eigency work, even if you do not have cython +installed. But for now, you need to do + + pip install cython + pip install eigency + +(in that order) for it to work. + 4. Using Conda ============== diff --git a/setup.py b/setup.py index 951ae8fbff3..846400b3f4f 100644 --- a/setup.py +++ b/setup.py @@ -673,7 +673,8 @@ def run_tests(self): find_eigen_dir() except OSError: print('Adding eigency to build_dep') - build_dep += ['eigency>=1.77'] + # Once 1.78 is out I *think* we can remove the cython dependency here. 
+ build_dep += ['cython', 'eigency>=1.77'] with open('README.md') as file: From 39bce2e092317398cb952b56a1bbecc3f70f93fb Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 26 Feb 2018 23:02:24 -0800 Subject: [PATCH 087/111] Write -jN, not -j1 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 846400b3f4f..3fa28c35f8d 100644 --- a/setup.py +++ b/setup.py @@ -402,7 +402,7 @@ def fix_compiler(compiler, parallel): if ncpu > 1: print('Using %d cpus for compiling'%ncpu) if parallel is None: - print('To override, you may do python setup.py build -j1') + print('To override, you may do python setup.py build -jN') compiler.compile = types.MethodType(parallel_compile, compiler) extra_cflags = copt[comp_type] From 8bdbb776c628a43491e9522284bd43c82c8ab57d Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Mon, 26 Feb 2018 23:20:25 -0800 Subject: [PATCH 088/111] Remove workaround for Python 2.6 --- CHANGELOG.md | 1 + setup.py | 16 +++++----------- 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c5226052ba4..191e07c3513 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ Dependency Changes - Added dependency on pybind11. (#809) - Added dependency on Eigen. (#809) - FFTW is now the only dependency that pip cannot handle automatically. (#809) +- Officially no longer support Python 2.6. (Pretty sure no one cares.) API Changes diff --git a/setup.py b/setup.py index 3fa28c35f8d..23e712ccea2 100644 --- a/setup.py +++ b/setup.py @@ -74,17 +74,11 @@ def get_compiler(cc): print('compiler version information: ') for line in lines: print(line.decode().strip()) - try: - # Python3 needs this decode bit. - # Python2.7 doesn't need it, but it works fine. - line = lines[0].decode(encoding='UTF-8') - if line.startswith('Configured'): - line = lines[1].decode(encoding='UTF-8') - except TypeError: - # Python2.6 throws a TypeError, so just use the lines as they are. 
- line = lines[0] - if line.startswith('Configured'): - line = lines[1] + # Python3 needs this decode bit. + # Python2.7 doesn't need it, but it works fine. + line = lines[0].decode(encoding='UTF-8') + if line.startswith('Configured'): + line = lines[1].decode(encoding='UTF-8') if 'clang' in line: return 'clang' From cccc917aa2647002b835565e428e7cbcb350f110 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Tue, 27 Feb 2018 07:53:04 -0500 Subject: [PATCH 089/111] Don't try to cover the files for the executables, since nosetests doesn't run them. (#809-pybind11) --- tests/.coveragerc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/.coveragerc b/tests/.coveragerc index 4959a661b0d..f287d756151 100644 --- a/tests/.coveragerc +++ b/tests/.coveragerc @@ -13,6 +13,12 @@ omit = # This is a utility for tracking down OSErrors. Don't include in coverage. fds_test.py + # These files are used for the executables, galsim and galsim_download_cosmos. + # They don't get run via nosetests, so they don't really get covered. + __main__.py + main.py + download_cosmos.py + # Without this, coverage misses anything that is only run in multiprocessing mode. concurrency = multiprocessing From 125d82fb11d11bef85de9fea31490b74acd55a21 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Tue, 27 Feb 2018 08:00:10 -0500 Subject: [PATCH 090/111] Fix version_info in SCons, which I messed up. 
(#809-pybind11) --- src/SConscript | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/SConscript b/src/SConscript index 81cb78f1db4..63e542b7d03 100644 --- a/src/SConscript +++ b/src/SConscript @@ -3,11 +3,11 @@ from __future__ import print_function import os, sys import glob +import re try: sys.path = [GetBuildPath('#galsim')] + sys.path from _version import __version__ as version - from _version import __version_info__ as version_info except Exception as e: print('Caught exception: ',e) # If SCons is using an old python (2.4 is relatively common still), then the above may fail. @@ -15,8 +15,9 @@ except Exception as e: vfilename = os.path.join(GetBuildPath('#galsim'),'_version.py') exec(compile(open(vfilename, "rb").read(), vfilename, 'exec')) version = __version__ - version_info = __version_info__ +vi = re.split('\.|-',version) +version_info = tuple([int(x) for x in vi if x.isdigit()]) full_version_info = version_info if len(full_version_info) == 2: full_version_info = version_info + (0,) From af4f9777d6a3bb692162ece3fad3b48333e1825d Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Tue, 27 Feb 2018 09:11:17 -0500 Subject: [PATCH 091/111] Add * in front of omitted files (#809-pybind11) --- tests/.coveragerc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/.coveragerc b/tests/.coveragerc index f287d756151..cb662bed3f2 100644 --- a/tests/.coveragerc +++ b/tests/.coveragerc @@ -11,13 +11,13 @@ omit = *deprecated/* # This is a utility for tracking down OSErrors. Don't include in coverage. - fds_test.py + *fds_test.py # These files are used for the executables, galsim and galsim_download_cosmos. # They don't get run via nosetests, so they don't really get covered. - __main__.py - main.py - download_cosmos.py + *__main__.py + *main.py + *download_cosmos.py # Without this, coverage misses anything that is only run in multiprocessing mode. 
concurrency = multiprocessing From 4414f4699e6588a9f311fdf3c65862eba0a7e23b Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Tue, 27 Feb 2018 11:10:19 -0500 Subject: [PATCH 092/111] Add matplotlib as test dependency for starlink (#809-pybind11) --- setup.py | 3 ++- test_requirements.txt | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 23e712ccea2..0373ee7b736 100644 --- a/setup.py +++ b/setup.py @@ -660,7 +660,8 @@ def run_tests(self): build_dep = ['setuptools>=38', 'pybind11>=2.2'] run_dep = ['numpy', 'future', 'astropy', 'LSSTDESC.Coord'] -test_dep = ['pytest', 'pytest-xdist', 'pytest-timeout', 'scipy'] +test_dep = ['pytest', 'pytest-xdist', 'pytest-timeout', + 'scipy', 'pyyaml', 'starlink-pyast', 'matplotlib'] # If Eigen doesn't exist in the normal places, add eigency ad a build dependency. try: diff --git a/test_requirements.txt b/test_requirements.txt index 56217908b3b..aacbac148a6 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -2,3 +2,4 @@ pytest>=3.4 pytest-xdist>=1.19 pytest-timeout>=1.2 scipy>=1.0 +matplotlib>=2.0 # Not needed by GalSim, but an implicit requirement of starlink From 1f1435912a117ec9bcc4ec217d67744ba3a1ddbf Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Tue, 27 Feb 2018 13:56:20 -0500 Subject: [PATCH 093/111] Add nose as test_dep, so assert_raises, etc. works. (#809-pybind11) --- setup.py | 4 +++- test_requirements.txt | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 0373ee7b736..3f7cc783d26 100644 --- a/setup.py +++ b/setup.py @@ -660,8 +660,10 @@ def run_tests(self): build_dep = ['setuptools>=38', 'pybind11>=2.2'] run_dep = ['numpy', 'future', 'astropy', 'LSSTDESC.Coord'] -test_dep = ['pytest', 'pytest-xdist', 'pytest-timeout', +test_dep = ['pytest', 'pytest-xdist', 'pytest-timeout', 'nose', 'scipy', 'pyyaml', 'starlink-pyast', 'matplotlib'] +# Note: Even though we don't use nosetests, nose is required for some tests to work. +# cf. 
https://gist.github.com/dannygoldstein/e18866ebb9c39a2739f7b9f16440e2f5 # If Eigen doesn't exist in the normal places, add eigency ad a build dependency. try: diff --git a/test_requirements.txt b/test_requirements.txt index aacbac148a6..cf91a082b96 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -2,4 +2,5 @@ pytest>=3.4 pytest-xdist>=1.19 pytest-timeout>=1.2 scipy>=1.0 +nose>=1.3 matplotlib>=2.0 # Not needed by GalSim, but an implicit requirement of starlink From 13b3dc773bd312df68a32c2dab0a5f63504c2fab Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Wed, 28 Feb 2018 13:08:22 -0500 Subject: [PATCH 094/111] Make sure candidate library exists before trying to load it. (#809-pybind11) --- setup.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 3f7cc783d26..05e1f4c20b2 100644 --- a/setup.py +++ b/setup.py @@ -136,9 +136,10 @@ def find_fftw_lib(output=False): if dir in tried_dirs: continue else: tried_dirs.add(dir) if not os.path.isdir(dir): continue + libpath = os.path.join(dir, name) + if not os.path.isfile(libpath): continue if output: print(" ", dir, end='') try: - libpath = os.path.join(dir, name) lib = ctypes.cdll.LoadLibrary(libpath) if output: print(" (yes)") return libpath @@ -149,6 +150,7 @@ def find_fftw_lib(output=False): dir += '64' try: libpath = os.path.join(dir, name) + if not os.path.isfile(libpath): continue lib = ctypes.cdll.LoadLibrary(libpath) if output: print(" ", dir, " (yes)") return libpath From f4705ad32e463feba2cbcf50f039b131107404fd Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Wed, 28 Feb 2018 13:08:33 -0500 Subject: [PATCH 095/111] Check EIGEN_DIR/include (#809-pybind11) --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 05e1f4c20b2..61c23f15d4f 100644 --- a/setup.py +++ b/setup.py @@ -188,7 +188,7 @@ def find_eigen_dir(output=False): try_dirs = [] if 'EIGEN_DIR' in os.environ: try_dirs.append(os.environ['EIGEN_DIR']) 
- try_dirs.append(os.path.join(os.environ['EIGEN_DIR'])) + try_dirs.append(os.path.join(os.environ['EIGEN_DIR'], 'include')) # This is where conda will install it. try_dirs.append(distutils.sysconfig.get_config_var('INCLUDEDIR')) if 'posix' in os.name.lower(): From 06e0c09f4db5a508e9330373436680bf2b399973 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Wed, 28 Feb 2018 13:39:35 -0500 Subject: [PATCH 096/111] Check if clang needs the -stdlib=libc++ flag (#809-pybind11) --- setup.py | 73 +++++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 56 insertions(+), 17 deletions(-) diff --git a/setup.py b/setup.py index 61c23f15d4f..34d95dcf40e 100644 --- a/setup.py +++ b/setup.py @@ -229,23 +229,10 @@ def find_eigen_dir(output=False): raise OSError("Could not find Eigen") -def try_cc(cc, cflags=[], lflags=[]): - """Check if compiling a simple bit of c++ code with the given compiler works properly. +def try_compile(cpp_code, cc, cflags=[], lflags=[]): + """Check if compiling some code with the given compiler and flags works properly. """ import tempfile - from textwrap import dedent - cpp_code = dedent(""" - #include - #include - int main() { - int n = 500; - std::vector x(n,0.); - for (int i=0; i + #include + int main() { + int n = 500; + std::vector x(n,0.); + for (int i=0; i + #include + #include + + int main(void) { + std::cout << std::tgamma(1.3) << std::endl; + return 0; + } + """) + return try_compile(cpp_code, cc, cflags, lflags) + + def cpu_count(): """Get the number of cpus """ @@ -384,8 +407,14 @@ def fix_compiler(compiler, parallel): else: print('Using compiler %s, which is %s'%(cc,comp_type)) + # Make sure the compiler works with a simple c++ code + if not try_cpp(cc, cflags): + print("There seems to be something wrong with the compiler or cflags") + print("%s %s"%(cc, ' '.join(cflags))) + raise OSError("Compiler does not work for compiling C++ code") + # Check if we can use ccache to speed up repeated compilation. 
- if try_cc('ccache ' + cc, cflags): + if try_cpp('ccache ' + cc, cflags): print('Using ccache') compiler.set_executable('compiler_so', ['ccache',cc] + cflags) @@ -402,9 +431,19 @@ def fix_compiler(compiler, parallel): compiler.compile = types.MethodType(parallel_compile, compiler) extra_cflags = copt[comp_type] - print('Using extra flags ',extra_cflags) + + success = try_cpp11(cc, cflags + extra_cflags) + if not success: + # Sometimes clang requires an extra flag to use c++11 properly + extra_cflags += ['-stdlib=libc++'] + success = try_cpp11(cc, cflags + extra_cflags) + if not success: + print('The compiler %s with flags %s did not successfully compile C++11 code'% + (cc, ' '.join(extra_cflags))) + raise OSError("Compiler is not C++-11 compatible") # Return the extra cflags, since those will be added to the build step in a different place. + print('Using extra flags ',extra_cflags) return extra_cflags def add_dirs(builder, output=False): From ed2b677b23586d03bf362f5f4d8d1a29434c943f Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Wed, 28 Feb 2018 14:00:46 -0500 Subject: [PATCH 097/111] Fix -j flag to work even with older distutils (#809-pybind11) --- setup.py | 50 +++++++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/setup.py b/setup.py index 34d95dcf40e..4646c929603 100644 --- a/setup.py +++ b/setup.py @@ -354,7 +354,7 @@ def cpu_count(): return ncpus return 1 # Default -def parallel_compile(self, sources, output_dir=None, macros=None, +def parallel_compile(self, sources, njobs, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None): """New compile function that we monkey patch into the existing compiler instance. 
@@ -362,7 +362,6 @@ def parallel_compile(self, sources, output_dir=None, macros=None, import multiprocessing.pool # Copied from the regular compile function - ncpu = cpu_count() macros, objects, extra_postargs, pp_opts, build = \ self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs) @@ -375,13 +374,13 @@ def _single_compile(obj): return self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) - if ncpu == 1: + if njobs == 1: # This is equivalent to regular compile function for obj in objects: _single_compile(obj) else: # Use ThreadPool, rather than Pool, since the objects are picklable. - pool = multiprocessing.pool.ThreadPool(ncpu) + pool = multiprocessing.pool.ThreadPool(njobs) pool.map(_single_compile, objects) pool.close() pool.join() @@ -390,7 +389,7 @@ def _single_compile(obj): return objects -def fix_compiler(compiler, parallel): +def fix_compiler(compiler, njobs): # Remove any -Wstrict-prototypes in the compiler flags (since invalid for C++) try: compiler.compiler_so.remove("-Wstrict-prototypes") @@ -418,17 +417,8 @@ def fix_compiler(compiler, parallel): print('Using ccache') compiler.set_executable('compiler_so', ['ccache',cc] + cflags) - if parallel is None or parallel is True: - ncpu = cpu_count() - elif parallel: # is an integer - ncpu = parallel - else: - ncpu = 1 - if ncpu > 1: - print('Using %d cpus for compiling'%ncpu) - if parallel is None: - print('To override, you may do python setup.py build -jN') - compiler.compile = types.MethodType(parallel_compile, compiler) + if njobs > 1: + compiler.compile = types.MethodType(parallel_compile, compiler, njobs) extra_cflags = copt[comp_type] @@ -503,12 +493,10 @@ def finalize_options(self): # Add any extra things based on the compiler being used.. def build_libraries(self, libraries): - # They didn't put the parallel option into build_clib like they did with build_ext, so - # look for the parallel option there instead. 
build_ext = self.distribution.get_command_obj('build_ext') - parallel = getattr(build_ext, 'parallel', True) + njobs = getattr(build_ext, 'njobs', None) - cflags = fix_compiler(self.compiler, parallel) + cflags = fix_compiler(self.compiler, njobs) # Add the appropriate extra flags for that compiler. for (lib_name, build_info) in libraries: @@ -520,6 +508,10 @@ def build_libraries(self, libraries): # Make a subclass of build_ext so we can add to the -I list. class my_build_ext(build_ext): + def initialize_options(self): + build_ext.initialize_options(self) + self.njobs = None + def finalize_options(self): build_ext.finalize_options(self) add_dirs(self) @@ -527,11 +519,19 @@ def finalize_options(self): # Add any extra things based on the compiler being used.. def build_extensions(self): - # The -jN option was new in distutils version 3.5. - # If user has older version, just set parallel to True and move on. - parallel = getattr(self, 'parallel', True) - - cflags = fix_compiler(self.compiler, parallel) + if self.njobs is None: + njobs = cpu_count() + if njobs > 4: + # Usually 4 is plenty. Testing with too many jobs tends to lead to + # memory and timeout errors. The user can bump this up if they want. + njobs = 4 + print('Using %d cpus for compiling'%njobs) + print('To override, you may do python setup.py install -jN') + else: + njobs = int(self.njobs) + print('Using %d cpus for compiling'%njobs) + + cflags = fix_compiler(self.compiler, njobs) # Add the appropriate extra flags for that compiler. 
for e in self.extensions: From 990b6436a63eaaa18157dbd979121753372725c9 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Wed, 28 Feb 2018 15:54:14 -0500 Subject: [PATCH 098/111] Fix -jN option to control number of jobs for compiling (#809-pybind11) --- setup.py | 120 ++++++++++++++++++++++++++++++++++--------------------- 1 file changed, 75 insertions(+), 45 deletions(-) diff --git a/setup.py b/setup.py index 4646c929603..c06b6199349 100644 --- a/setup.py +++ b/setup.py @@ -354,7 +354,7 @@ def cpu_count(): return ncpus return 1 # Default -def parallel_compile(self, sources, njobs, output_dir=None, macros=None, +def parallel_compile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None): """New compile function that we monkey patch into the existing compiler instance. @@ -374,13 +374,15 @@ def _single_compile(obj): return self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) - if njobs == 1: + # Set by fix_compiler + global glob_use_njobs + if glob_use_njobs == 1: # This is equivalent to regular compile function for obj in objects: _single_compile(obj) else: # Use ThreadPool, rather than Pool, since the objects are picklable. - pool = multiprocessing.pool.ThreadPool(njobs) + pool = multiprocessing.pool.ThreadPool(glob_use_njobs) pool.map(_single_compile, objects) pool.close() pool.join() @@ -418,7 +420,12 @@ def fix_compiler(compiler, njobs): compiler.set_executable('compiler_so', ['ccache',cc] + cflags) if njobs > 1: - compiler.compile = types.MethodType(parallel_compile, compiler, njobs) + # Global variable for tracking the number of jobs to use. + # We can't pass this to parallel compile, since the signature is fixed. + # So if using parallel compile, set this value to use within parallel compile. 
+ global glob_use_njobs + glob_use_njobs = njobs + compiler.compile = types.MethodType(parallel_compile, compiler) extra_cflags = copt[comp_type] @@ -483,18 +490,52 @@ def add_dirs(builder, output=False): builder.include_dirs.append(try_dir) break +def parse_njobs(njobs, task=None, command=None, maxn=4): + """Helper function to parse njobs, which may be None (use ncpu) or an int. + Returns an int value for njobs + """ + if njobs is None: + njobs = cpu_count() + if maxn != None and njobs > maxn: + # Usually 4 is plenty. Testing with too many jobs tends to lead to + # memory and timeout errors. The user can bump this up if they want. + njobs = maxn + if task is not None: + if njobs == 1: + print('Using a single process for %s.'%task) + else: + print('Using %d cpus for %s'%(njobs,task)) + print('To override, you may do python setup.py %s -jN'%command) + else: + njobs = int(njobs) + if task is not None: + if njobs == 1: + print('Using a single process for %s.'%task) + else: + print('Using %d cpus for %s'%(njobs,task)) + return njobs + # Make a subclass of build_ext so we can add to the -I list. class my_build_clib(build_clib): + user_options = build_ext.user_options + [('njobs=', 'j', "Number of jobs to use for compiling")] + + def initialize_options(self): + build_clib.initialize_options(self) + self.njobs = None + def finalize_options(self): build_clib.finalize_options(self) + if self.njobs is None and 'glob_njobs' in globals(): + global glob_njobs + self.njobs = glob_njobs add_dirs(self, output=True) # This happens first, so only output for this call. # Add any extra things based on the compiler being used.. def build_libraries(self, libraries): build_ext = self.distribution.get_command_obj('build_ext') - njobs = getattr(build_ext, 'njobs', None) + njobs = parse_njobs(self.njobs, 'compiling', 'install') cflags = fix_compiler(self.compiler, njobs) @@ -508,29 +549,25 @@ def build_libraries(self, libraries): # Make a subclass of build_ext so we can add to the -I list. 
class my_build_ext(build_ext): + user_options = build_ext.user_options + [('njobs=', 'j', "Number of jobs to use for compiling")] + def initialize_options(self): build_ext.initialize_options(self) self.njobs = None def finalize_options(self): build_ext.finalize_options(self) + # I couldn't find an easy way to send the user option from my_install to my_buld_ext. + # So use a global variable. (UGH!) + if self.njobs is None and 'glob_njobs' in globals(): + global glob_njobs + self.njobs = glob_njobs add_dirs(self) # Add any extra things based on the compiler being used.. def build_extensions(self): - if self.njobs is None: - njobs = cpu_count() - if njobs > 4: - # Usually 4 is plenty. Testing with too many jobs tends to lead to - # memory and timeout errors. The user can bump this up if they want. - njobs = 4 - print('Using %d cpus for compiling'%njobs) - print('To override, you may do python setup.py install -jN') - else: - njobs = int(self.njobs) - print('Using %d cpus for compiling'%njobs) - + njobs = parse_njobs(self.njobs, 'compiling', 'install') cflags = fix_compiler(self.compiler, njobs) # Add the appropriate extra flags for that compiler. @@ -564,6 +601,17 @@ def make_meta_data(install_dir): return meta_data_file class my_install(install): + user_options = install.user_options + [('njobs=', 'j', "Number of jobs to use for compiling")] + + def initialize_options(self): + install.initialize_options(self) + self.njobs = None + + def finalize_options(self): + install.finalize_options(self) + global glob_njobs + glob_njobs = self.njobs + def run(self): # Make the meta_data.py file based on the actual installation directory. meta_data_file = make_meta_data(self.install_lib) @@ -588,8 +636,7 @@ def run(self): class my_test(test): # cf. 
https://pytest.readthedocs.io/en/2.7.3/goodpractises.html - user_options = [('pytest-args=', 'a', "Arguments to pass to py.test"), - ('njobs=', 'j', "Number of jobs to use in py.test")] + user_options = [('njobs=', 'j', "Number of jobs to use in py.test")] def initialize_options(self): test.initialize_options(self) @@ -643,52 +690,35 @@ def run_cpp_tests(self): p.communicate() for line in lines: print(line.decode().strip()) - if p.returncode == 0: - print("All C++ tests passed.") - else: + if p.returncode != 0: raise RuntimeError("C++ tests failed") + print("All C++ tests passed.") def run_tests(self): # Build and run the C++ tests self.run_cpp_tests() - if self.pytest_args is None: - if self.njobs is None: - self.njobs = cpu_count() - if self.njobs > 4: - # Usually 4 is plenty. Testing with too many jobs tends to lead to - # memory and timeout errors. The user can bump this up if they want. - self.njobs = 4 - else: - self.njobs = int(self.njobs) - print('Using %d processes for pytest.'%self.njobs) - print('To change this use python setup.py test -jN') - self.pytest_args = ['-n=%d'%self.njobs, '--timeout=60'] - else: - self.pytest_args = self.pytest_args.split() + njobs = parse_njobs(self.njobs, 'pytest', 'test') + pytest_args = ['-n=%d'%njobs, '--timeout=60'] - #print('Using pytest args: ',self.pytest_args,' (can update with -a pytest_args)') original_dir = os.getcwd() os.chdir('tests') test_files = glob.glob('test*.py') if True: import pytest - errno = pytest.main(self.pytest_args + test_files) - errno = 0 + errno = pytest.main(pytest_args + test_files) if errno != 0: - sys.exit(errno) + raise RuntimeError("Some Python tests failed") else: # Alternate method calls pytest executable. But the above code seems to work. 
- p = subprocess.Popen(['pytest'] + self.pytest_args + test_files) + p = subprocess.Popen(['pytest'] + pytest_args + test_files) p.communicate() - if p.returncode == 0: - print("All python tests passed.") - else: + if p.returncode != 0: raise RuntimeError("Some Python tests failed") - os.chdir(original_dir) + print("All python tests passed.") lib=("galsim", {'sources' : cpp_sources, From 956344534f44179d1a32a41f2422ccfb95cb9079 Mon Sep 17 00:00:00 2001 From: Josh Meyers Date: Wed, 28 Feb 2018 16:04:38 -0800 Subject: [PATCH 099/111] Prefer libc++ for clang compiling and linking --- setup.py | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/setup.py b/setup.py index c06b6199349..f27a412e246 100644 --- a/setup.py +++ b/setup.py @@ -54,7 +54,8 @@ def all_files_from(dir, ext=''): copt = { 'gcc' : ['-O2','-msse2','-std=c++11','-fvisibility=hidden'], 'icc' : ['-O2','-msse2','-vec-report0','-std=c++11'], - 'clang' : ['-O2','-msse2','-std=c++11','-Wno-shorten-64-to-32','-fvisibility=hidden'], + 'clang' : ['-O2','-msse2','-std=c++11','-Wno-shorten-64-to-32','-fvisibility=hidden', + '-stdlib=libc++'], 'unknown' : [], } @@ -398,6 +399,10 @@ def fix_compiler(compiler, njobs): except (AttributeError, ValueError): pass + # Remove ccache if present so it isn't interpretted as the compiler + if compiler.compiler_so[0] == 'ccache': + del compiler.compiler_so[0] + # Figure out what compiler it will use #print('compiler = ',compiler.compiler) cc = compiler.compiler_so[0] @@ -431,9 +436,13 @@ def fix_compiler(compiler, njobs): success = try_cpp11(cc, cflags + extra_cflags) if not success: - # Sometimes clang requires an extra flag to use c++11 properly - extra_cflags += ['-stdlib=libc++'] - success = try_cpp11(cc, cflags + extra_cflags) + # In case libc++ doesn't work, try letting the system use the default stdlib + try: + extra_cflags.remove('-stdlib=libc++') + except (AttributeError, ValueError): + pass + else: + success = try_cpp11(cc, 
cflags + extra_cflags) if not success: print('The compiler %s with flags %s did not successfully compile C++11 code'% (cc, ' '.join(extra_cflags))) @@ -573,6 +582,9 @@ def build_extensions(self): # Add the appropriate extra flags for that compiler. for e in self.extensions: e.extra_compile_args = cflags + for flag in cflags: + if 'stdlib' in flag: + e.extra_link_args.append(flag) # Now run the normal build function. build_ext.build_extensions(self) @@ -664,6 +676,9 @@ def run_cpp_tests(self): objects.extend(ext.extra_objects) extra_args = ext.extra_link_args or [] + cflags = fix_compiler(compiler, False) + extra_args.extend(cflags) + libraries = builder.get_libraries(ext) libraries.append('galsim') @@ -843,4 +858,3 @@ def run_tests(self): print(' Alternatively, you can specify a different prefix with --prefix=PREFIX,') print(' in which case the scripts will be installed in PREFIX/bin.') print(' If you are installing via pip use --install-option="--prefix=PREFIX"') - From b7832373c06f412292669b15872a57cda31e6d68 Mon Sep 17 00:00:00 2001 From: Josh Meyers Date: Wed, 14 Mar 2018 14:40:34 -0700 Subject: [PATCH 100/111] typos --- INSTALL.md | 56 +++++++++++++++++++++++++++--------------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index 2da0f06afb3..94d414cbfdc 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -24,7 +24,7 @@ Table of Contents: ================== GalSim is a python module that has much of its implementation in C++ for -improved computational efficiency. GalSim supports both Python 2 and +improved computational efficiency. GalSim supports both Python 2 and Python 3. It is regularly tested on Python versions (2.7, 3.4, 3.5, 3.6). The usual way to install GalSim is now (starting with version 2.0) simply @@ -44,8 +44,8 @@ you can do python setup.py install -(again possibly with either sudo or --user). 
This sometimes does not properly -install all of the dependencies properly, so you might need to first run +(again possibly with either sudo or --user). This sometimes does not install +all of the dependencies properly, so you might need to first run pip install -r requirements.txt @@ -53,7 +53,7 @@ Either of these installation methods should handle most of the required dependencies for you if you do not have them already installed on your machine. There is one exception, however. FFTW is not directly pip installable, so if the above installation fails, you may need to install it separately. See -sections 2 below for more details about how to do this. +section 2 below for more details about how to do this. The other dependencies should all be installed by pip, but we list them here for completeness along with versions that are known to work. In most cases, @@ -111,12 +111,12 @@ with the --prefix flag to configure. E.g. ./configure --enable-shared --prefix=$HOME which will install the library into $HOME/lib and the header file into -$HOME/include. In this case, leave of the sudo from the last line. -Also, you should make sure these directories are in your LD_LIBRARY_PATh +$HOME/include. In this case, leave off the sudo from the last line. +Also, you should make sure these directories are in your LD_LIBRARY_PATH and C_INCLUDE_PATH environment variables, respectively. Alternatively, if you do not want to modify your LD_LIBRARY_PATH and/or -C_INCLUDE_PATH, you can instead set an environment variabe to tell GalSim +C_INCLUDE_PATH, you can instead set an environment variable to tell GalSim where the files are export FFTW_DIR=/path/to/fftw/prefix @@ -139,7 +139,7 @@ any extra work on your part. 
If it is in a non-standard location, and you do not want to add this path to your LD_LIBRARY_PATH (or you are on a modern Mac that hides such system -variable from setup.py), then you can instead set the FFTW_DIR environment +variables from setup.py), then you can instead set the FFTW_DIR environment variable to tell GalSim where to look export FFTW_DIR=/some/path/to/fftw @@ -163,10 +163,10 @@ iii) Using conda If you use conda, FFTW can be install with conda install fftw - + This will put it into the anaconda/lib directory on your system (within your active environment if appropriate). GalSim knows to look here, so there is -nothing dditional you need to do. +nothing additional you need to do. iv) Using apt-get @@ -187,7 +187,7 @@ If you use fink on a Mac, FFTW can be installed with (Make sure to use fftw3, not fftw, since fftw is version 2.) This will put it into the /sw/lib directory on your system. GalSim knows to -look here, so there is nothing dditional you need to do. +look here, so there is nothing additional you need to do. vi) Using MacPorts @@ -197,15 +197,15 @@ If you use MacPorts, FFTW can be installed with port install fftw-3 -This will put it into the /opt/loca/lib directory on your system. GalSim knows -to look here, so there is nothing dditional you need to do. +This will put it into the /opt/local/lib directory on your system. GalSim knows +to look here, so there is nothing additional you need to do. 3. Installing Eigen =================== GalSim uses Eigen for the C++-layer linear algebra calculations. It is a -header-only library, which means that nothing needs to be compiled to use them. +header-only library, which means that nothing needs to be compiled to use it. You can download the header files yourself, but if you do not, then we use the pip-installable eigency module, which bundles the header files in their installed python directory. So usually, this dependency should require no @@ -217,9 +217,9 @@ bundled with eigency. 
(Eigen 3.2.8 is bundled with eigency 1.77.) Therefore, this section describes several options for how to obtain and install Eigen. We require Eigen version >= 3.0. Most tests have been done with Eigen 3.2.8 -or 3.3.4, but we have also 3.0.4, so probably any 3.x version will work. -However, if you have trouble with another version, try upgrading to 3.2.8 or -later. +or 3.3.4, but we have also tested on 3.0.4, so probably any 3.x version will +work. However, if you have trouble with another version, try upgrading to +3.2.8 or later. Note: Prior to version 2.0, GalSim used TMV for the linear algebra back end. This is still an option if you prefer (e.g. it may be faster for some use @@ -250,7 +250,7 @@ as $HOME/include instead and leave off the sudo from the cp command. In this case, make sure this directory is in your C_INCLUDE_PATH environment variable. Finally, you can also skip the last command above and instead set EIGEN_DIR -as an environment variabe to tell GalSim where the files are +as an environment variable to tell GalSim where the files are export EIGEN_DIR=/some/path/to/eigen @@ -295,10 +295,10 @@ iii) Using conda If you use conda, Eigen can be install with conda install eigen - + This will put it into the anaconda/include directory on your system (within your active environment if appropriate). GalSim knows to look here, so there -is nothing dditional you need to do. +is nothing additional you need to do. iv) Using apt-get @@ -317,7 +317,7 @@ If you use fink on a Mac, Eigen can be installed with fink install eigen This will put it into the /sw/include directory on your system. GalSim knows -to look here, so there is nothing dditional you need to do. +to look here, so there is nothing additional you need to do. vi) Using MacPorts @@ -328,14 +328,14 @@ If you use MacPorts, Eigen can be installed with port install eigen This will put it into the /opt/local/include directory on your system. 
GalSim -knows to look here, so there is nothing dditional you need to do. +knows to look here, so there is nothing additional you need to do. vi) Using eigency ----------------- Eigency is a pip-installable module that bundles the Eigen header files, so it -can also be used to install these files on your system. Indeed, as mentioned +can also be used to install these files on your system. Indeed, as mentioned above, we will use eigency automatically if Eigen is not found in one of the above locations. So the above installations will take precendence, but eigency should work as a fall-back. @@ -410,20 +410,20 @@ You can run our test suite by typing This should run all the python-layer tests with pytest and also compile and run the C++ test suite. -By default, the python tests will use the pytest plugins `pytest-xdist` (for -running tests in parallel) and `pytest-timeout` (to manage how much time each +By default, the python tests will use the pytest plugins `pytest-xdist` (for +running tests in parallel) and `pytest-timeout` (to manage how much time each test is allowed to run). These plugins are usually installable using pip: pip install pytest-xdist pytest-timeout - -Sometimes the `--user` flag may be needed in the above command to make the + +Sometimes the `--user` flag may be needed in the above command to make the plugins discoverable. If you want to run the python tests without these plugins (serially!), you can still do this via python setup.py test -j1 Note: if your system does not have `pytest` installed, and you do not want to -installq it, you can run all the Python tests with the script run_all_tests in +install it, you can run all the Python tests with the script run_all_tests in the `tests` directory. If this finishes without an error, then all the tests have passed. However, note that this script runs more tests than our normal test run using pytest, so it may take quite a while to finish. 
(The *all* in From d95730f95ac5fcca95add662ef3df8e20f2a5727 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Thu, 15 Mar 2018 12:20:21 -0400 Subject: [PATCH 101/111] Fix boost version of PYBIND11_MODULE definition (#809-pybind11) --- pysrc/PyBind11Helper.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pysrc/PyBind11Helper.h b/pysrc/PyBind11Helper.h index 8c8d73fcbe8..0a2fa15cb59 100644 --- a/pysrc/PyBind11Helper.h +++ b/pysrc/PyBind11Helper.h @@ -36,7 +36,7 @@ namespace py = boost::python; // macros allow us to write code that works for either boost python or pybind11. // First some things where the boost equivalent of some pybind11 function is different: -#define PYBIND11_MODULE(x,x) BOOST_PYTHON_MODULE(x) +#define PYBIND11_MODULE(x,y) BOOST_PYTHON_MODULE(x) #define PY_MODULE py::scope #define PY_CAST py::extract #define PY_INIT(args...) "__init__", py::make_constructor(args, py::default_call_policies()) From 7f623059a575dfb50c8e30ae542d961f6108ad5b Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Thu, 15 Mar 2018 12:42:55 -0400 Subject: [PATCH 102/111] Apply Josh's suggested changes in INSTALL files (#809-pybind11) --- INSTALL.md | 92 ++++++++++++++++++++++++++++-------------------- INSTALL_SCONS.md | 34 +++++++++--------- 2 files changed, 70 insertions(+), 56 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index 94d414cbfdc..306df966e0f 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -5,23 +5,23 @@ System requirements: GalSim currently only supports Linux and Mac OSX. 
Table of Contents: -1) [Overall summary](#1-overall-summary) +1) [Overall summary](overall-summary) -2) [Installing FFTW](#2-installing-fftw) +2) [Installing FFTW](installing-fftw) -3) [Installing Eigen](#3-installing-eigen) +3) [Installing Eigen](installing-eigen) -4) [Using Conda](#4-using-conda) +4) [Using Conda](using-conda) -5) [Installing With SCons](#5-installing-with-scons) +5) [Installing With SCons](installing-with-scons) -6) [Running tests](#6-running-tests) +6) [Running tests](running-tests) -7) [Running example scripts](#7-running-example-scripts) +7) [Running example scripts](running-example-scripts) -1. Overall summary -================== +Overall summary +=============== GalSim is a python module that has much of its implementation in C++ for improved computational efficiency. GalSim supports both Python 2 and @@ -42,12 +42,18 @@ or $HOME/.local, depending on your system.) If you would rather install from source (e.g. to work on a development branch), you can do + git clone git@github.com:GalSim-developers/GalSim.git + cd GalSim + pip install -r requirements.txt python setup.py install -(again possibly with either sudo or --user). This sometimes does not install -all of the dependencies properly, so you might need to first run +(again possibly with either sudo or --user). - pip install -r requirements.txt + +If you use Anaconda Python, you can use that to install most of the +requirements with their conda installer. See [Using Conda](using-conda) +below. + Either of these installation methods should handle most of the required dependencies for you if you do not have them already installed on your machine. @@ -57,12 +63,12 @@ section 2 below for more details about how to do this. The other dependencies should all be installed by pip, but we list them here for completeness along with versions that are known to work. 
In most cases, -other recent versions will also work: +other recent (especially later) versions will also work: -- Eigen (3.2.8) (via eigency 1.77) -- NumPy (1.14.0) +- Eigen (3.2.5) +- NumPy (1.13.1) - Future (0.16.0) -- Astropy (2.0.3) +- Astropy (2.0.1) - PyBind11 (2.2.1) - LSSTDESC.Coord (1.0.5) @@ -77,8 +83,8 @@ pip uses to determine what else to install. But if you install with - Pandas (0.20) (Faster reading of ASCII input files) -2. Installing FFTW -================== +Installing FFTW +=============== GalSim uses FFTW (The Fastest Fourier Transform in the West) for performing fast fourier transforms. @@ -86,7 +92,7 @@ fast fourier transforms. We require FFTW version >= 3.0. Most tests have been done with FFTW 3.3.7, so if you have trouble with an earlier version, try upgrading to 3.3.7 or later. - +o i) Installing it yourself ------------------------- @@ -125,8 +131,8 @@ E.g. in the above case where prefix is $HOME, you would do export FFTW_DIR=$HOME -Probably, you should put this into your .bash_profile file so it always gets -set when you log in. +Probably, you should put this into your shell login file (e.g. .bash_profile) +so it always gets set when you log in. ii) Using an existing installation @@ -201,8 +207,8 @@ This will put it into the /opt/local/lib directory on your system. GalSim knows to look here, so there is nothing additional you need to do. -3. Installing Eigen -=================== +Installing Eigen +================ GalSim uses Eigen for the C++-layer linear algebra calculations. It is a header-only library, which means that nothing needs to be compiled to use it. @@ -352,8 +358,8 @@ installed. But for now, you need to do (in that order) for it to work. -4. 
Using Conda -============== +Using Conda +=========== If you use conda (normally via the Anaconda Python distribution), then all of the prerequisites are available from the conda-forge channel, so you can use @@ -383,8 +389,8 @@ defaults, then that should still work and pybind11 will be the only one that will need the conda-forge channel. -5. Installing With SCons -======================== +Installing With SCons +===================== Prior to version 2.0, GalSim installation used SCons. This installation mode is still supported, but is not recommended unless you have difficulties @@ -400,8 +406,8 @@ See the file INSTALL_SCONS.md for complete details about this method of installation. -6. Running tests -================ +Running tests +============= You can run our test suite by typing @@ -410,29 +416,37 @@ You can run our test suite by typing This should run all the python-layer tests with pytest and also compile and run the C++ test suite. -By default, the python tests will use the pytest plugins `pytest-xdist` (for -running tests in parallel) and `pytest-timeout` (to manage how much time each -test is allowed to run). These plugins are usually installable using pip: +There are a number of packages that are used by the tests, but which are not +required for GalSim installation and running. These should be installed +automatically by the above command, but you can install them manually via - pip install pytest-xdist pytest-timeout + pip install -r test_requirements.txt -Sometimes the `--user` flag may be needed in the above command to make the -plugins discoverable. If you want to run the python tests without these -plugins (serially!), you can still do this via +(As usually, you may need to add either `sudo` or `--user`.) + +By default, the tests will run in parallel using the pytest plugins +`pytest-xdist` and `pytest-timeout` (to manage how much time each test is +allowed to run). 
If you want to run the python tests in serial instead, +you can do this via python setup.py test -j1 -Note: if your system does not have `pytest` installed, and you do not want to +You can also use this to modify how many jobs will be spawned for running the +tests. + + +If your system does not have `pytest` installed, and you do not want to install it, you can run all the Python tests with the script run_all_tests in the `tests` directory. If this finishes without an error, then all the tests have passed. However, note that this script runs more tests than our normal test run using pytest, so it may take quite a while to finish. (The *all* in the file name means run all the tests including the slow ones that we normally skip.) + -7. Running example scripts -========================== +Running example scripts +======================= The `examples` directory has a series of demo scripts: diff --git a/INSTALL_SCONS.md b/INSTALL_SCONS.md index bfdb3a71521..e4462cd36a8 100644 --- a/INSTALL_SCONS.md +++ b/INSTALL_SCONS.md @@ -9,8 +9,8 @@ Please see the instructions in INSTALL.md first to see if that method will work for you. -1. Software required before building GalSim -=========================================== +Software required before building GalSim +======================================== Please note: Mac users who want to use fink can skip down to Section 5.ii and use that to satisfy all dependencies before installing. @@ -136,8 +136,8 @@ options to the ./bootstrap.sh installation script (defaults in `[]` brackets): it detected the wrong one] -2. Installing the GalSim Python package -======================================= +Installing the GalSim Python package +==================================== Once you have installed all the dependencies described above, you are ready to build GalSim. 
From the GalSim base directory (in which this file is found) type @@ -169,7 +169,7 @@ need to set are: * `TMV_DIR`: Explicitly give the TMV prefix * `EIGEN_DIR`: Explicitly give the Eigen prefix -# `USE_BOOST`: Specify that you want to use Boost rather than PyBind11. +* `USE_BOOST`: Specify that you want to use Boost rather than PyBind11. * `BOOST_DIR`: Explicitly give the Boost prefix * `EXTRA_LIBS`: Additional libraries to send to the linker @@ -274,8 +274,8 @@ explaining what your particular problem is, and hopefully someone can help you figure out a solution. -3. Running tests and installing example executables -=================================================== +Running tests and installing example executables +================================================ You can run our test suite by typing @@ -292,13 +292,13 @@ above (see also https://docs.pytest.org/en/latest/). Many third party- maintained Python distributions, such as the Enthought Python Distribution, include `pytest`. -By default, the python tests will use the pytest plugins `pytest-xdist` (for -running tests in parallel) and `pytest-timeout` (to manage how much time each +By default, the python tests will use the pytest plugins `pytest-xdist` (for +running tests in parallel) and `pytest-timeout` (to manage how much time each test is allowed to run). These plugins are usually installable using pip: pip install pytest-xdist pytest-timeout - -Sometimes the `--user` flag may be needed in the above command to make the + +Sometimes the `--user` flag may be needed in the above command to make the plugins discoverable. If you want to run the python tests without these plugins (serially!), you can still do this via @@ -310,8 +310,8 @@ the `tests` directory. If this finishes without an error, then all the tests have passed. -4. 
Running example scripts -========================== +Running example scripts +======================= The `examples` directory has a series of demo scripts: @@ -349,8 +349,8 @@ Some of them access files in subdirectories of the `examples` directory, so they would not work correctly from other locations. -5. Platform-specific notes -========================== +Platform-specific notes +======================= i) Linux -------- @@ -491,8 +491,8 @@ Karen Ng has created a Docker file for containerizing GalSim. See her repo: for instructions about how to either use her image or create your own. -6. More SCons options -===================== +More SCons options +================== Here is a fairly complete list of the options you can pass to SCons to control the build process. The options are listed with their default value. You change From 06630610233953080ab0fdb2e915174d78317d3d Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Thu, 15 Mar 2018 12:58:42 -0400 Subject: [PATCH 103/111] Fix links (#809-pybind11) --- INSTALL.md | 78 +++++++++++++++++++++++++++++++----------------------- 1 file changed, 45 insertions(+), 33 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index 306df966e0f..6395cc56298 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -5,19 +5,32 @@ System requirements: GalSim currently only supports Linux and Mac OSX. 
Table of Contents: -1) [Overall summary](overall-summary) +1) [Overall summary](#overall-summary) -2) [Installing FFTW](installing-fftw) +2) [Installing FFTW](#installing-fftw) +* [Installing FFTW yourself](#i-installing-fftw-yourself) +* [Using an existing installation](#ii-using-an-existing-installation-of-fftw) +* [Using conda](#iii-installing-fftw-with-conda) +* [Using apt-get](#iv-installing-fftw-with-apt-get) +* [Using fink](#v-installing-fftw-with-fink) +* [Using MacPorts](#vi-installing-fftw-with-macports) -3) [Installing Eigen](installing-eigen) +3) [Installing Eigen](#installing-eigen) +* [Installing Eigen yourself](#i-installing-eigen-yourself) +* [Using an existing installation](#ii-using-an-existing-installation-of-eigen) +* [Using conda](#iii-installing-eigen-with-conda) +* [Using apt-get](#iv-installing-eigen-with-apt-get) +* [Using fink](#v-installing-eigen-with-fink) +* [Using MacPorts](#vi-installing-eigen-with-macports) +* [Using eigency](#vii-using-eigency) -4) [Using Conda](using-conda) +4) [Using Conda](#using-conda) -5) [Installing With SCons](installing-with-scons) +5) [Installing With SCons](#installing-with-scons) -6) [Running tests](running-tests) +6) [Running tests](#running-tests) -7) [Running example scripts](running-example-scripts) +7) [Running example scripts](#running-example-scripts) Overall summary @@ -92,8 +105,7 @@ fast fourier transforms. We require FFTW version >= 3.0. Most tests have been done with FFTW 3.3.7, so if you have trouble with an earlier version, try upgrading to 3.3.7 or later. -o -i) Installing it yourself +i) Installing FFTW yourself ------------------------- FFTW is available at the URL @@ -135,8 +147,8 @@ Probably, you should put this into your shell login file (e.g. .bash_profile) so it always gets set when you log in. 
-ii) Using an existing installation ----------------------------------- +ii) Using an existing installation of FFTW +------------------------------------------ If FFTW is already installed on your system, there may be nothing to do. If it is in a standard location like /usr/local/lib or in some other @@ -163,8 +175,8 @@ can be used to specify which version you want GalSim to use as this will be the first location it will check during the installation process. -iii) Using conda ----------------- +iii) Installing FFTW with conda +------------------------------- If you use conda, FFTW can be install with @@ -175,16 +187,16 @@ active environment if appropriate). GalSim knows to look here, so there is nothing additional you need to do. -iv) Using apt-get ------------------ +iv) Installing FFTW with apt-get +-------------------------------- On Linux machines that use apt-get, FFTW can be installed with apt-get install libfftw3-dev -v) Using fink -------------- +v) Installing FFTW with fink +---------------------------- If you use fink on a Mac, FFTW can be installed with @@ -196,8 +208,8 @@ This will put it into the /sw/lib directory on your system. GalSim knows to look here, so there is nothing additional you need to do. -vi) Using MacPorts ------------------- +vi) Installing FFTW with MacPorts +--------------------------------- If you use MacPorts, FFTW can be installed with @@ -233,8 +245,8 @@ cases, since it can use an optimized BLAS library on your system), but to use TMV, you need to use the SCons installation option described below. -i) Installing it yourself -------------------------- +i) Installing Eigen yourself +---------------------------- Eigen is available at the URL @@ -268,8 +280,8 @@ Probably, you should put this into your .bash_profile file so it always gets set when you log in. 
-ii) Using an existing installation ----------------------------------- +ii) Using an existing installation of Eigen +------------------------------------------- If Eigen is already installed on your system, there may be nothing to do. If it is in a standard location like /usr/local/include or in some other @@ -295,8 +307,8 @@ can be used to specify which version you want GalSim to use as this will be the first location it will check during the installation process. -iii) Using conda ----------------- +iii) Installing Eigen with conda +-------------------------------- If you use conda, Eigen can be install with @@ -307,16 +319,16 @@ your active environment if appropriate). GalSim knows to look here, so there is nothing additional you need to do. -iv) Using apt-get ------------------ +iv) Installing Eigen with apt-get +--------------------------------- On Linux machines that use apt-get, Eigen can be installed with apt-get install libeigen3-dev -v) Using fink -------------- +v) Installing Eigen with fink +----------------------------- If you use fink on a Mac, Eigen can be installed with @@ -326,8 +338,8 @@ This will put it into the /sw/include directory on your system. GalSim knows to look here, so there is nothing additional you need to do. -vi) Using MacPorts ------------------- +vi) Installing Eigen with MacPorts +---------------------------------- If you use MacPorts, Eigen can be installed with @@ -337,8 +349,8 @@ This will put it into the /opt/local/include directory on your system. GalSim knows to look here, so there is nothing additional you need to do. -vi) Using eigency ------------------ +vii) Using eigency +------------------ Eigency is a pip-installable module that bundles the Eigen header files, so it can also be used to install these files on your system. 
Indeed, as mentioned From 0f6d23665df3633518c70cc80af01f42cace8053 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Thu, 15 Mar 2018 13:07:35 -0400 Subject: [PATCH 104/111] Try to fix aside spec (#809-pybind11) --- INSTALL.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index 6395cc56298..d7cd4bfd75f 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -62,7 +62,7 @@ you can do (again possibly with either sudo or --user). - +