diff --git a/01-Introduction_8md_source.html b/01-Introduction_8md_source.html index 99cf2a1e8..83add5df8 100644 --- a/01-Introduction_8md_source.html +++ b/01-Introduction_8md_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/02-ModelObject_8md_source.html b/02-ModelObject_8md_source.html index 54e5a2cb4..099be61b3 100644 --- a/02-ModelObject_8md_source.html +++ b/02-ModelObject_8md_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/03-Sparsity_8md_source.html b/03-Sparsity_8md_source.html index 8cbbf9dfd..79a8c3bdb 100644 --- a/03-Sparsity_8md_source.html +++ b/03-Sparsity_8md_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/04-CovStruct_8md_source.html b/04-CovStruct_8md_source.html index 7a338dad9..99ab08bcb 100644 --- a/04-CovStruct_8md_source.html +++ b/04-CovStruct_8md_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/05-Simulation_8md_source.html b/05-Simulation_8md_source.html index 4bf8d286c..ff1e44c26 100644 --- a/05-Simulation_8md_source.html +++ b/05-Simulation_8md_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/06-Validation_8md_source.html b/06-Validation_8md_source.html index 3229c3748..c9b510942 100644 --- a/06-Validation_8md_source.html +++ b/06-Validation_8md_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/07-AtomicFunctions_8md_source.html b/07-AtomicFunctions_8md_source.html index ba1ae9d30..967bf8592 100644 --- a/07-AtomicFunctions_8md_source.html +++ b/07-AtomicFunctions_8md_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/08-Parallelization_8md_source.html b/08-Parallelization_8md_source.html index e15c9a2d2..2642b5db7 100644 --- a/08-Parallelization_8md_source.html +++ b/08-Parallelization_8md_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/09-Appendix_8md_source.html b/09-Appendix_8md_source.html index da90a6baa..9b7c33773 100644 --- a/09-Appendix_8md_source.html +++ b/09-Appendix_8md_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/Appendix.html b/Appendix.html index 7329d9b1d..0ff69ba62 100644 --- a/Appendix.html +++ b/Appendix.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/AtomicFunctions.html b/AtomicFunctions.html index 2210b5de7..eb173e8f0 100644 --- a/AtomicFunctions.html +++ b/AtomicFunctions.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/Introduction.html b/Introduction.html index 6b4f15d7b..71760fc48 100644 --- a/Introduction.html +++ b/Introduction.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/ModelObject.html b/ModelObject.html index 30dc63497..7f9f12ad5 100644 --- a/ModelObject.html +++ b/ModelObject.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/R__inla_8hpp.html b/R__inla_8hpp.html index e89c22747..36aff5aea 100644 --- a/R__inla_8hpp.html +++ b/R__inla_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/R__inla_8hpp_source.html b/R__inla_8hpp_source.html index a648ac2e3..b43255d92 100644 --- a/R__inla_8hpp_source.html +++ b/R__inla_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/Rstream_8hpp_source.html b/Rstream_8hpp_source.html index cff71f0b4..e5931f7b4 100644 --- a/Rstream_8hpp_source.html +++ b/Rstream_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/Simulation.html b/Simulation.html index 7cd5abf5d..32666f8d4 100644 --- a/Simulation.html +++ b/Simulation.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/Sparsity.html b/Sparsity.html index 40be20a52..3d7ab9bdd 100644 --- a/Sparsity.html +++ b/Sparsity.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/TMB_2inst_2include_2tiny__ad_2gamma_2gamma_8cpp_source.html b/TMB_2inst_2include_2tiny__ad_2gamma_2gamma_8cpp_source.html index 0d41cadb6..57df60183 100644 --- a/TMB_2inst_2include_2tiny__ad_2gamma_2gamma_8cpp_source.html +++ b/TMB_2inst_2include_2tiny__ad_2gamma_2gamma_8cpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/TMB_8hpp.html b/TMB_8hpp.html index 96f28b28e..ae70ed77f 100644 --- a/TMB_8hpp.html +++ b/TMB_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/TMB_8hpp_source.html b/TMB_8hpp_source.html index 9a1d83f98..0369fb05c 100644 --- a/TMB_8hpp_source.html +++ b/TMB_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
@@ -73,11 +73,11 @@
TMB.hpp
-Go to the documentation of this file.
1 // Copyright (C) 2013-2015 Kasper Kristensen
2 // License: GPL-2
3 /* Utility: Compile time test for Type=double */
4 template<class Type>
5 struct isDouble{
6  enum{value=false};
7 };
8 template<>
9 struct isDouble<double>{
10  enum{value=true};
11 };
12 
16 /* To be removed */
17 #define TMB_DEBUG 0
18 #define TMB_PRINT(x)std::cout << #x << ": " << x << "\n"; std::cout.flush();
19 
20 /* Conditionally skip compilation */
21 #ifdef WITH_LIBTMB
22 #define CSKIP(...) ;
23 #define TMB_EXTERN extern
24 #else
25 #define CSKIP(...) __VA_ARGS__
26 #define TMB_EXTERN
27 #endif
28 #ifdef TMB_PRECOMPILE_ATOMICS
29 #define IF_TMB_PRECOMPILE_ATOMICS(...) __VA_ARGS__
30 #else
31 #define IF_TMB_PRECOMPILE_ATOMICS(...)
32 #endif
33 #ifdef HAVE_PRECOMPILED_ATOMICS
34 #define CSKIP_ATOMIC(...) ;
35 #else
36 #define CSKIP_ATOMIC(...) __VA_ARGS__
37 #endif
38 
39 /* Must come before Rinternals.h */
40 #ifdef _OPENMP
41 #include <omp.h>
42 #endif
43 
44 /* Early inclusion of Rprintf and REprintf */
45 #include <R_ext/Print.h>
46 #include "Rstream.hpp"
47 
48 /* Flag to bypass abort() */
49 #ifndef TMB_ABORT
50 #define TMB_ABORT abort()
51 #endif
52 
53 /* Include the Eigen library. */
54 #ifdef TMB_SAFEBOUNDS
55 #undef NDEBUG
56 #undef eigen_assert
57 void eigen_REprintf(const char* x);
58 #define eigen_assert(x) if (!(x)) { eigen_REprintf("TMB has received an error from Eigen. "); \
59  eigen_REprintf("The following condition was not met:\n"); \
60  eigen_REprintf(#x); \
61  eigen_REprintf("\nPlease check your matrix-vector bounds etc., "); \
62  eigen_REprintf("or run your program through a debugger.\n"); \
63  TMB_ABORT;}
64 #define TMBAD_ASSERT2(x,msg) \
65 if (!(x)) { \
66  Rcerr << "TMBad assertion failed.\n"; \
67  Rcerr << "The following condition was not met: " << #x << "\n"; \
68  Rcerr << "Possible reason: " msg << "\n"; \
69  Rcerr << "For more info run your program through a debugger.\n"; \
70  TMB_ABORT; \
71 }
72 #define TMBAD_ASSERT(x) TMBAD_ASSERT2(x,"Unknown")
73 #else
74 #undef NDEBUG
75 #define NDEBUG 1
76 #define TMBAD_ASSERT2(x,msg) (void) (x);
77 #define TMBAD_ASSERT(x) (void) (x);
78 #endif
79 /* Provide access to file 'DisableStupidWarnings.h' which has been
80  patched by RcppEigen to satisfy CRAN policy. This file may need
81  regular updating. The renaming is to aviod a CRAN note. */
82 #ifdef TMB_EIGEN_DISABLE_WARNINGS
83 #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
84 #define EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS 1
85 #endif
86 #include "EigenWarnings/DisableStupidWarnings"
87 #endif
88 #include <Eigen/Dense>
89 
90 // Default: Include Eigen/Sparse normally
91 #ifndef TMB_SPARSE_STORAGE_INDEX
92 #include <Eigen/Sparse>
93 #else
94 // Alternative: Include Eigen/Sparse with custom sparse matrix integer type
95 #define SparseMatrix SparseMatrix_rename
96 #include <Eigen/Sparse>
97 #undef SparseMatrix
98 namespace Eigen {
99 template<class T, int Flags = 0, class StorageIndex = TMB_SPARSE_STORAGE_INDEX>
100 using SparseMatrix = SparseMatrix_rename<T, Flags, StorageIndex>;
101 }
102 #endif
103 
104 /* Workaround side effect when -DEIGEN_USE_LAPACKE is set */
105 #undef I
106 
107 /* Select AD framework: TMBAD or CPPAD */
108 #ifndef CPPAD_FRAMEWORK
109 #ifndef TMBAD_FRAMEWORK
110 #define CPPAD_FRAMEWORK
111 #endif
112 #endif
113 
114 /* Include the CppAD library. (Always turn off debug for cppad) */
115 #undef NDEBUG
116 #define NDEBUG 1
117 #include "cppad/cppad.hpp"
118 #ifdef TMBAD_FRAMEWORK
119 #include "TMBad/TMBad.hpp"
120 #include "TMBad/tmbad_allow_comparison.hpp"
121 #include "TMBad/eigen_numtraits.hpp"
122 #undef error
123 #include "TMBad/compile.hpp"
124 #include "TMBad/graph2dot.hpp"
125 #include "TMBad/compression.hpp"
126 #include "TMBad/ad_blas.hpp"
127 #ifndef WITH_LIBTMB
128 #include "TMBad/TMBad.cpp"
129 #endif
130 #define error Rf_error
131 // Workaround to make CppAD::Integer working with TMBad
132 namespace CppAD {
133 int Integer(const TMBad::ad_aug &x) CSKIP ({ return (int) x.Value(); })
134 TMBad::ad_aug abs(const TMBad::ad_aug &x) CSKIP ({ return TMBad::fabs(x); })
135 #define TMBAD_CONDEXP(NAME) \
136 TMBad::ad_aug CondExp ## NAME( \
137  const TMBad::ad_aug &x0, \
138  const TMBad::ad_aug &x1, \
139  const TMBad::ad_aug &x2, \
140  const TMBad::ad_aug &x3) CSKIP ( { \
141  return TMBad::CondExp ## NAME(x0, x1, x2, x3); \
142 })
143 TMBAD_CONDEXP(Eq)
144 TMBAD_CONDEXP(Ne)
145 TMBAD_CONDEXP(Lt)
146 TMBAD_CONDEXP(Gt)
147 TMBAD_CONDEXP(Le)
148 TMBAD_CONDEXP(Ge)
149 #undef TMBAD_CONDEXP
150 bool Variable(const TMBad::ad_aug &x) CSKIP ({ return !x.constant(); })
151 }
152 // FIXME: Move to TMBad source?
153 namespace TMBad {
154  /* Add 'isfinite', 'isinf' and 'isnan' to TMBad */
155  using std::isfinite;
156  bool isfinite(const TMBad::ad_aug &x)CSKIP({ return isfinite(x.Value()); })
157  using std::isinf;
158  bool isinf(const TMBad::ad_aug &x)CSKIP({ return isinf(x.Value()); })
159  using std::isnan;
160  bool isnan(const TMBad::ad_aug &x)CSKIP({ return isnan(x.Value()); })
161 }
162 #endif
163 
164 /* Include the R library _after_ Eigen and CppAD. Otherwise, the R
165  macros can cause conflicts (as they do not respect the Eigen and
166  CppAD namespace limits). E.g., the 'length' macro conflicts with
167  CppAD when compiling with '-std=c++11'. */
168 #include <R.h>
169 #include <Rinternals.h>
170 #include "toggle_thread_safe_R.hpp"
171 void eigen_REprintf(const char* x)CSKIP({REprintf("%s",x);})
172 
173 #include "tmbutils/tmbutils.hpp"
174 #include "tmbutils/vectorize.hpp"
175 using tmbutils::matrix;
176 using tmbutils::vector;
177 using CppAD::AD;
178 using CppAD::ADFun;
179 namespace CppAD{
180  /* Add to CppAD so that 'Variable' works for any 'Type' */
181  bool Variable(double x)CSKIP({ return false; })
182  /* Add 'isfinite', 'isinf' and 'isnan' to CppAD */
183  using std::isfinite;
184  template <class T>
185  bool isfinite(const AD<T> &x)CSKIP({ return isfinite(Value(x)); })
186  using std::isinf;
187  template <class T>
188  bool isinf(const AD<T> &x)CSKIP({ return isinf(Value(x)); })
189  using std::isnan;
190  template <class T>
191  bool isnan(const AD<T> &x)CSKIP({ return isnan(Value(x)); })
192 }
193 #include "convert.hpp" // asSEXP, asMatrix, asVector
194 #include "config.hpp"
195 #include "tmbutils/getListElement.hpp"
196 #include "atomic_math.hpp"
197 #include "expm.hpp"
198 #include "atomic_convolve.hpp"
199 #include "tiny_ad/atomic.hpp"
200 #include "tiny_ad/integrate/integrate.hpp"
201 #include "dynamic_data.hpp" // Requires atomic namespace
202 #include "Vectorize.hpp"
203 #include "dnorm.hpp" // harmless
204 #include "lgamma.hpp" // harmless
205 #include "start_parallel.hpp"
206 #include "tmbutils/newton.hpp" // Newton solver + Laplace used by TransformADFunObject
207 #ifndef TMB_SKINNY
208 #include "tmb_core.hpp"
209 #endif
210 #include "distributions_R.hpp"
211 #include "convenience.hpp" // Requires besselK
212 #include "tmbutils/tmbutils_extra.hpp"
213 #include "tmbutils/R_inla.hpp"
214 #include "tmbutils/sparse_matrix_exponential.hpp"
215 #include "tmbutils/concat.hpp"
216 #include "precompile.hpp" // Must come last
217 using tmbutils::array;
218 using Eigen::Matrix;
219 using Eigen::Array;
220 
221 /* Cleanup */
222 
223 // Nothing more to precompile
224 #undef CSKIP
225 #define CSKIP(...) __VA_ARGS__
226 #undef CSKIP_ATOMIC
227 #define CSKIP_ATOMIC(...) __VA_ARGS__
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:153
+Go to the documentation of this file.
1 // Copyright (C) 2013-2015 Kasper Kristensen
2 // License: GPL-2
3 /* Utility: Compile time test for Type=double */
4 template<class Type>
5 struct isDouble{
6  enum{value=false};
7 };
8 template<>
9 struct isDouble<double>{
10  enum{value=true};
11 };
12 
16 /* To be removed */
17 #define TMB_DEBUG 0
18 #define TMB_PRINT(x)std::cout << #x << ": " << x << "\n"; std::cout.flush();
19 
20 /* Conditionally skip compilation */
21 #ifdef WITH_LIBTMB
22 #define CSKIP(...) ;
23 #define TMB_EXTERN extern
24 #else
25 #define CSKIP(...) __VA_ARGS__
26 #define TMB_EXTERN
27 #endif
28 #ifdef TMB_PRECOMPILE_ATOMICS
29 #define IF_TMB_PRECOMPILE_ATOMICS(...) __VA_ARGS__
30 #else
31 #define IF_TMB_PRECOMPILE_ATOMICS(...)
32 #endif
33 #ifdef HAVE_PRECOMPILED_ATOMICS
34 #define CSKIP_ATOMIC(...) ;
35 #else
36 #define CSKIP_ATOMIC(...) __VA_ARGS__
37 #endif
38 
39 /* Must come before Rinternals.h */
40 #ifdef _OPENMP
41 #include <omp.h>
42 #endif
43 
44 /* Early inclusion of Rprintf and REprintf */
45 #include <R_ext/Print.h>
46 #include "Rstream.hpp"
47 
48 /* Flag to bypass abort() */
49 #ifndef TMB_ABORT
50 #define TMB_ABORT abort()
51 #endif
52 
53 /* Include the Eigen library. */
54 #ifdef TMB_SAFEBOUNDS
55 #undef NDEBUG
56 #undef eigen_assert
57 void eigen_REprintf(const char* x);
58 #define eigen_assert(x) if (!(x)) { eigen_REprintf("TMB has received an error from Eigen. "); \
59  eigen_REprintf("The following condition was not met:\n"); \
60  eigen_REprintf(#x); \
61  eigen_REprintf("\nPlease check your matrix-vector bounds etc., "); \
62  eigen_REprintf("or run your program through a debugger.\n"); \
63  TMB_ABORT;}
64 #define TMBAD_ASSERT2(x,msg) \
65 if (!(x)) { \
66  Rcerr << "TMBad assertion failed.\n"; \
67  Rcerr << "The following condition was not met: " << #x << "\n"; \
68  Rcerr << "Possible reason: " msg << "\n"; \
69  Rcerr << "For more info run your program through a debugger.\n"; \
70  TMB_ABORT; \
71 }
72 #define TMBAD_ASSERT(x) TMBAD_ASSERT2(x,"Unknown")
73 #else
74 #undef NDEBUG
75 #define NDEBUG 1
76 #define TMBAD_ASSERT2(x,msg) (void) (x);
77 #define TMBAD_ASSERT(x) (void) (x);
78 #endif
79 /* Provide access to file 'DisableStupidWarnings.h' which has been
80  patched by RcppEigen to satisfy CRAN policy. This file may need
81  regular updating. The renaming is to aviod a CRAN note. */
82 #ifdef TMB_EIGEN_DISABLE_WARNINGS
83 #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
84 #define EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS 1
85 #endif
86 #include "EigenWarnings/DisableStupidWarnings"
87 #endif
88 /* We cannot use Eigen's parallel matrix multiply for AD types (GH390). */
89 #ifndef EIGEN_DONT_PARALLELIZE
90 #define EIGEN_DONT_PARALLELIZE
91 #endif
92 #include <Eigen/Dense>
93 
94 // Default: Include Eigen/Sparse normally
95 #ifndef TMB_SPARSE_STORAGE_INDEX
96 #include <Eigen/Sparse>
97 #else
98 // Alternative: Include Eigen/Sparse with custom sparse matrix integer type
99 #define SparseMatrix SparseMatrix_rename
100 #include <Eigen/Sparse>
101 #undef SparseMatrix
102 namespace Eigen {
103 template<class T, int Flags = 0, class StorageIndex = TMB_SPARSE_STORAGE_INDEX>
104 using SparseMatrix = SparseMatrix_rename<T, Flags, StorageIndex>;
105 }
106 #endif
107 
108 /* Workaround side effect when -DEIGEN_USE_LAPACKE is set */
109 #undef I
110 
111 /* Select AD framework: TMBAD or CPPAD */
112 #ifndef CPPAD_FRAMEWORK
113 #ifndef TMBAD_FRAMEWORK
114 #define CPPAD_FRAMEWORK
115 #endif
116 #endif
117 
118 /* Include the CppAD library. (Always turn off debug for cppad) */
119 #undef NDEBUG
120 #define NDEBUG 1
121 #include "cppad/cppad.hpp"
122 #ifdef TMBAD_FRAMEWORK
123 #include "TMBad/TMBad.hpp"
124 #include "TMBad/tmbad_allow_comparison.hpp"
125 #include "TMBad/eigen_numtraits.hpp"
126 #undef error
127 #include "TMBad/compile.hpp"
128 #include "TMBad/graph2dot.hpp"
129 #include "TMBad/compression.hpp"
130 #include "TMBad/ad_blas.hpp"
131 #ifndef WITH_LIBTMB
132 #include "TMBad/TMBad.cpp"
133 #endif
134 #define error Rf_error
135 // Workaround to make CppAD::Integer working with TMBad
136 namespace CppAD {
137 int Integer(const TMBad::ad_aug &x) CSKIP ({ return (int) x.Value(); })
138 TMBad::ad_aug abs(const TMBad::ad_aug &x) CSKIP ({ return TMBad::fabs(x); })
139 #define TMBAD_CONDEXP(NAME) \
140 TMBad::ad_aug CondExp ## NAME( \
141  const TMBad::ad_aug &x0, \
142  const TMBad::ad_aug &x1, \
143  const TMBad::ad_aug &x2, \
144  const TMBad::ad_aug &x3) CSKIP ( { \
145  return TMBad::CondExp ## NAME(x0, x1, x2, x3); \
146 })
147 TMBAD_CONDEXP(Eq)
148 TMBAD_CONDEXP(Ne)
149 TMBAD_CONDEXP(Lt)
150 TMBAD_CONDEXP(Gt)
151 TMBAD_CONDEXP(Le)
152 TMBAD_CONDEXP(Ge)
153 #undef TMBAD_CONDEXP
154 bool Variable(const TMBad::ad_aug &x) CSKIP ({ return !x.constant(); })
155 }
156 // FIXME: Move to TMBad source?
157 namespace TMBad {
158  /* Add 'isfinite', 'isinf' and 'isnan' to TMBad */
159  using std::isfinite;
160  bool isfinite(const TMBad::ad_aug &x)CSKIP({ return isfinite(x.Value()); })
161  using std::isinf;
162  bool isinf(const TMBad::ad_aug &x)CSKIP({ return isinf(x.Value()); })
163  using std::isnan;
164  bool isnan(const TMBad::ad_aug &x)CSKIP({ return isnan(x.Value()); })
165 }
166 #endif
167 
168 /* Include the R library _after_ Eigen and CppAD. Otherwise, the R
169  macros can cause conflicts (as they do not respect the Eigen and
170  CppAD namespace limits). E.g., the 'length' macro conflicts with
171  CppAD when compiling with '-std=c++11'. */
172 #include <R.h>
173 #include <Rinternals.h>
174 #include "toggle_thread_safe_R.hpp"
175 void eigen_REprintf(const char* x)CSKIP({REprintf("%s",x);})
176 
177 #include "tmbutils/tmbutils.hpp"
178 #include "tmbutils/vectorize.hpp"
179 using tmbutils::matrix;
180 using tmbutils::vector;
181 using CppAD::AD;
182 using CppAD::ADFun;
183 namespace CppAD{
184  /* Add to CppAD so that 'Variable' works for any 'Type' */
185  bool Variable(double x)CSKIP({ return false; })
186  /* Add 'isfinite', 'isinf' and 'isnan' to CppAD */
187  using std::isfinite;
188  template <class T>
189  bool isfinite(const AD<T> &x)CSKIP({ return isfinite(Value(x)); })
190  using std::isinf;
191  template <class T>
192  bool isinf(const AD<T> &x)CSKIP({ return isinf(Value(x)); })
193  using std::isnan;
194  template <class T>
195  bool isnan(const AD<T> &x)CSKIP({ return isnan(Value(x)); })
196 }
197 #include "convert.hpp" // asSEXP, asMatrix, asVector
198 #include "config.hpp"
199 #include "tmbutils/getListElement.hpp"
200 #include "atomic_math.hpp"
201 #include "expm.hpp"
202 #include "atomic_convolve.hpp"
203 #include "tiny_ad/atomic.hpp"
204 #include "tiny_ad/integrate/integrate.hpp"
205 #include "dynamic_data.hpp" // Requires atomic namespace
206 #include "Vectorize.hpp"
207 #include "dnorm.hpp" // harmless
208 #include "lgamma.hpp" // harmless
209 #include "start_parallel.hpp"
210 #include "tmbutils/newton.hpp" // Newton solver + Laplace used by TransformADFunObject
211 #ifndef TMB_SKINNY
212 #include "tmb_core.hpp"
213 #endif
214 #include "distributions_R.hpp"
215 #include "convenience.hpp" // Requires besselK
216 #include "tmbutils/tmbutils_extra.hpp"
217 #include "tmbutils/R_inla.hpp"
218 #include "tmbutils/sparse_matrix_exponential.hpp"
219 #include "tmbutils/concat.hpp"
220 #include "precompile.hpp" // Must come last
221 using tmbutils::array;
222 using Eigen::Matrix;
223 using Eigen::Array;
224 
225 /* Cleanup */
226 
227 // Nothing more to precompile
228 #undef CSKIP
229 #define CSKIP(...) __VA_ARGS__
230 #undef CSKIP_ATOMIC
231 #define CSKIP_ATOMIC(...) __VA_ARGS__
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:157
Array class used by TMB.
Definition: tmbutils.hpp:23
Augmented AD type.
Definition: global.hpp:2831
-
Definition: TMB.hpp:132
+
Definition: TMB.hpp:136
Scalar Value() const
Return the underlying scalar value of this ad_aug.
Definition: TMBad.cpp:2188
Matrix class used by TMB.
Definition: tmbutils.hpp:102
Vector class used by TMB.
Definition: tmbutils.hpp:18
diff --git a/TMBad_2config_8hpp_source.html b/TMBad_2config_8hpp_source.html index 7bc4e084f..b4247ded7 100644 --- a/TMBad_2config_8hpp_source.html +++ b/TMBad_2config_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/TMBad_2integrate_8hpp_source.html b/TMBad_2integrate_8hpp_source.html index 70ba8f84c..892387e0d 100644 --- a/TMBad_2integrate_8hpp_source.html +++ b/TMBad_2integrate_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
@@ -73,7 +73,7 @@
TMBad/integrate.hpp
-
1 #ifndef HAVE_INTEGRATE_HPP
2 #define HAVE_INTEGRATE_HPP
3 // Autogenerated - do not edit by hand !
4 #include <float.h> // INFINITY etc
5 #include "global.hpp"
6 
7 namespace TMBad {
8 
14 template <class T>
15 double value(T x) {
16  return TMBad::Value(x);
17 }
18 double value(double x);
19 template <class S, class T>
20 int imin2(S x, T y) {
21  return (x < y) ? x : y;
22 }
23 template <class S, class T>
24 double fmin2(S x, T y) {
25  return (value(x) < value(y)) ? value(x) : value(y);
26 }
27 template <class S, class T>
28 double fmax2(S x, T y) {
29  return (value(x) < value(y)) ? value(y) : value(x);
30 }
31 template <class Float, class integr_fn>
32 static void rdqagie(integr_fn f, void *ex, Float *, int *, Float *, Float *,
33  int *, Float *, Float *, int *, int *, Float *, Float *,
34  Float *, Float *, int *, int *);
35 
36 template <class Float, class integr_fn>
37 static void rdqk15i(integr_fn f, void *ex, Float *, int *, Float *, Float *,
38  Float *, Float *, Float *, Float *);
39 
40 template <class Float, class integr_fn>
41 static void rdqagse(integr_fn f, void *ex, Float *, Float *, Float *, Float *,
42  int *, Float *, Float *, int *, int *, Float *, Float *,
43  Float *, Float *, int *, int *);
44 
45 template <class Float, class integr_fn>
46 static void rdqk21(integr_fn f, void *ex, Float *, Float *, Float *, Float *,
47  Float *, Float *);
48 
49 template <class Float>
50 static void rdqpsrt(int *, int *, int *, Float *, Float *, int *, int *);
51 
52 template <class Float>
53 static void rdqelg(int *, Float *, Float *, Float *, Float *, int *);
54 
55 template <class Float, class integr_fn>
56 void Rdqagi(integr_fn f, void *ex, Float *bound, int *inf, Float *epsabs,
57  Float *epsrel, Float *result, Float *abserr, int *neval, int *ier,
58  int *limit, int *lenw, int *last, int *iwork, Float *work) {
59  int l1, l2, l3;
60  *ier = 6;
61  *neval = 0;
62  *last = 0;
63  *result = 0.;
64  *abserr = 0.;
65  if (*limit < 1 || *lenw < *limit << 2) return;
66 
67  l1 = *limit;
68  l2 = *limit + l1;
69  l3 = *limit + l2;
70 
71  rdqagie(f, ex, bound, inf, epsabs, epsrel, limit, result, abserr, neval, ier,
72  work, &work[l1], &work[l2], &work[l3], iwork, last);
73 
74  return;
75 }
76 
77 template <class Float, class integr_fn>
78 static void rdqagie(integr_fn f, void *ex, Float *bound, int *inf,
79  Float *epsabs, Float *epsrel, int *limit, Float *result,
80  Float *abserr, int *neval, int *ier, Float *alist,
81  Float *blist, Float *rlist, Float *elist, int *iord,
82  int *last) {
83  Float area, dres;
84  int ksgn;
85  Float boun;
86  int nres;
87  Float area1, area2, area12;
88  int k;
89  Float small = 0.0, erro12;
90  int ierro;
91  Float a1, a2, b1, b2, defab1, defab2, oflow;
92  int ktmin, nrmax;
93  Float uflow;
94  bool noext;
95  int iroff1, iroff2, iroff3;
96  Float res3la[3], error1, error2;
97  int id;
98  Float rlist2[52];
99  int numrl2;
100  Float defabs, epmach, erlarg = 0.0, abseps, correc = 0.0, errbnd, resabs;
101  int jupbnd;
102  Float erlast, errmax;
103  int maxerr;
104  Float reseps;
105  bool extrap;
106  Float ertest = 0.0, errsum;
107  --iord;
108  --elist;
109  --rlist;
110  --blist;
111  --alist;
112 
113  epmach = DBL_EPSILON;
114 
115  *ier = 0;
116  *neval = 0;
117  *last = 0;
118  *result = 0.;
119  *abserr = 0.;
120  alist[1] = 0.;
121  blist[1] = 1.;
122  rlist[1] = 0.;
123  elist[1] = 0.;
124  iord[1] = 0;
125  if (*epsabs <= 0. && (*epsrel < fmax2(epmach * 50., 5e-29))) *ier = 6;
126  if (*ier == 6) return;
127  boun = *bound;
128  if (*inf == 2) {
129  boun = 0.;
130  }
131 
132  static Float c_b6 = 0.;
133  static Float c_b7 = 1.;
134 
135  rdqk15i(f, ex, &boun, inf, &c_b6, &c_b7, result, abserr, &defabs, &resabs);
136 
137  *last = 1;
138  rlist[1] = *result;
139  elist[1] = *abserr;
140  iord[1] = 1;
141  dres = fabs(*result);
142  errbnd = fmax2(*epsabs, *epsrel * dres);
143  if (*abserr <= epmach * 100. * defabs && *abserr > errbnd) *ier = 2;
144  if (*limit == 1) *ier = 1;
145  if (*ier != 0 || (*abserr <= errbnd && *abserr != resabs) || *abserr == 0.)
146  goto L130;
147 
148  uflow = DBL_MIN;
149  oflow = DBL_MAX;
150  rlist2[0] = *result;
151  errmax = *abserr;
152  maxerr = 1;
153  area = *result;
154  errsum = *abserr;
155  *abserr = oflow;
156  nrmax = 1;
157  nres = 0;
158  ktmin = 0;
159  numrl2 = 2;
160  extrap = false;
161  noext = false;
162  ierro = 0;
163  iroff1 = 0;
164  iroff2 = 0;
165  iroff3 = 0;
166  ksgn = -1;
167  if (dres >= (1. - epmach * 50.) * defabs) {
168  ksgn = 1;
169  }
170 
171  for (*last = 2; *last <= *limit; ++(*last)) {
172  a1 = alist[maxerr];
173  b1 = (alist[maxerr] + blist[maxerr]) * .5;
174  a2 = b1;
175  b2 = blist[maxerr];
176  erlast = errmax;
177  rdqk15i(f, ex, &boun, inf, &a1, &b1, &area1, &error1, &resabs, &defab1);
178  rdqk15i(f, ex, &boun, inf, &a2, &b2, &area2, &error2, &resabs, &defab2);
179 
180  area12 = area1 + area2;
181  erro12 = error1 + error2;
182  errsum = errsum + erro12 - errmax;
183  area = area + area12 - rlist[maxerr];
184  if (!(defab1 == error1 || defab2 == error2)) {
185  if (fabs(rlist[maxerr] - area12) <= fabs(area12) * 1e-5 &&
186  erro12 >= errmax * .99) {
187  if (extrap)
188  ++iroff2;
189  else
190  ++iroff1;
191  }
192  if (*last > 10 && erro12 > errmax) ++iroff3;
193  }
194 
195  rlist[maxerr] = area1;
196  rlist[*last] = area2;
197  errbnd = fmax2(*epsabs, *epsrel * fabs(area));
198 
199  if (iroff1 + iroff2 >= 10 || iroff3 >= 20) *ier = 2;
200  if (iroff2 >= 5) ierro = 3;
201 
202  if (*last == *limit) *ier = 1;
203 
204  if (fmax2(fabs(a1), fabs(b2)) <=
205  (epmach * 100. + 1.) * (fabs(a2) + uflow * 1e3)) {
206  *ier = 4;
207  }
208 
209  if (error2 <= error1) {
210  alist[*last] = a2;
211  blist[maxerr] = b1;
212  blist[*last] = b2;
213  elist[maxerr] = error1;
214  elist[*last] = error2;
215  } else {
216  alist[maxerr] = a2;
217  alist[*last] = a1;
218  blist[*last] = b1;
219  rlist[maxerr] = area2;
220  rlist[*last] = area1;
221  elist[maxerr] = error2;
222  elist[*last] = error1;
223  }
224 
225  rdqpsrt(limit, last, &maxerr, &errmax, &elist[1], &iord[1], &nrmax);
226  if (errsum <= errbnd) {
227  goto L115;
228  }
229  if (*ier != 0) break;
230  if (*last == 2) {
231  small = .375;
232  erlarg = errsum;
233  ertest = errbnd;
234  rlist2[1] = area;
235  continue;
236  }
237  if (noext) continue;
238 
239  erlarg -= erlast;
240  if (fabs(b1 - a1) > small) {
241  erlarg += erro12;
242  }
243  if (!extrap) {
244  if (fabs(blist[maxerr] - alist[maxerr]) > small) {
245  continue;
246  }
247  extrap = true;
248  nrmax = 2;
249  }
250 
251  if (ierro != 3 && erlarg > ertest) {
252  id = nrmax;
253  jupbnd = *last;
254  if (*last > *limit / 2 + 2) {
255  jupbnd = *limit + 3 - *last;
256  }
257  for (k = id; k <= jupbnd; ++k) {
258  maxerr = iord[nrmax];
259  errmax = elist[maxerr];
260  if (fabs(blist[maxerr] - alist[maxerr]) > small) {
261  goto L90;
262  }
263  ++nrmax;
264  }
265  }
266 
267  ++numrl2;
268  rlist2[numrl2 - 1] = area;
269  rdqelg(&numrl2, rlist2, &reseps, &abseps, res3la, &nres);
270  ++ktmin;
271  if (ktmin > 5 && *abserr < errsum * .001) {
272  *ier = 5;
273  }
274  if (abseps >= *abserr) {
275  goto L70;
276  }
277  ktmin = 0;
278  *abserr = abseps;
279  *result = reseps;
280  correc = erlarg;
281  ertest = fmax2(*epsabs, *epsrel * fabs(reseps));
282  if (*abserr <= ertest) {
283  break;
284  }
285 
286  L70:
287  if (numrl2 == 1) {
288  noext = true;
289  }
290  if (*ier == 5) {
291  break;
292  }
293  maxerr = iord[1];
294  errmax = elist[maxerr];
295  nrmax = 1;
296  extrap = false;
297  small *= .5;
298  erlarg = errsum;
299  L90:;
300  }
301 
302  if (*abserr == oflow) {
303  goto L115;
304  }
305  if (*ier + ierro == 0) {
306  goto L110;
307  }
308  if (ierro == 3) {
309  *abserr += correc;
310  }
311  if (*ier == 0) {
312  *ier = 3;
313  }
314  if (*result == 0. || area == 0.) {
315  if (*abserr > errsum) goto L115;
316 
317  if (area == 0.) goto L130;
318  } else {
319  if (*abserr / fabs(*result) > errsum / fabs(area)) {
320  goto L115;
321  }
322  }
323 
324 L110:
325  if (ksgn == -1 && fmax2(fabs(*result), fabs(area)) <= defabs * .01) {
326  goto L130;
327  }
328  if (.01 > *result / area || *result / area > 100. || errsum > fabs(area)) {
329  *ier = 6;
330  }
331  goto L130;
332 
333 L115:
334  *result = 0.;
335  for (k = 1; k <= *last; ++k) *result += rlist[k];
336 
337  *abserr = errsum;
338 L130:
339  *neval = *last * 30 - 15;
340  if (*inf == 2) {
341  *neval <<= 1;
342  }
343  if (*ier > 2) {
344  --(*ier);
345  }
346  return;
347 }
348 
349 template <class Float, class integr_fn>
350 void Rdqags(integr_fn f, void *ex, Float *a, Float *b, Float *epsabs,
351  Float *epsrel, Float *result, Float *abserr, int *neval, int *ier,
352  int *limit, int *lenw, int *last, int *iwork, Float *work) {
353  int l1, l2, l3;
354  *ier = 6;
355  *neval = 0;
356  *last = 0;
357  *result = 0.;
358  *abserr = 0.;
359  if (*limit < 1 || *lenw < *limit * 4) return;
360 
361  l1 = *limit;
362  l2 = *limit + l1;
363  l3 = *limit + l2;
364 
365  rdqagse(f, ex, a, b, epsabs, epsrel, limit, result, abserr, neval, ier, work,
366  &work[l1], &work[l2], &work[l3], iwork, last);
367 
368  return;
369 }
370 
371 template <class Float, class integr_fn>
372 static void rdqagse(integr_fn f, void *ex, Float *a, Float *b, Float *epsabs,
373  Float *epsrel, int *limit, Float *result, Float *abserr,
374  int *neval, int *ier, Float *alist, Float *blist,
375  Float *rlist, Float *elist, int *iord, int *last) {
376  bool noext, extrap;
377  int k, ksgn, nres;
378  int ierro;
379  int ktmin, nrmax;
380  int iroff1, iroff2, iroff3;
381  int id;
382  int numrl2;
383  int jupbnd;
384  int maxerr;
385  Float res3la[3];
386  Float rlist2[52];
387  Float abseps, area, area1, area2, area12, dres, epmach;
388  Float a1, a2, b1, b2, defabs, defab1, defab2, oflow, uflow, resabs, reseps;
389  Float error1, error2, erro12, errbnd, erlast, errmax, errsum;
390 
391  Float correc = 0.0, erlarg = 0.0, ertest = 0.0, small = 0.0;
392  --iord;
393  --elist;
394  --rlist;
395  --blist;
396  --alist;
397 
398  epmach = DBL_EPSILON;
399 
400  *ier = 0;
401  *neval = 0;
402  *last = 0;
403  *result = 0.;
404  *abserr = 0.;
405  alist[1] = *a;
406  blist[1] = *b;
407  rlist[1] = 0.;
408  elist[1] = 0.;
409  if (*epsabs <= 0. && *epsrel < fmax2(epmach * 50., 5e-29)) {
410  *ier = 6;
411  return;
412  }
413 
414  uflow = DBL_MIN;
415  oflow = DBL_MAX;
416  ierro = 0;
417  rdqk21(f, ex, a, b, result, abserr, &defabs, &resabs);
418 
419  dres = fabs(*result);
420  errbnd = fmax2(*epsabs, *epsrel * dres);
421  *last = 1;
422  rlist[1] = *result;
423  elist[1] = *abserr;
424  iord[1] = 1;
425  if (*abserr <= epmach * 100. * defabs && *abserr > errbnd) *ier = 2;
426  if (*limit == 1) *ier = 1;
427  if (*ier != 0 || (*abserr <= errbnd && *abserr != resabs) || *abserr == 0.)
428  goto L140;
429 
430  rlist2[0] = *result;
431  errmax = *abserr;
432  maxerr = 1;
433  area = *result;
434  errsum = *abserr;
435  *abserr = oflow;
436  nrmax = 1;
437  nres = 0;
438  numrl2 = 2;
439  ktmin = 0;
440  extrap = false;
441  noext = false;
442  iroff1 = 0;
443  iroff2 = 0;
444  iroff3 = 0;
445  ksgn = -1;
446  if (dres >= (1. - epmach * 50.) * defabs) {
447  ksgn = 1;
448  }
449 
450  for (*last = 2; *last <= *limit; ++(*last)) {
451  a1 = alist[maxerr];
452  b1 = (alist[maxerr] + blist[maxerr]) * .5;
453  a2 = b1;
454  b2 = blist[maxerr];
455  erlast = errmax;
456  rdqk21(f, ex, &a1, &b1, &area1, &error1, &resabs, &defab1);
457  rdqk21(f, ex, &a2, &b2, &area2, &error2, &resabs, &defab2);
458 
459  area12 = area1 + area2;
460  erro12 = error1 + error2;
461  errsum = errsum + erro12 - errmax;
462  area = area + area12 - rlist[maxerr];
463  if (!(defab1 == error1 || defab2 == error2)) {
464  if (fabs(rlist[maxerr] - area12) <= fabs(area12) * 1e-5 &&
465  erro12 >= errmax * .99) {
466  if (extrap)
467  ++iroff2;
468  else
469  ++iroff1;
470  }
471  if (*last > 10 && erro12 > errmax) ++iroff3;
472  }
473  rlist[maxerr] = area1;
474  rlist[*last] = area2;
475  errbnd = fmax2(*epsabs, *epsrel * fabs(area));
476 
477  if (iroff1 + iroff2 >= 10 || iroff3 >= 20) *ier = 2;
478  if (iroff2 >= 5) ierro = 3;
479 
480  if (*last == *limit) *ier = 1;
481 
482  if (fmax2(fabs(a1), fabs(b2)) <=
483  (epmach * 100. + 1.) * (fabs(a2) + uflow * 1e3)) {
484  *ier = 4;
485  }
486 
487  if (error2 > error1) {
488  alist[maxerr] = a2;
489  alist[*last] = a1;
490  blist[*last] = b1;
491  rlist[maxerr] = area2;
492  rlist[*last] = area1;
493  elist[maxerr] = error2;
494  elist[*last] = error1;
495  } else {
496  alist[*last] = a2;
497  blist[maxerr] = b1;
498  blist[*last] = b2;
499  elist[maxerr] = error1;
500  elist[*last] = error2;
501  }
502 
503  rdqpsrt(limit, last, &maxerr, &errmax, &elist[1], &iord[1], &nrmax);
504 
505  if (errsum <= errbnd) goto L115;
506  if (*ier != 0) break;
507  if (*last == 2) {
508  small = fabs(*b - *a) * .375;
509  erlarg = errsum;
510  ertest = errbnd;
511  rlist2[1] = area;
512  continue;
513  }
514  if (noext) continue;
515 
516  erlarg -= erlast;
517  if (fabs(b1 - a1) > small) {
518  erlarg += erro12;
519  }
520  if (!extrap) {
521  if (fabs(blist[maxerr] - alist[maxerr]) > small) {
522  continue;
523  }
524  extrap = true;
525  nrmax = 2;
526  }
527 
528  if (ierro != 3 && erlarg > ertest) {
529  id = nrmax;
530  jupbnd = *last;
531  if (*last > *limit / 2 + 2) {
532  jupbnd = *limit + 3 - *last;
533  }
534  for (k = id; k <= jupbnd; ++k) {
535  maxerr = iord[nrmax];
536  errmax = elist[maxerr];
537  if (fabs(blist[maxerr] - alist[maxerr]) > small) {
538  goto L90;
539  }
540  ++nrmax;
541  }
542  }
543 
544  ++numrl2;
545  rlist2[numrl2 - 1] = area;
546  rdqelg(&numrl2, rlist2, &reseps, &abseps, res3la, &nres);
547  ++ktmin;
548  if (ktmin > 5 && *abserr < errsum * .001) {
549  *ier = 5;
550  }
551  if (abseps < *abserr) {
552  ktmin = 0;
553  *abserr = abseps;
554  *result = reseps;
555  correc = erlarg;
556  ertest = fmax2(*epsabs, *epsrel * fabs(reseps));
557  if (*abserr <= ertest) {
558  break;
559  }
560  }
561 
562  if (numrl2 == 1) {
563  noext = true;
564  }
565  if (*ier == 5) {
566  break;
567  }
568  maxerr = iord[1];
569  errmax = elist[maxerr];
570  nrmax = 1;
571  extrap = false;
572  small *= .5;
573  erlarg = errsum;
574  L90:;
575  }
576 
577  if (*abserr == oflow) goto L115;
578  if (*ier + ierro == 0) goto L110;
579  if (ierro == 3) *abserr += correc;
580  if (*ier == 0) *ier = 3;
581  if (*result == 0. || area == 0.) {
582  if (*abserr > errsum) goto L115;
583  if (area == 0.) goto L130;
584  } else {
585  if (*abserr / fabs(*result) > errsum / fabs(area)) goto L115;
586  }
587 
588 L110:
589  if (ksgn == -1 && fmax2(fabs(*result), fabs(area)) <= defabs * .01) {
590  goto L130;
591  }
592  if (.01 > *result / area || *result / area > 100. || errsum > fabs(area)) {
593  *ier = 5;
594  }
595  goto L130;
596 
597 L115:
598  *result = 0.;
599  for (k = 1; k <= *last; ++k) *result += rlist[k];
600  *abserr = errsum;
601 L130:
602  if (*ier > 2)
603  L140:
604  *neval = *last * 42 - 21;
605  return;
606 }
607 
// QUADPACK dqk15i: 15-point Gauss-Kronrod rule for (semi-)infinite
// ranges.  The original infinite range is mapped onto (0,1] via
// x = boun + dinf*(1-t)/t, so the integrand is evaluated at transformed
// abscissae and multiplied by the Jacobian 1/t^2.
// *inf: 1 = (boun,+oo), -1 = (-oo,boun), 2 = (-oo,+oo); for inf == 2
// the rule integrates f(x) + f(-x) over (0,+oo).
// Out: *result (15-point Kronrod estimate on [*a,*b] in (0,1]),
// *abserr (error estimate), *resabs (integral of |f|), *resasc
// (integral of |f - mean|, used to rescale *abserr).
template <class Float, class integr_fn>
static void rdqk15i(integr_fn f, void *ex, Float *boun, int *inf, Float *a,
                    Float *b, Float *result, Float *abserr, Float *resabs,
                    Float *resasc) {
  // 7-point Gauss weights (zeros interleaved so wg and wgk index
  // uniformly), Kronrod abscissae (xgk) and Kronrod weights (wgk).
  static double wg[8] = {0., .129484966168869693270611432679082,
                         0., .27970539148927666790146777142378,
                         0., .381830050505118944950369775488975,
                         0., .417959183673469387755102040816327};
  static double xgk[8] = {
      .991455371120812639206854697526329, .949107912342758524526189684047851,
      .864864423359769072789712788640926, .741531185599394439863864773280788,
      .58608723546769113029414483825873, .405845151377397166906606412076961,
      .207784955007898467600689403773245, 0.};
  static double wgk[8] = {
      .02293532201052922496373200805897, .063092092629978553290700663189204,
      .104790010322250183839876322541518, .140653259715525918745189590510238,
      .16900472663926790282658342659855, .190350578064785409913256402421014,
      .204432940075298892414161999234649, .209482141084727828012999174891714};

  Float absc, dinf, resg, resk, fsum, absc1, absc2, fval1, fval2;
  int j;
  Float hlgth, centr, reskh, uflow;
  Float tabsc1, tabsc2, fc, epmach;
  Float fv1[7], fv2[7], vec[15], vec2[15];
  epmach = DBL_EPSILON;
  uflow = DBL_MIN;
  // dinf = min(1, inf): +1 for both semi-infinite cases and (-oo,+oo).
  dinf = (double)imin2(1, *inf);

  // Midpoint and half-length of the transformed subinterval [a,b].
  centr = (*a + *b) * .5;
  hlgth = (*b - *a) * .5;
  tabsc1 = *boun + dinf * (1. - centr) / centr;
  vec[0] = tabsc1;
  if (*inf == 2) {
    vec2[0] = -tabsc1;
  }
  // Collect all 15 transformed abscissae (and the mirrored set for
  // inf == 2) so the integrand can be evaluated in one vectorized call.
  for (j = 1; j <= 7; ++j) {
    absc = hlgth * xgk[j - 1];
    absc1 = centr - absc;
    absc2 = centr + absc;
    tabsc1 = *boun + dinf * (1. - absc1) / absc1;
    tabsc2 = *boun + dinf * (1. - absc2) / absc2;
    vec[(j << 1) - 1] = tabsc1;
    vec[j * 2] = tabsc2;
    if (*inf == 2) {
      vec2[(j << 1) - 1] = -tabsc1;
      vec2[j * 2] = -tabsc2;
    }
  }
  f(vec, 15, ex);
  if (*inf == 2) f(vec2, 15, ex);
  fval1 = vec[0];
  if (*inf == 2) fval1 += vec2[0];
  // Centre contribution, including the 1/t^2 Jacobian of the mapping.
  fc = fval1 / centr / centr;

  resg = wg[7] * fc;
  resk = wgk[7] * fc;
  *resabs = fabs(resk);
  // Accumulate the Gauss (resg) and Kronrod (resk) sums pairwise.
  for (j = 1; j <= 7; ++j) {
    absc = hlgth * xgk[j - 1];
    absc1 = centr - absc;
    absc2 = centr + absc;
    tabsc1 = *boun + dinf * (1. - absc1) / absc1;
    tabsc2 = *boun + dinf * (1. - absc2) / absc2;
    fval1 = vec[(j << 1) - 1];
    fval2 = vec[j * 2];
    if (*inf == 2) {
      fval1 += vec2[(j << 1) - 1];
    }
    if (*inf == 2) {
      fval2 += vec2[j * 2];
    }
    // Apply the Jacobian of the (0,1] mapping.
    fval1 = fval1 / absc1 / absc1;
    fval2 = fval2 / absc2 / absc2;
    fv1[j - 1] = fval1;
    fv2[j - 1] = fval2;
    fsum = fval1 + fval2;
    resg += wg[j - 1] * fsum;
    resk += wgk[j - 1] * fsum;
    *resabs += wgk[j - 1] * (fabs(fval1) + fabs(fval2));
  }
  reskh = resk * .5;
  // resasc: integral of |f - mean value|, for scaling the error.
  *resasc = wgk[7] * fabs(fc - reskh);
  for (j = 1; j <= 7; ++j) {
    *resasc +=
        wgk[j - 1] * (fabs(fv1[j - 1] - reskh) + fabs(fv2[j - 1] - reskh));
  }
  *result = resk * hlgth;
  *resasc *= hlgth;
  *resabs *= hlgth;
  // Error estimate: |Kronrod - Gauss|, rescaled as in QUADPACK.
  *abserr = fabs((resk - resg) * hlgth);
  if (*resasc != 0. && *abserr != 0.) {
    *abserr = *resasc * fmin2(1., pow(*abserr * 200. / *resasc, 1.5));
  }
  if (*resabs > uflow / (epmach * 50.)) {
    *abserr = fmax2(epmach * 50. * *resabs, *abserr);
  }
  return;
}
706 
// QUADPACK dqelg: Wynn's epsilon algorithm.  Given the *n most recent
// elements of a slowly convergent sequence in epstab, determine an
// extrapolated limit (*result) together with an error estimate
// (*abserr).  res3la keeps the last three extrapolated results and
// *nres counts the calls so the error estimate can be based on the
// spread of successive extrapolations.
template <class Float>
static void rdqelg(int *n, Float *epstab, Float *result, Float *abserr,
                   Float *res3la, int *nres) {
  int i__, indx, ib, ib2, ie, k1, k2, k3, num, newelm, limexp;
  Float delta1, delta2, delta3, e0, e1, e1abs, e2, e3, epmach, epsinf;
  Float oflow, ss, res;
  Float errA, err1, err2, err3, tol1, tol2, tol3;
  // Fortran-style 1-based indexing.
  --res3la;
  --epstab;

  epmach = DBL_EPSILON;
  oflow = DBL_MAX;
  ++(*nres);
  *abserr = oflow;
  *result = epstab[*n];
  if (*n < 3) {
    goto L100;
  }
  // Maximum epsilon-table size before it gets shifted.
  limexp = 50;
  epstab[*n + 2] = epstab[*n];
  newelm = (*n - 1) / 2;
  epstab[*n] = oflow;
  num = *n;
  k1 = *n;
  for (i__ = 1; i__ <= newelm; ++i__) {
    k2 = k1 - 1;
    k3 = k1 - 2;
    res = epstab[k1 + 2];
    e0 = epstab[k3];
    e1 = epstab[k2];
    e2 = res;
    e1abs = fabs(e1);
    delta2 = e2 - e1;
    err2 = fabs(delta2);
    tol2 = fmax2(fabs(e2), e1abs) * epmach;
    delta3 = e1 - e0;
    err3 = fabs(delta3);
    tol3 = fmax2(e1abs, fabs(e0)) * epmach;
    // e0, e1 and e2 equal within machine accuracy: convergence.
    if (err2 <= tol2 && err3 <= tol3) {
      *result = res;
      *abserr = err2 + err3;

      goto L100;
    }

    e3 = epstab[k1];
    epstab[k1] = e1;
    delta1 = e1 - e3;
    err1 = fabs(delta1);
    tol1 = fmax2(e1abs, fabs(e3)) * epmach;

    // If two elements are very close, truncating the table is more
    // prudent than continuing the extrapolation.
    if (err1 > tol1 && err2 > tol2 && err3 > tol3) {
      ss = 1. / delta1 + 1. / delta2 - 1. / delta3;
      epsinf = fabs(ss * e1);

      if (epsinf > 1e-4) {
        goto L30;
      }
    }

    *n = i__ + i__ - 1;
    goto L50;

  L30:

    // Compute a new element of the epsilon table.
    res = e1 + 1. / ss;
    epstab[k1] = res;
    k1 += -2;
    errA = err2 + fabs(res - e2) + err3;
    if (errA <= *abserr) {
      *abserr = errA;
      *result = res;
    }
  }

L50:
  // Shift the table once it would exceed its maximum size.
  if (*n == limexp) {
    *n = (limexp / 2 << 1) - 1;
  }

  // ib = 2 if num is even, else 1 (num/2<<1 rounds num down to even).
  if (num / 2 << 1 == num)
    ib = 2;
  else
    ib = 1;
  ie = newelm + 1;
  for (i__ = 1; i__ <= ie; ++i__) {
    ib2 = ib + 2;
    epstab[ib] = epstab[ib2];
    ib = ib2;
  }
  if (num != *n) {
    indx = num - *n + 1;
    for (i__ = 1; i__ <= *n; ++i__) {
      epstab[i__] = epstab[indx];
      ++indx;
    }
  }

  // Error estimate from the spread of the last three results.
  if (*nres >= 4) {
    *abserr = fabs(*result - res3la[3]) + fabs(*result - res3la[2]) +
              fabs(*result - res3la[1]);
    res3la[1] = res3la[2];
    res3la[2] = res3la[3];
    res3la[3] = *result;
  } else {
    res3la[*nres] = *result;
    *abserr = oflow;
  }

L100:
  // Never report an error estimate below machine precision.
  *abserr = fmax2(*abserr, epmach * 5. * fabs(*result));
  return;
}
820 
// QUADPACK dqk21: 21-point Gauss-Kronrod rule on [*a, *b].
// Out: *result (Kronrod estimate), *abserr (error estimate based on the
// difference with the embedded 10-point Gauss rule), *resabs (integral
// of |f|), *resasc (integral of |f - mean|, used to rescale *abserr).
// The integrand is evaluated through one vectorized callback on all 21
// abscissae.
template <class Float, class integr_fn>
static void rdqk21(integr_fn f, void *ex, Float *a, Float *b, Float *result,
                   Float *abserr, Float *resabs, Float *resasc) {
  // 10-point Gauss weights (wg), 21-point Kronrod abscissae (xgk; the
  // even-indexed entries are the Gauss abscissae) and Kronrod weights.
  static double wg[5] = {
      .066671344308688137593568809893332, .149451349150580593145776339657697,
      .219086362515982043995534934228163, .269266719309996355091226921569469,
      .295524224714752870173892994651338};
  static double xgk[11] = {.995657163025808080735527280689003,
                           .973906528517171720077964012084452,
                           .930157491355708226001207180059508,
                           .865063366688984510732096688423493,
                           .780817726586416897063717578345042,
                           .679409568299024406234327365114874,
                           .562757134668604683339000099272694,
                           .433395394129247190799265943165784,
                           .294392862701460198131126603103866,
                           .14887433898163121088482600112972,
                           0.};
  static double wgk[11] = {
      .011694638867371874278064396062192, .03255816230796472747881897245939,
      .05475589657435199603138130024458, .07503967481091995276704314091619,
      .093125454583697605535065465083366, .109387158802297641899210590325805,
      .123491976262065851077958109831074, .134709217311473325928054001771707,
      .142775938577060080797094273138717, .147739104901338491374841515972068,
      .149445554002916905664936468389821};

  Float fv1[10], fv2[10], vec[21];
  Float absc, resg, resk, fsum, fval1, fval2;
  Float hlgth, centr, reskh, uflow;
  Float fc, epmach, dhlgth;
  int j, jtw, jtwm1;
  epmach = DBL_EPSILON;
  uflow = DBL_MIN;

  centr = (*a + *b) * .5;
  hlgth = (*b - *a) * .5;
  dhlgth = fabs(hlgth);

  resg = 0.;
  // Gather all 21 abscissae: centre, then the 10 Gauss/Kronrod pairs
  // (vec[1..10]), then the 10 Kronrod-only pairs (vec[11..20]).
  vec[0] = centr;
  for (j = 1; j <= 5; ++j) {
    jtw = j << 1;
    absc = hlgth * xgk[jtw - 1];
    vec[(j << 1) - 1] = centr - absc;

    vec[j * 2] = centr + absc;
  }
  for (j = 1; j <= 5; ++j) {
    jtwm1 = (j << 1) - 1;
    absc = hlgth * xgk[jtwm1 - 1];
    vec[(j << 1) + 9] = centr - absc;
    vec[(j << 1) + 10] = centr + absc;
  }
  // Single vectorized evaluation of the integrand.
  f(vec, 21, ex);
  fc = vec[0];
  resk = wgk[10] * fc;
  *resabs = fabs(resk);
  // Abscissae shared by the Gauss and Kronrod rules.
  for (j = 1; j <= 5; ++j) {
    jtw = j << 1;
    absc = hlgth * xgk[jtw - 1];
    fval1 = vec[(j << 1) - 1];
    fval2 = vec[j * 2];
    fv1[jtw - 1] = fval1;
    fv2[jtw - 1] = fval2;
    fsum = fval1 + fval2;
    resg += wg[j - 1] * fsum;
    resk += wgk[jtw - 1] * fsum;
    *resabs += wgk[jtw - 1] * (fabs(fval1) + fabs(fval2));
  }
  // Abscissae used by the Kronrod rule only.
  for (j = 1; j <= 5; ++j) {
    jtwm1 = (j << 1) - 1;
    absc = hlgth * xgk[jtwm1 - 1];
    fval1 = vec[(j << 1) + 9];
    fval2 = vec[(j << 1) + 10];
    fv1[jtwm1 - 1] = fval1;
    fv2[jtwm1 - 1] = fval2;
    fsum = fval1 + fval2;
    resk += wgk[jtwm1 - 1] * fsum;
    *resabs += wgk[jtwm1 - 1] * (fabs(fval1) + fabs(fval2));
  }
  reskh = resk * .5;
  // resasc: integral of |f - mean value|.
  *resasc = wgk[10] * fabs(fc - reskh);
  for (j = 1; j <= 10; ++j) {
    *resasc +=
        wgk[j - 1] * (fabs(fv1[j - 1] - reskh) + fabs(fv2[j - 1] - reskh));
  }
  *result = resk * hlgth;
  *resabs *= dhlgth;
  *resasc *= dhlgth;
  // Error estimate: |Kronrod - Gauss|, rescaled as in QUADPACK.
  *abserr = fabs((resk - resg) * hlgth);
  if (*resasc != 0. && *abserr != 0.) {
    *abserr = *resasc * fmin2(1., pow(*abserr * 200. / *resasc, 1.5));
  }
  if (*resabs > uflow / (epmach * 50.)) {
    *abserr = fmax2(epmach * 50. * *resabs, *abserr);
  }
  return;
}
919 
// QUADPACK dqpsrt: maintain the descending ordering of the error
// estimates in elist.  iord[1..last] indexes elist so that
// elist[iord[1]] >= elist[iord[2]] >= ...; on return *maxerr points to
// the subinterval with the *nrmax-th largest error and *ermax holds its
// error estimate.
template <class Float>
static void rdqpsrt(int *limit, int *last, int *maxerr, Float *ermax,
                    Float *elist, int *iord, int *nrmax) {
  int i, j, k, ido, jbnd, isucc, jupbn;
  Float errmin, errmax;
  // Fortran-style 1-based indexing.
  --iord;
  --elist;

  if (*last <= 2) {
    iord[1] = 1;
    iord[2] = 2;
    goto Last;
  }

  // The entry for *maxerr may move up the list because of the error
  // estimates produced by the preceding bisection.
  errmax = elist[*maxerr];
  if (*nrmax > 1) {
    ido = *nrmax - 1;
    for (i = 1; i <= ido; ++i) {
      isucc = iord[*nrmax - 1];
      if (errmax <= elist[isucc]) break;
      iord[*nrmax] = isucc;
      --(*nrmax);
    }
  }

  // Only the first (limit/2 + 2) positions need insertion once the
  // subdivision is more than half exhausted.
  if (*last > *limit / 2 + 2)
    jupbn = *limit + 3 - *last;
  else
    jupbn = *last;

  errmin = elist[*last];

  // Insert errmax scanning downwards and errmin scanning upwards.
  jbnd = jupbn - 1;
  for (i = *nrmax + 1; i <= jbnd; ++i) {
    isucc = iord[i];
    if (errmax >= elist[isucc]) {
      iord[i - 1] = *maxerr;
      for (j = i, k = jbnd; j <= jbnd; j++, k--) {
        isucc = iord[k];
        if (errmin < elist[isucc]) {
          iord[k + 1] = *last;
          goto Last;
        }
        iord[k + 1] = isucc;
      }
      iord[i] = *last;
      goto Last;
    }
    iord[i - 1] = isucc;
  }

  iord[jbnd] = *maxerr;
  iord[jupbn] = *last;

Last:

  // Report the *nrmax-th largest error estimate and its index.
  *maxerr = iord[*nrmax];
  *ermax = elist[*maxerr];
  return;
}
980 
// Control parameters for the adaptive integrator.
struct control {
  int subdivisions;  // maximum number of subintervals ('limit' in QUADPACK)
  double reltol;     // requested relative accuracy
  double abstol;     // requested absolute accuracy
  // Constructor is defined out of line in the generated source.
  control(int subdivisions_ = 100, double reltol_ = 1e-4,
          double abstol_ = 1e-4);
};
995 
// Interface to the QUADPACK drivers for a univariate functor.
// Integrand must expose a typedef 'Scalar' and 'Scalar operator()(Scalar)'.
// Finite bounds dispatch to Rdqags; (semi-)infinite bounds to Rdqagi.
template <class Integrand>
struct Integral {
  typedef typename Integrand::Scalar Type;

  // Adapts the scalar functor to the vectorized callback signature the
  // QUADPACK routines expect (evaluate in place on an array of points).
  struct vectorized_integrand {
    Integrand f;
    vectorized_integrand(Integrand f_) : f(f_) {}
    void operator()(Type *x, int n, void *ex) {
      for (int i = 0; i < n; i++) x[i] = f(x[i]);
    }
  } fn;
  // Access the wrapped integrand.
  Integrand &integrand() { return fn.f; }

  // Accuracy requests and integrator outputs (QUADPACK naming).
  Type epsabs, epsrel, result, abserr;
  int neval, ier, limit, lenw, last;
  std::vector<int> iwork;
  std::vector<Type> work;
  // Reset accuracy requests and all output fields.
  void setAccuracy(double epsrel_ = 1e-4, double epsabs_ = 1e-4) {
    epsabs = epsabs_;
    epsrel = epsrel_;
    result = 0;
    abserr = 1e4;
    neval = 0;
    ier = 0;
    last = 0;
  }
  // Allocate the integer/real workspaces (lenw = 4 * limit as required
  // by the QUADPACK drivers).
  void setWorkspace(int subdivisions = 100) {
    limit = subdivisions;
    lenw = 4 * limit;
    iwork.resize(limit);
    work.resize(lenw);
  }
  Type a, b, bound;
  // inf: 0 = [a,b], 1 = (bound,+oo), -1 = (-oo,bound), 2 = (-oo,+oo).
  int inf;
  void setBounds(Type a_, Type b_) {
    int a_finite = (a_ != -INFINITY) && (a_ != INFINITY);
    int b_finite = (b_ != -INFINITY) && (b_ != INFINITY);
    if (a_finite && b_finite) {
      inf = 0;
      a = a_;
      b = b_;
    } else if (a_finite && !b_finite) {
      inf = 1;
      bound = a_;
    } else if (!a_finite && b_finite) {
      inf = -1;
      bound = b_;
    } else {
      inf = 2;
    }
  }
  // Construct from integrand, bounds and control parameters.  Note that
  // setAccuracy takes (epsrel_, epsabs_), matching (c.reltol, c.abstol).
  Integral(Integrand f_, Type a_, Type b_, control c = control()) : fn(f_) {
    setAccuracy(c.reltol, c.abstol);
    setWorkspace(c.subdivisions);
    setBounds(a_, b_);
  }
  // Evaluate the integral; diagnostics remain in abserr/neval/ier/last.
  Type operator()() {
    if (inf)
      Rdqagi(fn, NULL, &bound, &inf, &epsabs, &epsrel, &result, &abserr, &neval,
             &ier, &limit, &lenw, &last, &iwork[0], &work[0]);
    else
      Rdqags(fn, NULL, &a, &b, &epsabs, &epsrel, &result, &abserr, &neval, &ier,
             &limit, &lenw, &last, &iwork[0], &work[0]);
    return result;
  }
};
1083 
1110 template <class Integrand>
1111 typename Integrand::Scalar integrate(Integrand f,
1112  typename Integrand::Scalar a = -INFINITY,
1113  typename Integrand::Scalar b = INFINITY,
1114  control c = control()) {
1115  Integral<Integrand> I(f, a, b, c);
1116  return I();
1117 }
1118 
// One integration dimension of a multivariate integral.  Wraps an
// integrand so that one of its variables (referenced by 'x') becomes
// the variable of integration; further dimensions are added with wrt().
template <class Integrand>
struct mvIntegral {
  typedef typename Integrand::Scalar Scalar;
  // Evaluates the wrapped integrand at a given value of the bound
  // variable: write x_ into the referenced variable, then call f().
  struct evaluator {
    typedef typename Integrand::Scalar Scalar;
    Integrand &f;
    Scalar &x;
    evaluator(Integrand &f_, Scalar &x_) : f(f_), x(x_) {}
    Scalar operator()(const Scalar &x_) {
      x = x_;
      return f();
    }
  } ev;
  control c;
  // NOTE(review): the declaration of the inner integral member 'I'
  // (initialized below and used by operator()) is not visible in this
  // generated listing -- presumably 'Integral<evaluator> I;' was elided
  // together with the documentation lines; confirm against the original
  // header.
  mvIntegral(Integrand &f_, Scalar &x_, Scalar a = -INFINITY,
             Scalar b = INFINITY, control c_ = control())
      : ev(f_, x_), c(c_), I(ev, a, b, c_) {}
  Scalar operator()() { return I(); }
  // Add another integration dimension over variable 'x' in [a, b].
  mvIntegral<mvIntegral> wrt(Scalar &x, Scalar a = -INFINITY,
                             Scalar b = INFINITY) {
    return mvIntegral<mvIntegral>(*this, x, a, b, c);
  }
};
1158 
1159 template <class Integrand>
1160 struct mvIntegral0 {
1161  typedef typename Integrand::Scalar Scalar;
1162  Integrand &f;
1163  control c;
1164  mvIntegral0(Integrand &f_, control c_) : f(f_), c(c_) {}
1166  mvIntegral<Integrand> wrt(Scalar &x, Scalar a = -INFINITY,
1167  Scalar b = INFINITY) {
1168  return mvIntegral<Integrand>(f, x, a, b, c);
1169  }
1170 };
1198 template <class Integrand>
1199 mvIntegral0<Integrand> mvIntegrate(Integrand &f, control c = control()) {
1200  return mvIntegral0<Integrand>(f, c);
1201 }
1202 
1203 } // namespace TMBad
1204 #endif // HAVE_INTEGRATE_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:153
+
1 #ifndef HAVE_INTEGRATE_HPP
2 #define HAVE_INTEGRATE_HPP
3 // Autogenerated - do not edit by hand !
4 #include <float.h> // INFINITY etc
5 #include "global.hpp"
6 
7 namespace TMBad {
8 
// Strip AD type information: return the underlying double of an AD
// scalar via TMBad::Value (a plain double overload is declared below).
template <class T>
double value(T x) {
  return TMBad::Value(x);
}
18 double value(double x);
// Minimum of two (possibly differently typed) values, as an int.
template <class S, class T>
int imin2(S x, T y) {
  if (x < y) return x;
  return y;
}
// Minimum of two values as a plain double; operands are first reduced
// with value() (strips any AD type).  Each operand is converted exactly
// once -- the original ternary re-evaluated value() on the selected
// operand.
template <class S, class T>
double fmin2(S x, T y) {
  double xv = value(x);
  double yv = value(y);
  // Ties return yv, matching the original `(vx < vy) ? vx : vy`.
  return (xv < yv) ? xv : yv;
}
// Maximum of two values as a plain double; operands are first reduced
// with value() (strips any AD type).  Each operand is converted exactly
// once -- the original ternary re-evaluated value() on the selected
// operand.
template <class S, class T>
double fmax2(S x, T y) {
  double xv = value(x);
  double yv = value(y);
  // Ties return xv, matching the original `(vx < vy) ? vy : vx`.
  return (xv < yv) ? yv : xv;
}
31 template <class Float, class integr_fn>
32 static void rdqagie(integr_fn f, void *ex, Float *, int *, Float *, Float *,
33  int *, Float *, Float *, int *, int *, Float *, Float *,
34  Float *, Float *, int *, int *);
35 
36 template <class Float, class integr_fn>
37 static void rdqk15i(integr_fn f, void *ex, Float *, int *, Float *, Float *,
38  Float *, Float *, Float *, Float *);
39 
40 template <class Float, class integr_fn>
41 static void rdqagse(integr_fn f, void *ex, Float *, Float *, Float *, Float *,
42  int *, Float *, Float *, int *, int *, Float *, Float *,
43  Float *, Float *, int *, int *);
44 
45 template <class Float, class integr_fn>
46 static void rdqk21(integr_fn f, void *ex, Float *, Float *, Float *, Float *,
47  Float *, Float *);
48 
49 template <class Float>
50 static void rdqpsrt(int *, int *, int *, Float *, Float *, int *, int *);
51 
52 template <class Float>
53 static void rdqelg(int *, Float *, Float *, Float *, Float *, int *);
54 
// Driver for integration over a (semi-)infinite range (QUADPACK dqagi).
// Validates the workspace sizes, splits the user workspace 'work'
// (length lenw >= 4*limit) into the four arrays required by rdqagie and
// forwards all arguments.
// *inf: 1 = (bound,+oo), -1 = (-oo,bound), 2 = (-oo,+oo).
// On an invalid workspace the call returns immediately with *ier = 6.
template <class Float, class integr_fn>
void Rdqagi(integr_fn f, void *ex, Float *bound, int *inf, Float *epsabs,
            Float *epsrel, Float *result, Float *abserr, int *neval, int *ier,
            int *limit, int *lenw, int *last, int *iwork, Float *work) {
  int l1, l2, l3;
  // Pre-set the failure state; overwritten by rdqagie on a valid call.
  *ier = 6;
  *neval = 0;
  *last = 0;
  *result = 0.;
  *abserr = 0.;
  // '* 4' (was '<< 2') for consistency with Rdqags.
  if (*limit < 1 || *lenw < *limit * 4) return;

  // Partition 'work' into the alist / blist / rlist / elist segments.
  l1 = *limit;
  l2 = *limit + l1;
  l3 = *limit + l2;

  rdqagie(f, ex, bound, inf, epsabs, epsrel, limit, result, abserr, neval, ier,
          work, &work[l1], &work[l2], &work[l3], iwork, last);

  return;
}
76 
// QUADPACK dqagie: globally adaptive integrator for a (semi-)infinite
// range.  The range is mapped onto (0,1] and each subinterval is
// handled with the 15-point Gauss-Kronrod rule for infinite ranges
// (rdqk15i); the subinterval with the largest error estimate is
// bisected and Wynn's epsilon algorithm (rdqelg) accelerates
// convergence.  *inf selects the range: 1 = (bound,+oo),
// -1 = (-oo,bound), 2 = (-oo,+oo).
// Out: result/abserr approximation and error estimate, neval number of
// integrand evaluations, ier error code (0 = ok, 6 = invalid input,
// see rdqagse for the rest), and alist/blist/rlist/elist/iord/last the
// final subdivision (expressed on the transformed interval (0,1]).
template <class Float, class integr_fn>
static void rdqagie(integr_fn f, void *ex, Float *bound, int *inf,
                    Float *epsabs, Float *epsrel, int *limit, Float *result,
                    Float *abserr, int *neval, int *ier, Float *alist,
                    Float *blist, Float *rlist, Float *elist, int *iord,
                    int *last) {
  Float area, dres;
  int ksgn;
  Float boun;
  int nres;
  Float area1, area2, area12;
  int k;
  Float small = 0.0, erro12;
  int ierro;
  Float a1, a2, b1, b2, defab1, defab2, oflow;
  int ktmin, nrmax;
  Float uflow;
  bool noext;
  int iroff1, iroff2, iroff3;
  Float res3la[3], error1, error2;
  int id;
  Float rlist2[52];
  int numrl2;
  Float defabs, epmach, erlarg = 0.0, abseps, correc = 0.0, errbnd, resabs;
  int jupbnd;
  Float erlast, errmax;
  int maxerr;
  Float reseps;
  bool extrap;
  Float ertest = 0.0, errsum;
  // Fortran-style 1-based indexing of the work arrays.
  --iord;
  --elist;
  --rlist;
  --blist;
  --alist;

  epmach = DBL_EPSILON;

  *ier = 0;
  *neval = 0;
  *last = 0;
  *result = 0.;
  *abserr = 0.;
  // The transformed integration variable lives on (0, 1].
  alist[1] = 0.;
  blist[1] = 1.;
  rlist[1] = 0.;
  elist[1] = 0.;
  iord[1] = 0;
  // Reject an accuracy request below double-precision capabilities.
  if (*epsabs <= 0. && (*epsrel < fmax2(epmach * 50., 5e-29))) *ier = 6;
  if (*ier == 6) return;
  // For a doubly infinite range integrate f(x)+f(-x) over (0,+oo).
  boun = *bound;
  if (*inf == 2) {
    boun = 0.;
  }

  static Float c_b6 = 0.;
  static Float c_b7 = 1.;

  // First approximation on the whole transformed interval (0,1].
  rdqk15i(f, ex, &boun, inf, &c_b6, &c_b7, result, abserr, &defabs, &resabs);

  *last = 1;
  rlist[1] = *result;
  elist[1] = *abserr;
  iord[1] = 1;
  dres = fabs(*result);
  errbnd = fmax2(*epsabs, *epsrel * dres);
  if (*abserr <= epmach * 100. * defabs && *abserr > errbnd) *ier = 2;
  if (*limit == 1) *ier = 1;
  if (*ier != 0 || (*abserr <= errbnd && *abserr != resabs) || *abserr == 0.)
    goto L130;

  // Initialization for the bisection/extrapolation loop.
  uflow = DBL_MIN;
  oflow = DBL_MAX;
  rlist2[0] = *result;
  errmax = *abserr;
  maxerr = 1;
  area = *result;
  errsum = *abserr;
  *abserr = oflow;
  nrmax = 1;
  nres = 0;
  ktmin = 0;
  numrl2 = 2;
  extrap = false;
  noext = false;
  ierro = 0;
  iroff1 = 0;
  iroff2 = 0;
  iroff3 = 0;
  ksgn = -1;
  if (dres >= (1. - epmach * 50.) * defabs) {
    ksgn = 1;
  }

  for (*last = 2; *last <= *limit; ++(*last)) {
    // Bisect the subinterval with the largest error estimate.
    a1 = alist[maxerr];
    b1 = (alist[maxerr] + blist[maxerr]) * .5;
    a2 = b1;
    b2 = blist[maxerr];
    erlast = errmax;
    rdqk15i(f, ex, &boun, inf, &a1, &b1, &area1, &error1, &resabs, &defab1);
    rdqk15i(f, ex, &boun, inf, &a2, &b2, &area2, &error2, &resabs, &defab2);

    // Update running totals and the roundoff-detection counters.
    area12 = area1 + area2;
    erro12 = error1 + error2;
    errsum = errsum + erro12 - errmax;
    area = area + area12 - rlist[maxerr];
    if (!(defab1 == error1 || defab2 == error2)) {
      if (fabs(rlist[maxerr] - area12) <= fabs(area12) * 1e-5 &&
          erro12 >= errmax * .99) {
        if (extrap)
          ++iroff2;
        else
          ++iroff1;
      }
      if (*last > 10 && erro12 > errmax) ++iroff3;
    }

    rlist[maxerr] = area1;
    rlist[*last] = area2;
    errbnd = fmax2(*epsabs, *epsrel * fabs(area));

    if (iroff1 + iroff2 >= 10 || iroff3 >= 20) *ier = 2;
    if (iroff2 >= 5) ierro = 3;

    if (*last == *limit) *ier = 1;

    // Subinterval became too small relative to machine precision.
    if (fmax2(fabs(a1), fabs(b2)) <=
        (epmach * 100. + 1.) * (fabs(a2) + uflow * 1e3)) {
      *ier = 4;
    }

    // Store the halves; keep the larger-error half at position maxerr.
    if (error2 <= error1) {
      alist[*last] = a2;
      blist[maxerr] = b1;
      blist[*last] = b2;
      elist[maxerr] = error1;
      elist[*last] = error2;
    } else {
      alist[maxerr] = a2;
      alist[*last] = a1;
      blist[*last] = b1;
      rlist[maxerr] = area2;
      rlist[*last] = area1;
      elist[maxerr] = error2;
      elist[*last] = error1;
    }

    // Maintain the descending ordering of the error estimates.
    rdqpsrt(limit, last, &maxerr, &errmax, &elist[1], &iord[1], &nrmax);
    if (errsum <= errbnd) {
      goto L115;
    }
    if (*ier != 0) break;
    if (*last == 2) {
      // Initial "small" interval scale: 3/8 of the unit interval.
      small = .375;
      erlarg = errsum;
      ertest = errbnd;
      rlist2[1] = area;
      continue;
    }
    if (noext) continue;

    erlarg -= erlast;
    if (fabs(b1 - a1) > small) {
      erlarg += erro12;
    }
    if (!extrap) {
      // Start extrapolating only once the largest subinterval is "small".
      if (fabs(blist[maxerr] - alist[maxerr]) > small) {
        continue;
      }
      extrap = true;
      nrmax = 2;
    }

    if (ierro != 3 && erlarg > ertest) {
      // Prefer bisecting the remaining large subintervals first.
      id = nrmax;
      jupbnd = *last;
      if (*last > *limit / 2 + 2) {
        jupbnd = *limit + 3 - *last;
      }
      for (k = id; k <= jupbnd; ++k) {
        maxerr = iord[nrmax];
        errmax = elist[maxerr];
        if (fabs(blist[maxerr] - alist[maxerr]) > small) {
          goto L90;
        }
        ++nrmax;
      }
    }

    // Epsilon-algorithm extrapolation of the sequence of area estimates.
    ++numrl2;
    rlist2[numrl2 - 1] = area;
    rdqelg(&numrl2, rlist2, &reseps, &abseps, res3la, &nres);
    ++ktmin;
    if (ktmin > 5 && *abserr < errsum * .001) {
      *ier = 5;
    }
    if (abseps >= *abserr) {
      goto L70;
    }
    ktmin = 0;
    *abserr = abseps;
    *result = reseps;
    correc = erlarg;
    ertest = fmax2(*epsabs, *epsrel * fabs(reseps));
    if (*abserr <= ertest) {
      break;
    }

  L70:
    // Prepare bisection of the smallest interval scale.
    if (numrl2 == 1) {
      noext = true;
    }
    if (*ier == 5) {
      break;
    }
    maxerr = iord[1];
    errmax = elist[maxerr];
    nrmax = 1;
    extrap = false;
    small *= .5;
    erlarg = errsum;
  L90:;
  }

  // Set the final result and error estimate.
  if (*abserr == oflow) {
    goto L115;
  }
  if (*ier + ierro == 0) {
    goto L110;
  }
  if (ierro == 3) {
    *abserr += correc;
  }
  if (*ier == 0) {
    *ier = 3;
  }
  if (*result == 0. || area == 0.) {
    if (*abserr > errsum) goto L115;

    if (area == 0.) goto L130;
  } else {
    if (*abserr / fabs(*result) > errsum / fabs(area)) {
      goto L115;
    }
  }

L110:
  // Test on divergence.
  if (ksgn == -1 && fmax2(fabs(*result), fabs(area)) <= defabs * .01) {
    goto L130;
  }
  if (.01 > *result / area || *result / area > 100. || errsum > fabs(area)) {
    *ier = 6;
  }
  goto L130;

L115:
  // Extrapolation failed: fall back to the plain sum over subintervals.
  *result = 0.;
  for (k = 1; k <= *last; ++k) *result += rlist[k];

  *abserr = errsum;
L130:
  // 15 integrand evaluations per rdqk15i call; doubled for (-oo,+oo).
  *neval = *last * 30 - 15;
  if (*inf == 2) {
    *neval <<= 1;
  }
  // Renumber the error codes as in the Fortran original.
  if (*ier > 2) {
    --(*ier);
  }
  return;
}
348 
// Driver for integration over a finite interval (QUADPACK dqags).
// Checks the workspace sizes, carves the real workspace 'work'
// (length lenw >= 4*limit) into four equal segments and delegates to
// rdqagse.  On an invalid workspace the call returns with *ier = 6.
template <class Float, class integr_fn>
void Rdqags(integr_fn f, void *ex, Float *a, Float *b, Float *epsabs,
            Float *epsrel, Float *result, Float *abserr, int *neval, int *ier,
            int *limit, int *lenw, int *last, int *iwork, Float *work) {
  // Failure defaults; overwritten by rdqagse when the call proceeds.
  *ier = 6;
  *neval = 0;
  *last = 0;
  *result = 0.;
  *abserr = 0.;
  if (*limit < 1 || *lenw < *limit * 4) return;

  // 'work' holds four consecutive blocks of length 'limit':
  // alist, blist, rlist, elist.
  const int seg = *limit;
  rdqagse(f, ex, a, b, epsabs, epsrel, limit, result, abserr, neval, ier, work,
          work + seg, work + 2 * seg, work + 3 * seg, iwork, last);
}
370 
// QUADPACK dqagse: globally adaptive integrator for the finite interval
// [*a, *b].  Strategy: 21-point Gauss-Kronrod rule on each subinterval
// (rdqk21), bisection of the subinterval with the largest error estimate
// (ordering maintained by rdqpsrt), and Wynn's epsilon-algorithm
// extrapolation (rdqelg) to accelerate slowly convergent sequences.
//
// In : f/ex integrand callback + opaque data, a/b endpoints,
//      epsabs/epsrel requested absolute/relative accuracy, limit max
//      number of subintervals.
// Out: result/abserr approximation and error estimate, neval number of
//      integrand evaluations, ier error code (0 = ok, 1 = max
//      subdivisions reached, 2 = roundoff detected, 3 = bad local
//      behaviour, 4 = no convergence, 5 = probably divergent,
//      6 = invalid input), alist/blist/rlist/elist/iord/last the final
//      subdivision state.
//
// Mechanical transcription of the Fortran original; gotos and 1-based
// indexing are kept to stay diffable against upstream.
template <class Float, class integr_fn>
static void rdqagse(integr_fn f, void *ex, Float *a, Float *b, Float *epsabs,
                    Float *epsrel, int *limit, Float *result, Float *abserr,
                    int *neval, int *ier, Float *alist, Float *blist,
                    Float *rlist, Float *elist, int *iord, int *last) {
  bool noext, extrap;
  int k, ksgn, nres;
  int ierro;
  int ktmin, nrmax;
  int iroff1, iroff2, iroff3;
  int id;
  int numrl2;
  int jupbnd;
  int maxerr;
  Float res3la[3];
  Float rlist2[52];
  Float abseps, area, area1, area2, area12, dres, epmach;
  Float a1, a2, b1, b2, defabs, defab1, defab2, oflow, uflow, resabs, reseps;
  Float error1, error2, erro12, errbnd, erlast, errmax, errsum;

  Float correc = 0.0, erlarg = 0.0, ertest = 0.0, small = 0.0;
  // Shift the work arrays for Fortran-style 1-based indexing.
  --iord;
  --elist;
  --rlist;
  --blist;
  --alist;

  epmach = DBL_EPSILON;

  *ier = 0;
  *neval = 0;
  *last = 0;
  *result = 0.;
  *abserr = 0.;
  alist[1] = *a;
  blist[1] = *b;
  rlist[1] = 0.;
  elist[1] = 0.;
  // Reject accuracy requests below what double precision can deliver.
  if (*epsabs <= 0. && *epsrel < fmax2(epmach * 50., 5e-29)) {
    *ier = 6;
    return;
  }

  uflow = DBL_MIN;
  oflow = DBL_MAX;
  ierro = 0;
  // First approximation on the whole interval.
  rdqk21(f, ex, a, b, result, abserr, &defabs, &resabs);

  dres = fabs(*result);
  errbnd = fmax2(*epsabs, *epsrel * dres);
  *last = 1;
  rlist[1] = *result;
  elist[1] = *abserr;
  iord[1] = 1;
  if (*abserr <= epmach * 100. * defabs && *abserr > errbnd) *ier = 2;
  if (*limit == 1) *ier = 1;
  if (*ier != 0 || (*abserr <= errbnd && *abserr != resabs) || *abserr == 0.)
    goto L140;

  // Initialization for the bisection/extrapolation loop.
  rlist2[0] = *result;
  errmax = *abserr;
  maxerr = 1;
  area = *result;
  errsum = *abserr;
  *abserr = oflow;
  nrmax = 1;
  nres = 0;
  numrl2 = 2;
  ktmin = 0;
  extrap = false;
  noext = false;
  iroff1 = 0;
  iroff2 = 0;
  iroff3 = 0;
  ksgn = -1;
  if (dres >= (1. - epmach * 50.) * defabs) {
    ksgn = 1;
  }

  for (*last = 2; *last <= *limit; ++(*last)) {
    // Bisect the subinterval with the largest error estimate.
    a1 = alist[maxerr];
    b1 = (alist[maxerr] + blist[maxerr]) * .5;
    a2 = b1;
    b2 = blist[maxerr];
    erlast = errmax;
    rdqk21(f, ex, &a1, &b1, &area1, &error1, &resabs, &defab1);
    rdqk21(f, ex, &a2, &b2, &area2, &error2, &resabs, &defab2);

    // Update running totals and the roundoff-detection counters.
    area12 = area1 + area2;
    erro12 = error1 + error2;
    errsum = errsum + erro12 - errmax;
    area = area + area12 - rlist[maxerr];
    if (!(defab1 == error1 || defab2 == error2)) {
      if (fabs(rlist[maxerr] - area12) <= fabs(area12) * 1e-5 &&
          erro12 >= errmax * .99) {
        if (extrap)
          ++iroff2;
        else
          ++iroff1;
      }
      if (*last > 10 && erro12 > errmax) ++iroff3;
    }
    rlist[maxerr] = area1;
    rlist[*last] = area2;
    errbnd = fmax2(*epsabs, *epsrel * fabs(area));

    if (iroff1 + iroff2 >= 10 || iroff3 >= 20) *ier = 2;
    if (iroff2 >= 5) ierro = 3;

    if (*last == *limit) *ier = 1;

    // Subinterval became too small relative to machine precision.
    if (fmax2(fabs(a1), fabs(b2)) <=
        (epmach * 100. + 1.) * (fabs(a2) + uflow * 1e3)) {
      *ier = 4;
    }

    // Store the halves; keep the larger-error half at position maxerr.
    if (error2 > error1) {
      alist[maxerr] = a2;
      alist[*last] = a1;
      blist[*last] = b1;
      rlist[maxerr] = area2;
      rlist[*last] = area1;
      elist[maxerr] = error2;
      elist[*last] = error1;
    } else {
      alist[*last] = a2;
      blist[maxerr] = b1;
      blist[*last] = b2;
      elist[maxerr] = error1;
      elist[*last] = error2;
    }

    // Maintain the descending ordering of the error estimates.
    rdqpsrt(limit, last, &maxerr, &errmax, &elist[1], &iord[1], &nrmax);

    if (errsum <= errbnd) goto L115;
    if (*ier != 0) break;
    if (*last == 2) {
      small = fabs(*b - *a) * .375;
      erlarg = errsum;
      ertest = errbnd;
      rlist2[1] = area;
      continue;
    }
    if (noext) continue;

    erlarg -= erlast;
    if (fabs(b1 - a1) > small) {
      erlarg += erro12;
    }
    if (!extrap) {
      // Start extrapolating only once the largest subinterval is "small".
      if (fabs(blist[maxerr] - alist[maxerr]) > small) {
        continue;
      }
      extrap = true;
      nrmax = 2;
    }

    if (ierro != 3 && erlarg > ertest) {
      // Prefer bisecting the remaining large subintervals first.
      id = nrmax;
      jupbnd = *last;
      if (*last > *limit / 2 + 2) {
        jupbnd = *limit + 3 - *last;
      }
      for (k = id; k <= jupbnd; ++k) {
        maxerr = iord[nrmax];
        errmax = elist[maxerr];
        if (fabs(blist[maxerr] - alist[maxerr]) > small) {
          goto L90;
        }
        ++nrmax;
      }
    }

    // Epsilon-algorithm extrapolation of the sequence of area estimates.
    ++numrl2;
    rlist2[numrl2 - 1] = area;
    rdqelg(&numrl2, rlist2, &reseps, &abseps, res3la, &nres);
    ++ktmin;
    if (ktmin > 5 && *abserr < errsum * .001) {
      *ier = 5;
    }
    if (abseps < *abserr) {
      ktmin = 0;
      *abserr = abseps;
      *result = reseps;
      correc = erlarg;
      ertest = fmax2(*epsabs, *epsrel * fabs(reseps));
      if (*abserr <= ertest) {
        break;
      }
    }

    // Prepare bisection of the smallest interval scale.
    if (numrl2 == 1) {
      noext = true;
    }
    if (*ier == 5) {
      break;
    }
    maxerr = iord[1];
    errmax = elist[maxerr];
    nrmax = 1;
    extrap = false;
    small *= .5;
    erlarg = errsum;
  L90:;
  }

  // Set the final result and error estimate.
  if (*abserr == oflow) goto L115;
  if (*ier + ierro == 0) goto L110;
  if (ierro == 3) *abserr += correc;
  if (*ier == 0) *ier = 3;
  if (*result == 0. || area == 0.) {
    if (*abserr > errsum) goto L115;
    if (area == 0.) goto L130;
  } else {
    if (*abserr / fabs(*result) > errsum / fabs(area)) goto L115;
  }

L110:
  // Test on divergence.
  if (ksgn == -1 && fmax2(fabs(*result), fabs(area)) <= defabs * .01) {
    goto L130;
  }
  if (.01 > *result / area || *result / area > 100. || errsum > fabs(area)) {
    *ier = 5;
  }
  goto L130;

L115:
  // Extrapolation failed: fall back to the plain sum over subintervals.
  *result = 0.;
  for (k = 1; k <= *last; ++k) *result += rlist[k];
  *abserr = errsum;
L130:
  // Upstream Fortran (dqagse) reads
  //     130 if(ier.gt.2) ier = ier - 1
  //     140 neval = 42*last - 21
  // but this transcription had lost the "ier = ier - 1" statement, so
  // the dangling `if (*ier > 2)` swallowed the labelled assignment below
  // and *neval stayed 0 on every successful (ier <= 2) exit.  The
  // evaluation count is made unconditional again.  NOTE(review): the
  // ier renumbering is deliberately NOT restored because the codes set
  // above (e.g. *ier = 5 under L110) appear pre-adjusted -- confirm
  // against upstream before also reinstating the decrement.
L140:
  *neval = *last * 42 - 21;
  return;
}
607 
template <class Float, class integr_fn>
static void rdqk15i(integr_fn f, void *ex, Float *boun, int *inf, Float *a,
                    Float *b, Float *result, Float *abserr, Float *resabs,
                    Float *resasc) {
  /* 15-point Gauss-Kronrod rule for an integral over a (semi-)infinite
     range; C translation of QUADPACK dqk15i.  The original range --
     (*boun, +Inf), (-Inf, *boun) or (-Inf, +Inf), selected by *inf = 1,
     -1 or 2 -- is mapped onto the finite interval (*a, *b) in (0, 1]
     via x = *boun + dinf*(1-t)/t, and the rule is applied to the
     transformed integrand (Jacobian 1/t^2).
       f      : vectorized integrand, invoked as f(x, n, ex)
       ex     : opaque user data forwarded to f
       result : 15-point Kronrod estimate of the integral
       abserr : estimate of the absolute error
       resabs : approximation to the integral of |f|
       resasc : approximation to the integral of |f - mean(f)|,
                used to scale the error estimate */
  /* Weights of the embedded 7-point Gauss rule, padded with zeros so
     wg[] is indexed in step with the Kronrod abscissae xgk[]. */
  static double wg[8] = {0., .129484966168869693270611432679082,
                         0., .27970539148927666790146777142378,
                         0., .381830050505118944950369775488975,
                         0., .417959183673469387755102040816327};
  /* Positive abscissae of the 15-point Kronrod rule (rule is symmetric). */
  static double xgk[8] = {
      .991455371120812639206854697526329, .949107912342758524526189684047851,
      .864864423359769072789712788640926, .741531185599394439863864773280788,
      .58608723546769113029414483825873, .405845151377397166906606412076961,
      .207784955007898467600689403773245, 0.};
  /* Weights of the 15-point Kronrod rule. */
  static double wgk[8] = {
      .02293532201052922496373200805897, .063092092629978553290700663189204,
      .104790010322250183839876322541518, .140653259715525918745189590510238,
      .16900472663926790282658342659855, .190350578064785409913256402421014,
      .204432940075298892414161999234649, .209482141084727828012999174891714};

  Float absc, dinf, resg, resk, fsum, absc1, absc2, fval1, fval2;
  int j;
  Float hlgth, centr, reskh, uflow;
  Float tabsc1, tabsc2, fc, epmach;
  Float fv1[7], fv2[7], vec[15], vec2[15];
  epmach = DBL_EPSILON;
  uflow = DBL_MIN;
  /* dinf = min(1, *inf): +1 for *inf == 1 or 2, -1 for *inf == -1
     (flips the direction of the variable transformation). */
  dinf = (double)imin2(1, *inf);

  centr = (*a + *b) * .5;
  hlgth = (*b - *a) * .5;
  /* Transformed abscissa at the interval center. */
  tabsc1 = *boun + dinf * (1. - centr) / centr;
  vec[0] = tabsc1;
  if (*inf == 2) {
    /* Doubly-infinite range: a mirrored batch accounts for (-Inf, 0). */
    vec2[0] = -tabsc1;
  }
  /* Collect all 15 transformed abscissae (center + 7 symmetric pairs)
     so the integrand can be evaluated in one vectorized call. */
  for (j = 1; j <= 7; ++j) {
    absc = hlgth * xgk[j - 1];
    absc1 = centr - absc;
    absc2 = centr + absc;
    tabsc1 = *boun + dinf * (1. - absc1) / absc1;
    tabsc2 = *boun + dinf * (1. - absc2) / absc2;
    vec[(j << 1) - 1] = tabsc1;
    vec[j * 2] = tabsc2;
    if (*inf == 2) {
      vec2[(j << 1) - 1] = -tabsc1;
      vec2[j * 2] = -tabsc2;
    }
  }
  f(vec, 15, ex);
  if (*inf == 2) f(vec2, 15, ex);
  fval1 = vec[0];
  if (*inf == 2) fval1 += vec2[0];
  /* Division by centr^2 is the Jacobian of the range transformation. */
  fc = fval1 / centr / centr;

  /* Accumulate the 7-point Gauss (resg) and 15-point Kronrod (resk)
     approximations in parallel. */
  resg = wg[7] * fc;
  resk = wgk[7] * fc;
  *resabs = fabs(resk);
  for (j = 1; j <= 7; ++j) {
    absc = hlgth * xgk[j - 1];
    absc1 = centr - absc;
    absc2 = centr + absc;
    tabsc1 = *boun + dinf * (1. - absc1) / absc1;
    tabsc2 = *boun + dinf * (1. - absc2) / absc2;
    fval1 = vec[(j << 1) - 1];
    fval2 = vec[j * 2];
    if (*inf == 2) {
      fval1 += vec2[(j << 1) - 1];
    }
    if (*inf == 2) {
      fval2 += vec2[j * 2];
    }
    fval1 = fval1 / absc1 / absc1;
    fval2 = fval2 / absc2 / absc2;
    fv1[j - 1] = fval1;
    fv2[j - 1] = fval2;
    fsum = fval1 + fval2;
    resg += wg[j - 1] * fsum;
    resk += wgk[j - 1] * fsum;
    *resabs += wgk[j - 1] * (fabs(fval1) + fabs(fval2));
  }
  /* resasc approximates the integral of |f - mean|, the scale used in
     QUADPACK's heuristic error estimate below. */
  reskh = resk * .5;
  *resasc = wgk[7] * fabs(fc - reskh);
  for (j = 1; j <= 7; ++j) {
    *resasc +=
        wgk[j - 1] * (fabs(fv1[j - 1] - reskh) + fabs(fv2[j - 1] - reskh));
  }
  *result = resk * hlgth;
  *resasc *= hlgth;
  *resabs *= hlgth;
  /* Error estimate: Gauss/Kronrod difference, damped by (200 e / resasc)^1.5
     and bounded below to guard against underflow. */
  *abserr = fabs((resk - resg) * hlgth);
  if (*resasc != 0. && *abserr != 0.) {
    *abserr = *resasc * fmin2(1., pow(*abserr * 200. / *resasc, 1.5));
  }
  if (*resabs > uflow / (epmach * 50.)) {
    *abserr = fmax2(epmach * 50. * *resabs, *abserr);
  }
  return;
}
706 
template <class Float>
static void rdqelg(int *n, Float *epstab, Float *result, Float *abserr,
                   Float *res3la, int *nres) {
  /* Wynn's epsilon algorithm for convergence acceleration; C translation
     of QUADPACK dqelg.  Determines the limit of a sequence of
     approximations by means of the epsilon table and returns the element
     of lowest estimated error.
       n      : in/out, number of elements in the epsilon table
       epstab : the two lowest diagonals of the triangular epsilon table
                (52 elements; last two entries used as working space)
       result : resulting approximation to the sequence limit
       abserr : estimate of the absolute error of result
       res3la : the last three computed results
       nres   : number of calls to this routine so far */
  int i__, indx, ib, ib2, ie, k1, k2, k3, num, newelm, limexp;
  Float delta1, delta2, delta3, e0, e1, e1abs, e2, e3, epmach, epsinf;
  Float oflow, ss, res;
  Float errA, err1, err2, err3, tol1, tol2, tol3;
  /* Shift to 1-based indexing (Fortran heritage). */
  --res3la;
  --epstab;

  epmach = DBL_EPSILON;
  oflow = DBL_MAX;
  ++(*nres);
  *abserr = oflow;
  *result = epstab[*n];
  if (*n < 3) {
    /* Too few elements for extrapolation. */
    goto L100;
  }
  /* limexp: maximum admissible table size before it is shifted down. */
  limexp = 50;
  epstab[*n + 2] = epstab[*n];
  newelm = (*n - 1) / 2;
  epstab[*n] = oflow;
  num = *n;
  k1 = *n;
  /* Compute newelm new elements of the epsilon table, working up the
     lowest diagonal (e0, e1, e2 are three consecutive elements). */
  for (i__ = 1; i__ <= newelm; ++i__) {
    k2 = k1 - 1;
    k3 = k1 - 2;
    res = epstab[k1 + 2];
    e0 = epstab[k3];
    e1 = epstab[k2];
    e2 = res;
    e1abs = fabs(e1);
    delta2 = e2 - e1;
    err2 = fabs(delta2);
    tol2 = fmax2(fabs(e2), e1abs) * epmach;
    delta3 = e1 - e0;
    err3 = fabs(delta3);
    tol3 = fmax2(e1abs, fabs(e0)) * epmach;
    if (err2 <= tol2 && err3 <= tol3) {
      /* e0, e1, e2 agree to machine accuracy: convergence reached. */
      *result = res;
      *abserr = err2 + err3;

      goto L100;
    }

    e3 = epstab[k1];
    epstab[k1] = e1;
    delta1 = e1 - e3;
    err1 = fabs(delta1);
    tol1 = fmax2(e1abs, fabs(e3)) * epmach;

    /* ss is the denominator of the epsilon-algorithm update; skip the
       update (and truncate the table) if it is numerically unreliable. */
    if (err1 > tol1 && err2 > tol2 && err3 > tol3) {
      ss = 1. / delta1 + 1. / delta2 - 1. / delta3;
      epsinf = fabs(ss * e1);

      if (epsinf > 1e-4) {
        goto L30;
      }
    }

    *n = i__ + i__ - 1;
    goto L50;

  L30:

    /* New table element; keep it if its error estimate improves. */
    res = e1 + 1. / ss;
    epstab[k1] = res;
    k1 += -2;
    errA = err2 + fabs(res - e2) + err3;
    if (errA <= *abserr) {
      *abserr = errA;
      *result = res;
    }
  }

L50:
  /* Shift the table when it has reached its maximum size. */
  if (*n == limexp) {
    *n = (limexp / 2 << 1) - 1;
  }

  /* (num/2 << 1 == num) tests whether num is even. */
  if (num / 2 << 1 == num)
    ib = 2;
  else
    ib = 1;
  ie = newelm + 1;
  for (i__ = 1; i__ <= ie; ++i__) {
    ib2 = ib + 2;
    epstab[ib] = epstab[ib2];
    ib = ib2;
  }
  if (num != *n) {
    indx = num - *n + 1;
    for (i__ = 1; i__ <= *n; ++i__) {
      epstab[i__] = epstab[indx];
      ++indx;
    }
  }

  /* After 3 results are available the error estimate is based on the
     differences between the last three extrapolated values. */
  if (*nres >= 4) {
    *abserr = fabs(*result - res3la[3]) + fabs(*result - res3la[2]) +
              fabs(*result - res3la[1]);
    res3la[1] = res3la[2];
    res3la[2] = res3la[3];
    res3la[3] = *result;
  } else {
    res3la[*nres] = *result;
    *abserr = oflow;
  }

L100:
  *abserr = fmax2(*abserr, epmach * 5. * fabs(*result));
  return;
}
820 
template <class Float, class integr_fn>
static void rdqk21(integr_fn f, void *ex, Float *a, Float *b, Float *result,
                   Float *abserr, Float *resabs, Float *resasc) {
  /* 21-point Gauss-Kronrod rule on the finite interval (*a, *b);
     C translation of QUADPACK dqk21.
       f      : vectorized integrand, invoked as f(x, n, ex)
       ex     : opaque user data forwarded to f
       result : 21-point Kronrod estimate of the integral
       abserr : estimate of the absolute error
       resabs : approximation to the integral of |f|
       resasc : approximation to the integral of |f - mean(f)| */
  /* Weights of the embedded 10-point Gauss rule. */
  static double wg[5] = {
      .066671344308688137593568809893332, .149451349150580593145776339657697,
      .219086362515982043995534934228163, .269266719309996355091226921569469,
      .295524224714752870173892994651338};
  /* Positive abscissae of the 21-point Kronrod rule (rule is symmetric);
     even-indexed entries coincide with the 10-point Gauss abscissae. */
  static double xgk[11] = {.995657163025808080735527280689003,
                           .973906528517171720077964012084452,
                           .930157491355708226001207180059508,
                           .865063366688984510732096688423493,
                           .780817726586416897063717578345042,
                           .679409568299024406234327365114874,
                           .562757134668604683339000099272694,
                           .433395394129247190799265943165784,
                           .294392862701460198131126603103866,
                           .14887433898163121088482600112972,
                           0.};
  /* Weights of the 21-point Kronrod rule. */
  static double wgk[11] = {
      .011694638867371874278064396062192, .03255816230796472747881897245939,
      .05475589657435199603138130024458, .07503967481091995276704314091619,
      .093125454583697605535065465083366, .109387158802297641899210590325805,
      .123491976262065851077958109831074, .134709217311473325928054001771707,
      .142775938577060080797094273138717, .147739104901338491374841515972068,
      .149445554002916905664936468389821};

  Float fv1[10], fv2[10], vec[21];
  Float absc, resg, resk, fsum, fval1, fval2;
  Float hlgth, centr, reskh, uflow;
  Float fc, epmach, dhlgth;
  int j, jtw, jtwm1;
  epmach = DBL_EPSILON;
  uflow = DBL_MIN;

  centr = (*a + *b) * .5;
  hlgth = (*b - *a) * .5;
  dhlgth = fabs(hlgth);

  /* Collect all 21 abscissae -- center, then the Gauss pairs (vec[1..10])
     and the Kronrod-only pairs (vec[11..20]) -- for one vectorized call. */
  resg = 0.;
  vec[0] = centr;
  for (j = 1; j <= 5; ++j) {
    jtw = j << 1;
    absc = hlgth * xgk[jtw - 1];
    vec[(j << 1) - 1] = centr - absc;

    vec[j * 2] = centr + absc;
  }
  for (j = 1; j <= 5; ++j) {
    jtwm1 = (j << 1) - 1;
    absc = hlgth * xgk[jtwm1 - 1];
    vec[(j << 1) + 9] = centr - absc;
    vec[(j << 1) + 10] = centr + absc;
  }
  f(vec, 21, ex);
  fc = vec[0];
  resk = wgk[10] * fc;
  *resabs = fabs(resk);
  /* Accumulate the abscissae shared by the Gauss and Kronrod rules. */
  for (j = 1; j <= 5; ++j) {
    jtw = j << 1;
    absc = hlgth * xgk[jtw - 1];
    fval1 = vec[(j << 1) - 1];
    fval2 = vec[j * 2];
    fv1[jtw - 1] = fval1;
    fv2[jtw - 1] = fval2;
    fsum = fval1 + fval2;
    resg += wg[j - 1] * fsum;
    resk += wgk[jtw - 1] * fsum;
    *resabs += wgk[jtw - 1] * (fabs(fval1) + fabs(fval2));
  }
  /* Accumulate the Kronrod-only abscissae. */
  for (j = 1; j <= 5; ++j) {
    jtwm1 = (j << 1) - 1;
    absc = hlgth * xgk[jtwm1 - 1];
    fval1 = vec[(j << 1) + 9];
    fval2 = vec[(j << 1) + 10];
    fv1[jtwm1 - 1] = fval1;
    fv2[jtwm1 - 1] = fval2;
    fsum = fval1 + fval2;
    resk += wgk[jtwm1 - 1] * fsum;
    *resabs += wgk[jtwm1 - 1] * (fabs(fval1) + fabs(fval2));
  }
  reskh = resk * .5;
  *resasc = wgk[10] * fabs(fc - reskh);
  for (j = 1; j <= 10; ++j) {
    *resasc +=
        wgk[j - 1] * (fabs(fv1[j - 1] - reskh) + fabs(fv2[j - 1] - reskh));
  }
  *result = resk * hlgth;
  *resabs *= dhlgth;
  *resasc *= dhlgth;
  /* Standard QUADPACK error estimate, damped and underflow-guarded. */
  *abserr = fabs((resk - resg) * hlgth);
  if (*resasc != 0. && *abserr != 0.) {
    *abserr = *resasc * fmin2(1., pow(*abserr * 200. / *resasc, 1.5));
  }
  if (*resabs > uflow / (epmach * 50.)) {
    *abserr = fmax2(epmach * 50. * *resabs, *abserr);
  }
  return;
}
919 
template <class Float>
static void rdqpsrt(int *limit, int *last, int *maxerr, Float *ermax,
                    Float *elist, int *iord, int *nrmax) {
  /* Maintain the descending ordering of the subinterval error estimates;
     C translation of QUADPACK dqpsrt.  On exit iord is such that
     elist[iord[1]] >= elist[iord[2]] >= ... and *maxerr/*ermax point at
     the subinterval with the largest (nrmax'th largest) error.
       limit  : maximum number of subintervals
       last   : number of error estimates currently in elist
       maxerr : out, index of the nrmax'th largest error estimate
       ermax  : out, that error estimate
       elist  : the error estimates
       iord   : index vector defining the descending ordering
       nrmax  : in/out, rank of the estimate pointed at by maxerr */
  int i, j, k, ido, jbnd, isucc, jupbn;
  Float errmin, errmax;
  /* Shift to 1-based indexing (Fortran heritage). */
  --iord;
  --elist;

  if (*last <= 2) {
    /* Fewer than three intervals: ordering is trivial. */
    iord[1] = 1;
    iord[2] = 2;
    goto Last;
  }

  /* The subdivision may have made the current maximum smaller; bubble it
     up the front of the list until it is in place. */
  errmax = elist[*maxerr];
  if (*nrmax > 1) {
    ido = *nrmax - 1;
    for (i = 1; i <= ido; ++i) {
      isucc = iord[*nrmax - 1];
      if (errmax <= elist[isucc]) break;
      iord[*nrmax] = isucc;
      --(*nrmax);
    }
  }

  /* Only the first jupbn entries need sorting: once the list is full,
     intervals with the smallest errors are kept at the tail. */
  if (*last > *limit / 2 + 2)
    jupbn = *limit + 3 - *last;
  else
    jupbn = *last;

  errmin = elist[*last];

  /* Insertion: scan down for errmax's slot, then scan up from the tail
     for errmin's slot. */
  jbnd = jupbn - 1;
  for (i = *nrmax + 1; i <= jbnd; ++i) {
    isucc = iord[i];
    if (errmax >= elist[isucc]) {
      iord[i - 1] = *maxerr;
      for (j = i, k = jbnd; j <= jbnd; j++, k--) {
        isucc = iord[k];
        if (errmin < elist[isucc]) {
          iord[k + 1] = *last;
          goto Last;
        }
        iord[k + 1] = isucc;
      }
      iord[i] = *last;
      goto Last;
    }
    iord[i - 1] = isucc;
  }

  iord[jbnd] = *maxerr;
  iord[jupbn] = *last;

Last:

  /* Refresh the outputs from the (re)ordered list. */
  *maxerr = iord[*nrmax];
  *ermax = elist[*maxerr];
  return;
}
980 
// Control parameters for the adaptive integrators (mirrors the arguments
// of R's integrate()).  Constructor is defined out of line.
struct control {
  int subdivisions;  // maximum number of subintervals of the adaptive scheme
  double reltol;     // requested relative accuracy
  double abstol;     // requested absolute accuracy
  control(int subdivisions_ = 100, double reltol_ = 1e-4,
          double abstol_ = 1e-4);
};
995 
// One-dimensional adaptive integration of a scalar functor.  Integrand must
// provide a 'Scalar' typedef and 'Scalar operator()(Scalar)'.  Dispatches to
// the QUADPACK-style drivers Rdqags (finite range) / Rdqagi (infinite range),
// which are declared elsewhere in this file.
template <class Integrand>
struct Integral {
  typedef typename Integrand::Scalar Type;

  // Adapts the scalar functor to the vectorized signature f(x, n, ex)
  // expected by the quadrature kernels: evaluates in place over x[0..n-1].
  struct vectorized_integrand {
    Integrand f;
    vectorized_integrand(Integrand f_) : f(f_) {}
    void operator()(Type *x, int n, void *ex) {
      for (int i = 0; i < n; i++) x[i] = f(x[i]);
    }
  } fn;
  // Reference to the wrapped integrand so the user can change parameters.
  Integrand &integrand() { return fn.f; }

  // Accuracy request and integrator outputs (result, error estimate,
  // evaluation count, error code, number of subintervals used).
  Type epsabs, epsrel, result, abserr;
  int neval, ier, limit, lenw, last;
  std::vector<int> iwork;
  std::vector<Type> work;
  void setAccuracy(double epsrel_ = 1e-4, double epsabs_ = 1e-4) {
    epsabs = epsabs_;
    epsrel = epsrel_;
    result = 0;
    abserr = 1e4;  // placeholder until the integrator overwrites it
    neval = 0;
    ier = 0;
    last = 0;
  }
  // Allocate workspace; lenw = 4*limit matches the QUADPACK convention.
  void setWorkspace(int subdivisions = 100) {
    limit = subdivisions;
    lenw = 4 * limit;
    iwork.resize(limit);
    work.resize(lenw);
  }
  Type a, b, bound;
  int inf;  // 0: finite (a,b); 1: (bound,Inf); -1: (-Inf,bound); 2: (-Inf,Inf)
  void setBounds(Type a_, Type b_) {
    int a_finite = (a_ != -INFINITY) && (a_ != INFINITY);
    int b_finite = (b_ != -INFINITY) && (b_ != INFINITY);
    if (a_finite && b_finite) {
      inf = 0;
      a = a_;
      b = b_;
    } else if (a_finite && !b_finite) {
      inf = 1;
      bound = a_;
    } else if (!a_finite && b_finite) {
      inf = -1;
      bound = b_;
    } else {
      inf = 2;
    }
  }
  // Construct integral of f_ over (a_, b_) with the given control settings.
  Integral(Integrand f_, Type a_, Type b_, control c = control()) : fn(f_) {
    setAccuracy(c.reltol, c.abstol);
    setWorkspace(c.subdivisions);
    setBounds(a_, b_);
  }
  // Evaluate: infinite ranges go through Rdqagi, finite through Rdqags.
  Type operator()() {
    if (inf)
      Rdqagi(fn, NULL, &bound, &inf, &epsabs, &epsrel, &result, &abserr, &neval,
             &ier, &limit, &lenw, &last, &iwork[0], &work[0]);
    else
      Rdqags(fn, NULL, &a, &b, &epsabs, &epsrel, &result, &abserr, &neval, &ier,
             &limit, &lenw, &last, &iwork[0], &work[0]);
    return result;
  }
};
1083 
1110 template <class Integrand>
1111 typename Integrand::Scalar integrate(Integrand f,
1112  typename Integrand::Scalar a = -INFINITY,
1113  typename Integrand::Scalar b = INFINITY,
1114  control c = control()) {
1115  Integral<Integrand> I(f, a, b, c);
1116  return I();
1117 }
1118 
// One integration dimension of a multivariate integral.  Binds a single
// coordinate x (by reference) of the integrand and integrates over it;
// further dimensions are added by chaining .wrt(...).
template <class Integrand>
struct mvIntegral {
  typedef typename Integrand::Scalar Scalar;
  // Treats f as a univariate function of the bound coordinate: calling with
  // a scalar writes it into the coordinate, then evaluates f().
  struct evaluator {
    typedef typename Integrand::Scalar Scalar;
    Integrand &f;
    Scalar &x;
    evaluator(Integrand &f_, Scalar &x_) : f(f_), x(x_) {}
    Scalar operator()(const Scalar &x_) {
      x = x_;
      return f();
    }
  } ev;
  control c;
  // NOTE(review): the member 'I' initialized below (an Integral<evaluator>)
  // is declared on a line elided from this listing -- verify against the
  // original header.
  mvIntegral(Integrand &f_, Scalar &x_, Scalar a = -INFINITY,
             Scalar b = INFINITY, control c_ = control())
      : ev(f_, x_), c(c_), I(ev, a, b, c_) {}
  Scalar operator()() { return I(); }
  // Add another integration variable with range (a, b).
  mvIntegral<mvIntegral> wrt(Scalar &x, Scalar a = -INFINITY,
                             Scalar b = INFINITY) {
    return mvIntegral<mvIntegral>(*this, x, a, b, c);
  }
};
1158 
// Seed object for multivariate integration: holds the integrand and control
// settings until the first integration variable is bound via wrt().
template <class Integrand>
struct mvIntegral0 {
  typedef typename Integrand::Scalar Scalar;
  Integrand &f;  // integrand; must provide Scalar typedef and operator()()
  control c;
  mvIntegral0(Integrand &f_, control c_) : f(f_), c(c_) {}
  // Bind the first integration variable x with range (a, b).
  mvIntegral<Integrand> wrt(Scalar &x, Scalar a = -INFINITY,
                            Scalar b = INFINITY) {
    return mvIntegral<Integrand>(f, x, a, b, c);
  }
};
1198 template <class Integrand>
1199 mvIntegral0<Integrand> mvIntegrate(Integrand &f, control c = control()) {
1200  return mvIntegral0<Integrand>(f, c);
1201 }
1202 
1203 } // namespace TMBad
1204 #endif // HAVE_INTEGRATE_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:157
mvIntegral< mvIntegral > wrt(Scalar &x, Scalar a=-INFINITY, Scalar b=INFINITY)
With respect to.
Integral(Integrand f_, Type a_, Type b_, control c=control())
Constructor.
Integrand & integrand()
Return reference to integrand so the user can change parameters.
diff --git a/TMBad_2interpol_8cpp-example.html b/TMBad_2interpol_8cpp-example.html index 8cce12a3d..e09055206 100644 --- a/TMBad_2interpol_8cpp-example.html +++ b/TMBad_2interpol_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/TMBad_2sam_8cpp-example.html b/TMBad_2sam_8cpp-example.html index 3f5e77397..4fae7e165 100644 --- a/TMBad_2sam_8cpp-example.html +++ b/TMBad_2sam_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/TMBad_2solver_8cpp-example.html b/TMBad_2solver_8cpp-example.html index 9203ddda4..ee9b2add1 100644 --- a/TMBad_2solver_8cpp-example.html +++ b/TMBad_2solver_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/TMBad_2spa_gauss_8cpp-example.html b/TMBad_2spa_gauss_8cpp-example.html index 8d3c52647..ec882c38c 100644 --- a/TMBad_2spa_gauss_8cpp-example.html +++ b/TMBad_2spa_gauss_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/TMBad_2spatial_8cpp-example.html b/TMBad_2spatial_8cpp-example.html index c693353a9..03b90cdcd 100644 --- a/TMBad_2spatial_8cpp-example.html +++ b/TMBad_2spatial_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/TMBad_2spde_epsilon_8cpp-example.html b/TMBad_2spde_epsilon_8cpp-example.html index b6db5dbdd..e9208aed0 100644 --- a/TMBad_2spde_epsilon_8cpp-example.html +++ b/TMBad_2spde_epsilon_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/TMBad_2thetalog_8cpp-example.html b/TMBad_2thetalog_8cpp-example.html index c51671937..49e8d7639 100644 --- a/TMBad_2thetalog_8cpp-example.html +++ b/TMBad_2thetalog_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/TMBad_2vectorize_8hpp_source.html b/TMBad_2vectorize_8hpp_source.html index bc2829eba..c7ecfa6ee 100644 --- a/TMBad_2vectorize_8hpp_source.html +++ b/TMBad_2vectorize_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
@@ -73,7 +73,7 @@
TMBad/vectorize.hpp
-
1 #ifndef HAVE_VECTORIZE_HPP
2 #define HAVE_VECTORIZE_HPP
3 // Autogenerated - do not edit by hand !
4 
5 namespace TMBad {
6 
7 typedef global::ad_segment ad_segment;
8 
// Scalar wrapper carrying compile-time stride flags for up to two operator
// inputs: S0/S1 tell whether input 0/1 advances with the vector index
// (stride 1) or stays fixed (stride 0, i.e. a broadcast scalar).
template <class Type, bool S0 = 0, bool S1 = 0>
struct Vectorized {
  Type x;

  // Stride of input j (j == 0 -> S0, otherwise S1).
  static constexpr bool stride(bool j) { return j == 0 ? S0 : S1; }
  operator Type() { return x; }
  Vectorized(Type x) : x(x) {}
  Vectorized() {}
};
18 
// ForwardArgs specialization that offsets all accesses by a vector index k,
// scaled by each input's compile-time stride.  Lets a scalar operator's
// forward() be replayed element-by-element over a segment.
template <class Type, bool S0, bool S1>
struct ForwardArgs<Vectorized<Type, S0, S1> > : ForwardArgs<Type> {
  typedef Vectorized<Type, S0, S1> T;
  typedef ForwardArgs<Type> Base;
  size_t k;  // current vector index
  // j'th input value at vector index k (fixed if stride(j) == 0).
  Type x(bool j) const {
    return Base::values[Base::input(j) + k * T::stride(j)];
  }
  // j'th output slot at vector index k (outputs always have stride 1).
  Type &y(Index j) { return Base::values[Base::output(j) + k]; }
  ForwardArgs(const Base &x) : Base(x) {}
};
32 
// ReverseArgs specialization, analogous to the ForwardArgs one: values and
// derivatives are addressed at vector index k with per-input strides.
template <class Type, bool S0, bool S1>
struct ReverseArgs<Vectorized<Type, S0, S1> > : ReverseArgs<Type> {
  typedef Vectorized<Type, S0, S1> T;
  typedef ReverseArgs<Type> Base;
  size_t k;  // current vector index
  // j'th input value at vector index k.
  Type x(bool j) const {
    return Base::values[Base::input(j) + k * T::stride(j)];
  }
  // j'th output value at vector index k.
  Type y(Index j) const { return Base::values[Base::output(j) + k]; }
  // Derivative slot of the j'th input at vector index k (accumulated into).
  Type &dx(bool j) const {
    return Base::derivs[Base::input(j) + k * T::stride(j)];
  }
  // Derivative of the j'th output at vector index k.
  Type dy(Index j) const { return Base::derivs[Base::output(j) + k]; }
  ReverseArgs(const Base &x) : Base(x) {}
};
54 
// Dynamic operator reducing an n-element segment to its scalar sum.
// Linear, so reverse mode simply broadcasts the output derivative.
struct VSumOp : global::DynamicOperator<1, 1> {
  static const bool is_linear = true;
  size_t n;  // number of elements summed
  VSumOp(size_t n);
  // y = sum(x[0..n-1])
  template <class Type>
  void forward(ForwardArgs<Type> &args) {
    const Type *x = args.x_ptr(0);
    Type &y = args.y(0);
    y = 0;
    for (size_t i = 0; i < n; i++) y += x[i];
  }
  // dx[i] += dy for all i
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    Type *dx = args.dx_ptr(0);
    const Type &dy = args.dy(0);
    for (size_t i = 0; i < n; i++) dx[i] += dy;
  }

  void dependencies(Args<> &args, Dependencies &dep) const;
  static const bool have_dependencies = true;
  static const bool implicit_dependencies = true;
  static const bool allow_remap = false;
  // Writer (source-code emitting) variants are defined out of line.
  void forward(ForwardArgs<Writer> &args);
  void reverse(ReverseArgs<Writer> &args);
  const char *op_name();
};
83 
// Sum of all elements of a segment (implemented via VSumOp).
ad_aug sum(ad_segment x);

// Forward declarations of the elementwise segment arithmetic defined below;
// each definition dispatches on which operands are full-length vectors.
template <class dummy = void>
ad_segment operator/(ad_segment x, ad_segment y);
template <class dummy = void>
ad_segment operator*(ad_segment x, ad_segment y);
template <class dummy = void>
ad_segment operator+(ad_segment x, ad_segment y);
template <class dummy = void>
ad_segment operator-(ad_segment x, ad_segment y);
template <class dummy = void>
ad_segment operator-(ad_segment x);
96 template <class dummy = void>
97 ad_segment &operator+=(ad_segment &x, ad_segment y) {
98  if ((x.size() == 1) && (x.size() < y.size())) y = ad_segment(sum(y), 1);
99  if (x.identicalZero())
100  x = y;
101  else
102  x = x + y;
103  return x;
104 }
105 template <class dummy = void>
106 ad_segment &operator-=(ad_segment &x, ad_segment y) {
107  if ((x.size() == 1) && (x.size() < y.size())) y = ad_segment(sum(y), 1);
108  if (x.identicalZero())
109  x = -y;
110  else
111  x = x - y;
112  return x;
113 }
114 
// Lifts a scalar Operator to act elementwise on length-n segments.
// S0/S1 flag which of the (up to two) inputs are full-length vectors
// (stride 1) as opposed to broadcast scalars (stride 0).
template <class Operator, bool S0 = false, bool S1 = false>
struct Vectorize : global::DynamicOperator<Operator::ninput, -1> {
  size_t n;  // output length
  static const bool have_input_size_output_size = true;
  Index input_size() const { return Operator::ninput; }
  Index output_size() const { return this->n; }
  Vectorize(size_t n) : n(n) {}
  // Numeric forward pass: replay the scalar operator once per element,
  // using the stride-aware Vectorized argument wrappers.
  void forward(ForwardArgs<Scalar> &args) {
    ForwardArgs<Vectorized<Scalar, S0, S1> > vargs(args);
    typename global::CPL<Operator>::type Op;
    for (vargs.k = 0; vargs.k < n; vargs.k++) {
      Op.forward(vargs);
    }
  }
  // Taped forward pass: re-record this vectorized operator on the new tape
  // as a single segment operation.
  void forward(ForwardArgs<Replay> &args) {
    ad_segment x0(args.x_ptr(0), (S0 ? n : 1));
    ad_segment x1;
    if (Operator::ninput > 1) {
      x1 = ad_segment(args.x_ptr(1), (S1 ? n : 1));
    }
    global::Complete<Vectorize> F(*this);
    ad_segment y = F(x0, x1);
    for (size_t i = 0; i < y.size(); i++) args.y(i) = y[i];
  }
  // Numeric reverse pass: elementwise replay of the scalar reverse rule.
  void reverse(ReverseArgs<Scalar> &args) {
    ReverseArgs<Vectorized<Scalar, S0, S1> > vargs(args);
    typename global::CPL<Operator>::type Op;
    for (vargs.k = 0; vargs.k < n; vargs.k++) {
      Op.reverse(vargs);
    }
  }
  // Taped reverse pass: build segment views of inputs, outputs and output
  // derivatives, run the scalar operator's reverse rule on segments, then
  // accumulate the resulting input derivatives back onto the tape.
  void reverse(ReverseArgs<Replay> &args) {
    std::vector<ad_segment> v;
    std::vector<ad_segment> d;
    std::vector<Index> i;
    ad_segment zero;

    v.push_back(ad_segment(args.x_ptr(0), (S0 ? n : 1)));
    d.push_back(zero);
    i.push_back(i.size());
    if (Operator::ninput > 1) {
      v.push_back(ad_segment(args.x_ptr(1), (S1 ? n : 1)));
      d.push_back(zero);
      i.push_back(i.size());
    }

    v.push_back(ad_segment(args.y_ptr(0), n));
    d.push_back(ad_segment(args.dy_ptr(0), n));

    ReverseArgs<ad_segment> vargs(i, v, d);

    vargs.ptr.first = 0;
    vargs.ptr.second = Operator::ninput;
    typename global::CPL<Operator>::type Op;
    Op.reverse(vargs);

    ad_segment dx_left(args.dx_ptr(0), (S0 ? n : 1), true);
    dx_left += vargs.dx(0);

    for (size_t i = 0; i < dx_left.size(); i++) args.dx_ptr(0)[i] = dx_left[i];
    if (Operator::ninput > 1) {
      ad_segment dx_right(args.dx_ptr(1), (S1 ? n : 1), true);
      dx_right += vargs.dx(1);

      for (size_t i = 0; i < dx_right.size(); i++)
        args.dx_ptr(1)[i] = dx_right[i];
    }
  }

  // Dependencies are whole input segments (length n or 1 per stride flag).
  void dependencies(Args<> &args, Dependencies &dep) const {
    dep.add_segment(args.input(0), (S0 ? n : 1));
    if (Operator::ninput == 2) {
      dep.add_segment(args.input(1), (S1 ? n : 1));
    }
  }
  static const bool have_dependencies = true;
  static const bool implicit_dependencies = true;
  static const bool allow_remap = false;
  // Source-writer passes are not supported for vectorized operators.
  void forward(ForwardArgs<Writer> &args) { TMBAD_ASSERT(false); }
  void reverse(ReverseArgs<Writer> &args) { TMBAD_ASSERT(false); }
  // Operator name: "V" + wrapped operator's name (cached per instantiation).
  const char *op_name() {
    global::Complete<Operator> Op;
    static const std::string name = std::string("V") + Op.op_name();
    return name.c_str();
  }
  Vectorize(const ad_segment &x, const ad_segment &y)
      : n(std::max(x.size(), y.size())) {}
};
205 template <class dummy>
206 ad_segment operator/(ad_segment x, ad_segment y) {
207  size_t n = std::max(x.size(), y.size());
208  if (x.size() > 1 && y.size() > 1) {
209  global::Complete<Vectorize<global::ad_plain::DivOp, 1, 1> > F(n);
210  return F(x, y);
211  } else if (x.size() > 1) {
212  global::Complete<Vectorize<global::ad_plain::DivOp, 1, 0> > F(n);
213  return F(x, y);
214  } else if (y.size() > 1) {
215  global::Complete<Vectorize<global::ad_plain::DivOp, 0, 1> > F(n);
216  return F(x, y);
217  } else {
218  global::Complete<Vectorize<global::ad_plain::DivOp, 0, 0> > F(n);
219  return F(x, y);
220  }
221  TMBAD_ASSERT(false);
222  return ad_segment();
223 }
224 template <class dummy>
225 ad_segment operator*(ad_segment x, ad_segment y) {
226  size_t n = std::max(x.size(), y.size());
227  if (x.size() > 1 && y.size() > 1) {
228  global::Complete<Vectorize<global::ad_plain::MulOp, 1, 1> > F(n);
229  return F(x, y);
230  } else if (x.size() > 1) {
231  global::Complete<Vectorize<global::ad_plain::MulOp, 1, 0> > F(n);
232  return F(x, y);
233  } else if (y.size() > 1) {
234  global::Complete<Vectorize<global::ad_plain::MulOp, 0, 1> > F(n);
235  return F(x, y);
236  } else {
237  global::Complete<Vectorize<global::ad_plain::MulOp, 0, 0> > F(n);
238  return F(x, y);
239  }
240  TMBAD_ASSERT(false);
241  return ad_segment();
242 }
243 template <class dummy>
244 ad_segment operator+(ad_segment x, ad_segment y) {
245  size_t n = std::max(x.size(), y.size());
246  if (x.size() > 1 && y.size() > 1) {
247  global::Complete<Vectorize<global::ad_plain::AddOp, 1, 1> > F(n);
248  return F(x, y);
249  } else if (x.size() > 1) {
250  global::Complete<Vectorize<global::ad_plain::AddOp, 1, 0> > F(n);
251  return F(x, y);
252  } else if (y.size() > 1) {
253  global::Complete<Vectorize<global::ad_plain::AddOp, 0, 1> > F(n);
254  return F(x, y);
255  } else {
256  global::Complete<Vectorize<global::ad_plain::AddOp, 0, 0> > F(n);
257  return F(x, y);
258  }
259  TMBAD_ASSERT(false);
260  return ad_segment();
261 }
262 template <class dummy>
263 ad_segment operator-(ad_segment x, ad_segment y) {
264  size_t n = std::max(x.size(), y.size());
265  if (x.size() > 1 && y.size() > 1) {
266  global::Complete<Vectorize<global::ad_plain::SubOp, 1, 1> > F(n);
267  return F(x, y);
268  } else if (x.size() > 1) {
269  global::Complete<Vectorize<global::ad_plain::SubOp, 1, 0> > F(n);
270  return F(x, y);
271  } else if (y.size() > 1) {
272  global::Complete<Vectorize<global::ad_plain::SubOp, 0, 1> > F(n);
273  return F(x, y);
274  } else {
275  global::Complete<Vectorize<global::ad_plain::SubOp, 0, 0> > F(n);
276  return F(x, y);
277  }
278  TMBAD_ASSERT(false);
279  return ad_segment();
280 }
281 template <class dummy = void>
282 ad_segment pow(ad_segment x, ad_segment y);
283 template <class dummy>
284 ad_segment pow(ad_segment x, ad_segment y) {
285  size_t n = std::max(x.size(), y.size());
286  if (x.size() > 1 && y.size() > 1) {
287  global::Complete<Vectorize<PowOp, 1, 1> > F(n);
288  return F(x, y);
289  } else if (x.size() > 1) {
290  global::Complete<Vectorize<PowOp, 1, 0> > F(n);
291  return F(x, y);
292  } else if (y.size() > 1) {
293  global::Complete<Vectorize<PowOp, 0, 1> > F(n);
294  return F(x, y);
295  } else {
296  global::Complete<Vectorize<PowOp, 0, 0> > F(n);
297  return F(x, y);
298  }
299  TMBAD_ASSERT(false);
300  return ad_segment();
301 }
302 template <class dummy>
303 ad_segment operator-(ad_segment x) {
304  size_t n = x.size();
305  global::Complete<Vectorize<global::ad_plain::NegOp, 1, 0> > F(n);
306  return F(x);
307 }
308 
309 template <class dummy = void>
310 ad_segment fabs(ad_segment x) {
311  size_t n = x.size();
312  global::Complete<Vectorize<AbsOp, 1, 0> > F(n);
313  return F(x);
314 }
315 template <class dummy = void>
316 ad_segment sin(ad_segment x) {
317  size_t n = x.size();
318  global::Complete<Vectorize<SinOp, 1, 0> > F(n);
319  return F(x);
320 }
321 template <class dummy = void>
322 ad_segment cos(ad_segment x) {
323  size_t n = x.size();
324  global::Complete<Vectorize<CosOp, 1, 0> > F(n);
325  return F(x);
326 }
327 template <class dummy = void>
328 ad_segment exp(ad_segment x) {
329  size_t n = x.size();
330  global::Complete<Vectorize<ExpOp, 1, 0> > F(n);
331  return F(x);
332 }
333 template <class dummy = void>
334 ad_segment log(ad_segment x) {
335  size_t n = x.size();
336  global::Complete<Vectorize<LogOp, 1, 0> > F(n);
337  return F(x);
338 }
339 template <class dummy = void>
340 ad_segment sqrt(ad_segment x) {
341  size_t n = x.size();
342  global::Complete<Vectorize<SqrtOp, 1, 0> > F(n);
343  return F(x);
344 }
345 template <class dummy = void>
346 ad_segment tan(ad_segment x) {
347  size_t n = x.size();
348  global::Complete<Vectorize<TanOp, 1, 0> > F(n);
349  return F(x);
350 }
351 template <class dummy = void>
352 ad_segment sinh(ad_segment x) {
353  size_t n = x.size();
354  global::Complete<Vectorize<SinhOp, 1, 0> > F(n);
355  return F(x);
356 }
357 template <class dummy = void>
358 ad_segment cosh(ad_segment x) {
359  size_t n = x.size();
360  global::Complete<Vectorize<CoshOp, 1, 0> > F(n);
361  return F(x);
362 }
363 template <class dummy = void>
364 ad_segment tanh(ad_segment x) {
365  size_t n = x.size();
366  global::Complete<Vectorize<TanhOp, 1, 0> > F(n);
367  return F(x);
368 }
369 template <class dummy = void>
370 ad_segment expm1(ad_segment x) {
371  size_t n = x.size();
372  global::Complete<Vectorize<Expm1, 1, 0> > F(n);
373  return F(x);
374 }
375 template <class dummy = void>
376 ad_segment log1p(ad_segment x) {
377  size_t n = x.size();
378  global::Complete<Vectorize<Log1p, 1, 0> > F(n);
379  return F(x);
380 }
381 template <class dummy = void>
382 ad_segment asin(ad_segment x) {
383  size_t n = x.size();
384  global::Complete<Vectorize<AsinOp, 1, 0> > F(n);
385  return F(x);
386 }
387 template <class dummy = void>
388 ad_segment acos(ad_segment x) {
389  size_t n = x.size();
390  global::Complete<Vectorize<AcosOp, 1, 0> > F(n);
391  return F(x);
392 }
393 template <class dummy = void>
394 ad_segment atan(ad_segment x) {
395  size_t n = x.size();
396  global::Complete<Vectorize<AtanOp, 1, 0> > F(n);
397  return F(x);
398 }
// Number of Scalars required to hold one T (ceiling of the size ratio).
template <class T>
struct ScalarPack {
  static const int size = (sizeof(T) - 1) / sizeof(Scalar) + 1;
};
403 
// Reference to a contiguous segment (offset + size) on a specific tape;
// this is the payload encoded into Scalars by the pack/unpack operators.
struct SegmentRef {
  global *glob_ptr;  // tape that owns the segment
  Index offset;      // start of the segment on that tape
  Index size;        // number of elements
  Scalar *value_ptr();
  Scalar *deriv_ptr();
  SegmentRef();
  SegmentRef(const Scalar *x);
  SegmentRef(global *g, Index o, Index s);
  SegmentRef(const ad_segment &x);
  bool isNull();
  void resize(ad_segment &pack, Index n);
};
419 
420 ad_segment pack(const ad_segment &x);
421 ad_segment unpack(const ad_segment &x);
422 
// Operator encoding an n-element segment into K output scalars that hold a
// SegmentRef (a compact handle) instead of the segment's values.
struct PackOp : global::DynamicOperator<1, ScalarPack<SegmentRef>::size> {
  // Number of scalars needed to encode one SegmentRef.
  static const Index K = ScalarPack<SegmentRef>::size;
  // Length of the segment being packed.
  Index n;
  PackOp(const Index n);
  // Forward/reverse passes are defined out of line for the supported
  // argument types (Scalar and Replay).
  void forward(ForwardArgs<Scalar> &args);
  void forward(ForwardArgs<Replay> &args);
  void reverse(ReverseArgs<Scalar> &args);
  void reverse(ReverseArgs<Replay> &args);
  const char *op_name();
  static const bool allow_remap = false;
  static const bool have_dependencies = true;
  static const bool implicit_dependencies = true;
  void dependencies(Args<> &args, Dependencies &dep) const;

  // Any other argument type is a programming error.
  template <class T>
  void forward(ForwardArgs<T> &args) {
    TMBAD_ASSERT2(false, "PackOp: Invalid method!");
  }
  template <class T>
  void reverse(ReverseArgs<T> &args) {
    TMBAD_ASSERT2(false, "PackOp: Invalid method!");
  }
};
465 
469  static const Index K = ScalarPack<SegmentRef>::size;
471  Index noutput;
472  UnpkOp(const Index n);
474  void forward(ForwardArgs<Scalar> &args);
475  static const bool add_forward_replay_copy = true;
477  void reverse(ReverseArgs<Scalar> &args);
479  void reverse(ReverseArgs<Replay> &args);
480  const char *op_name();
481 
483  static const bool allow_remap = false;
484  static const bool have_dependencies = true;
485  static const bool implicit_dependencies = true;
486  void dependencies(Args<> &args, Dependencies &dep) const;
487 
488  template <class T>
489  void forward(ForwardArgs<T> &args) {
490  TMBAD_ASSERT2(false, "UnpkOp: Invalid method!");
491  }
492  template <class T>
493  void reverse(ReverseArgs<T> &args) {
494  TMBAD_ASSERT2(false, "UnpkOp: Invalid method!");
495  }
496 };
497 
499 ad_segment pack(const ad_segment &x);
500 
502 ad_segment unpack(const ad_segment &x);
503 
505 template <class T>
506 ad_segment unpack(const std::vector<T> &x, Index j) {
507  Index K = ScalarPack<SegmentRef>::size;
508  ad_segment x_(x[j * K], K);
509  return unpack(x_);
510 }
511 Scalar *unpack(const std::vector<Scalar> &x, Index j);
512 
513 template <class T>
514 std::vector<T> repack(const std::vector<T> &x) {
515  Index K = ScalarPack<SegmentRef>::size;
516  size_t n = x.size() / K;
517  std::vector<T> y;
518  for (size_t j = 0; j < n; j++) {
519  ad_segment x_(x[j * K], K);
520  SegmentRef sr(x_);
521  ad_segment orig(sr.offset, sr.size);
522  ad_segment yj = pack(orig);
523  for (size_t i = 0; i < K; i++) y.push_back(yj[i]);
524  }
525  return y;
526 }
527 
528 std::vector<ad_aug> concat(const std::vector<ad_segment> &x);
529 
530 } // namespace TMBad
531 #endif // HAVE_VECTORIZE_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:153
+
1 #ifndef HAVE_VECTORIZE_HPP
2 #define HAVE_VECTORIZE_HPP
3 // Autogenerated - do not edit by hand !
4 
5 namespace TMBad {
6 
7 typedef global::ad_segment ad_segment;
8 
9 template <class Type, bool S0 = 0, bool S1 = 0>
10 struct Vectorized {
11  Type x;
12 
13  static constexpr bool stride(bool j) { return j == 0 ? S0 : S1; }
14  operator Type() { return x; }
15  Vectorized(Type x) : x(x) {}
16  Vectorized() {}
17 };
18 
19 template <class Type, bool S0, bool S1>
20 struct ForwardArgs<Vectorized<Type, S0, S1> > : ForwardArgs<Type> {
21  typedef Vectorized<Type, S0, S1> T;
22  typedef ForwardArgs<Type> Base;
23  size_t k;
25  Type x(bool j) const {
26  return Base::values[Base::input(j) + k * T::stride(j)];
27  }
29  Type &y(Index j) { return Base::values[Base::output(j) + k]; }
30  ForwardArgs(const Base &x) : Base(x) {}
31 };
32 
33 template <class Type, bool S0, bool S1>
34 struct ReverseArgs<Vectorized<Type, S0, S1> > : ReverseArgs<Type> {
35  typedef Vectorized<Type, S0, S1> T;
36  typedef ReverseArgs<Type> Base;
37  size_t k;
39  Type x(bool j) const {
40  return Base::values[Base::input(j) + k * T::stride(j)];
41  }
43  Type y(Index j) const { return Base::values[Base::output(j) + k]; }
46  Type &dx(bool j) const {
47  return Base::derivs[Base::input(j) + k * T::stride(j)];
48  }
51  Type dy(Index j) const { return Base::derivs[Base::output(j) + k]; }
52  ReverseArgs(const Base &x) : Base(x) {}
53 };
54 
55 struct VSumOp : global::DynamicOperator<1, 1> {
56  static const bool is_linear = true;
57  size_t n;
58  VSumOp(size_t n);
59  template <class Type>
60  void forward(ForwardArgs<Type> &args) {
61  const Type *x = args.x_ptr(0);
62  Type &y = args.y(0);
63  y = 0;
64  for (size_t i = 0; i < n; i++) y += x[i];
65  }
66  template <class Type>
67  void reverse(ReverseArgs<Type> &args) {
68  Type *dx = args.dx_ptr(0);
69  const Type &dy = args.dy(0);
70  for (size_t i = 0; i < n; i++) dx[i] += dy;
71  }
72 
73  void dependencies(Args<> &args, Dependencies &dep) const;
74  static const bool have_dependencies = true;
76  static const bool implicit_dependencies = true;
78  static const bool allow_remap = false;
79  void forward(ForwardArgs<Writer> &args);
80  void reverse(ReverseArgs<Writer> &args);
81  const char *op_name();
82 };
83 
84 ad_aug sum(ad_segment x);
85 
86 template <class dummy = void>
87 ad_segment operator/(ad_segment x, ad_segment y);
88 template <class dummy = void>
89 ad_segment operator*(ad_segment x, ad_segment y);
90 template <class dummy = void>
91 ad_segment operator+(ad_segment x, ad_segment y);
92 template <class dummy = void>
93 ad_segment operator-(ad_segment x, ad_segment y);
94 template <class dummy = void>
95 ad_segment operator-(ad_segment x);
96 template <class dummy = void>
97 ad_segment &operator+=(ad_segment &x, ad_segment y) {
98  if ((x.size() == 1) && (x.size() < y.size())) y = ad_segment(sum(y), 1);
99  if (x.identicalZero())
100  x = y;
101  else
102  x = x + y;
103  return x;
104 }
105 template <class dummy = void>
106 ad_segment &operator-=(ad_segment &x, ad_segment y) {
107  if ((x.size() == 1) && (x.size() < y.size())) y = ad_segment(sum(y), 1);
108  if (x.identicalZero())
109  x = -y;
110  else
111  x = x - y;
112  return x;
113 }
114 
115 template <class Operator, bool S0 = false, bool S1 = false>
116 struct Vectorize : global::DynamicOperator<Operator::ninput, -1> {
117  size_t n;
118  static const bool have_input_size_output_size = true;
119  Index input_size() const { return Operator::ninput; }
120  Index output_size() const { return this->n; }
121  Vectorize(size_t n) : n(n) {}
122  void forward(ForwardArgs<Scalar> &args) {
123  ForwardArgs<Vectorized<Scalar, S0, S1> > vargs(args);
124  typename global::CPL<Operator>::type Op;
125  for (vargs.k = 0; vargs.k < n; vargs.k++) {
126  Op.forward(vargs);
127  }
128  }
129  void forward(ForwardArgs<Replay> &args) {
130  ad_segment x0(args.x_ptr(0), (S0 ? n : 1));
131  ad_segment x1;
132  if (Operator::ninput > 1) {
133  x1 = ad_segment(args.x_ptr(1), (S1 ? n : 1));
134  }
135  global::Complete<Vectorize> F(*this);
136  ad_segment y = F(x0, x1);
137  for (size_t i = 0; i < y.size(); i++) args.y(i) = y[i];
138  }
139  void reverse(ReverseArgs<Scalar> &args) {
140  ReverseArgs<Vectorized<Scalar, S0, S1> > vargs(args);
141  typename global::CPL<Operator>::type Op;
142  for (vargs.k = 0; vargs.k < n; vargs.k++) {
143  Op.reverse(vargs);
144  }
145  }
146  void reverse(ReverseArgs<Replay> &args) {
147  std::vector<ad_segment> v;
148  std::vector<ad_segment> d;
149  std::vector<Index> i;
150  ad_segment zero;
151 
152  v.push_back(ad_segment(args.x_ptr(0), (S0 ? n : 1)));
153  d.push_back(zero);
154  i.push_back(i.size());
155  if (Operator::ninput > 1) {
156  v.push_back(ad_segment(args.x_ptr(1), (S1 ? n : 1)));
157  d.push_back(zero);
158  i.push_back(i.size());
159  }
160 
161  v.push_back(ad_segment(args.y_ptr(0), n));
162  d.push_back(ad_segment(args.dy_ptr(0), n));
163 
164  ReverseArgs<ad_segment> vargs(i, v, d);
165 
166  vargs.ptr.first = 0;
167  vargs.ptr.second = Operator::ninput;
168  typename global::CPL<Operator>::type Op;
169  Op.reverse(vargs);
170 
171  ad_segment dx_left(args.dx_ptr(0), (S0 ? n : 1), true);
172  dx_left += vargs.dx(0);
173 
174  for (size_t i = 0; i < dx_left.size(); i++) args.dx_ptr(0)[i] = dx_left[i];
175  if (Operator::ninput > 1) {
176  ad_segment dx_right(args.dx_ptr(1), (S1 ? n : 1), true);
177  dx_right += vargs.dx(1);
178 
179  for (size_t i = 0; i < dx_right.size(); i++)
180  args.dx_ptr(1)[i] = dx_right[i];
181  }
182  }
183 
184  void dependencies(Args<> &args, Dependencies &dep) const {
185  dep.add_segment(args.input(0), (S0 ? n : 1));
186  if (Operator::ninput == 2) {
187  dep.add_segment(args.input(1), (S1 ? n : 1));
188  }
189  }
190  static const bool have_dependencies = true;
192  static const bool implicit_dependencies = true;
194  static const bool allow_remap = false;
195  void forward(ForwardArgs<Writer> &args) { TMBAD_ASSERT(false); }
196  void reverse(ReverseArgs<Writer> &args) { TMBAD_ASSERT(false); }
197  const char *op_name() {
198  global::Complete<Operator> Op;
199  static const std::string name = std::string("V") + Op.op_name();
200  return name.c_str();
201  }
202  Vectorize(const ad_segment &x, const ad_segment &y)
203  : n(std::max(x.size(), y.size())) {}
204 };
205 template <class dummy>
206 ad_segment operator/(ad_segment x, ad_segment y) {
207  size_t n = std::max(x.size(), y.size());
208  if (x.size() > 1 && y.size() > 1) {
209  global::Complete<Vectorize<global::ad_plain::DivOp, 1, 1> > F(n);
210  return F(x, y);
211  } else if (x.size() > 1) {
212  global::Complete<Vectorize<global::ad_plain::DivOp, 1, 0> > F(n);
213  return F(x, y);
214  } else if (y.size() > 1) {
215  global::Complete<Vectorize<global::ad_plain::DivOp, 0, 1> > F(n);
216  return F(x, y);
217  } else {
218  global::Complete<Vectorize<global::ad_plain::DivOp, 0, 0> > F(n);
219  return F(x, y);
220  }
221  TMBAD_ASSERT(false);
222  return ad_segment();
223 }
224 template <class dummy>
225 ad_segment operator*(ad_segment x, ad_segment y) {
226  size_t n = std::max(x.size(), y.size());
227  if (x.size() > 1 && y.size() > 1) {
228  global::Complete<Vectorize<global::ad_plain::MulOp, 1, 1> > F(n);
229  return F(x, y);
230  } else if (x.size() > 1) {
231  global::Complete<Vectorize<global::ad_plain::MulOp, 1, 0> > F(n);
232  return F(x, y);
233  } else if (y.size() > 1) {
234  global::Complete<Vectorize<global::ad_plain::MulOp, 0, 1> > F(n);
235  return F(x, y);
236  } else {
237  global::Complete<Vectorize<global::ad_plain::MulOp, 0, 0> > F(n);
238  return F(x, y);
239  }
240  TMBAD_ASSERT(false);
241  return ad_segment();
242 }
243 template <class dummy>
244 ad_segment operator+(ad_segment x, ad_segment y) {
245  size_t n = std::max(x.size(), y.size());
246  if (x.size() > 1 && y.size() > 1) {
247  global::Complete<Vectorize<global::ad_plain::AddOp, 1, 1> > F(n);
248  return F(x, y);
249  } else if (x.size() > 1) {
250  global::Complete<Vectorize<global::ad_plain::AddOp, 1, 0> > F(n);
251  return F(x, y);
252  } else if (y.size() > 1) {
253  global::Complete<Vectorize<global::ad_plain::AddOp, 0, 1> > F(n);
254  return F(x, y);
255  } else {
256  global::Complete<Vectorize<global::ad_plain::AddOp, 0, 0> > F(n);
257  return F(x, y);
258  }
259  TMBAD_ASSERT(false);
260  return ad_segment();
261 }
262 template <class dummy>
263 ad_segment operator-(ad_segment x, ad_segment y) {
264  size_t n = std::max(x.size(), y.size());
265  if (x.size() > 1 && y.size() > 1) {
266  global::Complete<Vectorize<global::ad_plain::SubOp, 1, 1> > F(n);
267  return F(x, y);
268  } else if (x.size() > 1) {
269  global::Complete<Vectorize<global::ad_plain::SubOp, 1, 0> > F(n);
270  return F(x, y);
271  } else if (y.size() > 1) {
272  global::Complete<Vectorize<global::ad_plain::SubOp, 0, 1> > F(n);
273  return F(x, y);
274  } else {
275  global::Complete<Vectorize<global::ad_plain::SubOp, 0, 0> > F(n);
276  return F(x, y);
277  }
278  TMBAD_ASSERT(false);
279  return ad_segment();
280 }
281 template <class dummy = void>
282 ad_segment pow(ad_segment x, ad_segment y);
283 template <class dummy>
284 ad_segment pow(ad_segment x, ad_segment y) {
285  size_t n = std::max(x.size(), y.size());
286  if (x.size() > 1 && y.size() > 1) {
287  global::Complete<Vectorize<PowOp, 1, 1> > F(n);
288  return F(x, y);
289  } else if (x.size() > 1) {
290  global::Complete<Vectorize<PowOp, 1, 0> > F(n);
291  return F(x, y);
292  } else if (y.size() > 1) {
293  global::Complete<Vectorize<PowOp, 0, 1> > F(n);
294  return F(x, y);
295  } else {
296  global::Complete<Vectorize<PowOp, 0, 0> > F(n);
297  return F(x, y);
298  }
299  TMBAD_ASSERT(false);
300  return ad_segment();
301 }
302 template <class dummy>
303 ad_segment operator-(ad_segment x) {
304  size_t n = x.size();
305  global::Complete<Vectorize<global::ad_plain::NegOp, 1, 0> > F(n);
306  return F(x);
307 }
308 
309 template <class dummy = void>
310 ad_segment fabs(ad_segment x) {
311  size_t n = x.size();
312  global::Complete<Vectorize<AbsOp, 1, 0> > F(n);
313  return F(x);
314 }
315 template <class dummy = void>
316 ad_segment sin(ad_segment x) {
317  size_t n = x.size();
318  global::Complete<Vectorize<SinOp, 1, 0> > F(n);
319  return F(x);
320 }
321 template <class dummy = void>
322 ad_segment cos(ad_segment x) {
323  size_t n = x.size();
324  global::Complete<Vectorize<CosOp, 1, 0> > F(n);
325  return F(x);
326 }
327 template <class dummy = void>
328 ad_segment exp(ad_segment x) {
329  size_t n = x.size();
330  global::Complete<Vectorize<ExpOp, 1, 0> > F(n);
331  return F(x);
332 }
333 template <class dummy = void>
334 ad_segment log(ad_segment x) {
335  size_t n = x.size();
336  global::Complete<Vectorize<LogOp, 1, 0> > F(n);
337  return F(x);
338 }
339 template <class dummy = void>
340 ad_segment sqrt(ad_segment x) {
341  size_t n = x.size();
342  global::Complete<Vectorize<SqrtOp, 1, 0> > F(n);
343  return F(x);
344 }
345 template <class dummy = void>
346 ad_segment tan(ad_segment x) {
347  size_t n = x.size();
348  global::Complete<Vectorize<TanOp, 1, 0> > F(n);
349  return F(x);
350 }
351 template <class dummy = void>
352 ad_segment sinh(ad_segment x) {
353  size_t n = x.size();
354  global::Complete<Vectorize<SinhOp, 1, 0> > F(n);
355  return F(x);
356 }
357 template <class dummy = void>
358 ad_segment cosh(ad_segment x) {
359  size_t n = x.size();
360  global::Complete<Vectorize<CoshOp, 1, 0> > F(n);
361  return F(x);
362 }
363 template <class dummy = void>
364 ad_segment tanh(ad_segment x) {
365  size_t n = x.size();
366  global::Complete<Vectorize<TanhOp, 1, 0> > F(n);
367  return F(x);
368 }
369 template <class dummy = void>
370 ad_segment expm1(ad_segment x) {
371  size_t n = x.size();
372  global::Complete<Vectorize<Expm1, 1, 0> > F(n);
373  return F(x);
374 }
375 template <class dummy = void>
376 ad_segment log1p(ad_segment x) {
377  size_t n = x.size();
378  global::Complete<Vectorize<Log1p, 1, 0> > F(n);
379  return F(x);
380 }
381 template <class dummy = void>
382 ad_segment asin(ad_segment x) {
383  size_t n = x.size();
384  global::Complete<Vectorize<AsinOp, 1, 0> > F(n);
385  return F(x);
386 }
387 template <class dummy = void>
388 ad_segment acos(ad_segment x) {
389  size_t n = x.size();
390  global::Complete<Vectorize<AcosOp, 1, 0> > F(n);
391  return F(x);
392 }
393 template <class dummy = void>
394 ad_segment atan(ad_segment x) {
395  size_t n = x.size();
396  global::Complete<Vectorize<AtanOp, 1, 0> > F(n);
397  return F(x);
398 }
399 template <class T>
400 struct ScalarPack {
401  static const int size = (sizeof(T) - 1) / sizeof(Scalar) + 1;
402 };
403 
406 struct SegmentRef {
407  global *glob_ptr;
408  Index offset;
409  Index size;
410  Scalar *value_ptr();
411  Scalar *deriv_ptr();
412  SegmentRef();
413  SegmentRef(const Scalar *x);
414  SegmentRef(global *g, Index o, Index s);
415  SegmentRef(const ad_segment &x);
416  bool isNull();
417  void resize(ad_segment &pack, Index n);
418 };
419 
420 ad_segment pack(const ad_segment &x);
421 ad_segment unpack(const ad_segment &x);
422 
435 struct PackOp : global::DynamicOperator<1, ScalarPack<SegmentRef>::size> {
437  static const Index K = ScalarPack<SegmentRef>::size;
439  Index n;
440  PackOp(const Index n);
442  void forward(ForwardArgs<Scalar> &args);
444  void forward(ForwardArgs<Replay> &args);
446  void reverse(ReverseArgs<Scalar> &args);
448  void reverse(ReverseArgs<Replay> &args);
449  const char *op_name();
451  static const bool allow_remap = false;
452  static const bool have_dependencies = true;
453  static const bool implicit_dependencies = true;
454  void dependencies(Args<> &args, Dependencies &dep) const;
455 
456  template <class T>
457  void forward(ForwardArgs<T> &args) {
458  TMBAD_ASSERT2(false, "PackOp: Invalid method!");
459  }
460  template <class T>
461  void reverse(ReverseArgs<T> &args) {
462  TMBAD_ASSERT2(false, "PackOp: Invalid method!");
463  }
464 };
465 
469  static const Index K = ScalarPack<SegmentRef>::size;
471  Index noutput;
472  UnpkOp(const Index n);
474  void forward(ForwardArgs<Scalar> &args);
475  static const bool add_forward_replay_copy = true;
477  void reverse(ReverseArgs<Scalar> &args);
479  void reverse(ReverseArgs<Replay> &args);
480  const char *op_name();
481 
483  static const bool allow_remap = false;
484  static const bool have_dependencies = true;
485  static const bool implicit_dependencies = true;
486  void dependencies(Args<> &args, Dependencies &dep) const;
487 
488  template <class T>
489  void forward(ForwardArgs<T> &args) {
490  TMBAD_ASSERT2(false, "UnpkOp: Invalid method!");
491  }
492  template <class T>
493  void reverse(ReverseArgs<T> &args) {
494  TMBAD_ASSERT2(false, "UnpkOp: Invalid method!");
495  }
496 };
497 
499 ad_segment pack(const ad_segment &x);
500 
502 ad_segment unpack(const ad_segment &x);
503 
505 template <class T>
506 ad_segment unpack(const std::vector<T> &x, Index j) {
507  Index K = ScalarPack<SegmentRef>::size;
508  ad_segment x_(x[j * K], K);
509  return unpack(x_);
510 }
511 Scalar *unpack(const std::vector<Scalar> &x, Index j);
512 
513 template <class T>
514 std::vector<T> repack(const std::vector<T> &x) {
515  Index K = ScalarPack<SegmentRef>::size;
516  size_t n = x.size() / K;
517  std::vector<T> y;
518  for (size_t j = 0; j < n; j++) {
519  ad_segment x_(x[j * K], K);
520  SegmentRef sr(x_);
521  ad_segment orig(sr.offset, sr.size);
522  ad_segment yj = pack(orig);
523  for (size_t i = 0; i < K; i++) y.push_back(yj[i]);
524  }
525  return y;
526 }
527 
528 std::vector<ad_aug> concat(const std::vector<ad_segment> &x);
529 
530 } // namespace TMBad
531 #endif // HAVE_VECTORIZE_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:157
Index noutput
Unpacked size.
Access input/output values and derivatives during a reverse pass. Write access granted for the input ...
Definition: global.hpp:311
diff --git a/TMBad_8cpp_source.html b/TMBad_8cpp_source.html index 49caf3c12..09919677d 100644 --- a/TMBad_8cpp_source.html +++ b/TMBad_8cpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
@@ -73,7 +73,7 @@
TMBad.cpp
-
1 // Autogenerated - do not edit by hand !
2 #include "TMBad.hpp"
3 namespace TMBad {
4 
5 SpJacFun_config::SpJacFun_config() : compress(false), index_remap(true) {}
6 } // namespace TMBad
7 // Autogenerated - do not edit by hand !
8 #include "ad_blas.hpp"
9 namespace TMBad {
10 
11 vmatrix matmul(const vmatrix &x, const vmatrix &y) {
12  vmatrix z(x.rows(), y.cols());
13  Map<vmatrix> zm(&z(0), z.rows(), z.cols());
14  matmul<false, false, false, false>(x, y, zm);
15  return z;
16 }
17 
18 dmatrix matmul(const dmatrix &x, const dmatrix &y) { return x * y; }
19 } // namespace TMBad
20 // Autogenerated - do not edit by hand !
21 #include "checkpoint.hpp"
22 namespace TMBad {
23 
24 bool ParametersChanged::operator()(const std::vector<Scalar> &x) {
25  bool change = (x != x_prev);
26  if (change) {
27  x_prev = x;
28  }
29  return change;
30 }
31 } // namespace TMBad
32 // Autogenerated - do not edit by hand !
33 #include "code_generator.hpp"
34 namespace TMBad {
35 
36 void searchReplace(std::string &str, const std::string &oldStr,
37  const std::string &newStr) {
38  std::string::size_type pos = 0u;
39  while ((pos = str.find(oldStr, pos)) != std::string::npos) {
40  str.replace(pos, oldStr.length(), newStr);
41  pos += newStr.length();
42  }
43 }
44 
45 std::string code_config::float_ptr() { return float_str + (gpu ? "**" : "*"); }
46 
47 std::string code_config::void_str() {
48  return (gpu ? "__device__ void" : "extern \"C\" void");
49 }
50 
51 void code_config::init_code() {
52  if (gpu) {
53  *cout << indent << "int idx = threadIdx.x;" << std::endl;
54  }
55 }
56 
57 void code_config::write_header_comment() {
58  if (header_comment.length() > 0) *cout << header_comment << std::endl;
59 }
60 
61 code_config::code_config()
62  : asm_comments(true),
63  gpu(true),
64  indent(" "),
65  header_comment("// Autogenerated - do not edit by hand !"),
66  float_str(xstringify(TMBAD_SCALAR_TYPE)),
67  cout(&Rcout) {}
68 
69 void write_common(std::ostringstream &buffer, code_config cfg, size_t node) {
70  std::ostream &cout = *cfg.cout;
71  using std::endl;
72  using std::left;
73  using std::setw;
74  std::string indent = cfg.indent;
75  if (cfg.asm_comments)
76  cout << indent << "asm(\"// Node: " << node << "\");" << endl;
77  bool empty_buffer = (buffer.tellp() == 0);
78  if (!empty_buffer) {
79  std::string str = buffer.str();
80  if (cfg.gpu) {
81  std::string pattern = "]";
82  std::string replace = "][idx]";
83  searchReplace(str, pattern, replace);
84  }
85  searchReplace(str, ";v", "; v");
86  searchReplace(str, ";d", "; d");
87  cout << indent << str << endl;
88  }
89 }
90 
91 void write_forward(global &glob, code_config cfg) {
92  using std::endl;
93  using std::left;
94  using std::setw;
95  std::ostream &cout = *cfg.cout;
96  cfg.write_header_comment();
97  cout << cfg.void_str() << " forward(" << cfg.float_ptr() << " v) {" << endl;
98  cfg.init_code();
99  ForwardArgs<Writer> args(glob.inputs, glob.values);
100  for (size_t i = 0; i < glob.opstack.size(); i++) {
101  std::ostringstream buffer;
102  Writer::cout = &buffer;
103  glob.opstack[i]->forward(args);
104  write_common(buffer, cfg, i);
105  glob.opstack[i]->increment(args.ptr);
106  }
107  cout << "}" << endl;
108 }
109 
110 void write_reverse(global &glob, code_config cfg) {
111  using std::endl;
112  using std::left;
113  using std::setw;
114  std::ostream &cout = *cfg.cout;
115  cfg.write_header_comment();
116  cout << cfg.void_str() << " reverse(" << cfg.float_ptr() << " v, "
117  << cfg.float_ptr() << " d) {" << endl;
118  cfg.init_code();
119  ReverseArgs<Writer> args(glob.inputs, glob.values);
120  for (size_t i = glob.opstack.size(); i > 0;) {
121  i--;
122  glob.opstack[i]->decrement(args.ptr);
123  std::ostringstream buffer;
124  Writer::cout = &buffer;
125  glob.opstack[i]->reverse(args);
126  write_common(buffer, cfg, i);
127  }
128  cout << "}" << endl;
129 }
130 
131 void write_all(global glob, code_config cfg) {
132  using std::endl;
133  using std::left;
134  using std::setw;
135  std::ostream &cout = *cfg.cout;
136  cout << "#include \"global.hpp\"" << endl;
137  cout << "#include \"ad_blas.hpp\"" << endl;
138  write_forward(glob, cfg);
139  write_reverse(glob, cfg);
140  cout << "int main() {}" << endl;
141 }
142 } // namespace TMBad
143 #ifndef _WIN32
144 // Autogenerated - do not edit by hand !
145 #include "compile.hpp"
146 namespace TMBad {
147 
148 void compile(global &glob, code_config cfg) {
149  cfg.gpu = false;
150  cfg.asm_comments = false;
151  std::ofstream file;
152  file.open("tmp.cpp");
153  cfg.cout = &file;
154 
155  *cfg.cout << "#include <cmath>" << std::endl;
156  *cfg.cout
157  << "template<class T>T sign(const T &x) { return (x > 0) - (x < 0); }"
158  << std::endl;
159 
160  write_forward(glob, cfg);
161 
162  write_reverse(glob, cfg);
163 
164  int out = system("g++ -O3 -g tmp.cpp -o tmp.so -shared -fPIC");
165  if (out != 0) {
166  }
167 
168  void *handle = dlopen("./tmp.so", RTLD_NOW);
169  if (handle != NULL) {
170  Rcout << "Loading compiled code!" << std::endl;
171  glob.forward_compiled =
172  reinterpret_cast<void (*)(Scalar *)>(dlsym(handle, "forward"));
173  glob.reverse_compiled = reinterpret_cast<void (*)(Scalar *, Scalar *)>(
174  dlsym(handle, "reverse"));
175  }
176 }
177 } // namespace TMBad
178 #endif
179 // Autogenerated - do not edit by hand !
180 #include "compression.hpp"
181 namespace TMBad {
182 
183 std::ostream &operator<<(std::ostream &os, const period &x) {
184  os << "begin: " << x.begin;
185  os << " size: " << x.size;
186  os << " rep: " << x.rep;
187  return os;
188 }
189 
190 std::vector<period> split_period(global *glob, period p,
191  size_t max_period_size) {
192  typedef std::ptrdiff_t ptrdiff_t;
193  glob->subgraph_cache_ptr();
194 
195  size_t offset = glob->subgraph_ptr[p.begin].first;
196 
197  size_t nrow = 0;
198  for (size_t i = 0; i < p.size; i++) {
199  nrow += glob->opstack[p.begin + i]->input_size();
200  }
201 
202  size_t ncol = p.rep;
203 
204  matrix_view<Index> x(&(glob->inputs[offset]), nrow, ncol);
205 
206  std::vector<bool> marks(ncol - 1, false);
207 
208  for (size_t i = 0; i < nrow; i++) {
209  std::vector<period> pd =
210  periodic<ptrdiff_t>(x.row_diff<ptrdiff_t>(i), max_period_size)
211  .find_all();
212 
213  for (size_t j = 0; j < pd.size(); j++) {
214  if (pd[j].begin > 0) {
215  marks[pd[j].begin - 1] = true;
216  }
217  size_t end = pd[j].begin + pd[j].size * pd[j].rep;
218  if (end < marks.size()) marks[end] = true;
219  }
220  }
221 
222  std::vector<period> ans;
223  p.rep = 1;
224  ans.push_back(p);
225  for (size_t j = 0; j < marks.size(); j++) {
226  if (marks[j]) {
227  period pnew = p;
228  pnew.begin = p.begin + (j + 1) * p.size;
229  pnew.rep = 1;
230  ans.push_back(pnew);
231  } else {
232  ans.back().rep++;
233  }
234  }
235 
236  return ans;
237 }
238 
239 size_t compressed_input::input_size() const { return n; }
240 
241 void compressed_input::update_increment_pattern() const {
242  for (size_t i = 0; i < (size_t)np; i++)
243  increment_pattern[which_periodic[i]] =
244  period_data[period_offsets[i] + counter % period_sizes[i]];
245 }
246 
247 void compressed_input::increment(Args<> &args) const {
248  if (np) {
249  update_increment_pattern();
250  counter++;
251  }
252  for (size_t i = 0; i < n; i++) inputs[i] += increment_pattern[i];
253  args.ptr.first = 0;
254 }
255 
256 void compressed_input::decrement(Args<> &args) const {
257  args.ptr.first = input_size();
258  for (size_t i = 0; i < n; i++) inputs[i] -= increment_pattern[i];
259  if (np) {
260  counter--;
261  update_increment_pattern();
262  }
263 }
264 
265 void compressed_input::forward_init(Args<> &args) const {
266  counter = 0;
267  inputs.resize(input_size());
268  for (size_t i = 0; i < inputs.size(); i++) inputs[i] = args.input(i);
269  args.inputs = inputs.data();
270  args.ptr.first = 0;
271 }
272 
273 void compressed_input::reverse_init(Args<> &args) {
274  inputs.resize(input_size());
275  for (size_t i = 0; i < inputs.size(); i++)
276  inputs[i] = args.input(i) + input_diff[i];
277 
278  args.inputs = inputs.data();
279  args.ptr.first = 0;
280  args.ptr.second += m * nrep;
281  counter = nrep - 1;
282  update_increment_pattern();
283  args.ptr.first = input_size();
284 }
285 
286 void compressed_input::dependencies_intervals(Args<> &args,
287  std::vector<Index> &lower,
288  std::vector<Index> &upper) const {
289  forward_init(args);
290  lower = inputs;
291  upper = inputs;
292  for (size_t i = 0; i < nrep; i++) {
293  for (size_t j = 0; j < inputs.size(); j++) {
294  if (inputs[j] < lower[j]) lower[j] = inputs[j];
295  if (inputs[j] > upper[j]) upper[j] = inputs[j];
296  }
297  increment(args);
298  }
299 }
300 
301 bool compressed_input::test_period(std::vector<ptrdiff_t> &x, size_t p) {
302  for (size_t j = 0; j < x.size(); j++) {
303  if (x[j] != x[j % p]) return false;
304  }
305  return true;
306 }
307 
308 size_t compressed_input::find_shortest(std::vector<ptrdiff_t> &x) {
309  for (size_t p = 1; p < max_period_size; p++) {
310  if (test_period(x, p)) return p;
311  }
312  return x.size();
313 }
314 
315 compressed_input::compressed_input() {}
316 
317 compressed_input::compressed_input(std::vector<Index> &x, size_t offset,
318  size_t nrow, size_t m, size_t ncol,
319  size_t max_period_size)
320  : n(nrow), m(m), nrep(ncol), counter(0), max_period_size(max_period_size) {
321  matrix_view<Index> xm(&x[offset], nrow, ncol);
322 
323  for (size_t i = 0; i < nrow; i++) {
324  std::vector<ptrdiff_t> rd = xm.row_diff<ptrdiff_t>(i);
325 
326  size_t p = find_shortest(rd);
327 
328  increment_pattern.push_back(rd[0]);
329  if (p != 1) {
330  which_periodic.push_back(i);
331  period_sizes.push_back(p);
332 
333  size_t pos = std::search(period_data.begin(), period_data.end(),
334  rd.begin(), rd.begin() + p) -
335  period_data.begin();
336  if (pos < period_data.size()) {
337  period_offsets.push_back(pos);
338  } else {
339  period_offsets.push_back(period_data.size());
340  period_data.insert(period_data.end(), rd.begin(), rd.begin() + p);
341  }
342  }
343  }
344 
345  np = which_periodic.size();
346 
347  input_diff.resize(n, 0);
348  Args<> args(input_diff);
349  forward_init(args);
350  for (size_t i = 0; i < nrep; i++) {
351  increment(args);
352  }
353  input_diff = inputs;
354 }
355 
356 StackOp::StackOp(global *glob, period p, IndexPair ptr,
357  size_t max_period_size) {
358  opstack.resize(p.size);
359  size_t n = 0, m = 0;
360  for (size_t i = 0; i < p.size; i++) {
361  opstack[i] = glob->opstack[p.begin + i]->copy();
362  n += opstack[i]->input_size();
363  m += opstack[i]->output_size();
364  }
365  ci = compressed_input(glob->inputs, ptr.first, n, m, p.rep, max_period_size);
366 }
367 
368 StackOp::StackOp(const StackOp &x) : opstack(x.opstack), ci(x.ci) {}
369 
370 void StackOp::print(global::print_config cfg) {
371  std::vector<const char *> tmp(opstack.size());
372  for (size_t i = 0; i < opstack.size(); i++) tmp[i] = opstack[i]->op_name();
373  Rcout << cfg.prefix << " opstack = " << tmp << "\n";
374 
375  Rcout << cfg.prefix << " "
376  << "nrep"
377  << " = " << ci.nrep << "\n";
378  ;
379  Rcout << cfg.prefix << " "
380  << "increment_pattern"
381  << " = " << ci.increment_pattern << "\n";
382  ;
383  if (ci.which_periodic.size() > 0) {
384  Rcout << cfg.prefix << " "
385  << "which_periodic"
386  << " = " << ci.which_periodic << "\n";
387  ;
388  Rcout << cfg.prefix << " "
389  << "period_sizes"
390  << " = " << ci.period_sizes << "\n";
391  ;
392  Rcout << cfg.prefix << " "
393  << "period_offsets"
394  << " = " << ci.period_offsets << "\n";
395  ;
396  Rcout << cfg.prefix << " "
397  << "period_data"
398  << " = " << ci.period_data << "\n";
399  ;
400  }
401 
402  Rcout << "\n";
403 }
404 
405 Index StackOp::input_size() const { return ci.n; }
406 
407 Index StackOp::output_size() const { return ci.m * ci.nrep; }
408 
409 void StackOp::forward(ForwardArgs<Writer> &args) {
410  size_t n = ci.n, m = ci.m, nrep = ci.nrep;
411  std::vector<Index> inputs(n);
412  for (size_t i = 0; i < (size_t)n; i++) inputs[i] = args.input(i);
413  std::vector<Index> outputs(m);
414  for (size_t i = 0; i < (size_t)m; i++) outputs[i] = args.output(i);
415  Writer w;
416  size_t np = ci.which_periodic.size();
417  size_t sp = ci.period_data.size();
418  w << "for (int count = 0, ";
419  if (n > 0) {
420  w << "i[" << n << "]=" << inputs << ", "
421  << "ip[" << n << "]=" << ci.increment_pattern << ", ";
422  }
423  if (np > 0) {
424  w << "wp[" << np << "]=" << ci.which_periodic << ", "
425  << "ps[" << np << "]=" << ci.period_sizes << ", "
426  << "po[" << np << "]=" << ci.period_offsets << ", "
427  << "pd[" << sp << "]=" << ci.period_data << ", ";
428  }
429  w << "o[" << m << "]=" << outputs << "; "
430  << "count < " << nrep << "; count++) {\n";
431 
432  w << " ";
433  ForwardArgs<Writer> args_cpy = args;
434  args_cpy.set_indirect();
435  for (size_t k = 0; k < opstack.size(); k++) {
436  opstack[k]->forward_incr(args_cpy);
437  }
438  w << "\n";
439 
440  if (np > 0) {
441  w << " ";
442  for (size_t k = 0; k < np; k++)
443  w << "ip[wp[" << k << "]] = pd[po[" << k << "] + count % ps[" << k
444  << "]]; ";
445  w << "\n";
446  }
447  if (n > 0) {
448  w << " ";
449  for (size_t k = 0; k < n; k++) w << "i[" << k << "] += ip[" << k << "]; ";
450  w << "\n";
451  }
452  w << " ";
453  for (size_t k = 0; k < m; k++) w << "o[" << k << "] += " << m << "; ";
454  w << "\n";
455 
456  w << " ";
457  w << "}";
458 }
459 
// Emit C source text for the reverse pass of the compressed loop: same
// structure as forward() but iterating count = nrep-1 .. 0 and moving the
// pointer arrays backwards.
void StackOp::reverse(ReverseArgs<Writer> &args) {
  size_t n = ci.n, m = ci.m, nrep = ci.nrep;
  std::vector<ptrdiff_t> inputs(input_size());
  for (size_t i = 0; i < inputs.size(); i++) {
    ptrdiff_t tmp;
    // input_diff is stored in an unsigned type; recover the signed offset by
    // picking whichever of x / -x is the smaller (wrapped) representation.
    // NOTE(review): relies on unsigned wraparound semantics — confirm.
    if (-ci.input_diff[i] < ci.input_diff[i]) {
      tmp = -((ptrdiff_t)-ci.input_diff[i]);
    } else {
      tmp = ci.input_diff[i];
    }
    // Start inputs at their position after the final forward repetition.
    inputs[i] = args.input(i) + tmp;
  }
  // Outputs start one full sweep (m * nrep) past their initial position.
  std::vector<Index> outputs(ci.m);
  for (size_t i = 0; i < (size_t)ci.m; i++)
    outputs[i] = args.output(i) + ci.m * ci.nrep;
  Writer w;
  size_t np = ci.which_periodic.size();
  size_t sp = ci.period_data.size();
  w << "for (int count = " << nrep << ", ";
  if (n > 0) {
    w << "i[" << n << "]=" << inputs << ", "
      << "ip[" << n << "]=" << ci.increment_pattern << ", ";
  }
  if (np > 0) {
    w << "wp[" << np << "]=" << ci.which_periodic << ", "
      << "ps[" << np << "]=" << ci.period_sizes << ", "
      << "po[" << np << "]=" << ci.period_offsets << ", "
      << "pd[" << sp << "]=" << ci.period_data << ", ";
  }
  w << "o[" << m << "]=" << outputs << "; "
    << "count > 0 ; ) {\n";

  // Decrement first, then step all pointers back before replaying the body.
  w << " ";
  w << "count--;\n";
  if (np > 0) {
    w << " ";
    for (size_t k = 0; k < np; k++)
      w << "ip[wp[" << k << "]] = pd[po[" << k << "] + count % ps[" << k
        << "]]; ";
    w << "\n";
  }
  if (n > 0) {
    w << " ";
    for (size_t k = 0; k < n; k++) w << "i[" << k << "] -= ip[" << k << "]; ";
    w << "\n";
  }
  w << " ";
  for (size_t k = 0; k < m; k++) w << "o[" << k << "] -= " << m << "; ";
  w << "\n";

  w << " ";

  // Replay the wrapped operators' reverse code in indirect mode, in
  // reverse order.
  ReverseArgs<Writer> args_cpy = args;
  args_cpy.set_indirect();
  args_cpy.ptr.first = ci.n;
  args_cpy.ptr.second = ci.m;
  for (size_t k = opstack.size(); k > 0;) {
    k--;
    opstack[k]->reverse_decr(args_cpy);
  }
  w << "\n";

  w << " ";
  w << "}";
}
525 
526 void StackOp::dependencies(Args<> args, Dependencies &dep) const {
527  std::vector<Index> lower;
528  std::vector<Index> upper;
529  ci.dependencies_intervals(args, lower, upper);
530  for (size_t i = 0; i < lower.size(); i++) {
531  dep.add_interval(lower[i], upper[i]);
532  }
533 }
534 
535 const char *StackOp::op_name() { return "StackOp"; }
536 
539  cfg.strong_inv = false;
540  cfg.strong_const = false;
541  cfg.strong_output = false;
542  cfg.reduce = false;
543  cfg.deterministic = false;
544  std::vector<hash_t> h = glob.hash_sweep(cfg);
545  std::vector<Index> remap = radix::first_occurance<Index>(h);
546 
547  TMBAD_ASSERT(all_allow_remap(glob));
548 
549  Args<> args(glob.inputs);
550  for (size_t i = 0; i < glob.opstack.size(); i++) {
551  Dependencies dep;
552  glob.opstack[i]->dependencies(args, dep);
553 
554  Index var = args.ptr.second;
555  toposort_remap<Index> fb(remap, var);
556  dep.apply(fb);
557  glob.opstack[i]->increment(args.ptr);
558  }
559 
560  std::vector<Index> ord = radix::order<Index>(remap);
561  std::vector<Index> v2o = glob.var2op();
562  glob.subgraph_seq = subset(v2o, ord);
563 
564  glob = glob.extract_sub();
565 }
566 
568  std::vector<Index> remap(glob.values.size(), Index(-1));
569  Args<> args(glob.inputs);
570  for (size_t i = 0; i < glob.opstack.size(); i++) {
571  Dependencies dep;
572  glob.opstack[i]->dependencies(args, dep);
573  sort_unique_inplace(dep);
574  Index var = args.ptr.second;
575  temporaries_remap<Index> fb(remap, var);
576  dep.apply(fb);
577  glob.opstack[i]->increment(args.ptr);
578  }
579 
580  for (size_t i = remap.size(); i > 0;) {
581  i--;
582  if (remap[i] == Index(-1))
583  remap[i] = i;
584  else
585  remap[i] = remap[remap[i]];
586  }
587 
588  std::vector<Index> ord = radix::order<Index>(remap);
589  std::vector<Index> v2o = glob.var2op();
590  glob.subgraph_seq = subset(v2o, ord);
591 
592  glob = glob.extract_sub();
593 }
594 
596  std::vector<bool> visited(glob.opstack.size(), false);
597  std::vector<Index> v2o = glob.var2op();
598  std::vector<Index> stack;
599  std::vector<Index> result;
600  Args<> args(glob.inputs);
601  glob.subgraph_cache_ptr();
602  for (size_t k = 0; k < glob.dep_index.size(); k++) {
603  Index dep_var = glob.dep_index[k];
604  Index i = v2o[dep_var];
605 
606  stack.push_back(i);
607  visited[i] = true;
608  while (stack.size() > 0) {
609  Index i = stack.back();
610  args.ptr = glob.subgraph_ptr[i];
611  Dependencies dep;
612  glob.opstack[i]->dependencies(args, dep);
613  dfs_add_to_stack<Index> add_to_stack(stack, visited, v2o);
614  size_t before = stack.size();
615  dep.apply(add_to_stack);
616  size_t after = stack.size();
617  if (before == after) {
618  result.push_back(i);
619  stack.pop_back();
620  }
621  }
622  }
623 
624  glob.subgraph_seq = result;
625  glob = glob.extract_sub();
626 
627  glob.shrink_to_fit();
628 }
629 
// Compress the tape by detecting periodic repeats in the operator stack and
// collapsing each repeated run into a single StackOp.
void compress(global &glob, size_t max_period_size) {
  size_t min_period_rep = TMBAD_MIN_PERIOD_REP;
  // Detect all periodic runs of operators up to the maximum period size.
  periodic<global::OperatorPure *> p(glob.opstack, max_period_size,
                                     min_period_rep);
  std::vector<period> periods = p.find_all();

  // Try splitting each period into smaller pieces; if that fragments too
  // much (> 10 pieces) keep the original period instead. Only periods that
  // actually repeat (rep > 1) are worth compressing.
  std::vector<period> periods_expand;
  for (size_t i = 0; i < periods.size(); i++) {
    std::vector<period> tmp = split_period(&glob, periods[i], max_period_size);

    if (tmp.size() > 10) {
      tmp.resize(0);
      tmp.push_back(periods[i]);
    }

    for (size_t j = 0; j < tmp.size(); j++) {
      if (tmp[j].rep > 1) periods_expand.push_back(tmp[j]);
    }
  }

  std::swap(periods, periods_expand);
  OperatorPure *null_op = get_glob()->getOperator<global::NullOp>();
  IndexPair ptr(0, 0);
  Index k = 0;
  for (size_t i = 0; i < periods.size(); i++) {
    period p = periods[i];
    TMBAD_ASSERT(p.rep >= 1);
    // Advance (ptr, k) to the first operator of this period.
    while (k < p.begin) {
      glob.opstack[k]->increment(ptr);
      k++;
    }

    // Build the StackOp replacement, then blank out the operators it covers.
    OperatorPure *pOp =
        get_glob()->getOperator<StackOp>(&glob, p, ptr, max_period_size);
    Index ninp = 0;
    for (size_t j = 0; j < p.size * p.rep; j++) {
      ninp += glob.opstack[p.begin + j]->input_size();
      glob.opstack[p.begin + j]->deallocate();
      glob.opstack[p.begin + j] = null_op;
    }
    glob.opstack[p.begin] = pOp;
    // NullOp2 absorbs the leftover inputs not claimed by the StackOp so the
    // global input counter stays consistent.
    ninp -= pOp->input_size();
    glob.opstack[p.begin + 1] =
        get_glob()->getOperator<global::NullOp2>(ninp, 0);
  }

  // Remove the nulled-out operators; every value is kept.
  std::vector<bool> marks(glob.values.size(), true);
  glob.extract_sub_inplace(marks);
  glob.shrink_to_fit();
}
680 } // namespace TMBad
681 // Autogenerated - do not edit by hand !
682 #include "global.hpp"
683 namespace TMBad {
684 
// One active-tape slot per thread (at most TMBAD_MAX_NUM_THREADS threads).
global *global_ptr_data[TMBAD_MAX_NUM_THREADS] = {NULL};
global **global_ptr = global_ptr_data;
// Output stream used by Writer-based code generation; must be set before
// generating code (assignment operators dereference it unchecked).
std::ostream *Writer::cout = 0;
bool global::fuse = 0;

// Tape currently active on this thread.
global *get_glob() { return global_ptr[TMBAD_THREAD_NUM]; }
691 
692 Dependencies::Dependencies() {}
693 
694 void Dependencies::clear() {
695  this->resize(0);
696  I.resize(0);
697 }
698 
699 void Dependencies::add_interval(Index a, Index b) {
700  I.push_back(std::pair<Index, Index>(a, b));
701 }
702 
703 void Dependencies::add_segment(Index start, Index size) {
704  if (size > 0) add_interval(start, start + size - 1);
705 }
706 
// Apply the index transform x to every recorded dependency. Interval
// endpoints are mapped directly, which only yields valid intervals if x is
// monotone (hence the name).
void Dependencies::monotone_transform_inplace(const std::vector<Index> &x) {
  for (size_t i = 0; i < this->size(); i++) (*this)[i] = x[(*this)[i]];
  for (size_t i = 0; i < I.size(); i++) {
    I[i].first = x[I[i].first];
    I[i].second = x[I[i].second];
  }
}

// True if any dependency (individual index or interval member) is marked in
// x.
bool Dependencies::any(const std::vector<bool> &x) const {
  for (size_t i = 0; i < this->size(); i++)
    if (x[(*this)[i]]) return true;
  for (size_t i = 0; i < I.size(); i++) {
    for (Index j = I[i].first; j <= I[i].second; j++) {
      if (x[j]) return true;
    }
  }
  return false;
}
725 
726 std::string tostr(const Index &x) {
727  std::ostringstream strs;
728  strs << x;
729  return strs.str();
730 }
731 
732 std::string tostr(const Scalar &x) {
733  std::ostringstream strs;
734  strs << x;
735  return strs.str();
736 }
737 
// Writer is a std::string that "computes" by building C source text: the
// arithmetic operators concatenate expression strings, and the assignment
// operators emit complete statements to the static stream Writer::cout.

Writer::Writer(std::string str) : std::string(str) {}

Writer::Writer(Scalar x) : std::string(tostr(x)) {}

Writer::Writer() {}

// Parenthesize a sub-expression.
std::string Writer::p(std::string x) { return "(" + x + ")"; }

// Additive results are parenthesized to keep operator precedence intact in
// the generated text; multiplicative ones are not.
Writer Writer::operator+(const Writer &other) {
  return p(*this + " + " + other);
}

Writer Writer::operator-(const Writer &other) {
  return p(*this + " - " + other);
}

// Unary minus. NOTE(review): result is not parenthesized, unlike the binary
// forms — confirm generated contexts cannot misparse it.
Writer Writer::operator-() { return " - " + *this; }

Writer Writer::operator*(const Writer &other) { return *this + " * " + other; }

Writer Writer::operator/(const Writer &other) { return *this + " / " + other; }

Writer Writer::operator*(const Scalar &other) {
  return *this + "*" + tostr(other);
}

Writer Writer::operator+(const Scalar &other) {
  return p(*this + "+" + tostr(other));
}

// Assignment forms emit a statement (no trailing newline) to *cout; cout
// must have been set to a valid stream beforehand.
void Writer::operator=(const Writer &other) {
  *cout << *this + " = " + other << ";";
}

void Writer::operator+=(const Writer &other) {
  *cout << *this + " += " + other << ";";
}

void Writer::operator-=(const Writer &other) {
  *cout << *this + " -= " + other << ";";
}

void Writer::operator*=(const Writer &other) {
  *cout << *this + " *= " + other << ";";
}

void Writer::operator/=(const Writer &other) {
  *cout << *this + " /= " + other << ";";
}
787 
// A Position identifies a point on the tape: operator index ('node') plus
// the (input, value) counters at that operator.
Position::Position(Index node, Index first, Index second)
    : node(node), ptr(first, second) {}

// Start of the tape.
Position::Position() : node(0), ptr(0, 0) {}

// Ordering by operator index only; the pointer pair is derived from it.
bool Position::operator<(const Position &other) const {
  return this->node < other.node;
}
796 
// Directed graph in CSR form: 'p' is the row-pointer array, 'j' the
// concatenated neighbor lists.
graph::graph() {}

// Out-degree of 'node' (CSR row length).
size_t graph::num_neighbors(Index node) { return p[node + 1] - p[node]; }

// Pointer to the first neighbor of 'node' inside the column array 'j'.
Index *graph::neighbors(Index node) { return &(j[p[node]]); }

// A default-constructed graph has an empty row-pointer array.
bool graph::empty() { return p.size() == 0; }

size_t graph::num_nodes() { return (empty() ? 0 : p.size() - 1); }
806 
807 void graph::print() {
808  for (size_t node = 0; node < num_nodes(); node++) {
809  Rcout << node << ": ";
810  for (size_t i = 0; i < num_neighbors(node); i++) {
811  Rcout << " " << neighbors(node)[i];
812  }
813  Rcout << "\n";
814  }
815 }
816 
817 std::vector<Index> graph::rowcounts() {
818  std::vector<Index> ans(num_nodes());
819  for (size_t i = 0; i < ans.size(); i++) ans[i] = num_neighbors(i);
820  return ans;
821 }
822 
823 std::vector<Index> graph::colcounts() {
824  std::vector<Index> ans(num_nodes());
825  for (size_t i = 0; i < j.size(); i++) ans[j[i]]++;
826  return ans;
827 }
828 
829 void graph::bfs(const std::vector<Index> &start, std::vector<bool> &visited,
830  std::vector<Index> &result) {
831  for (size_t i = 0; i < start.size(); i++) {
832  Index node = start[i];
833  for (size_t j_ = 0; j_ < num_neighbors(node); j_++) {
834  Index k = neighbors(node)[j_];
835  if (!visited[k]) {
836  result.push_back(k);
837  visited[k] = true;
838  }
839  }
840  }
841 }
842 
843 void graph::search(std::vector<Index> &start, bool sort_input,
844  bool sort_output) {
845  if (mark.size() == 0) mark.resize(num_nodes(), false);
846 
847  search(start, mark, sort_input, sort_output);
848 
849  for (size_t i = 0; i < start.size(); i++) mark[start[i]] = false;
850 }
851 
// Expand 'start' by one BFS level, in place. Note that 'start' is passed to
// bfs() as both frontier and result buffer, so newly discovered nodes are
// appended directly to it.
void graph::search(std::vector<Index> &start, std::vector<bool> &visited,
                   bool sort_input, bool sort_output) {
  if (sort_input) sort_unique_inplace(start);

  for (size_t i = 0; i < start.size(); i++) visited[start[i]] = true;

  bfs(start, visited, start);

  if (sort_output) sort_inplace(start);
}
862 
863 std::vector<Index> graph::boundary(const std::vector<Index> &subgraph) {
864  if (mark.size() == 0) mark.resize(num_nodes(), false);
865 
866  std::vector<Index> boundary;
867 
868  for (size_t i = 0; i < subgraph.size(); i++) mark[subgraph[i]] = true;
869 
870  bfs(subgraph, mark, boundary);
871 
872  for (size_t i = 0; i < subgraph.size(); i++) mark[subgraph[i]] = false;
873  for (size_t i = 0; i < boundary.size(); i++) mark[boundary[i]] = false;
874 
875  return boundary;
876 }
877 
878 graph::graph(size_t num_nodes, const std::vector<IndexPair> &edges) {
879  std::vector<IndexPair>::const_iterator it;
880  std::vector<Index> row_counts(num_nodes, 0);
881  for (it = edges.begin(); it != edges.end(); it++) {
882  row_counts[it->first]++;
883  }
884 
885  p.resize(num_nodes + 1);
886  p[0] = 0;
887  for (size_t i = 0; i < num_nodes; i++) {
888  p[i + 1] = p[i] + row_counts[i];
889  }
890 
891  std::vector<Index> k(p);
892  j.resize(edges.size());
893  for (it = edges.begin(); it != edges.end(); it++) {
894  j[k[it->first]++] = it->second;
895  }
896 }
897 
// op_info is a bitset of operator capability flags packed into 'code'.

// Default: no flags set. The static_assert guarantees the integer
// representation is wide enough for every defined flag.
op_info::op_info() : code(0) {
  static_assert(sizeof(IntRep) * 8 >= op_flag_count,
                "'IntRep' not wide enough!");
}

// Single-flag value. NOTE(review): '1 << f' is evaluated in int — assumes
// flag positions stay below 31; confirm if IntRep is ever widened.
op_info::op_info(op_flag f) : code(1 << f) {}

// Test whether flag f is set ('<<' binds tighter than '&').
bool op_info::test(op_flag f) const { return code & 1 << f; }

// Flag-set union.
op_info &op_info::operator|=(const op_info &other) {
  code |= other.code;
  return *this;
}

// Flag-set intersection.
op_info &op_info::operator&=(const op_info &other) {
  code &= other.code;
  return *this;
}
916 
global::operation_stack::operation_stack() {}

// Copy constructor delegates to copy_from, which deep-copies operators when
// any of them are dynamically allocated.
global::operation_stack::operation_stack(const operation_stack &other) {
  (*this).copy_from(other);
}

// Push an operator and fold its capability flags into the running union
// 'any' (used later to decide e.g. whether clear() must deallocate).
void global::operation_stack::push_back(OperatorPure *x) {
  Base::push_back(x);

  any |= x->info();
}
928 
929 operation_stack &global::operation_stack::operator=(
930  const operation_stack &other) {
931  if (this != &other) {
932  (*this).clear();
933  (*this).copy_from(other);
934  }
935  return *this;
936 }
937 
global::operation_stack::~operation_stack() { (*this).clear(); }

// Empty the stack. Operators are individually deallocated only if at least
// one pushed operator carried the 'dynamic' flag (tracked in 'any').
void global::operation_stack::clear() {
  if (any.test(op_info::dynamic)) {
    for (size_t i = 0; i < (*this).size(); i++) (*this)[i]->deallocate();
  }
  (*this).resize(0);
}

// Copy helper: deep-copy each operator when any are dynamic; otherwise a
// plain pointer copy of the underlying vector is safe.
void global::operation_stack::copy_from(const operation_stack &other) {
  if (other.any.test(op_info::dynamic)) {
    for (size_t i = 0; i < other.size(); i++) Base::push_back(other[i]->copy());
  } else {
    Base::operator=(other);
  }
  this->any = other.any;
}
955 
// Fresh tape: no compiled forward/reverse kernels, no parent, not active.
global::global()
    : forward_compiled(NULL),
      reverse_compiled(NULL),
      parent_glob(NULL),
      in_use(false) {}
961 
962 void global::clear() {
963  values.resize(0);
964  derivs.resize(0);
965  inputs.resize(0);
966  inv_index.resize(0);
967  dep_index.resize(0);
968  subgraph_ptr.resize(0);
969  subgraph_seq.resize(0);
970  opstack.clear();
971 }
972 
// Release excess vector capacity. Scratch arrays (derivs, subgraph_ptr) are
// dropped entirely via the swap-with-temporary idiom; the persistent arrays
// are reallocated only when their fill ratio falls below 'tol'.
void global::shrink_to_fit(double tol) {
  std::vector<Scalar>().swap(derivs);
  std::vector<IndexPair>().swap(subgraph_ptr);
  if (values.size() < tol * values.capacity())
    std::vector<Scalar>(values).swap(values);
  if (inputs.size() < tol * inputs.capacity())
    std::vector<Index>(inputs).swap(inputs);
  if (opstack.size() < tol * opstack.capacity())
    std::vector<OperatorPure *>(opstack).swap(opstack);
}
983 
// Size the derivative workspace to match 'values' and zero it from 'start'
// onward; entries before start.ptr.second are deliberately left untouched.
void global::clear_deriv(Position start) {
  derivs.resize(values.size());
  std::fill(derivs.begin() + start.ptr.second, derivs.end(), 0);
}
988 
// Value / derivative of the i'th independent variable.
Scalar &global::value_inv(Index i) { return values[inv_index[i]]; }

Scalar &global::deriv_inv(Index i) { return derivs[inv_index[i]]; }

// Value / derivative of the i'th dependent variable.
Scalar &global::value_dep(Index i) { return values[dep_index[i]]; }

Scalar &global::deriv_dep(Index i) { return derivs[dep_index[i]]; }

// Tape start position.
Position global::begin() { return Position(0, 0, 0); }

// One-past-the-end position: all operators, inputs and values consumed.
Position global::end() {
  return Position(opstack.size(), inputs.size(), values.size());
}

// Node filter that accepts every operator.
CONSTEXPR bool global::no_filter::operator[](size_t i) const { return true; }
1004 
// Forward sweep from 'start'. If a compiled kernel is attached it takes
// over and evaluates the whole tape (ignoring 'start').
void global::forward(Position start) {
  if (forward_compiled != NULL) {
    forward_compiled(values.data());
    return;
  }
  ForwardArgs<Scalar> args(inputs, values, this);
  args.ptr = start.ptr;
  forward_loop(args, start.node);
}

// Reverse sweep from 'start'; compiled kernel takes over when present.
void global::reverse(Position start) {
  if (reverse_compiled != NULL) {
    reverse_compiled(values.data(), derivs.data());
    return;
  }
  ReverseArgs<Scalar> args(inputs, values, derivs, this);
  reverse_loop(args, start.node);
}

// Forward sweep restricted to the currently selected subgraph.
void global::forward_sub() {
  ForwardArgs<Scalar> args(inputs, values, this);
  forward_loop_subgraph(args);
}

// Reverse sweep restricted to the currently selected subgraph.
void global::reverse_sub() {
  ReverseArgs<Scalar> args(inputs, values, derivs, this);
  reverse_loop_subgraph(args);
}
1033 
// Boolean (mark-propagation) forward sweep: propagate marks from inputs to
// outputs across the whole tape.
void global::forward(std::vector<bool> &marks) {
  intervals<Index> marked_intervals;
  ForwardArgs<bool> args(inputs, marks, marked_intervals);
  forward_loop(args);
}

// Boolean reverse sweep: propagate marks from outputs back to inputs.
void global::reverse(std::vector<bool> &marks) {
  intervals<Index> marked_intervals;
  ReverseArgs<bool> args(inputs, marks, marked_intervals);
  reverse_loop(args);
}
1045 
// Boolean forward sweep over a subset of operators: either the cached
// subgraph (empty filter) or an explicit per-operator filter.
void global::forward_sub(std::vector<bool> &marks,
                         const std::vector<bool> &node_filter) {
  intervals<Index> marked_intervals;
  ForwardArgs<bool> args(inputs, marks, marked_intervals);
  if (node_filter.size() == 0)
    forward_loop_subgraph(args);
  else
    forward_loop(args, 0, node_filter);
}

// Boolean reverse sweep over a subset of operators (see forward_sub).
void global::reverse_sub(std::vector<bool> &marks,
                         const std::vector<bool> &node_filter) {
  intervals<Index> marked_intervals;
  ReverseArgs<bool> args(inputs, marks, marked_intervals);
  if (node_filter.size() == 0)
    reverse_loop_subgraph(args);
  else
    reverse_loop(args, 0, node_filter);
}
1065 
// Mark-propagation variant where each operator marks all of its outputs
// whenever any input is marked (dense / conservative propagation).
void global::forward_dense(std::vector<bool> &marks) {
  intervals<Index> marked_intervals;
  ForwardArgs<bool> args(inputs, marks, marked_intervals);
  for (size_t i = 0; i < opstack.size(); i++) {
    opstack[i]->forward_incr_mark_dense(args);
  }
}
1073 
1074 intervals<Index> global::updating_intervals() const {
1075  Dependencies dep;
1076  intervals<Index> marked_intervals;
1077  Args<> args(inputs);
1078  for (size_t i = 0; i < opstack.size(); i++) {
1079  if (opstack[i]->info().test(op_info::updating)) {
1080  dep.clear();
1081  opstack[i]->dependencies(args, dep);
1082 
1083  for (size_t i = 0; i < dep.I.size(); i++) {
1084  Index a = dep.I[i].first;
1085  Index b = dep.I[i].second;
1086  marked_intervals.insert(a, b);
1087  }
1088  }
1089  opstack[i]->increment(args.ptr);
1090  }
1091  return marked_intervals;
1092 }
1093 
1094 intervals<Index> global::updating_intervals_sub() const {
1095  Dependencies dep;
1096  intervals<Index> marked_intervals;
1097  Args<> args(inputs);
1098  subgraph_cache_ptr();
1099  for (size_t j = 0; j < subgraph_seq.size(); j++) {
1100  Index i = subgraph_seq[j];
1101  args.ptr = subgraph_ptr[i];
1102  if (opstack[i]->info().test(op_info::updating)) {
1103  dep.clear();
1104  opstack[i]->dependencies(args, dep);
1105 
1106  for (size_t i = 0; i < dep.I.size(); i++) {
1107  Index a = dep.I[i].first;
1108  Index b = dep.I[i].second;
1109  marked_intervals.insert(a, b);
1110  }
1111  }
1112  }
1113  return marked_intervals;
1114 }
1115 
// Replay-side value / derivative of the i'th independent (resp. dependent)
// variable, indexed through the original tape's index tables.
Replay &global::replay::value_inv(Index i) { return values[orig.inv_index[i]]; }

Replay &global::replay::deriv_inv(Index i) { return derivs[orig.inv_index[i]]; }

Replay &global::replay::value_dep(Index i) { return values[orig.dep_index[i]]; }

Replay &global::replay::deriv_dep(Index i) { return derivs[orig.dep_index[i]]; }
1123 
// Re-record 'orig' onto the tape 'target'; the two must be distinct.
global::replay::replay(const global &orig, global &target)
    : orig(orig), target(target) {
  TMBAD_ASSERT(&orig != &target);
}

// Activate the target tape (unless already active) and seed the replay
// value workspace from the original tape's values.
void global::replay::start() {
  parent_glob = get_glob();
  if (&target != parent_glob) target.ad_start();
  values = std::vector<Replay>(orig.values.begin(), orig.values.end());
}

// Deactivate the target tape and verify the previously active tape is
// restored.
void global::replay::stop() {
  if (&target != parent_glob) target.ad_stop();
  TMBAD_ASSERT(parent_glob == get_glob());
}
1139 
// For each interval [a,b] in I, run a ZeroOp over the corresponding slice of
// the replay derivative workspace (local functor applied via I.apply).
void global::replay::add_updatable_derivs(const intervals<Index> &I) {
  struct {
    Replay *p;
    void operator()(Index a, Index b) {
      Index n = b - a + 1;
      global::ZeroOp Z(n);
      Z(p + a, n);
    }
  } F = {derivs.data()};
  I.apply(F);
}
1151 
// Zero the replay derivative workspace; intervals touched by 'updating'
// operators additionally get explicit ZeroOp initialization.
void global::replay::clear_deriv() {
  derivs.resize(values.size());
  std::fill(derivs.begin(), derivs.end(), Replay(0));

  if (orig.opstack.any.test(op_info::updating)) {
    intervals<Index> I = orig.updating_intervals();
    add_updatable_derivs(I);
  }
}
1161 
// Replay the original tape's forward sweep onto the target tape. Optionally
// tag independents before, and dependents after, the sweep; an optional
// per-operator filter restricts which operators are replayed.
void global::replay::forward(bool inv_tags, bool dep_tags, Position start,
                             const std::vector<bool> &node_filter) {
  TMBAD_ASSERT(&target == get_glob());
  if (inv_tags) {
    for (size_t i = 0; i < orig.inv_index.size(); i++)
      value_inv(i).Independent();
  }
  ForwardArgs<Replay> args(orig.inputs, values);
  if (node_filter.size() > 0) {
    TMBAD_ASSERT(node_filter.size() == orig.opstack.size());
    orig.forward_loop(args, start.node, node_filter);
  } else {
    orig.forward_loop(args, start.node);
  }
  if (dep_tags) {
    for (size_t i = 0; i < orig.dep_index.size(); i++) value_dep(i).Dependent();
  }
}
1180 
// Replay the original tape's reverse sweep onto the target tape. In reverse
// mode the roles flip: dependent-variable derivatives are the sweep's
// independents, and independent-variable derivatives its dependents.
// NOTE(review): 'inv_tags' guards tagging of deriv_dep and 'dep_tags' guards
// deriv_inv — consistent with the role flip, but confirm the parameter
// naming is intentional.
void global::replay::reverse(bool dep_tags, bool inv_tags, Position start,
                             const std::vector<bool> &node_filter) {
  TMBAD_ASSERT(&target == get_glob());
  if (inv_tags) {
    for (size_t i = 0; i < orig.dep_index.size(); i++)
      deriv_dep(i).Independent();
  }
  ReverseArgs<Replay> args(orig.inputs, values, derivs);
  if (node_filter.size() > 0) {
    TMBAD_ASSERT(node_filter.size() == orig.opstack.size());
    orig.reverse_loop(args, start.node, node_filter);
  } else {
    orig.reverse_loop(args, start.node);
  }

  // Derivatives before the sweep's start position are reset to zero.
  std::fill(derivs.begin(), derivs.begin() + start.ptr.second, Replay(0));
  if (dep_tags) {
    for (size_t i = 0; i < orig.inv_index.size(); i++) deriv_inv(i).Dependent();
  }
}
1201 
// Replay the forward sweep restricted to the original tape's cached
// subgraph.
void global::replay::forward_sub() {
  ForwardArgs<Replay> args(orig.inputs, values);
  orig.forward_loop_subgraph(args);
}

// Replay the reverse sweep restricted to the cached subgraph.
void global::replay::reverse_sub() {
  ReverseArgs<Replay> args(orig.inputs, values, derivs);
  orig.reverse_loop_subgraph(args);
}

// Zero the subgraph's derivative entries; updating operators' intervals get
// explicit ZeroOp initialization as in clear_deriv().
void global::replay::clear_deriv_sub() {
  orig.clear_array_subgraph(derivs);

  if (orig.opstack.any.test(op_info::updating)) {
    intervals<Index> I = orig.updating_intervals_sub();
    add_updatable_derivs(I);
  }
}
1220 
1221 void global::forward_replay(bool inv_tags, bool dep_tags) {
1222  global new_glob;
1223  global::replay replay(*this, new_glob);
1224  replay.start();
1225  replay.forward(inv_tags, dep_tags);
1226  replay.stop();
1227  *this = new_glob;
1228 }
1229 
// Ensure subgraph_ptr caches each operator's (input, value) start pointers.
// The cache is extended incrementally from wherever it currently ends, so
// repeated calls after appending operators are cheap.
void global::subgraph_cache_ptr() const {
  if (subgraph_ptr.size() == opstack.size()) return;
  TMBAD_ASSERT(subgraph_ptr.size() < opstack.size());
  if (subgraph_ptr.size() == 0) subgraph_ptr.push_back(IndexPair(0, 0));
  for (size_t i = subgraph_ptr.size(); i < opstack.size(); i++) {
    IndexPair ptr = subgraph_ptr[i - 1];
    opstack[i - 1]->increment(ptr);
    subgraph_ptr.push_back(ptr);
  }
}
1240 
1241 void global::set_subgraph(const std::vector<bool> &marks, bool append) {
1242  std::vector<Index> v2o = var2op();
1243  if (!append) subgraph_seq.resize(0);
1244  Index previous = (Index)-1;
1245  for (size_t i = 0; i < marks.size(); i++) {
1246  if (marks[i] && (v2o[i] != previous)) {
1247  subgraph_seq.push_back(v2o[i]);
1248  previous = v2o[i];
1249  }
1250  }
1251 }
1252 
// Set (resp. clear) the marks of all variables produced by the current
// subgraph's operators.
void global::mark_subgraph(std::vector<bool> &marks) {
  TMBAD_ASSERT(marks.size() == values.size());
  clear_array_subgraph(marks, true);
}

void global::unmark_subgraph(std::vector<bool> &marks) {
  TMBAD_ASSERT(marks.size() == values.size());
  clear_array_subgraph(marks, false);
}
1262 
1263 void global::subgraph_trivial() {
1264  subgraph_cache_ptr();
1265  subgraph_seq.resize(0);
1266  for (size_t i = 0; i < opstack.size(); i++) subgraph_seq.push_back(i);
1267 }
1268 
1269 void global::clear_deriv_sub() { clear_array_subgraph(derivs); }
1270 
// Copy the current subgraph into 'new_glob', filling 'var_remap' with the
// old-variable -> new-variable index mapping. Independent / dependent
// status is carried over for variables actually produced by the subgraph.
global global::extract_sub(std::vector<Index> &var_remap, global new_glob) {
  subgraph_cache_ptr();
  TMBAD_ASSERT(var_remap.size() == 0 || var_remap.size() == values.size());
  var_remap.resize(values.size(), 0);
  std::vector<bool> independent_variable = inv_marks();
  std::vector<bool> dependent_variable = dep_marks();
  ForwardArgs<Scalar> args(inputs, values, this);
  for (size_t j = 0; j < subgraph_seq.size(); j++) {
    Index i = subgraph_seq[j];
    args.ptr = subgraph_ptr[i];

    // Copy outputs, recording the remap; consume the inv/dep marks of the
    // variables seen here so leftovers can be detected below.
    size_t nout = opstack[i]->output_size();
    for (size_t k = 0; k < nout; k++) {
      Index new_index = new_glob.values.size();
      Index old_index = args.output(k);
      var_remap[old_index] = new_index;
      new_glob.values.push_back(args.y(k));
      if (independent_variable[old_index]) {
        independent_variable[old_index] = false;
      }
      if (dependent_variable[old_index]) {
        dependent_variable[old_index] = false;
      }
    }

    // Inputs are remapped through var_remap (their producers were copied in
    // an earlier iteration, since subgraph_seq is topologically ordered).
    size_t nin = opstack[i]->input_size();
    for (size_t k = 0; k < nin; k++) {
      new_glob.inputs.push_back(var_remap[args.input(k)]);
    }

    new_glob.opstack.push_back(opstack[i]->copy());
  }

  // After the flip, 'true' means the variable was produced by the subgraph.
  independent_variable.flip();
  dependent_variable.flip();

  for (size_t i = 0; i < inv_index.size(); i++) {
    Index old_var = inv_index[i];
    if (independent_variable[old_var])
      new_glob.inv_index.push_back(var_remap[old_var]);
  }
  for (size_t i = 0; i < dep_index.size(); i++) {
    Index old_var = dep_index[i];
    if (dependent_variable[old_var])
      new_glob.dep_index.push_back(var_remap[old_var]);
  }
  return new_glob;
}
1319 
// Eliminate, in place, every operator with no marked output: surviving
// values and inputs are compacted to the front of their arrays, eliminated
// operators are deallocated, and the inv/dep index tables are rebuilt.
void global::extract_sub_inplace(std::vector<bool> marks) {
  TMBAD_ASSERT(marks.size() == values.size());
  std::vector<Index> var_remap(values.size(), 0);
  std::vector<bool> independent_variable = inv_marks();
  std::vector<bool> dependent_variable = dep_marks();
  intervals<Index> marked_intervals;
  ForwardArgs<bool> args(inputs, marks, marked_intervals);
  // s / s_input: write cursors for the compacted values / inputs arrays.
  size_t s = 0, s_input = 0;
  std::vector<bool> opstack_deallocate(opstack.size(), false);

  for (size_t i = 0; i < opstack.size(); i++) {
    op_info info = opstack[i]->info();

    // An operator survives if any output is marked, if it is protected
    // from elimination, or (for output-less updating operators) if any
    // updated variable is marked.
    size_t nout = opstack[i]->output_size();
    bool any_marked_output = info.test(op_info::elimination_protected);
    for (size_t j = 0; j < nout; j++) {
      any_marked_output |= args.y(j);
    }
    if (info.test(op_info::updating) && nout == 0) {
      Dependencies dep;
      opstack[i]->dependencies_updating(args, dep);
      any_marked_output |= dep.any(args.values);
    }

    if (any_marked_output) {
      // Compact this operator's outputs (safe in place: new_index <= old).
      for (size_t k = 0; k < nout; k++) {
        Index new_index = s;
        Index old_index = args.output(k);
        var_remap[old_index] = new_index;
        values[new_index] = values[old_index];
        if (independent_variable[old_index]) {
          independent_variable[old_index] = false;
        }
        if (dependent_variable[old_index]) {
          dependent_variable[old_index] = false;
        }
        s++;
      }

      // Compact this operator's inputs, remapped to the new numbering.
      size_t nin = opstack[i]->input_size();
      for (size_t k = 0; k < nin; k++) {
        inputs[s_input] = var_remap[args.input(k)];
        s_input++;
      }
    }
    opstack[i]->increment(args.ptr);
    if (!any_marked_output) {
      opstack_deallocate[i] = true;
    }
  }

  // After the flip, 'true' means the variable survived; rebuild the
  // independent / dependent index tables in the new numbering.
  independent_variable.flip();
  dependent_variable.flip();
  std::vector<Index> new_inv_index;
  for (size_t i = 0; i < inv_index.size(); i++) {
    Index old_var = inv_index[i];
    if (independent_variable[old_var])
      new_inv_index.push_back(var_remap[old_var]);
  }
  inv_index = new_inv_index;
  std::vector<Index> new_dep_index;
  for (size_t i = 0; i < dep_index.size(); i++) {
    Index old_var = dep_index[i];
    if (dependent_variable[old_var])
      new_dep_index.push_back(var_remap[old_var]);
  }
  dep_index = new_dep_index;

  // Trim arrays to the compacted sizes and drop eliminated operators.
  inputs.resize(s_input);
  values.resize(s);
  size_t k = 0;
  for (size_t i = 0; i < opstack.size(); i++) {
    if (opstack_deallocate[i]) {
      opstack[i]->deallocate();
    } else {
      opstack[k] = opstack[i];
      k++;
    }
  }
  opstack.resize(k);

  // Dynamic operators may cache state tied to value positions; re-run the
  // forward sweep to restore a consistent tape.
  if (opstack.any.test(op_info::dynamic)) this->forward();
}
1403 
1404 global global::extract_sub() {
1405  std::vector<Index> var_remap;
1406  return extract_sub(var_remap);
1407 }
1408 
// Map each variable index to the operator that produces it. Relies on each
// operator's outputs being the contiguous range ending at args.ptr.second
// after increment().
std::vector<Index> global::var2op() {
  std::vector<Index> var2op(values.size());
  Args<> args(inputs);
  size_t j = 0;
  for (size_t i = 0; i < opstack.size(); i++) {
    opstack[i]->increment(args.ptr);
    for (; j < (size_t)args.ptr.second; j++) {
      var2op[j] = i;
    }
  }
  return var2op;
}

// Per-operator mark: true if any of the operator's output variables is
// marked in 'values'.
std::vector<bool> global::var2op(const std::vector<bool> &values) {
  std::vector<bool> ans(opstack.size(), false);
  Args<> args(inputs);
  size_t j = 0;
  for (size_t i = 0; i < opstack.size(); i++) {
    opstack[i]->increment(args.ptr);
    for (; j < (size_t)args.ptr.second; j++) {
      ans[i] = ans[i] || values[j];
    }
  }
  return ans;
}

// All variable indices produced by the operators listed in 'seq'.
std::vector<Index> global::op2var(const std::vector<Index> &seq) {
  std::vector<bool> seq_mark = mark_space(opstack.size(), seq);
  std::vector<Index> ans;
  Args<> args(inputs);
  size_t j = 0;
  for (size_t i = 0; i < opstack.size(); i++) {
    opstack[i]->increment(args.ptr);
    for (; j < (size_t)args.ptr.second; j++) {
      if (seq_mark[i]) ans.push_back(j);
    }
  }
  return ans;
}

// Variable mask: true for every variable produced by a marked operator.
std::vector<bool> global::op2var(const std::vector<bool> &seq_mark) {
  std::vector<bool> ans(values.size());
  Args<> args(inputs);
  size_t j = 0;
  for (size_t i = 0; i < opstack.size(); i++) {
    opstack[i]->increment(args.ptr);
    for (; j < (size_t)args.ptr.second; j++) {
      if (seq_mark[i]) ans[j] = true;
    }
  }
  return ans;
}

// Map each operator to the position (in var_subset) of a variable it
// produces, or NA if none. Reverse iteration makes the FIRST occurrence in
// var_subset win when an operator owns several listed variables.
std::vector<Index> global::op2idx(const std::vector<Index> &var_subset,
                                  Index NA) {
  std::vector<Index> v2o = var2op();
  std::vector<Index> op2idx(opstack.size(), NA);
  for (size_t i = var_subset.size(); i > 0;) {
    i--;
    op2idx[v2o[var_subset[i]]] = i;
  }
  return op2idx;
}

// Boolean mask of length n with true at the listed indices.
// NOTE(review): 'ind' is passed by value although only read — a const
// reference would avoid a copy, but the signature is fixed by the header.
std::vector<bool> global::mark_space(size_t n, const std::vector<Index> ind) {
  std::vector<bool> mark(n, false);
  for (size_t i = 0; i < ind.size(); i++) {
    mark[ind[i]] = true;
  }
  return mark;
}

// Variable mask of the independent variables.
std::vector<bool> global::inv_marks() {
  return mark_space(values.size(), inv_index);
}

// Variable mask of the dependent variables.
std::vector<bool> global::dep_marks() {
  return mark_space(values.size(), dep_index);
}

// Operator mask of the current subgraph.
std::vector<bool> global::subgraph_marks() {
  return mark_space(opstack.size(), subgraph_seq);
}
1492 
// Functor used by build_graph: for the operator currently indexed by the
// shared counter 'i', convert each dependent variable into a producer ->
// consumer operator edge, de-duplicated per iteration via 'op_marks'.
global::append_edges::append_edges(size_t &i, size_t num_nodes,
                                   const std::vector<bool> &keep_var,
                                   std::vector<Index> &var2op,
                                   std::vector<IndexPair> &edges)
    : i(i),
      keep_var(keep_var),
      var2op(var2op),
      edges(edges),
      op_marks(num_nodes, false),
      pos(0) {}

// Add the edge producer(dep_j) -> current operator, skipping self-edges,
// filtered-out variables, and duplicates within this iteration.
void global::append_edges::operator()(Index dep_j) {
  if (keep_var[dep_j]) {
    size_t k = var2op[dep_j];
    if (i != k && !op_marks[k]) {
      IndexPair edge;

      edge.first = k;
      edge.second = i;
      edges.push_back(edge);
      op_marks[k] = true;
    }
  }
}

// Remember where this iteration's edges begin, so end_iteration can clear
// exactly the marks set since then.
void global::append_edges::start_iteration() { pos = edges.size(); }

void global::append_edges::end_iteration() {
  size_t n = edges.size() - pos;
  for (size_t j = 0; j < n; j++) op_marks[edges[pos + j].first] = false;
}
1524 
// Build the operator graph of this tape. An edge (k, i) means operator i
// consumes a variable produced by operator k, restricted to variables
// flagged in keep_var. transpose=true reverses all edge directions.
graph global::build_graph(bool transpose, const std::vector<bool> &keep_var) {
  TMBAD_ASSERT(keep_var.size() == values.size());

  std::vector<Index> var2op = this->var2op();

  bool any_updating = false;

  Args<> args(inputs);
  std::vector<IndexPair> edges;
  Dependencies dep;
  size_t i = 0;
  // F reads 'i' by reference, so it always sees the current operator.
  append_edges F(i, opstack.size(), keep_var, var2op, edges);
  for (; i < opstack.size(); i++) {
    any_updating |= opstack[i]->info().test(op_info::updating);
    dep.clear();
    opstack[i]->dependencies(args, dep);
    F.start_iteration();
    dep.apply(F);
    F.end_iteration();
    opstack[i]->increment(args.ptr);
  }
  // Second pass for operators flagged 'updating': collect their updating
  // dependencies and flip those edges' direction.
  if (any_updating) {
    size_t begin = edges.size();
    i = 0;
    args = Args<>(inputs);
    for (; i < opstack.size(); i++) {
      dep.clear();
      opstack[i]->dependencies_updating(args, dep);
      F.start_iteration();
      dep.apply(F);
      F.end_iteration();
      opstack[i]->increment(args.ptr);
    }
    for (size_t j = begin; j < edges.size(); j++)
      std::swap(edges[j].first, edges[j].second);
  }

  if (transpose) {
    for (size_t j = 0; j < edges.size(); j++)
      std::swap(edges[j].first, edges[j].second);
  }

  graph G(opstack.size(), edges);

  // Map independent / dependent variables to their producing operators.
  for (size_t i = 0; i < inv_index.size(); i++)
    G.inv2op.push_back(var2op[inv_index[i]]);
  for (size_t i = 0; i < dep_index.size(); i++)
    G.dep2op.push_back(var2op[dep_index[i]]);
  return G;
}
1575 
1576 graph global::forward_graph(std::vector<bool> keep_var) {
1577  if (keep_var.size() == 0) {
1578  keep_var.resize(values.size(), true);
1579  }
1580  TMBAD_ASSERT(values.size() == keep_var.size());
1581  return build_graph(false, keep_var);
1582 }
1583 
1584 graph global::reverse_graph(std::vector<bool> keep_var) {
1585  if (keep_var.size() == 0) {
1586  keep_var.resize(values.size(), true);
1587  }
1588  TMBAD_ASSERT(values.size() == keep_var.size());
1589  return build_graph(true, keep_var);
1590 }
1591 
1592 bool global::identical(const global &other) const {
1593  if (inv_index != other.inv_index) return false;
1594  ;
1595  if (dep_index != other.dep_index) return false;
1596  ;
1597  if (opstack.size() != other.opstack.size()) return false;
1598  ;
1599  for (size_t i = 0; i < opstack.size(); i++) {
1600  if (opstack[i]->identifier() != other.opstack[i]->identifier())
1601  return false;
1602  ;
1603  }
1604  if (inputs != other.inputs) return false;
1605  ;
1606  if (values.size() != other.values.size()) return false;
1607  ;
1608  OperatorPure *constant = getOperator<ConstOp>();
1609  IndexPair ptr(0, 0);
1610  for (size_t i = 0; i < opstack.size(); i++) {
1611  if (opstack[i] == constant) {
1612  if (values[ptr.second] != other.values[ptr.second]) return false;
1613  ;
1614  }
1615  opstack[i]->increment(ptr);
1616  }
1617 
1618  return true;
1619 }
1620 
1621 hash_t global::hash() const {
1622  hash_t h = 37;
1623 
1624  hash(h, inv_index.size());
1625  ;
1626  for (size_t i = 0; i < inv_index.size(); i++) hash(h, inv_index[i]);
1627  ;
1628  ;
1629  hash(h, dep_index.size());
1630  ;
1631  for (size_t i = 0; i < dep_index.size(); i++) hash(h, dep_index[i]);
1632  ;
1633  ;
1634  hash(h, opstack.size());
1635  ;
1636  for (size_t i = 0; i < opstack.size(); i++) hash(h, opstack[i]);
1637  ;
1638  ;
1639  hash(h, inputs.size());
1640  ;
1641  for (size_t i = 0; i < inputs.size(); i++) hash(h, inputs[i]);
1642  ;
1643  ;
1644  hash(h, values.size());
1645  ;
1646  OperatorPure *constant = getOperator<ConstOp>();
1647  IndexPair ptr(0, 0);
1648  for (size_t i = 0; i < opstack.size(); i++) {
1649  if (opstack[i] == constant) {
1650  hash(h, values[ptr.second]);
1651  ;
1652  }
1653  opstack[i]->increment(ptr);
1654  }
1655 
1656  return h;
1657 }
1658 
// Forward hash sweep: propagate a hash value per variable through the tape.
// cfg controls invariance (deterministic ids, seeding of independents,
// inclusion of constant values, per-output spreading, reduced output).
std::vector<hash_t> global::hash_sweep(hash_config cfg) const {
  std::vector<Index> opstack_id;
  if (cfg.deterministic) {
    // Replace pointer-based identifiers by first-occurrence ranks so the
    // result does not depend on address layout.
    std::vector<size_t> tmp(opstack.size());
    for (size_t i = 0; i < tmp.size(); i++)
      tmp[i] = (size_t)opstack[i]->identifier();
    opstack_id = radix::first_occurance<Index>(tmp);
    hash_t spread = (hash_t(1) << (sizeof(hash_t) * 4)) - 1;
    for (size_t i = 0; i < opstack_id.size(); i++)
      opstack_id[i] = (opstack_id[i] + 1) * spread;
  }

  std::vector<hash_t> hash_vec(values.size(), 37);
  Dependencies dep;
  OperatorPure *inv = getOperator<InvOp>();
  OperatorPure *constant = getOperator<ConstOp>();

  if (cfg.strong_inv) {
    // Give independent variables distinct seeds (user supplied or 1..n).
    bool have_inv_seed = (cfg.inv_seed.size() > 0);
    if (have_inv_seed) {
      TMBAD_ASSERT(cfg.inv_seed.size() == inv_index.size());
    }
    for (size_t i = 0; i < inv_index.size(); i++) {
      hash_vec[inv_index[i]] += (have_inv_seed ? cfg.inv_seed[i] + 1 : (i + 1));
    }
  }

  Args<> args(inputs);
  IndexPair &ptr = args.ptr;
  for (size_t i = 0; i < opstack.size(); i++) {
    // Independent variables keep their seed hash unchanged.
    if (opstack[i] == inv) {
      opstack[i]->increment(ptr);
      continue;
    }
    dep.clear();

    opstack[i]->dependencies(args, dep);

    // Combine the hashes of all input variables...
    hash_t h = 37;
    for (size_t j = 0; j < dep.size(); j++) {
      if (j == 0)
        h = hash_vec[dep[0]];
      else
        hash(h, hash_vec[dep[j]]);
      ;
    }

    // ...then mix in the operator identity.
    if (!cfg.deterministic) {
      hash(h, opstack[i]->identifier());
      ;
    } else {
      hash(h, opstack_id[i]);
      ;
    }

    // Optionally let constants contribute their value and its sign.
    if (opstack[i] == constant && cfg.strong_const) {
      hash(h, values[ptr.second]);
      ;

      hash(h, values[ptr.second] > 0);
      ;
    }

    // Distribute the hash to each output of this operator.
    size_t noutput = opstack[i]->output_size();
    for (size_t j = 0; j < noutput; j++) {
      hash_vec[ptr.second + j] = h + j * cfg.strong_output;
    }

    opstack[i]->increment(ptr);
  }
  if (!cfg.reduce) return hash_vec;
  // Reduced output: hashes of the dependent variables only.
  std::vector<hash_t> ans(dep_index.size());
  for (size_t j = 0; j < dep_index.size(); j++) {
    ans[j] = hash_vec[dep_index[j]];
  }
  return ans;
}
1736 
1737 std::vector<hash_t> global::hash_sweep(bool weak) const {
1738  hash_config cfg;
1739  cfg.strong_inv = !weak;
1740  cfg.strong_const = true;
1741  cfg.strong_output = true;
1742  cfg.reduce = weak;
1743  cfg.deterministic = false;
1744  return hash_sweep(cfg);
1745 }
1746 
1747 void global::eliminate() {
1748  this->shrink_to_fit();
1749 
1750  std::vector<bool> marks;
1751  marks.resize(values.size(), false);
1752 
1753  for (size_t i = 0; i < inv_index.size(); i++) marks[inv_index[i]] = true;
1754  for (size_t i = 0; i < dep_index.size(); i++) marks[dep_index[i]] = true;
1755 
1756  reverse(marks);
1757 
1758  if (false) {
1759  set_subgraph(marks);
1760 
1761  *this = extract_sub();
1762  }
1763  this->extract_sub_inplace(marks);
1764  this->shrink_to_fit();
1765 }
1766 
// Defaults: no line prefix, '*' marks subgraph rows, no nested printing.
global::print_config::print_config() : prefix(""), mark("*"), depth(0) {}
1768 
// Pretty-print the tape: one row per operator plus one row per additional
// output variable, showing value, derivative (if allocated) and inputs.
void global::print(print_config cfg) {
  using std::endl;
  using std::left;
  using std::setw;
  IndexPair ptr(0, 0);
  std::vector<bool> sgm = subgraph_marks();
  bool have_subgraph = (subgraph_seq.size() > 0);
  int v = 0;
  // Nested operators are printed with reduced depth and a '##' prefix.
  print_config cfg2 = cfg;
  cfg2.depth--;
  cfg2.prefix = cfg.prefix + "##";
  Rcout << cfg.prefix;
  Rcout << setw(7) << "OpName:" << setw(7 + have_subgraph)
        << "Node:" << setw(13) << "Value:" << setw(13) << "Deriv:" << setw(13)
        << "Index:";
  Rcout << " "
        << "Inputs:";
  Rcout << endl;
  for (size_t i = 0; i < opstack.size(); i++) {
    Rcout << cfg.prefix;
    Rcout << setw(7) << opstack[i]->op_name();
    if (have_subgraph) {
      // Flag operators belonging to the current subgraph.
      if (sgm[i])
        Rcout << cfg.mark;
      else
        Rcout << " ";
    }
    Rcout << setw(7) << i;
    int numvar = opstack[i]->output_size();
    // Print at least one row even for operators without outputs.
    for (int j = 0; j < numvar + (numvar == 0); j++) {
      if (j > 0) Rcout << cfg.prefix;
      Rcout << setw((7 + 7) * (j > 0) + 13);
      if (numvar > 0)
        Rcout << values[v];
      else
        Rcout << "";
      Rcout << setw(13);
      if (numvar > 0) {
        // Derivatives exist only if a reverse sweep allocated them.
        if (derivs.size() == values.size())
          Rcout << derivs[v];
        else
          Rcout << "NA";
      } else {
        Rcout << "";
      }
      Rcout << setw(13);
      if (numvar > 0) {
        Rcout << v;
      } else {
        Rcout << "";
      }
      if (j == 0) {
        // First row: list the operator's input variable indices.
        IndexPair ptr_old = ptr;
        opstack[i]->increment(ptr);
        int ninput = ptr.first - ptr_old.first;
        for (int k = 0; k < ninput; k++) {
          if (k == 0) Rcout << " ";
          Rcout << " " << inputs[ptr_old.first + k];
        }
      }
      Rcout << endl;
      if (numvar > 0) {
        v++;
      }
    }
    if (cfg.depth > 0) opstack[i]->print(cfg2);
  }
}
1837 
// Print with the default configuration.
void global::print() { this->print(print_config()); }
1839 
// Operator base whose input/output counts are chosen at run time.
global::DynamicInputOutputOperator::DynamicInputOutputOperator(Index ninput,
                                                               Index noutput)
    : ninput_(ninput), noutput_(noutput) {}

Index global::DynamicInputOutputOperator::input_size() const {
  return this->ninput_;
}

Index global::DynamicInputOutputOperator::output_size() const {
  return this->noutput_;
}
1851 
// Display names of the independent / dependent variable marker operators.
const char *global::InvOp::op_name() { return "InvOp"; }

const char *global::DepOp::op_name() { return "DepOp"; }
1855 
// Replaying a constant re-records its value on the target tape.
void global::ConstOp::forward(ForwardArgs<Replay> &args) {
  args.y(0).addToTape();
}

const char *global::ConstOp::op_name() { return "ConstOp"; }

// Source-writer forward: emit the constant only when literals are enabled.
void global::ConstOp::forward(ForwardArgs<Writer> &args) {
  if (args.const_literals) {
    args.y(0) = args.y_const(0);
  }
}
1867 
// DataOp: block of n output values that are filled in externally.
global::DataOp::DataOp(Index n) { Base::noutput = n; }

const char *global::DataOp::op_name() { return "DataOp"; }

// Source-writer replay is not supported for DataOp.
void global::DataOp::forward(ForwardArgs<Writer> &args) { TMBAD_ASSERT(false); }
1873 
// ZeroOp: block of n output variables representing zeros.
global::ZeroOp::ZeroOp(Index n) { Base::noutput = n; }

const char *global::ZeroOp::op_name() { return "ZeroOp"; }

// Source-writer replay is not supported for ZeroOp.
void global::ZeroOp::forward(ForwardArgs<Writer> &args) { TMBAD_ASSERT(false); }

// Fill x[0..n) with the outputs of a single freshly recorded ZeroOp node.
void global::ZeroOp::operator()(Replay *x, Index n) {
  Complete<ZeroOp> Z(n);
  ad_segment y = Z(ad_segment());
  for (size_t i = 0; i < n; i++) x[i] = y[i];
}
1885 
// NullOp / NullOp2: trivial placeholder operators.
global::NullOp::NullOp() {}

const char *global::NullOp::op_name() { return "NullOp"; }

// NullOp2 additionally carries run-time input/output counts.
global::NullOp2::NullOp2(Index ninput, Index noutput)
    : global::DynamicInputOutputOperator(ninput, noutput) {}

const char *global::NullOp2::op_name() { return "NullOp2"; }
1894 
// RefOp: reference to variable i living on another tape 'glob'.
global::RefOp::RefOp(global *glob, Index i) : glob(glob), i(i) {}

// Numeric forward: fetch the value directly from the referenced tape.
void global::RefOp::forward(ForwardArgs<Scalar> &args) {
  args.y(0) = glob->values[i];
}

void global::RefOp::forward(ForwardArgs<Replay> &args) {
  if (get_glob() == this->glob) {
    // Replaying on the owning tape: collapse to a plain variable reference.
    ad_plain tmp;
    tmp.index = i;
    args.y(0) = tmp;
  } else {
    // Replaying on a different tape: record a new RefOp there.
    global::OperatorPure *pOp =
        get_glob()->getOperator<RefOp>(this->glob, this->i);
    args.y(0) =
        get_glob()->add_to_stack<RefOp>(pOp, std::vector<ad_plain>(0))[0];
  }
}

// Derivative propagates only when replaying on the owning tape.
void global::RefOp::reverse(ReverseArgs<Replay> &args) {
  if (get_glob() == this->glob) {
    args.dx(0) += args.dy(0);
  }
}

const char *global::RefOp::op_name() { return "RefOp"; }
1921 
1922 OperatorPure *global::Fuse(OperatorPure *Op1, OperatorPure *Op2) {
1923  if (Op1 == Op2)
1924  return Op1->self_fuse();
1925  else
1926  return Op1->other_fuse(Op2);
1927 }
1928 
// Enable or disable operator fusion in add_to_opstack().
void global::set_fuse(bool flag) { fuse = flag; }
1930 
// Push an operator onto the stack, first repeatedly fusing it with the
// current tail operator while fusion succeeds.
void global::add_to_opstack(OperatorPure *pOp) {
  if (fuse) {
    while (this->opstack.size() > 0) {
      OperatorPure *OpTry = this->Fuse(this->opstack.back(), pOp);
      if (OpTry == NULL) break;

      // Fusion succeeded: the old tail is absorbed into the fused operator.
      this->opstack.pop_back();
      pOp = OpTry;
    }
  }

  this->opstack.push_back(pOp);
}
1944 
// An ad_plain is initialized iff it has been assigned a variable index.
bool global::ad_plain::initialized() const { return index != NA; }

bool global::ad_plain::on_some_tape() const { return initialized(); }

// ad_plain values are always taped; just assert the index is valid.
void global::ad_plain::addToTape() const { TMBAD_ASSERT(initialized()); }

// Owning tape: the active tape when taped, NULL otherwise.
global *global::ad_plain::glob() const {
  return (on_some_tape() ? get_glob() : NULL);
}

// No-op for ad_plain (no per-object tape data to redirect).
void global::ad_plain::override_by(const ad_plain &x) const {}

global::ad_plain::ad_plain() : index(NA) {}

// Constant: recorded on the active tape via ConstOp.
global::ad_plain::ad_plain(Scalar x) {
  *this = get_glob()->add_to_stack<ConstOp>(x);
}

// Conversion from ad_aug forces the value onto the tape first.
global::ad_plain::ad_plain(ad_aug x) {
  x.addToTape();
  *this = x.taped_value;
}
1967 
// CopyOp: identity operator used to materialize a fresh variable.
Replay global::ad_plain::CopyOp::eval(Replay x0) { return x0.copy(); }

const char *global::ad_plain::CopyOp::op_name() { return "CopyOp"; }

// Create a new variable equal to *this.
ad_plain global::ad_plain::copy() const {
  ad_plain ans = get_glob()->add_to_stack<CopyOp>(*this);
  return ans;
}
1976 
// ValOp: value-only copy; it reports no input dependencies.
Replay global::ad_plain::ValOp::eval(Replay x0) { return x0.copy0(); }

void global::ad_plain::ValOp::dependencies(Args<> &args,
                                           Dependencies &dep) const {}

const char *global::ad_plain::ValOp::op_name() { return "ValOp"; }

// New variable carrying the value of *this, recorded via ValOp.
ad_plain global::ad_plain::copy0() const {
  ad_plain ans = get_glob()->add_to_stack<ValOp>(*this);
  return ans;
}
1988 
1989 ad_plain global::ad_plain::operator+(const ad_plain &other) const {
1990  ad_plain ans;
1991  ans = get_glob()->add_to_stack<AddOp>(*this, other);
1992  return ans;
1993 }
1994 
1995 ad_plain global::ad_plain::operator-(const ad_plain &other) const {
1996  ad_plain ans;
1997  ans = get_glob()->add_to_stack<SubOp>(*this, other);
1998  return ans;
1999 }
2000 
2001 ad_plain global::ad_plain::operator*(const ad_plain &other) const {
2002  ad_plain ans = get_glob()->add_to_stack<MulOp>(*this, other);
2003  return ans;
2004 }
2005 
2006 ad_plain global::ad_plain::operator*(const Scalar &other) const {
2007  ad_plain ans =
2008  get_glob()->add_to_stack<MulOp_<true, false> >(*this, ad_plain(other));
2009  return ans;
2010 }
2011 
2012 ad_plain global::ad_plain::operator/(const ad_plain &other) const {
2013  ad_plain ans = get_glob()->add_to_stack<DivOp>(*this, other);
2014  return ans;
2015 }
2016 
const char *global::ad_plain::NegOp::op_name() { return "NegOp"; }

// Unary minus: records a NegOp node on the active tape.
ad_plain global::ad_plain::operator-() const {
  ad_plain ans = get_glob()->add_to_stack<NegOp>(*this);
  return ans;
}
2023 
2024 ad_plain &global::ad_plain::operator+=(const ad_plain &other) {
2025  *this = *this + other;
2026  return *this;
2027 }
2028 
2029 ad_plain &global::ad_plain::operator-=(const ad_plain &other) {
2030  *this = *this - other;
2031  return *this;
2032 }
2033 
2034 ad_plain &global::ad_plain::operator*=(const ad_plain &other) {
2035  *this = *this * other;
2036  return *this;
2037 }
2038 
2039 ad_plain &global::ad_plain::operator/=(const ad_plain &other) {
2040  *this = *this / other;
2041  return *this;
2042 }
2043 
// Mark this variable as a dependent (output) of the active tape.
void global::ad_plain::Dependent() {
  *this = get_glob()->add_to_stack<DepOp>(*this);
  get_glob()->dep_index.push_back(this->index);
}

// Mark this variable as an independent (input) of the active tape,
// carrying over its current value when it has one.
void global::ad_plain::Independent() {
  Scalar val = (index == NA ? NAN : this->Value());
  *this = get_glob()->add_to_stack<InvOp>(val);
  get_glob()->inv_index.push_back(this->index);
}

Scalar &global::ad_plain::Value() { return get_glob()->values[index]; }

Scalar global::ad_plain::Value() const { return get_glob()->values[index]; }

// Value looked up on an explicitly given tape.
Scalar global::ad_plain::Value(global *glob) const {
  return glob->values[index];
}

Scalar &global::ad_plain::Deriv() { return get_glob()->derivs[index]; }
2064 
// Activate this tape: push it on top of the thread's tape stack.
void global::ad_start() {
  TMBAD_ASSERT2(!in_use, "Tape already in use");
  TMBAD_ASSERT(parent_glob == NULL);
  parent_glob = global_ptr[TMBAD_THREAD_NUM];
  global_ptr[TMBAD_THREAD_NUM] = this;
  in_use = true;
}

// Deactivate this tape: restore the previously active tape.
void global::ad_stop() {
  TMBAD_ASSERT2(in_use, "Tape not in use");
  global_ptr[TMBAD_THREAD_NUM] = parent_glob;
  parent_glob = NULL;
  in_use = false;
}
2079 
2080 void global::Independent(std::vector<ad_plain> &x) {
2081  for (size_t i = 0; i < x.size(); i++) {
2082  x[i].Independent();
2083  }
2084 }
2085 
// Empty segment.
global::ad_segment::ad_segment() : n(0), c(0) {}

// Vector segment of length n starting at variable x.
global::ad_segment::ad_segment(ad_plain x, size_t n) : x(x), n(n), c(1) {}

// Single-element segments.
global::ad_segment::ad_segment(ad_aug x) : x(ad_plain(x)), n(1), c(1) {}

global::ad_segment::ad_segment(Scalar x) : x(ad_plain(x)), n(1), c(1) {}
2093 
2094 global::ad_segment::ad_segment(Index idx, size_t n) : n(n) { x.index = idx; }
2095 
// Matrix-shaped segment: r*c elements starting at x; c columns.
global::ad_segment::ad_segment(ad_plain x, size_t r, size_t c)
    : x(x), n(r * c), c(c) {}
2098 
// Build a segment from n replay values. Depending on the input the result
// is empty (all zeros with zero_check), a fresh constant DataOp block, a
// reference to an already contiguous range, or a contiguous copy.
global::ad_segment::ad_segment(Replay *x, size_t n, bool zero_check)
    : n(n), c(1) {
  if (zero_check && all_zero(x, n)) return;
  if (all_constant(x, n)) {
    // All inputs are constants: store them in a new DataOp value block.
    global *glob = get_glob();
    size_t m = glob->values.size();
    Complete<DataOp> D(n);
    D(ad_segment());
    for (size_t i = 0; i < n; i++) glob->values[m + i] = x[i].Value();
    this->x.index = m;
    return;
  }
  if (!is_contiguous(x, n)) {
    // Copy element-wise to obtain a contiguous range of new variables.
    size_t before = get_glob()->values.size();
    this->x = x[0].copy();
    for (size_t i = 1; i < n; i++) x[i].copy();
    size_t after = get_glob()->values.size();
    TMBAD_ASSERT2(after - before == n,
                  "Each invocation of copy() should construct a new variable");
    return;
  }
  // Already contiguous on the active tape: reference the first element.
  if (n > 0) this->x = x[0];
}
2122 
// An uninitialized offset encodes the identically-zero segment.
bool global::ad_segment::identicalZero() { return !x.initialized(); }
2124 
2125 bool global::ad_segment::all_on_active_tape(Replay *x, size_t n) {
2126  global *cur_glob = get_glob();
2127  for (size_t i = 0; i < n; i++) {
2128  bool ok = x[i].on_some_tape() && (x[i].glob() == cur_glob);
2129  if (!ok) return false;
2130  }
2131  return true;
2132 }
2133 
2134 bool global::ad_segment::is_contiguous(Replay *x, size_t n) {
2135  if (!all_on_active_tape(x, n)) return false;
2136  for (size_t i = 1; i < n; i++) {
2137  if (x[i].index() != x[i - 1].index() + 1) return false;
2138  }
2139  return true;
2140 }
2141 
2142 bool global::ad_segment::all_zero(Replay *x, size_t n) {
2143  for (size_t i = 0; i < n; i++) {
2144  if (!x[i].identicalZero()) return false;
2145  }
2146  return true;
2147 }
2148 
2149 bool global::ad_segment::all_constant(Replay *x, size_t n) {
2150  for (size_t i = 0; i < n; i++) {
2151  if (!x[i].constant()) return false;
2152  }
2153  return true;
2154 }
2155 
size_t global::ad_segment::size() const { return n; }

size_t global::ad_segment::rows() const { return n / c; }

size_t global::ad_segment::cols() const { return c; }

// Element access: segment variables occupy consecutive indices.
ad_plain global::ad_segment::operator[](size_t i) const {
  ad_plain ans;
  ans.index = x.index + i;
  return ans;
}

// First element of the segment.
ad_plain global::ad_segment::offset() const { return x; }

Index global::ad_segment::index() const { return x.index; }
2171 
// True when this ad_aug has been recorded on some tape.
bool global::ad_aug::on_some_tape() const { return taped_value.initialized(); }
2173 
2175  return on_some_tape() && (this->glob() == get_glob());
2176 }
2177 
bool global::ad_aug::ontape() const { return on_some_tape(); }

// A constant is an ad_aug that has not been recorded on any tape.
bool global::ad_aug::constant() const { return !taped_value.initialized(); }

Index global::ad_aug::index() const { return taped_value.index; }

// Owning tape pointer, or NULL for constants.
global *global::ad_aug::glob() const {
  return (on_some_tape() ? data.glob : NULL);
}

// Value from the owning tape if taped, otherwise the stored constant.
Scalar global::ad_aug::Value() const {
  if (on_some_tape())
    return taped_value.Value(this->data.glob);
  else
    return data.value;
}
2194 
2196 
// Constant (untaped) value.
global::ad_aug::ad_aug(Scalar x) { data.value = x; }

// Wrap a taped variable from the currently active tape.
global::ad_aug::ad_aug(ad_plain x) : taped_value(x) { data.glob = get_glob(); }
2200 
2202  if (on_some_tape()) {
2203  if (data.glob != get_glob()) {
2204  TMBAD_ASSERT2(in_context_stack(data.glob), "Variable not initialized?");
2205  global::OperatorPure *pOp =
2206  get_glob()->getOperator<RefOp>(data.glob, taped_value.index);
2207  this->taped_value =
2208  get_glob()->add_to_stack<RefOp>(pOp, std::vector<ad_plain>(0))[0];
2209 
2210  this->data.glob = get_glob();
2211  }
2212  return;
2213  }
2214  this->taped_value = ad_plain(data.value);
2215  this->data.glob = get_glob();
2216 }
2217 
// Redirect this ad_aug to an existing variable on the active tape.
void global::ad_aug::override_by(const ad_plain &x) const {
  this->taped_value = x;
  this->data.glob = get_glob();
}
2222 
2224  global *cur_glob = get_glob();
2225  while (cur_glob != NULL) {
2226  if (cur_glob == glob) return true;
2227  cur_glob = cur_glob->parent_glob;
2228  }
2229  return false;
2230 }
2231 
2233  if (on_active_tape()) {
2234  return taped_value.copy();
2235  } else {
2236  ad_aug cpy = *this;
2237  cpy.addToTape();
2238  return cpy;
2239  }
2240 }
2241 
2243  ad_aug cpy = *this;
2244  if (!cpy.on_active_tape()) {
2245  cpy.addToTape();
2246  }
2247  return cpy.taped_value.copy0();
2248 }
2249 
2251  return constant() && data.value == Scalar(0);
2252 }
2253 
2255  return constant() && data.value == Scalar(1);
2256 }
2257 
// True when both operands are untaped constants.
bool global::ad_aug::bothConstant(const ad_aug &other) const {
  return constant() && other.constant();
}
2261 
// Cheap structural equality: equal constants, or the same variable index on
// the same tape. May return false for mathematically equal values.
bool global::ad_aug::identical(const ad_aug &other) const {
  if (constant() && other.constant()) return (data.value == other.data.value);

  if (glob() == other.glob())
    return (taped_value.index == other.taped_value.index);
  return false;
}
2269 
2271  if (bothConstant(other)) return Scalar(this->data.value + other.data.value);
2272  if (this->identicalZero()) return other;
2273  if (other.identicalZero()) return *this;
2274  return ad_plain(*this) + ad_plain(other);
2275 }
2276 
2278  if (bothConstant(other)) return Scalar(this->data.value - other.data.value);
2279  if (other.identicalZero()) return *this;
2280  if (this->identicalZero()) return -other;
2281  if (this->identical(other)) return Scalar(0);
2282  return ad_plain(*this) - ad_plain(other);
2283 }
2284 
2286  if (this->constant()) return Scalar(-(this->data.value));
2287  return -ad_plain(*this);
2288 }
2289 
2291  if (bothConstant(other)) return Scalar(this->data.value * other.data.value);
2292  if (this->identicalZero()) return *this;
2293  if (other.identicalZero()) return other;
2294  if (this->identicalOne()) return other;
2295  if (other.identicalOne()) return *this;
2296  if (this->constant()) return ad_plain(other) * Scalar(this->data.value);
2297  if (other.constant()) return ad_plain(*this) * Scalar(other.data.value);
2298  return ad_plain(*this) * ad_plain(other);
2299 }
2300 
2302  if (bothConstant(other)) return Scalar(this->data.value / other.data.value);
2303  if (this->identicalZero()) return *this;
2304  if (other.identicalOne()) return *this;
2305  return ad_plain(*this) / ad_plain(other);
2306 }
2307 
2309  *this = *this + other;
2310  return *this;
2311 }
2312 
2314  *this = *this - other;
2315  return *this;
2316 }
2317 
2319  *this = *this * other;
2320  return *this;
2321 }
2322 
2324  *this = *this / other;
2325  return *this;
2326 }
2327 
2329  this->addToTape();
2330  taped_value.Dependent();
2331 }
2332 
2334  taped_value.Independent();
2335  taped_value.Value() = this->data.value;
2336  this->data.glob = get_glob();
2337 }
2338 
// Mutable value: taped value when recorded, otherwise the stored constant.
// NOTE(review): this non-const overload resolves via the active tape
// (taped_value.Value() uses get_glob()), while the const overload reads
// data.glob — confirm the asymmetry is intended.
Scalar &global::ad_aug::Value() {
  if (on_some_tape())

    return taped_value.Value();
  else
    return data.value;
}

Scalar &global::ad_aug::Deriv() { return taped_value.Deriv(); }
2348 
2349 void global::Independent(std::vector<ad_aug> &x) {
2350  for (size_t i = 0; i < x.size(); i++) {
2351  x[i].Independent();
2352  }
2353 }
2354 
// Stream an ad_plain as its numeric value.
std::ostream &operator<<(std::ostream &os, const global::ad_plain &x) {
  os << x.Value();
  return os;
}

// Stream an ad_aug with tape diagnostics (value/index/tape, or constant).
std::ostream &operator<<(std::ostream &os, const global::ad_aug &x) {
  os << "{";
  if (x.on_some_tape()) {
    os << "value=" << x.data.glob->values[x.taped_value.index] << ", ";
    os << "index=" << x.taped_value.index << ", ";
    os << "tape=" << x.data.glob;
  } else {
    os << "const=" << x.data.value;
  }
  os << "}";
  return os;
}
2372 
// Thin wrappers constructing AD types directly from a variable index.
ad_plain_index::ad_plain_index(const Index &i) { this->index = i; }

ad_plain_index::ad_plain_index(const ad_plain &x) : ad_plain(x) {}

ad_aug_index::ad_aug_index(const Index &i) : ad_aug(ad_plain_index(i)) {}

ad_aug_index::ad_aug_index(const ad_aug &x) : ad_aug(x) {}

ad_aug_index::ad_aug_index(const ad_plain &x) : ad_aug(x) {}
2382 
// Identity overload so Value() works uniformly on plain scalars.
Scalar Value(Scalar x) { return x; }

// Mixed double / ad_aug arithmetic: promote the double and delegate.
ad_aug operator+(const double &x, const ad_aug &y) { return ad_aug(x) + y; }

ad_aug operator-(const double &x, const ad_aug &y) { return ad_aug(x) - y; }

ad_aug operator*(const double &x, const ad_aug &y) { return ad_aug(x) * y; }

ad_aug operator/(const double &x, const ad_aug &y) { return ad_aug(x) / y; }
2392 
// Comparisons between double and ad_adapt compare current values only;
// they are not recorded on the tape.
bool operator<(const double &x, const ad_adapt &y) { return x < y.Value(); }

bool operator<=(const double &x, const ad_adapt &y) { return x <= y.Value(); }

bool operator>(const double &x, const ad_adapt &y) { return x > y.Value(); }

bool operator>=(const double &x, const ad_adapt &y) { return x >= y.Value(); }

bool operator==(const double &x, const ad_adapt &y) { return x == y.Value(); }

bool operator!=(const double &x, const ad_adapt &y) { return x != y.Value(); }
2404 
// floor(): source-writer, tape-operator name, and AD overloads.
Writer floor(const Writer &x) {
  return "floor"
         "(" +
         x + ")";
}
const char *FloorOp::op_name() { return "FloorOp"; }
ad_plain floor(const ad_plain &x) {
  return get_glob()->add_to_stack<FloorOp>(x);
}
// Constants fold immediately; taped values record a FloorOp node.
ad_aug floor(const ad_aug &x) {
  if (x.constant())
    return Scalar(floor(x.Value()));
  else
    return floor(ad_plain(x));
}
2420 
// ceil(): source-writer, tape-operator name, and AD overloads.
Writer ceil(const Writer &x) {
  return "ceil"
         "(" +
         x + ")";
}
const char *CeilOp::op_name() { return "CeilOp"; }
ad_plain ceil(const ad_plain &x) { return get_glob()->add_to_stack<CeilOp>(x); }
// Constants fold immediately; taped values record a CeilOp node.
ad_aug ceil(const ad_aug &x) {
  if (x.constant())
    return Scalar(ceil(x.Value()));
  else
    return ceil(ad_plain(x));
}
2434 
// trunc(): source-writer, tape-operator name, and AD overloads.
Writer trunc(const Writer &x) {
  return "trunc"
         "(" +
         x + ")";
}
const char *TruncOp::op_name() { return "TruncOp"; }
ad_plain trunc(const ad_plain &x) {
  return get_glob()->add_to_stack<TruncOp>(x);
}
// Constants fold immediately; taped values record a TruncOp node.
ad_aug trunc(const ad_aug &x) {
  if (x.constant())
    return Scalar(trunc(x.Value()));
  else
    return trunc(ad_plain(x));
}
2450 
// round(): source-writer, tape-operator name, and AD overloads.
Writer round(const Writer &x) {
  return "round"
         "(" +
         x + ")";
}
const char *RoundOp::op_name() { return "RoundOp"; }
ad_plain round(const ad_plain &x) {
  return get_glob()->add_to_stack<RoundOp>(x);
}
// Constants fold immediately; taped values record a RoundOp node.
ad_aug round(const ad_aug &x) {
  if (x.constant())
    return Scalar(round(x.Value()));
  else
    return round(ad_plain(x));
}
2466 
// Sign function: +1 for x >= 0, -1 for x < 0. For NaN both comparisons are
// false, so the result is 0 (matching the original expression).
double sign(const double &x) {
  const double nonneg = (x >= 0);
  const double neg = (x < 0);
  return nonneg - neg;
}
2468 
// sign(): source-writer, tape-operator name, and AD overloads.
Writer sign(const Writer &x) {
  return "sign"
         "(" +
         x + ")";
}
const char *SignOp::op_name() { return "SignOp"; }
ad_plain sign(const ad_plain &x) { return get_glob()->add_to_stack<SignOp>(x); }
// Constants fold immediately; taped values record a SignOp node.
ad_aug sign(const ad_aug &x) {
  if (x.constant())
    return Scalar(sign(x.Value()));
  else
    return sign(ad_plain(x));
}
2482 
// Indicator of x >= 0, returned as a double (1 or 0).
double ge0(const double &x) { return (x >= 0) ? 1.0 : 0.0; }

// Indicator of x < 0, returned as a double (1 or 0).
double lt0(const double &x) { return (x < 0) ? 1.0 : 0.0; }
2486 
// ge0(): source-writer, tape-operator name, and AD overloads.
Writer ge0(const Writer &x) {
  return "ge0"
         "(" +
         x + ")";
}
const char *Ge0Op::op_name() { return "Ge0Op"; }
ad_plain ge0(const ad_plain &x) { return get_glob()->add_to_stack<Ge0Op>(x); }
// Constants fold immediately; taped values record a Ge0Op node.
ad_aug ge0(const ad_aug &x) {
  if (x.constant())
    return Scalar(ge0(x.Value()));
  else
    return ge0(ad_plain(x));
}
2500 
// lt0(): source-writer, tape-operator name, and AD overloads.
Writer lt0(const Writer &x) {
  return "lt0"
         "(" +
         x + ")";
}
const char *Lt0Op::op_name() { return "Lt0Op"; }
ad_plain lt0(const ad_plain &x) { return get_glob()->add_to_stack<Lt0Op>(x); }
// Constants fold immediately; taped values record a Lt0Op node.
ad_aug lt0(const ad_aug &x) {
  if (x.constant())
    return Scalar(lt0(x.Value()));
  else
    return lt0(ad_plain(x));
}
2514 
// fabs(): source-writer, reverse rule, and AD overloads.
Writer fabs(const Writer &x) {
  return "fabs"
         "(" +
         x + ")";
}
// Reverse mode: d/dx |x| = sign(x).
void AbsOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0)) args.dx(0) += args.dy(0) * sign(args.x(0));
}
const char *AbsOp::op_name() { return "AbsOp"; }
ad_plain fabs(const ad_plain &x) { return get_glob()->add_to_stack<AbsOp>(x); }
// Constants fold immediately; taped values record an AbsOp node.
ad_aug fabs(const ad_aug &x) {
  if (x.constant())
    return Scalar(fabs(x.Value()));
  else
    return fabs(ad_plain(x));
}
ad_adapt fabs(const ad_adapt &x) { return ad_adapt(fabs(ad_aug(x))); }
2533 
// sin(): source-writer, reverse rule, and AD overloads.
Writer sin(const Writer &x) {
  return "sin"
         "(" +
         x + ")";
}
// Reverse mode: d/dx sin(x) = cos(x).
void SinOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0)) args.dx(0) += args.dy(0) * cos(args.x(0));
}
const char *SinOp::op_name() { return "SinOp"; }
ad_plain sin(const ad_plain &x) { return get_glob()->add_to_stack<SinOp>(x); }
// Constants fold immediately; taped values record a SinOp node.
ad_aug sin(const ad_aug &x) {
  if (x.constant())
    return Scalar(sin(x.Value()));
  else
    return sin(ad_plain(x));
}
ad_adapt sin(const ad_adapt &x) { return ad_adapt(sin(ad_aug(x))); }
2552 
// cos(): source-writer, reverse rule, and AD overloads.
Writer cos(const Writer &x) {
  return "cos"
         "(" +
         x + ")";
}
// Reverse mode: d/dx cos(x) = -sin(x).
void CosOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0)) args.dx(0) += args.dy(0) * -sin(args.x(0));
}
const char *CosOp::op_name() { return "CosOp"; }
ad_plain cos(const ad_plain &x) { return get_glob()->add_to_stack<CosOp>(x); }
// Constants fold immediately; taped values record a CosOp node.
ad_aug cos(const ad_aug &x) {
  if (x.constant())
    return Scalar(cos(x.Value()));
  else
    return cos(ad_plain(x));
}
ad_adapt cos(const ad_adapt &x) { return ad_adapt(cos(ad_aug(x))); }
2571 
// exp(): source-writer, reverse rule, and AD overloads.
Writer exp(const Writer &x) {
  return "exp"
         "(" +
         x + ")";
}
// Reverse mode: d/dx exp(x) = exp(x), i.e. the stored output y.
void ExpOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0)) args.dx(0) += args.dy(0) * args.y(0);
}
const char *ExpOp::op_name() { return "ExpOp"; }
ad_plain exp(const ad_plain &x) { return get_glob()->add_to_stack<ExpOp>(x); }
// Constants fold immediately; taped values record an ExpOp node.
ad_aug exp(const ad_aug &x) {
  if (x.constant())
    return Scalar(exp(x.Value()));
  else
    return exp(ad_plain(x));
}
ad_adapt exp(const ad_adapt &x) { return ad_adapt(exp(ad_aug(x))); }
2590 
// log(): source-writer, reverse rule, and AD overloads.
Writer log(const Writer &x) {
  return "log"
         "(" +
         x + ")";
}
// Reverse mode: d/dx log(x) = 1/x.
void LogOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0)) args.dx(0) += args.dy(0) * Type(1.) / args.x(0);
}
const char *LogOp::op_name() { return "LogOp"; }
ad_plain log(const ad_plain &x) { return get_glob()->add_to_stack<LogOp>(x); }
// Constants fold immediately; taped values record a LogOp node.
ad_aug log(const ad_aug &x) {
  if (x.constant())
    return Scalar(log(x.Value()));
  else
    return log(ad_plain(x));
}
ad_adapt log(const ad_adapt &x) { return ad_adapt(log(ad_aug(x))); }
2609 
// sqrt(): source-writer, reverse rule, and AD overloads.
Writer sqrt(const Writer &x) {
  return "sqrt"
         "(" +
         x + ")";
}
// Reverse mode: d/dx sqrt(x) = 0.5 / sqrt(x), i.e. 0.5 / y.
void SqrtOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0)) args.dx(0) += args.dy(0) * Type(0.5) / args.y(0);
}
const char *SqrtOp::op_name() { return "SqrtOp"; }
ad_plain sqrt(const ad_plain &x) { return get_glob()->add_to_stack<SqrtOp>(x); }
// Constants fold immediately; taped values record a SqrtOp node.
ad_aug sqrt(const ad_aug &x) {
  if (x.constant())
    return Scalar(sqrt(x.Value()));
  else
    return sqrt(ad_plain(x));
}
ad_adapt sqrt(const ad_adapt &x) { return ad_adapt(sqrt(ad_aug(x))); }
2628 
2629 Writer tan(const Writer &x) {
2630  return "tan"
2631  "(" +
2632  x + ")";
2633 }
2634 void TanOp::reverse(ReverseArgs<Scalar> &args) {
2635  typedef Scalar Type;
2636  if (args.dy(0) != Type(0))
2637  args.dx(0) += args.dy(0) * Type(1.) / (cos(args.x(0)) * cos(args.x(0)));
2638 }
2639 const char *TanOp::op_name() { return "TanOp"; }
2640 ad_plain tan(const ad_plain &x) { return get_glob()->add_to_stack<TanOp>(x); }
2641 ad_aug tan(const ad_aug &x) {
2642  if (x.constant())
2643  return Scalar(tan(x.Value()));
2644  else
2645  return tan(ad_plain(x));
2646 }
2647 ad_adapt tan(const ad_adapt &x) { return ad_adapt(tan(ad_aug(x))); }
2648 
2649 Writer sinh(const Writer &x) {
2650  return "sinh"
2651  "(" +
2652  x + ")";
2653 }
2654 void SinhOp::reverse(ReverseArgs<Scalar> &args) {
2655  typedef Scalar Type;
2656  if (args.dy(0) != Type(0)) args.dx(0) += args.dy(0) * cosh(args.x(0));
2657 }
2658 const char *SinhOp::op_name() { return "SinhOp"; }
2659 ad_plain sinh(const ad_plain &x) { return get_glob()->add_to_stack<SinhOp>(x); }
2660 ad_aug sinh(const ad_aug &x) {
2661  if (x.constant())
2662  return Scalar(sinh(x.Value()));
2663  else
2664  return sinh(ad_plain(x));
2665 }
2666 ad_adapt sinh(const ad_adapt &x) { return ad_adapt(sinh(ad_aug(x))); }
2667 
2668 Writer cosh(const Writer &x) {
2669  return "cosh"
2670  "(" +
2671  x + ")";
2672 }
2673 void CoshOp::reverse(ReverseArgs<Scalar> &args) {
2674  typedef Scalar Type;
2675  if (args.dy(0) != Type(0)) args.dx(0) += args.dy(0) * sinh(args.x(0));
2676 }
2677 const char *CoshOp::op_name() { return "CoshOp"; }
2678 ad_plain cosh(const ad_plain &x) { return get_glob()->add_to_stack<CoshOp>(x); }
2679 ad_aug cosh(const ad_aug &x) {
2680  if (x.constant())
2681  return Scalar(cosh(x.Value()));
2682  else
2683  return cosh(ad_plain(x));
2684 }
2685 ad_adapt cosh(const ad_adapt &x) { return ad_adapt(cosh(ad_aug(x))); }
2686 
2687 Writer tanh(const Writer &x) {
2688  return "tanh"
2689  "(" +
2690  x + ")";
2691 }
2692 void TanhOp::reverse(ReverseArgs<Scalar> &args) {
2693  typedef Scalar Type;
2694  if (args.dy(0) != Type(0))
2695  args.dx(0) += args.dy(0) * Type(1.) / (cosh(args.x(0)) * cosh(args.x(0)));
2696 }
2697 const char *TanhOp::op_name() { return "TanhOp"; }
2698 ad_plain tanh(const ad_plain &x) { return get_glob()->add_to_stack<TanhOp>(x); }
2699 ad_aug tanh(const ad_aug &x) {
2700  if (x.constant())
2701  return Scalar(tanh(x.Value()));
2702  else
2703  return tanh(ad_plain(x));
2704 }
2705 ad_adapt tanh(const ad_adapt &x) { return ad_adapt(tanh(ad_aug(x))); }
2706 
2707 Writer expm1(const Writer &x) {
2708  return "expm1"
2709  "(" +
2710  x + ")";
2711 }
2712 void Expm1::reverse(ReverseArgs<Scalar> &args) {
2713  typedef Scalar Type;
2714  if (args.dy(0) != Type(0)) args.dx(0) += args.dy(0) * args.y(0) + Type(1.);
2715 }
2716 const char *Expm1::op_name() { return "Expm1"; }
2717 ad_plain expm1(const ad_plain &x) { return get_glob()->add_to_stack<Expm1>(x); }
2718 ad_aug expm1(const ad_aug &x) {
2719  if (x.constant())
2720  return Scalar(expm1(x.Value()));
2721  else
2722  return expm1(ad_plain(x));
2723 }
2724 ad_adapt expm1(const ad_adapt &x) { return ad_adapt(expm1(ad_aug(x))); }
2725 
// Scalar primitives log1p .. atanh. Each one provides: a Writer overload
// (C source generation), a reverse-mode adjoint update (skipped when the
// incoming adjoint dy is zero), an op name, and tape overloads for
// ad_plain / ad_aug (with constant folding) / ad_adapt.

Writer log1p(const Writer &x) {
  return "log1p"
         "(" +
         x + ")";
}
// d/dx log1p(x) = 1 / (x + 1)
void Log1p::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0))
    args.dx(0) += args.dy(0) * Type(1.) / (args.x(0) + Type(1.));
}
const char *Log1p::op_name() { return "Log1p"; }
ad_plain log1p(const ad_plain &x) { return get_glob()->add_to_stack<Log1p>(x); }
ad_aug log1p(const ad_aug &x) {
  if (x.constant())
    return Scalar(log1p(x.Value()));
  else
    return log1p(ad_plain(x));
}
ad_adapt log1p(const ad_adapt &x) { return ad_adapt(log1p(ad_aug(x))); }

Writer asin(const Writer &x) {
  return "asin"
         "(" +
         x + ")";
}
// d/dx asin(x) = 1 / sqrt(1 - x^2)
void AsinOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0))
    args.dx(0) +=
        args.dy(0) * Type(1.) / sqrt(Type(1.) - args.x(0) * args.x(0));
}
const char *AsinOp::op_name() { return "AsinOp"; }
ad_plain asin(const ad_plain &x) { return get_glob()->add_to_stack<AsinOp>(x); }
ad_aug asin(const ad_aug &x) {
  if (x.constant())
    return Scalar(asin(x.Value()));
  else
    return asin(ad_plain(x));
}
ad_adapt asin(const ad_adapt &x) { return ad_adapt(asin(ad_aug(x))); }

Writer acos(const Writer &x) {
  return "acos"
         "(" +
         x + ")";
}
// d/dx acos(x) = -1 / sqrt(1 - x^2)
void AcosOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0))
    args.dx(0) +=
        args.dy(0) * Type(-1.) / sqrt(Type(1.) - args.x(0) * args.x(0));
}
const char *AcosOp::op_name() { return "AcosOp"; }
ad_plain acos(const ad_plain &x) { return get_glob()->add_to_stack<AcosOp>(x); }
ad_aug acos(const ad_aug &x) {
  if (x.constant())
    return Scalar(acos(x.Value()));
  else
    return acos(ad_plain(x));
}
ad_adapt acos(const ad_adapt &x) { return ad_adapt(acos(ad_aug(x))); }

Writer atan(const Writer &x) {
  return "atan"
         "(" +
         x + ")";
}
// d/dx atan(x) = 1 / (1 + x^2)
void AtanOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0))
    args.dx(0) += args.dy(0) * Type(1.) / (Type(1.) + args.x(0) * args.x(0));
}
const char *AtanOp::op_name() { return "AtanOp"; }
ad_plain atan(const ad_plain &x) { return get_glob()->add_to_stack<AtanOp>(x); }
ad_aug atan(const ad_aug &x) {
  if (x.constant())
    return Scalar(atan(x.Value()));
  else
    return atan(ad_plain(x));
}
ad_adapt atan(const ad_adapt &x) { return ad_adapt(atan(ad_aug(x))); }

Writer asinh(const Writer &x) {
  return "asinh"
         "(" +
         x + ")";
}
// d/dx asinh(x) = 1 / sqrt(x^2 + 1)
void AsinhOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0))
    args.dx(0) +=
        args.dy(0) * Type(1.) / sqrt(args.x(0) * args.x(0) + Type(1.));
}
const char *AsinhOp::op_name() { return "AsinhOp"; }
ad_plain asinh(const ad_plain &x) {
  return get_glob()->add_to_stack<AsinhOp>(x);
}
ad_aug asinh(const ad_aug &x) {
  if (x.constant())
    return Scalar(asinh(x.Value()));
  else
    return asinh(ad_plain(x));
}
ad_adapt asinh(const ad_adapt &x) { return ad_adapt(asinh(ad_aug(x))); }

Writer acosh(const Writer &x) {
  return "acosh"
         "(" +
         x + ")";
}
// d/dx acosh(x) = 1 / sqrt(x^2 - 1)
void AcoshOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0))
    args.dx(0) +=
        args.dy(0) * Type(1.) / sqrt(args.x(0) * args.x(0) - Type(1.));
}
const char *AcoshOp::op_name() { return "AcoshOp"; }
ad_plain acosh(const ad_plain &x) {
  return get_glob()->add_to_stack<AcoshOp>(x);
}
ad_aug acosh(const ad_aug &x) {
  if (x.constant())
    return Scalar(acosh(x.Value()));
  else
    return acosh(ad_plain(x));
}
ad_adapt acosh(const ad_adapt &x) { return ad_adapt(acosh(ad_aug(x))); }

Writer atanh(const Writer &x) {
  return "atanh"
         "(" +
         x + ")";
}
// d/dx atanh(x) = 1 / (1 - x^2)
void AtanhOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0))
    args.dx(0) += args.dy(0) * Type(1.) / (Type(1) - args.x(0) * args.x(0));
}
const char *AtanhOp::op_name() { return "AtanhOp"; }
ad_plain atanh(const ad_plain &x) {
  return get_glob()->add_to_stack<AtanhOp>(x);
}
ad_aug atanh(const ad_aug &x) {
  if (x.constant())
    return Scalar(atanh(x.Value()));
  else
    return atanh(ad_plain(x));
}
ad_adapt atanh(const ad_adapt &x) { return ad_adapt(atanh(ad_aug(x))); }
2875 
// Binary scalar primitives pow / atan2 / max / min. Pattern per operator:
// a Writer overload emitting "f(<x1>,<x2>)" for code generation, the op
// name, and tape overloads for ad_plain / ad_aug (constant folding only
// when BOTH operands are constants) / ad_adapt. The forward/reverse sweeps
// for these operators are defined elsewhere in the file.

Writer pow(const Writer &x1, const Writer &x2) {
  return "pow"
         "(" +
         x1 + "," + x2 + ")";
}
const char *PowOp::op_name() { return "PowOp"; }
ad_plain pow(const ad_plain &x1, const ad_plain &x2) {
  return get_glob()->add_to_stack<PowOp>(x1, x2);
}
ad_aug pow(const ad_aug &x1, const ad_aug &x2) {
  if (x1.constant() && x2.constant())
    return Scalar(pow(x1.Value(), x2.Value()));
  else
    return pow(ad_plain(x1), ad_plain(x2));
}
ad_adapt pow(const ad_adapt &x1, const ad_adapt &x2) {
  return ad_adapt(pow(ad_aug(x1), ad_aug(x2)));
}

Writer atan2(const Writer &x1, const Writer &x2) {
  return "atan2"
         "(" +
         x1 + "," + x2 + ")";
}
const char *Atan2::op_name() { return "Atan2"; }
ad_plain atan2(const ad_plain &x1, const ad_plain &x2) {
  return get_glob()->add_to_stack<Atan2>(x1, x2);
}
ad_aug atan2(const ad_aug &x1, const ad_aug &x2) {
  if (x1.constant() && x2.constant())
    return Scalar(atan2(x1.Value(), x2.Value()));
  else
    return atan2(ad_plain(x1), ad_plain(x2));
}
ad_adapt atan2(const ad_adapt &x1, const ad_adapt &x2) {
  return ad_adapt(atan2(ad_aug(x1), ad_aug(x2)));
}

Writer max(const Writer &x1, const Writer &x2) {
  return "max"
         "(" +
         x1 + "," + x2 + ")";
}
const char *MaxOp::op_name() { return "MaxOp"; }
ad_plain max(const ad_plain &x1, const ad_plain &x2) {
  return get_glob()->add_to_stack<MaxOp>(x1, x2);
}
ad_aug max(const ad_aug &x1, const ad_aug &x2) {
  if (x1.constant() && x2.constant())
    return Scalar(max(x1.Value(), x2.Value()));
  else
    return max(ad_plain(x1), ad_plain(x2));
}
ad_adapt max(const ad_adapt &x1, const ad_adapt &x2) {
  return ad_adapt(max(ad_aug(x1), ad_aug(x2)));
}

Writer min(const Writer &x1, const Writer &x2) {
  return "min"
         "(" +
         x1 + "," + x2 + ")";
}
const char *MinOp::op_name() { return "MinOp"; }
ad_plain min(const ad_plain &x1, const ad_plain &x2) {
  return get_glob()->add_to_stack<MinOp>(x1, x2);
}
ad_aug min(const ad_aug &x1, const ad_aug &x2) {
  if (x1.constant() && x2.constant())
    return Scalar(min(x1.Value(), x2.Value()));
  else
    return min(ad_plain(x1), ad_plain(x2));
}
ad_adapt min(const ad_adapt &x1, const ad_adapt &x2) {
  return ad_adapt(min(ad_aug(x1), ad_aug(x2)));
}
// CondExpXxOp implements the four-argument conditional
//     y = (x0 OP x1 ? x2 : x3)
// for each comparison OP (==, !=, >, <, >=, <=). Each operator supplies:
//   forward/reverse over Scalar : plain evaluation / adjoint routing.
//     Only x2/x3 receive adjoint contributions; the comparison operands
//     x0/x1 are treated as non-differentiable.
//   forward/reverse over Replay : re-taping; the branch choice is itself
//     re-recorded as a CondExpXx node so derivatives follow the branch
//     taken at replay time.
//   forward/reverse over Writer : C source generation of an if/else.
//   op_name()                   : "CExpXx" label for graph printouts.
// Followed by the user-facing CondExpXx overloads for Scalar, ad_plain
// (records the 4-input node on the tape) and ad_aug (constant-folds when
// both comparison operands are constants).

// ---- CondExpEq: y = (x0 == x1 ? x2 : x3) ----
void CondExpEqOp::forward(ForwardArgs<Scalar> &args) {
  if (args.x(0) == args.x(1)) {
    args.y(0) = args.x(2);
  } else {
    args.y(0) = args.x(3);
  }
}
void CondExpEqOp::reverse(ReverseArgs<Scalar> &args) {
  if (args.x(0) == args.x(1)) {
    args.dx(2) += args.dy(0);
  } else {
    args.dx(3) += args.dy(0);
  }
}
void CondExpEqOp::forward(ForwardArgs<Replay> &args) {
  args.y(0) = CondExpEq(args.x(0), args.x(1), args.x(2), args.x(3));
}
void CondExpEqOp::reverse(ReverseArgs<Replay> &args) {
  Replay zero(0);
  args.dx(2) += CondExpEq(args.x(0), args.x(1), args.dy(0), zero);
  args.dx(3) += CondExpEq(args.x(0), args.x(1), zero, args.dy(0));
}
void CondExpEqOp::forward(ForwardArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << "==" << args.x(1) << ") ";
  args.y(0) = args.x(2);
  w << " else ";
  args.y(0) = args.x(3);
}
void CondExpEqOp::reverse(ReverseArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << "==" << args.x(1) << ") ";
  args.dx(2) += args.dy(0);
  w << " else ";
  args.dx(3) += args.dy(0);
}
const char *CondExpEqOp::op_name() {
  return "CExp"
         "Eq";
}
Scalar CondExpEq(const Scalar &x0, const Scalar &x1, const Scalar &x2,
                 const Scalar &x3) {
  if (x0 == x1)
    return x2;
  else
    return x3;
}
ad_plain CondExpEq(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
                   const ad_plain &x3) {
  OperatorPure *pOp = get_glob()->getOperator<CondExpEqOp>();
  std::vector<ad_plain> x(4);
  x[0] = x0;
  x[1] = x1;
  x[2] = x2;
  x[3] = x3;
  std::vector<ad_plain> y = get_glob()->add_to_stack<CondExpEqOp>(pOp, x);
  return y[0];
}
ad_aug CondExpEq(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
                 const ad_aug &x3) {
  if (x0.constant() && x1.constant()) {
    if (x0.Value() == x1.Value())
      return x2;
    else
      return x3;
  } else {
    return CondExpEq(ad_plain(x0), ad_plain(x1), ad_plain(x2), ad_plain(x3));
  }
}
// ---- CondExpNe: y = (x0 != x1 ? x2 : x3) ----
void CondExpNeOp::forward(ForwardArgs<Scalar> &args) {
  if (args.x(0) != args.x(1)) {
    args.y(0) = args.x(2);
  } else {
    args.y(0) = args.x(3);
  }
}
void CondExpNeOp::reverse(ReverseArgs<Scalar> &args) {
  if (args.x(0) != args.x(1)) {
    args.dx(2) += args.dy(0);
  } else {
    args.dx(3) += args.dy(0);
  }
}
void CondExpNeOp::forward(ForwardArgs<Replay> &args) {
  args.y(0) = CondExpNe(args.x(0), args.x(1), args.x(2), args.x(3));
}
void CondExpNeOp::reverse(ReverseArgs<Replay> &args) {
  Replay zero(0);
  args.dx(2) += CondExpNe(args.x(0), args.x(1), args.dy(0), zero);
  args.dx(3) += CondExpNe(args.x(0), args.x(1), zero, args.dy(0));
}
void CondExpNeOp::forward(ForwardArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << "!=" << args.x(1) << ") ";
  args.y(0) = args.x(2);
  w << " else ";
  args.y(0) = args.x(3);
}
void CondExpNeOp::reverse(ReverseArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << "!=" << args.x(1) << ") ";
  args.dx(2) += args.dy(0);
  w << " else ";
  args.dx(3) += args.dy(0);
}
const char *CondExpNeOp::op_name() {
  return "CExp"
         "Ne";
}
Scalar CondExpNe(const Scalar &x0, const Scalar &x1, const Scalar &x2,
                 const Scalar &x3) {
  if (x0 != x1)
    return x2;
  else
    return x3;
}
ad_plain CondExpNe(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
                   const ad_plain &x3) {
  OperatorPure *pOp = get_glob()->getOperator<CondExpNeOp>();
  std::vector<ad_plain> x(4);
  x[0] = x0;
  x[1] = x1;
  x[2] = x2;
  x[3] = x3;
  std::vector<ad_plain> y = get_glob()->add_to_stack<CondExpNeOp>(pOp, x);
  return y[0];
}
ad_aug CondExpNe(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
                 const ad_aug &x3) {
  if (x0.constant() && x1.constant()) {
    if (x0.Value() != x1.Value())
      return x2;
    else
      return x3;
  } else {
    return CondExpNe(ad_plain(x0), ad_plain(x1), ad_plain(x2), ad_plain(x3));
  }
}
// ---- CondExpGt: y = (x0 > x1 ? x2 : x3) ----
void CondExpGtOp::forward(ForwardArgs<Scalar> &args) {
  if (args.x(0) > args.x(1)) {
    args.y(0) = args.x(2);
  } else {
    args.y(0) = args.x(3);
  }
}
void CondExpGtOp::reverse(ReverseArgs<Scalar> &args) {
  if (args.x(0) > args.x(1)) {
    args.dx(2) += args.dy(0);
  } else {
    args.dx(3) += args.dy(0);
  }
}
void CondExpGtOp::forward(ForwardArgs<Replay> &args) {
  args.y(0) = CondExpGt(args.x(0), args.x(1), args.x(2), args.x(3));
}
void CondExpGtOp::reverse(ReverseArgs<Replay> &args) {
  Replay zero(0);
  args.dx(2) += CondExpGt(args.x(0), args.x(1), args.dy(0), zero);
  args.dx(3) += CondExpGt(args.x(0), args.x(1), zero, args.dy(0));
}
void CondExpGtOp::forward(ForwardArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << ">" << args.x(1) << ") ";
  args.y(0) = args.x(2);
  w << " else ";
  args.y(0) = args.x(3);
}
void CondExpGtOp::reverse(ReverseArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << ">" << args.x(1) << ") ";
  args.dx(2) += args.dy(0);
  w << " else ";
  args.dx(3) += args.dy(0);
}
const char *CondExpGtOp::op_name() {
  return "CExp"
         "Gt";
}
Scalar CondExpGt(const Scalar &x0, const Scalar &x1, const Scalar &x2,
                 const Scalar &x3) {
  if (x0 > x1)
    return x2;
  else
    return x3;
}
ad_plain CondExpGt(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
                   const ad_plain &x3) {
  OperatorPure *pOp = get_glob()->getOperator<CondExpGtOp>();
  std::vector<ad_plain> x(4);
  x[0] = x0;
  x[1] = x1;
  x[2] = x2;
  x[3] = x3;
  std::vector<ad_plain> y = get_glob()->add_to_stack<CondExpGtOp>(pOp, x);
  return y[0];
}
ad_aug CondExpGt(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
                 const ad_aug &x3) {
  if (x0.constant() && x1.constant()) {
    if (x0.Value() > x1.Value())
      return x2;
    else
      return x3;
  } else {
    return CondExpGt(ad_plain(x0), ad_plain(x1), ad_plain(x2), ad_plain(x3));
  }
}
// ---- CondExpLt: y = (x0 < x1 ? x2 : x3) ----
void CondExpLtOp::forward(ForwardArgs<Scalar> &args) {
  if (args.x(0) < args.x(1)) {
    args.y(0) = args.x(2);
  } else {
    args.y(0) = args.x(3);
  }
}
void CondExpLtOp::reverse(ReverseArgs<Scalar> &args) {
  if (args.x(0) < args.x(1)) {
    args.dx(2) += args.dy(0);
  } else {
    args.dx(3) += args.dy(0);
  }
}
void CondExpLtOp::forward(ForwardArgs<Replay> &args) {
  args.y(0) = CondExpLt(args.x(0), args.x(1), args.x(2), args.x(3));
}
void CondExpLtOp::reverse(ReverseArgs<Replay> &args) {
  Replay zero(0);
  args.dx(2) += CondExpLt(args.x(0), args.x(1), args.dy(0), zero);
  args.dx(3) += CondExpLt(args.x(0), args.x(1), zero, args.dy(0));
}
void CondExpLtOp::forward(ForwardArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << "<" << args.x(1) << ") ";
  args.y(0) = args.x(2);
  w << " else ";
  args.y(0) = args.x(3);
}
void CondExpLtOp::reverse(ReverseArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << "<" << args.x(1) << ") ";
  args.dx(2) += args.dy(0);
  w << " else ";
  args.dx(3) += args.dy(0);
}
const char *CondExpLtOp::op_name() {
  return "CExp"
         "Lt";
}
Scalar CondExpLt(const Scalar &x0, const Scalar &x1, const Scalar &x2,
                 const Scalar &x3) {
  if (x0 < x1)
    return x2;
  else
    return x3;
}
ad_plain CondExpLt(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
                   const ad_plain &x3) {
  OperatorPure *pOp = get_glob()->getOperator<CondExpLtOp>();
  std::vector<ad_plain> x(4);
  x[0] = x0;
  x[1] = x1;
  x[2] = x2;
  x[3] = x3;
  std::vector<ad_plain> y = get_glob()->add_to_stack<CondExpLtOp>(pOp, x);
  return y[0];
}
ad_aug CondExpLt(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
                 const ad_aug &x3) {
  if (x0.constant() && x1.constant()) {
    if (x0.Value() < x1.Value())
      return x2;
    else
      return x3;
  } else {
    return CondExpLt(ad_plain(x0), ad_plain(x1), ad_plain(x2), ad_plain(x3));
  }
}
// ---- CondExpGe: y = (x0 >= x1 ? x2 : x3) ----
void CondExpGeOp::forward(ForwardArgs<Scalar> &args) {
  if (args.x(0) >= args.x(1)) {
    args.y(0) = args.x(2);
  } else {
    args.y(0) = args.x(3);
  }
}
void CondExpGeOp::reverse(ReverseArgs<Scalar> &args) {
  if (args.x(0) >= args.x(1)) {
    args.dx(2) += args.dy(0);
  } else {
    args.dx(3) += args.dy(0);
  }
}
void CondExpGeOp::forward(ForwardArgs<Replay> &args) {
  args.y(0) = CondExpGe(args.x(0), args.x(1), args.x(2), args.x(3));
}
void CondExpGeOp::reverse(ReverseArgs<Replay> &args) {
  Replay zero(0);
  args.dx(2) += CondExpGe(args.x(0), args.x(1), args.dy(0), zero);
  args.dx(3) += CondExpGe(args.x(0), args.x(1), zero, args.dy(0));
}
void CondExpGeOp::forward(ForwardArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << ">=" << args.x(1) << ") ";
  args.y(0) = args.x(2);
  w << " else ";
  args.y(0) = args.x(3);
}
void CondExpGeOp::reverse(ReverseArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << ">=" << args.x(1) << ") ";
  args.dx(2) += args.dy(0);
  w << " else ";
  args.dx(3) += args.dy(0);
}
const char *CondExpGeOp::op_name() {
  return "CExp"
         "Ge";
}
Scalar CondExpGe(const Scalar &x0, const Scalar &x1, const Scalar &x2,
                 const Scalar &x3) {
  if (x0 >= x1)
    return x2;
  else
    return x3;
}
ad_plain CondExpGe(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
                   const ad_plain &x3) {
  OperatorPure *pOp = get_glob()->getOperator<CondExpGeOp>();
  std::vector<ad_plain> x(4);
  x[0] = x0;
  x[1] = x1;
  x[2] = x2;
  x[3] = x3;
  std::vector<ad_plain> y = get_glob()->add_to_stack<CondExpGeOp>(pOp, x);
  return y[0];
}
ad_aug CondExpGe(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
                 const ad_aug &x3) {
  if (x0.constant() && x1.constant()) {
    if (x0.Value() >= x1.Value())
      return x2;
    else
      return x3;
  } else {
    return CondExpGe(ad_plain(x0), ad_plain(x1), ad_plain(x2), ad_plain(x3));
  }
}
// ---- CondExpLe: y = (x0 <= x1 ? x2 : x3) ----
void CondExpLeOp::forward(ForwardArgs<Scalar> &args) {
  if (args.x(0) <= args.x(1)) {
    args.y(0) = args.x(2);
  } else {
    args.y(0) = args.x(3);
  }
}
void CondExpLeOp::reverse(ReverseArgs<Scalar> &args) {
  if (args.x(0) <= args.x(1)) {
    args.dx(2) += args.dy(0);
  } else {
    args.dx(3) += args.dy(0);
  }
}
void CondExpLeOp::forward(ForwardArgs<Replay> &args) {
  args.y(0) = CondExpLe(args.x(0), args.x(1), args.x(2), args.x(3));
}
void CondExpLeOp::reverse(ReverseArgs<Replay> &args) {
  Replay zero(0);
  args.dx(2) += CondExpLe(args.x(0), args.x(1), args.dy(0), zero);
  args.dx(3) += CondExpLe(args.x(0), args.x(1), zero, args.dy(0));
}
void CondExpLeOp::forward(ForwardArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << "<=" << args.x(1) << ") ";
  args.y(0) = args.x(2);
  w << " else ";
  args.y(0) = args.x(3);
}
void CondExpLeOp::reverse(ReverseArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << "<=" << args.x(1) << ") ";
  args.dx(2) += args.dy(0);
  w << " else ";
  args.dx(3) += args.dy(0);
}
const char *CondExpLeOp::op_name() {
  return "CExp"
         "Le";
}
Scalar CondExpLe(const Scalar &x0, const Scalar &x1, const Scalar &x2,
                 const Scalar &x3) {
  if (x0 <= x1)
    return x2;
  else
    return x3;
}
ad_plain CondExpLe(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
                   const ad_plain &x3) {
  OperatorPure *pOp = get_glob()->getOperator<CondExpLeOp>();
  std::vector<ad_plain> x(4);
  x[0] = x0;
  x[1] = x1;
  x[2] = x2;
  x[3] = x3;
  std::vector<ad_plain> y = get_glob()->add_to_stack<CondExpLeOp>(pOp, x);
  return y[0];
}
ad_aug CondExpLe(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
                 const ad_aug &x3) {
  if (x0.constant() && x1.constant()) {
    if (x0.Value() <= x1.Value())
      return x2;
    else
      return x3;
  } else {
    return CondExpLe(ad_plain(x0), ad_plain(x1), ad_plain(x2), ad_plain(x3));
  }
}
3365 
// SumOp: reduces its n inputs to a single output. The forward/reverse
// sweeps are defined elsewhere; only the shape metadata lives here.
Index SumOp::input_size() const { return n; }

Index SumOp::output_size() const { return 1; }

SumOp::SumOp(size_t n) : n(n) {}

const char *SumOp::op_name() { return "SumOp"; }
3373 
// LogSpaceSumOp: y = log( sum_i exp(x_i) ) over n inputs.
Index LogSpaceSumOp::input_size() const { return this->n; }

Index LogSpaceSumOp::output_size() const { return 1; }

LogSpaceSumOp::LogSpaceSumOp(size_t n) : n(n) {}

// Plain evaluation using the standard log-sum-exp max shift:
// subtract the maximum before exponentiating so no term overflows,
// then add it back after the log.
void LogSpaceSumOp::forward(ForwardArgs<Scalar> &args) {
  Scalar Max = -INFINITY;
  for (size_t i = 0; i < n; i++) {
    if (Max < args.x(i)) Max = args.x(i);
  }
  args.y(0) = 0;
  for (size_t i = 0; i < n; i++) {
    args.y(0) += exp(args.x(i) - Max);
  }
  args.y(0) = Max + log(args.y(0));
}

// Re-taping: gather the inputs and re-record a single logspace_sum node.
void LogSpaceSumOp::forward(ForwardArgs<Replay> &args) {
  std::vector<ad_plain> x(input_size());
  for (Index i = 0; i < input_size(); i++) x[i] = args.x(i);
  args.y(0) = logspace_sum(x);
}

const char *LogSpaceSumOp::op_name() { return "LSSumOp"; }

// User-facing entry: push a LogSpaceSumOp over all of x onto the tape.
ad_plain logspace_sum(const std::vector<ad_plain> &x) {
  OperatorPure *pOp = get_glob()->getOperator<LogSpaceSumOp>(x.size());
  return get_glob()->add_to_stack<LogSpaceSumOp>(pOp, x)[0];
}
3404 
// LogSpaceSumStrideOp: log-sum-exp over n "rows", where each row value is
// assembled from stride.size() strided input segments.
Index LogSpaceSumStrideOp::number_of_terms() const { return stride.size(); }

Index LogSpaceSumStrideOp::input_size() const { return number_of_terms(); }

Index LogSpaceSumStrideOp::output_size() const { return 1; }

LogSpaceSumStrideOp::LogSpaceSumStrideOp(std::vector<Index> stride, size_t n)
    : stride(stride), n(n) {}

// Plain evaluation with the log-sum-exp max shift (two passes over the
// rows: one for the maximum, one for the shifted exponential sum).
// NOTE(review): rowsum(px, i) is a member helper not visible in this
// span — presumably it combines the i-th row across the strided input
// pointers; confirm against the class definition.
void LogSpaceSumStrideOp::forward(ForwardArgs<Scalar> &args) {
  Scalar Max = -INFINITY;

  size_t m = stride.size();
  std::vector<Scalar *> wrk(m);
  Scalar **px = &(wrk[0]);
  for (size_t i = 0; i < m; i++) {
    px[i] = args.x_ptr(i);
  }

  for (size_t i = 0; i < n; i++) {
    Scalar s = rowsum(px, i);
    if (Max < s) Max = s;
  }

  args.y(0) = 0;
  for (size_t i = 0; i < n; i++) {
    Scalar s = rowsum(px, i);
    args.y(0) += exp(s - Max);
  }
  args.y(0) = Max + log(args.y(0));
}

// Re-taping: gather inputs and re-record as a single strided node.
void LogSpaceSumStrideOp::forward(ForwardArgs<Replay> &args) {
  std::vector<ad_plain> x(input_size());
  for (Index i = 0; i < input_size(); i++) x[i] = args.x(i);
  args.y(0) = logspace_sum_stride(x, stride, n);
}

// Dependency reporting: input j covers a contiguous segment of
// n * stride[j] variables starting at its input position.
void LogSpaceSumStrideOp::dependencies(Args<> &args, Dependencies &dep) const {
  for (size_t j = 0; j < (size_t)number_of_terms(); j++) {
    size_t K = n * stride[j];
    dep.add_segment(args.input(j), K);
  }
}

const char *LogSpaceSumStrideOp::op_name() { return "LSStride"; }

// Source-code generation is not supported for this operator.
void LogSpaceSumStrideOp::forward(ForwardArgs<Writer> &args) {
  TMBAD_ASSERT(false);
}

void LogSpaceSumStrideOp::reverse(ReverseArgs<Writer> &args) {
  TMBAD_ASSERT(false);
}

// User-facing entry: one x entry per strided term; x and stride must match.
ad_plain logspace_sum_stride(const std::vector<ad_plain> &x,
                             const std::vector<Index> &stride, size_t n) {
  TMBAD_ASSERT(x.size() == stride.size());
  OperatorPure *pOp = get_glob()->getOperator<LogSpaceSumStrideOp>(stride, n);
  return get_glob()->add_to_stack<LogSpaceSumStrideOp>(pOp, x)[0];
}
3466 } // namespace TMBad
3467 // Autogenerated - do not edit by hand !
3468 #include "graph2dot.hpp"
3469 namespace TMBad {
3470 
// Render an operator graph in Graphviz DOT format to `cout`:
//   * one node per operator on the stack, labelled with its op_name()
//     (optionally suffixed with the node id when show_id is true),
//   * one edge per neighbor relation in G,
//   * nodes in glob.subgraph_seq drawn filled,
//   * operators owning the independent variables (inv_index) forced onto
//     one rank, and those owning the dependent variables (dep_index) onto
//     another.
// NOTE(review): glob and G are taken by value, so each call copies them —
// presumably intentional in this autogenerated code; confirm before
// changing to const references.
void graph2dot(global glob, graph G, bool show_id, std::ostream &cout) {
  cout << "digraph graphname {\n";
  for (size_t i = 0; i < glob.opstack.size(); i++) {
    if (!show_id)
      cout << i << " [label=\"" << glob.opstack[i]->op_name() << "\"];\n";
    else
      cout << i << " [label=\"" << glob.opstack[i]->op_name() << " " << i
           << "\"];\n";
  }
  for (size_t node = 0; node < G.num_nodes(); node++) {
    for (size_t k = 0; k < G.num_neighbors(node); k++) {
      cout << node << " -> " << G.neighbors(node)[k] << ";\n";
    }
  }
  for (size_t i = 0; i < glob.subgraph_seq.size(); i++) {
    size_t node = glob.subgraph_seq[i];
    cout << node << " [style=\"filled\"];\n";
  }

  // Map variable indices to their producing operator nodes.
  std::vector<Index> v2o = glob.var2op();

  cout << "{rank=same;";
  for (size_t i = 0; i < glob.inv_index.size(); i++) {
    cout << v2o[glob.inv_index[i]] << ";";
  }
  cout << "}\n";

  cout << "{rank=same;";
  for (size_t i = 0; i < glob.dep_index.size(); i++) {
    cout << v2o[glob.dep_index[i]] << ";";
  }
  cout << "}\n";

  cout << "}\n";
}

// Convenience overload: build the forward graph of glob, then render it.
void graph2dot(global glob, bool show_id, std::ostream &cout) {
  graph G = glob.forward_graph();
  graph2dot(glob, G, show_id, cout);
}

// Convenience overload: render a given graph directly to a file.
void graph2dot(const char *filename, global glob, graph G, bool show_id) {
  std::ofstream myfile;
  myfile.open(filename);
  graph2dot(glob, G, show_id, myfile);
  myfile.close();
}

// Convenience overload: build the forward graph and render it to a file.
void graph2dot(const char *filename, global glob, bool show_id) {
  std::ofstream myfile;
  myfile.open(filename);
  graph2dot(glob, show_id, myfile);
  myfile.close();
}
3525 } // namespace TMBad
3526 // Autogenerated - do not edit by hand !
3527 #include "graph_transform.hpp"
3528 namespace TMBad {
3529 
// Indices of the true entries of x, as size_t (delegates to the
// templated which<T> defined elsewhere).
std::vector<size_t> which(const std::vector<bool> &x) {
  return which<size_t>(x);
}
3533 
// Product of all entries of x; the empty product is 1.
size_t prod_int(const std::vector<size_t> &x) {
  size_t result = 1;
  for (const size_t &factor : x) result *= factor;
  return result;
}
3539 
// Variables newly reached by a reverse sweep restricted to the operators
// producing `vars`, excluding `vars` themselves (the XOR strips the seed
// set back out of the propagated mask).
std::vector<bool> reverse_boundary(global &glob,
                                   const std::vector<bool> &vars) {
  std::vector<bool> boundary(vars);
  std::vector<bool> node_filter = glob.var2op(vars);
  glob.reverse_sub(boundary, node_filter);

  for (size_t i = 0; i < vars.size(); i++) boundary[i] = boundary[i] ^ vars[i];
  return boundary;
}
3549 
// Identify the "accumulation tree" of the tape: the operator nodes that
// only perform linear accumulation of results. Returns their node indices.
// When `boundary` is true, returns instead the nodes at the boundary of
// that tree (via reverse_boundary).
std::vector<Index> get_accumulation_tree(global &glob, bool boundary) {
  std::vector<OperatorPure *> &opstack = glob.opstack;

  // Start from the non-linear operators ...
  std::vector<bool> node_subset(opstack.size(), false);
  for (size_t i = 0; i < opstack.size(); i++) {
    node_subset[i] = opstack[i]->info().test(op_info::is_linear);
  }

  node_subset.flip();

  // ... mark all variables they produce, propagate backwards, and invert:
  // what remains is the purely linear accumulation part.
  std::vector<bool> var_subset = glob.op2var(node_subset);

  glob.reverse(var_subset);

  var_subset.flip();

  if (boundary) var_subset = reverse_boundary(glob, var_subset);

  node_subset = glob.var2op(var_subset);

  return which<Index>(node_subset);
}
3572 
// Indices of all operators on the stack whose op_name() equals `name`
// (exact C-string comparison).
std::vector<Index> find_op_by_name(global &glob, const char *name) {
  std::vector<Index> ans;
  std::vector<OperatorPure *> &opstack = glob.opstack;
  for (size_t i = 0; i < opstack.size(); i++) {
    if (!strcmp(opstack[i]->op_name(), name)) {
      ans.push_back(i);
    }
  }
  return ans;
}
3583 
// Substitute the operators at positions `seq`: each operator is replaced
// by a NullOp2 pair that consumes its inputs and re-emits its outputs,
// effectively cutting the dependence and turning the outputs into new
// independent variables. Returns the variable indices of those new
// independents (which are also appended to glob.inv_index; existing
// inv/dep tags are dropped unless inv_tags/dep_tags is set).
std::vector<Index> substitute(global &glob, const std::vector<Index> &seq,
                              bool inv_tags, bool dep_tags) {
  std::vector<OperatorPure *> &opstack = glob.opstack;
  std::vector<Index> seq2(seq);
  // Open a slot before each selected operator so the input-consuming
  // NullOp2 has somewhere to live.
  make_space_inplace(opstack, seq2);
  OperatorPure *invop = glob.getOperator<global::InvOp>();
  for (size_t i = 0; i < seq2.size(); i++) {
    OperatorPure *op = opstack[seq2[i]];
    // Refuse to substitute an existing independent-variable marker.
    if (inv_tags) TMBAD_ASSERT(op != invop);
    size_t nin = op->input_size();
    size_t nou = op->output_size();
    opstack[seq2[i] - 1] = glob.getOperator<global::NullOp2>(nin, 0);
    opstack[seq2[i]] = glob.getOperator<global::NullOp2>(0, nou);
    op->deallocate();
  }

  std::vector<Index> new_inv = glob.op2var(seq2);
  if (!inv_tags) glob.inv_index.resize(0);
  if (!dep_tags) glob.dep_index.resize(0);
  glob.inv_index.insert(glob.inv_index.end(), new_inv.begin(), new_inv.end());
  return new_inv;
}
3606 
3607 std::vector<Index> substitute(global &glob, const char *name, bool inv_tags,
3608  bool dep_tags) {
3609  std::vector<Index> seq = find_op_by_name(glob, name);
3610  return substitute(glob, seq, inv_tags, dep_tags);
3611 }
3612 
3614  global glob_tree = glob;
3615 
3616  std::vector<Index> boundary = get_accumulation_tree(glob, true);
3617 
3618  substitute(glob_tree, boundary, false, true);
3619  glob_tree.eliminate();
3620 
3621  size_t n = glob_tree.inv_index.size();
3622 
3623  std::vector<Scalar> x0(n);
3624  for (size_t i = 0; i < n; i++) x0[i] = glob_tree.value_inv(i);
3625  glob_tree.forward();
3626  glob_tree.clear_deriv();
3627  glob_tree.deriv_dep(0) = 1;
3628  glob_tree.reverse();
3629  Scalar V = glob_tree.value_dep(0);
3630  std::vector<Scalar> J(n);
3631  for (size_t i = 0; i < n; i++) J[i] = glob_tree.deriv_inv(i);
3632 
3633  for (size_t i = 0; i < n; i++) V -= J[i] * x0[i];
3634 
3635  std::vector<Index> vars = glob.op2var(boundary);
3636  glob.dep_index.resize(0);
3637  glob.ad_start();
3638  std::vector<ad_aug_index> res(vars.begin(), vars.end());
3639  for (size_t i = 0; i < vars.size(); i++) {
3640  res[i] = res[i] * J[i];
3641  if (i == 0) res[i] += V;
3642  if (!sum_) res[i].Dependent();
3643  }
3644  if (sum_) {
3645  ad_aug sum_res = sum(res);
3646  sum_res.Dependent();
3647  }
3648  glob.ad_stop();
3649  glob.eliminate();
3650  return glob;
3651 }
3652 
3653 void aggregate(global &glob, int sign) {
3654  TMBAD_ASSERT((sign == 1) || (sign == -1));
3655  glob.ad_start();
3656  std::vector<ad_aug_index> x(glob.dep_index.begin(), glob.dep_index.end());
3657  ad_aug y = 0;
3658  for (size_t i = 0; i < x.size(); i++) y += x[i];
3659  if (sign < 0) y = -y;
3660  glob.dep_index.resize(0);
3661  y.Dependent();
3662  glob.ad_stop();
3663 }
3664 
// Snapshot the tape state needed to undo operators recorded after this
// point: the dependent-variable tags and the current operator count.
old_state::old_state(global &glob) : glob(glob) {
  dep_index = glob.dep_index;
  opstack_size = glob.opstack.size();
}
3669 
// Roll the tape back to the snapshotted state: restore the dependent tags
// and pop (and deallocate) every operator added since, shrinking the input
// and value arrays accordingly.
void old_state::restore() {
  glob.dep_index = dep_index;
  while (glob.opstack.size() > opstack_size) {
    Index input_size = glob.opstack.back()->input_size();
    Index output_size = glob.opstack.back()->output_size();
    glob.inputs.resize(glob.inputs.size() - input_size);
    glob.values.resize(glob.values.size() - output_size);
    glob.opstack.back()->deallocate();
    glob.opstack.pop_back();
  }
}
3681 
// Term bookkeeping for 'glob'. Pass do_init=false to defer initialize(),
// e.g. when a custom inv_remap will be supplied later.
term_info::term_info(global &glob, bool do_init) : glob(glob) {
  if (do_init) initialize();
}
3685 
// Assign a 'term id' to every dependent variable such that identical
// sub-expressions (up to the independent-variable grouping 'inv_remap')
// share an id, and count how often each id occurs. Callers use the counts
// to decide which terms are worth caching.
// NOTE(review): assumes glob.dep_index is non-empty - max_element on an
// empty range would be invalid; confirm with callers.
void term_info::initialize(std::vector<Index> inv_remap) {
  // Default: all independent variables belong to one group
  if (inv_remap.size() == 0) inv_remap.resize(glob.inv_index.size(), 0);
  inv_remap = radix::factor<Index>(inv_remap);
  std::vector<Index> remap = remap_identical_sub_expressions(glob, inv_remap);
  // Representative variable of each dependent (term) ...
  std::vector<Index> term_ids = subset(remap, glob.dep_index);
  // ... compressed to consecutive ids
  id = radix::factor<Index>(term_ids);
  Index max_id = *std::max_element(id.begin(), id.end());
  count.resize(max_id + 1, 0);
  for (size_t i = 0; i < id.size(); i++) {
    count[id[i]]++;
  }
}
3698 
// Default quadrature configuration: no debug output, non-adaptive, NaN
// evaluations mapped to zero, ytol=1e-2, dx=1.
gk_config::gk_config()
    : debug(false), adaptive(false), nan2zero(true), ytol(1e-2), dx(1) {}
3701 
3703  size_t count = 1;
3704  for (size_t i = 0; i < bound.size(); i++)
3705  if (mask_[i]) count *= bound[i];
3706  return count;
3707 }
3708 
// Multivariate counter with 'dim' dimensions, all sharing the same bound
// and the same active ('mask') flag. Starts at the origin (pointer == 0).
multivariate_index::multivariate_index(size_t bound_, size_t dim, bool flag)
    : pointer(0) {
  bound.resize(dim, bound_);
  x.resize(dim, 0);
  mask_.resize(dim, flag);
}

// Same, but with an individual bound per dimension.
multivariate_index::multivariate_index(std::vector<size_t> bound, bool flag)
    : pointer(0), bound(bound) {
  x.resize(bound.size(), 0);
  mask_.resize(bound.size(), flag);
}
3721 
3722 void multivariate_index::flip() { mask_.flip(); }
3723 
3725  size_t N = 1;
3726  for (size_t i = 0; i < x.size(); i++) {
3727  if (mask_[i]) {
3728  if (x[i] < bound[i] - 1) {
3729  x[i]++;
3730  pointer += N;
3731  break;
3732  } else {
3733  x[i] = 0;
3734  pointer -= (bound[i] - 1) * N;
3735  }
3736  }
3737  N *= bound[i];
3738  }
3739  return *this;
3740 }
3741 
// Linear (flattened) position corresponding to the current multi-index.
multivariate_index::operator size_t() { return pointer; }

// Current coordinate of dimension i.
size_t multivariate_index::index(size_t i) { return x[i]; }

// Current full coordinate vector.
std::vector<size_t> multivariate_index::index() { return x; }

// Read/write access to the active-flag of dimension i.
std::vector<bool>::reference multivariate_index::mask(size_t i) {
  return mask_[i];
}

// Replace the whole active-mask; size must match the dimension.
void multivariate_index::set_mask(const std::vector<bool> &mask) {
  TMBAD_ASSERT(mask.size() == mask_.size());
  mask_ = mask;
}
3756 
// Number of variables in this clique.
size_t clique::clique_size() { return indices.size(); }

clique::clique() {}

// Keep only the clique members (and their dimensions) selected by 'mask'.
void clique::subset_inplace(const std::vector<bool> &mask) {
  indices = subset(indices, mask);
  dim = subset(dim, mask);
}

// Allocate the (log-scale) table covering all grid combinations.
void clique::logsum_init() { logsum.resize(prod_int(dim)); }

bool clique::empty() const { return (indices.size() == 0); }
3769 
3770 bool clique::contains(Index i) {
3771  bool ans = false;
3772  for (size_t j = 0; j < indices.size(); j++) ans |= (i == indices[j]);
3773  return ans;
3774 }
3775 
// Compute, for variable 'ind' inside the enlarged clique 'super', the data
// needed for strided log-space summation:
//  - 'stride': distance in this clique's table between consecutive values
//    of 'ind' (product of dimensions of members ordered before 'ind').
//  - 'offset': for every configuration of super's variables other than
//    'ind', the table entry at which the corresponding 'ind'-slice starts.
void clique::get_stride(const clique &super, Index ind,
                        std::vector<ad_plain> &offset, Index &stride) {
  stride = 1;
  for (size_t k = 0; (k < clique_size()) && (indices[k] < ind); k++) {
    stride *= dim[k];
  }

  // Embed this clique's table into super's enumeration: iterate over the
  // configurations of super-variables NOT in this clique ('xa') and, for
  // each of them, lay down the whole local table ('xi').
  multivariate_index mv(super.dim);
  size_t nx = mv.count();
  std::vector<bool> mask = lmatch(super.indices, this->indices);
  mask.flip();
  mv.set_mask(mask);
  std::vector<ad_plain> x(nx);
  size_t xa_count = mv.count();  // configurations outside this clique
  mv.flip();
  size_t xi_count = mv.count();  // entries of this clique's table
  mv.flip();
  TMBAD_ASSERT(x.size() == xa_count * xi_count);
  for (size_t i = 0; i < xa_count; i++, ++mv) {
    mv.flip();
    for (size_t j = 0; j < xi_count; j++, ++mv) {
      TMBAD_ASSERT(logsum[j].on_some_tape());
      x[mv] = logsum[j];
    }
    mv.flip();
  }

  // Offsets: enumerate super with dimension 'ind' held fixed (at zero).
  mv = multivariate_index(super.dim);
  mask = lmatch(super.indices, std::vector<Index>(1, ind));
  mask.flip();
  mv.set_mask(mask);

  xa_count = mv.count();
  offset.resize(xa_count);
  for (size_t i = 0; i < xa_count; i++, ++mv) {
    offset[i] = x[mv];
  }
}
3814 
3815 sr_grid::sr_grid() {}
3816 
// Midpoint rule on [a,b]: n equally spaced points with uniform weight h.
sr_grid::sr_grid(Scalar a, Scalar b, size_t n) : x(n), w(n) {
  Scalar h = (b - a) / n;
  for (size_t i = 0; i < n; i++) {
    x[i] = a + h / 2 + i * h;
    w[i] = h;
  }
}
3824 
3825 sr_grid::sr_grid(size_t n) {
3826  for (size_t i = 0; i < n; i++) {
3827  x[i] = i;
3828  w[i] = 1. / (double)n;
3829  }
3830 }
3831 
3832 size_t sr_grid::size() { return x.size(); }
3833 
// Log-weights as tape variables, created lazily on first use and stored
// contiguously. Returns the first element so callers can use it as a base
// for strided access to the full weight vector.
ad_plain sr_grid::logw_offset() {
  if (logw.size() != w.size()) {
    logw.resize(w.size());
    for (size_t i = 0; i < w.size(); i++) logw[i] = log(w[i]);
    forceContiguous(logw);
  }
  return logw[0];
}
3842 
3844  std::vector<Index> random,
3845  std::vector<sr_grid> grid,
3846  std::vector<Index> random2grid,
3847  bool perm)
3848  : grid(grid),
3849  glob(glob),
3850  random(random),
3851  replay(glob, new_glob),
3852  tinfo(glob, false) {
3853  inv2grid.resize(glob.inv_index.size(), 0);
3854  for (size_t i = 0; i < random2grid.size(); i++) {
3855  inv2grid[random[i]] = random2grid[i];
3856  }
3857 
3858  mark.resize(glob.values.size(), false);
3859  for (size_t i = 0; i < random.size(); i++)
3860  mark[glob.inv_index[random[i]]] = true;
3861  glob.forward(mark);
3862 
3863  forward_graph = glob.forward_graph(mark);
3864  reverse_graph = glob.reverse_graph(mark);
3865 
3866  glob.subgraph_cache_ptr();
3867 
3868  var_remap.resize(glob.values.size());
3869 
3870  op2inv_idx = glob.op2idx(glob.inv_index, NA);
3871  op2dep_idx = glob.op2idx(glob.dep_index, NA);
3872 
3873  if (perm) reorder_random();
3874 
3875  terms_done.resize(glob.dep_index.size(), false);
3876 
3877  std::vector<Index> inv_remap(glob.inv_index.size());
3878  for (size_t i = 0; i < inv_remap.size(); i++) inv_remap[i] = -(i + 1);
3879  for (size_t i = 0; i < random.size(); i++)
3880  inv_remap[random[i]] = inv2grid[random[i]];
3881  inv_remap = radix::factor<Index>(inv_remap);
3882  tinfo.initialize(inv_remap);
3883 }
3884 
3886  std::vector<IndexPair> edges;
3887  std::vector<Index> &inv2op = forward_graph.inv2op;
3888 
3889  for (size_t i = 0; i < random.size(); i++) {
3890  std::vector<Index> subgraph(1, inv2op[random[i]]);
3891  forward_graph.search(subgraph);
3892  reverse_graph.search(subgraph);
3893  for (size_t l = 0; l < subgraph.size(); l++) {
3894  Index inv_other = op2inv_idx[subgraph[l]];
3895  if (inv_other != NA) {
3896  IndexPair edge(random[i], inv_other);
3897  edges.push_back(edge);
3898  }
3899  }
3900  }
3901 
3902  size_t num_nodes = glob.inv_index.size();
3903  graph G(num_nodes, edges);
3904 
3905  std::vector<bool> visited(num_nodes, false);
3906  std::vector<Index> subgraph;
3907  for (size_t i = 0; i < random.size(); i++) {
3908  if (visited[random[i]]) continue;
3909  std::vector<Index> sg(1, random[i]);
3910  G.search(sg, visited, false, false);
3911  subgraph.insert(subgraph.end(), sg.begin(), sg.end());
3912  }
3913  std::reverse(subgraph.begin(), subgraph.end());
3914  TMBAD_ASSERT(random.size() == subgraph.size());
3915  random = subgraph;
3916 }
3917 
3918 std::vector<size_t> sequential_reduction::get_grid_bounds(
3919  std::vector<Index> inv_index) {
3920  std::vector<size_t> ans(inv_index.size());
3921  for (size_t i = 0; i < inv_index.size(); i++) {
3922  ans[i] = grid[inv2grid[inv_index[i]]].size();
3923  }
3924  return ans;
3925 }
3926 
3927 std::vector<sr_grid *> sequential_reduction::get_grid(
3928  std::vector<Index> inv_index) {
3929  std::vector<sr_grid *> ans(inv_index.size());
3930  for (size_t i = 0; i < inv_index.size(); i++) {
3931  ans[i] = &(grid[inv2grid[inv_index[i]]]);
3932  }
3933  return ans;
3934 }
3935 
// Evaluate dependent variable 'dep_index' on the full grid spanned by the
// independent variables 'inv_index'. For each grid point the relevant tape
// subgraph is replayed with the variables set to the grid coordinates.
// Tables of terms occurring at least twice (same term id) are cached.
std::vector<ad_aug> sequential_reduction::tabulate(std::vector<Index> inv_index,
                                                   Index dep_index) {
  size_t id = tinfo.id[dep_index];
  size_t count = tinfo.count[id];
  bool do_cache = (count >= 2);
  if (do_cache) {
    if (cache[id].size() > 0) {
      return cache[id];
    }
  }

  std::vector<sr_grid *> inv_grid = get_grid(inv_index);
  std::vector<size_t> grid_bounds = get_grid_bounds(inv_index);
  multivariate_index mv(grid_bounds);
  std::vector<ad_aug> ans(mv.count());
  for (size_t i = 0; i < ans.size(); i++, ++mv) {
    // Set each variable to its current grid coordinate and replay
    for (size_t j = 0; j < inv_index.size(); j++) {
      replay.value_inv(inv_index[j]) = inv_grid[j]->x[mv.index(j)];
    }
    replay.forward_sub();
    ans[i] = replay.value_dep(dep_index);
  }

  // Table entries must be contiguous on the tape (strided lookup later)
  forceContiguous(ans);
  if (do_cache) {
    cache[id] = ans;
  }
  return ans;
}
3965 
3967  std::vector<Index> super;
3968  size_t c = 0;
3969  for (std::list<clique>::iterator it = cliques.begin(); it != cliques.end();
3970  ++it) {
3971  if ((*it).contains(i)) {
3972  super.insert(super.end(), (*it).indices.begin(), (*it).indices.end());
3973  c++;
3974  }
3975  }
3976  sort_unique_inplace(super);
3977 
3978  std::vector<std::vector<ad_plain> > offset_by_clique(c);
3979  std::vector<Index> stride_by_clique(c);
3980  clique C;
3981  C.indices = super;
3982  C.dim = get_grid_bounds(super);
3983  std::list<clique>::iterator it = cliques.begin();
3984  c = 0;
3985  while (it != cliques.end()) {
3986  if ((*it).contains(i)) {
3987  (*it).get_stride(C, i, offset_by_clique[c], stride_by_clique[c]);
3988  it = cliques.erase(it);
3989  c++;
3990  } else {
3991  ++it;
3992  }
3993  }
3994 
3995  std::vector<bool> mask = lmatch(super, std::vector<Index>(1, i));
3996  mask.flip();
3997  C.subset_inplace(mask);
3998  C.logsum_init();
3999 
4000  grid[inv2grid[i]].logw_offset();
4001  size_t v_begin = get_glob()->values.size();
4002  for (size_t j = 0; j < C.logsum.size(); j++) {
4003  std::vector<ad_plain> x;
4004  std::vector<Index> stride;
4005  for (size_t k = 0; k < offset_by_clique.size(); k++) {
4006  x.push_back(offset_by_clique[k][j]);
4007  stride.push_back(stride_by_clique[k]);
4008  }
4009 
4010  x.push_back(grid[inv2grid[i]].logw_offset());
4011  stride.push_back(1);
4012  C.logsum[j] = logspace_sum_stride(x, stride, grid[inv2grid[i]].size());
4013  }
4014  size_t v_end = get_glob()->values.size();
4015  TMBAD_ASSERT(v_end - v_begin == C.logsum.size());
4016 
4017  cliques.push_back(C);
4018 }
4019 
4021  const std::vector<Index> &inv2op = forward_graph.inv2op;
4022 
4023  Index start_node = inv2op[i];
4024  std::vector<Index> subgraph(1, start_node);
4025  forward_graph.search(subgraph);
4026 
4027  std::vector<Index> dep_clique;
4028  std::vector<Index> subgraph_terms;
4029  for (size_t k = 0; k < subgraph.size(); k++) {
4030  Index node = subgraph[k];
4031  Index dep_idx = op2dep_idx[node];
4032  if (dep_idx != NA && !terms_done[dep_idx]) {
4033  terms_done[dep_idx] = true;
4034  subgraph_terms.push_back(node);
4035  dep_clique.push_back(dep_idx);
4036  }
4037  }
4038  for (size_t k = 0; k < subgraph_terms.size(); k++) {
4039  subgraph.resize(0);
4040  subgraph.push_back(subgraph_terms[k]);
4041 
4042  reverse_graph.search(subgraph);
4043 
4044  std::vector<Index> inv_clique;
4045  for (size_t l = 0; l < subgraph.size(); l++) {
4046  Index tmp = op2inv_idx[subgraph[l]];
4047  if (tmp != NA) inv_clique.push_back(tmp);
4048  }
4049 
4050  glob.subgraph_seq = subgraph;
4051 
4052  clique C;
4053  C.indices = inv_clique;
4054  C.dim = get_grid_bounds(inv_clique);
4055  C.logsum = tabulate(inv_clique, dep_clique[k]);
4056 
4057  cliques.push_back(C);
4058  }
4059 
4060  merge(i);
4061 }
4062 
// Debug helper: print the variable indices of every active clique.
void sequential_reduction::show_cliques() {
  Rcout << "Cliques: ";
  std::list<clique>::iterator it;
  for (it = cliques.begin(); it != cliques.end(); ++it) {
    Rcout << it->indices << " ";
  }
  Rcout << "\n";
}
4071 
// Integrate out every random effect, one at a time, in the chosen order.
void sequential_reduction::update_all() {
  for (size_t i = 0; i < random.size(); i++) update(random[i]);
}
4075 
// Total result after all random effects are integrated out: the sum of the
// fully reduced clique tables (each must be a single scalar by now) plus
// every term that never involved a random effect.
ad_aug sequential_reduction::get_result() {
  ad_aug ans = 0;
  std::list<clique>::iterator it;
  for (it = cliques.begin(); it != cliques.end(); ++it) {
    TMBAD_ASSERT(it->clique_size() == 0);
    TMBAD_ASSERT(it->logsum.size() == 1);
    ans += it->logsum[0];
  }

  // Terms not touched by the reduction contribute directly
  for (size_t i = 0; i < terms_done.size(); i++) {
    if (!terms_done[i]) ans += replay.value_dep(i);
  }
  return ans;
}
4090 
// Replay the tape, integrate out all random effects sequentially and return
// a new tape whose single dependent variable is the marginal result.
global sequential_reduction::marginal() {
  replay.start();
  replay.forward(true, false);
  update_all();
  ad_aug ans = get_result();
  ans.Dependent();
  replay.stop();
  return new_glob;
}
4100 
// Prepare parallelization of 'glob' into 'num_threads' chunks; builds the
// reverse dependency graph used later to partition the work (see run()).
autopar::autopar(global &glob, size_t num_threads)
    : glob(glob),
      num_threads(num_threads),
      do_aggregate(false),
      keep_all_inv(false) {
  reverse_graph = glob.reverse_graph();
}
4108 
// For each dependent variable, compute the length of the longest dependency
// chain leading to it. Used by run() as a proxy for the amount of work
// behind each term.
// NOTE(review): the depth buffer is sized by opstack.size() but is both
// written at operator index i and read back via variable indices (dep[j],
// glob.dep_index[j]) - this mixes index spaces unless operators and
// variables are aligned here; confirm against the tape layout.
std::vector<size_t> autopar::max_tree_depth() {
  std::vector<Index> max_tree_depth(glob.opstack.size(), 0);
  Dependencies dep;
  Args<> args(glob.inputs);
  for (size_t i = 0; i < glob.opstack.size(); i++) {
    dep.resize(0);
    glob.opstack[i]->dependencies(args, dep);
    // Depth = 1 + deepest dependency
    for (size_t j = 0; j < dep.size(); j++) {
      max_tree_depth[i] = std::max(max_tree_depth[i], max_tree_depth[dep[j]]);
    }

    max_tree_depth[i]++;

    glob.opstack[i]->increment(args.ptr);
  }
  std::vector<size_t> ans(glob.dep_index.size());
  for (size_t j = 0; j < glob.dep_index.size(); j++) {
    ans[j] = max_tree_depth[glob.dep_index[j]];
  }
  return ans;
}
4130 
// Split the computational graph into 'num_threads' chunks of roughly equal
// work. Dependent variables are processed in order of decreasing tree
// depth; each one is greedily assigned, together with its not-yet-visited
// backward closure, to the least loaded thread.
void autopar::run() {
  // Deepest terms first
  std::vector<size_t> ord = order(max_tree_depth());
  std::reverse(ord.begin(), ord.end());
  std::vector<bool> visited(glob.opstack.size(), false);
  std::vector<Index> start;
  // dWork[i]: number of new (previously unvisited) nodes the i'th term adds
  std::vector<Index> dWork(ord.size());
  for (size_t i = 0; i < ord.size(); i++) {
    start.resize(1);
    start[0] = reverse_graph.dep2op[ord[i]];
    reverse_graph.search(start, visited, false, false);
    dWork[i] = start.size();
    if (false) {  // debug printout of each term's subgraph
      for (size_t k = 0; k < start.size(); k++) {
        Rcout << glob.opstack[start[k]]->op_name() << " ";
      }
      Rcout << "\n";
    }
  }

  // Greedy load balancing: terms contributing at most one new node stay
  // with the previous term's thread; otherwise pick the least loaded thread
  std::vector<size_t> thread_assign(ord.size(), 0);
  std::vector<size_t> work_by_thread(num_threads, 0);
  for (size_t i = 0; i < dWork.size(); i++) {
    if (i == 0) {
      thread_assign[i] = 0;
    } else {
      if (dWork[i] <= 1)
        thread_assign[i] = thread_assign[i - 1];
      else
        thread_assign[i] = which_min(work_by_thread);
    }
    work_by_thread[thread_assign[i]] += dWork[i];
  }

  // Root operator of each term, grouped by assigned thread
  node_split.resize(num_threads);
  for (size_t i = 0; i < ord.size(); i++) {
    node_split[thread_assign[i]].push_back(reverse_graph.dep2op[ord[i]]);
  }

  // Expand each thread's roots to their full backward closure; optionally
  // force all independent variables into every thread
  for (size_t i = 0; i < num_threads; i++) {
    if (keep_all_inv)
      node_split[i].insert(node_split[i].begin(), reverse_graph.inv2op.begin(),
                           reverse_graph.inv2op.end());
    reverse_graph.search(node_split[i]);
  }
}
4176 
4178  vglob.resize(num_threads);
4179  inv_idx.resize(num_threads);
4180  dep_idx.resize(num_threads);
4181  std::vector<Index> tmp;
4182  for (size_t i = 0; i < num_threads; i++) {
4183  glob.subgraph_seq = node_split[i];
4184  vglob[i] = glob.extract_sub(tmp);
4185  if (do_aggregate) aggregate(vglob[i]);
4186  }
4187 
4188  Index NA = -1;
4189  std::vector<Index> op2inv_idx = glob.op2idx(glob.inv_index, NA);
4190  std::vector<Index> op2dep_idx = glob.op2idx(glob.dep_index, NA);
4191  for (size_t i = 0; i < num_threads; i++) {
4192  std::vector<Index> &seq = node_split[i];
4193  for (size_t j = 0; j < seq.size(); j++) {
4194  if (op2inv_idx[seq[j]] != NA) inv_idx[i].push_back(op2inv_idx[seq[j]]);
4195  if (op2dep_idx[seq[j]] != NA) dep_idx[i].push_back(op2dep_idx[seq[j]]);
4196  }
4197  if (do_aggregate) {
4198  dep_idx[i].resize(1);
4199  dep_idx[i][0] = i;
4200  }
4201  }
4202 }
4203 
// Number of independent variables of the original tape.
size_t autopar::input_size() const { return glob.inv_index.size(); }

// One output per thread when per-thread results are aggregated; otherwise
// the original number of dependent variables.
size_t autopar::output_size() const {
  return (do_aggregate ? num_threads : glob.dep_index.size());
}

Index ParalOp::input_size() const { return n; }

Index ParalOp::output_size() const { return m; }

// Wrap the thread-split tapes produced by an 'autopar' as one operator.
ParalOp::ParalOp(const autopar &ap)
    : vglob(ap.vglob),
      inv_idx(ap.inv_idx),
      dep_idx(ap.dep_idx),
      n(ap.input_size()),
      m(ap.output_size()) {}
4220 
// Forward sweep: scatter the operator inputs into each thread's sub-tape,
// run the sub-tapes (in parallel when OpenMP is enabled), then gather the
// sub-tape outputs serially to avoid write races.
void ParalOp::forward(ForwardArgs<Scalar> &args) {
  size_t num_threads = vglob.size();

#ifdef _OPENMP
#pragma omp parallel for
#endif

  for (size_t i = 0; i < num_threads; i++) {
    for (size_t j = 0; j < inv_idx[i].size(); j++) {
      vglob[i].value_inv(j) = args.x(inv_idx[i][j]);
    }
    vglob[i].forward();
  }

  // Serial gather of results
  for (size_t i = 0; i < num_threads; i++) {
    for (size_t j = 0; j < dep_idx[i].size(); j++) {
      args.y(dep_idx[i][j]) = vglob[i].value_dep(j);
    }
  }
}
4241 
// Reverse sweep: seed each sub-tape with the output adjoints, run reverse
// in parallel, then accumulate input adjoints serially (the '+=' into
// shared args.dx must not race across threads).
void ParalOp::reverse(ReverseArgs<Scalar> &args) {
  size_t num_threads = vglob.size();

#ifdef _OPENMP
#pragma omp parallel for
#endif

  for (size_t i = 0; i < num_threads; i++) {
    vglob[i].clear_deriv();
    for (size_t j = 0; j < dep_idx[i].size(); j++) {
      vglob[i].deriv_dep(j) = args.dy(dep_idx[i][j]);
    }
    vglob[i].reverse();
  }

  // Serial accumulation of input derivatives
  for (size_t i = 0; i < num_threads; i++) {
    for (size_t j = 0; j < inv_idx[i].size(); j++) {
      args.dx(inv_idx[i][j]) += vglob[i].deriv_inv(j);
    }
  }
}
4263 
4264 const char *ParalOp::op_name() { return "ParalOp"; }
4265 
4266 void ParalOp::print(global::print_config cfg) {
4267  size_t num_threads = vglob.size();
4268  for (size_t i = 0; i < num_threads; i++) {
4269  global::print_config cfg2 = cfg;
4270  std::stringstream ss;
4271  ss << i;
4272  std::string str = ss.str();
4273  cfg2.prefix = cfg2.prefix + str;
4274  vglob[i].print(cfg2);
4275  }
4276 }
4277 
// Heuristic duplicate detection: hash every variable's sub-expression
// (strong hashing of independents, constants and outputs; non-deterministic
// seed, hence only 'likely' duplicates) and map each variable to the first
// variable with an identical hash.
std::vector<Index> get_likely_expression_duplicates(
    const global &glob, std::vector<Index> inv_remap) {
  global::hash_config cfg;
  cfg.strong_inv = true;
  cfg.strong_const = true;
  cfg.strong_output = true;
  cfg.reduce = false;
  cfg.deterministic = false;
  // Group independent variables according to caller-supplied remap
  cfg.inv_seed = inv_remap;
  std::vector<hash_t> h = glob.hash_sweep(cfg);
  return radix::first_occurance<Index>(h);
}
4290 
4291 bool all_allow_remap(const global &glob) {
4292  Args<> args(glob.inputs);
4293  for (size_t i = 0; i < glob.opstack.size(); i++) {
4294  op_info info = glob.opstack[i]->info();
4295  if (!info.test(op_info::allow_remap)) {
4296  return false;
4297  }
4298  glob.opstack[i]->increment(args.ptr);
4299  }
4300  return true;
4301 }
4302 
4304  global &glob, std::vector<Index> inv_remap) {
4305  std::vector<Index> remap = get_likely_expression_duplicates(glob, inv_remap);
4306 
4307  for (size_t i = 0; i < glob.inv_index.size(); i++) {
4308  bool accept = false;
4309  Index var_i = glob.inv_index[i];
4310  if (inv_remap.size() > 0) {
4311  Index j = inv_remap[i];
4312  Index var_j = glob.inv_index[j];
4313  accept = remap[var_i] == remap[var_j];
4314  }
4315  if (!accept) remap[var_i] = var_i;
4316  }
4317 
4318  std::vector<Index> v2o = glob.var2op();
4319  std::vector<Index> dep;
4320  global::OperatorPure *invop = glob.getOperator<global::InvOp>();
4321  Dependencies dep1;
4322  Dependencies dep2;
4323  size_t reject = 0;
4324  size_t total = 0;
4325  Args<> args(glob.inputs);
4326 
4327  for (size_t j = 0, i = 0, nout = 0; j < glob.opstack.size(); j++, i += nout) {
4328  nout = glob.opstack[j]->output_size();
4329  bool any_remap = false;
4330  for (size_t k = i; k < i + nout; k++) {
4331  if (remap[k] != k) {
4332  any_remap = true;
4333  break;
4334  }
4335  }
4336  if (any_remap) {
4337  bool ok = true;
4338  total += nout;
4339 
4340  global::OperatorPure *CurOp = glob.opstack[v2o[i]];
4341  global::OperatorPure *RemOp = glob.opstack[v2o[remap[i]]];
4342  ok &= (CurOp->identifier() == RemOp->identifier());
4343 
4344  ok &= (CurOp->input_size() == RemOp->input_size());
4345  ok &= (CurOp->output_size() == RemOp->output_size());
4346 
4347  op_info CurInfo = CurOp->info();
4348 
4349  if (ok && (nout > 1)) {
4350  for (size_t k = 1; k < nout; k++) {
4351  ok &= (remap[i + k] < i);
4352 
4353  ok &= (v2o[remap[i + k]] == v2o[remap[i]]);
4354 
4355  ok &= (remap[i + k] == remap[i] + k);
4356  }
4357  }
4358 
4359  if (CurOp == invop) {
4360  ok = false;
4361  }
4362  if (ok) {
4363  if (CurInfo.test(op_info::is_constant)) {
4364  if (glob.values[i] != glob.values[remap[i]]) {
4365  ok = false;
4366  }
4367  }
4368  }
4369 
4370  if (ok) {
4371  glob.subgraph_cache_ptr();
4372 
4373  args.ptr = glob.subgraph_ptr[v2o[i]];
4374  dep1.resize(0);
4375  glob.opstack[v2o[i]]->dependencies(args, dep1);
4376 
4377  args.ptr = glob.subgraph_ptr[v2o[remap[i]]];
4378  dep2.resize(0);
4379  glob.opstack[v2o[remap[i]]]->dependencies(args, dep2);
4380 
4381  ok = (dep1.size() == dep2.size());
4382  if (ok) {
4383  bool all_equal = true;
4384  for (size_t j = 0; j < dep1.size(); j++) {
4385  all_equal &= (remap[dep1[j]] == remap[dep2[j]]);
4386  }
4387  ok = all_equal;
4388  }
4389  }
4390 
4391  if (!ok) {
4392  reject += nout;
4393  for (size_t k = i; k < i + nout; k++) remap[k] = k;
4394  }
4395  }
4396  }
4397 
4398  for (size_t i = 0; i < remap.size(); i++) {
4399  TMBAD_ASSERT(remap[i] <= i);
4400  TMBAD_ASSERT(remap[remap[i]] == remap[i]);
4401  }
4402 
4403  if (true) {
4404  Args<> args(glob.inputs);
4405  intervals<Index> visited;
4406  for (size_t i = 0; i < glob.opstack.size(); i++) {
4407  op_info info = glob.opstack[i]->info();
4408  if (!info.test(op_info::allow_remap)) {
4409  Dependencies dep;
4410  glob.opstack[i]->dependencies(args, dep);
4411  for (size_t j = 0; j < dep.I.size(); j++) {
4412  visited.insert(dep.I[j].first, dep.I[j].second);
4413  }
4414  }
4415  glob.opstack[i]->increment(args.ptr);
4416  }
4417 
4418  forbid_remap<std::vector<Index> > fb(remap);
4419  visited.apply(fb);
4420  }
4421  if (reject > 0) {
4422  ((void)(total));
4423  }
4424 
4425  return remap;
4426 }
4427 
4429  std::vector<Index> inv_remap(0);
4430  std::vector<Index> remap = remap_identical_sub_expressions(glob, inv_remap);
4431 
4432  for (size_t i = 0; i < glob.inputs.size(); i++) {
4433  glob.inputs[i] = remap[glob.inputs[i]];
4434  }
4435 }
4436 
// Locate every independent variable on the tape: for each one record the
// index of its operator ('node') and the operator's (input,value) position.
// NOTE(review): for an operator with several outputs the recorded ptr is
// the operator's base position, not offset by j - appears intentional since
// Position identifies the operator; confirm.
std::vector<Position> inv_positions(global &glob) {
  IndexPair ptr(0, 0);
  std::vector<bool> independent_variable = glob.inv_marks();
  std::vector<Position> ans(glob.inv_index.size());
  size_t k = 0;
  for (size_t i = 0; i < glob.opstack.size(); i++) {
    Index nout = glob.opstack[i]->output_size();
    for (Index j = 0; j < nout; j++) {
      if (independent_variable[ptr.second + j]) {
        ans[k].node = i;
        ans[k].ptr = ptr;
        k++;
      }
    }
    glob.opstack[i]->increment(ptr);
  }
  return ans;
}
4455 
// Reorder the tape so the computations depending on the selected
// independent variables ('inv_idx', required strictly increasing) come
// last. No-op unless every operator permits remapping.
void reorder_graph(global &glob, std::vector<Index> inv_idx) {
  if (!all_allow_remap(glob)) return;
  for (size_t i = 1; i < inv_idx.size(); i++) {
    TMBAD_ASSERT(inv_idx[i] > inv_idx[i - 1]);
  }
  // Mark everything that depends on the selected inputs
  std::vector<bool> marks(glob.values.size(), false);
  for (size_t i = 0; i < inv_idx.size(); i++)
    marks[glob.inv_index[inv_idx[i]]] = true;
  glob.forward_dense(marks);
  if (false) {  // debug: proportion of marked variables
    int c = std::count(marks.begin(), marks.end(), true);
    Rcout << "marked proportion:" << (double)c / (double)marks.size() << "\n";
  }

  // Unmarked part first, marked part appended, then rebuild the tape
  marks.flip();
  glob.set_subgraph(marks);
  marks.flip();
  glob.set_subgraph(marks, true);
  glob = glob.extract_sub();
}
4476 } // namespace TMBad
4477 // Autogenerated - do not edit by hand !
4478 #include "integrate.hpp"
4479 namespace TMBad {
4480 
4481 double value(double x) { return x; }
4482 
// Integration control parameters: maximum number of subdivisions plus
// relative and absolute tolerance.
control::control(int subdivisions_, double reltol_, double abstol_)
    : subdivisions(subdivisions_), reltol(reltol_), abstol(abstol_) {}
4485 } // namespace TMBad
4486 // Autogenerated - do not edit by hand !
4487 #include "radix.hpp"
4488 namespace TMBad {}
4489 // Autogenerated - do not edit by hand !
4490 #include "tmbad_allow_comparison.hpp"
4491 namespace TMBad {
4492 
// Comparison operators for ad_aug: compare the current numeric values only
// (nothing is recorded on the tape - see tmbad_allow_comparison.hpp).
bool operator<(const ad_aug &x, const ad_aug &y) {
  return x.Value() < y.Value();
}
bool operator<(const Scalar &x, const ad_aug &y) { return x < y.Value(); }

bool operator<=(const ad_aug &x, const ad_aug &y) {
  return x.Value() <= y.Value();
}
bool operator<=(const Scalar &x, const ad_aug &y) { return x <= y.Value(); }

bool operator>(const ad_aug &x, const ad_aug &y) {
  return x.Value() > y.Value();
}
bool operator>(const Scalar &x, const ad_aug &y) { return x > y.Value(); }

bool operator>=(const ad_aug &x, const ad_aug &y) {
  return x.Value() >= y.Value();
}
bool operator>=(const Scalar &x, const ad_aug &y) { return x >= y.Value(); }

bool operator==(const ad_aug &x, const ad_aug &y) {
  return x.Value() == y.Value();
}
bool operator==(const Scalar &x, const ad_aug &y) { return x == y.Value(); }

bool operator!=(const ad_aug &x, const ad_aug &y) {
  return x.Value() != y.Value();
}
bool operator!=(const Scalar &x, const ad_aug &y) { return x != y.Value(); }
4522 } // namespace TMBad
4523 // Autogenerated - do not edit by hand !
4524 #include "vectorize.hpp"
4525 namespace TMBad {
4526 
// Operator summing a contiguous segment of n variables into one output.
VSumOp::VSumOp(size_t n) : n(n) {}

// The single output depends on the whole n-long input segment.
void VSumOp::dependencies(Args<> &args, Dependencies &dep) const {
  dep.add_segment(args.input(0), n);
}

// Source-code writer sweeps are not implemented for this operator.
void VSumOp::forward(ForwardArgs<Writer> &args) { TMBAD_ASSERT(false); }

void VSumOp::reverse(ReverseArgs<Writer> &args) { TMBAD_ASSERT(false); }

const char *VSumOp::op_name() { return "VSumOp"; }

// Sum of all elements of a segment, recorded as a single VSumOp node
// rather than size()-1 individual additions.
ad_aug sum(ad_segment x) {
  global::Complete<VSumOp> F(x.size());
  return F(x)[0];
}
4543 
// Pointer to the first value of the referenced segment on the owning tape.
Scalar *SegmentRef::value_ptr() { return (*glob_ptr).values.data() + offset; }

// Pointer to the first derivative (adjoint) of the referenced segment.
Scalar *SegmentRef::deriv_ptr() { return (*glob_ptr).derivs.data() + offset; }

SegmentRef::SegmentRef() {}

// Reinterpret a scalar buffer as a packed SegmentRef (inverse of packing).
// NOTE(review): relies on SegmentRef being trivially copyable - confirm.
SegmentRef::SegmentRef(const Scalar *x) {
  SegmentRef *sx = (SegmentRef *)x;
  *this = *sx;
}

SegmentRef::SegmentRef(global *g, Index o, Index s)
    : glob_ptr(g), offset(o), size(s) {}

// Decode a SegmentRef from an ad_segment holding its packed representation
// (exactly K scalars).
SegmentRef::SegmentRef(const ad_segment &x) {
  static const size_t K = ScalarPack<SegmentRef>::size;
  TMBAD_ASSERT(x.size() == K);
  Scalar buf[K];
  for (size_t i = 0; i < K; i++) buf[i] = x[i].Value();
  SegmentRef *sx = (SegmentRef *)buf;
  *this = *sx;
}

// A null SegmentRef (no owning tape) marks an unset segment.
bool SegmentRef::isNull() { return (glob_ptr == NULL); }

// Overwrite the 'size' field of a packed SegmentRef stored on the current
// tape at the position of 'pack'.
void SegmentRef::resize(ad_segment &pack, Index n) {
  Index i = pack.index();
  SegmentRef *p = (SegmentRef *)(get_glob()->values.data() + i);
  p->size = n;
}
4574 
4575 PackOp::PackOp(const Index n) : n(n) {}
4576 
4578  SegmentRef *y = (SegmentRef *)args.y_ptr(0);
4579  y[0] = SegmentRef(args.glob_ptr, args.input(0), n);
4580 }
4581 
4583  ad_segment x(args.x_ptr(0), n);
4584  args.y_segment(0, K) = pack(x);
4585 }
4586 
4588  SegmentRef tmp(args.dy_ptr(0));
4589  if (tmp.glob_ptr != NULL) {
4590  Scalar *dx = SegmentRef(args.y_ptr(0)).deriv_ptr();
4591  Scalar *dy = SegmentRef(args.dy_ptr(0)).deriv_ptr();
4592  for (Index i = 0; i < n; i++) dx[i] += dy[i];
4593  }
4594 }
4595 
4597  ad_segment dy_packed(args.dy_ptr(0), K);
4598 
4599  if (SegmentRef(dy_packed).isNull()) {
4600  SegmentRef().resize(dy_packed, n);
4601  }
4602  ad_segment dy = unpack(dy_packed);
4603  ad_segment dx(args.dx_ptr(0), n, true);
4604  dx += dy;
4605  Replay *pdx = args.dx_ptr(0);
4606  for (Index i = 0; i < n; i++) pdx[i] = dx[i];
4607 }
4608 
const char *PackOp::op_name() { return "PackOp"; }

// The packed output depends on the whole n-long input segment.
void PackOp::dependencies(Args<> &args, Dependencies &dep) const {
  dep.add_segment(args.input(0), n);
}
4614 
4615 UnpkOp::UnpkOp(const Index n) : noutput(n) {}
4616 
4618  Scalar *y = args.y_ptr(0);
4619  SegmentRef srx(args.x_ptr(0));
4620  if (srx.isNull()) {
4621  for (Index i = 0; i < noutput; i++) y[i] = 0;
4622  return;
4623  }
4624  Scalar *x = srx.value_ptr();
4625  for (Index i = 0; i < noutput; i++) y[i] = x[i];
4626 
4627  ((SegmentRef *)args.x_ptr(0))->glob_ptr = NULL;
4628 }
4629 
4631  SegmentRef *dx = (SegmentRef *)args.dx_ptr(0);
4632  dx[0] = SegmentRef(args.glob_ptr, args.output(0), noutput);
4633 }
4634 
4636  ad_segment dy(args.dy_ptr(0), noutput);
4637  ad_segment dy_packed = pack(dy);
4638  Replay *pdx = args.dx_ptr(0);
4639  for (Index i = 0; i < dy_packed.size(); i++) pdx[i] = dy_packed[i];
4640 }
4641 
const char *UnpkOp::op_name() { return "UnpkOp"; }

// The input is the K-scalar packed representation (a SegmentRef).
void UnpkOp::dependencies(Args<> &args, Dependencies &dep) const {
  dep.add_segment(args.input(0), K);
}
4647 
4649  global::Complete<PackOp> F(x.size());
4650  return F(x);
4651 }
4652 
4654  Index n = SegmentRef(x).size;
4656  return op(x);
4657 }
4658 
// Decode the j'th packed SegmentRef stored in the flat buffer 'x' and
// return a pointer to the start of the referenced values.
Scalar *unpack(const std::vector<Scalar> &x, Index j) {
  Index K = ScalarPack<SegmentRef>::size;
  SegmentRef sr(&(x[j * K]));
  return sr.value_ptr();
}
4664 
// Flatten a list of segments into one vector of scalars, preserving order.
std::vector<ad_aug> concat(const std::vector<ad_segment> &x) {
  std::vector<ad_aug> ans;
  for (size_t i = 0; i < x.size(); i++) {
    ad_segment xi = x[i];
    for (size_t j = 0; j < xi.size(); j++) {
      ans.push_back(xi[j]);
    }
  }
  return ans;
}
4675 } // namespace TMBad
// Automatic differentiation library designed for TMB.
// (Definition: TMB.hpp:153)
1 // Autogenerated - do not edit by hand !
2 #include "TMBad.hpp"
3 namespace TMBad {
4 
// Default sparse-Jacobian configuration: no compression, remap indices.
SpJacFun_config::SpJacFun_config() : compress(false), index_remap(true) {}
6 } // namespace TMBad
7 // Autogenerated - do not edit by hand !
8 #include "ad_blas.hpp"
9 namespace TMBad {
10 
// AD matrix product: allocate the result and delegate to the templated
// blas-style kernel (no transposition, no accumulation on either side).
vmatrix matmul(const vmatrix &x, const vmatrix &y) {
  vmatrix z(x.rows(), y.cols());
  Map<vmatrix> zm(&z(0), z.rows(), z.cols());
  matmul<false, false, false, false>(x, y, zm);
  return z;
}

// Plain numeric matrix product -- the matrix type's own operator*
// (presumably Eigen) does the work.
dmatrix matmul(const dmatrix &x, const dmatrix &y) { return x * y; }
19 } // namespace TMBad
20 // Autogenerated - do not edit by hand !
21 #include "checkpoint.hpp"
22 namespace TMBad {
23 
24 bool ParametersChanged::operator()(const std::vector<Scalar> &x) {
25  bool change = (x != x_prev);
26  if (change) {
27  x_prev = x;
28  }
29  return change;
30 }
31 } // namespace TMBad
32 // Autogenerated - do not edit by hand !
33 #include "code_generator.hpp"
34 namespace TMBad {
35 
/** \brief Replace every occurrence of `oldStr` in `str` by `newStr`.

    Scans left to right; the scan resumes after each inserted
    replacement, so matches created by the replacement text itself are
    not re-matched.

    \param str    String modified in place.
    \param oldStr Pattern to search for. An empty pattern is a no-op
                  (guard added: `find("")` matches at every position,
                  so the previous code looped forever on empty input).
    \param newStr Replacement text.
*/
void searchReplace(std::string &str, const std::string &oldStr,
                   const std::string &newStr) {
  if (oldStr.empty()) return;  // otherwise the loop below never terminates
  std::string::size_type pos = 0u;
  while ((pos = str.find(oldStr, pos)) != std::string::npos) {
    str.replace(pos, oldStr.length(), newStr);
    pos += newStr.length();
  }
}
44 
// Pointer type spelling for the generated code: "<float>**" on gpu
// (one buffer per thread), "<float>*" otherwise.
std::string code_config::float_ptr() { return float_str + (gpu ? "**" : "*"); }

// Function qualifier for emitted entry points: a CUDA device function
// on gpu, otherwise a C-linkage symbol so it can be found via dlsym().
std::string code_config::void_str() {
  return (gpu ? "__device__ void" : "extern \"C\" void");
}

// Prologue of an emitted function; on gpu each thread picks its slice.
void code_config::init_code() {
  if (gpu) {
    *cout << indent << "int idx = threadIdx.x;" << std::endl;
  }
}

// Emit the configured banner comment (if any) at the top of the file.
void code_config::write_header_comment() {
  if (header_comment.length() > 0) *cout << header_comment << std::endl;
}

// Defaults: asm comments on, gpu flavour on, scalar type taken from
// the TMBAD_SCALAR_TYPE macro, output to Rcout.
code_config::code_config()
    : asm_comments(true),
      gpu(true),
      indent(" "),
      header_comment("// Autogenerated - do not edit by hand !"),
      float_str(xstringify(TMBAD_SCALAR_TYPE)),
      cout(&Rcout) {}
68 
// Post-process and emit one operator's generated statement buffer:
// - optionally prefix an asm comment tagging the originating node;
// - on gpu, rewrite scalar indexing "v[k]" into "v[k][idx]" with the
//   blunt textual rule ']' -> '][idx]';
// - re-insert the spaces that Writer drops between statements.
void write_common(std::ostringstream &buffer, code_config cfg, size_t node) {
  std::ostream &cout = *cfg.cout;
  using std::endl;
  using std::left;
  using std::setw;
  std::string indent = cfg.indent;
  if (cfg.asm_comments)
    cout << indent << "asm(\"// Node: " << node << "\");" << endl;
  bool empty_buffer = (buffer.tellp() == 0);
  if (!empty_buffer) {
    std::string str = buffer.str();
    if (cfg.gpu) {
      std::string pattern = "]";
      std::string replace = "][idx]";
      searchReplace(str, pattern, replace);
    }
    // Writer emits statements back to back ";v..."/";d..."; add spaces.
    searchReplace(str, ";v", "; v");
    searchReplace(str, ";d", "; d");
    cout << indent << str << endl;
  }
}
90 
// Emit C source for a forward sweep over the whole tape: replay every
// operator with Writer as the scalar type, so each forward() call
// produces source text instead of numbers.
void write_forward(global &glob, code_config cfg) {
  using std::endl;
  using std::left;
  using std::setw;
  std::ostream &cout = *cfg.cout;
  cfg.write_header_comment();
  cout << cfg.void_str() << " forward(" << cfg.float_ptr() << " v) {" << endl;
  cfg.init_code();
  ForwardArgs<Writer> args(glob.inputs, glob.values);
  for (size_t i = 0; i < glob.opstack.size(); i++) {
    // Writer appends to a per-node buffer which write_common then
    // post-processes and flushes.
    std::ostringstream buffer;
    Writer::cout = &buffer;
    glob.opstack[i]->forward(args);
    write_common(buffer, cfg, i);
    glob.opstack[i]->increment(args.ptr);
  }
  cout << "}" << endl;
}
109 
// Emit C source for a reverse sweep: walk the tape backwards and let
// each operator's reverse() write its adjoint update as source text.
void write_reverse(global &glob, code_config cfg) {
  using std::endl;
  using std::left;
  using std::setw;
  std::ostream &cout = *cfg.cout;
  cfg.write_header_comment();
  cout << cfg.void_str() << " reverse(" << cfg.float_ptr() << " v, "
       << cfg.float_ptr() << " d) {" << endl;
  cfg.init_code();
  ReverseArgs<Writer> args(glob.inputs, glob.values);
  for (size_t i = glob.opstack.size(); i > 0;) {
    i--;
    // Pointers are decremented *before* replaying node i (mirror image
    // of the forward sweep's post-increment).
    glob.opstack[i]->decrement(args.ptr);
    std::ostringstream buffer;
    Writer::cout = &buffer;
    glob.opstack[i]->reverse(args);
    write_common(buffer, cfg, i);
  }
  cout << "}" << endl;
}
130 
// Emit a complete translation unit (includes, forward sweep, reverse
// sweep, stub main) for the given tape. Takes `glob` by value: the
// replay below is free to mutate the copy.
void write_all(global glob, code_config cfg) {
  using std::endl;
  using std::left;
  using std::setw;
  std::ostream &cout = *cfg.cout;
  cout << "#include \"global.hpp\"" << endl;
  cout << "#include \"ad_blas.hpp\"" << endl;
  write_forward(glob, cfg);
  write_reverse(glob, cfg);
  cout << "int main() {}" << endl;
}
142 } // namespace TMBad
143 #ifndef _WIN32
144 // Autogenerated - do not edit by hand !
145 #include "compile.hpp"
146 namespace TMBad {
147 
148 void compile(global &glob, code_config cfg) {
149  cfg.gpu = false;
150  cfg.asm_comments = false;
151  std::ofstream file;
152  file.open("tmp.cpp");
153  cfg.cout = &file;
154 
155  *cfg.cout << "#include <cmath>" << std::endl;
156  *cfg.cout
157  << "template<class T>T sign(const T &x) { return (x > 0) - (x < 0); }"
158  << std::endl;
159 
160  write_forward(glob, cfg);
161 
162  write_reverse(glob, cfg);
163 
164  int out = system("g++ -O3 -g tmp.cpp -o tmp.so -shared -fPIC");
165  if (out != 0) {
166  }
167 
168  void *handle = dlopen("./tmp.so", RTLD_NOW);
169  if (handle != NULL) {
170  Rcout << "Loading compiled code!" << std::endl;
171  glob.forward_compiled =
172  reinterpret_cast<void (*)(Scalar *)>(dlsym(handle, "forward"));
173  glob.reverse_compiled = reinterpret_cast<void (*)(Scalar *, Scalar *)>(
174  dlsym(handle, "reverse"));
175  }
176 }
177 } // namespace TMBad
178 #endif
179 // Autogenerated - do not edit by hand !
180 #include "compression.hpp"
181 namespace TMBad {
182 
183 std::ostream &operator<<(std::ostream &os, const period &x) {
184  os << "begin: " << x.begin;
185  os << " size: " << x.size;
186  os << " rep: " << x.rep;
187  return os;
188 }
189 
// Split a candidate period `p` of the operator stack into sub-periods
// whose *input index patterns* are themselves periodic. The input
// indices of the p.rep repetitions are viewed as an (nrow x ncol)
// matrix; boundaries where any row's difference pattern breaks its
// periodicity are marked, and the period is cut at those marks.
std::vector<period> split_period(global *glob, period p,
                                 size_t max_period_size) {
  typedef std::ptrdiff_t ptrdiff_t;
  glob->subgraph_cache_ptr();

  // First input position of the first repetition.
  size_t offset = glob->subgraph_ptr[p.begin].first;

  // Inputs consumed by one repetition of the operator sequence.
  size_t nrow = 0;
  for (size_t i = 0; i < p.size; i++) {
    nrow += glob->opstack[p.begin + i]->input_size();
  }

  size_t ncol = p.rep;

  // Column k holds the inputs of repetition k.
  matrix_view<Index> x(&(glob->inputs[offset]), nrow, ncol);

  // marks[k] == true  =>  cut between repetition k and k+1.
  std::vector<bool> marks(ncol - 1, false);

  for (size_t i = 0; i < nrow; i++) {
    std::vector<period> pd =
        periodic<ptrdiff_t>(x.row_diff<ptrdiff_t>(i), max_period_size)
            .find_all();

    // Mark the start and end of every periodic run found in this row.
    for (size_t j = 0; j < pd.size(); j++) {
      if (pd[j].begin > 0) {
        marks[pd[j].begin - 1] = true;
      }
      size_t end = pd[j].begin + pd[j].size * pd[j].rep;
      if (end < marks.size()) marks[end] = true;
    }
  }

  // Rebuild the period list: start one period, then either extend its
  // repetition count or cut at a mark and start a new one.
  std::vector<period> ans;
  p.rep = 1;
  ans.push_back(p);
  for (size_t j = 0; j < marks.size(); j++) {
    if (marks[j]) {
      period pnew = p;
      pnew.begin = p.begin + (j + 1) * p.size;
      pnew.rep = 1;
      ans.push_back(pnew);
    } else {
      ans.back().rep++;
    }
  }

  return ans;
}
238 
// Inputs consumed by one repetition of the compressed sequence.
size_t compressed_input::input_size() const { return n; }

// Refresh the entries of increment_pattern that are periodic rather
// than constant, based on the current repetition counter.
void compressed_input::update_increment_pattern() const {
  for (size_t i = 0; i < (size_t)np; i++)
    increment_pattern[which_periodic[i]] =
        period_data[period_offsets[i] + counter % period_sizes[i]];
}

// Advance the cached input indices to the next repetition and rewind
// the per-repetition input pointer.
void compressed_input::increment(Args<> &args) const {
  if (np) {
    update_increment_pattern();
    counter++;
  }
  for (size_t i = 0; i < n; i++) inputs[i] += increment_pattern[i];
  args.ptr.first = 0;
}

// Exact inverse of increment(): step the input indices back one
// repetition (pattern is restored *after* decrementing the counter).
void compressed_input::decrement(Args<> &args) const {
  args.ptr.first = input_size();
  for (size_t i = 0; i < n; i++) inputs[i] -= increment_pattern[i];
  if (np) {
    counter--;
    update_increment_pattern();
  }
}

// Prepare a forward sweep: capture the starting input indices and
// redirect args to the private, mutable copy.
void compressed_input::forward_init(Args<> &args) const {
  counter = 0;
  inputs.resize(input_size());
  for (size_t i = 0; i < inputs.size(); i++) inputs[i] = args.input(i);
  args.inputs = inputs.data();
  args.ptr.first = 0;
}

// Prepare a reverse sweep: position the cached indices at the *last*
// repetition (start + total input_diff) and the value pointer past all
// m * nrep outputs.
void compressed_input::reverse_init(Args<> &args) {
  inputs.resize(input_size());
  for (size_t i = 0; i < inputs.size(); i++)
    inputs[i] = args.input(i) + input_diff[i];

  args.inputs = inputs.data();
  args.ptr.first = 0;
  args.ptr.second += m * nrep;
  counter = nrep - 1;
  update_increment_pattern();
  args.ptr.first = input_size();
}
285 
// Compute, for each of the n compressed inputs, the [lower, upper]
// range of variable indices it touches across all nrep repetitions, by
// simulating the full forward stepping.
void compressed_input::dependencies_intervals(Args<> &args,
                                              std::vector<Index> &lower,
                                              std::vector<Index> &upper) const {
  forward_init(args);
  lower = inputs;
  upper = inputs;
  for (size_t i = 0; i < nrep; i++) {
    for (size_t j = 0; j < inputs.size(); j++) {
      if (inputs[j] < lower[j]) lower[j] = inputs[j];
      if (inputs[j] > upper[j]) upper[j] = inputs[j];
    }
    increment(args);
  }
}
300 
301 bool compressed_input::test_period(std::vector<ptrdiff_t> &x, size_t p) {
302  for (size_t j = 0; j < x.size(); j++) {
303  if (x[j] != x[j % p]) return false;
304  }
305  return true;
306 }
307 
308 size_t compressed_input::find_shortest(std::vector<ptrdiff_t> &x) {
309  for (size_t p = 1; p < max_period_size; p++) {
310  if (test_period(x, p)) return p;
311  }
312  return x.size();
313 }
314 
compressed_input::compressed_input() {}

// Build the compressed description of nrep repetitions of a sequence
// consuming nrow inputs (viewed as an nrow x ncol index matrix inside
// x starting at `offset`). Each row's consecutive differences are
// either constant (stored in increment_pattern) or periodic (stored in
// the shared period_data pool with per-row offset/size).
compressed_input::compressed_input(std::vector<Index> &x, size_t offset,
                                   size_t nrow, size_t m, size_t ncol,
                                   size_t max_period_size)
    : n(nrow), m(m), nrep(ncol), counter(0), max_period_size(max_period_size) {
  matrix_view<Index> xm(&x[offset], nrow, ncol);

  for (size_t i = 0; i < nrow; i++) {
    std::vector<ptrdiff_t> rd = xm.row_diff<ptrdiff_t>(i);

    size_t p = find_shortest(rd);

    // Constant part of the step; periodic rows get refreshed each
    // repetition by update_increment_pattern().
    increment_pattern.push_back(rd[0]);
    if (p != 1) {
      which_periodic.push_back(i);
      period_sizes.push_back(p);

      // Reuse an identical pattern already in the pool if present;
      // otherwise append it.
      size_t pos = std::search(period_data.begin(), period_data.end(),
                               rd.begin(), rd.begin() + p) -
                   period_data.begin();
      if (pos < period_data.size()) {
        period_offsets.push_back(pos);
      } else {
        period_offsets.push_back(period_data.size());
        period_data.insert(period_data.end(), rd.begin(), rd.begin() + p);
      }
    }
  }

  np = which_periodic.size();

  // Record the net index displacement over all nrep repetitions
  // (needed by reverse_init to jump to the last repetition).
  input_diff.resize(n, 0);
  Args<> args(input_diff);
  forward_init(args);
  for (size_t i = 0; i < nrep; i++) {
    increment(args);
  }
  input_diff = inputs;
}
355 
// Wrap one repetition of the operators covered by period `p` and build
// the compressed description of the inputs of all p.rep repetitions.
// `ptr.first` must point at the first input of the first repetition.
StackOp::StackOp(global *glob, period p, IndexPair ptr,
                 size_t max_period_size) {
  opstack.resize(p.size);
  size_t n = 0, m = 0;
  for (size_t i = 0; i < p.size; i++) {
    opstack[i] = glob->opstack[p.begin + i]->copy();
    n += opstack[i]->input_size();
    m += opstack[i]->output_size();
  }
  ci = compressed_input(glob->inputs, ptr.first, n, m, p.rep, max_period_size);
}

// Copy: shares operator copies by value semantics of the members.
StackOp::StackOp(const StackOp &x) : opstack(x.opstack), ci(x.ci) {}
369 
370 void StackOp::print(global::print_config cfg) {
371  std::vector<const char *> tmp(opstack.size());
372  for (size_t i = 0; i < opstack.size(); i++) tmp[i] = opstack[i]->op_name();
373  Rcout << cfg.prefix << " opstack = " << tmp << "\n";
374 
375  Rcout << cfg.prefix << " "
376  << "nrep"
377  << " = " << ci.nrep << "\n";
378  ;
379  Rcout << cfg.prefix << " "
380  << "increment_pattern"
381  << " = " << ci.increment_pattern << "\n";
382  ;
383  if (ci.which_periodic.size() > 0) {
384  Rcout << cfg.prefix << " "
385  << "which_periodic"
386  << " = " << ci.which_periodic << "\n";
387  ;
388  Rcout << cfg.prefix << " "
389  << "period_sizes"
390  << " = " << ci.period_sizes << "\n";
391  ;
392  Rcout << cfg.prefix << " "
393  << "period_offsets"
394  << " = " << ci.period_offsets << "\n";
395  ;
396  Rcout << cfg.prefix << " "
397  << "period_data"
398  << " = " << ci.period_data << "\n";
399  ;
400  }
401 
402  Rcout << "\n";
403 }
404 
// Inputs of one repetition (the compressed representation).
Index StackOp::input_size() const { return ci.n; }

// Total outputs: m per repetition times nrep repetitions.
Index StackOp::output_size() const { return ci.m * ci.nrep; }
408 
// Code writer: emit the nrep repetitions as a single C for-loop.
// Loop-local arrays hold the input indices (i), their per-repetition
// increments (ip), the periodic-pattern tables (wp/ps/po/pd) and the
// output indices (o); the loop body is the wrapped operators' code
// followed by the index stepping that compressed_input::increment does
// at run time.
void StackOp::forward(ForwardArgs<Writer> &args) {
  size_t n = ci.n, m = ci.m, nrep = ci.nrep;
  std::vector<Index> inputs(n);
  for (size_t i = 0; i < (size_t)n; i++) inputs[i] = args.input(i);
  std::vector<Index> outputs(m);
  for (size_t i = 0; i < (size_t)m; i++) outputs[i] = args.output(i);
  Writer w;
  size_t np = ci.which_periodic.size();
  size_t sp = ci.period_data.size();
  w << "for (int count = 0, ";
  if (n > 0) {
    w << "i[" << n << "]=" << inputs << ", "
      << "ip[" << n << "]=" << ci.increment_pattern << ", ";
  }
  if (np > 0) {
    w << "wp[" << np << "]=" << ci.which_periodic << ", "
      << "ps[" << np << "]=" << ci.period_sizes << ", "
      << "po[" << np << "]=" << ci.period_offsets << ", "
      << "pd[" << sp << "]=" << ci.period_data << ", ";
  }
  w << "o[" << m << "]=" << outputs << "; "
    << "count < " << nrep << "; count++) {\n";

  // Body: one repetition of the wrapped operators, writing through
  // the indirect (array-based) index scheme.
  w << " ";
  ForwardArgs<Writer> args_cpy = args;
  args_cpy.set_indirect();
  for (size_t k = 0; k < opstack.size(); k++) {
    opstack[k]->forward_incr(args_cpy);
  }
  w << "\n";

  // Refresh the periodic entries of the increment pattern.
  if (np > 0) {
    w << " ";
    for (size_t k = 0; k < np; k++)
      w << "ip[wp[" << k << "]] = pd[po[" << k << "] + count % ps[" << k
        << "]]; ";
    w << "\n";
  }
  // Step input indices, then output indices.
  if (n > 0) {
    w << " ";
    for (size_t k = 0; k < n; k++) w << "i[" << k << "] += ip[" << k << "]; ";
    w << "\n";
  }
  w << " ";
  for (size_t k = 0; k < m; k++) w << "o[" << k << "] += " << m << "; ";
  w << "\n";

  w << " ";
  w << "}";
}
459 
// Code writer: emit the reverse sweep of all nrep repetitions as a
// single C for-loop running backwards. The loop starts *past* the last
// repetition (inputs displaced by input_diff, outputs by m*nrep) and
// decrements indices before replaying the wrapped operators' reverse
// code -- the mirror image of StackOp::forward above.
void StackOp::reverse(ReverseArgs<Writer> &args) {
  size_t n = ci.n, m = ci.m, nrep = ci.nrep;
  std::vector<ptrdiff_t> inputs(input_size());
  for (size_t i = 0; i < inputs.size(); i++) {
    ptrdiff_t tmp;
    // input_diff is unsigned; recover the signed displacement without
    // relying on implementation-defined conversions.
    if (-ci.input_diff[i] < ci.input_diff[i]) {
      tmp = -((ptrdiff_t)-ci.input_diff[i]);
    } else {
      tmp = ci.input_diff[i];
    }
    inputs[i] = args.input(i) + tmp;
  }
  std::vector<Index> outputs(ci.m);
  for (size_t i = 0; i < (size_t)ci.m; i++)
    outputs[i] = args.output(i) + ci.m * ci.nrep;
  Writer w;
  size_t np = ci.which_periodic.size();
  size_t sp = ci.period_data.size();
  w << "for (int count = " << nrep << ", ";
  if (n > 0) {
    w << "i[" << n << "]=" << inputs << ", "
      << "ip[" << n << "]=" << ci.increment_pattern << ", ";
  }
  if (np > 0) {
    w << "wp[" << np << "]=" << ci.which_periodic << ", "
      << "ps[" << np << "]=" << ci.period_sizes << ", "
      << "po[" << np << "]=" << ci.period_offsets << ", "
      << "pd[" << sp << "]=" << ci.period_data << ", ";
  }
  w << "o[" << m << "]=" << outputs << "; "
    << "count > 0 ; ) {\n";

  w << " ";
  w << "count--;\n";
  // Restore this repetition's increment pattern, then step the input
  // and output indices *back* before replaying the operators.
  if (np > 0) {
    w << " ";
    for (size_t k = 0; k < np; k++)
      w << "ip[wp[" << k << "]] = pd[po[" << k << "] + count % ps[" << k
        << "]]; ";
    w << "\n";
  }
  if (n > 0) {
    w << " ";
    for (size_t k = 0; k < n; k++) w << "i[" << k << "] -= ip[" << k << "]; ";
    w << "\n";
  }
  w << " ";
  for (size_t k = 0; k < m; k++) w << "o[" << k << "] -= " << m << "; ";
  w << "\n";

  w << " ";

  // Replay the wrapped operators' reverse code for one repetition.
  ReverseArgs<Writer> args_cpy = args;
  args_cpy.set_indirect();
  args_cpy.ptr.first = ci.n;
  args_cpy.ptr.second = ci.m;
  for (size_t k = opstack.size(); k > 0;) {
    k--;
    opstack[k]->reverse_decr(args_cpy);
  }
  w << "\n";

  w << " ";
  w << "}";
}
525 
// Dependencies of the whole stack: one conservative [lower, upper]
// interval per compressed input, covering every repetition.
void StackOp::dependencies(Args<> args, Dependencies &dep) const {
  std::vector<Index> lower;
  std::vector<Index> upper;
  ci.dependencies_intervals(args, lower, upper);
  for (size_t i = 0; i < lower.size(); i++) {
    dep.add_interval(lower[i], upper[i]);
  }
}
534 
// Display name of this operator in graph printouts.
const char *StackOp::op_name() { return "StackOp"; }
536 
539  cfg.strong_inv = false;
540  cfg.strong_const = false;
541  cfg.strong_output = false;
542  cfg.reduce = false;
543  cfg.deterministic = false;
544  std::vector<hash_t> h = glob.hash_sweep(cfg);
545  std::vector<Index> remap = radix::first_occurance<Index>(h);
546 
547  TMBAD_ASSERT(all_allow_remap(glob));
548 
549  Args<> args(glob.inputs);
550  for (size_t i = 0; i < glob.opstack.size(); i++) {
551  Dependencies dep;
552  glob.opstack[i]->dependencies(args, dep);
553 
554  Index var = args.ptr.second;
555  toposort_remap<Index> fb(remap, var);
556  dep.apply(fb);
557  glob.opstack[i]->increment(args.ptr);
558  }
559 
560  std::vector<Index> ord = radix::order<Index>(remap);
561  std::vector<Index> v2o = glob.var2op();
562  glob.subgraph_seq = subset(v2o, ord);
563 
564  glob = glob.extract_sub();
565 }
566 
568  std::vector<Index> remap(glob.values.size(), Index(-1));
569  Args<> args(glob.inputs);
570  for (size_t i = 0; i < glob.opstack.size(); i++) {
571  Dependencies dep;
572  glob.opstack[i]->dependencies(args, dep);
573  sort_unique_inplace(dep);
574  Index var = args.ptr.second;
575  temporaries_remap<Index> fb(remap, var);
576  dep.apply(fb);
577  glob.opstack[i]->increment(args.ptr);
578  }
579 
580  for (size_t i = remap.size(); i > 0;) {
581  i--;
582  if (remap[i] == Index(-1))
583  remap[i] = i;
584  else
585  remap[i] = remap[remap[i]];
586  }
587 
588  std::vector<Index> ord = radix::order<Index>(remap);
589  std::vector<Index> v2o = glob.var2op();
590  glob.subgraph_seq = subset(v2o, ord);
591 
592  glob = glob.extract_sub();
593 }
594 
596  std::vector<bool> visited(glob.opstack.size(), false);
597  std::vector<Index> v2o = glob.var2op();
598  std::vector<Index> stack;
599  std::vector<Index> result;
600  Args<> args(glob.inputs);
601  glob.subgraph_cache_ptr();
602  for (size_t k = 0; k < glob.dep_index.size(); k++) {
603  Index dep_var = glob.dep_index[k];
604  Index i = v2o[dep_var];
605 
606  stack.push_back(i);
607  visited[i] = true;
608  while (stack.size() > 0) {
609  Index i = stack.back();
610  args.ptr = glob.subgraph_ptr[i];
611  Dependencies dep;
612  glob.opstack[i]->dependencies(args, dep);
613  dfs_add_to_stack<Index> add_to_stack(stack, visited, v2o);
614  size_t before = stack.size();
615  dep.apply(add_to_stack);
616  size_t after = stack.size();
617  if (before == after) {
618  result.push_back(i);
619  stack.pop_back();
620  }
621  }
622  }
623 
624  glob.subgraph_seq = result;
625  glob = glob.extract_sub();
626 
627  glob.shrink_to_fit();
628 }
629 
// Tape compression: find periodic runs in the operator stack and
// replace each run by a single StackOp (a compressed loop), padding
// the vacated slots with NullOp / NullOp2 so index bookkeeping stays
// consistent, then compact the tape.
void compress(global &glob, size_t max_period_size) {
  size_t min_period_rep = TMBAD_MIN_PERIOD_REP;
  periodic<global::OperatorPure *> p(glob.opstack, max_period_size,
                                     min_period_rep);
  std::vector<period> periods = p.find_all();

  // Refine each period where the input pattern allows; if refinement
  // fragments too much (> 10 pieces) keep the original period.
  std::vector<period> periods_expand;
  for (size_t i = 0; i < periods.size(); i++) {
    std::vector<period> tmp = split_period(&glob, periods[i], max_period_size);

    if (tmp.size() > 10) {
      tmp.resize(0);
      tmp.push_back(periods[i]);
    }

    // Only periods that actually repeat are worth compressing.
    for (size_t j = 0; j < tmp.size(); j++) {
      if (tmp[j].rep > 1) periods_expand.push_back(tmp[j]);
    }
  }

  std::swap(periods, periods_expand);
  OperatorPure *null_op = get_glob()->getOperator<global::NullOp>();
  IndexPair ptr(0, 0);
  Index k = 0;
  for (size_t i = 0; i < periods.size(); i++) {
    period p = periods[i];
    TMBAD_ASSERT(p.rep >= 1);
    // Advance ptr to the first input/output of this period.
    while (k < p.begin) {
      glob.opstack[k]->increment(ptr);
      k++;
    }

    OperatorPure *pOp =
        get_glob()->getOperator<StackOp>(&glob, p, ptr, max_period_size);
    // Blank out the replaced operators (and count their inputs).
    Index ninp = 0;
    for (size_t j = 0; j < p.size * p.rep; j++) {
      ninp += glob.opstack[p.begin + j]->input_size();
      glob.opstack[p.begin + j]->deallocate();
      glob.opstack[p.begin + j] = null_op;
    }
    glob.opstack[p.begin] = pOp;
    // NullOp2 swallows the inputs the StackOp no longer consumes.
    ninp -= pOp->input_size();
    glob.opstack[p.begin + 1] =
        get_glob()->getOperator<global::NullOp2>(ninp, 0);
  }

  // Keep all values; just drop the nulled operators and slack storage.
  std::vector<bool> marks(glob.values.size(), true);
  glob.extract_sub_inplace(marks);
  glob.shrink_to_fit();
}
680 } // namespace TMBad
681 // Autogenerated - do not edit by hand !
682 #include "global.hpp"
683 namespace TMBad {
684 
// One tape slot per thread; global_ptr[t] is the tape currently
// recording on thread t (NULL when that thread is not taping).
global *global_ptr_data[TMBAD_MAX_NUM_THREADS] = {NULL};
global **global_ptr = global_ptr_data;
std::ostream *Writer::cout = 0;
bool global::fuse = 0;

// Tape of the calling thread.
global *get_glob() { return global_ptr[TMBAD_THREAD_NUM]; }

Dependencies::Dependencies() {}

// Drop both the individual indices (the base vector) and the
// interval list.
void Dependencies::clear() {
  this->resize(0);
  I.resize(0);
}

// Record the closed interval [a, b] of variable indices.
void Dependencies::add_interval(Index a, Index b) {
  I.push_back(std::pair<Index, Index>(a, b));
}

// Record the contiguous segment [start, start + size - 1]; empty
// segments are skipped (and would otherwise underflow the end point).
void Dependencies::add_segment(Index start, Index size) {
  if (size > 0) add_interval(start, start + size - 1);
}
706 
// Apply the index mapping x to every dependency. Correct for the
// interval list only when x is monotone (hence the name), so interval
// endpoints map to interval endpoints.
void Dependencies::monotone_transform_inplace(const std::vector<Index> &x) {
  for (size_t i = 0; i < this->size(); i++) (*this)[i] = x[(*this)[i]];
  for (size_t i = 0; i < I.size(); i++) {
    I[i].first = x[I[i].first];
    I[i].second = x[I[i].second];
  }
}
714 
715 bool Dependencies::any(const std::vector<bool> &x) const {
716  for (size_t i = 0; i < this->size(); i++)
717  if (x[(*this)[i]]) return true;
718  for (size_t i = 0; i < I.size(); i++) {
719  for (Index j = I[i].first; j <= I[i].second; j++) {
720  if (x[j]) return true;
721  }
722  }
723  return false;
724 }
725 
726 std::string tostr(const Index &x) {
727  std::ostringstream strs;
728  strs << x;
729  return strs.str();
730 }
731 
732 std::string tostr(const Scalar &x) {
733  std::ostringstream strs;
734  strs << x;
735  return strs.str();
736 }
737 
// Writer represents a value symbolically as the C source text that
// computes it (it *is* a std::string). Arithmetic on Writers
// concatenates source fragments; the compound-assignment forms emit a
// complete statement to the active Writer::cout buffer.

Writer::Writer(std::string str) : std::string(str) {}

Writer::Writer(Scalar x) : std::string(tostr(x)) {}

Writer::Writer() {}

// Parenthesize a source fragment.
std::string Writer::p(std::string x) { return "(" + x + ")"; }

Writer Writer::operator+(const Writer &other) {
  return p(*this + " + " + other);
}

Writer Writer::operator-(const Writer &other) {
  return p(*this + " - " + other);
}

Writer Writer::operator-() { return " - " + *this; }

// NOTE: '*' and '/' are emitted without parentheses (C precedence
// binds them tighter than the parenthesized '+'/'-' fragments).
Writer Writer::operator*(const Writer &other) { return *this + " * " + other; }

Writer Writer::operator/(const Writer &other) { return *this + " / " + other; }

Writer Writer::operator*(const Scalar &other) {
  return *this + "*" + tostr(other);
}

Writer Writer::operator+(const Scalar &other) {
  return p(*this + "+" + tostr(other));
}

// Statement emitters: print "<lhs> <op> <rhs>;" to the buffer.
void Writer::operator=(const Writer &other) {
  *cout << *this + " = " + other << ";";
}

void Writer::operator+=(const Writer &other) {
  *cout << *this + " += " + other << ";";
}

void Writer::operator-=(const Writer &other) {
  *cout << *this + " -= " + other << ";";
}

void Writer::operator*=(const Writer &other) {
  *cout << *this + " *= " + other << ";";
}

void Writer::operator/=(const Writer &other) {
  *cout << *this + " /= " + other << ";";
}
787 
// A position on the tape: operator index plus the matching
// (input, value) pointer pair.
Position::Position(Index node, Index first, Index second)
    : node(node), ptr(first, second) {}

Position::Position() : node(0), ptr(0, 0) {}

// Positions are ordered by operator index alone.
bool Position::operator<(const Position &other) const {
  return this->node < other.node;
}
796 
graph::graph() {}

// CSR-style storage: p holds row pointers and j the flattened
// neighbor lists, so node k's neighbors are j[p[k]] .. j[p[k+1]-1].
size_t graph::num_neighbors(Index node) { return p[node + 1] - p[node]; }

Index *graph::neighbors(Index node) { return &(j[p[node]]); }

bool graph::empty() { return p.size() == 0; }

size_t graph::num_nodes() { return (empty() ? 0 : p.size() - 1); }
806 
807 void graph::print() {
808  for (size_t node = 0; node < num_nodes(); node++) {
809  Rcout << node << ": ";
810  for (size_t i = 0; i < num_neighbors(node); i++) {
811  Rcout << " " << neighbors(node)[i];
812  }
813  Rcout << "\n";
814  }
815 }
816 
// Out-degree of every node.
std::vector<Index> graph::rowcounts() {
  std::vector<Index> ans(num_nodes());
  for (size_t i = 0; i < ans.size(); i++) ans[i] = num_neighbors(i);
  return ans;
}

// In-degree of every node (tally of each target in the flattened edge
// list; the vector is value-initialized to zero).
std::vector<Index> graph::colcounts() {
  std::vector<Index> ans(num_nodes());
  for (size_t i = 0; i < j.size(); i++) ans[j[i]]++;
  return ans;
}
828 
// One breadth-first expansion step: append every unvisited neighbor of
// the nodes in `start` to `result`, marking it visited. Indexed loop
// is deliberate -- callers may pass the same vector as start and
// result (see search()), where iterators would be invalidated.
void graph::bfs(const std::vector<Index> &start, std::vector<bool> &visited,
                std::vector<Index> &result) {
  for (size_t i = 0; i < start.size(); i++) {
    Index node = start[i];
    for (size_t j_ = 0; j_ < num_neighbors(node); j_++) {
      Index k = neighbors(node)[j_];
      if (!visited[k]) {
        result.push_back(k);
        visited[k] = true;
      }
    }
  }
}
842 
// Expand `start` with all reachable nodes, using the reusable member
// scratch vector `mark` for visited flags; the start nodes' marks are
// reset afterwards so `mark` is clean for the next call.
void graph::search(std::vector<Index> &start, bool sort_input,
                   bool sort_output) {
  if (mark.size() == 0) mark.resize(num_nodes(), false);

  search(start, mark, sort_input, sort_output);

  for (size_t i = 0; i < start.size(); i++) mark[start[i]] = false;
}

// Core search: mark the start set, then grow `start` in place by BFS
// (bfs appends the frontier to the same vector it iterates).
void graph::search(std::vector<Index> &start, std::vector<bool> &visited,
                   bool sort_input, bool sort_output) {
  if (sort_input) sort_unique_inplace(start);

  for (size_t i = 0; i < start.size(); i++) visited[start[i]] = true;

  bfs(start, visited, start);

  if (sort_output) sort_inplace(start);
}
862 
// Nodes adjacent to `subgraph` but not inside it (one BFS step out of
// the marked set). All marks are reset before returning so the member
// scratch vector stays clean.
std::vector<Index> graph::boundary(const std::vector<Index> &subgraph) {
  if (mark.size() == 0) mark.resize(num_nodes(), false);

  std::vector<Index> boundary;

  for (size_t i = 0; i < subgraph.size(); i++) mark[subgraph[i]] = true;

  bfs(subgraph, mark, boundary);

  for (size_t i = 0; i < subgraph.size(); i++) mark[subgraph[i]] = false;
  for (size_t i = 0; i < boundary.size(); i++) mark[boundary[i]] = false;

  return boundary;
}
877 
// Build CSR adjacency (p, j) from an edge list by counting sort:
// first tally per-source degrees, prefix-sum them into row pointers,
// then scatter the targets using a moving write cursor per row.
graph::graph(size_t num_nodes, const std::vector<IndexPair> &edges) {
  std::vector<IndexPair>::const_iterator it;
  std::vector<Index> row_counts(num_nodes, 0);
  for (it = edges.begin(); it != edges.end(); it++) {
    row_counts[it->first]++;
  }

  p.resize(num_nodes + 1);
  p[0] = 0;
  for (size_t i = 0; i < num_nodes; i++) {
    p[i + 1] = p[i] + row_counts[i];
  }

  // k = per-row write cursors (starts as a copy of the row pointers).
  std::vector<Index> k(p);
  j.resize(edges.size());
  for (it = edges.begin(); it != edges.end(); it++) {
    j[k[it->first]++] = it->second;
  }
}
897 
// Operator flag set packed into a single integer bit mask.
op_info::op_info() : code(0) {
  static_assert(sizeof(IntRep) * 8 >= op_flag_count,
                "'IntRep' not wide enough!");
}

// Mask with exactly one flag raised.
op_info::op_info(op_flag f) : code(1 << f) {}

// Is flag f raised?
bool op_info::test(op_flag f) const { return code & 1 << f; }

// Set union of flags.
op_info &op_info::operator|=(const op_info &other) {
  code |= other.code;
  return *this;
}

// Set intersection of flags.
op_info &op_info::operator&=(const op_info &other) {
  code &= other.code;
  return *this;
}
916 
// operation_stack is a vector of operator pointers that additionally
// tracks the union of their flags ('any') and manages ownership of
// dynamically allocated operators.

global::operation_stack::operation_stack() {}

global::operation_stack::operation_stack(const operation_stack &other) {
  (*this).copy_from(other);
}

// Append an operator and fold its flags into the running union.
void global::operation_stack::push_back(OperatorPure *x) {
  Base::push_back(x);

  any |= x->info();
}

operation_stack &global::operation_stack::operator=(
    const operation_stack &other) {
  // Self-assignment guard: clear() would deallocate the operators we
  // are about to copy.
  if (this != &other) {
    (*this).clear();
    (*this).copy_from(other);
  }
  return *this;
}

global::operation_stack::~operation_stack() { (*this).clear(); }

// Deallocate owned operators (only needed when any of them is
// dynamic) and empty the vector.
void global::operation_stack::clear() {
  if (any.test(op_info::dynamic)) {
    for (size_t i = 0; i < (*this).size(); i++) (*this)[i]->deallocate();
  }
  (*this).resize(0);
}

// Deep-copy when dynamic operators are present; otherwise a cheap
// pointer-level vector copy suffices (operators are shared singletons).
void global::operation_stack::copy_from(const operation_stack &other) {
  if (other.any.test(op_info::dynamic)) {
    for (size_t i = 0; i < other.size(); i++) Base::push_back(other[i]->copy());
  } else {
    Base::operator=(other);
  }
  this->any = other.any;
}
955 
// Fresh tape: no compiled kernels, no parent, not recording.
global::global()
    : forward_compiled(NULL),
      reverse_compiled(NULL),
      parent_glob(NULL),
      in_use(false) {}

// Empty all tape storage (capacity may be retained by the vectors).
void global::clear() {
  values.resize(0);
  derivs.resize(0);
  inputs.resize(0);
  inv_index.resize(0);
  dep_index.resize(0);
  subgraph_ptr.resize(0);
  subgraph_seq.resize(0);
  opstack.clear();
}

// Release slack memory: derivative/subgraph caches are dropped
// outright; the main buffers are reallocated only when the used size
// is below `tol` times the capacity (swap-with-copy idiom).
void global::shrink_to_fit(double tol) {
  std::vector<Scalar>().swap(derivs);
  std::vector<IndexPair>().swap(subgraph_ptr);
  if (values.size() < tol * values.capacity())
    std::vector<Scalar>(values).swap(values);
  if (inputs.size() < tol * inputs.capacity())
    std::vector<Index>(inputs).swap(inputs);
  if (opstack.size() < tol * opstack.capacity())
    std::vector<OperatorPure *>(opstack).swap(opstack);
}
983 
// Size the derivative workspace to match values and zero it from the
// given tape position onward.
void global::clear_deriv(Position start) {
  derivs.resize(values.size());
  std::fill(derivs.begin() + start.ptr.second, derivs.end(), 0);
}

// Value / derivative of the i'th independent (inv) and dependent
// (dep) variable.
Scalar &global::value_inv(Index i) { return values[inv_index[i]]; }

Scalar &global::deriv_inv(Index i) { return derivs[inv_index[i]]; }

Scalar &global::value_dep(Index i) { return values[dep_index[i]]; }

Scalar &global::deriv_dep(Index i) { return derivs[dep_index[i]]; }

// Tape positions delimiting the full operation sequence.
Position global::begin() { return Position(0, 0, 0); }

Position global::end() {
  return Position(opstack.size(), inputs.size(), values.size());
}

// Trivial node filter: every operator passes.
CONSTEXPR bool global::no_filter::operator[](size_t i) const { return true; }
1004 
// Numeric forward sweep from `start`; a JIT-compiled kernel, when
// installed, replaces the interpreted loop (and ignores `start`).
void global::forward(Position start) {
  if (forward_compiled != NULL) {
    forward_compiled(values.data());
    return;
  }
  ForwardArgs<Scalar> args(inputs, values, this);
  args.ptr = start.ptr;
  forward_loop(args, start.node);
}

// Numeric reverse sweep; same compiled-kernel shortcut as forward().
void global::reverse(Position start) {
  if (reverse_compiled != NULL) {
    reverse_compiled(values.data(), derivs.data());
    return;
  }
  ReverseArgs<Scalar> args(inputs, values, derivs, this);
  reverse_loop(args, start.node);
}

// Forward sweep restricted to the active subgraph.
void global::forward_sub() {
  ForwardArgs<Scalar> args(inputs, values, this);
  forward_loop_subgraph(args);
}

// Reverse sweep restricted to the active subgraph.
void global::reverse_sub() {
  ReverseArgs<Scalar> args(inputs, values, derivs, this);
  reverse_loop_subgraph(args);
}
1033 
// Boolean dependency propagation: run the sweeps with bool as the
// "scalar" type so marks flow through the tape instead of numbers.

// Propagate marks forward through the whole tape.
void global::forward(std::vector<bool> &marks) {
  intervals<Index> marked_intervals;
  ForwardArgs<bool> args(inputs, marks, marked_intervals);
  forward_loop(args);
}

// Propagate marks backward through the whole tape.
void global::reverse(std::vector<bool> &marks) {
  intervals<Index> marked_intervals;
  ReverseArgs<bool> args(inputs, marks, marked_intervals);
  reverse_loop(args);
}

// Forward mark propagation over the subgraph, or over the nodes
// selected by `node_filter` when one is supplied.
void global::forward_sub(std::vector<bool> &marks,
                         const std::vector<bool> &node_filter) {
  intervals<Index> marked_intervals;
  ForwardArgs<bool> args(inputs, marks, marked_intervals);
  if (node_filter.size() == 0)
    forward_loop_subgraph(args);
  else
    forward_loop(args, 0, node_filter);
}

// Reverse mark propagation over the subgraph / filtered nodes.
void global::reverse_sub(std::vector<bool> &marks,
                         const std::vector<bool> &node_filter) {
  intervals<Index> marked_intervals;
  ReverseArgs<bool> args(inputs, marks, marked_intervals);
  if (node_filter.size() == 0)
    reverse_loop_subgraph(args);
  else
    reverse_loop(args, 0, node_filter);
}

// Dense variant: every operator marks all outputs it could touch.
void global::forward_dense(std::vector<bool> &marks) {
  intervals<Index> marked_intervals;
  ForwardArgs<bool> args(inputs, marks, marked_intervals);
  for (size_t i = 0; i < opstack.size(); i++) {
    opstack[i]->forward_incr_mark_dense(args);
  }
}
1073 
1074 intervals<Index> global::updating_intervals() const {
1075  Dependencies dep;
1076  intervals<Index> marked_intervals;
1077  Args<> args(inputs);
1078  for (size_t i = 0; i < opstack.size(); i++) {
1079  if (opstack[i]->info().test(op_info::updating)) {
1080  dep.clear();
1081  opstack[i]->dependencies(args, dep);
1082 
1083  for (size_t i = 0; i < dep.I.size(); i++) {
1084  Index a = dep.I[i].first;
1085  Index b = dep.I[i].second;
1086  marked_intervals.insert(a, b);
1087  }
1088  }
1089  opstack[i]->increment(args.ptr);
1090  }
1091  return marked_intervals;
1092 }
1093 
1094 intervals<Index> global::updating_intervals_sub() const {
1095  Dependencies dep;
1096  intervals<Index> marked_intervals;
1097  Args<> args(inputs);
1098  subgraph_cache_ptr();
1099  for (size_t j = 0; j < subgraph_seq.size(); j++) {
1100  Index i = subgraph_seq[j];
1101  args.ptr = subgraph_ptr[i];
1102  if (opstack[i]->info().test(op_info::updating)) {
1103  dep.clear();
1104  opstack[i]->dependencies(args, dep);
1105 
1106  for (size_t i = 0; i < dep.I.size(); i++) {
1107  Index a = dep.I[i].first;
1108  Index b = dep.I[i].second;
1109  marked_intervals.insert(a, b);
1110  }
1111  }
1112  }
1113  return marked_intervals;
1114 }
1115 
// Replay-tape value of the i'th independent variable of the original tape.
Replay &global::replay::value_inv(Index i) { return values[orig.inv_index[i]]; }

// Replay-tape derivative of the i'th independent variable of the original tape.
Replay &global::replay::deriv_inv(Index i) { return derivs[orig.inv_index[i]]; }

// Replay-tape value of the i'th dependent variable of the original tape.
Replay &global::replay::value_dep(Index i) { return values[orig.dep_index[i]]; }

// Replay-tape derivative of the i'th dependent variable of the original tape.
Replay &global::replay::deriv_dep(Index i) { return derivs[orig.dep_index[i]]; }
1123 
// Replay driver: re-executes the operator sequence of 'orig' while recording
// onto 'target'. Source and destination tapes must be distinct.
global::replay::replay(const global &orig, global &target)
    : orig(orig), target(target) {
  TMBAD_ASSERT(&orig != &target);
}

// Begin replaying: activate 'target' as the recording tape (unless it already
// is) and seed the replay value array from the original tape's values.
void global::replay::start() {
  parent_glob = get_glob();
  if (&target != parent_glob) target.ad_start();
  values = std::vector<Replay>(orig.values.begin(), orig.values.end());
}

// End replaying: deactivate 'target' and verify the previously active tape
// has been restored.
void global::replay::stop() {
  if (&target != parent_glob) target.ad_stop();
  TMBAD_ASSERT(parent_glob == get_glob());
}
1139 
// For each interval [a,b] in I, overwrite the corresponding derivative
// entries with freshly recorded zeros (ZeroOp) rather than constant zeros,
// so that 'updating' operators can modify them during replay.
void global::replay::add_updatable_derivs(const intervals<Index> &I) {
  // Local functor applied to every interval; p points at derivs[0].
  struct {
    Replay *p;
    void operator()(Index a, Index b) {
      Index n = b - a + 1;
      global::ZeroOp Z(n);
      Z(p + a, n);
    }
  } F = {derivs.data()};
  I.apply(F);
}

// Reset all replay derivatives to zero. Intervals written by 'updating'
// operators get tape-recorded zeros (see add_updatable_derivs); everything
// else gets the constant zero.
void global::replay::clear_deriv() {
  derivs.resize(values.size());
  std::fill(derivs.begin(), derivs.end(), Replay(0));

  if (orig.opstack.any.test(op_info::updating)) {
    intervals<Index> I = orig.updating_intervals();
    add_updatable_derivs(I);
  }
}
1161 
// Replay the original tape forward onto the target tape, starting at 'start'.
// inv_tags: tag original independent variables as independents on the target.
// dep_tags: tag original dependent variables as dependents on the target.
// node_filter (optional): restrict replay to the flagged operators.
void global::replay::forward(bool inv_tags, bool dep_tags, Position start,
                             const std::vector<bool> &node_filter) {
  TMBAD_ASSERT(&target == get_glob());
  if (inv_tags) {
    for (size_t i = 0; i < orig.inv_index.size(); i++)
      value_inv(i).Independent();
  }
  ForwardArgs<Replay> args(orig.inputs, values);
  if (node_filter.size() > 0) {
    TMBAD_ASSERT(node_filter.size() == orig.opstack.size());
    orig.forward_loop(args, start.node, node_filter);
  } else {
    orig.forward_loop(args, start.node);
  }
  if (dep_tags) {
    for (size_t i = 0; i < orig.dep_index.size(); i++) value_dep(i).Dependent();
  }
}

// Replay the original tape in reverse (adjoint sweep) onto the target tape.
// Note the role reversal: the adjoint sweep's *inputs* are the derivatives
// of the original *dependent* variables (tagged Independent when inv_tags),
// and its *outputs* are derivatives of the original *independent* variables
// (tagged Dependent when dep_tags).
void global::replay::reverse(bool dep_tags, bool inv_tags, Position start,
                             const std::vector<bool> &node_filter) {
  TMBAD_ASSERT(&target == get_glob());
  if (inv_tags) {
    for (size_t i = 0; i < orig.dep_index.size(); i++)
      deriv_dep(i).Independent();
  }
  ReverseArgs<Replay> args(orig.inputs, values, derivs);
  if (node_filter.size() > 0) {
    TMBAD_ASSERT(node_filter.size() == orig.opstack.size());
    orig.reverse_loop(args, start.node, node_filter);
  } else {
    orig.reverse_loop(args, start.node);
  }

  // Zero derivatives of everything before the start position.
  std::fill(derivs.begin(), derivs.begin() + start.ptr.second, Replay(0));
  if (dep_tags) {
    for (size_t i = 0; i < orig.inv_index.size(); i++) deriv_inv(i).Dependent();
  }
}
1201 
// Replay forward sweep restricted to the original tape's current subgraph.
void global::replay::forward_sub() {
  ForwardArgs<Replay> args(orig.inputs, values);
  orig.forward_loop_subgraph(args);
}

// Replay reverse sweep restricted to the original tape's current subgraph.
void global::replay::reverse_sub() {
  ReverseArgs<Replay> args(orig.inputs, values, derivs);
  orig.reverse_loop_subgraph(args);
}

// Zero replay derivatives of the subgraph only; intervals written by
// 'updating' operators get recorded zeros (see add_updatable_derivs).
void global::replay::clear_deriv_sub() {
  orig.clear_array_subgraph(derivs);

  if (orig.opstack.any.test(op_info::updating)) {
    intervals<Index> I = orig.updating_intervals_sub();
    add_updatable_derivs(I);
  }
}
1220 
// Rebuild this tape by replaying it forward onto a fresh tape, then replace
// this tape with the result. Used e.g. to re-trigger operator-level
// optimizations during recording.
void global::forward_replay(bool inv_tags, bool dep_tags) {
  global new_glob;
  global::replay replay(*this, new_glob);
  replay.start();
  replay.forward(inv_tags, dep_tags);
  replay.stop();
  *this = new_glob;
}

// Ensure subgraph_ptr caches one stack pointer (input/value offsets) per
// operator. Incrementally extends the cache if the tape has grown since the
// last call; no-op when already up to date.
void global::subgraph_cache_ptr() const {
  if (subgraph_ptr.size() == opstack.size()) return;
  TMBAD_ASSERT(subgraph_ptr.size() < opstack.size());
  if (subgraph_ptr.size() == 0) subgraph_ptr.push_back(IndexPair(0, 0));
  for (size_t i = subgraph_ptr.size(); i < opstack.size(); i++) {
    // Pointer of operator i = pointer of operator i-1 advanced by i-1's sizes.
    IndexPair ptr = subgraph_ptr[i - 1];
    opstack[i - 1]->increment(ptr);
    subgraph_ptr.push_back(ptr);
  }
}
1240 
// Define the subgraph as the operators owning at least one marked variable.
// 'marks' is indexed by variable; duplicates are suppressed only for
// *consecutive* variables of the same operator, which suffices because an
// operator's outputs are contiguous. Set append=true to extend rather than
// replace the current subgraph sequence.
void global::set_subgraph(const std::vector<bool> &marks, bool append) {
  std::vector<Index> v2o = var2op();
  if (!append) subgraph_seq.resize(0);
  Index previous = (Index)-1;
  for (size_t i = 0; i < marks.size(); i++) {
    if (marks[i] && (v2o[i] != previous)) {
      subgraph_seq.push_back(v2o[i]);
      previous = v2o[i];
    }
  }
}
1252 
// Set marks[i] = true for every variable produced by a subgraph operator.
void global::mark_subgraph(std::vector<bool> &marks) {
  TMBAD_ASSERT(marks.size() == values.size());
  clear_array_subgraph(marks, true);
}

// Set marks[i] = false for every variable produced by a subgraph operator.
void global::unmark_subgraph(std::vector<bool> &marks) {
  TMBAD_ASSERT(marks.size() == values.size());
  clear_array_subgraph(marks, false);
}

// Make the subgraph the full graph (all operators, in tape order).
void global::subgraph_trivial() {
  subgraph_cache_ptr();
  subgraph_seq.resize(0);
  for (size_t i = 0; i < opstack.size(); i++) subgraph_seq.push_back(i);
}

// Zero the derivative entries belonging to the subgraph only.
void global::clear_deriv_sub() { clear_array_subgraph(derivs); }
1270 
// Copy the current subgraph into a new tape. On return var_remap maps old
// variable indices to indices on the new tape (entries for variables outside
// the subgraph are meaningless). Independent/dependent variables that occur
// in the subgraph are re-registered on the new tape in their original order.
global global::extract_sub(std::vector<Index> &var_remap, global new_glob) {
  subgraph_cache_ptr();
  TMBAD_ASSERT(var_remap.size() == 0 || var_remap.size() == values.size());
  var_remap.resize(values.size(), 0);
  std::vector<bool> independent_variable = inv_marks();
  std::vector<bool> dependent_variable = dep_marks();
  ForwardArgs<Scalar> args(inputs, values, this);
  for (size_t j = 0; j < subgraph_seq.size(); j++) {
    Index i = subgraph_seq[j];
    args.ptr = subgraph_ptr[i];

    // Copy operator outputs, recording the old->new index mapping.
    // Marks are flipped off for copied inv/dep variables so that, after the
    // global flip below, exactly the *copied* ones are flagged.
    size_t nout = opstack[i]->output_size();
    for (size_t k = 0; k < nout; k++) {
      Index new_index = new_glob.values.size();
      Index old_index = args.output(k);
      var_remap[old_index] = new_index;
      new_glob.values.push_back(args.y(k));
      if (independent_variable[old_index]) {
        independent_variable[old_index] = false;
      }
      if (dependent_variable[old_index]) {
        dependent_variable[old_index] = false;
      }
    }

    // Copy operator inputs through the remap table.
    size_t nin = opstack[i]->input_size();
    for (size_t k = 0; k < nin; k++) {
      new_glob.inputs.push_back(var_remap[args.input(k)]);
    }

    new_glob.opstack.push_back(opstack[i]->copy());
  }

  // After flipping, 'true' means: inv/dep variable that was copied above.
  independent_variable.flip();
  dependent_variable.flip();

  for (size_t i = 0; i < inv_index.size(); i++) {
    Index old_var = inv_index[i];
    if (independent_variable[old_var])
      new_glob.inv_index.push_back(var_remap[old_var]);
  }
  for (size_t i = 0; i < dep_index.size(); i++) {
    Index old_var = dep_index[i];
    if (dependent_variable[old_var])
      new_glob.dep_index.push_back(var_remap[old_var]);
  }
  return new_glob;
}
1319 
// In-place dead-code elimination: keep only operators with at least one
// marked output (or otherwise protected), compacting inputs/values/opstack
// without allocating a new tape. 'marks' is indexed by variable.
void global::extract_sub_inplace(std::vector<bool> marks) {
  TMBAD_ASSERT(marks.size() == values.size());
  std::vector<Index> var_remap(values.size(), 0);
  std::vector<bool> independent_variable = inv_marks();
  std::vector<bool> dependent_variable = dep_marks();
  intervals<Index> marked_intervals;
  ForwardArgs<bool> args(inputs, marks, marked_intervals);
  // s / s_input: write cursors for the compacted values and inputs arrays.
  size_t s = 0, s_input = 0;
  std::vector<bool> opstack_deallocate(opstack.size(), false);

  for (size_t i = 0; i < opstack.size(); i++) {
    op_info info = opstack[i]->info();

    // Keep the operator if any output is marked, it is protected from
    // elimination, or it is an output-less 'updating' operator whose
    // updated region intersects the marks.
    size_t nout = opstack[i]->output_size();
    bool any_marked_output = info.test(op_info::elimination_protected);
    for (size_t j = 0; j < nout; j++) {
      any_marked_output |= args.y(j);
    }
    if (info.test(op_info::updating) && nout == 0) {
      Dependencies dep;
      opstack[i]->dependencies_updating(args, dep);
      any_marked_output |= dep.any(args.values);
    }

    if (any_marked_output) {
      // Compact outputs: move values down and record old->new mapping.
      // NOTE: relies on new_index <= old_index (forward order), so the
      // move never overwrites data still to be read.
      for (size_t k = 0; k < nout; k++) {
        Index new_index = s;
        Index old_index = args.output(k);
        var_remap[old_index] = new_index;
        values[new_index] = values[old_index];
        if (independent_variable[old_index]) {
          independent_variable[old_index] = false;
        }
        if (dependent_variable[old_index]) {
          dependent_variable[old_index] = false;
        }
        s++;
      }

      // Compact inputs through the remap table.
      size_t nin = opstack[i]->input_size();
      for (size_t k = 0; k < nin; k++) {
        inputs[s_input] = var_remap[args.input(k)];
        s_input++;
      }
    }
    opstack[i]->increment(args.ptr);
    if (!any_marked_output) {
      opstack_deallocate[i] = true;
    }
  }

  // After flipping, 'true' means: inv/dep variable that survived compaction.
  independent_variable.flip();
  dependent_variable.flip();
  std::vector<Index> new_inv_index;
  for (size_t i = 0; i < inv_index.size(); i++) {
    Index old_var = inv_index[i];
    if (independent_variable[old_var])
      new_inv_index.push_back(var_remap[old_var]);
  }
  inv_index = new_inv_index;
  std::vector<Index> new_dep_index;
  for (size_t i = 0; i < dep_index.size(); i++) {
    Index old_var = dep_index[i];
    if (dependent_variable[old_var])
      new_dep_index.push_back(var_remap[old_var]);
  }
  dep_index = new_dep_index;

  // Shrink arrays and compact the operator stack, releasing dropped ops.
  inputs.resize(s_input);
  values.resize(s);
  size_t k = 0;
  for (size_t i = 0; i < opstack.size(); i++) {
    if (opstack_deallocate[i]) {
      opstack[i]->deallocate();
    } else {
      opstack[k] = opstack[i];
      k++;
    }
  }
  opstack.resize(k);

  // Dynamic operators may need their values refreshed after compaction.
  if (opstack.any.test(op_info::dynamic)) this->forward();
}
1403 
// Convenience overload: extract the current subgraph, discarding the
// variable remap table.
global global::extract_sub() {
  std::vector<Index> var_remap;
  return extract_sub(var_remap);
}
1408 
// For each variable index, the index of the operator that produced it.
// Exploits that operator outputs are laid out contiguously in tape order.
std::vector<Index> global::var2op() {
  std::vector<Index> var2op(values.size());
  Args<> args(inputs);
  size_t j = 0;
  for (size_t i = 0; i < opstack.size(); i++) {
    opstack[i]->increment(args.ptr);
    // args.ptr.second is one past operator i's last output variable.
    for (; j < (size_t)args.ptr.second; j++) {
      var2op[j] = i;
    }
  }
  return var2op;
}

// Reduce variable marks to operator marks: ans[i] is true when operator i
// produced at least one marked variable.
std::vector<bool> global::var2op(const std::vector<bool> &values) {
  std::vector<bool> ans(opstack.size(), false);
  Args<> args(inputs);
  size_t j = 0;
  for (size_t i = 0; i < opstack.size(); i++) {
    opstack[i]->increment(args.ptr);
    for (; j < (size_t)args.ptr.second; j++) {
      ans[i] = ans[i] || values[j];
    }
  }
  return ans;
}

// Expand an operator index sequence to the sorted list of all variable
// indices produced by those operators.
std::vector<Index> global::op2var(const std::vector<Index> &seq) {
  std::vector<bool> seq_mark = mark_space(opstack.size(), seq);
  std::vector<Index> ans;
  Args<> args(inputs);
  size_t j = 0;
  for (size_t i = 0; i < opstack.size(); i++) {
    opstack[i]->increment(args.ptr);
    for (; j < (size_t)args.ptr.second; j++) {
      if (seq_mark[i]) ans.push_back(j);
    }
  }
  return ans;
}

// Expand operator marks to variable marks: ans[j] is true when variable j
// was produced by a marked operator.
std::vector<bool> global::op2var(const std::vector<bool> &seq_mark) {
  std::vector<bool> ans(values.size());
  Args<> args(inputs);
  size_t j = 0;
  for (size_t i = 0; i < opstack.size(); i++) {
    opstack[i]->increment(args.ptr);
    for (; j < (size_t)args.ptr.second; j++) {
      if (seq_mark[i]) ans[j] = true;
    }
  }
  return ans;
}

// Inverse lookup: op2idx[k] is the position within var_subset of (a variable
// owned by) operator k, or NA if the operator owns none. Iterating backwards
// makes the *first* occurrence in var_subset win on collisions.
std::vector<Index> global::op2idx(const std::vector<Index> &var_subset,
                                  Index NA) {
  std::vector<Index> v2o = var2op();
  std::vector<Index> op2idx(opstack.size(), NA);
  for (size_t i = var_subset.size(); i > 0;) {
    i--;
    op2idx[v2o[var_subset[i]]] = i;
  }
  return op2idx;
}
1472 
1473 std::vector<bool> global::mark_space(size_t n, const std::vector<Index> ind) {
1474  std::vector<bool> mark(n, false);
1475  for (size_t i = 0; i < ind.size(); i++) {
1476  mark[ind[i]] = true;
1477  }
1478  return mark;
1479 }
1480 
// Variable mask with 'true' at every independent variable.
std::vector<bool> global::inv_marks() {
  return mark_space(values.size(), inv_index);
}

// Variable mask with 'true' at every dependent variable.
std::vector<bool> global::dep_marks() {
  return mark_space(values.size(), dep_index);
}

// Operator mask with 'true' at every operator in the current subgraph.
std::vector<bool> global::subgraph_marks() {
  return mark_space(opstack.size(), subgraph_seq);
}
1492 
// Helper functor used by build_graph: invoked once per dependency variable
// of the current operator (index held by reference in 'i'), it appends a
// deduplicated edge (producer-op -> current-op) for every kept variable.
global::append_edges::append_edges(size_t &i, size_t num_nodes,
                                   const std::vector<bool> &keep_var,
                                   std::vector<Index> &var2op,
                                   std::vector<IndexPair> &edges)
    : i(i),
      keep_var(keep_var),
      var2op(var2op),
      edges(edges),
      op_marks(num_nodes, false),
      pos(0) {}

// Record an edge from the operator producing dep_j to the current operator,
// skipping self-edges, filtered variables, and duplicates (via op_marks).
void global::append_edges::operator()(Index dep_j) {
  if (keep_var[dep_j]) {
    size_t k = var2op[dep_j];
    if (i != k && !op_marks[k]) {
      IndexPair edge;

      edge.first = k;
      edge.second = i;
      edges.push_back(edge);
      op_marks[k] = true;
    }
  }
}

// Remember where this operator's edges begin, so end_iteration can undo marks.
void global::append_edges::start_iteration() { pos = edges.size(); }

// Reset the dedup marks set during this operator's iteration, reusing the
// edge list instead of clearing the whole op_marks array.
void global::append_edges::end_iteration() {
  size_t n = edges.size() - pos;
  for (size_t j = 0; j < n; j++) op_marks[edges[pos + j].first] = false;
}
1524 
// Build the operator dependency graph. Nodes are operators; an edge k->i
// means operator i reads a (kept) variable produced by operator k. With
// transpose=true all edges are reversed. keep_var filters which variables
// induce edges.
graph global::build_graph(bool transpose, const std::vector<bool> &keep_var) {
  TMBAD_ASSERT(keep_var.size() == values.size());

  std::vector<Index> var2op = this->var2op();

  bool any_updating = false;

  Args<> args(inputs);
  std::vector<IndexPair> edges;
  Dependencies dep;
  size_t i = 0;
  // F holds 'i' by reference, so it tracks the loop variable below.
  append_edges F(i, opstack.size(), keep_var, var2op, edges);
  for (; i < opstack.size(); i++) {
    any_updating |= opstack[i]->info().test(op_info::updating);
    dep.clear();
    opstack[i]->dependencies(args, dep);
    F.start_iteration();
    dep.apply(F);
    F.end_iteration();
    opstack[i]->increment(args.ptr);
  }
  // Second pass for 'updating' operators: their updated regions create
  // reversed edges (the updater must run before later readers).
  if (any_updating) {
    size_t begin = edges.size();
    i = 0;
    args = Args<>(inputs);
    for (; i < opstack.size(); i++) {
      dep.clear();
      opstack[i]->dependencies_updating(args, dep);
      F.start_iteration();
      dep.apply(F);
      F.end_iteration();
      opstack[i]->increment(args.ptr);
    }
    for (size_t j = begin; j < edges.size(); j++)
      std::swap(edges[j].first, edges[j].second);
  }

  if (transpose) {
    for (size_t j = 0; j < edges.size(); j++)
      std::swap(edges[j].first, edges[j].second);
  }

  graph G(opstack.size(), edges);

  // Map independent/dependent variables to their owning operator nodes.
  for (size_t i = 0; i < inv_index.size(); i++)
    G.inv2op.push_back(var2op[inv_index[i]]);
  for (size_t i = 0; i < dep_index.size(); i++)
    G.dep2op.push_back(var2op[dep_index[i]]);
  return G;
}
1575 
// Operator graph with edges in forward (producer -> consumer) direction.
// An empty keep_var means keep all variables.
graph global::forward_graph(std::vector<bool> keep_var) {
  if (keep_var.size() == 0) {
    keep_var.resize(values.size(), true);
  }
  TMBAD_ASSERT(values.size() == keep_var.size());
  return build_graph(false, keep_var);
}

// Operator graph with edges in reverse (consumer -> producer) direction.
// An empty keep_var means keep all variables.
graph global::reverse_graph(std::vector<bool> keep_var) {
  if (keep_var.size() == 0) {
    keep_var.resize(values.size(), true);
  }
  TMBAD_ASSERT(values.size() == keep_var.size());
  return build_graph(true, keep_var);
}
1591 
1592 bool global::identical(const global &other) const {
1593  if (inv_index != other.inv_index) return false;
1594  ;
1595  if (dep_index != other.dep_index) return false;
1596  ;
1597  if (opstack.size() != other.opstack.size()) return false;
1598  ;
1599  for (size_t i = 0; i < opstack.size(); i++) {
1600  if (opstack[i]->identifier() != other.opstack[i]->identifier())
1601  return false;
1602  ;
1603  }
1604  if (inputs != other.inputs) return false;
1605  ;
1606  if (values.size() != other.values.size()) return false;
1607  ;
1608  OperatorPure *constant = getOperator<ConstOp>();
1609  IndexPair ptr(0, 0);
1610  for (size_t i = 0; i < opstack.size(); i++) {
1611  if (opstack[i] == constant) {
1612  if (values[ptr.second] != other.values[ptr.second]) return false;
1613  ;
1614  }
1615  opstack[i]->increment(ptr);
1616  }
1617 
1618  return true;
1619 }
1620 
1621 hash_t global::hash() const {
1622  hash_t h = 37;
1623 
1624  hash(h, inv_index.size());
1625  ;
1626  for (size_t i = 0; i < inv_index.size(); i++) hash(h, inv_index[i]);
1627  ;
1628  ;
1629  hash(h, dep_index.size());
1630  ;
1631  for (size_t i = 0; i < dep_index.size(); i++) hash(h, dep_index[i]);
1632  ;
1633  ;
1634  hash(h, opstack.size());
1635  ;
1636  for (size_t i = 0; i < opstack.size(); i++) hash(h, opstack[i]);
1637  ;
1638  ;
1639  hash(h, inputs.size());
1640  ;
1641  for (size_t i = 0; i < inputs.size(); i++) hash(h, inputs[i]);
1642  ;
1643  ;
1644  hash(h, values.size());
1645  ;
1646  OperatorPure *constant = getOperator<ConstOp>();
1647  IndexPair ptr(0, 0);
1648  for (size_t i = 0; i < opstack.size(); i++) {
1649  if (opstack[i] == constant) {
1650  hash(h, values[ptr.second]);
1651  ;
1652  }
1653  opstack[i]->increment(ptr);
1654  }
1655 
1656  return h;
1657 }
1658 
// Per-variable structural hashing sweep. Each variable's hash combines the
// hashes of its operator's dependencies with the operator identity, so equal
// hashes indicate (with high probability) equivalent sub-expressions.
// cfg controls determinism, constant/output/independent-variable seeding and
// whether the result is reduced to the dependent variables only.
std::vector<hash_t> global::hash_sweep(hash_config cfg) const {
  // Deterministic mode: replace raw operator addresses by their first
  // occurrence rank, spread over the hash space.
  std::vector<Index> opstack_id;
  if (cfg.deterministic) {
    std::vector<size_t> tmp(opstack.size());
    for (size_t i = 0; i < tmp.size(); i++)
      tmp[i] = (size_t)opstack[i]->identifier();
    opstack_id = radix::first_occurance<Index>(tmp);
    hash_t spread = (hash_t(1) << (sizeof(hash_t) * 4)) - 1;
    for (size_t i = 0; i < opstack_id.size(); i++)
      opstack_id[i] = (opstack_id[i] + 1) * spread;
  }

  std::vector<hash_t> hash_vec(values.size(), 37);
  Dependencies dep;
  OperatorPure *inv = getOperator<InvOp>();
  OperatorPure *constant = getOperator<ConstOp>();

  // strong_inv: give each independent variable a distinct seed (either the
  // user-supplied inv_seed or its position), so different inputs do not
  // collide.
  if (cfg.strong_inv) {
    bool have_inv_seed = (cfg.inv_seed.size() > 0);
    if (have_inv_seed) {
      TMBAD_ASSERT(cfg.inv_seed.size() == inv_index.size());
    }
    for (size_t i = 0; i < inv_index.size(); i++) {
      hash_vec[inv_index[i]] += (have_inv_seed ? cfg.inv_seed[i] + 1 : (i + 1));
    }
  }

  Args<> args(inputs);
  IndexPair &ptr = args.ptr;
  for (size_t i = 0; i < opstack.size(); i++) {
    // Independent variables keep their seed hash untouched.
    if (opstack[i] == inv) {
      opstack[i]->increment(ptr);
      continue;
    }
    dep.clear();

    opstack[i]->dependencies(args, dep);

    // Fold the dependency hashes; the first one initializes h.
    hash_t h = 37;
    for (size_t j = 0; j < dep.size(); j++) {
      if (j == 0)
        h = hash_vec[dep[0]];
      else
        hash(h, hash_vec[dep[j]]);
      ;
    }

    // Fold in the operator identity (address-based or deterministic rank).
    if (!cfg.deterministic) {
      hash(h, opstack[i]->identifier());
      ;
    } else {
      hash(h, opstack_id[i]);
      ;
    }

    // strong_const: distinguish constants by value (and by sign, so that
    // e.g. +0.0 and -0.0 separate).
    if (opstack[i] == constant && cfg.strong_const) {
      hash(h, values[ptr.second]);
      ;

      hash(h, values[ptr.second] > 0);
      ;
    }

    // strong_output: give each of the operator's outputs a distinct hash.
    size_t noutput = opstack[i]->output_size();
    for (size_t j = 0; j < noutput; j++) {
      hash_vec[ptr.second + j] = h + j * cfg.strong_output;
    }

    opstack[i]->increment(ptr);
  }
  if (!cfg.reduce) return hash_vec;
  // reduce: keep only the hashes of the dependent variables.
  std::vector<hash_t> ans(dep_index.size());
  for (size_t j = 0; j < dep_index.size(); j++) {
    ans[j] = hash_vec[dep_index[j]];
  }
  return ans;
}
1736 
1737 std::vector<hash_t> global::hash_sweep(bool weak) const {
1738  hash_config cfg;
1739  cfg.strong_inv = !weak;
1740  cfg.strong_const = true;
1741  cfg.strong_output = true;
1742  cfg.reduce = weak;
1743  cfg.deterministic = false;
1744  return hash_sweep(cfg);
1745 }
1746 
1747 void global::eliminate() {
1748  this->shrink_to_fit();
1749 
1750  std::vector<bool> marks;
1751  marks.resize(values.size(), false);
1752 
1753  for (size_t i = 0; i < inv_index.size(); i++) marks[inv_index[i]] = true;
1754  for (size_t i = 0; i < dep_index.size(); i++) marks[dep_index[i]] = true;
1755 
1756  reverse(marks);
1757 
1758  if (false) {
1759  set_subgraph(marks);
1760 
1761  *this = extract_sub();
1762  }
1763  this->extract_sub_inplace(marks);
1764  this->shrink_to_fit();
1765 }
1766 
// Default print configuration: no prefix, '*' marks subgraph members,
// no recursion into nested tapes.
global::print_config::print_config() : prefix(""), mark("*"), depth(0) {}

// Pretty-print the tape as a table: one row per operator (and per output
// variable) showing name, node id, value, derivative, variable index and
// input variable indices. Subgraph members are flagged with cfg.mark.
// Column widths below are hand-tuned; keep setw calls in sync.
void global::print(print_config cfg) {
  using std::endl;
  using std::left;
  using std::setw;
  IndexPair ptr(0, 0);
  std::vector<bool> sgm = subgraph_marks();
  bool have_subgraph = (subgraph_seq.size() > 0);
  int v = 0;
  // cfg2: configuration for recursively printing nested tapes.
  print_config cfg2 = cfg;
  cfg2.depth--;
  cfg2.prefix = cfg.prefix + "##";
  Rcout << cfg.prefix;
  Rcout << setw(7) << "OpName:" << setw(7 + have_subgraph)
        << "Node:" << setw(13) << "Value:" << setw(13) << "Deriv:" << setw(13)
        << "Index:";
  Rcout << " "
        << "Inputs:";
  Rcout << endl;
  for (size_t i = 0; i < opstack.size(); i++) {
    Rcout << cfg.prefix;
    Rcout << setw(7) << opstack[i]->op_name();
    if (have_subgraph) {
      if (sgm[i])
        Rcout << cfg.mark;
      else
        Rcout << " ";
    }
    Rcout << setw(7) << i;
    int numvar = opstack[i]->output_size();
    // One table row per output; operators without outputs still get one
    // (mostly empty) row.
    for (int j = 0; j < numvar + (numvar == 0); j++) {
      if (j > 0) Rcout << cfg.prefix;
      // Continuation rows skip the OpName/Node columns via padding.
      Rcout << setw((7 + 7) * (j > 0) + 13);
      if (numvar > 0)
        Rcout << values[v];
      else
        Rcout << "";
      Rcout << setw(13);
      if (numvar > 0) {
        // Derivatives are only valid when a reverse sweep has sized them.
        if (derivs.size() == values.size())
          Rcout << derivs[v];
        else
          Rcout << "NA";
      } else {
        Rcout << "";
      }
      Rcout << setw(13);
      if (numvar > 0) {
        Rcout << v;
      } else {
        Rcout << "";
      }
      // Inputs are printed on the first row only; advancing ptr here also
      // tells us how many inputs this operator consumed.
      if (j == 0) {
        IndexPair ptr_old = ptr;
        opstack[i]->increment(ptr);
        int ninput = ptr.first - ptr_old.first;
        for (int k = 0; k < ninput; k++) {
          if (k == 0) Rcout << " ";
          Rcout << " " << inputs[ptr_old.first + k];
        }
      }
      Rcout << endl;
      if (numvar > 0) {
        v++;
      }
    }
    // Optionally recurse into operators that own nested tapes.
    if (cfg.depth > 0) opstack[i]->print(cfg2);
  }
}

// Print with default configuration.
void global::print() { this->print(print_config()); }
1839 
// Base for operators whose input/output counts are decided at runtime
// rather than fixed by the operator type.
global::DynamicInputOutputOperator::DynamicInputOutputOperator(Index ninput,
                                                               Index noutput)
    : ninput_(ninput), noutput_(noutput) {}

// Number of inputs chosen at construction time.
Index global::DynamicInputOutputOperator::input_size() const {
  return this->ninput_;
}

// Number of outputs chosen at construction time.
Index global::DynamicInputOutputOperator::output_size() const {
  return this->noutput_;
}
1851 
// Display name of the independent-variable operator.
const char *global::InvOp::op_name() { return "InvOp"; }

// Display name of the dependent-variable operator.
const char *global::DepOp::op_name() { return "DepOp"; }
1855 
// Replaying a constant: re-record it on the active tape.
void global::ConstOp::forward(ForwardArgs<Replay> &args) {
  args.y(0).addToTape();
}

const char *global::ConstOp::op_name() { return "ConstOp"; }

// Source-code writer: emit the constant literal when literals are requested.
void global::ConstOp::forward(ForwardArgs<Writer> &args) {
  if (args.const_literals) {
    args.y(0) = args.y_const(0);
  }
}
1867 
// DataOp: reserves n output slots whose values are filled in externally.
global::DataOp::DataOp(Index n) { Base::noutput = n; }

const char *global::DataOp::op_name() { return "DataOp"; }

// DataOp cannot be emitted as source code.
void global::DataOp::forward(ForwardArgs<Writer> &args) { TMBAD_ASSERT(false); }
1873 
// ZeroOp: produces n recorded (i.e. updatable) zero-valued variables.
global::ZeroOp::ZeroOp(Index n) { Base::noutput = n; }

const char *global::ZeroOp::op_name() { return "ZeroOp"; }

// ZeroOp cannot be emitted as source code.
void global::ZeroOp::forward(ForwardArgs<Writer> &args) { TMBAD_ASSERT(false); }

// Overwrite x[0..n-1] with freshly recorded zeros on the active tape.
void global::ZeroOp::operator()(Replay *x, Index n) {
  Complete<ZeroOp> Z(n);
  ad_segment y = Z(ad_segment());
  for (size_t i = 0; i < n; i++) x[i] = y[i];
}
1885 
// NullOp: operator with no inputs, outputs or effects (placeholder).
global::NullOp::NullOp() {}

const char *global::NullOp::op_name() { return "NullOp"; }

// NullOp2: no-effect operator with caller-chosen input/output counts.
global::NullOp2::NullOp2(Index ninput, Index noutput)
    : global::DynamicInputOutputOperator(ninput, noutput) {}

const char *global::NullOp2::op_name() { return "NullOp2"; }
1894 
// RefOp: reference to variable i on (possibly another) tape 'glob'.
global::RefOp::RefOp(global *glob, Index i) : glob(glob), i(i) {}

// Value sweep: read the referenced value from the foreign tape.
void global::RefOp::forward(ForwardArgs<Scalar> &args) {
  args.y(0) = glob->values[i];
}

// Replay sweep: if replaying onto the referenced tape itself the reference
// collapses to a plain variable; otherwise re-record a RefOp on the active
// tape (with no inputs — the dependency is external).
void global::RefOp::forward(ForwardArgs<Replay> &args) {
  if (get_glob() == this->glob) {
    ad_plain tmp;
    tmp.index = i;
    args.y(0) = tmp;
  } else {
    global::OperatorPure *pOp =
        get_glob()->getOperator<RefOp>(this->glob, this->i);
    args.y(0) =
        get_glob()->add_to_stack<RefOp>(pOp, std::vector<ad_plain>(0))[0];
  }
}

// Reverse replay: derivative propagates only when the referenced tape is
// the active one; across tapes the reference is treated as constant.
void global::RefOp::reverse(ReverseArgs<Replay> &args) {
  if (get_glob() == this->glob) {
    args.dx(0) += args.dy(0);
  }
}

const char *global::RefOp::op_name() { return "RefOp"; }
1921 
1922 OperatorPure *global::Fuse(OperatorPure *Op1, OperatorPure *Op2) {
1923  if (Op1 == Op2)
1924  return Op1->self_fuse();
1925  else
1926  return Op1->other_fuse(Op2);
1927 }
1928 
// Enable or disable operator fusion during recording.
void global::set_fuse(bool flag) { fuse = flag; }

// Push an operator onto the stack. With fusion enabled, repeatedly try to
// fuse it with the current top of the stack until fusion fails.
void global::add_to_opstack(OperatorPure *pOp) {
  if (fuse) {
    while (this->opstack.size() > 0) {
      OperatorPure *OpTry = this->Fuse(this->opstack.back(), pOp);
      if (OpTry == NULL) break;

      // Fusion succeeded: the fused operator replaces both.
      this->opstack.pop_back();
      pOp = OpTry;
    }
  }

  this->opstack.push_back(pOp);
}
1944 
// An ad_plain is initialized exactly when it carries a valid tape index.
bool global::ad_plain::initialized() const { return index != NA; }

// ad_plain lives on a tape iff initialized (it has no constant mode).
bool global::ad_plain::on_some_tape() const { return initialized(); }

// ad_plain is always already on the tape; just sanity-check.
void global::ad_plain::addToTape() const { TMBAD_ASSERT(initialized()); }

// Owning tape: the active tape when on tape, otherwise NULL.
global *global::ad_plain::glob() const {
  return (on_some_tape() ? get_glob() : NULL);
}

// No-op for ad_plain (interface parity with ad_aug).
void global::ad_plain::override_by(const ad_plain &x) const {}
1956 
// Default: uninitialized (no tape index).
global::ad_plain::ad_plain() : index(NA) {}

// From a scalar: record the value as a constant on the active tape.
global::ad_plain::ad_plain(Scalar x) {
  *this = get_glob()->add_to_stack<ConstOp>(x);
}

// From an ad_aug: force it onto the tape and take its taped value.
global::ad_plain::ad_plain(ad_aug x) {
  x.addToTape();
  *this = x.taped_value;
}
1967 
// CopyOp: identity operator that allocates a new variable (derivative
// propagates through the copy).
Replay global::ad_plain::CopyOp::eval(Replay x0) { return x0.copy(); }

const char *global::ad_plain::CopyOp::op_name() { return "CopyOp"; }

// Record a copy of this variable, creating a new contiguous tape slot.
ad_plain global::ad_plain::copy() const {
  ad_plain ans = get_glob()->add_to_stack<CopyOp>(*this);
  return ans;
}

// ValOp: value-only copy — same value, but no dependencies, so no
// derivative flows through it.
Replay global::ad_plain::ValOp::eval(Replay x0) { return x0.copy0(); }

// ValOp deliberately reports no dependencies (cuts the derivative link).
void global::ad_plain::ValOp::dependencies(Args<> &args,
                                           Dependencies &dep) const {}

const char *global::ad_plain::ValOp::op_name() { return "ValOp"; }

// Record a value-only (derivative-blocking) copy of this variable.
ad_plain global::ad_plain::copy0() const {
  ad_plain ans = get_glob()->add_to_stack<ValOp>(*this);
  return ans;
}
1988 
1989 ad_plain global::ad_plain::operator+(const ad_plain &other) const {
1990  ad_plain ans;
1991  ans = get_glob()->add_to_stack<AddOp>(*this, other);
1992  return ans;
1993 }
1994 
1995 ad_plain global::ad_plain::operator-(const ad_plain &other) const {
1996  ad_plain ans;
1997  ans = get_glob()->add_to_stack<SubOp>(*this, other);
1998  return ans;
1999 }
2000 
// Record a MulOp on the active tape.
ad_plain global::ad_plain::operator*(const ad_plain &other) const {
  ad_plain ans = get_glob()->add_to_stack<MulOp>(*this, other);
  return ans;
}

// Multiply by a scalar constant: uses the specialized MulOp_<true,false>
// (right factor known constant) after recording the constant.
ad_plain global::ad_plain::operator*(const Scalar &other) const {
  ad_plain ans =
      get_glob()->add_to_stack<MulOp_<true, false> >(*this, ad_plain(other));
  return ans;
}

// Record a DivOp on the active tape.
ad_plain global::ad_plain::operator/(const ad_plain &other) const {
  ad_plain ans = get_glob()->add_to_stack<DivOp>(*this, other);
  return ans;
}
2016 
const char *global::ad_plain::NegOp::op_name() { return "NegOp"; }

// Unary negation: record a NegOp on the active tape.
ad_plain global::ad_plain::operator-() const {
  ad_plain ans = get_glob()->add_to_stack<NegOp>(*this);
  return ans;
}
2023 
// Compound assignments are defined via the binary operators: each records a
// new tape variable and rebinds *this to it (tape variables are immutable).
ad_plain &global::ad_plain::operator+=(const ad_plain &other) {
  *this = *this + other;
  return *this;
}

ad_plain &global::ad_plain::operator-=(const ad_plain &other) {
  *this = *this - other;
  return *this;
}

ad_plain &global::ad_plain::operator*=(const ad_plain &other) {
  *this = *this * other;
  return *this;
}

ad_plain &global::ad_plain::operator/=(const ad_plain &other) {
  *this = *this / other;
  return *this;
}
2043 
// Register this variable as a dependent (output) of the active tape:
// record a DepOp wrapper and remember its index in dep_index.
void global::ad_plain::Dependent() {
  *this = get_glob()->add_to_stack<DepOp>(*this);
  get_glob()->dep_index.push_back(this->index);
}

// Register this variable as an independent (input) of the active tape:
// record an InvOp seeded with the current value (NaN if uninitialized) and
// remember its index in inv_index.
void global::ad_plain::Independent() {
  Scalar val = (index == NA ? NAN : this->Value());
  *this = get_glob()->add_to_stack<InvOp>(val);
  get_glob()->inv_index.push_back(this->index);
}
2054 
// Mutable value slot on the active tape.
Scalar &global::ad_plain::Value() { return get_glob()->values[index]; }

// Value on the active tape (read-only).
Scalar global::ad_plain::Value() const { return get_glob()->values[index]; }

// Value on an explicitly given tape.
Scalar global::ad_plain::Value(global *glob) const {
  return glob->values[index];
}

// Mutable derivative slot on the active tape.
Scalar &global::ad_plain::Deriv() { return get_glob()->derivs[index]; }
2064 
// Activate this tape for recording on the current thread, pushing the
// previously active tape (if any) so ad_stop can restore it.
void global::ad_start() {
  TMBAD_ASSERT2(!in_use, "Tape already in use");
  TMBAD_ASSERT(parent_glob == NULL);
  parent_glob = global_ptr[TMBAD_THREAD_NUM];
  global_ptr[TMBAD_THREAD_NUM] = this;
  in_use = true;
}

// Deactivate this tape and restore the previously active one.
void global::ad_stop() {
  TMBAD_ASSERT2(in_use, "Tape not in use");
  global_ptr[TMBAD_THREAD_NUM] = parent_glob;
  parent_glob = NULL;
  in_use = false;
}
2079 
// Register every element of x as an independent variable of the tape.
void global::Independent(std::vector<ad_plain> &x) {
  for (size_t i = 0; i < x.size(); i++) {
    x[i].Independent();
  }
}
2085 
// ad_segment: a contiguous run of n tape variables starting at x.index,
// optionally shaped as an (n/c) x c matrix.

// Empty segment.
global::ad_segment::ad_segment() : n(0), c(0) {}

// Segment of length n starting at an existing variable.
global::ad_segment::ad_segment(ad_plain x, size_t n) : x(x), n(n), c(1) {}

// Single-element segment from an ad_aug (taped first if necessary).
global::ad_segment::ad_segment(ad_aug x) : x(ad_plain(x)), n(1), c(1) {}

// Single-element segment from a scalar constant.
global::ad_segment::ad_segment(Scalar x) : x(ad_plain(x)), n(1), c(1) {}

// Segment from a raw start index and length.
global::ad_segment::ad_segment(Index idx, size_t n) : n(n) { x.index = idx; }

// Matrix-shaped segment: r rows, c columns, column-count stored in c.
global::ad_segment::ad_segment(ad_plain x, size_t r, size_t c)
    : x(x), n(r * c), c(c) {}
2098 
// Build a contiguous segment from n Replay values, copying only when needed:
// - all identically zero (and zero_check): leave the segment empty;
// - all constants: store them in a DataOp block;
// - not contiguous on the active tape: copy each element into fresh
//   consecutive slots;
// - already contiguous: reference the existing variables directly.
global::ad_segment::ad_segment(Replay *x, size_t n, bool zero_check)
    : n(n), c(1) {
  if (zero_check && all_zero(x, n)) return;
  if (all_constant(x, n)) {
    global *glob = get_glob();
    size_t m = glob->values.size();
    // DataOp reserves n value slots; fill them with the constants.
    Complete<DataOp> D(n);
    D(ad_segment());
    for (size_t i = 0; i < n; i++) glob->values[m + i] = x[i].Value();
    this->x.index = m;
    return;
  }
  if (!is_contiguous(x, n)) {
    size_t before = get_glob()->values.size();
    this->x = x[0].copy();
    for (size_t i = 1; i < n; i++) x[i].copy();
    size_t after = get_glob()->values.size();
    TMBAD_ASSERT2(after - before == n,
                  "Each invocation of copy() should construct a new variable");
    return;
  }
  if (n > 0) this->x = x[0];
}
2122 
// An uninitialized segment represents the identically-zero segment.
bool global::ad_segment::identicalZero() { return !x.initialized(); }

// True when all n values are recorded on the currently active tape.
bool global::ad_segment::all_on_active_tape(Replay *x, size_t n) {
  global *cur_glob = get_glob();
  for (size_t i = 0; i < n; i++) {
    bool ok = x[i].on_some_tape() && (x[i].glob() == cur_glob);
    if (!ok) return false;
  }
  return true;
}

// True when the n values occupy consecutive indices on the active tape.
bool global::ad_segment::is_contiguous(Replay *x, size_t n) {
  if (!all_on_active_tape(x, n)) return false;
  for (size_t i = 1; i < n; i++) {
    if (x[i].index() != x[i - 1].index() + 1) return false;
  }
  return true;
}
2141 
2142 bool global::ad_segment::all_zero(Replay *x, size_t n) {
2143  for (size_t i = 0; i < n; i++) {
2144  if (!x[i].identicalZero()) return false;
2145  }
2146  return true;
2147 }
2148 
2149 bool global::ad_segment::all_constant(Replay *x, size_t n) {
2150  for (size_t i = 0; i < n; i++) {
2151  if (!x[i].constant()) return false;
2152  }
2153  return true;
2154 }
2155 
// Total number of elements in the segment.
size_t global::ad_segment::size() const { return n; }

// Number of rows of the matrix view.
// NOTE(review): divides by c, which is 0 for a default-constructed
// segment — presumably rows() is never called on one; verify.
size_t global::ad_segment::rows() const { return n / c; }

// Number of columns of the matrix view.
size_t global::ad_segment::cols() const { return c; }

// i-th element of the segment as a plain taped variable (offset + i).
// No bounds checking is performed.
ad_plain global::ad_segment::operator[](size_t i) const {
  ad_plain ans;
  ans.index = x.index + i;
  return ans;
}

// First element (the segment's tape offset).
ad_plain global::ad_segment::offset() const { return x; }

// Raw tape index of the first element.
Index global::ad_segment::index() const { return x.index; }

// An ad_aug is taped iff its taped_value has been given a valid index.
bool global::ad_aug::on_some_tape() const { return taped_value.initialized(); }
2173 
2175  return on_some_tape() && (this->glob() == get_glob());
2176 }
2177 
// Alias for on_some_tape().
bool global::ad_aug::ontape() const { return on_some_tape(); }

// Constant means: not attached to any tape (value stored locally).
bool global::ad_aug::constant() const { return !taped_value.initialized(); }

// Tape index of the taped value. Only meaningful when on_some_tape().
Index global::ad_aug::index() const { return taped_value.index; }

// Owning tape, or NULL for constants (data is a union: glob is only
// valid when taped, value only when constant).
global *global::ad_aug::glob() const {
  return (on_some_tape() ? data.glob : NULL);
}

// Current numeric value: read from the owning tape if taped, otherwise
// from the locally stored constant.
Scalar global::ad_aug::Value() const {
  if (on_some_tape())
    return taped_value.Value(this->data.glob);
  else
    return data.value;
}
2194 
2196 
// Construct an untaped constant; the value lives in the local union.
global::ad_aug::ad_aug(Scalar x) { data.value = x; }

// Wrap an existing taped variable; it belongs to the currently active tape.
global::ad_aug::ad_aug(ad_plain x) : taped_value(x) { data.glob = get_glob(); }
2200 
2202  if (on_some_tape()) {
2203  if (data.glob != get_glob()) {
2204  TMBAD_ASSERT2(in_context_stack(data.glob), "Variable not initialized?");
2205  global::OperatorPure *pOp =
2206  get_glob()->getOperator<RefOp>(data.glob, taped_value.index);
2207  this->taped_value =
2208  get_glob()->add_to_stack<RefOp>(pOp, std::vector<ad_plain>(0))[0];
2209 
2210  this->data.glob = get_glob();
2211  }
2212  return;
2213  }
2214  this->taped_value = ad_plain(data.value);
2215  this->data.glob = get_glob();
2216 }
2217 
// Rebind this object to a different variable on the active tape.
// NOTE(review): mutates members despite being const — presumably
// taped_value and data are declared mutable; verify in the class
// definition before relying on this.
void global::ad_aug::override_by(const ad_plain &x) const {
  this->taped_value = x;
  this->data.glob = get_glob();
}
2222 
2224  global *cur_glob = get_glob();
2225  while (cur_glob != NULL) {
2226  if (cur_glob == glob) return true;
2227  cur_glob = cur_glob->parent_glob;
2228  }
2229  return false;
2230 }
2231 
2233  if (on_active_tape()) {
2234  return taped_value.copy();
2235  } else {
2236  ad_aug cpy = *this;
2237  cpy.addToTape();
2238  return cpy;
2239  }
2240 }
2241 
2243  ad_aug cpy = *this;
2244  if (!cpy.on_active_tape()) {
2245  cpy.addToTape();
2246  }
2247  return cpy.taped_value.copy0();
2248 }
2249 
2251  return constant() && data.value == Scalar(0);
2252 }
2253 
2255  return constant() && data.value == Scalar(1);
2256 }
2257 
2258 bool global::ad_aug::bothConstant(const ad_aug &other) const {
2259  return constant() && other.constant();
2260 }
2261 
2262 bool global::ad_aug::identical(const ad_aug &other) const {
2263  if (constant() && other.constant()) return (data.value == other.data.value);
2264 
2265  if (glob() == other.glob())
2266  return (taped_value.index == other.taped_value.index);
2267  return false;
2268 }
2269 
2271  if (bothConstant(other)) return Scalar(this->data.value + other.data.value);
2272  if (this->identicalZero()) return other;
2273  if (other.identicalZero()) return *this;
2274  return ad_plain(*this) + ad_plain(other);
2275 }
2276 
2278  if (bothConstant(other)) return Scalar(this->data.value - other.data.value);
2279  if (other.identicalZero()) return *this;
2280  if (this->identicalZero()) return -other;
2281  if (this->identical(other)) return Scalar(0);
2282  return ad_plain(*this) - ad_plain(other);
2283 }
2284 
2286  if (this->constant()) return Scalar(-(this->data.value));
2287  return -ad_plain(*this);
2288 }
2289 
2291  if (bothConstant(other)) return Scalar(this->data.value * other.data.value);
2292  if (this->identicalZero()) return *this;
2293  if (other.identicalZero()) return other;
2294  if (this->identicalOne()) return other;
2295  if (other.identicalOne()) return *this;
2296  if (this->constant()) return ad_plain(other) * Scalar(this->data.value);
2297  if (other.constant()) return ad_plain(*this) * Scalar(other.data.value);
2298  return ad_plain(*this) * ad_plain(other);
2299 }
2300 
2302  if (bothConstant(other)) return Scalar(this->data.value / other.data.value);
2303  if (this->identicalZero()) return *this;
2304  if (other.identicalOne()) return *this;
2305  return ad_plain(*this) / ad_plain(other);
2306 }
2307 
2309  *this = *this + other;
2310  return *this;
2311 }
2312 
2314  *this = *this - other;
2315  return *this;
2316 }
2317 
2319  *this = *this * other;
2320  return *this;
2321 }
2322 
2324  *this = *this / other;
2325  return *this;
2326 }
2327 
2329  this->addToTape();
2330  taped_value.Dependent();
2331 }
2332 
2334  taped_value.Independent();
2335  taped_value.Value() = this->data.value;
2336  this->data.glob = get_glob();
2337 }
2338 
2339 Scalar &global::ad_aug::Value() {
2340  if (on_some_tape())
2341 
2342  return taped_value.Value();
2343  else
2344  return data.value;
2345 }
2346 
// Mutable access to the derivative slot of the taped value.
// Only meaningful for taped variables.
Scalar &global::ad_aug::Deriv() { return taped_value.Deriv(); }

// Mark each element of x as an independent (input) variable of this tape.
void global::Independent(std::vector<ad_aug> &x) {
  for (size_t i = 0; i < x.size(); i++) {
    x[i].Independent();
  }
}
2354 
// Debug printing: an ad_plain prints as its current numeric value.
std::ostream &operator<<(std::ostream &os, const global::ad_plain &x) {
  os << x.Value();
  return os;
}

// Debug printing: a taped ad_aug prints its value, index and tape pointer;
// a constant prints its stored value.
std::ostream &operator<<(std::ostream &os, const global::ad_aug &x) {
  os << "{";
  if (x.on_some_tape()) {
    os << "value=" << x.data.glob->values[x.taped_value.index] << ", ";
    os << "index=" << x.taped_value.index << ", ";
    os << "tape=" << x.data.glob;
  } else {
    os << "const=" << x.data.value;
  }
  os << "}";
  return os;
}
2372 
// Index-based wrappers: construct ad types directly from a tape index.
ad_plain_index::ad_plain_index(const Index &i) { this->index = i; }

ad_plain_index::ad_plain_index(const ad_plain &x) : ad_plain(x) {}

ad_aug_index::ad_aug_index(const Index &i) : ad_aug(ad_plain_index(i)) {}

ad_aug_index::ad_aug_index(const ad_aug &x) : ad_aug(x) {}

ad_aug_index::ad_aug_index(const ad_plain &x) : ad_aug(x) {}

// Identity overload so generic code can call Value() on plain scalars too.
Scalar Value(Scalar x) { return x; }

// Mixed double/ad_aug arithmetic: promote the double and reuse ad_aug ops.
ad_aug operator+(const double &x, const ad_aug &y) { return ad_aug(x) + y; }

ad_aug operator-(const double &x, const ad_aug &y) { return ad_aug(x) - y; }

ad_aug operator*(const double &x, const ad_aug &y) { return ad_aug(x) * y; }

ad_aug operator/(const double &x, const ad_aug &y) { return ad_aug(x) / y; }

// Comparisons are value-based only; they are not recorded on the tape.
bool operator<(const double &x, const ad_adapt &y) { return x < y.Value(); }

bool operator<=(const double &x, const ad_adapt &y) { return x <= y.Value(); }

bool operator>(const double &x, const ad_adapt &y) { return x > y.Value(); }

bool operator>=(const double &x, const ad_adapt &y) { return x >= y.Value(); }

bool operator==(const double &x, const ad_adapt &y) { return x == y.Value(); }

bool operator!=(const double &x, const ad_adapt &y) { return x != y.Value(); }
2404 
// --- Rounding primitives --------------------------------------------------
// Each math primitive comes in several overloads:
//   Writer   : emits C source text "name(<x>)" for code generation,
//   ad_plain : pushes the operator node on the active tape,
//   ad_aug   : folds constants immediately, tapes otherwise.
// These rounding ops define no reverse() here — presumably piecewise
// constant, i.e. zero derivative; verify against the operator base class.
Writer floor(const Writer &x) {
  return "floor"
         "(" +
         x + ")";
}
const char *FloorOp::op_name() { return "FloorOp"; }
ad_plain floor(const ad_plain &x) {
  return get_glob()->add_to_stack<FloorOp>(x);
}
ad_aug floor(const ad_aug &x) {
  if (x.constant())
    return Scalar(floor(x.Value()));
  else
    return floor(ad_plain(x));
}

Writer ceil(const Writer &x) {
  return "ceil"
         "(" +
         x + ")";
}
const char *CeilOp::op_name() { return "CeilOp"; }
ad_plain ceil(const ad_plain &x) { return get_glob()->add_to_stack<CeilOp>(x); }
ad_aug ceil(const ad_aug &x) {
  if (x.constant())
    return Scalar(ceil(x.Value()));
  else
    return ceil(ad_plain(x));
}

Writer trunc(const Writer &x) {
  return "trunc"
         "(" +
         x + ")";
}
const char *TruncOp::op_name() { return "TruncOp"; }
ad_plain trunc(const ad_plain &x) {
  return get_glob()->add_to_stack<TruncOp>(x);
}
ad_aug trunc(const ad_aug &x) {
  if (x.constant())
    return Scalar(trunc(x.Value()));
  else
    return trunc(ad_plain(x));
}

Writer round(const Writer &x) {
  return "round"
         "(" +
         x + ")";
}
const char *RoundOp::op_name() { return "RoundOp"; }
ad_plain round(const ad_plain &x) {
  return get_glob()->add_to_stack<RoundOp>(x);
}
ad_aug round(const ad_aug &x) {
  if (x.constant())
    return Scalar(round(x.Value()));
  else
    return round(ad_plain(x));
}
2466 
// Sign function: +1 for x >= 0, -1 for x < 0. By this convention
// sign(0) == 1, and NaN yields 0 (both comparisons are false).
double sign(const double &x) {
  if (x >= 0) return 1.0;
  if (x < 0) return -1.0;
  return 0.0;
}
2468 
// sign(): Writer / taped / constant-folding overloads (see pattern above).
Writer sign(const Writer &x) {
  return "sign"
         "(" +
         x + ")";
}
const char *SignOp::op_name() { return "SignOp"; }
ad_plain sign(const ad_plain &x) { return get_glob()->add_to_stack<SignOp>(x); }
ad_aug sign(const ad_aug &x) {
  if (x.constant())
    return Scalar(sign(x.Value()));
  else
    return sign(ad_plain(x));
}
2482 
// Indicator of x >= 0 (1.0 when true, 0.0 otherwise; NaN gives 0.0).
double ge0(const double &x) { return (x >= 0) ? 1.0 : 0.0; }

// Indicator of x < 0 (1.0 when true, 0.0 otherwise; NaN gives 0.0).
double lt0(const double &x) { return (x < 0) ? 1.0 : 0.0; }
2486 
// ge0()/lt0(): Writer / taped / constant-folding overloads (pattern as above).
Writer ge0(const Writer &x) {
  return "ge0"
         "(" +
         x + ")";
}
const char *Ge0Op::op_name() { return "Ge0Op"; }
ad_plain ge0(const ad_plain &x) { return get_glob()->add_to_stack<Ge0Op>(x); }
ad_aug ge0(const ad_aug &x) {
  if (x.constant())
    return Scalar(ge0(x.Value()));
  else
    return ge0(ad_plain(x));
}

Writer lt0(const Writer &x) {
  return "lt0"
         "(" +
         x + ")";
}
const char *Lt0Op::op_name() { return "Lt0Op"; }
ad_plain lt0(const ad_plain &x) { return get_glob()->add_to_stack<Lt0Op>(x); }
ad_aug lt0(const ad_aug &x) {
  if (x.constant())
    return Scalar(lt0(x.Value()));
  else
    return lt0(ad_plain(x));
}
2514 
// fabs(): absolute value with reverse-mode derivative sign(x).
Writer fabs(const Writer &x) {
  return "fabs"
         "(" +
         x + ")";
}
// Reverse mode: d/dx |x| = sign(x); skip work when the output adjoint is 0.
void AbsOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0)) args.dx(0) += args.dy(0) * sign(args.x(0));
}
const char *AbsOp::op_name() { return "AbsOp"; }
ad_plain fabs(const ad_plain &x) { return get_glob()->add_to_stack<AbsOp>(x); }
ad_aug fabs(const ad_aug &x) {
  if (x.constant())
    return Scalar(fabs(x.Value()));
  else
    return fabs(ad_plain(x));
}
ad_adapt fabs(const ad_adapt &x) { return ad_adapt(fabs(ad_aug(x))); }
2533 
// --- Unary transcendental primitives --------------------------------------
// Same overload pattern throughout: Writer emits source text, ad_plain
// tapes the node, ad_aug constant-folds, ad_adapt forwards to ad_aug.
// Each reverse() accumulates dy * f'(x) into dx, skipping zero adjoints.

// sin: derivative cos(x).
Writer sin(const Writer &x) {
  return "sin"
         "(" +
         x + ")";
}
void SinOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0)) args.dx(0) += args.dy(0) * cos(args.x(0));
}
const char *SinOp::op_name() { return "SinOp"; }
ad_plain sin(const ad_plain &x) { return get_glob()->add_to_stack<SinOp>(x); }
ad_aug sin(const ad_aug &x) {
  if (x.constant())
    return Scalar(sin(x.Value()));
  else
    return sin(ad_plain(x));
}
ad_adapt sin(const ad_adapt &x) { return ad_adapt(sin(ad_aug(x))); }

// cos: derivative -sin(x).
Writer cos(const Writer &x) {
  return "cos"
         "(" +
         x + ")";
}
void CosOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0)) args.dx(0) += args.dy(0) * -sin(args.x(0));
}
const char *CosOp::op_name() { return "CosOp"; }
ad_plain cos(const ad_plain &x) { return get_glob()->add_to_stack<CosOp>(x); }
ad_aug cos(const ad_aug &x) {
  if (x.constant())
    return Scalar(cos(x.Value()));
  else
    return cos(ad_plain(x));
}
ad_adapt cos(const ad_adapt &x) { return ad_adapt(cos(ad_aug(x))); }

// exp: derivative is the output itself, y = exp(x).
Writer exp(const Writer &x) {
  return "exp"
         "(" +
         x + ")";
}
void ExpOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0)) args.dx(0) += args.dy(0) * args.y(0);
}
const char *ExpOp::op_name() { return "ExpOp"; }
ad_plain exp(const ad_plain &x) { return get_glob()->add_to_stack<ExpOp>(x); }
ad_aug exp(const ad_aug &x) {
  if (x.constant())
    return Scalar(exp(x.Value()));
  else
    return exp(ad_plain(x));
}
ad_adapt exp(const ad_adapt &x) { return ad_adapt(exp(ad_aug(x))); }

// log: derivative 1/x.
Writer log(const Writer &x) {
  return "log"
         "(" +
         x + ")";
}
void LogOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0)) args.dx(0) += args.dy(0) * Type(1.) / args.x(0);
}
const char *LogOp::op_name() { return "LogOp"; }
ad_plain log(const ad_plain &x) { return get_glob()->add_to_stack<LogOp>(x); }
ad_aug log(const ad_aug &x) {
  if (x.constant())
    return Scalar(log(x.Value()));
  else
    return log(ad_plain(x));
}
ad_adapt log(const ad_adapt &x) { return ad_adapt(log(ad_aug(x))); }

// sqrt: derivative 0.5 / y, reusing the output y = sqrt(x).
Writer sqrt(const Writer &x) {
  return "sqrt"
         "(" +
         x + ")";
}
void SqrtOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0)) args.dx(0) += args.dy(0) * Type(0.5) / args.y(0);
}
const char *SqrtOp::op_name() { return "SqrtOp"; }
ad_plain sqrt(const ad_plain &x) { return get_glob()->add_to_stack<SqrtOp>(x); }
ad_aug sqrt(const ad_aug &x) {
  if (x.constant())
    return Scalar(sqrt(x.Value()));
  else
    return sqrt(ad_plain(x));
}
ad_adapt sqrt(const ad_adapt &x) { return ad_adapt(sqrt(ad_aug(x))); }

// tan: derivative 1/cos(x)^2.
Writer tan(const Writer &x) {
  return "tan"
         "(" +
         x + ")";
}
void TanOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0))
    args.dx(0) += args.dy(0) * Type(1.) / (cos(args.x(0)) * cos(args.x(0)));
}
const char *TanOp::op_name() { return "TanOp"; }
ad_plain tan(const ad_plain &x) { return get_glob()->add_to_stack<TanOp>(x); }
ad_aug tan(const ad_aug &x) {
  if (x.constant())
    return Scalar(tan(x.Value()));
  else
    return tan(ad_plain(x));
}
ad_adapt tan(const ad_adapt &x) { return ad_adapt(tan(ad_aug(x))); }

// sinh: derivative cosh(x).
Writer sinh(const Writer &x) {
  return "sinh"
         "(" +
         x + ")";
}
void SinhOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0)) args.dx(0) += args.dy(0) * cosh(args.x(0));
}
const char *SinhOp::op_name() { return "SinhOp"; }
ad_plain sinh(const ad_plain &x) { return get_glob()->add_to_stack<SinhOp>(x); }
ad_aug sinh(const ad_aug &x) {
  if (x.constant())
    return Scalar(sinh(x.Value()));
  else
    return sinh(ad_plain(x));
}
ad_adapt sinh(const ad_adapt &x) { return ad_adapt(sinh(ad_aug(x))); }

// cosh: derivative sinh(x).
Writer cosh(const Writer &x) {
  return "cosh"
         "(" +
         x + ")";
}
void CoshOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0)) args.dx(0) += args.dy(0) * sinh(args.x(0));
}
const char *CoshOp::op_name() { return "CoshOp"; }
ad_plain cosh(const ad_plain &x) { return get_glob()->add_to_stack<CoshOp>(x); }
ad_aug cosh(const ad_aug &x) {
  if (x.constant())
    return Scalar(cosh(x.Value()));
  else
    return cosh(ad_plain(x));
}
ad_adapt cosh(const ad_adapt &x) { return ad_adapt(cosh(ad_aug(x))); }

// tanh: derivative 1/cosh(x)^2.
Writer tanh(const Writer &x) {
  return "tanh"
         "(" +
         x + ")";
}
void TanhOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0))
    args.dx(0) += args.dy(0) * Type(1.) / (cosh(args.x(0)) * cosh(args.x(0)));
}
const char *TanhOp::op_name() { return "TanhOp"; }
ad_plain tanh(const ad_plain &x) { return get_glob()->add_to_stack<TanhOp>(x); }
ad_aug tanh(const ad_aug &x) {
  if (x.constant())
    return Scalar(tanh(x.Value()));
  else
    return tanh(ad_plain(x));
}
ad_adapt tanh(const ad_adapt &x) { return ad_adapt(tanh(ad_aug(x))); }
2706 
// expm1 Writer overload: emit source text "expm1(<x>)" for code generation.
Writer expm1(const Writer &x) {
  return "expm1"
         "(" +
         x + ")";
}
2712 void Expm1::reverse(ReverseArgs<Scalar> &args) {
2713  typedef Scalar Type;
2714  if (args.dy(0) != Type(0)) args.dx(0) += args.dy(0) * args.y(0) + Type(1.);
2715 }
const char *Expm1::op_name() { return "Expm1"; }
// expm1: taped / constant-folding / adapter overloads (pattern as above).
ad_plain expm1(const ad_plain &x) { return get_glob()->add_to_stack<Expm1>(x); }
ad_aug expm1(const ad_aug &x) {
  if (x.constant())
    return Scalar(expm1(x.Value()));
  else
    return expm1(ad_plain(x));
}
ad_adapt expm1(const ad_adapt &x) { return ad_adapt(expm1(ad_aug(x))); }
2725 
// --- More unary primitives (same overload pattern as above) ----------------

// log1p: derivative 1/(x + 1).
Writer log1p(const Writer &x) {
  return "log1p"
         "(" +
         x + ")";
}
void Log1p::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0))
    args.dx(0) += args.dy(0) * Type(1.) / (args.x(0) + Type(1.));
}
const char *Log1p::op_name() { return "Log1p"; }
ad_plain log1p(const ad_plain &x) { return get_glob()->add_to_stack<Log1p>(x); }
ad_aug log1p(const ad_aug &x) {
  if (x.constant())
    return Scalar(log1p(x.Value()));
  else
    return log1p(ad_plain(x));
}
ad_adapt log1p(const ad_adapt &x) { return ad_adapt(log1p(ad_aug(x))); }

// asin: derivative 1/sqrt(1 - x^2).
Writer asin(const Writer &x) {
  return "asin"
         "(" +
         x + ")";
}
void AsinOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0))
    args.dx(0) +=
        args.dy(0) * Type(1.) / sqrt(Type(1.) - args.x(0) * args.x(0));
}
const char *AsinOp::op_name() { return "AsinOp"; }
ad_plain asin(const ad_plain &x) { return get_glob()->add_to_stack<AsinOp>(x); }
ad_aug asin(const ad_aug &x) {
  if (x.constant())
    return Scalar(asin(x.Value()));
  else
    return asin(ad_plain(x));
}
ad_adapt asin(const ad_adapt &x) { return ad_adapt(asin(ad_aug(x))); }

// acos: derivative -1/sqrt(1 - x^2).
Writer acos(const Writer &x) {
  return "acos"
         "(" +
         x + ")";
}
void AcosOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0))
    args.dx(0) +=
        args.dy(0) * Type(-1.) / sqrt(Type(1.) - args.x(0) * args.x(0));
}
const char *AcosOp::op_name() { return "AcosOp"; }
ad_plain acos(const ad_plain &x) { return get_glob()->add_to_stack<AcosOp>(x); }
ad_aug acos(const ad_aug &x) {
  if (x.constant())
    return Scalar(acos(x.Value()));
  else
    return acos(ad_plain(x));
}
ad_adapt acos(const ad_adapt &x) { return ad_adapt(acos(ad_aug(x))); }

// atan: derivative 1/(1 + x^2).
Writer atan(const Writer &x) {
  return "atan"
         "(" +
         x + ")";
}
void AtanOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0))
    args.dx(0) += args.dy(0) * Type(1.) / (Type(1.) + args.x(0) * args.x(0));
}
const char *AtanOp::op_name() { return "AtanOp"; }
ad_plain atan(const ad_plain &x) { return get_glob()->add_to_stack<AtanOp>(x); }
ad_aug atan(const ad_aug &x) {
  if (x.constant())
    return Scalar(atan(x.Value()));
  else
    return atan(ad_plain(x));
}
ad_adapt atan(const ad_adapt &x) { return ad_adapt(atan(ad_aug(x))); }

// asinh: derivative 1/sqrt(x^2 + 1).
Writer asinh(const Writer &x) {
  return "asinh"
         "(" +
         x + ")";
}
void AsinhOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0))
    args.dx(0) +=
        args.dy(0) * Type(1.) / sqrt(args.x(0) * args.x(0) + Type(1.));
}
const char *AsinhOp::op_name() { return "AsinhOp"; }
ad_plain asinh(const ad_plain &x) {
  return get_glob()->add_to_stack<AsinhOp>(x);
}
ad_aug asinh(const ad_aug &x) {
  if (x.constant())
    return Scalar(asinh(x.Value()));
  else
    return asinh(ad_plain(x));
}
ad_adapt asinh(const ad_adapt &x) { return ad_adapt(asinh(ad_aug(x))); }

// acosh: derivative 1/sqrt(x^2 - 1).
Writer acosh(const Writer &x) {
  return "acosh"
         "(" +
         x + ")";
}
void AcoshOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0))
    args.dx(0) +=
        args.dy(0) * Type(1.) / sqrt(args.x(0) * args.x(0) - Type(1.));
}
const char *AcoshOp::op_name() { return "AcoshOp"; }
ad_plain acosh(const ad_plain &x) {
  return get_glob()->add_to_stack<AcoshOp>(x);
}
ad_aug acosh(const ad_aug &x) {
  if (x.constant())
    return Scalar(acosh(x.Value()));
  else
    return acosh(ad_plain(x));
}
ad_adapt acosh(const ad_adapt &x) { return ad_adapt(acosh(ad_aug(x))); }

// atanh: derivative 1/(1 - x^2).
Writer atanh(const Writer &x) {
  return "atanh"
         "(" +
         x + ")";
}
void AtanhOp::reverse(ReverseArgs<Scalar> &args) {
  typedef Scalar Type;
  if (args.dy(0) != Type(0))
    args.dx(0) += args.dy(0) * Type(1.) / (Type(1) - args.x(0) * args.x(0));
}
const char *AtanhOp::op_name() { return "AtanhOp"; }
ad_plain atanh(const ad_plain &x) {
  return get_glob()->add_to_stack<AtanhOp>(x);
}
ad_aug atanh(const ad_aug &x) {
  if (x.constant())
    return Scalar(atanh(x.Value()));
  else
    return atanh(ad_plain(x));
}
ad_adapt atanh(const ad_adapt &x) { return ad_adapt(atanh(ad_aug(x))); }
2875 
// --- Binary primitives -----------------------------------------------------
// Same pattern as the unary ops, but constants fold only when BOTH
// arguments are constant.

Writer pow(const Writer &x1, const Writer &x2) {
  return "pow"
         "(" +
         x1 + "," + x2 + ")";
}
const char *PowOp::op_name() { return "PowOp"; }
ad_plain pow(const ad_plain &x1, const ad_plain &x2) {
  return get_glob()->add_to_stack<PowOp>(x1, x2);
}
ad_aug pow(const ad_aug &x1, const ad_aug &x2) {
  if (x1.constant() && x2.constant())
    return Scalar(pow(x1.Value(), x2.Value()));
  else
    return pow(ad_plain(x1), ad_plain(x2));
}
ad_adapt pow(const ad_adapt &x1, const ad_adapt &x2) {
  return ad_adapt(pow(ad_aug(x1), ad_aug(x2)));
}

Writer atan2(const Writer &x1, const Writer &x2) {
  return "atan2"
         "(" +
         x1 + "," + x2 + ")";
}
const char *Atan2::op_name() { return "Atan2"; }
ad_plain atan2(const ad_plain &x1, const ad_plain &x2) {
  return get_glob()->add_to_stack<Atan2>(x1, x2);
}
ad_aug atan2(const ad_aug &x1, const ad_aug &x2) {
  if (x1.constant() && x2.constant())
    return Scalar(atan2(x1.Value(), x2.Value()));
  else
    return atan2(ad_plain(x1), ad_plain(x2));
}
ad_adapt atan2(const ad_adapt &x1, const ad_adapt &x2) {
  return ad_adapt(atan2(ad_aug(x1), ad_aug(x2)));
}

Writer max(const Writer &x1, const Writer &x2) {
  return "max"
         "(" +
         x1 + "," + x2 + ")";
}
const char *MaxOp::op_name() { return "MaxOp"; }
ad_plain max(const ad_plain &x1, const ad_plain &x2) {
  return get_glob()->add_to_stack<MaxOp>(x1, x2);
}
ad_aug max(const ad_aug &x1, const ad_aug &x2) {
  if (x1.constant() && x2.constant())
    return Scalar(max(x1.Value(), x2.Value()));
  else
    return max(ad_plain(x1), ad_plain(x2));
}
ad_adapt max(const ad_adapt &x1, const ad_adapt &x2) {
  return ad_adapt(max(ad_aug(x1), ad_aug(x2)));
}

Writer min(const Writer &x1, const Writer &x2) {
  return "min"
         "(" +
         x1 + "," + x2 + ")";
}
const char *MinOp::op_name() { return "MinOp"; }
ad_plain min(const ad_plain &x1, const ad_plain &x2) {
  return get_glob()->add_to_stack<MinOp>(x1, x2);
}
ad_aug min(const ad_aug &x1, const ad_aug &x2) {
  if (x1.constant() && x2.constant())
    return Scalar(min(x1.Value(), x2.Value()));
  else
    return min(ad_plain(x1), ad_plain(x2));
}
ad_adapt min(const ad_adapt &x1, const ad_adapt &x2) {
  return ad_adapt(min(ad_aug(x1), ad_aug(x2)));
}
// --- CondExpEqOp: taped conditional y = (x0 == x1 ? x2 : x3) ---------------
// Reverse mode routes the output adjoint to whichever branch was selected;
// no derivative propagates to the compared arguments x0, x1.
void CondExpEqOp::forward(ForwardArgs<Scalar> &args) {
  if (args.x(0) == args.x(1)) {
    args.y(0) = args.x(2);
  } else {
    args.y(0) = args.x(3);
  }
}
void CondExpEqOp::reverse(ReverseArgs<Scalar> &args) {
  if (args.x(0) == args.x(1)) {
    args.dx(2) += args.dy(0);
  } else {
    args.dx(3) += args.dy(0);
  }
}
// Replay: re-record the conditional so the branch is decided at replay time.
void CondExpEqOp::forward(ForwardArgs<Replay> &args) {
  args.y(0) = CondExpEq(args.x(0), args.x(1), args.x(2), args.x(3));
}
void CondExpEqOp::reverse(ReverseArgs<Replay> &args) {
  Replay zero(0);
  args.dx(2) += CondExpEq(args.x(0), args.x(1), args.dy(0), zero);
  args.dx(3) += CondExpEq(args.x(0), args.x(1), zero, args.dy(0));
}
// Writer: emit C if/else source text. NOTE(review): the assignments to
// args.y(0)/args.dx(i) presumably emit code as a side effect of Writer's
// operators — verify against the Writer class.
void CondExpEqOp::forward(ForwardArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << "==" << args.x(1) << ") ";
  args.y(0) = args.x(2);
  w << " else ";
  args.y(0) = args.x(3);
}
void CondExpEqOp::reverse(ReverseArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << "==" << args.x(1) << ") ";
  args.dx(2) += args.dy(0);
  w << " else ";
  args.dx(3) += args.dy(0);
}
const char *CondExpEqOp::op_name() {
  return "CExp"
         "Eq";
}
// Plain scalar evaluation.
Scalar CondExpEq(const Scalar &x0, const Scalar &x1, const Scalar &x2,
                 const Scalar &x3) {
  if (x0 == x1)
    return x2;
  else
    return x3;
}
// Taped version: one CondExpEqOp node with all four inputs.
ad_plain CondExpEq(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
                   const ad_plain &x3) {
  OperatorPure *pOp = get_glob()->getOperator<CondExpEqOp>();
  std::vector<ad_plain> x(4);
  x[0] = x0;
  x[1] = x1;
  x[2] = x2;
  x[3] = x3;
  std::vector<ad_plain> y = get_glob()->add_to_stack<CondExpEqOp>(pOp, x);
  return y[0];
}
// ad_aug version: when both compared args are constant the branch is known
// now, so return the selected argument without taping anything.
ad_aug CondExpEq(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
                 const ad_aug &x3) {
  if (x0.constant() && x1.constant()) {
    if (x0.Value() == x1.Value())
      return x2;
    else
      return x3;
  } else {
    return CondExpEq(ad_plain(x0), ad_plain(x1), ad_plain(x2), ad_plain(x3));
  }
}
// --- CondExpNeOp: taped conditional y = (x0 != x1 ? x2 : x3) ---------------
// Structure identical to CondExpEqOp, with the comparison negated.
void CondExpNeOp::forward(ForwardArgs<Scalar> &args) {
  if (args.x(0) != args.x(1)) {
    args.y(0) = args.x(2);
  } else {
    args.y(0) = args.x(3);
  }
}
void CondExpNeOp::reverse(ReverseArgs<Scalar> &args) {
  if (args.x(0) != args.x(1)) {
    args.dx(2) += args.dy(0);
  } else {
    args.dx(3) += args.dy(0);
  }
}
void CondExpNeOp::forward(ForwardArgs<Replay> &args) {
  args.y(0) = CondExpNe(args.x(0), args.x(1), args.x(2), args.x(3));
}
void CondExpNeOp::reverse(ReverseArgs<Replay> &args) {
  Replay zero(0);
  args.dx(2) += CondExpNe(args.x(0), args.x(1), args.dy(0), zero);
  args.dx(3) += CondExpNe(args.x(0), args.x(1), zero, args.dy(0));
}
void CondExpNeOp::forward(ForwardArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << "!=" << args.x(1) << ") ";
  args.y(0) = args.x(2);
  w << " else ";
  args.y(0) = args.x(3);
}
void CondExpNeOp::reverse(ReverseArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << "!=" << args.x(1) << ") ";
  args.dx(2) += args.dy(0);
  w << " else ";
  args.dx(3) += args.dy(0);
}
const char *CondExpNeOp::op_name() {
  return "CExp"
         "Ne";
}
Scalar CondExpNe(const Scalar &x0, const Scalar &x1, const Scalar &x2,
                 const Scalar &x3) {
  if (x0 != x1)
    return x2;
  else
    return x3;
}
ad_plain CondExpNe(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
                   const ad_plain &x3) {
  OperatorPure *pOp = get_glob()->getOperator<CondExpNeOp>();
  std::vector<ad_plain> x(4);
  x[0] = x0;
  x[1] = x1;
  x[2] = x2;
  x[3] = x3;
  std::vector<ad_plain> y = get_glob()->add_to_stack<CondExpNeOp>(pOp, x);
  return y[0];
}
// Constant-fold when both compared arguments are constants.
ad_aug CondExpNe(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
                 const ad_aug &x3) {
  if (x0.constant() && x1.constant()) {
    if (x0.Value() != x1.Value())
      return x2;
    else
      return x3;
  } else {
    return CondExpNe(ad_plain(x0), ad_plain(x1), ad_plain(x2), ad_plain(x3));
  }
}
// --- CondExpGtOp: taped conditional y = (x0 > x1 ? x2 : x3) ----------------
// Structure identical to CondExpEqOp, with a greater-than comparison.
void CondExpGtOp::forward(ForwardArgs<Scalar> &args) {
  if (args.x(0) > args.x(1)) {
    args.y(0) = args.x(2);
  } else {
    args.y(0) = args.x(3);
  }
}
void CondExpGtOp::reverse(ReverseArgs<Scalar> &args) {
  if (args.x(0) > args.x(1)) {
    args.dx(2) += args.dy(0);
  } else {
    args.dx(3) += args.dy(0);
  }
}
void CondExpGtOp::forward(ForwardArgs<Replay> &args) {
  args.y(0) = CondExpGt(args.x(0), args.x(1), args.x(2), args.x(3));
}
void CondExpGtOp::reverse(ReverseArgs<Replay> &args) {
  Replay zero(0);
  args.dx(2) += CondExpGt(args.x(0), args.x(1), args.dy(0), zero);
  args.dx(3) += CondExpGt(args.x(0), args.x(1), zero, args.dy(0));
}
void CondExpGtOp::forward(ForwardArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << ">" << args.x(1) << ") ";
  args.y(0) = args.x(2);
  w << " else ";
  args.y(0) = args.x(3);
}
void CondExpGtOp::reverse(ReverseArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << ">" << args.x(1) << ") ";
  args.dx(2) += args.dy(0);
  w << " else ";
  args.dx(3) += args.dy(0);
}
const char *CondExpGtOp::op_name() {
  return "CExp"
         "Gt";
}
Scalar CondExpGt(const Scalar &x0, const Scalar &x1, const Scalar &x2,
                 const Scalar &x3) {
  if (x0 > x1)
    return x2;
  else
    return x3;
}
ad_plain CondExpGt(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
                   const ad_plain &x3) {
  OperatorPure *pOp = get_glob()->getOperator<CondExpGtOp>();
  std::vector<ad_plain> x(4);
  x[0] = x0;
  x[1] = x1;
  x[2] = x2;
  x[3] = x3;
  std::vector<ad_plain> y = get_glob()->add_to_stack<CondExpGtOp>(pOp, x);
  return y[0];
}
// Constant-fold when both compared arguments are constants.
ad_aug CondExpGt(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
                 const ad_aug &x3) {
  if (x0.constant() && x1.constant()) {
    if (x0.Value() > x1.Value())
      return x2;
    else
      return x3;
  } else {
    return CondExpGt(ad_plain(x0), ad_plain(x1), ad_plain(x2), ad_plain(x3));
  }
}
3158 void CondExpLtOp::forward(ForwardArgs<Scalar> &args) {
3159  if (args.x(0) < args.x(1)) {
3160  args.y(0) = args.x(2);
3161  } else {
3162  args.y(0) = args.x(3);
3163  }
3164 }
3165 void CondExpLtOp::reverse(ReverseArgs<Scalar> &args) {
3166  if (args.x(0) < args.x(1)) {
3167  args.dx(2) += args.dy(0);
3168  } else {
3169  args.dx(3) += args.dy(0);
3170  }
3171 }
3172 void CondExpLtOp::forward(ForwardArgs<Replay> &args) {
3173  args.y(0) = CondExpLt(args.x(0), args.x(1), args.x(2), args.x(3));
3174 }
// Replay reverse: express branch selection of the adjoint itself with
// conditionals, so it stays valid if the predicate changes on later replays.
void CondExpLtOp::reverse(ReverseArgs<Replay> &args) {
  Replay zero(0);
  args.dx(2) += CondExpLt(args.x(0), args.x(1), args.dy(0), zero);
  args.dx(3) += CondExpLt(args.x(0), args.x(1), zero, args.dy(0));
}
// Source-code writer: emit 'if (x0<x1)' guard and both branch assignments.
void CondExpLtOp::forward(ForwardArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << "<" << args.x(1) << ") ";
  args.y(0) = args.x(2);
  w << " else ";
  args.y(0) = args.x(3);
}
// Source-code writer: emit the guarded adjoint update.
void CondExpLtOp::reverse(ReverseArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << "<" << args.x(1) << ") ";
  args.dx(2) += args.dy(0);
  w << " else ";
  args.dx(3) += args.dy(0);
}
3194 const char *CondExpLtOp::op_name() {
3195  return "CExp"
3196  "Lt";
3197 }
3198 Scalar CondExpLt(const Scalar &x0, const Scalar &x1, const Scalar &x2,
3199  const Scalar &x3) {
3200  if (x0 < x1)
3201  return x2;
3202  else
3203  return x3;
3204 }
3205 ad_plain CondExpLt(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
3206  const ad_plain &x3) {
3207  OperatorPure *pOp = get_glob()->getOperator<CondExpLtOp>();
3208  std::vector<ad_plain> x(4);
3209  x[0] = x0;
3210  x[1] = x1;
3211  x[2] = x2;
3212  x[3] = x3;
3213  std::vector<ad_plain> y = get_glob()->add_to_stack<CondExpLtOp>(pOp, x);
3214  return y[0];
3215 }
3216 ad_aug CondExpLt(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
3217  const ad_aug &x3) {
3218  if (x0.constant() && x1.constant()) {
3219  if (x0.Value() < x1.Value())
3220  return x2;
3221  else
3222  return x3;
3223  } else {
3224  return CondExpLt(ad_plain(x0), ad_plain(x1), ad_plain(x2), ad_plain(x3));
3225  }
3226 }
// CondExpGeOp: conditional expression 'x0 >= x1 ? x2 : x3' recorded as a
// single tape operator (same pattern as the Gt/Lt/Le variants).
void CondExpGeOp::forward(ForwardArgs<Scalar> &args) {
  if (args.x(0) >= args.x(1)) {
    args.y(0) = args.x(2);
  } else {
    args.y(0) = args.x(3);
  }
}
// Scalar reverse: the adjoint flows only to the selected branch.
void CondExpGeOp::reverse(ReverseArgs<Scalar> &args) {
  if (args.x(0) >= args.x(1)) {
    args.dx(2) += args.dy(0);
  } else {
    args.dx(3) += args.dy(0);
  }
}
// Replay: re-record the conditional on the target tape.
void CondExpGeOp::forward(ForwardArgs<Replay> &args) {
  args.y(0) = CondExpGe(args.x(0), args.x(1), args.x(2), args.x(3));
}
// Replay reverse: branch selection of the adjoint is itself expressed with
// conditionals so it stays valid under later replays.
void CondExpGeOp::reverse(ReverseArgs<Replay> &args) {
  Replay zero(0);
  args.dx(2) += CondExpGe(args.x(0), args.x(1), args.dy(0), zero);
  args.dx(3) += CondExpGe(args.x(0), args.x(1), zero, args.dy(0));
}
// Source-code writer: emit guard and both branch assignments.
void CondExpGeOp::forward(ForwardArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << ">=" << args.x(1) << ") ";
  args.y(0) = args.x(2);
  w << " else ";
  args.y(0) = args.x(3);
}
// Source-code writer: emit the guarded adjoint update.
void CondExpGeOp::reverse(ReverseArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << ">=" << args.x(1) << ") ";
  args.dx(2) += args.dy(0);
  w << " else ";
  args.dx(3) += args.dy(0);
}
// Operator display name ("CExpGe").
const char *CondExpGeOp::op_name() {
  return "CExp"
         "Ge";
}
// Scalar overload: plain branch.
Scalar CondExpGe(const Scalar &x0, const Scalar &x1, const Scalar &x2,
                 const Scalar &x3) {
  if (x0 >= x1)
    return x2;
  else
    return x3;
}
// Taped overload: record one CondExpGeOp node with all four operands.
ad_plain CondExpGe(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
                   const ad_plain &x3) {
  OperatorPure *pOp = get_glob()->getOperator<CondExpGeOp>();
  std::vector<ad_plain> x(4);
  x[0] = x0;
  x[1] = x1;
  x[2] = x2;
  x[3] = x3;
  std::vector<ad_plain> y = get_glob()->add_to_stack<CondExpGeOp>(pOp, x);
  return y[0];
}
// Augmented overload: fold at recording time when the predicate operands are
// constants; otherwise record the operator.
ad_aug CondExpGe(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
                 const ad_aug &x3) {
  if (x0.constant() && x1.constant()) {
    if (x0.Value() >= x1.Value())
      return x2;
    else
      return x3;
  } else {
    return CondExpGe(ad_plain(x0), ad_plain(x1), ad_plain(x2), ad_plain(x3));
  }
}
// CondExpLeOp: conditional expression 'x0 <= x1 ? x2 : x3' recorded as a
// single tape operator (same pattern as the Gt/Lt/Ge variants).
void CondExpLeOp::forward(ForwardArgs<Scalar> &args) {
  if (args.x(0) <= args.x(1)) {
    args.y(0) = args.x(2);
  } else {
    args.y(0) = args.x(3);
  }
}
// Scalar reverse: the adjoint flows only to the selected branch.
void CondExpLeOp::reverse(ReverseArgs<Scalar> &args) {
  if (args.x(0) <= args.x(1)) {
    args.dx(2) += args.dy(0);
  } else {
    args.dx(3) += args.dy(0);
  }
}
// Replay: re-record the conditional on the target tape.
void CondExpLeOp::forward(ForwardArgs<Replay> &args) {
  args.y(0) = CondExpLe(args.x(0), args.x(1), args.x(2), args.x(3));
}
// Replay reverse: branch selection of the adjoint is itself expressed with
// conditionals so it stays valid under later replays.
void CondExpLeOp::reverse(ReverseArgs<Replay> &args) {
  Replay zero(0);
  args.dx(2) += CondExpLe(args.x(0), args.x(1), args.dy(0), zero);
  args.dx(3) += CondExpLe(args.x(0), args.x(1), zero, args.dy(0));
}
// Source-code writer: emit guard and both branch assignments.
void CondExpLeOp::forward(ForwardArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << "<=" << args.x(1) << ") ";
  args.y(0) = args.x(2);
  w << " else ";
  args.y(0) = args.x(3);
}
// Source-code writer: emit the guarded adjoint update.
void CondExpLeOp::reverse(ReverseArgs<Writer> &args) {
  Writer w;
  w << "if (" << args.x(0) << "<=" << args.x(1) << ") ";
  args.dx(2) += args.dy(0);
  w << " else ";
  args.dx(3) += args.dy(0);
}
// Operator display name ("CExpLe").
const char *CondExpLeOp::op_name() {
  return "CExp"
         "Le";
}
// Scalar overload: plain branch.
Scalar CondExpLe(const Scalar &x0, const Scalar &x1, const Scalar &x2,
                 const Scalar &x3) {
  if (x0 <= x1)
    return x2;
  else
    return x3;
}
// Taped overload: record one CondExpLeOp node with all four operands.
ad_plain CondExpLe(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
                   const ad_plain &x3) {
  OperatorPure *pOp = get_glob()->getOperator<CondExpLeOp>();
  std::vector<ad_plain> x(4);
  x[0] = x0;
  x[1] = x1;
  x[2] = x2;
  x[3] = x3;
  std::vector<ad_plain> y = get_glob()->add_to_stack<CondExpLeOp>(pOp, x);
  return y[0];
}
// Augmented overload: fold at recording time when the predicate operands are
// constants; otherwise record the operator.
ad_aug CondExpLe(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
                 const ad_aug &x3) {
  if (x0.constant() && x1.constant()) {
    if (x0.Value() <= x1.Value())
      return x2;
    else
      return x3;
  } else {
    return CondExpLe(ad_plain(x0), ad_plain(x1), ad_plain(x2), ad_plain(x3));
  }
}
3365 
// SumOp: reduce n inputs to their sum (single output).
Index SumOp::input_size() const { return n; }

Index SumOp::output_size() const { return 1; }

SumOp::SumOp(size_t n) : n(n) {}

const char *SumOp::op_name() { return "SumOp"; }
3373 
// LogSpaceSumOp: log(sum(exp(x))) reduction, n inputs -> 1 output.
Index LogSpaceSumOp::input_size() const { return this->n; }

Index LogSpaceSumOp::output_size() const { return 1; }

LogSpaceSumOp::LogSpaceSumOp(size_t n) : n(n) {}
3379 
3380 void LogSpaceSumOp::forward(ForwardArgs<Scalar> &args) {
3381  Scalar Max = -INFINITY;
3382  for (size_t i = 0; i < n; i++) {
3383  if (Max < args.x(i)) Max = args.x(i);
3384  }
3385  args.y(0) = 0;
3386  for (size_t i = 0; i < n; i++) {
3387  args.y(0) += exp(args.x(i) - Max);
3388  }
3389  args.y(0) = Max + log(args.y(0));
3390 }
3391 
// Replay: re-record the log-space sum on the target tape.
void LogSpaceSumOp::forward(ForwardArgs<Replay> &args) {
  std::vector<ad_plain> x(input_size());
  for (Index i = 0; i < input_size(); i++) x[i] = args.x(i);
  args.y(0) = logspace_sum(x);
}

const char *LogSpaceSumOp::op_name() { return "LSSumOp"; }

// Record log(sum(exp(x))) as a single LogSpaceSumOp node on the active tape.
ad_plain logspace_sum(const std::vector<ad_plain> &x) {
  OperatorPure *pOp = get_glob()->getOperator<LogSpaceSumOp>(x.size());
  return get_glob()->add_to_stack<LogSpaceSumOp>(pOp, x)[0];
}
3404 
// LogSpaceSumStrideOp: strided log-space sum; one input segment per term,
// a single (log-sum) output.
Index LogSpaceSumStrideOp::number_of_terms() const { return stride.size(); }

Index LogSpaceSumStrideOp::input_size() const { return number_of_terms(); }

Index LogSpaceSumStrideOp::output_size() const { return 1; }

// stride[j]: step between consecutive elements of segment j; n: row count.
LogSpaceSumStrideOp::LogSpaceSumStrideOp(std::vector<Index> stride, size_t n)
    : stride(stride), n(n) {}
3413 
// Scalar forward: log(sum_{i<n} exp(rowsum(i))) where rowsum(i) combines one
// element from each of the m strided input segments. Max-shifted for
// numerical stability (two passes over the rows).
void LogSpaceSumStrideOp::forward(ForwardArgs<Scalar> &args) {
  Scalar Max = -INFINITY;

  // Raw pointers to the start of each input segment.
  size_t m = stride.size();
  std::vector<Scalar *> wrk(m);
  Scalar **px = &(wrk[0]);
  for (size_t i = 0; i < m; i++) {
    px[i] = args.x_ptr(i);
  }

  // Pass 1: running maximum of the row sums.
  for (size_t i = 0; i < n; i++) {
    Scalar s = rowsum(px, i);
    if (Max < s) Max = s;
  }

  // Pass 2: accumulate shifted exponentials, then undo the shift.
  args.y(0) = 0;
  for (size_t i = 0; i < n; i++) {
    Scalar s = rowsum(px, i);
    args.y(0) += exp(s - Max);
  }
  args.y(0) = Max + log(args.y(0));
}
3436 
// Replay: re-record as a fresh strided log-space sum on the target tape.
void LogSpaceSumStrideOp::forward(ForwardArgs<Replay> &args) {
  std::vector<ad_plain> x(input_size());
  for (Index i = 0; i < input_size(); i++) x[i] = args.x(i);
  args.y(0) = logspace_sum_stride(x, stride, n);
}

// Input j covers n * stride[j] contiguous variables starting at input(j).
void LogSpaceSumStrideOp::dependencies(Args<> &args, Dependencies &dep) const {
  for (size_t j = 0; j < (size_t)number_of_terms(); j++) {
    size_t K = n * stride[j];
    dep.add_segment(args.input(j), K);
  }
}
3449 
const char *LogSpaceSumStrideOp::op_name() { return "LSStride"; }

// Source-code generation is not implemented for this operator.
void LogSpaceSumStrideOp::forward(ForwardArgs<Writer> &args) {
  TMBAD_ASSERT(false);
}

void LogSpaceSumStrideOp::reverse(ReverseArgs<Writer> &args) {
  TMBAD_ASSERT(false);
}
3459 
// Record a strided log-space sum; x holds one segment-start variable per
// term and must match 'stride' in length.
ad_plain logspace_sum_stride(const std::vector<ad_plain> &x,
                             const std::vector<Index> &stride, size_t n) {
  TMBAD_ASSERT(x.size() == stride.size());
  OperatorPure *pOp = get_glob()->getOperator<LogSpaceSumStrideOp>(stride, n);
  return get_glob()->add_to_stack<LogSpaceSumStrideOp>(pOp, x)[0];
}
3466 } // namespace TMBad
3467 // Autogenerated - do not edit by hand !
3468 #include "graph2dot.hpp"
3469 namespace TMBad {
3470 
// Write 'glob' (with graph structure G) in Graphviz DOT format to 'cout'.
// Each operator becomes a node labelled by its name (optionally suffixed by
// its index); the current subgraph is drawn filled, and the operators of the
// independent / dependent variables are each forced onto a common rank.
// NOTE(review): 'glob' and 'G' are taken by value (full copies) -- confirm
// this is intentional before changing to const references.
void graph2dot(global glob, graph G, bool show_id, std::ostream &cout) {
  cout << "digraph graphname {\n";
  // One DOT node per operator.
  for (size_t i = 0; i < glob.opstack.size(); i++) {
    if (!show_id)
      cout << i << " [label=\"" << glob.opstack[i]->op_name() << "\"];\n";
    else
      cout << i << " [label=\"" << glob.opstack[i]->op_name() << " " << i
           << "\"];\n";
  }
  // Directed edges from the dependency graph.
  for (size_t node = 0; node < G.num_nodes(); node++) {
    for (size_t k = 0; k < G.num_neighbors(node); k++) {
      cout << node << " -> " << G.neighbors(node)[k] << ";\n";
    }
  }
  // Highlight the currently selected subgraph.
  for (size_t i = 0; i < glob.subgraph_seq.size(); i++) {
    size_t node = glob.subgraph_seq[i];
    cout << node << " [style=\"filled\"];\n";
  }

  std::vector<Index> v2o = glob.var2op();

  // Independent-variable operators on one rank ...
  cout << "{rank=same;";
  for (size_t i = 0; i < glob.inv_index.size(); i++) {
    cout << v2o[glob.inv_index[i]] << ";";
  }
  cout << "}\n";

  // ... and dependent-variable operators on another.
  cout << "{rank=same;";
  for (size_t i = 0; i < glob.dep_index.size(); i++) {
    cout << v2o[glob.dep_index[i]] << ";";
  }
  cout << "}\n";

  cout << "}\n";
}
3506 
3507 void graph2dot(global glob, bool show_id, std::ostream &cout) {
3508  graph G = glob.forward_graph();
3509  graph2dot(glob, G, show_id, cout);
3510 }
3511 
3512 void graph2dot(const char *filename, global glob, graph G, bool show_id) {
3513  std::ofstream myfile;
3514  myfile.open(filename);
3515  graph2dot(glob, G, show_id, myfile);
3516  myfile.close();
3517 }
3518 
3519 void graph2dot(const char *filename, global glob, bool show_id) {
3520  std::ofstream myfile;
3521  myfile.open(filename);
3522  graph2dot(glob, show_id, myfile);
3523  myfile.close();
3524 }
3525 } // namespace TMBad
3526 // Autogenerated - do not edit by hand !
3527 #include "graph_transform.hpp"
3528 namespace TMBad {
3529 
// Indices (as size_t) of the 'true' entries of x; forwards to the templated
// version.
std::vector<size_t> which(const std::vector<bool> &x) {
  return which<size_t>(x);
}
3533 
// Product of all elements; an empty input yields the multiplicative
// identity 1.
size_t prod_int(const std::vector<size_t> &x) {
  size_t result = 1;
  for (std::vector<size_t>::const_iterator it = x.begin(); it != x.end(); ++it)
    result *= *it;
  return result;
}
3539 
// Variables newly reached by a reverse sweep restricted to the operators of
// 'vars', with 'vars' itself removed -- i.e. the boundary of the marked
// variable set.
std::vector<bool> reverse_boundary(global &glob,
                                   const std::vector<bool> &vars) {
  std::vector<bool> boundary(vars);
  std::vector<bool> node_filter = glob.var2op(vars);
  glob.reverse_sub(boundary, node_filter);

  // XOR removes the interior so only newly marked variables remain.
  for (size_t i = 0; i < vars.size(); i++) boundary[i] = boundary[i] ^ vars[i];
  return boundary;
}
3549 
// Operator indices of the 'accumulation tree': the linear tail of the tape
// that only accumulates terms into the final result. With boundary=true only
// the boundary of that region is returned (see reverse_boundary).
std::vector<Index> get_accumulation_tree(global &glob, bool boundary) {
  std::vector<OperatorPure *> &opstack = glob.opstack;

  // Mark linear operators ...
  std::vector<bool> node_subset(opstack.size(), false);
  for (size_t i = 0; i < opstack.size(); i++) {
    node_subset[i] = opstack[i]->info().test(op_info::is_linear);
  }

  // ... then complement: non-linear operators.
  node_subset.flip();

  std::vector<bool> var_subset = glob.op2var(node_subset);

  // Propagate marks through a reverse dependency sweep.
  glob.reverse(var_subset);

  // Complement again: variables influenced by linear operators only.
  var_subset.flip();

  if (boundary) var_subset = reverse_boundary(glob, var_subset);

  node_subset = glob.var2op(var_subset);

  return which<Index>(node_subset);
}
3572 
3573 std::vector<Index> find_op_by_name(global &glob, const char *name) {
3574  std::vector<Index> ans;
3575  std::vector<OperatorPure *> &opstack = glob.opstack;
3576  for (size_t i = 0; i < opstack.size(); i++) {
3577  if (!strcmp(opstack[i]->op_name(), name)) {
3578  ans.push_back(i);
3579  }
3580  }
3581  return ans;
3582 }
3583 
// Replace each operator in 'seq' by independent variables of the same
// dimension: every substituted operator is split into a NullOp2 consuming
// its inputs (nin -> 0) and a NullOp2 re-creating its (now free) outputs
// (0 -> nou). Returns the variable indices of the new independent variables,
// which are appended to glob.inv_index; inv_tags/dep_tags control whether
// the existing inv/dep indices are kept.
std::vector<Index> substitute(global &glob, const std::vector<Index> &seq,
                              bool inv_tags, bool dep_tags) {
  std::vector<OperatorPure *> &opstack = glob.opstack;
  std::vector<Index> seq2(seq);
  // Insert a spare operator slot before each substituted operator.
  make_space_inplace(opstack, seq2);
  OperatorPure *invop = glob.getOperator<global::InvOp>();
  for (size_t i = 0; i < seq2.size(); i++) {
    OperatorPure *op = opstack[seq2[i]];
    // Substituting an existing independent variable makes no sense.
    if (inv_tags) TMBAD_ASSERT(op != invop);
    size_t nin = op->input_size();
    size_t nou = op->output_size();
    opstack[seq2[i] - 1] = glob.getOperator<global::NullOp2>(nin, 0);
    opstack[seq2[i]] = glob.getOperator<global::NullOp2>(0, nou);
    op->deallocate();
  }
  std::vector<Index> new_inv = glob.op2var(seq2);
  if (!inv_tags) glob.inv_index.resize(0);
  if (!dep_tags) glob.dep_index.resize(0);
  glob.inv_index.insert(glob.inv_index.end(), new_inv.begin(), new_inv.end());
  return new_inv;
}
3606 
3607 std::vector<Index> substitute(global &glob, const char *name, bool inv_tags,
3608  bool dep_tags) {
3609  std::vector<Index> seq = find_op_by_name(glob, name);
3610  return substitute(glob, seq, inv_tags, dep_tags);
3611 }
3612 
3614  global glob_tree = glob;
3615 
3616  std::vector<Index> boundary = get_accumulation_tree(glob, true);
3617 
3618  substitute(glob_tree, boundary, false, true);
3619  glob_tree.eliminate();
3620 
3621  size_t n = glob_tree.inv_index.size();
3622 
3623  std::vector<Scalar> x0(n);
3624  for (size_t i = 0; i < n; i++) x0[i] = glob_tree.value_inv(i);
3625  glob_tree.forward();
3626  glob_tree.clear_deriv();
3627  glob_tree.deriv_dep(0) = 1;
3628  glob_tree.reverse();
3629  Scalar V = glob_tree.value_dep(0);
3630  std::vector<Scalar> J(n);
3631  for (size_t i = 0; i < n; i++) J[i] = glob_tree.deriv_inv(i);
3632 
3633  for (size_t i = 0; i < n; i++) V -= J[i] * x0[i];
3634 
3635  std::vector<Index> vars = glob.op2var(boundary);
3636  glob.dep_index.resize(0);
3637  glob.ad_start();
3638  std::vector<ad_aug_index> res(vars.begin(), vars.end());
3639  for (size_t i = 0; i < vars.size(); i++) {
3640  res[i] = res[i] * J[i];
3641  if (i == 0) res[i] += V;
3642  if (!sum_) res[i].Dependent();
3643  }
3644  if (sum_) {
3645  ad_aug sum_res = sum(res);
3646  sum_res.Dependent();
3647  }
3648  glob.ad_stop();
3649  glob.eliminate();
3650  return glob;
3651 }
3652 
// Replace all dependent variables of 'glob' by their (optionally negated)
// sum, leaving a tape with exactly one dependent variable.
void aggregate(global &glob, int sign) {
  TMBAD_ASSERT((sign == 1) || (sign == -1));
  // Resume recording on 'glob' and sum the current outputs.
  glob.ad_start();
  std::vector<ad_aug_index> x(glob.dep_index.begin(), glob.dep_index.end());
  ad_aug y = 0;
  for (size_t i = 0; i < x.size(); i++) y += x[i];
  if (sign < 0) y = -y;
  // The sum becomes the one and only dependent variable.
  glob.dep_index.resize(0);
  y.Dependent();
  glob.ad_stop();
}
3664 
// Snapshot of a tape's size so later additions can be rolled back.
old_state::old_state(global &glob) : glob(glob) {
  dep_index = glob.dep_index;
  opstack_size = glob.opstack.size();
}

// Undo everything recorded since construction: pop operators (together with
// their inputs and output values) until the tape is back to its snapshotted
// size, and restore the dependent variable indices.
void old_state::restore() {
  glob.dep_index = dep_index;
  while (glob.opstack.size() > opstack_size) {
    Index input_size = glob.opstack.back()->input_size();
    Index output_size = glob.opstack.back()->output_size();
    glob.inputs.resize(glob.inputs.size() - input_size);
    glob.values.resize(glob.values.size() - output_size);
    glob.opstack.back()->deallocate();
    glob.opstack.pop_back();
  }
}
3681 
// Bookkeeping of structurally identical terms on 'glob'; initialization can
// be deferred by passing do_init = false (see initialize()).
term_info::term_info(global &glob, bool do_init) : glob(glob) {
  if (do_init) initialize();
}
3685 
3686 void term_info::initialize(std::vector<Index> inv_remap) {
3687  if (inv_remap.size() == 0) inv_remap.resize(glob.inv_index.size(), 0);
3688  inv_remap = radix::factor<Index>(inv_remap);
3689  std::vector<Index> remap = remap_identical_sub_expressions(glob, inv_remap);
3690  std::vector<Index> term_ids = subset(remap, glob.dep_index);
3691  id = radix::factor<Index>(term_ids);
3692  Index max_id = *std::max_element(id.begin(), id.end());
3693  count.resize(max_id + 1, 0);
3694  for (size_t i = 0; i < id.size(); i++) {
3695  count[id[i]]++;
3696  }
3697 }
3698 
// Default quadrature configuration: non-adaptive, NaN mapped to zero,
// y-tolerance 1e-2 and unit initial step.
gk_config::gk_config()
    : debug(false), adaptive(false), nan2zero(true), ytol(1e-2), dx(1) {}
3701 
3703  size_t count = 1;
3704  for (size_t i = 0; i < bound.size(); i++)
3705  if (mask_[i]) count *= bound[i];
3706  return count;
3707 }
3708 
3709 multivariate_index::multivariate_index(size_t bound_, size_t dim, bool flag)
3710  : pointer(0) {
3711  bound.resize(dim, bound_);
3712  x.resize(dim, 0);
3713  mask_.resize(dim, flag);
3714 }
3715 
3716 multivariate_index::multivariate_index(std::vector<size_t> bound, bool flag)
3717  : pointer(0), bound(bound) {
3718  x.resize(bound.size(), 0);
3719  mask_.resize(bound.size(), flag);
3720 }
3721 
// Toggle which dimensions are active (iterated over).
void multivariate_index::flip() { mask_.flip(); }
3723 
3725  size_t N = 1;
3726  for (size_t i = 0; i < x.size(); i++) {
3727  if (mask_[i]) {
3728  if (x[i] < bound[i] - 1) {
3729  x[i]++;
3730  pointer += N;
3731  break;
3732  } else {
3733  x[i] = 0;
3734  pointer -= (bound[i] - 1) * N;
3735  }
3736  }
3737  N *= bound[i];
3738  }
3739  return *this;
3740 }
3741 
// Current linear position within the grid.
multivariate_index::operator size_t() { return pointer; }

// Current coordinate along dimension i.
size_t multivariate_index::index(size_t i) { return x[i]; }

// Full coordinate vector.
std::vector<size_t> multivariate_index::index() { return x; }

// Mutable access to the active-flag of dimension i.
std::vector<bool>::reference multivariate_index::mask(size_t i) {
  return mask_[i];
}

// Replace the whole active-dimension mask (size must match).
void multivariate_index::set_mask(const std::vector<bool> &mask) {
  TMBAD_ASSERT(mask.size() == mask_.size());
  mask_ = mask;
}
3756 
// Number of random effects in this clique.
size_t clique::clique_size() { return indices.size(); }

clique::clique() {}

// Keep only the clique members selected by 'mask'.
void clique::subset_inplace(const std::vector<bool> &mask) {
  indices = subset(indices, mask);
  dim = subset(dim, mask);
}

// Allocate the log-sum table covering the clique's full grid.
void clique::logsum_init() { logsum.resize(prod_int(dim)); }

bool clique::empty() const { return (indices.size() == 0); }

// True if random effect i is a member of this clique.
bool clique::contains(Index i) {
  bool ans = false;
  for (size_t j = 0; j < indices.size(); j++) ans |= (i == indices[j]);
  return ans;
}
3775 
// Scatter this clique's tabulated 'logsum' into the grid of the enclosing
// clique 'super' and report, for integration variable 'ind', the offsets
// (one per configuration of the remaining variables) plus the stride along
// 'ind'. The flip()/count() dance toggles which dimensions the multivariate
// counter iterates over -- statement order is significant throughout.
void clique::get_stride(const clique &super, Index ind,
                        std::vector<ad_plain> &offset, Index &stride) {
  // Stride of 'ind' = product of grid sizes of members ordered before it.
  stride = 1;
  for (size_t k = 0; (k < clique_size()) && (indices[k] < ind); k++) {
    stride *= dim[k];
  }

  // Replicate 'logsum' over the dimensions of 'super' not in this clique.
  multivariate_index mv(super.dim);
  size_t nx = mv.count();
  std::vector<bool> mask = lmatch(super.indices, this->indices);
  mask.flip();
  mv.set_mask(mask);
  std::vector<ad_plain> x(nx);
  size_t xa_count = mv.count();
  mv.flip();
  size_t xi_count = mv.count();
  mv.flip();
  TMBAD_ASSERT(x.size() == xa_count * xi_count);
  for (size_t i = 0; i < xa_count; i++, ++mv) {
    mv.flip();
    for (size_t j = 0; j < xi_count; j++, ++mv) {
      TMBAD_ASSERT(logsum[j].on_some_tape());
      x[mv] = logsum[j];
    }
    mv.flip();
  }

  // Walk all configurations with 'ind' held fixed to collect the offsets.
  mv = multivariate_index(super.dim);
  mask = lmatch(super.indices, std::vector<Index>(1, ind));
  mask.flip();
  mv.set_mask(mask);

  xa_count = mv.count();
  offset.resize(xa_count);
  for (size_t i = 0; i < xa_count; i++, ++mv) {
    offset[i] = x[mv];
  }
}
3814 
sr_grid::sr_grid() {}

// Midpoint rule on (a, b): n equidistant points, each with weight h.
sr_grid::sr_grid(Scalar a, Scalar b, size_t n) : x(n), w(n) {
  Scalar h = (b - a) / n;
  for (size_t i = 0; i < n; i++) {
    x[i] = a + h / 2 + i * h;
    w[i] = h;
  }
}
3824 
3825 sr_grid::sr_grid(size_t n) {
3826  for (size_t i = 0; i < n; i++) {
3827  x[i] = i;
3828  w[i] = 1. / (double)n;
3829  }
3830 }
3831 
// Number of grid points.
size_t sr_grid::size() { return x.size(); }

// Lazily tabulate the log-weights as contiguous tape variables and return
// the first one (used as a segment start for strided access).
ad_plain sr_grid::logw_offset() {
  if (logw.size() != w.size()) {
    logw.resize(w.size());
    for (size_t i = 0; i < w.size(); i++) logw[i] = log(w[i]);
    forceContiguous(logw);
  }
  return logw[0];
}
3842 
3844  std::vector<Index> random,
3845  std::vector<sr_grid> grid,
3846  std::vector<Index> random2grid,
3847  bool perm)
3848  : grid(grid),
3849  glob(glob),
3850  random(random),
3851  replay(glob, new_glob),
3852  tinfo(glob, false) {
3853  inv2grid.resize(glob.inv_index.size(), 0);
3854  for (size_t i = 0; i < random2grid.size(); i++) {
3855  inv2grid[random[i]] = random2grid[i];
3856  }
3857 
3858  mark.resize(glob.values.size(), false);
3859  for (size_t i = 0; i < random.size(); i++)
3860  mark[glob.inv_index[random[i]]] = true;
3861  glob.forward(mark);
3862 
3863  forward_graph = glob.forward_graph(mark);
3864  reverse_graph = glob.reverse_graph(mark);
3865 
3866  glob.subgraph_cache_ptr();
3867 
3868  var_remap.resize(glob.values.size());
3869 
3870  op2inv_idx = glob.op2idx(glob.inv_index, NA);
3871  op2dep_idx = glob.op2idx(glob.dep_index, NA);
3872 
3873  if (perm) reorder_random();
3874 
3875  terms_done.resize(glob.dep_index.size(), false);
3876 
3877  std::vector<Index> inv_remap(glob.inv_index.size());
3878  for (size_t i = 0; i < inv_remap.size(); i++) inv_remap[i] = -(i + 1);
3879  for (size_t i = 0; i < random.size(); i++)
3880  inv_remap[random[i]] = inv2grid[random[i]];
3881  inv_remap = radix::factor<Index>(inv_remap);
3882  tinfo.initialize(inv_remap);
3883 }
3884 
3886  std::vector<IndexPair> edges;
3887  std::vector<Index> &inv2op = forward_graph.inv2op;
3888 
3889  for (size_t i = 0; i < random.size(); i++) {
3890  std::vector<Index> subgraph(1, inv2op[random[i]]);
3891  forward_graph.search(subgraph);
3892  reverse_graph.search(subgraph);
3893  for (size_t l = 0; l < subgraph.size(); l++) {
3894  Index inv_other = op2inv_idx[subgraph[l]];
3895  if (inv_other != NA) {
3896  IndexPair edge(random[i], inv_other);
3897  edges.push_back(edge);
3898  }
3899  }
3900  }
3901 
3902  size_t num_nodes = glob.inv_index.size();
3903  graph G(num_nodes, edges);
3904 
3905  std::vector<bool> visited(num_nodes, false);
3906  std::vector<Index> subgraph;
3907  for (size_t i = 0; i < random.size(); i++) {
3908  if (visited[random[i]]) continue;
3909  std::vector<Index> sg(1, random[i]);
3910  G.search(sg, visited, false, false);
3911  subgraph.insert(subgraph.end(), sg.begin(), sg.end());
3912  }
3913  std::reverse(subgraph.begin(), subgraph.end());
3914  TMBAD_ASSERT(random.size() == subgraph.size());
3915  random = subgraph;
3916 }
3917 
3918 std::vector<size_t> sequential_reduction::get_grid_bounds(
3919  std::vector<Index> inv_index) {
3920  std::vector<size_t> ans(inv_index.size());
3921  for (size_t i = 0; i < inv_index.size(); i++) {
3922  ans[i] = grid[inv2grid[inv_index[i]]].size();
3923  }
3924  return ans;
3925 }
3926 
3927 std::vector<sr_grid *> sequential_reduction::get_grid(
3928  std::vector<Index> inv_index) {
3929  std::vector<sr_grid *> ans(inv_index.size());
3930  for (size_t i = 0; i < inv_index.size(); i++) {
3931  ans[i] = &(grid[inv2grid[inv_index[i]]]);
3932  }
3933  return ans;
3934 }
3935 
// Evaluate dependent variable 'dep_index' on the full cartesian grid of the
// random effects 'inv_index'. Results are cached per term id when the same
// term structure occurs more than once.
std::vector<ad_aug> sequential_reduction::tabulate(std::vector<Index> inv_index,
                                                   Index dep_index) {
  size_t id = tinfo.id[dep_index];
  size_t count = tinfo.count[id];
  // Only cache terms that occur at least twice.
  bool do_cache = (count >= 2);
  if (do_cache) {
    if (cache[id].size() > 0) {
      return cache[id];
    }
  }

  std::vector<sr_grid *> inv_grid = get_grid(inv_index);
  std::vector<size_t> grid_bounds = get_grid_bounds(inv_index);
  multivariate_index mv(grid_bounds);
  std::vector<ad_aug> ans(mv.count());
  // Replay the term's subgraph once per grid point.
  for (size_t i = 0; i < ans.size(); i++, ++mv) {
    for (size_t j = 0; j < inv_index.size(); j++) {
      replay.value_inv(inv_index[j]) = inv_grid[j]->x[mv.index(j)];
    }
    replay.forward_sub();
    ans[i] = replay.value_dep(dep_index);
  }

  // Tabulated values must be contiguous on the tape (accessed via strides).
  forceContiguous(ans);
  if (do_cache) {
    cache[id] = ans;
  }
  return ans;
}
3965 
3967  std::vector<Index> super;
3968  size_t c = 0;
3969  for (std::list<clique>::iterator it = cliques.begin(); it != cliques.end();
3970  ++it) {
3971  if ((*it).contains(i)) {
3972  super.insert(super.end(), (*it).indices.begin(), (*it).indices.end());
3973  c++;
3974  }
3975  }
3976  sort_unique_inplace(super);
3977 
3978  std::vector<std::vector<ad_plain> > offset_by_clique(c);
3979  std::vector<Index> stride_by_clique(c);
3980  clique C;
3981  C.indices = super;
3982  C.dim = get_grid_bounds(super);
3983  std::list<clique>::iterator it = cliques.begin();
3984  c = 0;
3985  while (it != cliques.end()) {
3986  if ((*it).contains(i)) {
3987  (*it).get_stride(C, i, offset_by_clique[c], stride_by_clique[c]);
3988  it = cliques.erase(it);
3989  c++;
3990  } else {
3991  ++it;
3992  }
3993  }
3994 
3995  std::vector<bool> mask = lmatch(super, std::vector<Index>(1, i));
3996  mask.flip();
3997  C.subset_inplace(mask);
3998  C.logsum_init();
3999 
4000  grid[inv2grid[i]].logw_offset();
4001  size_t v_begin = get_glob()->values.size();
4002  for (size_t j = 0; j < C.logsum.size(); j++) {
4003  std::vector<ad_plain> x;
4004  std::vector<Index> stride;
4005  for (size_t k = 0; k < offset_by_clique.size(); k++) {
4006  x.push_back(offset_by_clique[k][j]);
4007  stride.push_back(stride_by_clique[k]);
4008  }
4009 
4010  x.push_back(grid[inv2grid[i]].logw_offset());
4011  stride.push_back(1);
4012  C.logsum[j] = logspace_sum_stride(x, stride, grid[inv2grid[i]].size());
4013  }
4014  size_t v_end = get_glob()->values.size();
4015  TMBAD_ASSERT(v_end - v_begin == C.logsum.size());
4016 
4017  cliques.push_back(C);
4018 }
4019 
4021  const std::vector<Index> &inv2op = forward_graph.inv2op;
4022 
4023  Index start_node = inv2op[i];
4024  std::vector<Index> subgraph(1, start_node);
4025  forward_graph.search(subgraph);
4026 
4027  std::vector<Index> dep_clique;
4028  std::vector<Index> subgraph_terms;
4029  for (size_t k = 0; k < subgraph.size(); k++) {
4030  Index node = subgraph[k];
4031  Index dep_idx = op2dep_idx[node];
4032  if (dep_idx != NA && !terms_done[dep_idx]) {
4033  terms_done[dep_idx] = true;
4034  subgraph_terms.push_back(node);
4035  dep_clique.push_back(dep_idx);
4036  }
4037  }
4038  for (size_t k = 0; k < subgraph_terms.size(); k++) {
4039  subgraph.resize(0);
4040  subgraph.push_back(subgraph_terms[k]);
4041 
4042  reverse_graph.search(subgraph);
4043 
4044  std::vector<Index> inv_clique;
4045  for (size_t l = 0; l < subgraph.size(); l++) {
4046  Index tmp = op2inv_idx[subgraph[l]];
4047  if (tmp != NA) inv_clique.push_back(tmp);
4048  }
4049 
4050  glob.subgraph_seq = subgraph;
4051 
4052  clique C;
4053  C.indices = inv_clique;
4054  C.dim = get_grid_bounds(inv_clique);
4055  C.logsum = tabulate(inv_clique, dep_clique[k]);
4056 
4057  cliques.push_back(C);
4058  }
4059 
4060  merge(i);
4061 }
4062 
// Debug helper: print the index sets of all current cliques.
void sequential_reduction::show_cliques() {
  Rcout << "Cliques: ";
  std::list<clique>::iterator it;
  for (it = cliques.begin(); it != cliques.end(); ++it) {
    Rcout << it->indices << " ";
  }
  Rcout << "\n";
}
4071 
4072 void sequential_reduction::update_all() {
4073  for (size_t i = 0; i < random.size(); i++) update(random[i]);
4074 }
4075 
// Total result: fully reduced cliques (each collapsed to a single log-sum)
// plus all terms that were never touched by the reduction.
ad_aug sequential_reduction::get_result() {
  ad_aug ans = 0;
  std::list<clique>::iterator it;
  for (it = cliques.begin(); it != cliques.end(); ++it) {
    TMBAD_ASSERT(it->clique_size() == 0);
    TMBAD_ASSERT(it->logsum.size() == 1);
    ans += it->logsum[0];
  }

  // Terms not handled by the reduction are added directly.
  for (size_t i = 0; i < terms_done.size(); i++) {
    if (!terms_done[i]) ans += replay.value_dep(i);
  }
  return ans;
}
4090 
// Build and return a new tape evaluating the marginal (all selected random
// effects integrated out via the sequential reduction).
global sequential_reduction::marginal() {
  replay.start();
  replay.forward(true, false);
  update_all();
  ad_aug ans = get_result();
  ans.Dependent();
  replay.stop();
  return new_glob;
}
4100 
// Prepare parallelization of 'glob' over 'num_threads' threads; the reverse
// graph drives the work decomposition performed later in run().
autopar::autopar(global &glob, size_t num_threads)
    : glob(glob),
      num_threads(num_threads),
      do_aggregate(false),
      keep_all_inv(false) {
  reverse_graph = glob.reverse_graph();
}
4108 
// For each dependent variable, the maximal dependency-chain length ending at
// the operator that produces it. Used as a work-ordering heuristic in run().
std::vector<size_t> autopar::max_tree_depth() {
  std::vector<Index> max_tree_depth(glob.opstack.size(), 0);
  Dependencies dep;
  Args<> args(glob.inputs);
  // Single forward sweep: depth(op) = 1 + max depth over its dependencies.
  for (size_t i = 0; i < glob.opstack.size(); i++) {
    dep.resize(0);
    glob.opstack[i]->dependencies(args, dep);
    for (size_t j = 0; j < dep.size(); j++) {
      max_tree_depth[i] = std::max(max_tree_depth[i], max_tree_depth[dep[j]]);
    }

    max_tree_depth[i]++;

    // Advance argument pointers to the next operator.
    glob.opstack[i]->increment(args.ptr);
  }
  std::vector<size_t> ans(glob.dep_index.size());
  for (size_t j = 0; j < glob.dep_index.size(); j++) {
    ans[j] = max_tree_depth[glob.dep_index[j]];
  }
  return ans;
}
4130 
// Partition the dependent variables over 'num_threads' threads, trying to
// balance the amount of not-yet-visited work each term triggers.
void autopar::run() {
  // Process terms in order of decreasing tree depth.
  std::vector<size_t> ord = order(max_tree_depth());
  std::reverse(ord.begin(), ord.end());
  std::vector<bool> visited(glob.opstack.size(), false);
  std::vector<Index> start;
  std::vector<Index> dWork(ord.size());
  // Marginal work of each term = number of operators newly visited by its
  // reverse-graph search.
  for (size_t i = 0; i < ord.size(); i++) {
    start.resize(1);
    start[0] = reverse_graph.dep2op[ord[i]];
    reverse_graph.search(start, visited, false, false);
    dWork[i] = start.size();
    if (false) {
      for (size_t k = 0; k < start.size(); k++) {
        Rcout << glob.opstack[start[k]]->op_name() << " ";
      }
      Rcout << "\n";
    }
  }

  // Greedy assignment: a term stays with its predecessor's thread when it is
  // (almost) free, otherwise it goes to the currently least loaded thread.
  std::vector<size_t> thread_assign(ord.size(), 0);
  std::vector<size_t> work_by_thread(num_threads, 0);
  for (size_t i = 0; i < dWork.size(); i++) {
    if (i == 0) {
      thread_assign[i] = 0;
    } else {
      if (dWork[i] <= 1)
        thread_assign[i] = thread_assign[i - 1];
      else
        thread_assign[i] = which_min(work_by_thread);
    }
    work_by_thread[thread_assign[i]] += dWork[i];
  }

  // Collect, per thread, the operators producing its dependent variables.
  node_split.resize(num_threads);
  for (size_t i = 0; i < ord.size(); i++) {
    node_split[thread_assign[i]].push_back(reverse_graph.dep2op[ord[i]]);
  }

  // Expand each thread's node set to its full reverse dependency closure.
  for (size_t i = 0; i < num_threads; i++) {
    if (keep_all_inv)
      node_split[i].insert(node_split[i].begin(), reverse_graph.inv2op.begin(),
                           reverse_graph.inv2op.end());
    reverse_graph.search(node_split[i]);
  }
}
4176 
4178  vglob.resize(num_threads);
4179  inv_idx.resize(num_threads);
4180  dep_idx.resize(num_threads);
4181  std::vector<Index> tmp;
4182  for (size_t i = 0; i < num_threads; i++) {
4183  glob.subgraph_seq = node_split[i];
4184  vglob[i] = glob.extract_sub(tmp);
4185  if (do_aggregate) aggregate(vglob[i]);
4186  }
4187 
4188  Index NA = -1;
4189  std::vector<Index> op2inv_idx = glob.op2idx(glob.inv_index, NA);
4190  std::vector<Index> op2dep_idx = glob.op2idx(glob.dep_index, NA);
4191  for (size_t i = 0; i < num_threads; i++) {
4192  std::vector<Index> &seq = node_split[i];
4193  for (size_t j = 0; j < seq.size(); j++) {
4194  if (op2inv_idx[seq[j]] != NA) inv_idx[i].push_back(op2inv_idx[seq[j]]);
4195  if (op2dep_idx[seq[j]] != NA) dep_idx[i].push_back(op2dep_idx[seq[j]]);
4196  }
4197  if (do_aggregate) {
4198  dep_idx[i].resize(1);
4199  dep_idx[i][0] = i;
4200  }
4201  }
4202 }
4203 
4204 size_t autopar::input_size() const { return glob.inv_index.size(); }
4205 
4206 size_t autopar::output_size() const {
4207  return (do_aggregate ? num_threads : glob.dep_index.size());
4208 }
4209 
4210 Index ParalOp::input_size() const { return n; }
4211 
4212 Index ParalOp::output_size() const { return m; }
4213 
4214 ParalOp::ParalOp(const autopar &ap)
4215  : vglob(ap.vglob),
4216  inv_idx(ap.inv_idx),
4217  dep_idx(ap.dep_idx),
4218  n(ap.input_size()),
4219  m(ap.output_size()) {}
4220 
4221 void ParalOp::forward(ForwardArgs<Scalar> &args) {
4222  size_t num_threads = vglob.size();
4223 
4224 #ifdef _OPENMP
4225 #pragma omp parallel for
4226 #endif
4227 
4228  for (size_t i = 0; i < num_threads; i++) {
4229  for (size_t j = 0; j < inv_idx[i].size(); j++) {
4230  vglob[i].value_inv(j) = args.x(inv_idx[i][j]);
4231  }
4232  vglob[i].forward();
4233  }
4234 
4235  for (size_t i = 0; i < num_threads; i++) {
4236  for (size_t j = 0; j < dep_idx[i].size(); j++) {
4237  args.y(dep_idx[i][j]) = vglob[i].value_dep(j);
4238  }
4239  }
4240 }
4241 
4242 void ParalOp::reverse(ReverseArgs<Scalar> &args) {
4243  size_t num_threads = vglob.size();
4244 
4245 #ifdef _OPENMP
4246 #pragma omp parallel for
4247 #endif
4248 
4249  for (size_t i = 0; i < num_threads; i++) {
4250  vglob[i].clear_deriv();
4251  for (size_t j = 0; j < dep_idx[i].size(); j++) {
4252  vglob[i].deriv_dep(j) = args.dy(dep_idx[i][j]);
4253  }
4254  vglob[i].reverse();
4255  }
4256 
4257  for (size_t i = 0; i < num_threads; i++) {
4258  for (size_t j = 0; j < inv_idx[i].size(); j++) {
4259  args.dx(inv_idx[i][j]) += vglob[i].deriv_inv(j);
4260  }
4261  }
4262 }
4263 
4264 const char *ParalOp::op_name() { return "ParalOp"; }
4265 
4266 void ParalOp::print(global::print_config cfg) {
4267  size_t num_threads = vglob.size();
4268  for (size_t i = 0; i < num_threads; i++) {
4269  global::print_config cfg2 = cfg;
4270  std::stringstream ss;
4271  ss << i;
4272  std::string str = ss.str();
4273  cfg2.prefix = cfg2.prefix + str;
4274  vglob[i].print(cfg2);
4275  }
4276 }
4277 
4278 std::vector<Index> get_likely_expression_duplicates(
4279  const global &glob, std::vector<Index> inv_remap) {
4280  global::hash_config cfg;
4281  cfg.strong_inv = true;
4282  cfg.strong_const = true;
4283  cfg.strong_output = true;
4284  cfg.reduce = false;
4285  cfg.deterministic = false;
4286  cfg.inv_seed = inv_remap;
4287  std::vector<hash_t> h = glob.hash_sweep(cfg);
4288  return radix::first_occurance<Index>(h);
4289 }
4290 
4291 bool all_allow_remap(const global &glob) {
4292  Args<> args(glob.inputs);
4293  for (size_t i = 0; i < glob.opstack.size(); i++) {
4294  op_info info = glob.opstack[i]->info();
4295  if (!info.test(op_info::allow_remap)) {
4296  return false;
4297  }
4298  glob.opstack[i]->increment(args.ptr);
4299  }
4300  return true;
4301 }
4302 
4304  global &glob, std::vector<Index> inv_remap) {
4305  std::vector<Index> remap = get_likely_expression_duplicates(glob, inv_remap);
4306 
4307  for (size_t i = 0; i < glob.inv_index.size(); i++) {
4308  bool accept = false;
4309  Index var_i = glob.inv_index[i];
4310  if (inv_remap.size() > 0) {
4311  Index j = inv_remap[i];
4312  Index var_j = glob.inv_index[j];
4313  accept = remap[var_i] == remap[var_j];
4314  }
4315  if (!accept) remap[var_i] = var_i;
4316  }
4317 
4318  std::vector<Index> v2o = glob.var2op();
4319  std::vector<Index> dep;
4320  global::OperatorPure *invop = glob.getOperator<global::InvOp>();
4321  Dependencies dep1;
4322  Dependencies dep2;
4323  size_t reject = 0;
4324  size_t total = 0;
4325  Args<> args(glob.inputs);
4326 
4327  for (size_t j = 0, i = 0, nout = 0; j < glob.opstack.size(); j++, i += nout) {
4328  nout = glob.opstack[j]->output_size();
4329  bool any_remap = false;
4330  for (size_t k = i; k < i + nout; k++) {
4331  if (remap[k] != k) {
4332  any_remap = true;
4333  break;
4334  }
4335  }
4336  if (any_remap) {
4337  bool ok = true;
4338  total += nout;
4339 
4340  global::OperatorPure *CurOp = glob.opstack[v2o[i]];
4341  global::OperatorPure *RemOp = glob.opstack[v2o[remap[i]]];
4342  ok &= (CurOp->identifier() == RemOp->identifier());
4343 
4344  ok &= (CurOp->input_size() == RemOp->input_size());
4345  ok &= (CurOp->output_size() == RemOp->output_size());
4346 
4347  op_info CurInfo = CurOp->info();
4348 
4349  if (ok && (nout > 1)) {
4350  for (size_t k = 1; k < nout; k++) {
4351  ok &= (remap[i + k] < i);
4352 
4353  ok &= (v2o[remap[i + k]] == v2o[remap[i]]);
4354 
4355  ok &= (remap[i + k] == remap[i] + k);
4356  }
4357  }
4358 
4359  if (CurOp == invop) {
4360  ok = false;
4361  }
4362  if (ok) {
4363  if (CurInfo.test(op_info::is_constant)) {
4364  if (glob.values[i] != glob.values[remap[i]]) {
4365  ok = false;
4366  }
4367  }
4368  }
4369 
4370  if (ok) {
4371  glob.subgraph_cache_ptr();
4372 
4373  args.ptr = glob.subgraph_ptr[v2o[i]];
4374  dep1.resize(0);
4375  glob.opstack[v2o[i]]->dependencies(args, dep1);
4376 
4377  args.ptr = glob.subgraph_ptr[v2o[remap[i]]];
4378  dep2.resize(0);
4379  glob.opstack[v2o[remap[i]]]->dependencies(args, dep2);
4380 
4381  ok = (dep1.size() == dep2.size());
4382  if (ok) {
4383  bool all_equal = true;
4384  for (size_t j = 0; j < dep1.size(); j++) {
4385  all_equal &= (remap[dep1[j]] == remap[dep2[j]]);
4386  }
4387  ok = all_equal;
4388  }
4389  }
4390 
4391  if (!ok) {
4392  reject += nout;
4393  for (size_t k = i; k < i + nout; k++) remap[k] = k;
4394  }
4395  }
4396  }
4397 
4398  for (size_t i = 0; i < remap.size(); i++) {
4399  TMBAD_ASSERT(remap[i] <= i);
4400  TMBAD_ASSERT(remap[remap[i]] == remap[i]);
4401  }
4402 
4403  if (true) {
4404  Args<> args(glob.inputs);
4405  intervals<Index> visited;
4406  for (size_t i = 0; i < glob.opstack.size(); i++) {
4407  op_info info = glob.opstack[i]->info();
4408  if (!info.test(op_info::allow_remap)) {
4409  Dependencies dep;
4410  glob.opstack[i]->dependencies(args, dep);
4411  for (size_t j = 0; j < dep.I.size(); j++) {
4412  visited.insert(dep.I[j].first, dep.I[j].second);
4413  }
4414  }
4415  glob.opstack[i]->increment(args.ptr);
4416  }
4417 
4418  forbid_remap<std::vector<Index> > fb(remap);
4419  visited.apply(fb);
4420  }
4421  if (reject > 0) {
4422  ((void)(total));
4423  }
4424 
4425  return remap;
4426 }
4427 
4429  std::vector<Index> inv_remap(0);
4430  std::vector<Index> remap = remap_identical_sub_expressions(glob, inv_remap);
4431 
4432  for (size_t i = 0; i < glob.inputs.size(); i++) {
4433  glob.inputs[i] = remap[glob.inputs[i]];
4434  }
4435 }
4436 
4437 std::vector<Position> inv_positions(global &glob) {
4438  IndexPair ptr(0, 0);
4439  std::vector<bool> independent_variable = glob.inv_marks();
4440  std::vector<Position> ans(glob.inv_index.size());
4441  size_t k = 0;
4442  for (size_t i = 0; i < glob.opstack.size(); i++) {
4443  Index nout = glob.opstack[i]->output_size();
4444  for (Index j = 0; j < nout; j++) {
4445  if (independent_variable[ptr.second + j]) {
4446  ans[k].node = i;
4447  ans[k].ptr = ptr;
4448  k++;
4449  }
4450  }
4451  glob.opstack[i]->increment(ptr);
4452  }
4453  return ans;
4454 }
4455 
4456 void reorder_graph(global &glob, std::vector<Index> inv_idx) {
4457  if (!all_allow_remap(glob)) return;
4458  for (size_t i = 1; i < inv_idx.size(); i++) {
4459  TMBAD_ASSERT(inv_idx[i] > inv_idx[i - 1]);
4460  }
4461  std::vector<bool> marks(glob.values.size(), false);
4462  for (size_t i = 0; i < inv_idx.size(); i++)
4463  marks[glob.inv_index[inv_idx[i]]] = true;
4464  glob.forward_dense(marks);
4465  if (false) {
4466  int c = std::count(marks.begin(), marks.end(), true);
4467  Rcout << "marked proportion:" << (double)c / (double)marks.size() << "\n";
4468  }
4469 
4470  marks.flip();
4471  glob.set_subgraph(marks);
4472  marks.flip();
4473  glob.set_subgraph(marks, true);
4474  glob = glob.extract_sub();
4475 }
4476 } // namespace TMBad
4477 // Autogenerated - do not edit by hand !
4478 #include "integrate.hpp"
4479 namespace TMBad {
4480 
/** \brief Identity on plain doubles (terminates the AD `value()` recursion). */
double value(double x) { return x; }
4482 
4483 control::control(int subdivisions_, double reltol_, double abstol_)
4484  : subdivisions(subdivisions_), reltol(reltol_), abstol(abstol_) {}
4485 } // namespace TMBad
4486 // Autogenerated - do not edit by hand !
4487 #include "radix.hpp"
4488 namespace TMBad {}
4489 // Autogenerated - do not edit by hand !
4490 #include "tmbad_allow_comparison.hpp"
4491 namespace TMBad {
4492 
4493 bool operator<(const ad_aug &x, const ad_aug &y) {
4494  return x.Value() < y.Value();
4495 }
4496 bool operator<(const Scalar &x, const ad_aug &y) { return x < y.Value(); }
4497 
4498 bool operator<=(const ad_aug &x, const ad_aug &y) {
4499  return x.Value() <= y.Value();
4500 }
4501 bool operator<=(const Scalar &x, const ad_aug &y) { return x <= y.Value(); }
4502 
4503 bool operator>(const ad_aug &x, const ad_aug &y) {
4504  return x.Value() > y.Value();
4505 }
4506 bool operator>(const Scalar &x, const ad_aug &y) { return x > y.Value(); }
4507 
4508 bool operator>=(const ad_aug &x, const ad_aug &y) {
4509  return x.Value() >= y.Value();
4510 }
4511 bool operator>=(const Scalar &x, const ad_aug &y) { return x >= y.Value(); }
4512 
4513 bool operator==(const ad_aug &x, const ad_aug &y) {
4514  return x.Value() == y.Value();
4515 }
4516 bool operator==(const Scalar &x, const ad_aug &y) { return x == y.Value(); }
4517 
4518 bool operator!=(const ad_aug &x, const ad_aug &y) {
4519  return x.Value() != y.Value();
4520 }
4521 bool operator!=(const Scalar &x, const ad_aug &y) { return x != y.Value(); }
4522 } // namespace TMBad
4523 // Autogenerated - do not edit by hand !
4524 #include "vectorize.hpp"
4525 namespace TMBad {
4526 
4527 VSumOp::VSumOp(size_t n) : n(n) {}
4528 
4529 void VSumOp::dependencies(Args<> &args, Dependencies &dep) const {
4530  dep.add_segment(args.input(0), n);
4531 }
4532 
4533 void VSumOp::forward(ForwardArgs<Writer> &args) { TMBAD_ASSERT(false); }
4534 
4535 void VSumOp::reverse(ReverseArgs<Writer> &args) { TMBAD_ASSERT(false); }
4536 
4537 const char *VSumOp::op_name() { return "VSumOp"; }
4538 
4539 ad_aug sum(ad_segment x) {
4540  global::Complete<VSumOp> F(x.size());
4541  return F(x)[0];
4542 }
4543 
4544 Scalar *SegmentRef::value_ptr() { return (*glob_ptr).values.data() + offset; }
4545 
4546 Scalar *SegmentRef::deriv_ptr() { return (*glob_ptr).derivs.data() + offset; }
4547 
4548 SegmentRef::SegmentRef() {}
4549 
4550 SegmentRef::SegmentRef(const Scalar *x) {
4551  SegmentRef *sx = (SegmentRef *)x;
4552  *this = *sx;
4553 }
4554 
4555 SegmentRef::SegmentRef(global *g, Index o, Index s)
4556  : glob_ptr(g), offset(o), size(s) {}
4557 
4558 SegmentRef::SegmentRef(const ad_segment &x) {
4559  static const size_t K = ScalarPack<SegmentRef>::size;
4560  TMBAD_ASSERT(x.size() == K);
4561  Scalar buf[K];
4562  for (size_t i = 0; i < K; i++) buf[i] = x[i].Value();
4563  SegmentRef *sx = (SegmentRef *)buf;
4564  *this = *sx;
4565 }
4566 
4567 bool SegmentRef::isNull() { return (glob_ptr == NULL); }
4568 
4569 void SegmentRef::resize(ad_segment &pack, Index n) {
4570  Index i = pack.index();
4571  SegmentRef *p = (SegmentRef *)(get_glob()->values.data() + i);
4572  p->size = n;
4573 }
4574 
4575 PackOp::PackOp(const Index n) : n(n) {}
4576 
4578  SegmentRef *y = (SegmentRef *)args.y_ptr(0);
4579  y[0] = SegmentRef(args.glob_ptr, args.input(0), n);
4580 }
4581 
4583  ad_segment x(args.x_ptr(0), n);
4584  args.y_segment(0, K) = pack(x);
4585 }
4586 
4588  SegmentRef tmp(args.dy_ptr(0));
4589  if (tmp.glob_ptr != NULL) {
4590  Scalar *dx = SegmentRef(args.y_ptr(0)).deriv_ptr();
4591  Scalar *dy = SegmentRef(args.dy_ptr(0)).deriv_ptr();
4592  for (Index i = 0; i < n; i++) dx[i] += dy[i];
4593  }
4594 }
4595 
4597  ad_segment dy_packed(args.dy_ptr(0), K);
4598 
4599  if (SegmentRef(dy_packed).isNull()) {
4600  SegmentRef().resize(dy_packed, n);
4601  }
4602  ad_segment dy = unpack(dy_packed);
4603  ad_segment dx(args.dx_ptr(0), n, true);
4604  dx += dy;
4605  Replay *pdx = args.dx_ptr(0);
4606  for (Index i = 0; i < n; i++) pdx[i] = dx[i];
4607 }
4608 
4609 const char *PackOp::op_name() { return "PackOp"; }
4610 
4611 void PackOp::dependencies(Args<> &args, Dependencies &dep) const {
4612  dep.add_segment(args.input(0), n);
4613 }
4614 
4615 UnpkOp::UnpkOp(const Index n) : noutput(n) {}
4616 
4618  Scalar *y = args.y_ptr(0);
4619  SegmentRef srx(args.x_ptr(0));
4620  if (srx.isNull()) {
4621  for (Index i = 0; i < noutput; i++) y[i] = 0;
4622  return;
4623  }
4624  Scalar *x = srx.value_ptr();
4625  for (Index i = 0; i < noutput; i++) y[i] = x[i];
4626 
4627  ((SegmentRef *)args.x_ptr(0))->glob_ptr = NULL;
4628 }
4629 
4631  SegmentRef *dx = (SegmentRef *)args.dx_ptr(0);
4632  dx[0] = SegmentRef(args.glob_ptr, args.output(0), noutput);
4633 }
4634 
4636  ad_segment dy(args.dy_ptr(0), noutput);
4637  ad_segment dy_packed = pack(dy);
4638  Replay *pdx = args.dx_ptr(0);
4639  for (Index i = 0; i < dy_packed.size(); i++) pdx[i] = dy_packed[i];
4640 }
4641 
4642 const char *UnpkOp::op_name() { return "UnpkOp"; }
4643 
4644 void UnpkOp::dependencies(Args<> &args, Dependencies &dep) const {
4645  dep.add_segment(args.input(0), K);
4646 }
4647 
4649  global::Complete<PackOp> F(x.size());
4650  return F(x);
4651 }
4652 
4654  Index n = SegmentRef(x).size;
4656  return op(x);
4657 }
4658 
4659 Scalar *unpack(const std::vector<Scalar> &x, Index j) {
4660  Index K = ScalarPack<SegmentRef>::size;
4661  SegmentRef sr(&(x[j * K]));
4662  return sr.value_ptr();
4663 }
4664 
4665 std::vector<ad_aug> concat(const std::vector<ad_segment> &x) {
4666  std::vector<ad_aug> ans;
4667  for (size_t i = 0; i < x.size(); i++) {
4668  ad_segment xi = x[i];
4669  for (size_t j = 0; j < xi.size(); j++) {
4670  ans.push_back(xi[j]);
4671  }
4672  }
4673  return ans;
4674 }
4675 } // namespace TMBad
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:157
std::vector< Index > op2var(const std::vector< Index > &seq)
Get variables produced by a node sequence.
Definition: TMBad.cpp:1435
std::vector< T > subset(const std::vector< T > &x, const std::vector< bool > &y)
Vector subset by boolean mask.
graph reverse_graph(std::vector< bool > keep_var=std::vector< bool >(0))
Construct operator graph with reverse connections.
Definition: TMBad.cpp:1584
diff --git a/TMBad_8hpp_source.html b/TMBad_8hpp_source.html index 76423041e..b9bdcc829 100644 --- a/TMBad_8hpp_source.html +++ b/TMBad_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
@@ -73,7 +73,7 @@
TMBad.hpp
-
1 #ifndef HAVE_TMBAD_HPP
2 #define HAVE_TMBAD_HPP
3 // Autogenerated - do not edit by hand !
4 #include "checkpoint.hpp"
5 #include "global.hpp"
6 #include "graph_transform.hpp"
7 
8 namespace TMBad {
9 
10 template <class ADFun>
11 struct Sparse;
12 template <class ADFun>
13 struct Decomp2;
14 template <class ADFun>
15 struct Decomp3;
16 
17 namespace {
18 
/** \brief Exclusive (shifted) cumulative sum of a boolean mask.
    Returns y with y[0] == 0 and y[i] == number of true entries among
    x[0..i-1]. An empty input yields an empty result. */
template <class I>
std::vector<I> cumsum0(const std::vector<bool> &x) {
  std::vector<I> y(x.size(), 0);
  for (size_t i = 1; i < x.size(); i++) {
    y[i] = y[i - 1] + x[i - 1];
  }
  return y;
}
27 } // namespace
28 
/** \brief Adapter exposing a functor with a custom vector interface
    through plain `std::vector` in/out.
    `InterfaceVector` must be constructible from `std::vector<T>`,
    convertible back, and expose `value_type`. A functor returning a
    scalar is promoted to a length-one vector. The wrapped functor is
    held by reference and must outlive the wrapper. */
template <class Functor, class InterfaceVector>
struct StdWrap {
  Functor &F;  // wrapped functor (by reference)
  typedef typename InterfaceVector::value_type Scalar;
  // Vectors pass through unchanged.
  InterfaceVector tovec(const InterfaceVector &x) { return x; }
  // Promote a scalar result to a length-one vector.
  InterfaceVector tovec(const Scalar &x) {
    InterfaceVector y(1);
    y[0] = x;
    return y;
  }
  StdWrap(Functor &F) : F(F) {}
  /** \brief Apply the wrapped functor: std::vector in, std::vector out. */
  template <class T>
  std::vector<T> operator()(const std::vector<T> &x) {
    InterfaceVector xi(x);
    InterfaceVector yi = tovec(F(xi));
    std::vector<T> y(yi);
    return y;
  }
};
75 
79  bool compress;
80  bool index_remap;
81 };
82 
116 template <class ad = ad_aug>
117 struct ADFun {
118  global glob;
119 
121  template <class Functor, class ScalarVector>
122  ADFun(Functor F, const ScalarVector &x_) : force_update_flag(false) {
123  std::vector<ad> x(x_.size());
124  for (size_t i = 0; i < x.size(); i++) x[i] = Value(x_[i]);
125  global *glob_begin = get_glob();
126  this->glob.ad_start();
127  Independent(x);
128  std::vector<ad> y = F(x);
129  Dependent(y);
130  this->glob.ad_stop();
131  global *glob_end = get_glob();
132  TMBAD_ASSERT(glob_begin == glob_end);
133  }
134 
138  template <class Functor>
139  ADFun(Functor F, Scalar x0_) : force_update_flag(false) {
140  global *glob_begin = get_glob();
141  this->glob.ad_start();
142  ad x0(x0_);
143  x0.Independent();
144  ad y0 = F(x0);
145  y0.Dependent();
146  this->glob.ad_stop();
147  global *glob_end = get_glob();
148  TMBAD_ASSERT(glob_begin == glob_end);
149  }
150 
154  template <class Functor>
155  ADFun(Functor F, Scalar x0_, Scalar x1_) : force_update_flag(false) {
156  global *glob_begin = get_glob();
157  this->glob.ad_start();
158  ad x0(x0_);
159  x0.Independent();
160  ad x1(x1_);
161  x1.Independent();
162  ad y0 = F(x0, x1);
163  y0.Dependent();
164  this->glob.ad_stop();
165  global *glob_end = get_glob();
166  TMBAD_ASSERT(glob_begin == glob_end);
167  }
168 
169  ADFun() : force_update_flag(false) {}
170 
171  void forward() { glob.forward(); }
172  void reverse() { glob.reverse(); }
173  void clear_deriv() { glob.clear_deriv(); }
174  Scalar &deriv_inv(Index i) { return glob.deriv_inv(i); }
175  Scalar &deriv_dep(Index i) { return glob.deriv_dep(i); }
176 
178  void print(print_config cfg = print_config()) { glob.print(cfg); }
179 
181  void eliminate() { glob.eliminate(); }
182 
195  void optimize() {
196  TMBAD_ASSERT2(inv_pos.size() == 0,
197  "Tape has 'cached independent variable positions' which "
198  "would be invalidated by the optimizer");
199 
200  std::vector<bool> outer_mask;
201  if (inner_outer_in_use()) {
202  outer_mask = DomainOuterMask();
203  }
204 
206 
207  glob.eliminate();
208 
209  if (inner_outer_in_use()) {
210  TMBAD_ASSERT(outer_mask.size() == Domain());
211  set_inner_outer(*this, outer_mask);
212  }
213  }
223  std::vector<Position> pos = inv_positions(glob);
224  inv_pos = subset(pos, invperm(order(glob.inv_index)));
225  }
237  void reorder(std::vector<Index> last) {
238  std::vector<bool> outer_mask;
239  if (inner_outer_in_use()) {
240  outer_mask = DomainOuterMask();
241  }
242  reorder_graph(glob, last);
243 
244  if (inner_outer_in_use()) {
245  TMBAD_ASSERT(outer_mask.size() == Domain());
246  set_inner_outer(*this, outer_mask);
247  }
248  set_inv_positions();
249  }
250 
251  size_t Domain() const { return glob.inv_index.size(); }
252  size_t Range() const { return glob.dep_index.size(); }
254  std::vector<bool> activeDomain() {
255  std::vector<bool> mark(glob.values.size(), false);
256  for (size_t i = 0; i < glob.dep_index.size(); i++)
257  mark[glob.dep_index[i]] = true;
258  glob.reverse(mark);
259  return subset(mark, glob.inv_index);
260  }
262  std::vector<bool> activeRange() {
263  std::vector<bool> mark(glob.values.size(), false);
264  for (size_t i = 0; i < glob.inv_index.size(); i++)
265  mark[glob.inv_index[i]] = true;
266  glob.forward(mark);
267  return subset(mark, glob.dep_index);
268  }
270  std::vector<Scalar> DomainVec() {
271  std::vector<Scalar> xd(Domain());
272  for (size_t i = 0; i < xd.size(); i++) xd[i] = glob.value_inv(i);
273  return xd;
274  }
277  return IndirectAccessor<Scalar>(glob.values, glob.dep_index);
278  }
280  std::vector<bool> get_keep_var(std::vector<bool> keep_x,
281  std::vector<bool> keep_y) {
282  std::vector<bool> keep_var(glob.values.size(), true);
283  if (keep_x.size() > 0 || keep_y.size() > 0) {
284  if (keep_x.size() == 0) keep_x.resize(glob.inv_index.size(), true);
285  if (keep_y.size() == 0) keep_y.resize(glob.dep_index.size(), true);
286  TMBAD_ASSERT(keep_x.size() == glob.inv_index.size());
287  TMBAD_ASSERT(keep_y.size() == glob.dep_index.size());
288 
289  std::vector<bool> keep_var_init(keep_var.size(), false);
290  for (size_t i = 0; i < glob.inv_index.size(); i++)
291  if (keep_x[i]) keep_var_init[glob.inv_index[i]] = true;
292  for (size_t i = 0; i < glob.dep_index.size(); i++)
293  if (keep_y[i]) keep_var_init[glob.dep_index[i]] = true;
294 
295  std::vector<bool> keep_var_x = keep_var_init;
296  glob.forward(keep_var_x);
297 
298  std::vector<bool> keep_var_y = keep_var_init;
299  glob.reverse(keep_var_y);
300 
301  for (size_t i = 0; i < keep_var.size(); i++)
302  keep_var[i] = keep_var_x[i] && keep_var_y[i];
303  }
304  return keep_var;
305  }
313  std::vector<Position> inv_pos;
315  Position find_pos(Index inv) {
316  for (size_t i = 0; i < inv_pos.size(); i++) {
317  if (inv_pos[i].ptr.second == inv) return inv_pos[i];
318  }
319  return Position(0, 0, 0);
320  }
325  Position tail_start;
331  if (glob.inv_index.size() == 0) return true;
332 
333  bool is_sorted = (inv_pos.size() == 0 && !inner_outer_in_use());
334  return is_sorted && (glob.inv_index.size() ==
335  1 + glob.inv_index.back() - glob.inv_index.front());
336  }
339  void set_tail(const std::vector<Index> &random) {
340  if (inv_pos.size() > 0) {
341  std::vector<Position> pos = subset(inv_pos, random);
342  tail_start = *std::min_element(pos.begin(), pos.end());
343  } else {
344  tail_start = Position(0, 0, 0);
345  }
346  }
349  void unset_tail() { tail_start = Position(0, 0, 0); }
351  void force_update() { force_update_flag = true; }
352  bool force_update_flag;
354  template <class InplaceVector>
355  Position DomainVecSet(const InplaceVector &x) {
356  TMBAD_ASSERT(x.size() == Domain());
357  if (force_update_flag) {
358  for (size_t i = 0; i < x.size(); i++) glob.value_inv(i) = x[i];
359  force_update_flag = false;
360  return Position(0, 0, 0);
361  }
362  if (inv_pos.size() > 0) {
363  if (inner_outer_in_use()) {
364  for (size_t i = 0; i < x.size(); i++) glob.value_inv(i) = x[i];
365  Index min_inv =
366  *std::min_element(glob.inv_index.begin(), glob.inv_index.end());
367  return find_pos(min_inv);
368  }
369  TMBAD_ASSERT(inv_pos.size() == Domain());
370  size_t min_var_changed = -1;
371  size_t i_min = -1;
372  for (size_t i = 0; i < x.size(); i++) {
373  if (glob.value_inv(i) != x[i] && glob.inv_index[i] < min_var_changed) {
374  min_var_changed = glob.inv_index[i];
375  i_min = i;
376  }
377  glob.value_inv(i) = x[i];
378  }
379  if (min_var_changed == (size_t)-1)
380  return glob.end();
381  else
382  return inv_pos[i_min];
383  }
384  if (x.size() > 0) {
385  bool no_change = true;
386  for (size_t i = 0; i < x.size(); i++) {
387  if (glob.value_inv(i) != x[i]) {
388  no_change = false;
389  break;
390  }
391  }
392  if (no_change) return glob.end();
393 
394  for (size_t i = 0; i < x.size(); i++) glob.value_inv(i) = x[i];
395  }
396  return Position(0, 0, 0);
397  }
399  template <class Vector>
400  Vector forward(const Vector &x) {
401  TMBAD_ASSERT((size_t)x.size() == Domain());
402  for (size_t i = 0; i < (size_t)x.size(); i++) glob.value_inv(i) = x[i];
403  glob.forward();
404  Vector y(Range());
405  for (size_t i = 0; i < (size_t)y.size(); i++) y[i] = glob.value_dep(i);
406  return y;
407  }
409  template <class Vector>
410  Vector reverse(const Vector &w) {
411  TMBAD_ASSERT((size_t)w.size() == Range());
412  glob.clear_deriv();
413  for (size_t i = 0; i < (size_t)w.size(); i++) glob.deriv_dep(i) = w[i];
414  glob.reverse();
415  Vector d(Domain());
416  for (size_t i = 0; i < (size_t)d.size(); i++) d[i] = glob.deriv_inv(i);
417  return d;
418  }
420  std::vector<Scalar> operator()(const std::vector<Scalar> &x) {
421  Position start = DomainVecSet(x);
422  glob.forward(start);
423  return RangeVec();
424  }
425 
426  IndirectAccessor<Scalar> operator()(
427  const segment_ref<ForwardArgs<Scalar>, x_read> &x) {
428  Position start = DomainVecSet(x);
429  glob.forward(start);
430  return RangeVec();
431  }
437  std::vector<ad> operator()(const std::vector<ad> &x_) const {
438  std::vector<ad> x(x_.begin(), x_.end());
439  TMBAD_ASSERT(x.size() == Domain());
440  for (size_t i = 0; i < x.size(); i++) {
441  x[i].addToTape();
442  }
443  global *cur_glob = get_glob();
444  for (size_t i = 0; i < x.size(); i++) {
445  TMBAD_ASSERT(x[i].on_some_tape());
446  TMBAD_ASSERT(x[i].glob() == cur_glob);
447  }
448  global::replay replay(this->glob, *get_glob());
449  replay.start();
450  for (size_t i = 0; i < this->Domain(); i++) {
451  replay.value_inv(i) = x[i];
452  }
453  replay.forward(false, false);
454  std::vector<ad> y(this->Range());
455  for (size_t i = 0; i < this->Range(); i++) {
456  y[i] = replay.value_dep(i);
457  }
458  replay.stop();
459  return y;
460  }
463  ad operator()(ad x0) {
464  TMBAD_ASSERT(Domain() == 1);
465  TMBAD_ASSERT(Range() == 1);
466  std::vector<ad> x(1);
467  x[0] = x0;
468  return (*this)(x)[0];
469  }
472  ad operator()(ad x0, ad x1) {
473  TMBAD_ASSERT(Domain() == 2);
474  TMBAD_ASSERT(Range() == 1);
475  std::vector<ad> x(2);
476  x[0] = x0;
477  x[1] = x1;
478  return (*this)(x)[0];
479  }
484  std::vector<Scalar> Jacobian(const std::vector<Scalar> &x) {
485  Position start = DomainVecSet(x);
486  glob.forward(start);
487  std::vector<Scalar> ans(Domain() * Range());
488  for (size_t j = 0; j < Range(); j++) {
489  glob.clear_deriv(tail_start);
490  glob.deriv_dep(j) = 1;
491  glob.reverse(tail_start);
492  for (size_t k = 0; k < Domain(); k++)
493  ans[j * Domain() + k] = glob.deriv_inv(k);
494  }
495  return ans;
496  }
503  std::vector<Scalar> Jacobian(const std::vector<Scalar> &x,
504  std::vector<bool> keep_x,
505  std::vector<bool> keep_y) {
506  std::vector<Scalar> ans;
507 
508  std::vector<bool> keep_var = get_keep_var(keep_x, keep_y);
509 
510  graph G = this->glob.reverse_graph(keep_var);
511 
512  std::vector<size_t> which_keep_x = which(keep_x);
513  std::vector<size_t> which_keep_y = which(keep_y);
514 
515  Position start = DomainVecSet(x);
516  glob.forward(start);
517 
518  for (size_t w = 0; w < which_keep_y.size(); w++) {
519  size_t k = which_keep_y[w];
520 
521  glob.subgraph_seq.resize(0);
522  glob.subgraph_seq.push_back(G.dep2op[k]);
523  G.search(glob.subgraph_seq);
524 
525  glob.clear_deriv_sub();
526  for (size_t l = 0; l < which_keep_x.size(); l++)
527  glob.deriv_inv(which_keep_x[l]) = Scalar(0);
528  glob.deriv_dep(k) = 1.;
529  glob.reverse_sub();
530 
531  for (size_t l = 0; l < which_keep_x.size(); l++) {
532  ans.push_back(glob.deriv_inv(which_keep_x[l]));
533  }
534  }
535  return ans;
536  }
542  std::vector<Scalar> Jacobian(const std::vector<Scalar> &x,
543  const std::vector<Scalar> &w) {
544  TMBAD_ASSERT(x.size() == Domain());
545  TMBAD_ASSERT(w.size() == Range());
546  Position start = DomainVecSet(x);
547  glob.forward(start);
548  glob.clear_deriv();
549  for (size_t j = 0; j < Range(); j++) glob.deriv_dep(j) = w[j];
550  glob.reverse();
551  return IndirectAccessor<Scalar>(glob.derivs, glob.inv_index);
552  }
553 
554  IndirectAccessor<Scalar> Jacobian(
555  const segment_ref<ReverseArgs<Scalar>, x_read> &x,
556  const segment_ref<ReverseArgs<Scalar>, dy_read> &w) {
557  TMBAD_ASSERT(x.size() == Domain());
558  TMBAD_ASSERT(w.size() == Range());
559  Position start = DomainVecSet(x);
560  glob.forward(start);
561  glob.clear_deriv();
562  for (size_t j = 0; j < Range(); j++) glob.deriv_dep(j) = w[j];
563  glob.reverse();
564  return IndirectAccessor<Scalar>(glob.derivs, glob.inv_index);
565  }
566  std::vector<ad> Jacobian(const std::vector<ad> &x_,
567  const std::vector<ad> &w_) {
568  std::vector<ad> x(x_.begin(), x_.end());
569  std::vector<ad> w(w_.begin(), w_.end());
570  global *cur_glob = get_glob();
571 
572  TMBAD_ASSERT(x.size() == Domain());
573  for (size_t i = 0; i < x.size(); i++) {
574  x[i].addToTape();
575  }
576  for (size_t i = 0; i < x.size(); i++) {
577  TMBAD_ASSERT(x[i].on_some_tape());
578  TMBAD_ASSERT(x[i].glob() == cur_glob);
579  }
580 
581  TMBAD_ASSERT(w.size() == Range());
582  for (size_t i = 0; i < w.size(); i++) {
583  w[i].addToTape();
584  }
585  for (size_t i = 0; i < w.size(); i++) {
586  TMBAD_ASSERT(w[i].on_some_tape());
587  TMBAD_ASSERT(w[i].glob() == cur_glob);
588  }
589 
590  global::replay replay(this->glob, *get_glob());
591  replay.start();
592  for (size_t i = 0; i < this->Domain(); i++) {
593  replay.value_inv(i) = x[i];
594  }
595  replay.forward(false, false);
596  replay.clear_deriv();
597  for (size_t i = 0; i < this->Range(); i++) {
598  replay.deriv_dep(i) = w[i];
599  }
600  replay.reverse(false, false);
601  std::vector<ad> dx(this->Domain());
602  for (size_t i = 0; i < dx.size(); i++) {
603  dx[i] = replay.deriv_inv(i);
604  }
605  replay.stop();
606  return dx;
607  }
608  template <bool range_weight>
609  ADFun JacFun_(std::vector<bool> keep_x, std::vector<bool> keep_y) {
610  ADFun ans;
611  if (keep_x.size() == 0) keep_x.resize(Domain(), true);
612  if (keep_y.size() == 0) keep_y.resize(Range(), true);
613  std::vector<bool> keep = get_keep_var(keep_x, keep_y);
614  graph G;
615  if (!range_weight && Range() > 1) {
616  G = this->glob.reverse_graph(keep);
617  }
618  keep = glob.var2op(keep);
619  global::replay replay(this->glob, ans.glob);
620  replay.start();
621  replay.forward(true, false);
622  if (!range_weight) {
623  if (G.empty()) {
624  for (size_t i = 0; i < this->Range(); i++) {
625  if (!keep_y[i]) continue;
626  replay.clear_deriv();
627  replay.deriv_dep(i) = 1.;
628  replay.reverse(false, false, tail_start, keep);
629  for (size_t j = 0; j < this->Domain(); j++) {
630  if (keep_x[j]) replay.deriv_inv(j).Dependent();
631  }
632  }
633  } else {
634  replay.clear_deriv();
635  for (size_t i = 0; i < this->Range(); i++) {
636  if (!keep_y[i]) continue;
637  glob.subgraph_seq.resize(0);
638  glob.subgraph_seq.push_back(G.dep2op[i]);
639  G.search(glob.subgraph_seq);
640  replay.deriv_dep(i) = 1.;
641  replay.reverse_sub();
642  for (size_t j = 0; j < this->Domain(); j++) {
643  if (keep_x[j]) replay.deriv_inv(j).Dependent();
644  }
645  replay.clear_deriv_sub();
646  }
647  }
648  } else {
649  replay.clear_deriv();
650  replay.reverse(false, true, tail_start, keep);
651  for (size_t j = 0; j < this->Domain(); j++) {
652  if (keep_x[j]) replay.deriv_inv(j).Dependent();
653  }
654  }
655  replay.stop();
656  set_inner_outer(ans);
657  return ans;
658  }
680  ADFun JacFun(std::vector<bool> keep_x = std::vector<bool>(0),
681  std::vector<bool> keep_y = std::vector<bool>(0)) {
682  return JacFun_<false>(keep_x, keep_y);
683  }
702  ADFun WgtJacFun(std::vector<bool> keep_x = std::vector<bool>(0),
703  std::vector<bool> keep_y = std::vector<bool>(0)) {
704  return JacFun_<true>(keep_x, keep_y);
705  }
709  std::vector<Scalar> x = DomainVec();
710  return ADFun(F, x);
711  }
717  std::vector<ADFun> parallel_accumulate(size_t num_threads) {
718  TMBAD_ASSERT(Range() == 1);
719  global glob_split = accumulation_tree_split(glob);
720  autopar ap(glob_split, num_threads);
721  ap.do_aggregate = true;
722  ap.keep_all_inv = true;
723  ap.run();
724  ap.extract();
725  std::vector<ADFun> ans(num_threads);
726  for (size_t i = 0; i < num_threads; i++) ans[i].glob = ap.vglob[i];
727  return ans;
728  }
732  ADFun parallelize(size_t num_threads) {
733  TMBAD_ASSERT(Range() == 1);
734  global glob_split = accumulation_tree_split(glob);
735  autopar ap(glob_split, num_threads);
736  ap.do_aggregate = true;
737  ap.keep_all_inv = false;
738  ap.run();
739  ap.extract();
740  global::Complete<ParalOp> f_parallel(ap);
741  ADFun F(f_parallel, DomainVec());
742  aggregate(F.glob);
743  return F;
744  }
750  void replay() { glob.forward_replay(true, true); }
776  Sparse<ADFun> SpJacFun(std::vector<bool> keep_x = std::vector<bool>(0),
777  std::vector<bool> keep_y = std::vector<bool>(0),
778  SpJacFun_config config = SpJacFun_config()) {
779  ADFun atomic_jac_row;
780  std::vector<Index> rowcounts;
781 
782  Sparse<ADFun> ans;
783 
784  ans.m = Range();
785  ans.n = Domain();
786 
787  if (keep_x.size() == 0) keep_x.resize(Domain(), true);
788  if (keep_y.size() == 0) keep_y.resize(Range(), true);
789  std::vector<bool> keep_var = get_keep_var(keep_x, keep_y);
790 
791  size_t keep_x_count = std::count(keep_x.begin(), keep_x.end(), true);
792  size_t keep_y_count = std::count(keep_y.begin(), keep_y.end(), true);
793 
794  graph G = this->glob.reverse_graph(keep_var);
795 
796  global::replay replay(this->glob, ans.glob);
797  replay.start();
798  replay.forward(true, false);
799 
800  Index NA = -1;
801  std::vector<Index> op2inv_idx = glob.op2idx(glob.inv_index, NA);
802 
803  std::fill(keep_var.begin(), keep_var.end(), true);
804 
805  std::vector<Index> col_idx;
806  for (size_t k = 0; k < glob.dep_index.size(); k++) {
807  size_t i = glob.dep_index[k];
808 
809  glob.subgraph_seq.resize(0);
810  glob.subgraph_seq.push_back(G.dep2op[k]);
811  G.search(glob.subgraph_seq);
812 
813  bool do_compress = false;
814  if (config.compress) {
815  if (rowcounts.size() == 0) rowcounts = G.rowcounts();
816 
817  size_t cost1 = 0;
818  for (size_t i = 0; i < glob.subgraph_seq.size(); i++) {
819  cost1 += rowcounts[glob.subgraph_seq[i]];
820  }
821 
822  size_t cost2 = Domain() + Range() + Domain();
823 
824  if (cost2 < cost1) do_compress = true;
825  }
826 
827  if (true) {
828  glob.clear_array_subgraph(keep_var);
829  keep_var[i] = true;
830  glob.reverse_sub(keep_var);
831  }
832 
833  col_idx.resize(0);
834  for (size_t l = 0; l < glob.subgraph_seq.size(); l++) {
835  Index idx = op2inv_idx[glob.subgraph_seq[l]];
836  if (idx != NA) {
837  Index nrep = glob.opstack[glob.subgraph_seq[l]]->output_size();
838  for (Index r = 0; r < nrep; r++) {
839  if (keep_var[glob.inv_index[idx]]) col_idx.push_back(idx);
840  idx++;
841  }
842  }
843  }
844 
845  ans.i.resize(ans.i.size() + col_idx.size(), k);
846  ans.j.insert(ans.j.end(), col_idx.begin(), col_idx.end());
847  if (!do_compress) {
848  replay.clear_deriv_sub();
849 
850  replay.deriv_dep(k) = 1.;
851 
852  replay.reverse_sub();
853 
854  } else {
855  if (atomic_jac_row.Domain() == 0) {
856  Rcout << "Warning: This is an experimental compression method\n";
857  Rcout << "Disable: 'config(tmbad.sparse_hessian_compress=0)'\n";
858  atomic_jac_row = this->WgtJacFun(keep_x, keep_y);
859  atomic_jac_row.optimize();
860 
861  atomic_jac_row.set_inv_positions();
862 
863  atomic_jac_row = atomic_jac_row.atomic();
864 
865  replay.clear_deriv_sub();
866  Rcout << "done\n";
867 
868  TMBAD_ASSERT(atomic_jac_row.Domain() ==
869  this->Domain() + this->Range());
870  TMBAD_ASSERT(atomic_jac_row.Range() == keep_x_count);
871  }
872  std::vector<Replay> vec(atomic_jac_row.Domain(), Replay(0));
873  for (size_t i = 0; i < this->Domain(); i++) {
874  vec[i] = replay.value_inv(i);
875  }
876  vec[this->Domain() + k] = 1.;
877  std::vector<Replay> r = atomic_jac_row(vec);
878  size_t r_idx = 0;
879  for (size_t i = 0; i < this->Domain(); i++) {
880  if (keep_x[i]) replay.deriv_inv(i) = r[r_idx++];
881  }
882  }
883  for (size_t l = 0; l < col_idx.size(); l++) {
884  replay.deriv_inv(col_idx[l]).Dependent();
885  }
886  }
887  replay.stop();
888  if (config.index_remap) {
889  if (keep_x.size() > 0) {
890  std::vector<Index> remap_j = cumsum0<Index>(keep_x);
891  ans.j = TMBad::subset(remap_j, ans.j);
892  ans.n = keep_x_count;
893  }
894  if (keep_y.size() > 0) {
895  std::vector<Index> remap_i = cumsum0<Index>(keep_y);
896  ans.i = TMBad::subset(remap_i, ans.i);
897  ans.m = keep_y_count;
898  }
899  }
900  set_inner_outer(ans);
901  return ans;
902  }
907  ADFun marginal_gk(const std::vector<Index> &random,
908  gk_config cfg = gk_config()) {
909  ADFun ans;
910  old_state os(this->glob);
911  aggregate(this->glob, -1);
912  global glob_split = accumulation_tree_split(this->glob);
913  os.restore();
914  integrate_subgraph<ADFun> i_s(glob_split, random, cfg);
915  ans.glob = i_s.gk();
916  aggregate(ans.glob, -1);
917  return ans;
918  }
920  ADFun marginal_sr(const std::vector<Index> &random, std::vector<sr_grid> grid,
921  const std::vector<Index> &random2grid, bool perm = true) {
922  ADFun ans;
923  old_state os(this->glob);
924  aggregate(this->glob, -1);
925  global glob_split = accumulation_tree_split(this->glob);
926  os.restore();
927  sequential_reduction SR(glob_split, random, grid, random2grid, perm);
928  ans.glob = SR.marginal();
929  aggregate(ans.glob, -1);
930  return ans;
931  }
933  ADFun marginal_sr(const std::vector<Index> &random,
934  sr_grid grid = sr_grid()) {
935  return marginal_sr(random, std::vector<sr_grid>(1, grid),
936  std::vector<Index>(0));
937  }
942  ADFun compose(ADFun other) {
943  TMBAD_ASSERT2(other.Range() == this->Domain(),
944  "Compostion of incompatible functions");
945  struct composition {
946  const ADFun &f;
947  const ADFun &g;
948  composition(const ADFun &f, const ADFun &g) : f(f), g(g) {}
949  std::vector<ad> operator()(std::vector<ad> x) { return f(g(x)); }
950  };
951  composition fg(*this, other);
952  return ADFun(fg, other.DomainVec());
953  }
958  Decomp2<ADFun> decompose(std::vector<Index> nodes) {
959  Decomp2<ADFun> ans;
960  global &glob1 = ans.first.glob;
961  global &glob2 = ans.second.glob;
962 
963  OperatorPure *invop = glob.getOperator<global::InvOp>();
964  std::vector<bool> keep(nodes.size(), true);
965  for (size_t i = 0; i < nodes.size(); i++)
966  if (glob.opstack[nodes[i]] == invop) keep[i] = false;
967  nodes = subset(nodes, keep);
968 
969  glob1 = this->glob;
970  glob1.dep_index.resize(0);
971  std::vector<Index> dep1 = glob1.op2var(nodes);
972  glob1.ad_start();
973  for (size_t i = 0; i < dep1.size(); i++) {
974  ad_plain tmp;
975  tmp.index = dep1[i];
976  tmp.Dependent();
977  }
978  glob1.ad_stop();
979  glob1.eliminate();
980 
981  glob2 = this->glob;
982  substitute(glob2, nodes);
983  glob2.eliminate();
984 
985  set_inner_outer(ans.first);
986  set_inner_outer(ans.second);
987 
988  return ans;
989  }
994  Decomp2<ADFun> decompose(const char *name) {
995  std::vector<Index> nodes = find_op_by_name(this->glob, name);
996  return decompose(nodes);
997  }
1003  if (find_op_by_name(glob, "RefOp").size() == 0) return;
1004 
1005  std::vector<bool> keep_x(Domain(), true);
1006  std::vector<bool> keep_y(Range(), true);
1007  std::vector<bool> vars = get_keep_var(keep_x, keep_y);
1008 
1009  vars = reverse_boundary(glob, vars);
1010 
1011  std::vector<Index> nodes = which<Index>(glob.var2op(vars));
1012 
1013  Decomp2<ADFun> decomp = decompose(nodes);
1014 
1015  size_t n_inner = decomp.first.Domain();
1016  size_t n_outer = decomp.first.Range();
1017 
1018  decomp.first.glob.inv_index.resize(0);
1019 
1020  std::vector<ad_aug> empty;
1021  std::vector<ad_aug> gx = decomp.first(empty);
1022 
1023  ADFun &f = decomp.second;
1024 
1025  f.replay();
1026 
1027  TMBAD_ASSERT(n_inner + n_outer == f.Domain());
1028  TMBAD_ASSERT(find_op_by_name(f.glob, "RefOp").size() == 0);
1029  TMBAD_ASSERT(find_op_by_name(f.glob, "InvOp").size() == f.Domain());
1030  TMBAD_ASSERT(gx.size() == n_outer);
1031 
1032  for (size_t i = 0; i < n_outer; i++) {
1033  Index j = f.glob.inv_index[n_inner + i];
1034 
1035  if (gx[i].constant()) {
1036  f.glob.opstack[j] = glob.getOperator<global::ConstOp>();
1037  } else {
1038  f.glob.opstack[j] = glob.getOperator<global::RefOp>(
1039  gx[i].data.glob, gx[i].taped_value.index);
1040  }
1041  }
1042  f.glob.inv_index.resize(n_inner);
1043 
1044  *this = f;
1045  }
1055  std::vector<ad_aug> resolve_refs() {
1056  TMBAD_ASSERT2(
1057  inner_inv_index.size() == 0 && outer_inv_index.size() == 0,
1058  "'resolve_refs' can only be run once for a given function object")
1059 
1060  ;
1061  std::vector<Index> seq = find_op_by_name(glob, "RefOp");
1062  std::vector<Replay> values(seq.size());
1063  std::vector<Index> dummy_inputs;
1064  ForwardArgs<Replay> args(dummy_inputs, values);
1065  for (size_t i = 0; i < seq.size(); i++) {
1066  TMBAD_ASSERT(glob.opstack[seq[i]]->input_size() == 0);
1067  TMBAD_ASSERT(glob.opstack[seq[i]]->output_size() == 1);
1068  glob.opstack[seq[i]]->forward_incr(args);
1069  glob.opstack[seq[i]]->deallocate();
1070  glob.opstack[seq[i]] = get_glob()->getOperator<global::InvOp>();
1071  }
1072  inner_inv_index = glob.inv_index;
1073  outer_inv_index = glob.op2var(seq);
1074 
1075  glob.inv_index.insert(glob.inv_index.end(), outer_inv_index.begin(),
1076  outer_inv_index.end());
1077  return values;
1078  }
1079  std::vector<Index> inner_inv_index;
1080  std::vector<Index> outer_inv_index;
1082  size_t DomainInner() const { return inner_inv_index.size(); }
1084  size_t DomainOuter() const { return outer_inv_index.size(); }
1088  void SwapInner() {
1089  std::swap(glob.inv_index, inner_inv_index);
1090  force_update();
1091  }
1095  void SwapOuter() {
1096  std::swap(glob.inv_index, outer_inv_index);
1097  force_update();
1098  }
1101  return (DomainInner() > 0) || (DomainOuter() > 0);
1102  }
1104  std::vector<bool> DomainOuterMask() {
1105  std::vector<bool> mark_outer =
1106  glob.mark_space(glob.values.size(), outer_inv_index);
1107  return subset(mark_outer, glob.inv_index);
1108  }
1116  void set_inner_outer(ADFun &ans, const std::vector<bool> &outer_mask) {
1117  if (inner_outer_in_use()) {
1118  std::vector<bool> mark(outer_mask);
1119  mark.resize(ans.Domain(), false);
1120 
1121  ans.outer_inv_index = subset(ans.glob.inv_index, mark);
1122 
1123  mark.flip();
1124 
1125  ans.inner_inv_index = subset(ans.glob.inv_index, mark);
1126  }
1127  }
1128  void set_inner_outer(ADFun &ans) {
1129  if (inner_outer_in_use()) {
1130  set_inner_outer(ans, DomainOuterMask());
1131  }
1132  }
1133  void DomainReduce(const std::vector<bool> &inv_keep) {
1134  std::vector<bool> outer_mask = DomainOuterMask();
1135  outer_mask = subset(outer_mask, inv_keep);
1136  glob.inv_index = subset(glob.inv_index, inv_keep);
1137  set_inner_outer(*this, outer_mask);
1138  }
1144  void inactivate(std::vector<Index> nodes) {
1145  for (size_t i = 0; i < nodes.size(); i++) {
1146  OperatorPure *op = glob.opstack[nodes[i]];
1147  glob.opstack[nodes[i]] = glob.getOperator<global::NullOp2>(
1148  op->input_size(), op->output_size());
1149  op->deallocate();
1150  }
1151  }
1152 };
1164 template <class Functor, class Test = ParametersChanged>
1165 ADFun<> ADFun_retaping(Functor &F, const std::vector<ad_aug> &x,
1166  Test test = Test()) {
1167  typedef retaping_derivative_table<Functor, ADFun<>, Test> DTab;
1168  global::Complete<AtomOp<DTab> > Op(F, x, test);
1169  return ADFun<>(Op, x);
1170 }
1171 
1173 template <class dummy = void>
1175  ADFun<> Fp;
1176  ADFun_packed(const ADFun<> &Fp) : Fp(Fp) {}
1177  ADFun_packed() {}
1178  ad_segment operator()(const std::vector<ad_segment> &x) {
1179  std::vector<ad_segment> xp(x.size());
1180  for (size_t i = 0; i < xp.size(); i++) xp[i] = pack(x[i]);
1181  std::vector<ad_aug> yp = Fp(concat(xp));
1182  return unpack(yp, 0);
1183  }
1184  bool initialized() { return Fp.Domain() != 0; }
1185 };
1193 template <class Functor, class Test>
1194 ADFun_packed<> ADFun_retaping(Functor &F, const std::vector<ad_segment> &x,
1195  Test test) {
1196  static const bool packed = true;
1198  packed>
1199  DTab;
1200  PackWrap<Functor> Fp(F);
1201  std::vector<ad_segment> xp(x.size());
1202  for (size_t i = 0; i < xp.size(); i++) xp[i] = pack(x[i]);
1203  std::vector<ad_aug> xp_ = concat(xp);
1204  PackWrap<Test> testp(test);
1205  global::Complete<AtomOp<DTab> > Op(Fp, xp_, testp);
1206  ADFun<> TapeFp(Op, xp_);
1207  return ADFun_packed<>(TapeFp);
1208 }
1209 
1210 template <class ADFun>
1211 struct Sparse : ADFun {
1212  std::vector<Index> i;
1213  std::vector<Index> j;
1214  Index m;
1215  Index n;
1216  Sparse() {}
1217  Sparse(const ADFun &f) : ADFun(f) {}
1218  std::vector<Index> a2v(const std::valarray<Index> &x) const {
1219  return std::vector<Index>(&x[0], &x[0] + x.size());
1220  }
1221  std::valarray<Index> v2a(const std::vector<Index> &x) const {
1222  return std::valarray<Index>(x.data(), x.size());
1223  }
1224  std::valarray<Index> row() const { return v2a(i); }
1225  std::valarray<Index> col() const { return v2a(j); }
1226  void subset_inplace(const std::valarray<bool> &x) {
1227  i = a2v(row()[x]);
1228  j = a2v(col()[x]);
1229  this->glob.dep_index = a2v(v2a(this->glob.dep_index)[x]);
1230  }
1231  void transpose_inplace() {
1232  std::swap(i, j);
1233  std::swap(m, n);
1234  }
1235 };
1236 
1243 template <class ADFun>
1244 struct Decomp2 : std::pair<ADFun, ADFun> {
1245  struct composition {
1246  typedef ad_aug ad;
1247  const ADFun &f;
1248  const ADFun &g;
1249  composition(const ADFun &f, const ADFun &g) : f(f), g(g) {}
1250  std::vector<ad> operator()(std::vector<ad> x) {
1251  std::vector<ad> y = g(x);
1252  x.insert(x.end(), y.begin(), y.end());
1253  return f(x);
1254  }
1255  };
1256  operator ADFun() {
1257  ADFun &g = this->first;
1258  ADFun &f = this->second;
1259  composition fg(f, g);
1260  return ADFun(fg, g.DomainVec());
1261  }
1285  Decomp3<ADFun> HesFun(std::vector<bool> keep_rc = std::vector<bool>(0),
1286  bool sparse_1 = true, bool sparse_2 = true,
1287  bool sparse_3 = true) {
1288  ADFun &g = this->first;
1289  ADFun &f = this->second;
1290  Decomp3<ADFun> ans;
1291  TMBAD_ASSERT(f.Range() == 1);
1292 
1293  std::vector<bool> keep_f = std::vector<bool>(f.Range(), true);
1294  std::vector<bool> keep_g = std::vector<bool>(g.Range(), true);
1295 
1296  typedef ad_aug ad;
1297  global &glob = ans.first.glob;
1298  glob.ad_start();
1299  std::vector<Scalar> x_ = f.DomainVec();
1300  size_t k = g.Range();
1301  size_t n = f.Domain() - k;
1302 
1303  std::vector<bool> mask_x(f.Domain(), false);
1304  for (size_t i = 0; i < n; i++) mask_x[i] = true;
1305  std::vector<bool> mask_s(mask_x);
1306  mask_s.flip();
1307 
1308  std::vector<ad> x(x_.begin(), x_.end() - k);
1309  Independent(x);
1310  std::vector<ad> s = g(x);
1311  std::vector<ad> s0(s.size());
1312 
1313  for (size_t i = 0; i < s.size(); i++) s0[i] = s[i].copy0();
1314  std::vector<ad> xs(x);
1315  xs.insert(xs.end(), s.begin(), s.end());
1316  std::vector<ad> xs0(x);
1317  xs0.insert(xs0.end(), s0.begin(), s0.end());
1318  if (false) {
1319  TMBAD_ASSERT(keep_rc.size() == n || keep_rc.size() == 0);
1320  std::vector<bool> keep_xy(keep_rc);
1321  keep_xy.resize(f.Domain(), true);
1322  ADFun f_grad = f.JacFun(keep_xy, keep_f);
1323  }
1324  ADFun f_grad = f.JacFun();
1325  std::vector<ad> z = subset(f_grad(xs), mask_x);
1326  std::vector<ad> z0 = subset(f_grad(xs0), mask_s);
1327  std::vector<ad> xw(x);
1328  xw.insert(xw.end(), z0.begin(), z0.end());
1329  std::vector<ad> z1 = g.WgtJacFun()(xw);
1330  for (size_t i = 0; i < n; i++) z[i] += z1[i];
1331  Dependent(z);
1332  glob.ad_stop();
1333  glob.eliminate();
1334  ans.first.glob = glob;
1335 
1336  if (sparse_1) {
1337  ans.first = ans.first.SpJacFun(keep_rc, keep_rc);
1338  } else {
1339  ans.first = ans.first.JacFun(keep_rc, keep_rc);
1340  }
1341  ans.first.glob.eliminate();
1342  f.set_inner_outer(ans.first);
1343 
1344  if (sparse_2) {
1345  ans.second = g.SpJacFun(keep_rc);
1346  } else {
1347  ans.second = g.JacFun(keep_rc);
1348  }
1349  ans.second.glob.eliminate();
1350 
1351  Sparse<ADFun> B;
1352  if (sparse_3) {
1353  B = f_grad.SpJacFun(mask_s, mask_s);
1354  } else {
1355  B = f_grad.JacFun(mask_s, mask_s);
1356  }
1357  ans.third.glob.ad_start();
1358  std::vector<ad> xx(x_.begin(), x_.end() - k);
1359  Independent(xx);
1360  s = g(xx);
1361  xs = xx;
1362  xs.insert(xs.end(), s.begin(), s.end());
1363  z = B(xs);
1364  Dependent(z);
1365  ans.third.glob.ad_stop();
1366  ans.third.glob.eliminate();
1367  ans.third.i = B.i;
1368  ans.third.j = B.j;
1369  f.set_inner_outer(ans.third);
1370 
1371  return ans;
1372  }
1373 };
1374 
1384 template <class ADFun>
1385 struct Decomp3 : Decomp2<Sparse<ADFun> > {
1386  Sparse<ADFun> third;
1387 };
1388 
1389 } // namespace TMBad
1390 #endif // HAVE_TMBAD_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:153
+
1 #ifndef HAVE_TMBAD_HPP
2 #define HAVE_TMBAD_HPP
3 // Autogenerated - do not edit by hand !
4 #include "checkpoint.hpp"
5 #include "global.hpp"
6 #include "graph_transform.hpp"
7 
8 namespace TMBad {
9 
10 template <class ADFun>
11 struct Sparse;
12 template <class ADFun>
13 struct Decomp2;
14 template <class ADFun>
15 struct Decomp3;
16 
17 namespace {
18 
19 template <class I>
20 std::vector<I> cumsum0(const std::vector<bool> &x) {
21  std::vector<I> y(x.size(), 0);
22  for (size_t i = 1; i < x.size(); i++) {
23  y[i] = y[i - 1] + x[i - 1];
24  }
25  return y;
26 }
27 } // namespace
28 
56 template <class Functor, class InterfaceVector>
57 struct StdWrap {
58  Functor &F;
59  typedef typename InterfaceVector::value_type Scalar;
60  InterfaceVector tovec(const InterfaceVector &x) { return x; }
61  InterfaceVector tovec(const Scalar &x) {
62  InterfaceVector y(1);
63  y[0] = x;
64  return y;
65  }
66  StdWrap(Functor &F) : F(F) {}
67  template <class T>
68  std::vector<T> operator()(const std::vector<T> &x) {
69  InterfaceVector xi(x);
70  InterfaceVector yi = tovec(F(xi));
71  std::vector<T> y(yi);
72  return y;
73  }
74 };
75 
79  bool compress;
80  bool index_remap;
81 };
82 
116 template <class ad = ad_aug>
117 struct ADFun {
118  global glob;
119 
121  template <class Functor, class ScalarVector>
122  ADFun(Functor F, const ScalarVector &x_) : force_update_flag(false) {
123  std::vector<ad> x(x_.size());
124  for (size_t i = 0; i < x.size(); i++) x[i] = Value(x_[i]);
125  global *glob_begin = get_glob();
126  this->glob.ad_start();
127  Independent(x);
128  std::vector<ad> y = F(x);
129  Dependent(y);
130  this->glob.ad_stop();
131  global *glob_end = get_glob();
132  TMBAD_ASSERT(glob_begin == glob_end);
133  }
134 
138  template <class Functor>
139  ADFun(Functor F, Scalar x0_) : force_update_flag(false) {
140  global *glob_begin = get_glob();
141  this->glob.ad_start();
142  ad x0(x0_);
143  x0.Independent();
144  ad y0 = F(x0);
145  y0.Dependent();
146  this->glob.ad_stop();
147  global *glob_end = get_glob();
148  TMBAD_ASSERT(glob_begin == glob_end);
149  }
150 
154  template <class Functor>
155  ADFun(Functor F, Scalar x0_, Scalar x1_) : force_update_flag(false) {
156  global *glob_begin = get_glob();
157  this->glob.ad_start();
158  ad x0(x0_);
159  x0.Independent();
160  ad x1(x1_);
161  x1.Independent();
162  ad y0 = F(x0, x1);
163  y0.Dependent();
164  this->glob.ad_stop();
165  global *glob_end = get_glob();
166  TMBAD_ASSERT(glob_begin == glob_end);
167  }
168 
169  ADFun() : force_update_flag(false) {}
170 
171  void forward() { glob.forward(); }
172  void reverse() { glob.reverse(); }
173  void clear_deriv() { glob.clear_deriv(); }
174  Scalar &deriv_inv(Index i) { return glob.deriv_inv(i); }
175  Scalar &deriv_dep(Index i) { return glob.deriv_dep(i); }
176 
178  void print(print_config cfg = print_config()) { glob.print(cfg); }
179 
181  void eliminate() { glob.eliminate(); }
182 
195  void optimize() {
196  TMBAD_ASSERT2(inv_pos.size() == 0,
197  "Tape has 'cached independent variable positions' which "
198  "would be invalidated by the optimizer");
199 
200  std::vector<bool> outer_mask;
201  if (inner_outer_in_use()) {
202  outer_mask = DomainOuterMask();
203  }
204 
206 
207  glob.eliminate();
208 
209  if (inner_outer_in_use()) {
210  TMBAD_ASSERT(outer_mask.size() == Domain());
211  set_inner_outer(*this, outer_mask);
212  }
213  }
223  std::vector<Position> pos = inv_positions(glob);
224  inv_pos = subset(pos, invperm(order(glob.inv_index)));
225  }
237  void reorder(std::vector<Index> last) {
238  std::vector<bool> outer_mask;
239  if (inner_outer_in_use()) {
240  outer_mask = DomainOuterMask();
241  }
242  reorder_graph(glob, last);
243 
244  if (inner_outer_in_use()) {
245  TMBAD_ASSERT(outer_mask.size() == Domain());
246  set_inner_outer(*this, outer_mask);
247  }
248  set_inv_positions();
249  }
250 
251  size_t Domain() const { return glob.inv_index.size(); }
252  size_t Range() const { return glob.dep_index.size(); }
254  std::vector<bool> activeDomain() {
255  std::vector<bool> mark(glob.values.size(), false);
256  for (size_t i = 0; i < glob.dep_index.size(); i++)
257  mark[glob.dep_index[i]] = true;
258  glob.reverse(mark);
259  return subset(mark, glob.inv_index);
260  }
262  std::vector<bool> activeRange() {
263  std::vector<bool> mark(glob.values.size(), false);
264  for (size_t i = 0; i < glob.inv_index.size(); i++)
265  mark[glob.inv_index[i]] = true;
266  glob.forward(mark);
267  return subset(mark, glob.dep_index);
268  }
270  std::vector<Scalar> DomainVec() {
271  std::vector<Scalar> xd(Domain());
272  for (size_t i = 0; i < xd.size(); i++) xd[i] = glob.value_inv(i);
273  return xd;
274  }
277  return IndirectAccessor<Scalar>(glob.values, glob.dep_index);
278  }
280  std::vector<bool> get_keep_var(std::vector<bool> keep_x,
281  std::vector<bool> keep_y) {
282  std::vector<bool> keep_var(glob.values.size(), true);
283  if (keep_x.size() > 0 || keep_y.size() > 0) {
284  if (keep_x.size() == 0) keep_x.resize(glob.inv_index.size(), true);
285  if (keep_y.size() == 0) keep_y.resize(glob.dep_index.size(), true);
286  TMBAD_ASSERT(keep_x.size() == glob.inv_index.size());
287  TMBAD_ASSERT(keep_y.size() == glob.dep_index.size());
288 
289  std::vector<bool> keep_var_init(keep_var.size(), false);
290  for (size_t i = 0; i < glob.inv_index.size(); i++)
291  if (keep_x[i]) keep_var_init[glob.inv_index[i]] = true;
292  for (size_t i = 0; i < glob.dep_index.size(); i++)
293  if (keep_y[i]) keep_var_init[glob.dep_index[i]] = true;
294 
295  std::vector<bool> keep_var_x = keep_var_init;
296  glob.forward(keep_var_x);
297 
298  std::vector<bool> keep_var_y = keep_var_init;
299  glob.reverse(keep_var_y);
300 
301  for (size_t i = 0; i < keep_var.size(); i++)
302  keep_var[i] = keep_var_x[i] && keep_var_y[i];
303  }
304  return keep_var;
305  }
313  std::vector<Position> inv_pos;
315  Position find_pos(Index inv) {
316  for (size_t i = 0; i < inv_pos.size(); i++) {
317  if (inv_pos[i].ptr.second == inv) return inv_pos[i];
318  }
319  return Position(0, 0, 0);
320  }
325  Position tail_start;
331  if (glob.inv_index.size() == 0) return true;
332 
333  bool is_sorted = (inv_pos.size() == 0 && !inner_outer_in_use());
334  return is_sorted && (glob.inv_index.size() ==
335  1 + glob.inv_index.back() - glob.inv_index.front());
336  }
339  void set_tail(const std::vector<Index> &random) {
340  if (inv_pos.size() > 0) {
341  std::vector<Position> pos = subset(inv_pos, random);
342  tail_start = *std::min_element(pos.begin(), pos.end());
343  } else {
344  tail_start = Position(0, 0, 0);
345  }
346  }
349  void unset_tail() { tail_start = Position(0, 0, 0); }
351  void force_update() { force_update_flag = true; }
352  bool force_update_flag;
354  template <class InplaceVector>
355  Position DomainVecSet(const InplaceVector &x) {
356  TMBAD_ASSERT(x.size() == Domain());
357  if (force_update_flag) {
358  for (size_t i = 0; i < x.size(); i++) glob.value_inv(i) = x[i];
359  force_update_flag = false;
360  return Position(0, 0, 0);
361  }
362  if (inv_pos.size() > 0) {
363  if (inner_outer_in_use()) {
364  for (size_t i = 0; i < x.size(); i++) glob.value_inv(i) = x[i];
365  Index min_inv =
366  *std::min_element(glob.inv_index.begin(), glob.inv_index.end());
367  return find_pos(min_inv);
368  }
369  TMBAD_ASSERT(inv_pos.size() == Domain());
370  size_t min_var_changed = -1;
371  size_t i_min = -1;
372  for (size_t i = 0; i < x.size(); i++) {
373  if (glob.value_inv(i) != x[i] && glob.inv_index[i] < min_var_changed) {
374  min_var_changed = glob.inv_index[i];
375  i_min = i;
376  }
377  glob.value_inv(i) = x[i];
378  }
379  if (min_var_changed == (size_t)-1)
380  return glob.end();
381  else
382  return inv_pos[i_min];
383  }
384  if (x.size() > 0) {
385  bool no_change = true;
386  for (size_t i = 0; i < x.size(); i++) {
387  if (glob.value_inv(i) != x[i]) {
388  no_change = false;
389  break;
390  }
391  }
392  if (no_change) return glob.end();
393 
394  for (size_t i = 0; i < x.size(); i++) glob.value_inv(i) = x[i];
395  }
396  return Position(0, 0, 0);
397  }
399  template <class Vector>
400  Vector forward(const Vector &x) {
401  TMBAD_ASSERT((size_t)x.size() == Domain());
402  for (size_t i = 0; i < (size_t)x.size(); i++) glob.value_inv(i) = x[i];
403  glob.forward();
404  Vector y(Range());
405  for (size_t i = 0; i < (size_t)y.size(); i++) y[i] = glob.value_dep(i);
406  return y;
407  }
409  template <class Vector>
410  Vector reverse(const Vector &w) {
411  TMBAD_ASSERT((size_t)w.size() == Range());
412  glob.clear_deriv();
413  for (size_t i = 0; i < (size_t)w.size(); i++) glob.deriv_dep(i) = w[i];
414  glob.reverse();
415  Vector d(Domain());
416  for (size_t i = 0; i < (size_t)d.size(); i++) d[i] = glob.deriv_inv(i);
417  return d;
418  }
420  std::vector<Scalar> operator()(const std::vector<Scalar> &x) {
421  Position start = DomainVecSet(x);
422  glob.forward(start);
423  return RangeVec();
424  }
425 
426  IndirectAccessor<Scalar> operator()(
427  const segment_ref<ForwardArgs<Scalar>, x_read> &x) {
428  Position start = DomainVecSet(x);
429  glob.forward(start);
430  return RangeVec();
431  }
437  std::vector<ad> operator()(const std::vector<ad> &x_) const {
438  std::vector<ad> x(x_.begin(), x_.end());
439  TMBAD_ASSERT(x.size() == Domain());
440  for (size_t i = 0; i < x.size(); i++) {
441  x[i].addToTape();
442  }
443  global *cur_glob = get_glob();
444  for (size_t i = 0; i < x.size(); i++) {
445  TMBAD_ASSERT(x[i].on_some_tape());
446  TMBAD_ASSERT(x[i].glob() == cur_glob);
447  }
448  global::replay replay(this->glob, *get_glob());
449  replay.start();
450  for (size_t i = 0; i < this->Domain(); i++) {
451  replay.value_inv(i) = x[i];
452  }
453  replay.forward(false, false);
454  std::vector<ad> y(this->Range());
455  for (size_t i = 0; i < this->Range(); i++) {
456  y[i] = replay.value_dep(i);
457  }
458  replay.stop();
459  return y;
460  }
463  ad operator()(ad x0) {
464  TMBAD_ASSERT(Domain() == 1);
465  TMBAD_ASSERT(Range() == 1);
466  std::vector<ad> x(1);
467  x[0] = x0;
468  return (*this)(x)[0];
469  }
472  ad operator()(ad x0, ad x1) {
473  TMBAD_ASSERT(Domain() == 2);
474  TMBAD_ASSERT(Range() == 1);
475  std::vector<ad> x(2);
476  x[0] = x0;
477  x[1] = x1;
478  return (*this)(x)[0];
479  }
484  std::vector<Scalar> Jacobian(const std::vector<Scalar> &x) {
485  Position start = DomainVecSet(x);
486  glob.forward(start);
487  std::vector<Scalar> ans(Domain() * Range());
488  for (size_t j = 0; j < Range(); j++) {
489  glob.clear_deriv(tail_start);
490  glob.deriv_dep(j) = 1;
491  glob.reverse(tail_start);
492  for (size_t k = 0; k < Domain(); k++)
493  ans[j * Domain() + k] = glob.deriv_inv(k);
494  }
495  return ans;
496  }
503  std::vector<Scalar> Jacobian(const std::vector<Scalar> &x,
504  std::vector<bool> keep_x,
505  std::vector<bool> keep_y) {
506  std::vector<Scalar> ans;
507 
508  std::vector<bool> keep_var = get_keep_var(keep_x, keep_y);
509 
510  graph G = this->glob.reverse_graph(keep_var);
511 
512  std::vector<size_t> which_keep_x = which(keep_x);
513  std::vector<size_t> which_keep_y = which(keep_y);
514 
515  Position start = DomainVecSet(x);
516  glob.forward(start);
517 
518  for (size_t w = 0; w < which_keep_y.size(); w++) {
519  size_t k = which_keep_y[w];
520 
521  glob.subgraph_seq.resize(0);
522  glob.subgraph_seq.push_back(G.dep2op[k]);
523  G.search(glob.subgraph_seq);
524 
525  glob.clear_deriv_sub();
526  for (size_t l = 0; l < which_keep_x.size(); l++)
527  glob.deriv_inv(which_keep_x[l]) = Scalar(0);
528  glob.deriv_dep(k) = 1.;
529  glob.reverse_sub();
530 
531  for (size_t l = 0; l < which_keep_x.size(); l++) {
532  ans.push_back(glob.deriv_inv(which_keep_x[l]));
533  }
534  }
535  return ans;
536  }
542  std::vector<Scalar> Jacobian(const std::vector<Scalar> &x,
543  const std::vector<Scalar> &w) {
544  TMBAD_ASSERT(x.size() == Domain());
545  TMBAD_ASSERT(w.size() == Range());
546  Position start = DomainVecSet(x);
547  glob.forward(start);
548  glob.clear_deriv();
549  for (size_t j = 0; j < Range(); j++) glob.deriv_dep(j) = w[j];
550  glob.reverse();
551  return IndirectAccessor<Scalar>(glob.derivs, glob.inv_index);
552  }
553 
554  IndirectAccessor<Scalar> Jacobian(
555  const segment_ref<ReverseArgs<Scalar>, x_read> &x,
556  const segment_ref<ReverseArgs<Scalar>, dy_read> &w) {
557  TMBAD_ASSERT(x.size() == Domain());
558  TMBAD_ASSERT(w.size() == Range());
559  Position start = DomainVecSet(x);
560  glob.forward(start);
561  glob.clear_deriv();
562  for (size_t j = 0; j < Range(); j++) glob.deriv_dep(j) = w[j];
563  glob.reverse();
564  return IndirectAccessor<Scalar>(glob.derivs, glob.inv_index);
565  }
566  std::vector<ad> Jacobian(const std::vector<ad> &x_,
567  const std::vector<ad> &w_) {
568  std::vector<ad> x(x_.begin(), x_.end());
569  std::vector<ad> w(w_.begin(), w_.end());
570  global *cur_glob = get_glob();
571 
572  TMBAD_ASSERT(x.size() == Domain());
573  for (size_t i = 0; i < x.size(); i++) {
574  x[i].addToTape();
575  }
576  for (size_t i = 0; i < x.size(); i++) {
577  TMBAD_ASSERT(x[i].on_some_tape());
578  TMBAD_ASSERT(x[i].glob() == cur_glob);
579  }
580 
581  TMBAD_ASSERT(w.size() == Range());
582  for (size_t i = 0; i < w.size(); i++) {
583  w[i].addToTape();
584  }
585  for (size_t i = 0; i < w.size(); i++) {
586  TMBAD_ASSERT(w[i].on_some_tape());
587  TMBAD_ASSERT(w[i].glob() == cur_glob);
588  }
589 
590  global::replay replay(this->glob, *get_glob());
591  replay.start();
592  for (size_t i = 0; i < this->Domain(); i++) {
593  replay.value_inv(i) = x[i];
594  }
595  replay.forward(false, false);
596  replay.clear_deriv();
597  for (size_t i = 0; i < this->Range(); i++) {
598  replay.deriv_dep(i) = w[i];
599  }
600  replay.reverse(false, false);
601  std::vector<ad> dx(this->Domain());
602  for (size_t i = 0; i < dx.size(); i++) {
603  dx[i] = replay.deriv_inv(i);
604  }
605  replay.stop();
606  return dx;
607  }
608  template <bool range_weight>
609  ADFun JacFun_(std::vector<bool> keep_x, std::vector<bool> keep_y) {
610  ADFun ans;
611  if (keep_x.size() == 0) keep_x.resize(Domain(), true);
612  if (keep_y.size() == 0) keep_y.resize(Range(), true);
613  std::vector<bool> keep = get_keep_var(keep_x, keep_y);
614  graph G;
615  if (!range_weight && Range() > 1) {
616  G = this->glob.reverse_graph(keep);
617  }
618  keep = glob.var2op(keep);
619  global::replay replay(this->glob, ans.glob);
620  replay.start();
621  replay.forward(true, false);
622  if (!range_weight) {
623  if (G.empty()) {
624  for (size_t i = 0; i < this->Range(); i++) {
625  if (!keep_y[i]) continue;
626  replay.clear_deriv();
627  replay.deriv_dep(i) = 1.;
628  replay.reverse(false, false, tail_start, keep);
629  for (size_t j = 0; j < this->Domain(); j++) {
630  if (keep_x[j]) replay.deriv_inv(j).Dependent();
631  }
632  }
633  } else {
634  replay.clear_deriv();
635  for (size_t i = 0; i < this->Range(); i++) {
636  if (!keep_y[i]) continue;
637  glob.subgraph_seq.resize(0);
638  glob.subgraph_seq.push_back(G.dep2op[i]);
639  G.search(glob.subgraph_seq);
640  replay.deriv_dep(i) = 1.;
641  replay.reverse_sub();
642  for (size_t j = 0; j < this->Domain(); j++) {
643  if (keep_x[j]) replay.deriv_inv(j).Dependent();
644  }
645  replay.clear_deriv_sub();
646  }
647  }
648  } else {
649  replay.clear_deriv();
650  replay.reverse(false, true, tail_start, keep);
651  for (size_t j = 0; j < this->Domain(); j++) {
652  if (keep_x[j]) replay.deriv_inv(j).Dependent();
653  }
654  }
655  replay.stop();
656  set_inner_outer(ans);
657  return ans;
658  }
680  ADFun JacFun(std::vector<bool> keep_x = std::vector<bool>(0),
681  std::vector<bool> keep_y = std::vector<bool>(0)) {
682  return JacFun_<false>(keep_x, keep_y);
683  }
702  ADFun WgtJacFun(std::vector<bool> keep_x = std::vector<bool>(0),
703  std::vector<bool> keep_y = std::vector<bool>(0)) {
704  return JacFun_<true>(keep_x, keep_y);
705  }
709  std::vector<Scalar> x = DomainVec();
710  return ADFun(F, x);
711  }
717  std::vector<ADFun> parallel_accumulate(size_t num_threads) {
718  TMBAD_ASSERT(Range() == 1);
719  global glob_split = accumulation_tree_split(glob);
720  autopar ap(glob_split, num_threads);
721  ap.do_aggregate = true;
722  ap.keep_all_inv = true;
723  ap.run();
724  ap.extract();
725  std::vector<ADFun> ans(num_threads);
726  for (size_t i = 0; i < num_threads; i++) ans[i].glob = ap.vglob[i];
727  return ans;
728  }
732  ADFun parallelize(size_t num_threads) {
733  TMBAD_ASSERT(Range() == 1);
734  global glob_split = accumulation_tree_split(glob);
735  autopar ap(glob_split, num_threads);
736  ap.do_aggregate = true;
737  ap.keep_all_inv = false;
738  ap.run();
739  ap.extract();
740  global::Complete<ParalOp> f_parallel(ap);
741  ADFun F(f_parallel, DomainVec());
742  aggregate(F.glob);
743  return F;
744  }
750  void replay() { glob.forward_replay(true, true); }
776  Sparse<ADFun> SpJacFun(std::vector<bool> keep_x = std::vector<bool>(0),
777  std::vector<bool> keep_y = std::vector<bool>(0),
778  SpJacFun_config config = SpJacFun_config()) {
779  ADFun atomic_jac_row;
780  std::vector<Index> rowcounts;
781 
782  Sparse<ADFun> ans;
783 
784  ans.m = Range();
785  ans.n = Domain();
786 
787  if (keep_x.size() == 0) keep_x.resize(Domain(), true);
788  if (keep_y.size() == 0) keep_y.resize(Range(), true);
789  std::vector<bool> keep_var = get_keep_var(keep_x, keep_y);
790 
791  size_t keep_x_count = std::count(keep_x.begin(), keep_x.end(), true);
792  size_t keep_y_count = std::count(keep_y.begin(), keep_y.end(), true);
793 
794  graph G = this->glob.reverse_graph(keep_var);
795 
796  global::replay replay(this->glob, ans.glob);
797  replay.start();
798  replay.forward(true, false);
799 
800  Index NA = -1;
801  std::vector<Index> op2inv_idx = glob.op2idx(glob.inv_index, NA);
802 
803  std::fill(keep_var.begin(), keep_var.end(), true);
804 
805  std::vector<Index> col_idx;
806  for (size_t k = 0; k < glob.dep_index.size(); k++) {
807  size_t i = glob.dep_index[k];
808 
809  glob.subgraph_seq.resize(0);
810  glob.subgraph_seq.push_back(G.dep2op[k]);
811  G.search(glob.subgraph_seq);
812 
813  bool do_compress = false;
814  if (config.compress) {
815  if (rowcounts.size() == 0) rowcounts = G.rowcounts();
816 
817  size_t cost1 = 0;
818  for (size_t i = 0; i < glob.subgraph_seq.size(); i++) {
819  cost1 += rowcounts[glob.subgraph_seq[i]];
820  }
821 
822  size_t cost2 = Domain() + Range() + Domain();
823 
824  if (cost2 < cost1) do_compress = true;
825  }
826 
827  if (true) {
828  glob.clear_array_subgraph(keep_var);
829  keep_var[i] = true;
830  glob.reverse_sub(keep_var);
831  }
832 
833  col_idx.resize(0);
834  for (size_t l = 0; l < glob.subgraph_seq.size(); l++) {
835  Index idx = op2inv_idx[glob.subgraph_seq[l]];
836  if (idx != NA) {
837  Index nrep = glob.opstack[glob.subgraph_seq[l]]->output_size();
838  for (Index r = 0; r < nrep; r++) {
839  if (keep_var[glob.inv_index[idx]]) col_idx.push_back(idx);
840  idx++;
841  }
842  }
843  }
844 
845  ans.i.resize(ans.i.size() + col_idx.size(), k);
846  ans.j.insert(ans.j.end(), col_idx.begin(), col_idx.end());
847  if (!do_compress) {
848  replay.clear_deriv_sub();
849 
850  replay.deriv_dep(k) = 1.;
851 
852  replay.reverse_sub();
853 
854  } else {
855  if (atomic_jac_row.Domain() == 0) {
856  Rcout << "Warning: This is an experimental compression method\n";
857  Rcout << "Disable: 'config(tmbad.sparse_hessian_compress=0)'\n";
858  atomic_jac_row = this->WgtJacFun(keep_x, keep_y);
859  atomic_jac_row.optimize();
860 
861  atomic_jac_row.set_inv_positions();
862 
863  atomic_jac_row = atomic_jac_row.atomic();
864 
865  replay.clear_deriv_sub();
866  Rcout << "done\n";
867 
868  TMBAD_ASSERT(atomic_jac_row.Domain() ==
869  this->Domain() + this->Range());
870  TMBAD_ASSERT(atomic_jac_row.Range() == keep_x_count);
871  }
872  std::vector<Replay> vec(atomic_jac_row.Domain(), Replay(0));
873  for (size_t i = 0; i < this->Domain(); i++) {
874  vec[i] = replay.value_inv(i);
875  }
876  vec[this->Domain() + k] = 1.;
877  std::vector<Replay> r = atomic_jac_row(vec);
878  size_t r_idx = 0;
879  for (size_t i = 0; i < this->Domain(); i++) {
880  if (keep_x[i]) replay.deriv_inv(i) = r[r_idx++];
881  }
882  }
883  for (size_t l = 0; l < col_idx.size(); l++) {
884  replay.deriv_inv(col_idx[l]).Dependent();
885  }
886  }
887  replay.stop();
888  if (config.index_remap) {
889  if (keep_x.size() > 0) {
890  std::vector<Index> remap_j = cumsum0<Index>(keep_x);
891  ans.j = TMBad::subset(remap_j, ans.j);
892  ans.n = keep_x_count;
893  }
894  if (keep_y.size() > 0) {
895  std::vector<Index> remap_i = cumsum0<Index>(keep_y);
896  ans.i = TMBad::subset(remap_i, ans.i);
897  ans.m = keep_y_count;
898  }
899  }
900  set_inner_outer(ans);
901  return ans;
902  }
907  ADFun marginal_gk(const std::vector<Index> &random,
908  gk_config cfg = gk_config()) {
909  ADFun ans;
910  old_state os(this->glob);
911  aggregate(this->glob, -1);
912  global glob_split = accumulation_tree_split(this->glob);
913  os.restore();
914  integrate_subgraph<ADFun> i_s(glob_split, random, cfg);
915  ans.glob = i_s.gk();
916  aggregate(ans.glob, -1);
917  return ans;
918  }
920  ADFun marginal_sr(const std::vector<Index> &random, std::vector<sr_grid> grid,
921  const std::vector<Index> &random2grid, bool perm = true) {
922  ADFun ans;
923  old_state os(this->glob);
924  aggregate(this->glob, -1);
925  global glob_split = accumulation_tree_split(this->glob);
926  os.restore();
927  sequential_reduction SR(glob_split, random, grid, random2grid, perm);
928  ans.glob = SR.marginal();
929  aggregate(ans.glob, -1);
930  return ans;
931  }
933  ADFun marginal_sr(const std::vector<Index> &random,
934  sr_grid grid = sr_grid()) {
935  return marginal_sr(random, std::vector<sr_grid>(1, grid),
936  std::vector<Index>(0));
937  }
942  ADFun compose(ADFun other) {
943  TMBAD_ASSERT2(other.Range() == this->Domain(),
944  "Compostion of incompatible functions");
945  struct composition {
946  const ADFun &f;
947  const ADFun &g;
948  composition(const ADFun &f, const ADFun &g) : f(f), g(g) {}
949  std::vector<ad> operator()(std::vector<ad> x) { return f(g(x)); }
950  };
951  composition fg(*this, other);
952  return ADFun(fg, other.DomainVec());
953  }
958  Decomp2<ADFun> decompose(std::vector<Index> nodes) {
959  Decomp2<ADFun> ans;
960  global &glob1 = ans.first.glob;
961  global &glob2 = ans.second.glob;
962 
963  OperatorPure *invop = glob.getOperator<global::InvOp>();
964  std::vector<bool> keep(nodes.size(), true);
965  for (size_t i = 0; i < nodes.size(); i++)
966  if (glob.opstack[nodes[i]] == invop) keep[i] = false;
967  nodes = subset(nodes, keep);
968 
969  glob1 = this->glob;
970  glob1.dep_index.resize(0);
971  std::vector<Index> dep1 = glob1.op2var(nodes);
972  glob1.ad_start();
973  for (size_t i = 0; i < dep1.size(); i++) {
974  ad_plain tmp;
975  tmp.index = dep1[i];
976  tmp.Dependent();
977  }
978  glob1.ad_stop();
979  glob1.eliminate();
980 
981  glob2 = this->glob;
982  substitute(glob2, nodes);
983  glob2.eliminate();
984 
985  set_inner_outer(ans.first);
986  set_inner_outer(ans.second);
987 
988  return ans;
989  }
994  Decomp2<ADFun> decompose(const char *name) {
995  std::vector<Index> nodes = find_op_by_name(this->glob, name);
996  return decompose(nodes);
997  }
1003  if (find_op_by_name(glob, "RefOp").size() == 0) return;
1004 
1005  std::vector<bool> keep_x(Domain(), true);
1006  std::vector<bool> keep_y(Range(), true);
1007  std::vector<bool> vars = get_keep_var(keep_x, keep_y);
1008 
1009  vars = reverse_boundary(glob, vars);
1010 
1011  std::vector<Index> nodes = which<Index>(glob.var2op(vars));
1012 
1013  Decomp2<ADFun> decomp = decompose(nodes);
1014 
1015  size_t n_inner = decomp.first.Domain();
1016  size_t n_outer = decomp.first.Range();
1017 
1018  decomp.first.glob.inv_index.resize(0);
1019 
1020  std::vector<ad_aug> empty;
1021  std::vector<ad_aug> gx = decomp.first(empty);
1022 
1023  ADFun &f = decomp.second;
1024 
1025  f.replay();
1026 
1027  TMBAD_ASSERT(n_inner + n_outer == f.Domain());
1028  TMBAD_ASSERT(find_op_by_name(f.glob, "RefOp").size() == 0);
1029  TMBAD_ASSERT(find_op_by_name(f.glob, "InvOp").size() == f.Domain());
1030  TMBAD_ASSERT(gx.size() == n_outer);
1031 
1032  for (size_t i = 0; i < n_outer; i++) {
1033  Index j = f.glob.inv_index[n_inner + i];
1034 
1035  if (gx[i].constant()) {
1036  f.glob.opstack[j] = glob.getOperator<global::ConstOp>();
1037  } else {
1038  f.glob.opstack[j] = glob.getOperator<global::RefOp>(
1039  gx[i].data.glob, gx[i].taped_value.index);
1040  }
1041  }
1042  f.glob.inv_index.resize(n_inner);
1043 
1044  *this = f;
1045  }
1055  std::vector<ad_aug> resolve_refs() {
1056  TMBAD_ASSERT2(
1057  inner_inv_index.size() == 0 && outer_inv_index.size() == 0,
1058  "'resolve_refs' can only be run once for a given function object")
1059 
1060  ;
1061  std::vector<Index> seq = find_op_by_name(glob, "RefOp");
1062  std::vector<Replay> values(seq.size());
1063  std::vector<Index> dummy_inputs;
1064  ForwardArgs<Replay> args(dummy_inputs, values);
1065  for (size_t i = 0; i < seq.size(); i++) {
1066  TMBAD_ASSERT(glob.opstack[seq[i]]->input_size() == 0);
1067  TMBAD_ASSERT(glob.opstack[seq[i]]->output_size() == 1);
1068  glob.opstack[seq[i]]->forward_incr(args);
1069  glob.opstack[seq[i]]->deallocate();
1070  glob.opstack[seq[i]] = get_glob()->getOperator<global::InvOp>();
1071  }
1072  inner_inv_index = glob.inv_index;
1073  outer_inv_index = glob.op2var(seq);
1074 
1075  glob.inv_index.insert(glob.inv_index.end(), outer_inv_index.begin(),
1076  outer_inv_index.end());
1077  return values;
1078  }
1079  std::vector<Index> inner_inv_index;
1080  std::vector<Index> outer_inv_index;
1082  size_t DomainInner() const { return inner_inv_index.size(); }
1084  size_t DomainOuter() const { return outer_inv_index.size(); }
1088  void SwapInner() {
1089  std::swap(glob.inv_index, inner_inv_index);
1090  force_update();
1091  }
1095  void SwapOuter() {
1096  std::swap(glob.inv_index, outer_inv_index);
1097  force_update();
1098  }
1101  return (DomainInner() > 0) || (DomainOuter() > 0);
1102  }
1104  std::vector<bool> DomainOuterMask() {
1105  std::vector<bool> mark_outer =
1106  glob.mark_space(glob.values.size(), outer_inv_index);
1107  return subset(mark_outer, glob.inv_index);
1108  }
1116  void set_inner_outer(ADFun &ans, const std::vector<bool> &outer_mask) {
1117  if (inner_outer_in_use()) {
1118  std::vector<bool> mark(outer_mask);
1119  mark.resize(ans.Domain(), false);
1120 
1121  ans.outer_inv_index = subset(ans.glob.inv_index, mark);
1122 
1123  mark.flip();
1124 
1125  ans.inner_inv_index = subset(ans.glob.inv_index, mark);
1126  }
1127  }
1128  void set_inner_outer(ADFun &ans) {
1129  if (inner_outer_in_use()) {
1130  set_inner_outer(ans, DomainOuterMask());
1131  }
1132  }
1133  void DomainReduce(const std::vector<bool> &inv_keep) {
1134  std::vector<bool> outer_mask = DomainOuterMask();
1135  outer_mask = subset(outer_mask, inv_keep);
1136  glob.inv_index = subset(glob.inv_index, inv_keep);
1137  set_inner_outer(*this, outer_mask);
1138  }
1144  void inactivate(std::vector<Index> nodes) {
1145  for (size_t i = 0; i < nodes.size(); i++) {
1146  OperatorPure *op = glob.opstack[nodes[i]];
1147  glob.opstack[nodes[i]] = glob.getOperator<global::NullOp2>(
1148  op->input_size(), op->output_size());
1149  op->deallocate();
1150  }
1151  }
1152 };
1164 template <class Functor, class Test = ParametersChanged>
1165 ADFun<> ADFun_retaping(Functor &F, const std::vector<ad_aug> &x,
1166  Test test = Test()) {
1167  typedef retaping_derivative_table<Functor, ADFun<>, Test> DTab;
1168  global::Complete<AtomOp<DTab> > Op(F, x, test);
1169  return ADFun<>(Op, x);
1170 }
1171 
1173 template <class dummy = void>
1175  ADFun<> Fp;
1176  ADFun_packed(const ADFun<> &Fp) : Fp(Fp) {}
1177  ADFun_packed() {}
1178  ad_segment operator()(const std::vector<ad_segment> &x) {
1179  std::vector<ad_segment> xp(x.size());
1180  for (size_t i = 0; i < xp.size(); i++) xp[i] = pack(x[i]);
1181  std::vector<ad_aug> yp = Fp(concat(xp));
1182  return unpack(yp, 0);
1183  }
1184  bool initialized() { return Fp.Domain() != 0; }
1185 };
1193 template <class Functor, class Test>
1194 ADFun_packed<> ADFun_retaping(Functor &F, const std::vector<ad_segment> &x,
1195  Test test) {
1196  static const bool packed = true;
1198  packed>
1199  DTab;
1200  PackWrap<Functor> Fp(F);
1201  std::vector<ad_segment> xp(x.size());
1202  for (size_t i = 0; i < xp.size(); i++) xp[i] = pack(x[i]);
1203  std::vector<ad_aug> xp_ = concat(xp);
1204  PackWrap<Test> testp(test);
1205  global::Complete<AtomOp<DTab> > Op(Fp, xp_, testp);
1206  ADFun<> TapeFp(Op, xp_);
1207  return ADFun_packed<>(TapeFp);
1208 }
1209 
1210 template <class ADFun>
1211 struct Sparse : ADFun {
1212  std::vector<Index> i;
1213  std::vector<Index> j;
1214  Index m;
1215  Index n;
1216  Sparse() {}
1217  Sparse(const ADFun &f) : ADFun(f) {}
1218  std::vector<Index> a2v(const std::valarray<Index> &x) const {
1219  return std::vector<Index>(&x[0], &x[0] + x.size());
1220  }
1221  std::valarray<Index> v2a(const std::vector<Index> &x) const {
1222  return std::valarray<Index>(x.data(), x.size());
1223  }
1224  std::valarray<Index> row() const { return v2a(i); }
1225  std::valarray<Index> col() const { return v2a(j); }
1226  void subset_inplace(const std::valarray<bool> &x) {
1227  i = a2v(row()[x]);
1228  j = a2v(col()[x]);
1229  this->glob.dep_index = a2v(v2a(this->glob.dep_index)[x]);
1230  }
1231  void transpose_inplace() {
1232  std::swap(i, j);
1233  std::swap(m, n);
1234  }
1235 };
1236 
1243 template <class ADFun>
1244 struct Decomp2 : std::pair<ADFun, ADFun> {
1245  struct composition {
1246  typedef ad_aug ad;
1247  const ADFun &f;
1248  const ADFun &g;
1249  composition(const ADFun &f, const ADFun &g) : f(f), g(g) {}
1250  std::vector<ad> operator()(std::vector<ad> x) {
1251  std::vector<ad> y = g(x);
1252  x.insert(x.end(), y.begin(), y.end());
1253  return f(x);
1254  }
1255  };
1256  operator ADFun() {
1257  ADFun &g = this->first;
1258  ADFun &f = this->second;
1259  composition fg(f, g);
1260  return ADFun(fg, g.DomainVec());
1261  }
1285  Decomp3<ADFun> HesFun(std::vector<bool> keep_rc = std::vector<bool>(0),
1286  bool sparse_1 = true, bool sparse_2 = true,
1287  bool sparse_3 = true) {
1288  ADFun &g = this->first;
1289  ADFun &f = this->second;
1290  Decomp3<ADFun> ans;
1291  TMBAD_ASSERT(f.Range() == 1);
1292 
1293  std::vector<bool> keep_f = std::vector<bool>(f.Range(), true);
1294  std::vector<bool> keep_g = std::vector<bool>(g.Range(), true);
1295 
1296  typedef ad_aug ad;
1297  global &glob = ans.first.glob;
1298  glob.ad_start();
1299  std::vector<Scalar> x_ = f.DomainVec();
1300  size_t k = g.Range();
1301  size_t n = f.Domain() - k;
1302 
1303  std::vector<bool> mask_x(f.Domain(), false);
1304  for (size_t i = 0; i < n; i++) mask_x[i] = true;
1305  std::vector<bool> mask_s(mask_x);
1306  mask_s.flip();
1307 
1308  std::vector<ad> x(x_.begin(), x_.end() - k);
1309  Independent(x);
1310  std::vector<ad> s = g(x);
1311  std::vector<ad> s0(s.size());
1312 
1313  for (size_t i = 0; i < s.size(); i++) s0[i] = s[i].copy0();
1314  std::vector<ad> xs(x);
1315  xs.insert(xs.end(), s.begin(), s.end());
1316  std::vector<ad> xs0(x);
1317  xs0.insert(xs0.end(), s0.begin(), s0.end());
1318  if (false) {
1319  TMBAD_ASSERT(keep_rc.size() == n || keep_rc.size() == 0);
1320  std::vector<bool> keep_xy(keep_rc);
1321  keep_xy.resize(f.Domain(), true);
1322  ADFun f_grad = f.JacFun(keep_xy, keep_f);
1323  }
1324  ADFun f_grad = f.JacFun();
1325  std::vector<ad> z = subset(f_grad(xs), mask_x);
1326  std::vector<ad> z0 = subset(f_grad(xs0), mask_s);
1327  std::vector<ad> xw(x);
1328  xw.insert(xw.end(), z0.begin(), z0.end());
1329  std::vector<ad> z1 = g.WgtJacFun()(xw);
1330  for (size_t i = 0; i < n; i++) z[i] += z1[i];
1331  Dependent(z);
1332  glob.ad_stop();
1333  glob.eliminate();
1334  ans.first.glob = glob;
1335 
1336  if (sparse_1) {
1337  ans.first = ans.first.SpJacFun(keep_rc, keep_rc);
1338  } else {
1339  ans.first = ans.first.JacFun(keep_rc, keep_rc);
1340  }
1341  ans.first.glob.eliminate();
1342  f.set_inner_outer(ans.first);
1343 
1344  if (sparse_2) {
1345  ans.second = g.SpJacFun(keep_rc);
1346  } else {
1347  ans.second = g.JacFun(keep_rc);
1348  }
1349  ans.second.glob.eliminate();
1350 
1351  Sparse<ADFun> B;
1352  if (sparse_3) {
1353  B = f_grad.SpJacFun(mask_s, mask_s);
1354  } else {
1355  B = f_grad.JacFun(mask_s, mask_s);
1356  }
1357  ans.third.glob.ad_start();
1358  std::vector<ad> xx(x_.begin(), x_.end() - k);
1359  Independent(xx);
1360  s = g(xx);
1361  xs = xx;
1362  xs.insert(xs.end(), s.begin(), s.end());
1363  z = B(xs);
1364  Dependent(z);
1365  ans.third.glob.ad_stop();
1366  ans.third.glob.eliminate();
1367  ans.third.i = B.i;
1368  ans.third.j = B.j;
1369  f.set_inner_outer(ans.third);
1370 
1371  return ans;
1372  }
1373 };
1374 
1384 template <class ADFun>
1385 struct Decomp3 : Decomp2<Sparse<ADFun> > {
1386  Sparse<ADFun> third;
1387 };
1388 
1389 } // namespace TMBad
1390 #endif // HAVE_TMBAD_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:157
std::vector< bool > get_keep_var(std::vector< bool > keep_x, std::vector< bool > keep_y)
Get necessary variables to keep for given input/output selection.
Definition: TMBad.hpp:280
std::vector< Index > op2var(const std::vector< Index > &seq)
Get variables produces by a node seqence.
Definition: TMBad.cpp:1435
std::vector< T > subset(const std::vector< T > &x, const std::vector< bool > &y)
Vector subset by boolean mask.
diff --git a/Tutorial.html b/Tutorial.html index 290ee1e17..cd418aeb0 100644 --- a/Tutorial.html +++ b/Tutorial.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/Validation.html b/Validation.html index afdd71f6d..f5b70ba56 100644 --- a/Validation.html +++ b/Validation.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/Vectorize_8hpp.html b/Vectorize_8hpp.html index a5c031c8b..b1b06e22c 100644 --- a/Vectorize_8hpp.html +++ b/Vectorize_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/Vectorize_8hpp_source.html b/Vectorize_8hpp_source.html index 186854e47..4c924bf92 100644 --- a/Vectorize_8hpp_source.html +++ b/Vectorize_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/_book/404.html b/_book/404.html deleted file mode 100644 index 28b86979b..000000000 --- a/_book/404.html +++ /dev/null @@ -1,364 +0,0 @@ - - - - - - - Page not found | The comprehensive TMB documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
- -
-
- - -
-
- -
-
-

Page not found

-

The page you requested cannot be found (perhaps it was moved or renamed).

-

You may want to try searching to find the page's new location, or use -the table of contents to find the page you are looking for.

-
-
- -
-
-
- - -
-
- - - - - - - - - - - - - - - diff --git a/_book/Appendix.html b/_book/Appendix.html deleted file mode 100644 index 2f9206eb8..000000000 --- a/_book/Appendix.html +++ /dev/null @@ -1,507 +0,0 @@ - - - - - - - 16 Appendix | The comprehensive TMB documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
- -
-
- - -
-
- -
-
-

16 Appendix

-
-

16.1 Notation

-

We use the following notation

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NotationExplanation
\(u\)The random effects vector
\(\theta\)Parameter vector (first part)
\(\beta\)Parameter vector (second part)
\(f(u,\beta,\theta)\)Joint negative log likelihood
\(x\)Data
\(E(u|x)\)Conditional expectation of random effect given data
\(\hat u\)The posterior mode \(\arg \min_{u} f(u,\beta,\theta)\)
-
-
-

16.2 Profiling the inner problem

-

This section describes the underlying theory of the argument profile -to MakeADFun intended to speedup and robustify linear mixed effect -models with a large number of fixed effects. With a few common model -properties (Assumption 1 and 2 below), which must be checked by the -user, one can apply the profile argument to move outer parameters to -the inner problem without affecting the model result.

-

Theorem 1 (Profiling inner problem) -Assume that for any \(\beta\) and \(\theta\)

-
    -
  • Assumption 1 The partial derivative \(\partial_{\beta} f(u,\beta,\theta)\) is a linear function of u.
  • -
  • Assumption 2 The posterior mean is equal to the posterior mode: \(E(u|x)=\hat u\)
  • -
-

Then the MLE

-

\[\hat \beta := \arg \max_{\beta} \left( \int \exp(-f(u,\beta,\theta)) \: du \right) \]

-

is a solution to the augmented system

-

\[ -\begin{split} -\partial_{u} f(u,\beta,\theta) &= 0 \\ -\partial_{\beta} f(u,\beta,\theta) &= 0 -\end{split} -\]

-

The augmented system defines \(\hat \beta\) implicitly as function of the posterior mode \(\hat u\).

-

Proof

-

Differentiation of the negative log marginal likelihood gives

-

\[ -\begin{split} -\partial_{\beta} \left( -\log \int \exp(-f(u,\beta,\theta)) \: du \right) &= E(\partial_{\beta}f(u,\beta,\theta) |x) \\ -&= \partial_{\beta} f(u,\beta,\theta)_{|u=\hat u(\beta,\theta)} -\end{split} -\]

-

where the first equality holds in general and the second equality follows from assumptions (1) and (2).

-

\(\square\)

-
-

16.2.1 Example

-

The standard situation for which assumption 1 holds is when the -\(\beta\)s are the linear fixed effects of a mixed model. In this case -the joint negative log density takes the form -\[ f(u,\beta,\theta) = \frac{1}{2}(u-A\beta)'\Sigma_{\theta}^{-1}(u-A\beta) + ... \] -for some design matrix \(A\) where ’ \(...\) ’ does not depend on -\(\beta\). The derivative -\[ \partial_{\beta} f(u,\beta,\theta) = A'\Sigma_{\theta}^{-1}(u-A\beta) \] -is thus a linear function of the random effect \(u\).

-

In general assumption 2 holds exact for models with a symmetric -(e.g. Gaussian) posterior distribution.

-
-
-
-

16.3 Theory underlying sdreport

-

This section supplements the documentation of ?sdreport by adding -some missing details.

-

As previously, we consider a general latent variable model with -parameter vector \(\theta\), random effect vector \(u\) and observation -vector \(x\). The TMB estimation procedure works as follows:

-
    -
  1. The MLE \(\hat\theta=\hat\theta(x)\) is calculated and used as -estimator of \(\theta\).
  2. -
  3. Denote by \(\hat u(\theta,x)\) the random effect mode depending on -\(\theta\) and \(x\). Now, plug in the MLE, and we get our estimator -\(\hat u\left(\hat\theta(x),x\right)\) of \(u\).
  4. -
-

In general, we assume that \(\hat\theta\) is a consistent estimator of -\(\theta\). However, we do not in general require \(\hat u\) to be -consistent for \(u\). The purpose of sdreport is, for a given -realization of the pair \((u,x)\), to quantify the joint uncertainty of -\((\hat u,\hat\theta)\) as estimator of \((u,\theta)\). That is, we are -interested in the variance matrix of the difference

-

\[D:=\begin{pmatrix}\hat u\left(\hat\theta(x),x\right) - u\\ \hat\theta(x) - \theta\end{pmatrix}\]

-

An important point of the uncertainty quantification is to account -for plugging in \(\hat\theta\) rather than using the true \(\theta\).

-

We calculate the variance using the standard formula:

-

\[V[D]=E(V(D|x))+V(E(D|x))\]

-

Consider \(D\) conditionally on \(x\). The second component does not -depend on \(u\) and \(\hat u\) is constant given \(x\):

-

\[V[D|x]=\begin{pmatrix}V[u|x] & 0 \\ 0 & 0 \end{pmatrix}\]

-

It follows that

-

\[E(V[D|x])=\begin{pmatrix}E(V[u|x]) & 0 \\ 0 & 0 \end{pmatrix}\]

-

As central estimator of \(E(V[u|x])\) we use \(V[u|x]\) which is -approximated by the inverse random effect Hessian \(H_{uu}^{-1}\) based -on the assumption that \(u|x\) is well approximated by a Gaussian -distribution (a reasonable assumption given that we are using the -Laplace approximation). This explains the first term of variance formula in ?sdreport:

-

\[E(V[D|x]) \approx \begin{pmatrix} H_{uu}^{-1} & 0 \\ 0 & 0 \end{pmatrix}\]

-

Likewise,

-

\[E[D|x]=\begin{pmatrix}\hat u\left(\hat\theta(x),x\right) - E(u|x)\\ \hat\theta(x) - \theta\end{pmatrix}\]

-

Again, asuming a Gaussian approximation of \(u|x\), it follows that \(E(u|x) \approx \hat u(\theta,x)\):

-

\[E[D|x]=\begin{pmatrix}\hat u\left(\hat\theta(x),x\right) - \hat u(\theta,x)\\ \hat\theta(x) - \theta\end{pmatrix}\]

-

We approximate the expectation using linerization of \(\theta \rightarrow \hat u(\theta,x)\) around \(\hat\theta(x)\)

-

\[E[D|x]=J_x \cdot (\hat\theta(x) - \theta)\]

-

We now have the second term of the variance formula in ?sdreport:

-

\[V(E[D|x]) \approx J_x V(\hat\theta(x)) J_x'\]

-

This term becomes negligible if the amount of data is high because of -the assumed asymptotic consistency of \(\hat\theta\).

- -
-
-Pedersen, Martin Wæver, Casper Willestofte Berg, Uffe Høgsbro Thygesen, Anders Nielsen, and Henrik Madsen. 2011. “Estimation Methods for Nonlinear State-Space Models in Ecology.” Ecological Modelling 222 (8): 1394–1400. -
-
-Thygesen, Uffe Høgsbro, Christoffer Moesgaard Albertsen, Casper Willestofte Berg, Kasper Kristensen, and Anders Nielsen. 2017. “Validation of Ecological State Space Models Using the Laplace Approximation.” Environmental and Ecological Statistics 24 (2): 317–39. -
-
-
-
-
- -
-
-
- - -
-
- - - - - - - - - - - - - - - diff --git a/_book/AtomicFunctions.html b/_book/AtomicFunctions.html deleted file mode 100644 index c6ae0aeca..000000000 --- a/_book/AtomicFunctions.html +++ /dev/null @@ -1,502 +0,0 @@ - - - - - - - 15 Atomic functions | The comprehensive TMB documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
- -
-
- - -
-
- -
-
-

15 Atomic functions

-

Custom functions and derivatives can be added to the TMB library. This may be necessary for the following reasons:

-
    -
  • Adaptive (e.g. iterative) algorithms cannot be represented by a fixed computational graph and thus cannot be directly differentiated using TMB. Algorithms that use parameter dependent if-else branching are examples of such functions.
  • -
  • Some functions have so many floating point operations that it is infeasible to expand the computational graph. Memory usage may be greatly reduced in such cases by collapsing the computational graph to a singe node with multiple inputs and outputs.
  • -
-
-

15.1 Reverse mode differentiation

-

TMB uses CppAD as its engine for reverse mode derivatives. In order to add a new primitive function

-

\[f: R^n \rightarrow R^m\]

-

we must inform CppAD how to calculate derivatives of this function in reverse mode. That is, for any range space vector \(w \in R^m\) we must calculate the gradient of the function \(R^n \rightarrow R\) given by

-

\[ x \rightarrow \text{sum}( f(x) \odot w ) \]

-

where ‘\(\odot\)’ is pointwise multiplication.

-
-
-

15.2 Example: Adding new primitive function with known derivatives

-

As an example consider the Lambert W function defined implicitly by

-

\[y = W(y e^y)\]

-

Here, we only consider \(W\) as defined on the positive reals. It follows, by differentiating the above identity, that

-

\[ W'(x) = \frac{1}{ \exp\left(W(x)\right) \left(1 + W(x)\right) } \]

-

When coding reverse-mode derivatives we can assume that the function value \(W(x)\) has already been computed during a forward pass. For efficiency reasons we should use this intermediate calculation rather than re-calculating \(W(x)\) in the reverse pass.

-

We’ll assume that a plain C++ function (taking double types as input/output) is available to calculate \(W(x)\). It doesn’t matter whether you have the source code of an implementation or just the header with linkage to an external library:

-
double LambertW(double x);
-

The macro TMB_ATOMIC_VECTOR_FUNCTION() is used to declare our new primitive Lambert \(W\) function:

-
TMB_ATOMIC_VECTOR_FUNCTION(
-    // ATOMIC_NAME
-    LambertW
-    ,
-    // OUTPUT_DIM
-    1,
-    // ATOMIC_DOUBLE
-    ty[0] = LambertW(tx[0]); // Call the 'double' version
-    ,
-    // ATOMIC_REVERSE
-    Type W  = ty[0];                    // Function value from forward pass
-    Type DW = 1. / (exp(W) * (1. + W)); // Derivative
-    px[0] = DW * py[0];                 // Reverse mode chain rule
-)
-

Let’s explain in detail what is going on. The macro takes four arguments:

-
    -
  1. ATOMIC_NAME: Name of new primitive function taking CppAD::vector as input and output.
  2. -
  3. OUTPUT_DIM: Dimension of the CppAD::vector which is the function output.
  4. -
  5. ATOMIC_DOUBLE: Specifies how to evaluate the primitive function for the ordinary double type. tx denotes the input vector and ty the output vector of the function \(f: R^n \rightarrow R^m\). In this case both have dimension one.
  6. -
  7. ATOMIC_REVERSE: How to calculate the reverse mode derivatives for a general Type. Again tx and ty denote function input and output but now ty has been computed and is available as an intermediate value. The vectors px and py denote partial derivatives of the end result with respect to \(x\) and \(y\) respectively. py is given and we must calculate px using the chain rule. This first order derivative rule is automatically expanded up to higher orders required when using TMB’s random effects calculations.
  8. -
-

To make the function work like other TMB functions it is convenient to define scalar and a vectorized versions that call the atomic function:

-
// Scalar version
-template<class Type>
-Type LambertW(Type x){
-  CppAD::vector<Type> tx(1);
-  tx[0] = x;
-  return LambertW(tx)[0];
-}
-
-// Vectorized version
-VECTORIZE_1t(LambertW)
-
-

15.2.1 Testing the primitive function

-

Here is a complete example using Newton’s method to calculate the Lambert \(W\) function -(there are more sophisticated algorithms such as the one by Fukushima (2013), -but that doesn’t matter for this example):

-

-#include <TMB.hpp>
-
-// Double version of Lambert W function
-double LambertW(double x) {
-  double logx = log(x);
-  double y = (logx > 0 ? logx : 0);
-  int niter = 100, i=0;
-  for (; i < niter; i++) {
-    if ( fabs( logx - log(y) - y) < 1e-9) break;
-    y -= (y - exp(logx - y)) / (1 + y);
-  }
-  if (i == niter) Rf_warning("W: failed convergence");
-  return y;
-}
-
-TMB_ATOMIC_VECTOR_FUNCTION(
-    // ATOMIC_NAME
-    LambertW
-    ,
-    // OUTPUT_DIM
-    1,
-    // ATOMIC_DOUBLE
-    ty[0] = LambertW(tx[0]); // Call the 'double' version
-    ,
-    // ATOMIC_REVERSE
-    Type W  = ty[0];                    // Function value from forward pass
-    Type DW = 1. / (exp(W) * (1. + W)); // Derivative
-    px[0] = DW * py[0];                 // Reverse mode chain rule
-)
-
-// Scalar version
-template<class Type>
-Type LambertW(Type x){
-  CppAD::vector<Type> tx(1);
-  tx[0] = x;
-  return LambertW(tx)[0];
-}
-
-// Vectorized version
-VECTORIZE1_t(LambertW)
-
-template<class Type>
-Type objective_function<Type>::operator() ()
-{
-  PARAMETER_VECTOR(x);
-  Type f = LambertW(x).sum();
-  return f;
-}
-

And from R

-
compile("lambert.cpp")
-dyn.load(dynlib("lambert"))
-
-

15.2.1.1 Checking function value and derivatives

-

Check definition of the function:

-
obj <- MakeADFun(data=list(), parameters=list(x=1), DLL="lambert")
-obj$fn(7 * exp(7))
-
## [1] 7
-

Check derivatives using the numDeriv package:

-
numDeriv::grad(obj$fn, 7)
-
## [1] 0.08626538
-
obj$gr(7)
-
##            [,1]
-## [1,] 0.08626538
-

Also try second order derivatives:

-
numDeriv::hessian(obj$fn, 7)
-
##             [,1]
-## [1,] -0.01038959
-
obj$he(7)
-
##             [,1]
-## [1,] -0.01038969
-
-
-
-
-

15.3 Other approaches

-

For the Lambert \(W\) function we know how to calculate the derivatives. There are cases for which the derivatives are impossible (or difficult) to write down. If you’re in this situation you may want to try using forward mode AD to help in defining an atomic function. A full worked out example is available here: adaptive_integration.cpp. Derivatives are calculated automatically and if-else branching is allowed. The main downside with this approach is that it is limited to functions with very few inputs.

-

Checkpointing is another useful technique. It is demonstrated in the example register_atomic.cpp. It does not work for adaptive algorithms but is otherwise automatic. It is useful to reduce AD memory usage in cases where the same sequence of operations is being applied many times.

- - -
-
-
- -
-
-
- - -
-
- - - - - - - - - - - - - - - diff --git a/_book/CppTutorial.html b/_book/CppTutorial.html deleted file mode 100644 index 10d0f72e6..000000000 --- a/_book/CppTutorial.html +++ /dev/null @@ -1,390 +0,0 @@ - - - - - - - 10 C++ tutorial | The comprehensive TMB documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
- -
-
- - -
-
- -
-
-

10 C++ tutorial

-
-

10.0.1 I know R but not C++

-

Summary of how syntax differs between R and C++:

-
              R code             C++/TMB code
-
-Comments      #                  //                          // Comment symbol
-Constants     3.4                Type(3.4);                  // Explicit casting recommended in TMB
-Scalar        x = 5.2            Type x = Type(5.2);         // Variables must have type
-Arrays        x = numeric(10)    vector<Type> x(10);         // C++ code here does NOT initialize to 0
-Indexing      x[1]+x[10]         x(0)+x(9);                  // C++ indexing is zero-based
-Loops         for(i in 1:10)     for(int i=1;i<=10;i++)      // Integer i must be declared in C++
-Increments    x[1] = x[1] + 3    x(0) += 3.0;                // += -= *= /= incremental operators in C++
-

It is important to note the following difference compared to R:

-
-

Vectors, matrices and arrays are not zero-initialized in C++.

-
-

A zero initialized object is created using Eigens setZero():

-
matrix<Type> m(4,5);
-m.setZero();
-
-
-

10.0.2 I know C++

-

TMB specific C++ include: -- You should not use if(x) statements where x is a PARAMETER, -or is derived from a variable of type PARAMETER. (It is OK to use -if on DATA types, however.) TMB will remove the if(x) -statement, so the code will produce unexpected results.

- -
-
-
- -
-
-
- - -
-
- - - - - - - - - - - - - - - diff --git a/_book/Densities.html b/_book/Densities.html deleted file mode 100644 index 7ab11e75e..000000000 --- a/_book/Densities.html +++ /dev/null @@ -1,520 +0,0 @@ - - - - - - - 6 Multivariate distributions | The comprehensive TMB documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
- -
-
- - -
-
- -
-
-

6 Multivariate distributions

-

The namespace

-
using namespace density;
-

gives access to a variety of multivariate normal distributions:

-
    -
  • Multivariate normal distributions specified via a covariance matrix -(structured or unstructured).
  • -
  • Autoregressive (AR) processes.
  • -
  • Gaussian Markov random fields (GMRF) defined on regular grids or -defined via a (sparse) precision matrix.
  • -
  • Separable covariance functions, i.e. time-space separability.
  • -
-

These seemingly unrelated concepts are all implemented via the notion -of a distribution, which explains why they are placed in the same -namespace. You can combine two distributions, and this lets you -build up complex multivariate distributions using extremely compact -notation. Due to the flexibility of the approach it is more abstract -than other parts of TMB, but here it will be explained from -scratch. Before looking at the different categories of multivariate -distributions we note the following which is of practical importance:

-
    -
  • All members in the density namespace return the negative log -density, opposed to the univariate densities in R style distributions.
  • -
-
-

6.1 Multivariate normal distributions

-

Consider a zero-mean multivariate normal distribution -with covariance matrix Sigma (symmetric positive definite), -that we want to evaluate at x:

-
int n = 10;
-vector<Type> x(n);           // Evaluation point           
-x.fill(0.0);                 // Point of evaluation: x = (0,0,...,0)
-

The negative log-normal density is evaluated as follows:

-
using namespace density;
-matrix<Type> Sigma(n,n);     // Covariance matrix
-// ..... User must assign value to Sigma here
-res = MVNORM(Sigma)(x);      // Evaluate negative log likelihood
-

In the last line MVNORM(Sigma) should be interpreted as a -multivariate density, which via the last parenthesis (x) is -evaluated at x. A less compact way of expressing this is

-
MVNORM_t<Type> your_dmnorm(Sigma);
-res = your_dmnorm(x);
-

in which your_dmnorm is a variable that holds the “density.”

-

Note, that the latter way (using the MVNORM_t) is more efficient -when you need to evaluate the density more than once, i.e. for different values of x.

-

Sigma can be parameterized in different ways. Due to the symmetry of -Sigma there are at most n(n+1)/2 free parameters (n variances -and n(n-1)/2 correlation parameters). If you want to estimate all of -these freely (modulo the positive definite constraint) you can use -UNSTRUCTURED_CORR() to specify the correlation matrix, and -VECSCALE() to specify variances. UNSTRUCTURED_CORR() takes as -input a vector of dummy parameters that internally is used to build the -correlation matrix via its Cholesky factor.

-
using namespace density;
-int n = 10;
-vector<Type> unconstrained_params(n*(n-1)/2);  // Dummy parameterization of correlation matrix
-vector<Type> sds(n);                           // Standard deviations
-res = VECSCALE(UNSTRUCTURED_CORR(unconstrained_params),sds)(x);
-

If all elements of dummy_params are estimated we are in effect -estimating a full correlation matrix without any constraints on its -elements (except for the mandatory positive definiteness). The actual -value of the correlation matrix, but not the full covariance matrix, -can easily be assessed using the .cov() operator

-
matrix<Type> Sigma(n,n);
-Sigma = UNSTRUCTURED_CORR(unconstrained_params).cov();
-REPORT(Sigma);                                         // Report back to R session
-
-
-

6.2 Autoregressive processes

-

Consider a stationary univariate Gaussian AR1 process -x(t),t=0,…,n-1. The stationary distribution is chosen so that:

-
    -
  • x(t) has mean 0 and variance 1 (for all t).
  • -
-

The multivariate density of the vector x can be evaluated as follows

-
int n = 10;
-using namespace density;
- 
-vector<Type> x(n);           // Evaluation point
-x.fill(0.0);                 // Point of evaluation: x = (0,0,...,0)
-Type rho = 0.2;              // Correlation parameter
-res = AR1(rho)(x);           // Evaluate negative log-density of AR1 process at point x 
-

Due to the assumed stationarity the correlation parameter must -satisfy:

-
    -
  • Stationarity constraint: -1 < rho < 1
  • -
-

Note that cor[x(t),x(t-1)] = rho.

-

The SCALE() function can be used to set the standard deviation.

-
Type sigma = 2.1;            // standard deviation of x
-res = SCALE(AR1(rho),sigma)(x);
-

Now, var[x(t)] = sigma^2. Because all elements of x are scaled by -the same constant we use SCALE rather than VECSCALE.

-
-

6.2.0.1 Multivariate AR1 processes

-

This is the first real illustration of how distributions can be used -as building blocks to obtain more complex distributions. Consider the -p dimensional AR1 process

-
int n = 10;                   // Number of time steps
-int p=3;                      // dim(x)
-array<Type> x(p,n);           // Evaluation point
-

The columns in x refer to the different time points. We then -evaluate the (negative log) joint density of the time series.

-
MVNORM_t<Type> your_dmnorm(Sigma);  // Density of x(t) 
-Type phi;                           // Correlation parameter
-res = AR1(phi,your_dmnorm)(x);
-

Note the following:

-
    -
  • We have introduced an intermediate variable your_dmnorm, which -holds the p-dim density marginal density of x(t). This is a -zero-mean normal density with covariance matrix Sigma.
  • -
  • All p univariate time series have the same serial correlation phi.
  • -
  • The multivariate process x(t) is stationary in the same sense as -the univariate AR1 process described above.
  • -
-
-
-

6.2.0.2 Higher order AR processes

-

There also exists ARk_t of arbitrary autoregressive order.

-
-
-
-

6.3 Gaussian Markov random fields (GMRF)

-

GMRF may be defined in two ways:

-
    -
  1. Via a (sparse) precision matrix Q.
  2. -
  3. Via a d-dimensional lattice.
  4. -
-

For further details please see GMRF_t. Under 1) a sparse Q -corresponding to a Matern covariance function can be obtained via the -R_inla namespace.

-
-
-

6.4 Separable construction of covariance (precision) matrices

-

A typical use of separability is to create space-time models with a -sparse precision matrix. Details are given in SEPARABLE_t. Here -we give a simple example.

-

Assume that we study a quantity x that changes both in space and -time. For simplicity we consider only a one-dimensional space. We -discretize space and time using equidistant grids, and assume that the -distance between grid points is 1 in both dimensions. We then define -an AR1(rho_s) process in space and one in time AR1(rho_t). The -separable assumption is that two points x1 and x2, separated in -space by a distance ds and in time by a distance dt, have -correlation given by

-

rho_s^ds*rho_t^dt

-

This is implemented as

-
using namespace density;
-int n_s = 10;                   // Number of grid points in space
-int n_t = 10;                   // Number of grid points in time
-Type rho_s = 0.2;               // Correlation in space
-Type rho_t = 0.4;               // Correlation in time
-
-array<Type> x(n_s,n_t);
-x.setZero();                    // x = 0
-
-res = SEPARABLE(AR1(rho_t),AR1(rho_s))(x);
-

Note that the arguments to SEPARABLE() are given in the opposite order -to the dimensions of x.

-
-
-
- -
-
-
- - -
-
- - - - - - - - - - - - - - - diff --git a/_book/Errors.html b/_book/Errors.html deleted file mode 100644 index 5a148b1db..000000000 --- a/_book/Errors.html +++ /dev/null @@ -1,472 +0,0 @@ - - - - - - - 8 Compilation and run time errors | The comprehensive TMB documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
- -
-
- - -
-
- -
-
-

8 Compilation and run time errors

-

The R interface to the debugger (gdb) is documented as part of the R -help system, i.e. you can type ?gdbsource in R to get info. The -current document only addresses issues that relate to C++.

-
-

8.1 Compilation errors

-

It may be hard to understand the compilation errors for the following -reasons

-
    -
  • The Eigen libraries use templated C++ which generate non-intuitive -error messages.
  • -
-
-
-

8.2 Run time errors

-

Run time errors are broadly speaking of two types:

-
    -
  • Out-of-bounds (you are “walking out of an array”)
  • -
  • Floating point exceptions
  • -
-

You can use the debugger to locate both types of errors, but the -procedure is a little bit different in the two cases. The following -assumes that you have the GNU debugger gdb installed.

-
-

8.2.1 Out-of-bounds error

-

An example is:

-
vector<Type> y(4);
-y(5);                // 5 is not a valid index value here
-

This will cause TMB and R to crash with the following error message:

-
-

TMB has received an error from Eigen. The following condition was not met: -index >= 0 && index < size() -Please check your matrix-vector bounds etc., or run your program through a debugger. -Aborted (core dumped)

-
-

So, you must restart R and give the commands

-
library(TMB)
-gdbsource("my_project.R")
-
-

#5 objective_function::operator() (this=) at nan_error_ex.cpp:11

-
-

and you can see that the debugger points to line number 11 in the .cpp -file. gdbsource() is an R function that is part of TMB.

-
-
-

8.2.2 Floating point exception

-

If you on the other hand perform an illegal mathematical operation, -such as

-
Type f = sqrt(-1.);
-

R will not crash, but the objective function will return a NaN value. -However, you will not know in which part of your C++ code the error -occurred. By including the fenv.h library (part of many C++ -compilers, but can otherwise be downloaded from -http://www.scs.stanford.edu/histar/src/uinc/fenv.h)

-

nan_error_ex.cpp:

-
// Illustrates how to make the debugger catch a floating point error.
-#include <TMB.hpp>
-#include <fenv.h> // Extra line needed
-
-template<class Type>
-Type objective_function<Type>::operator() ()
-{
-  feenableexcept(FE_INVALID | FE_OVERFLOW | FE_DIVBYZERO | FE_UNDERFLOW); // Extra line needed
-    
-  DATA_SCALAR(lambda);
-  PARAMETER(x);
-  Type f;
-  f = sqrt(-1.);        // FE_INVALID   ( sqrt(-1.) returns NaN )
-  //f = 1./0.;          // FE_DIVBYZERO ( division by zero )
-  //f = exp(100000.);   // FE_OVERFLOW  ( exp(100000.) returns Inf )   [Does not work on all platforms]
-  //f = exp(-100000.);  // FE_UNDERFLOW ( exp(-100000.) returns 0 )
-  return f;
-}
-

a floating point exception will be turned into an actual error that -can be picked up by the debugger. There are only two extra lines that -need to be included (“//Extra line needed” in the above example).

-

When we try to run this program in the usual way, the program crashes:

-
source("nan_error_ex.R")
-
-

Floating point exception (core dumped) -tmp3>

-
-

At this stage you should run the debugger to find out that the -floating point exception occurs at line number 14:

-
library(TMB)
-gdbsource("nan_error_ex.R")
-
-

#1 0x00007ffff0e7eb09 in objective_function::operator() (this=) at nan_error_ex.cpp:14

-
-

This enabling of floating point errors applies to R as well as the TMB program. -For more elaborate R-scripts it may therefore happen that a NaN occurs in the R-script -before the floating point exception in the TMB program (i.e. the problem of interest) happens. -To circumvent this problem one can run without NaN debugging enabled and -save the parameter vector that gave the floating point exception (e.g. badpar <- obj$env$last.par after the NaN evaluation), -then enable NaN debugging, re-compile, and evaluate obj$env$f( badpar, type="double").

-
-
-

8.2.3 Missing casts for vectorized functions

-

TMB vectorized functions cannot be called directly with expressions, for example the following will fail to compile:

-
DATA_VECTOR(x);
-// Don't do this! Doesn't compile
-vector<Type> out = lgamma(x + 1);
-
-

error: could not convert ‘atomic::D_lgamma(const CppAD::vector&) … -from ‘double’ to ‘Eigen::CwiseBinaryOp<Eigen::internal::scalar_sum_op<double, double>, … >’

-
-

Eigen lazy-evaluates expressions, and the templating of lgamma means we expect to return a “x + y”-typed object, which it obviously can’t do.

-

To work around this, cast the input:

-
DATA_VECTOR(x);
-vector<Type> out = lgamma(vector<Type>(x + 1));
-
-
-
-
- -
-
-
- - -
-
- - - - - - - - - - - - - - - diff --git a/_book/Examples.html b/_book/Examples.html deleted file mode 100644 index 7b396184f..000000000 --- a/_book/Examples.html +++ /dev/null @@ -1,578 +0,0 @@ - - - - - - - 7 Example collection | The comprehensive TMB documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
- -
-
- - -
-
- -
-
-

7 Example collection

-
    -
  • A list of all examples is found on the “Examples” tab on the top of the page.

  • -
  • Locations of example files: adcomp/tmb_examples and adcomp/TMB/inst/examples.

  • -
  • For each example there is both a .cpp and a .R file. Take for instance the linear regression example:

  • -
  • C++ template

    -
    // Simple linear regression.
    -#include <TMB.hpp>
    -template<class Type>
    -Type objective_function<Type>::operator() ()
    -{
    -  DATA_VECTOR(Y);
    -  DATA_VECTOR(x);
    -  PARAMETER(a);
    -  PARAMETER(b);
    -  PARAMETER(logSigma);
    -  ADREPORT(exp(2*logSigma));
    -  Type nll = -sum(dnorm(Y, a+b*x, exp(logSigma), true));
    -  return nll;
    -}
  • -
  • Controlling R code

    -
    library(TMB)
    -compile("linreg.cpp")
    -dyn.load(dynlib("linreg"))
    -set.seed(123)
    -data <- list(Y = rnorm(10) + 1:10, x=1:10)
    -parameters <- list(a=0, b=0, logSigma=0)
    -obj <- MakeADFun(data, parameters, DLL="linreg")
    -obj$hessian <- TRUE
    -opt <- do.call("optim", obj)
    -opt
    -opt$hessian ## <-- FD hessian from optim
    -obj$he()    ## <-- Analytical hessian
    -sdreport(obj)
  • -
  • To run this example use the R command

    -
    source("linreg.R")
  • -
-
-

7.1 Example overview

- ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
ExampleDescription
adaptive_integration.cppAdaptive integration using ‘tiny_ad’
ar1_4D.cppSeparable covariance on 4D lattice with AR1 structure in each direction.
compois.cppConway-Maxwell-Poisson distribution
fft.cppMultivariate normal distribution with circulant covariance
hmm.cppInference in a ‘double-well’ stochastic differential equation using HMM filter.
laplace.cppLaplace approximation from scratch demonstrated on ‘spatial’ example.
linreg_parallel.cppParallel linear regression.
linreg.cppSimple linear regression.
longlinreg.cppLinear regression - 10^6 observations.
lr_test.cppIllustrate map feature of TMB to perform likelihood ratio tests on a ragged array dataset.
matern.cppGaussian process with Matern covariance.
mvrw_sparse.cppIdentical with random walk example. Utilizing sparse block structure so efficient when the number of states is high.
mvrw.cppRandom walk with multivariate correlated increments and measurement noise.
nmix.cppnmix example from https://groups.nceas.ucsb.edu/non-linear-modeling/projects/nmix
orange_big.cppScaled up version of the Orange Tree example (5000 latent random variables)
register_atomic_parallel.cppParallel version of ‘register_atomic’
register_atomic.cppSimilar to example ‘adaptive_integration’ using CppAD Romberg integration. REGISTER_ATOMIC is used to reduce tape size.
sam.cppState space assessment model from Nielsen and Berg 2014, Fisheries Research.
sde_linear.cppInference in a linear scalar stochastic differential equation.
sdv_multi_compact.cppCompact version of sdv_multi
sdv_multi.cppMultivariate SV model from Skaug and Yu 2013, Comp. Stat & data Analysis (to appear)
socatt.cppsocatt from ADMB example collection.
spatial.cppSpatial poisson GLMM on a grid, with exponentially decaying correlation function
spde_aniso_speedup.cppSpeedup “spde_aniso.cpp” by moving normalization out of the template.
spde_aniso.cppAnisotropic version of “spde.cpp.”
spde.cppIllustration SPDE/INLA approach to spatial modelling via Matern correlation function
thetalog.cppTheta logistic population model from Pedersen et al 2012, Ecol. Modelling.
TMBad/interpol.cppDemonstrate 2D interpolation operator
TMBad/sam.cppState space assessment model from Nielsen and Berg 2014, Fisheries Research.
TMBad/solver.cppDemonstrate adaptive solver of TMBad
TMBad/spa_gauss.cppDemonstrate saddlepoint approximation (SPA)
TMBad/spatial.cppSpatial poisson GLMM on a grid, with exponentially decaying correlation function
TMBad/spde_epsilon.cppLow-level demonstration of fast epsilon bias correction using ‘sparse plus lowrank’ hessian
TMBad/thetalog.cppTheta logistic population model from Pedersen et al 2012, Ecol. Modelling.
transform_parallel.cppParallel version of transform
transform.cppGamma distributed random effects using copulas.
transform2.cppBeta distributed random effects using copulas.
tweedie.cppEstimating parameters in a Tweedie distribution.
validation/MVRandomWalkValidation.cppEstimate and validate a multivariate random walk model with correlated increments and correlated observations.
validation/randomwalkvalidation.cppEstimate and validate a random walk model with and without drift
validation/rickervalidation.cppEstimate and validate a Ricker model based on data simulated from the logistic map
-
-
-
- -
-
-
- - -
-
- - - - - - - - - - - - - - - diff --git a/_book/Introduction.html b/_book/Introduction.html deleted file mode 100644 index 77ae3d20a..000000000 --- a/_book/Introduction.html +++ /dev/null @@ -1,377 +0,0 @@ - - - - - - - The comprehensive TMB documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
- -
-
- - -
-
- -
- -
-

1 Introduction

-

TMB (Template Model Builder) is an R package for fitting statistical latent variable models to data. It is strongly inspired by ADMB. Unlike most other R packages the model is formulated in C++. This provides great flexibility, but requires some familiarity with the C/C++ programming language.

-
    -
  • TMB can calculate first and second order derivatives of the likelihood function by AD, or any objective function written in C++.
  • -
  • The objective function (and its derivatives) can be called from R. Hence, parameter estimation via e.g. nlminb() is easy.
  • -
  • The user can specify that the Laplace approximation should be applied to any subset of the function arguments. -
      -
    • Yields marginal likelihood in latent variable model.
    • -
  • -
  • Standard deviations of any parameter, or derived parameter, obtained by the ‘delta method’.
  • -
  • Pre and post-processing of data done in R.
  • -
  • TMB is based on state-of-the art software: CppAD, Eigen, …
  • -
-

A more general introduction including the underlying theory used in TMB can be found in this paper.

-
-
- -
-
-
- - -
-
- - - - - - - - - - - - - - - diff --git a/_book/ModelObject.html b/_book/ModelObject.html deleted file mode 100644 index d00351b53..000000000 --- a/_book/ModelObject.html +++ /dev/null @@ -1,378 +0,0 @@ - - - - - - - 11 Model object | The comprehensive TMB documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
- -
-
- - -
-
- -
-
-

11 Model object

-

The TMB core model object is the object returned by MakeADFun(). -A number of options can be passed to MakeADFun to control the -model. The current section walks you through all the -options. Additionally we demonstrate some of the methods that can be -applied to a fitted model object. We shall see how to:

-
    -
  • Fix and collect parameters using the map argument.
  • -
  • Switch parameters back and forth between the inner and outer optimization problem using the arguments random and profile.
  • -
  • Set options for the inner optimization problem.
  • -
  • sdreporting a fitted object.
  • -
  • Bias correction if random effect estimates.
  • -
  • Likelihood profiling a fitted object.
  • -
-
-

FIXME: NOT DONE YET !

-
- -
-
- -
-
-
- - -
-
- - - - - - - - - - - - - - - diff --git a/_book/R_style_distribution.html b/_book/R_style_distribution.html deleted file mode 100644 index 669cc5f42..000000000 --- a/_book/R_style_distribution.html +++ /dev/null @@ -1,390 +0,0 @@ - - - - - - - 5 R style probability distributions | The comprehensive TMB documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
- -
-
- - -
-
- -
-
-

5 R style probability distributions

-

Attempts have been made to make the interface (function name and -arguments) as close as possible to that of R.

-
    -
  • The densities (d...) are provided both in the discrete and -continuous case, cumulative distributions (p...) and inverse -cumulative distributions (q...) are provided only for continuous -distributions.

  • -
  • Scalar and vector arguments (in combination) are supported, but -not array or matrix arguments.

  • -
  • The last argument (of type int) corresponds to the log -argument in R: 1=logarithm, 0=ordinary scale. true (logarithm) and -false (ordinary scale) can also be used.

  • -
  • Vector arguments must all be of the same length (no recycling of -elements). If vectors of different lengths are used an “out of -range” error will occur, which can be picked up by the debugger.

  • -
  • DATA_IVECTOR() and DATA_INTEGER() cannot be used with -probability distributions, except possibly for the last (log) -argument.

  • -
  • An example:

    -
    DATA_SCALAR(y);
    -DATA_VECTOR(x);
    -vector<Type> rate(10);
    -matrix<Type> rate_matrix(10, 10);
    -dexp(y, rate, true);                    // OK, returns vector of length 10 of log-densities
    -dexp(x, rate, true);                    // OK if x is length 10
    -dexp(x, rate_matrix, true);             // Error, matrix arguments not allowed
  • -
  • To sum over elements in the vector returned use

    -
    sum(dexp(x,rate));
  • -
-
-
- -
-
-
- - -
-
- - - - - - - - - - - - - - - diff --git a/_book/Simulation.html b/_book/Simulation.html deleted file mode 100644 index 0750ae6d7..000000000 --- a/_book/Simulation.html +++ /dev/null @@ -1,553 +0,0 @@ - - - - - - - 13 Simulation | The comprehensive TMB documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
- -
-
- - -
-
- -
-
-

13 Simulation

-

When building models in TMB it is generally recommended to test the -implementation on simulated data. Obviously, data can be simulated -from R and passed to the C++ template. In practice this amounts to -implementing the model twice and is thus a strong way to validate the -implementation of the model. However, with increased model complexity -it becomes inconvenient to maintain two separate -implementations. Therefore, TMB allows the user to write the -simulation code as an integrated part of the C++ model template.

-
-

13.1 Overview of simulation methods in TMB

-
-

13.1.1 Standard generators

-

The TMB simulation routines use the same naming convention as the R -simulators. For instance rnorm() is used to simulate from a normal -distribution. However, the argument convention is slightly -different:

-
    -
  1. rnorm(n, mu, sd) draws n simulations from a normal -distribution. Unlike R this works for scalar parameters only.
  2. -
  3. rnorm(mu, sd) is a TMB specific variant that works for -mixed scalar and vector input. Output length follows the length of the -longest input (no re-cycling) hence is consistent with dnorm(mu, sd).
  4. -
-

Currently the following simulators are implemented:

-
-

rnorm(), rpois(), runif(), rbinom(), rgamma(), rexp(), rbeta(), rf(), rlogis(), rt(), rweibull(), rcompois(), rtweedie(), rnbinom(), rnbinom2()

-
-
-
-

13.1.2 Generators for density objects

-

Objects from the density namespace have their own simulate() -method. Taking the multivariate normal distribution as example we have -the following ways to draw a simulation:

-
    -
  1. MVNORM(Sigma).simulate() returns a vector with a simulation from -the multivariate normal distribution. The void argument version is -only available when there is no ambiguity in the dimension of the -output. In the MVNORM case the dimension of the output is known -from the dimension of Sigma. In other cases e.g. AR1(phi) the -dimension of the output is not known hence the void argument -version is not available.
  2. -
  3. MVNORM(Sigma).simulate(x) pass x by reference and writes the -simulation directly to x without returning anything. This version -is available for all the classes because the dimension of the -simulation can always be deduced from x.
  4. -
-
-
-

13.1.3 Controlling the random seed

-

All TMB simulation methods are based on R’s random number -generator. It follows that the random seed can be controlled from R -the usual way using set.seed even though the simulation is performed -on the C++ side.

-
-
-
-

13.2 Simulation blocks

-

Simulation functions can be called from anywhere in the C++ -program. However, usually one should put the simulation code inside -specialized simulation blocks that allows the code to only be -executed when requested from R.

-
-

13.2.1 A linear regression example

-

A complete example extending the example linreg.cpp -with simulation code is:

-

-#include <TMB.hpp>
-template<class Type>
-Type objective_function<Type>::operator() ()
-{
-  DATA_VECTOR(y);
-  DATA_VECTOR(x);
-  PARAMETER(a);
-  PARAMETER(b);
-  PARAMETER(sd);
-  vector<Type> mu = a + b * x;
-  Type nll = -sum(dnorm(y, mu, sd, true));
-  SIMULATE {
-    y = rnorm(mu, sd);  // Simulate response
-    REPORT(y);          // Report the simulation
-  }
-  return nll;
-}
-
-

The SIMULATE block marks the simulation and is not executed by default.

-
-

We compile the C++-file and the model object is constructed as usual:

-
obj <- MakeADFun(data, parameters, DLL="linreg")
-

Now a simulation can be generated with

-
set.seed(1) ## optional
-obj$simulate()
-
## $y
-##  [1] -0.6264538  0.1836433 -0.8356286  1.5952808  0.3295078 -0.8204684
-##  [7]  0.4874291  0.7383247  0.5757814 -0.3053884
-

This only includes the simulated response - not the rest of the -data. A complete dataset can be generated by:

-
set.seed(1) ## optional - Note: same y as previous
-obj$simulate(complete=TRUE)
-
## $y
-##  [1] -0.6264538  0.1836433 -0.8356286  1.5952808  0.3295078 -0.8204684
-##  [7]  0.4874291  0.7383247  0.5757814 -0.3053884
-## 
-## $x
-##  [1]  1  2  3  4  5  6  7  8  9 10
-## 
-## attr(,"check.passed")
-## [1] TRUE
-

Here we did not explicitly state the parameter values to use with the -simulation. The simulate method takes an additional argument par -that can be used for this.

-
-

The default parameter values used for the simulation is -obj$env$last.par.

-
-
-
-

13.2.2 A simulation study

-

Simulating datasets from known parameters and re-estimating those -parameters can be done generically by:

-
sim <- replicate(50, {
-  simdata <- obj$simulate(par=obj$par, complete=TRUE)
-  obj2 <- MakeADFun(simdata, parameters, DLL="linreg", silent=TRUE)
-  nlminb(obj2$par, obj2$fn, obj2$gr)$par
-})
-

We reshape and plot the result:

-
library(lattice)
-df <- data.frame(estimate=as.vector(sim), parameter=names(obj$par)[row(sim)])
-densityplot( ~ estimate | parameter, data=df, layout=c(3,1))
-

-

Compare with the true parameter values of the simulation:

-
obj$par
-
##  a  b sd 
-##  0  0  1
-
-
-

13.2.3 Advanced examples

-

The examples sam.cpp and ar1_4D.cpp -includes more advanced simulation code. The latter demonstrates how to -simulate from the density objects:

-
// Separable covariance on 4D lattice with AR1 structure in each direction.
-#include <TMB.hpp>
-
-/* Parameter transform */
-template <class Type>
-Type f(Type x){return Type(2)/(Type(1) + exp(-Type(2) * x)) - Type(1);}
-
-template<class Type>
-Type objective_function<Type>::operator() ()
-{
-  DATA_VECTOR(N)
-  PARAMETER_ARRAY(eta);
-  PARAMETER(transf_phi); /* fastest running dim */
-  Type phi=f(transf_phi);
-  ADREPORT(phi);
-
-  using namespace density;
-  Type res=0;
-
-  res+=AR1(phi,AR1(phi,AR1(phi,AR1(phi))))(eta);
-
-  // logdpois = N log lam - lam
-  for(int i=0;i<N.size();i++)res-=N[i]*eta[i]-exp(eta[i]);
-
-  SIMULATE {
-    AR1(phi,AR1(phi,AR1(phi,AR1(phi)))).simulate(eta);
-    vector<Type> lam = exp(eta);
-    N = rpois(lam);
-    REPORT(eta);
-    REPORT(N);
-  }
-
-  return res;
-
-}
-

In this example the 4D-array eta is passed to the simulator by -reference. Thereby the simulator knows the dimension of eta and can -fill eta with a simulation.

-
-
-

13.2.4 Further notes

-

The above example only used one simulation block. In general there is -no limitation on the number of simulation blocks that can be used in a -model and simulation blocks can use temporaries calculated outside the -blocks (as demonstrated in the linear regression example). For clarity -reasons, it is often a good idea to add a simulation block after each -likelihood contribution. However, note that simulation blocks are in -general not commutative (unlike likelihood accumulation). It is -therefore further recommended to add likelihood contributions of -random effects in the natural hierarchical order.

- -
-
-
-
- -
-
-
- - -
-
- - - - - - - - - - - - - - - diff --git a/_book/Sparsity.html b/_book/Sparsity.html deleted file mode 100644 index 3ab5e73c8..000000000 --- a/_book/Sparsity.html +++ /dev/null @@ -1,455 +0,0 @@ - - - - - - - 12 Sparsity | The comprehensive TMB documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
- -
-
- - -
-
- -
-
-

12 Sparsity

-

Large random effect models require sparsity in order to work in TMB. -In this section we will discuss:

-
    -
  • What exactly we mean by sparsity.
  • -
  • How to formulate sparse models (the same model can sometimes be formulated as both dense and sparse).
  • -
  • How to calculate the sparsity pattern of a given TMB model.
  • -
  • How to visualize sparsity either as a matrix or a graph.
  • -
  • How to use sparsity for general optimization problems (not just random effects).
  • -
-
-

12.1 Conditional independence graphs and DAGs

-
-

12.1.1 Conditional independence

-

There are various graph representations that are commonly used to -visualize probabilistic structure. One such is the conditional -independence graph. Say we have a model of four random variables \(X_1,...,X_4\) for which the joint density is:

-

\[p(x_1,x_2,x_3,x_4) \propto f_1(x_1,x_2)f_2(x_2,x_3)f_3(x_3,x_4)f_4(x_4,x_1)\]

-

The separability of factors on the right hand side implies some conditional independence properties. For instance if \(x_1\) and \(x_3\) are held constant then \(x_2\) and \(x_4\) varies independently. We say that \(x_2\) and \(x_4\) are conditionally independent given \(x_1\) and \(x_3\). -The conditional independence graph is defined by drawing undirected edges between variables occurring in the same factor \(f_i\):

-

-

Equivalently the graph may be visualized via its adjacency matrix:

-

-

This is the sparsity pattern of the model.

-
-

The sparsity pattern visualizes the conditional independence structure of the random effects in the model.

-
-
-
-

12.1.2 Node removal properties

-

Important probabilistic properties can be deduced directly from the graph. This is due to the following node removal properties.

-
    -
  1. The conditional distribution given node \(X_i\) is found by removing \(X_i\) and its edges from the graph. For instance conditional on \(X_4\) we get the following graph: -

  2. -
  3. The marginal distribution wrt. a node \(X_i\) is found by removing \(X_i\) from the graph and connecting all \(X_i\)’s neighbors. For instance when integrating \(X_4\) out of the joint density we get the following graph for the remaining nodes: -

  4. -
-
-

Conditioning preserves sparseness. Marginalizing tend to destroy sparseness by adding more edges to the graph.

-
-
-
-

12.1.3 Directed acyclic graph

-

When building models in TMB it is often more natural to specify processes in incremental steps - i.e. through the successive conditional distributions. The previous example could be simulated by drawing the variables \(X_1,X_2,X_3,X_4\) one by one in the given order as illustrated by the following directed graph:

-

-

The graph shows dependencies of any specific node given past nodes. The edge from \(X_1\) to \(X_3\) was not in the original (undirected) graph. This is a so-called fill-in.

-
-

Order matters. The DAG is different from the conditional independence graph.

-
-
-
-

12.1.4 The effect of adding data

-

It is convenient to use a box-shape for nodes that represent data. For instance if we pretend that \(X_4\) is a data point we would illustrate it by:

-

-

Here there are only three variables left. The conditional independence structure of the variables is:

-

-

which is the same graph as was previously found by integrating \(X_4\) out of the joint distribution.

-
-

Data nodes destroy sparsity the same way as marginalization. To avoid this, try to associate each data point with a single random effect.

-
-
-
-
-

12.2 The theta logistic example

-

Consider the ``theta logistic’’ population model (Pedersen et al. 2011). This is a -state-space model with state equation

-

\[u_t = u_{t-1} + r_0\left(1-\left(\frac{\exp(u_{t-1})}{K}\right)^\psi\right) + e_t\]

-

and observation equation

-

\[y_t = u_t + v_t\]

-

where \(e_t \sim N(0,Q)\), \(v_t \sim N(0,R)\) and \(t\in \{0,...,n-1\}\). A -uniform prior is implicitly assigned to \(u_0\).

-

The joint negative log-likelihood of state vector \(u\) and measurements -\(y\) is implemented in the C++ template thetalog.cpp. -The example can be run by:

-
runExample("thetalog", exfolder="adcomp/tmb_examples")
-

We demonstrate it in the case \(n=5\). Here is the DAG

-

-

This is a standard hidden Markov structure. Each data node is bound to a single random effect - hence the data does not introduce additional edges in the random effect structure.

-

We can use the image function from the Matrix package to plot the random effect structure (we must first load the Matrix package):

-
library(Matrix)
-obj <- MakeADFun(data, parameters, random=c("X"), DLL="thetalog")
-image(obj$env$spHess(random=TRUE))
-

-
-

FIXME: NOT DONE YET !

-
- - -
-
-

References

-
-
-Pedersen, Martin Wæver, Casper Willestofte Berg, Uffe Høgsbro Thygesen, Anders Nielsen, and Henrik Madsen. 2011. “Estimation Methods for Nonlinear State-Space Models in Ecology.” Ecological Modelling 222 (8): 1394–1400. -
-
-
- -
-
-
- - -
-
- - - - - - - - - - - - - - - diff --git a/_book/Structure_TMB.html b/_book/Structure_TMB.html deleted file mode 100644 index 3f2df1316..000000000 --- a/_book/Structure_TMB.html +++ /dev/null @@ -1,372 +0,0 @@ - - - - - - - 3 The structure of TMB | The comprehensive TMB documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
- -
-
- - -
-
- -
-
-

3 The structure of TMB

-

This documentation only covers the TMB specific code, not -CppAD -or -Eigen -These packages have their own documentation, which may be relevant. -In particular, some of the standard functions like sin() and cos() -are part of CppAD, and are hence not documented through TMB.

-
- -

TMB components

-
-
-
- -
-
-
- - -
-
- - - - - - - - - - - - - - - diff --git a/_book/Toolbox.html b/_book/Toolbox.html deleted file mode 100644 index 0dea66d84..000000000 --- a/_book/Toolbox.html +++ /dev/null @@ -1,414 +0,0 @@ - - - - - - - 9 Toolbox | The comprehensive TMB documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
- -
-
- - -
-
- -
-
-

9 Toolbox

-

First read the Statistical Modelling section of Tutorial.

-
-

9.0.1 Non-normal latent variables (random effects)

-

The underlying latent random variables in TMB must be Gaussian for the -Laplace approximation to be accurate. To obtain other distributions, -say a gamma distribution, the “transformation trick” can be used. We -start out with normally distributed variables u and transform these -into new variables w via the pnorm and qgamma functions as -follows:

-
PARAMETER_VECTOR(u);                             // Underlying latent random variables 
-Type nll=Type(0.0);
-nll -= sum(dnorm(u,Type(0),Type(1),true));       // Assign N(0,1) distribution u 
-vector<Type> v = pnorm(u,Type(0),Type(1));  // Uniformly distributed variables (on [0,1])
-vector<Type> w = qgamma(v,shape,scale);
-

w now has a gamma distribution.

-
-
-

9.0.2 Discrete latent variables

-

The Laplace approximation can not be applied to discrete latent -variables that occur in mixture models and HMMs (Hidden Markov -models). However, such likelihoods have analytic expressions, and may -be coded up in TMB. TMB would still calculate the exact gradient of -the HMM likelihood.

-
-
-

9.0.3 Mixture models

-

Although mixture models are a special case of discrete latent variable -models, they do deserve special attention. Consider the case that we -want a mixture of two zero-mean normal distributions (with different -standard deviations). This can be implemented as:

-
DATA_VECTOR(x);                         
-PARAMETER_VECTOR(sigma);      // sigma0 og sigma1
-PARAMETER(p);                 // Mixture proportion of model 0
-Type nll=Type(0.0);
-nll -= sum( log(      p  * dnorm(x, Type(0), sigma(0), false)
-               + (1.0-p) * dnorm(x, Type(0), sigma(1), false) ) );
-
-
-

9.0.4 Time series

-

Autoregressive (AR) processes may be implemented using the compact -notation of section Densities. The resulting AR process may be -applied both in the observational part and in the distribution of a -latent variable.

-

Nonlinear time must be implemented from scratch, as in the example -thetalog.cpp

-
-
-

9.0.5 Spatial models

-

TMB has strong support for spatial model and space-time models via the -GMRF() and SEPARABLE() functions, and the notion of a -distribution. The reader is referred to section Densities for -details and examples.

-
-
-
- -
-
-
- - -
-
- - - - - - - - - - - - - - - diff --git a/_book/Tutorial.html b/_book/Tutorial.html deleted file mode 100644 index 0f4e27d2d..000000000 --- a/_book/Tutorial.html +++ /dev/null @@ -1,488 +0,0 @@ - - - - - - - 2 Tutorial | The comprehensive TMB documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
- -
-
- - -
-
- -
-
-

2 Tutorial

-

A TMB project consists of an R file (.R) and a C++ file (.cpp). The -R file does pre- and post processing of data in addition to maximizing -the log-likelihood contained in *.cpp. See Examples for more -details. All R functions are documented within the standard help -system in R. This tutorial describes how to write the C++ file, and -assumes familiarity with C++ and to some extent with R.

-

The purpose of the C++ program is to evaluate the objective function, -i.e. the negative log-likelihood of the model. The program is compiled -and called from R, where it can be fed to a function minimizer like -nlminb().

-

The objective function should be of the following C++ type:

-
#include <TMB.hpp>
-
-template<class Type>
-Type objective_function<Type>::operator() ()
-{
-.... Here goes your C++ code ..... 
-}
-

The first line includes the source code for the whole TMB package (and -all its dependencies). The objective function is a templated class -where <Type> is the data type of both the input values and -the return value of the objective function. This allows us to -evaluate both the objective function and its derivatives using the -same chunk of C++ code (via the AD package -CppAD). The -technical aspects of this are hidden from the user. There is however -one aspect that surprises the new TMB user. When a constant like “1.2” -is used in a calculation that affects the return value it must be -“cast” to Type:

-
Type nll;           // Define variable that holds the return value (neg. log. lik)
-nll = Type(1.2);    // Assign value 1.2; a cast is needed.
-
-

2.1 Obtaining data and parameter values from R

-

Obviously, we will need to pass both data and parameter values to the -objective function. This is done through a set of macros that TMB -defines for us.

-
-

2.1.1 List of data macros

-
-

DATA_ARRAY(), DATA_FACTOR(), DATA_IARRAY(), DATA_IMATRIX(), DATA_INTEGER(), DATA_IVECTOR(), DATA_MATRIX(), DATA_SCALAR(), DATA_SPARSE_MATRIX(), DATA_STRING(), DATA_STRUCT(), DATA_UPDATE(), DATA_VECTOR()

-
-
-
-

2.1.2 List of parameter macros

-
-

PARAMETER(), PARAMETER_ARRAY(), PARAMETER_MATRIX(), PARAMETER_VECTOR()

-
-

To see which macros are available start typing -DATA_ or PARAMETER_ in the Doxygen search field of -your browser (you may need to refresh the browser window between each -time you make a new search). A simple example if you want to read a -vector of numbers (doubles) is the following

-
DATA_VECTOR(x);     // Vector x(0),x(1),...,x(n-1), where n is the length of x
-

Note that all vectors and matrices in TMB uses a zero-based -indexing scheme. It is not necessary to explicitly pass the dimension -of x, as it can be retrieved inside the C++ program:

-
int n = x.size();
-
-
-
-

2.2 An extended C++ language

-

TMB extends C++ with functionality that is important for formulating -likelihood functions. You have different toolboxes available:

- -

In addition to the variables defined through the DATA_ or -PARAMETER_ macros there can be “local” variables, for which -ordinary C++ scoping rules apply. There must also be a variable that -holds the return value (neg. log. likelihood).

-
DATA_VECTOR(x);               // Vector x(0), x(1), ..., x(n-1)
-Type tmp = x(1);
-Type nll = tmp * tmp; 
-

As in ordinary C++ local variable tmp must be assigned a value before -it can enter into a calculation.

-
-
-

2.3 Statistical modelling

-

TMB can handle complex statistical problems with hierarchical -structure (latent random variables) and multiple data sources. Latent -random variables must be continuous (discrete distributions are not -handled). The PARAMETER_ macros are used to pass two types -of parameters.

-
    -
  • Parameters: to be estimated by maximum likelihood. These include -fixed effects and variance components in the mixed model -literature. They will also correspond to hyper parameters with -non-informative priors in the Bayesian literature.
  • -
  • Latent random variables: to be integrated out of the likelihood -using a Laplace approximation.
  • -
-

Which of these are chosen is controlled from R, via the -random argument to the function MakeADFun. However, -on the C++ side it is usually necessary to assign a probability -distribution to the parameter.

-

The purpose of the C++ program is to calculate the (negative) joint -density of data and latent random variables. Each datum and individual -latent random effect gives a contribution to log likelihood, which may -be though of as a “distribution assignment” by users familiar with -software in the BUGS family.

-
PARAMETER_VECTOR(u);          // Latent random variable 
-Type nll = Type(0);           // Return value
-nll -= dnorm(u(0),0,1,true)   // Distributional assignment: u(0) ~ N(0,1) 
-

The following rules apply:

-
    -
  • Distribution assignments do not need to take place before the latent -variable is used in a calculation.

  • -
  • More complicated distributional assignments are allowed, say -u(0)-u(1) ~ N(0,1), but this requires the user to have a deeper -understanding of the probabilistic aspects of the model.

  • -
  • For latent variables only normal distributions should be used -(otherwise the Laplace approximation will perform poorly). For -response variables all probability distributions (discrete or -continuous) are allowed. If a non-gaussian latent is needed the -“transformation trick” can be used.

  • -
  • The namespaces R style distributions and Densities contain -many probability distributions, including multivariate normal -distributions. For probability distributions not available from -these libraries, the user can use raw C++ code:

    -
    DATA_VECTOR(y);                   // Data vector
    -Type nll = Type(0);               // Return value
    -nll -= sum(-log(Type(1.0)+y*y));  // y are i.i.d. Cauchy distributed
  • -
-

See Toolbox for more about statistical modelling.

-
-
-
- -
-
-
- - -
-
- - - - - - - - - - - - - - - diff --git a/_book/Validation.html b/_book/Validation.html deleted file mode 100644 index f3647fa7b..000000000 --- a/_book/Validation.html +++ /dev/null @@ -1,483 +0,0 @@ - - - - - - - 14 Validation | The comprehensive TMB documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
- -
-
- - -
-
- -
-
-

14 Validation

-
-

14.1 Residuals

-

The underlying framework is the same for all cases listed in this section. -[ Description of general framework FIXME ]

-

For models that does not include random effects the calculations can be simplified greatly.

-
-

14.1.1 Models without random effects

-
-

14.1.1.1 Normal distribution (Pearson residuals) .

-

This example shows how standardized residuals can be calculated within the template code and reported back to R using the REPORT function in TMB.

-
// linear regression with reporting of residuals
-#include <TMB.hpp>
-template<class Type>
-Type objective_function<Type>::operator() ()
-{
-  DATA_VECTOR(Y);
-  DATA_VECTOR(x);
-  PARAMETER(a);
-  PARAMETER(b);
-  PARAMETER(logSigma);
-  Type sigma = exp(logSigma);
-  Vector<Type> pred = a + b*x;
-  Type nll = -sum(dnorm(Y, a+b*x, sigma, true));
-  Vector<Type> residuals = (Y - pred)/sigma;  
-  REPORT(residuals);  
-  return nll;
-}
-

Assuming that the model parameters have been fitted, and the model object is called obj, the standardized residuals can now be extracted from the model object usinig the report() function and inspected for normality as follows:

-
... 
-rep <- obj$report()
-qqnorm(rep$residuals)
-abline(0,1)
-
-
-

14.1.1.2 Non-normal distributions

-
-
14.1.1.2.1 Continuous
-

We now consider situations where the error distribution is continuous but not Gaussian.
-Residuals that are standard normal distributed given that the model is correct, can be obtained be using the “transformation trick,” here illustrated using a model that fits a gamma distribution.

-

-#include <TMB.hpp>
-template<class Type>
-Type objective_function<Type>::operator() ()
-{
-  DATA_VECTOR(Y);
-  PARAMETER(shape);
-  PARAMETER(scale);
-
-  Type nll=-dgamma(Y,shape,scale,true).sum();
-  vector<Type> residuals = qnorm( pgamma(Y,shape,scale) );
-  REPORT(residuals);
-  return nll;
-}
-
-
-
14.1.1.2.2 Discrete
-

For discrete probability distributions the transformation trick can also be used, but an element of randomization must be added in order to obtain residuals that are truly Gaussian.

-

Assume that you have a series of observed counts y and you have fitted some TMB model using a Poisson likelihood, and the predicted values from that model have been reported and saved in a vector called mu.

-
... 
-a <- ppois(y - 1, mu)
-b <- ppois(y, mu)
-u <- runif(n = length(y), min = a, max = b)
-residuals <- qnorm(u)
-
-
-
-
-

14.1.2 Models with random effects

-

Model validation using residuals is considerably more complicated for random effect models. -Further information can be found in (Thygesen et al. 2017) FIXME: not generating reference.

-
-

14.1.2.1 One-step-ahead residuals

-

Other names are one step prediction errors, forecast pseudo-residuals, and recursive residuals. -These residuals can be computed using the oneStepPredict function. -There are several methods available within this function, and it is the responsibility of the user to ensure that an appropriate method is chosen for a given model.

-

The following examples of its use are availabe in the tmb_examples/validation folder.

- ---- - - - - - - - - - - - - - - - - - - - - -
ExampleDescription
validation/MVRandomWalkValidation.cppEstimate and validate a multivariate random walk model with correlated increments and correlated observations.
validation/randomwalkvalidation.cppEstimate and validate a random walk model with and without drift
validation/rickervalidation.cppEstimate and validate a Ricker model based on data simulated from the logistic map
-
-
-

14.1.2.2 One sample from the posterior

-

An alternative (and faster) method is based on a single sample of the random effects from the their posterior distribution given the data. -For state space models we can derive both process- and observation errors from the single sample and the observations, and compare these with the assumptions in the model.

-

An example can be found at the end of the randomwalkvalidation.R file in the tmb_examples/validation folder

-
-
-
-
-

14.2 Checking the Laplace approximation

-

FIXME:

- -
-
-

References

-
-
-Thygesen, Uffe Høgsbro, Christoffer Moesgaard Albertsen, Casper Willestofte Berg, Kasper Kristensen, and Anders Nielsen. 2017. “Validation of Ecological State Space Models Using the Laplace Approximation.” Environmental and Ecological Statistics 24 (2): 317–39. -
-
-
- -
-
-
- - -
-
- - - - - - - - - - - - - - - diff --git a/_book/_main_files/figure-html/unnamed-chunk-11-1.svg b/_book/_main_files/figure-html/unnamed-chunk-11-1.svg deleted file mode 100644 index 864cc26ef..000000000 --- a/_book/_main_files/figure-html/unnamed-chunk-11-1.svg +++ /dev/null @@ -1,49 +0,0 @@ - - - - - - -G - - -1 - -X₁ - - -2 - -X₂ - - -1--2 - - - -4 - -X₄ - - -1--4 - - - -3 - -X₃ - - -2--3 - - - -3--4 - - - - diff --git a/_book/_main_files/figure-html/unnamed-chunk-12-1.svg b/_book/_main_files/figure-html/unnamed-chunk-12-1.svg deleted file mode 100644 index 74c3a11eb..000000000 --- a/_book/_main_files/figure-html/unnamed-chunk-12-1.svg +++ /dev/null @@ -1,267 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/_book/_main_files/figure-html/unnamed-chunk-13-1.svg b/_book/_main_files/figure-html/unnamed-chunk-13-1.svg deleted file mode 100644 index c7353120a..000000000 --- a/_book/_main_files/figure-html/unnamed-chunk-13-1.svg +++ /dev/null @@ -1,36 +0,0 @@ - - - - - - -G - - -1 - -X₁ - - -2 - -X₂ - - -1--2 - - - -3 - -X₃ - - -2--3 - - - - diff --git a/_book/_main_files/figure-html/unnamed-chunk-14-1.svg b/_book/_main_files/figure-html/unnamed-chunk-14-1.svg deleted file mode 100644 index ae7077437..000000000 --- a/_book/_main_files/figure-html/unnamed-chunk-14-1.svg +++ /dev/null @@ -1,40 +0,0 @@ - - - - - - -G - - -1 - -X₁ - - -2 - -X₂ - - -1--2 - - - -3 - -X₃ - - -1--3 - - - -2--3 - - - - diff --git a/_book/_main_files/figure-html/unnamed-chunk-15-1.svg 
b/_book/_main_files/figure-html/unnamed-chunk-15-1.svg deleted file mode 100644 index 4fb2c9182..000000000 --- a/_book/_main_files/figure-html/unnamed-chunk-15-1.svg +++ /dev/null @@ -1,58 +0,0 @@ - - - - - - -DAG - - -1 - -X₁ - - -2 - -X₂ - - -1->2 - - - - -3 - -X₃ - - -1->3 - - - - -4 - -X₄ - - -1->4 - - - - -2->3 - - - - -3->4 - - - - - diff --git a/_book/_main_files/figure-html/unnamed-chunk-16-1.svg b/_book/_main_files/figure-html/unnamed-chunk-16-1.svg deleted file mode 100644 index b346baf46..000000000 --- a/_book/_main_files/figure-html/unnamed-chunk-16-1.svg +++ /dev/null @@ -1,53 +0,0 @@ - - - - - - -DAG - - -1 - -X₁ - - -2 - -X₂ - - -1->2 - - - - -4 - -X₄ - - -1->4 - - - - -3 - -X₃ - - -2->3 - - - - -3->4 - - - - - diff --git a/_book/_main_files/figure-html/unnamed-chunk-17-1.svg b/_book/_main_files/figure-html/unnamed-chunk-17-1.svg deleted file mode 100644 index ae7077437..000000000 --- a/_book/_main_files/figure-html/unnamed-chunk-17-1.svg +++ /dev/null @@ -1,40 +0,0 @@ - - - - - - -G - - -1 - -X₁ - - -2 - -X₂ - - -1--2 - - - -3 - -X₃ - - -1--3 - - - -2--3 - - - - diff --git a/_book/_main_files/figure-html/unnamed-chunk-19-1.svg b/_book/_main_files/figure-html/unnamed-chunk-19-1.svg deleted file mode 100644 index 63a02bb45..000000000 --- a/_book/_main_files/figure-html/unnamed-chunk-19-1.svg +++ /dev/null @@ -1,108 +0,0 @@ - - - - - - -DAG - - -1 - -X₁ - - -2 - -X₂ - - -1->2 - - - - -6 - -Y₁ - - -1->6 - - - - -3 - -X₃ - - -2->3 - - - - -7 - -Y₂ - - -2->7 - - - - -4 - -X₄ - - -3->4 - - - - -8 - -Y₃ - - -3->8 - - - - -5 - -X₅ - - -4->5 - - - - -9 - -Y₄ - - -4->9 - - - - -10 - -Y₅ - - -5->10 - - - - - diff --git a/_book/_main_files/figure-html/unnamed-chunk-20-1.svg b/_book/_main_files/figure-html/unnamed-chunk-20-1.svg deleted file mode 100644 index 21f0c8ba2..000000000 --- a/_book/_main_files/figure-html/unnamed-chunk-20-1.svg +++ /dev/null @@ -1,346 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/_book/_main_files/figure-html/unnamed-chunk-29-1.svg b/_book/_main_files/figure-html/unnamed-chunk-29-1.svg deleted file mode 100644 index ba207bee0..000000000 --- a/_book/_main_files/figure-html/unnamed-chunk-29-1.svg +++ /dev/null @@ -1,394 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/_book/figure/TMB_components.png b/_book/figure/TMB_components.png deleted file mode 100644 index c674e2ae3..000000000 Binary files a/_book/figure/TMB_components.png and /dev/null differ diff --git a/_book/libs/anchor-sections-1.1.0/anchor-sections-hash.css b/_book/libs/anchor-sections-1.1.0/anchor-sections-hash.css deleted file mode 
100644 index b563ec97e..000000000 --- a/_book/libs/anchor-sections-1.1.0/anchor-sections-hash.css +++ /dev/null @@ -1,2 +0,0 @@ -/* Styles for section anchors */ -a.anchor-section::before {content: '#';font-size: 80%;} diff --git a/_book/libs/anchor-sections-1.1.0/anchor-sections.css b/_book/libs/anchor-sections-1.1.0/anchor-sections.css deleted file mode 100644 index 041905f8b..000000000 --- a/_book/libs/anchor-sections-1.1.0/anchor-sections.css +++ /dev/null @@ -1,4 +0,0 @@ -/* Styles for section anchors */ -a.anchor-section {margin-left: 10px; visibility: hidden; color: inherit;} -.hasAnchor:hover a.anchor-section {visibility: visible;} -ul > li > .anchor-section {display: none;} diff --git a/_book/libs/anchor-sections-1.1.0/anchor-sections.js b/_book/libs/anchor-sections-1.1.0/anchor-sections.js deleted file mode 100644 index fee005d95..000000000 --- a/_book/libs/anchor-sections-1.1.0/anchor-sections.js +++ /dev/null @@ -1,11 +0,0 @@ -document.addEventListener('DOMContentLoaded', function () { - // If section divs is used, we need to put the anchor in the child header - const headers = document.querySelectorAll("div.hasAnchor.section[class*='level'] > :first-child") - - headers.forEach(function (x) { - // Add to the header node - if (!x.classList.contains('hasAnchor')) x.classList.add('hasAnchor') - // Remove from the section or div created by Pandoc - x.parentElement.classList.remove('hasAnchor') - }) -}) diff --git a/_book/libs/gitbook-2.6.7/css/fontawesome/fontawesome-webfont.ttf b/_book/libs/gitbook-2.6.7/css/fontawesome/fontawesome-webfont.ttf deleted file mode 100644 index 35acda2fa..000000000 Binary files a/_book/libs/gitbook-2.6.7/css/fontawesome/fontawesome-webfont.ttf and /dev/null differ diff --git a/_book/libs/gitbook-2.6.7/css/plugin-bookdown.css b/_book/libs/gitbook-2.6.7/css/plugin-bookdown.css deleted file mode 100644 index ab7c20eb3..000000000 --- a/_book/libs/gitbook-2.6.7/css/plugin-bookdown.css +++ /dev/null @@ -1,105 +0,0 @@ -.book 
.book-header h1 { - padding-left: 20px; - padding-right: 20px; -} -.book .book-header.fixed { - position: fixed; - right: 0; - top: 0; - left: 0; - border-bottom: 1px solid rgba(0,0,0,.07); -} -span.search-highlight { - background-color: #ffff88; -} -@media (min-width: 600px) { - .book.with-summary .book-header.fixed { - left: 300px; - } -} -@media (max-width: 1240px) { - .book .book-body.fixed { - top: 50px; - } - .book .book-body.fixed .body-inner { - top: auto; - } -} -@media (max-width: 600px) { - .book.with-summary .book-header.fixed { - left: calc(100% - 60px); - min-width: 300px; - } - .book.with-summary .book-body { - transform: none; - left: calc(100% - 60px); - min-width: 300px; - } - .book .book-body.fixed { - top: 0; - } -} - -.book .book-body.fixed .body-inner { - top: 50px; -} -.book .book-body .page-wrapper .page-inner section.normal sub, .book .book-body .page-wrapper .page-inner section.normal sup { - font-size: 85%; -} - -@media print { - .book .book-summary, .book .book-body .book-header, .fa { - display: none !important; - } - .book .book-body.fixed { - left: 0px; - } - .book .book-body,.book .book-body .body-inner, .book.with-summary { - overflow: visible !important; - } -} -.kable_wrapper { - border-spacing: 20px 0; - border-collapse: separate; - border: none; - margin: auto; -} -.kable_wrapper > tbody > tr > td { - vertical-align: top; -} -.book .book-body .page-wrapper .page-inner section.normal table tr.header { - border-top-width: 2px; -} -.book .book-body .page-wrapper .page-inner section.normal table tr:last-child td { - border-bottom-width: 2px; -} -.book .book-body .page-wrapper .page-inner section.normal table td, .book .book-body .page-wrapper .page-inner section.normal table th { - border-left: none; - border-right: none; -} -.book .book-body .page-wrapper .page-inner section.normal table.kable_wrapper > tbody > tr, .book .book-body .page-wrapper .page-inner section.normal table.kable_wrapper > tbody > tr > td { - border-top: none; 
-} -.book .book-body .page-wrapper .page-inner section.normal table.kable_wrapper > tbody > tr:last-child > td { - border-bottom: none; -} - -div.theorem, div.lemma, div.corollary, div.proposition, div.conjecture { - font-style: italic; -} -span.theorem, span.lemma, span.corollary, span.proposition, span.conjecture { - font-style: normal; -} -div.proof>*:last-child:after { - content: "\25a2"; - float: right; -} -.header-section-number { - padding-right: .5em; -} -#header .multi-author { - margin: 0.5em 0 -0.5em 0; -} -#header .date { - margin-top: 1.5em; -} diff --git a/_book/libs/gitbook-2.6.7/css/plugin-clipboard.css b/_book/libs/gitbook-2.6.7/css/plugin-clipboard.css deleted file mode 100644 index 6844a70aa..000000000 --- a/_book/libs/gitbook-2.6.7/css/plugin-clipboard.css +++ /dev/null @@ -1,18 +0,0 @@ -div.sourceCode { - position: relative; -} - -.copy-to-clipboard-button { - position: absolute; - right: 0; - top: 0; - visibility: hidden; -} - -.copy-to-clipboard-button:focus { - outline: 0; -} - -div.sourceCode:hover > .copy-to-clipboard-button { - visibility: visible; -} diff --git a/_book/libs/gitbook-2.6.7/css/plugin-fontsettings.css b/_book/libs/gitbook-2.6.7/css/plugin-fontsettings.css deleted file mode 100644 index 3fa6f35b2..000000000 --- a/_book/libs/gitbook-2.6.7/css/plugin-fontsettings.css +++ /dev/null @@ -1,303 +0,0 @@ -/* - * Theme 1 - */ -.color-theme-1 .dropdown-menu { - background-color: #111111; - border-color: #7e888b; -} -.color-theme-1 .dropdown-menu .dropdown-caret .caret-inner { - border-bottom: 9px solid #111111; -} -.color-theme-1 .dropdown-menu .buttons { - border-color: #7e888b; -} -.color-theme-1 .dropdown-menu .button { - color: #afa790; -} -.color-theme-1 .dropdown-menu .button:hover { - color: #73553c; -} -/* - * Theme 2 - */ -.color-theme-2 .dropdown-menu { - background-color: #2d3143; - border-color: #272a3a; -} -.color-theme-2 .dropdown-menu .dropdown-caret .caret-inner { - border-bottom: 9px solid #2d3143; -} -.color-theme-2 
.dropdown-menu .buttons { - border-color: #272a3a; -} -.color-theme-2 .dropdown-menu .button { - color: #62677f; -} -.color-theme-2 .dropdown-menu .button:hover { - color: #f4f4f5; -} -.book .book-header .font-settings .font-enlarge { - line-height: 30px; - font-size: 1.4em; -} -.book .book-header .font-settings .font-reduce { - line-height: 30px; - font-size: 1em; -} - -/* sidebar transition background */ -div.book.color-theme-1 { - background: #f3eacb; -} -.book.color-theme-1 .book-body { - color: #704214; - background: #f3eacb; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section { - background: #f3eacb; -} - -/* sidebar transition background */ -div.book.color-theme-2 { - background: #1c1f2b; -} - -.book.color-theme-2 .book-body { - color: #bdcadb; - background: #1c1f2b; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section { - background: #1c1f2b; -} -.book.font-size-0 .book-body .page-inner section { - font-size: 1.2rem; -} -.book.font-size-1 .book-body .page-inner section { - font-size: 1.4rem; -} -.book.font-size-2 .book-body .page-inner section { - font-size: 1.6rem; -} -.book.font-size-3 .book-body .page-inner section { - font-size: 2.2rem; -} -.book.font-size-4 .book-body .page-inner section { - font-size: 4rem; -} -.book.font-family-0 { - font-family: Georgia, serif; -} -.book.font-family-1 { - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal { - color: #704214; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal a { - color: inherit; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal h1, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal h2, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal h3, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal h4, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal 
h5, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal h6 { - color: inherit; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal h1, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal h2 { - border-color: inherit; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal h6 { - color: inherit; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal hr { - background-color: inherit; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal blockquote { - border-color: #c4b29f; - opacity: 0.9; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code { - background: #fdf6e3; - color: #657b83; - border-color: #f8df9c; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal .highlight { - background-color: inherit; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal table th, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal table td { - border-color: #f5d06c; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal table tr { - color: inherit; - background-color: #fdf6e3; - border-color: #444444; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal table tr:nth-child(2n) { - background-color: #fbeecb; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal { - color: #bdcadb; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal a { - color: #3eb1d0; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal h1, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal h2, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal h3, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal h4, -.book.color-theme-2 .book-body 
.page-wrapper .page-inner section.normal h5, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal h6 { - color: #fffffa; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal h1, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal h2 { - border-color: #373b4e; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal h6 { - color: #373b4e; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal hr { - background-color: #373b4e; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal blockquote { - border-color: #373b4e; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code { - color: #9dbed8; - background: #2d3143; - border-color: #2d3143; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal .highlight { - background-color: #282a39; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal table th, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal table td { - border-color: #3b3f54; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal table tr { - color: #b6c2d2; - background-color: #2d3143; - border-color: #3b3f54; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal table tr:nth-child(2n) { - background-color: #35394b; -} -.book.color-theme-1 .book-header { - color: #afa790; - background: transparent; -} -.book.color-theme-1 .book-header .btn { - color: #afa790; -} -.book.color-theme-1 .book-header .btn:hover { - color: #73553c; - background: none; -} -.book.color-theme-1 .book-header h1 { - color: #704214; -} -.book.color-theme-2 .book-header { - color: #7e888b; - background: transparent; -} -.book.color-theme-2 .book-header .btn { - color: #3b3f54; -} -.book.color-theme-2 .book-header .btn:hover { - color: #fffff5; - background: 
none; -} -.book.color-theme-2 .book-header h1 { - color: #bdcadb; -} -.book.color-theme-1 .book-body .navigation { - color: #afa790; -} -.book.color-theme-1 .book-body .navigation:hover { - color: #73553c; -} -.book.color-theme-2 .book-body .navigation { - color: #383f52; -} -.book.color-theme-2 .book-body .navigation:hover { - color: #fffff5; -} -/* - * Theme 1 - */ -.book.color-theme-1 .book-summary { - color: #afa790; - background: #111111; - border-right: 1px solid rgba(0, 0, 0, 0.07); -} -.book.color-theme-1 .book-summary .book-search { - background: transparent; -} -.book.color-theme-1 .book-summary .book-search input, -.book.color-theme-1 .book-summary .book-search input:focus { - border: 1px solid transparent; -} -.book.color-theme-1 .book-summary ul.summary li.divider { - background: #7e888b; - box-shadow: none; -} -.book.color-theme-1 .book-summary ul.summary li i.fa-check { - color: #33cc33; -} -.book.color-theme-1 .book-summary ul.summary li.done > a { - color: #877f6a; -} -.book.color-theme-1 .book-summary ul.summary li a, -.book.color-theme-1 .book-summary ul.summary li span { - color: #877f6a; - background: transparent; - font-weight: normal; -} -.book.color-theme-1 .book-summary ul.summary li.active > a, -.book.color-theme-1 .book-summary ul.summary li a:hover { - color: #704214; - background: transparent; - font-weight: normal; -} -/* - * Theme 2 - */ -.book.color-theme-2 .book-summary { - color: #bcc1d2; - background: #2d3143; - border-right: none; -} -.book.color-theme-2 .book-summary .book-search { - background: transparent; -} -.book.color-theme-2 .book-summary .book-search input, -.book.color-theme-2 .book-summary .book-search input:focus { - border: 1px solid transparent; -} -.book.color-theme-2 .book-summary ul.summary li.divider { - background: #272a3a; - box-shadow: none; -} -.book.color-theme-2 .book-summary ul.summary li i.fa-check { - color: #33cc33; -} -.book.color-theme-2 .book-summary ul.summary li.done > a { - color: #62687f; -} 
-.book.color-theme-2 .book-summary ul.summary li a, -.book.color-theme-2 .book-summary ul.summary li span { - color: #c1c6d7; - background: transparent; - font-weight: 600; -} -.book.color-theme-2 .book-summary ul.summary li.active > a, -.book.color-theme-2 .book-summary ul.summary li a:hover { - color: #f4f4f5; - background: #252737; - font-weight: 600; -} diff --git a/_book/libs/gitbook-2.6.7/css/plugin-highlight.css b/_book/libs/gitbook-2.6.7/css/plugin-highlight.css deleted file mode 100644 index 2aabd3deb..000000000 --- a/_book/libs/gitbook-2.6.7/css/plugin-highlight.css +++ /dev/null @@ -1,426 +0,0 @@ -.book .book-body .page-wrapper .page-inner section.normal pre, -.book .book-body .page-wrapper .page-inner section.normal code { - /* http://jmblog.github.com/color-themes-for-google-code-highlightjs */ - /* Tomorrow Comment */ - /* Tomorrow Red */ - /* Tomorrow Orange */ - /* Tomorrow Yellow */ - /* Tomorrow Green */ - /* Tomorrow Aqua */ - /* Tomorrow Blue */ - /* Tomorrow Purple */ -} -.book .book-body .page-wrapper .page-inner section.normal pre .hljs-comment, -.book .book-body .page-wrapper .page-inner section.normal code .hljs-comment, -.book .book-body .page-wrapper .page-inner section.normal pre .hljs-title, -.book .book-body .page-wrapper .page-inner section.normal code .hljs-title { - color: #8e908c; -} -.book .book-body .page-wrapper .page-inner section.normal pre .hljs-variable, -.book .book-body .page-wrapper .page-inner section.normal code .hljs-variable, -.book .book-body .page-wrapper .page-inner section.normal pre .hljs-attribute, -.book .book-body .page-wrapper .page-inner section.normal code .hljs-attribute, -.book .book-body .page-wrapper .page-inner section.normal pre .hljs-tag, -.book .book-body .page-wrapper .page-inner section.normal code .hljs-tag, -.book .book-body .page-wrapper .page-inner section.normal pre .hljs-regexp, -.book .book-body .page-wrapper .page-inner section.normal code .hljs-regexp, -.book .book-body .page-wrapper 
.page-inner section.normal pre .ruby .hljs-constant, -.book .book-body .page-wrapper .page-inner section.normal code .ruby .hljs-constant, -.book .book-body .page-wrapper .page-inner section.normal pre .xml .hljs-tag .hljs-title, -.book .book-body .page-wrapper .page-inner section.normal code .xml .hljs-tag .hljs-title, -.book .book-body .page-wrapper .page-inner section.normal pre .xml .hljs-pi, -.book .book-body .page-wrapper .page-inner section.normal code .xml .hljs-pi, -.book .book-body .page-wrapper .page-inner section.normal pre .xml .hljs-doctype, -.book .book-body .page-wrapper .page-inner section.normal code .xml .hljs-doctype, -.book .book-body .page-wrapper .page-inner section.normal pre .html .hljs-doctype, -.book .book-body .page-wrapper .page-inner section.normal code .html .hljs-doctype, -.book .book-body .page-wrapper .page-inner section.normal pre .css .hljs-id, -.book .book-body .page-wrapper .page-inner section.normal code .css .hljs-id, -.book .book-body .page-wrapper .page-inner section.normal pre .css .hljs-class, -.book .book-body .page-wrapper .page-inner section.normal code .css .hljs-class, -.book .book-body .page-wrapper .page-inner section.normal pre .css .hljs-pseudo, -.book .book-body .page-wrapper .page-inner section.normal code .css .hljs-pseudo { - color: #c82829; -} -.book .book-body .page-wrapper .page-inner section.normal pre .hljs-number, -.book .book-body .page-wrapper .page-inner section.normal code .hljs-number, -.book .book-body .page-wrapper .page-inner section.normal pre .hljs-preprocessor, -.book .book-body .page-wrapper .page-inner section.normal code .hljs-preprocessor, -.book .book-body .page-wrapper .page-inner section.normal pre .hljs-pragma, -.book .book-body .page-wrapper .page-inner section.normal code .hljs-pragma, -.book .book-body .page-wrapper .page-inner section.normal pre .hljs-built_in, -.book .book-body .page-wrapper .page-inner section.normal code .hljs-built_in, -.book .book-body .page-wrapper 
.page-inner section.normal pre .hljs-literal, -.book .book-body .page-wrapper .page-inner section.normal code .hljs-literal, -.book .book-body .page-wrapper .page-inner section.normal pre .hljs-params, -.book .book-body .page-wrapper .page-inner section.normal code .hljs-params, -.book .book-body .page-wrapper .page-inner section.normal pre .hljs-constant, -.book .book-body .page-wrapper .page-inner section.normal code .hljs-constant { - color: #f5871f; -} -.book .book-body .page-wrapper .page-inner section.normal pre .ruby .hljs-class .hljs-title, -.book .book-body .page-wrapper .page-inner section.normal code .ruby .hljs-class .hljs-title, -.book .book-body .page-wrapper .page-inner section.normal pre .css .hljs-rules .hljs-attribute, -.book .book-body .page-wrapper .page-inner section.normal code .css .hljs-rules .hljs-attribute { - color: #eab700; -} -.book .book-body .page-wrapper .page-inner section.normal pre .hljs-string, -.book .book-body .page-wrapper .page-inner section.normal code .hljs-string, -.book .book-body .page-wrapper .page-inner section.normal pre .hljs-value, -.book .book-body .page-wrapper .page-inner section.normal code .hljs-value, -.book .book-body .page-wrapper .page-inner section.normal pre .hljs-inheritance, -.book .book-body .page-wrapper .page-inner section.normal code .hljs-inheritance, -.book .book-body .page-wrapper .page-inner section.normal pre .hljs-header, -.book .book-body .page-wrapper .page-inner section.normal code .hljs-header, -.book .book-body .page-wrapper .page-inner section.normal pre .ruby .hljs-symbol, -.book .book-body .page-wrapper .page-inner section.normal code .ruby .hljs-symbol, -.book .book-body .page-wrapper .page-inner section.normal pre .xml .hljs-cdata, -.book .book-body .page-wrapper .page-inner section.normal code .xml .hljs-cdata { - color: #718c00; -} -.book .book-body .page-wrapper .page-inner section.normal pre .css .hljs-hexcolor, -.book .book-body .page-wrapper .page-inner section.normal code .css 
.hljs-hexcolor { - color: #3e999f; -} -.book .book-body .page-wrapper .page-inner section.normal pre .hljs-function, -.book .book-body .page-wrapper .page-inner section.normal code .hljs-function, -.book .book-body .page-wrapper .page-inner section.normal pre .python .hljs-decorator, -.book .book-body .page-wrapper .page-inner section.normal code .python .hljs-decorator, -.book .book-body .page-wrapper .page-inner section.normal pre .python .hljs-title, -.book .book-body .page-wrapper .page-inner section.normal code .python .hljs-title, -.book .book-body .page-wrapper .page-inner section.normal pre .ruby .hljs-function .hljs-title, -.book .book-body .page-wrapper .page-inner section.normal code .ruby .hljs-function .hljs-title, -.book .book-body .page-wrapper .page-inner section.normal pre .ruby .hljs-title .hljs-keyword, -.book .book-body .page-wrapper .page-inner section.normal code .ruby .hljs-title .hljs-keyword, -.book .book-body .page-wrapper .page-inner section.normal pre .perl .hljs-sub, -.book .book-body .page-wrapper .page-inner section.normal code .perl .hljs-sub, -.book .book-body .page-wrapper .page-inner section.normal pre .javascript .hljs-title, -.book .book-body .page-wrapper .page-inner section.normal code .javascript .hljs-title, -.book .book-body .page-wrapper .page-inner section.normal pre .coffeescript .hljs-title, -.book .book-body .page-wrapper .page-inner section.normal code .coffeescript .hljs-title { - color: #4271ae; -} -.book .book-body .page-wrapper .page-inner section.normal pre .hljs-keyword, -.book .book-body .page-wrapper .page-inner section.normal code .hljs-keyword, -.book .book-body .page-wrapper .page-inner section.normal pre .javascript .hljs-function, -.book .book-body .page-wrapper .page-inner section.normal code .javascript .hljs-function { - color: #8959a8; -} -.book .book-body .page-wrapper .page-inner section.normal pre .hljs, -.book .book-body .page-wrapper .page-inner section.normal code .hljs { - display: block; - 
background: white; - color: #4d4d4c; - padding: 0.5em; -} -.book .book-body .page-wrapper .page-inner section.normal pre .coffeescript .javascript, -.book .book-body .page-wrapper .page-inner section.normal code .coffeescript .javascript, -.book .book-body .page-wrapper .page-inner section.normal pre .javascript .xml, -.book .book-body .page-wrapper .page-inner section.normal code .javascript .xml, -.book .book-body .page-wrapper .page-inner section.normal pre .tex .hljs-formula, -.book .book-body .page-wrapper .page-inner section.normal code .tex .hljs-formula, -.book .book-body .page-wrapper .page-inner section.normal pre .xml .javascript, -.book .book-body .page-wrapper .page-inner section.normal code .xml .javascript, -.book .book-body .page-wrapper .page-inner section.normal pre .xml .vbscript, -.book .book-body .page-wrapper .page-inner section.normal code .xml .vbscript, -.book .book-body .page-wrapper .page-inner section.normal pre .xml .css, -.book .book-body .page-wrapper .page-inner section.normal code .xml .css, -.book .book-body .page-wrapper .page-inner section.normal pre .xml .hljs-cdata, -.book .book-body .page-wrapper .page-inner section.normal code .xml .hljs-cdata { - opacity: 0.5; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code { - /* - -Orginal Style from ethanschoonover.com/solarized (c) Jeremy Hull - -*/ - /* Solarized Green */ - /* Solarized Cyan */ - /* Solarized Blue */ - /* Solarized Yellow */ - /* Solarized Orange */ - /* Solarized Red */ - /* Solarized Violet */ -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs { - display: block; - padding: 0.5em; - background: #fdf6e3; - color: #657b83; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-comment, -.book.color-theme-1 
.book-body .page-wrapper .page-inner section.normal code .hljs-comment, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-template_comment, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-template_comment, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .diff .hljs-header, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .diff .hljs-header, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-doctype, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-doctype, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-pi, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-pi, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .lisp .hljs-string, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .lisp .hljs-string, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-javadoc, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-javadoc { - color: #93a1a1; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-keyword, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-keyword, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-winutils, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-winutils, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .method, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .method, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-addition, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-addition, -.book.color-theme-1 .book-body 
.page-wrapper .page-inner section.normal pre .css .hljs-tag, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .css .hljs-tag, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-request, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-request, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-status, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-status, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .nginx .hljs-title, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .nginx .hljs-title { - color: #859900; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-number, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-number, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-command, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-command, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-string, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-string, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-tag .hljs-value, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-tag .hljs-value, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-rules .hljs-value, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-rules .hljs-value, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-phpdoc, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-phpdoc, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .tex .hljs-formula, -.book.color-theme-1 
.book-body .page-wrapper .page-inner section.normal code .tex .hljs-formula, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-regexp, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-regexp, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-hexcolor, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-hexcolor, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-link_url, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-link_url { - color: #2aa198; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-title, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-title, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-localvars, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-localvars, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-chunk, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-chunk, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-decorator, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-decorator, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-built_in, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-built_in, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-identifier, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-identifier, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .vhdl .hljs-literal, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .vhdl .hljs-literal, -.book.color-theme-1 
.book-body .page-wrapper .page-inner section.normal pre .hljs-id, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-id, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .css .hljs-function, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .css .hljs-function { - color: #268bd2; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-attribute, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-attribute, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-variable, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-variable, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .lisp .hljs-body, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .lisp .hljs-body, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .smalltalk .hljs-number, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .smalltalk .hljs-number, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-constant, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-constant, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-class .hljs-title, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-class .hljs-title, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-parent, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-parent, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .haskell .hljs-type, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .haskell .hljs-type, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre 
.hljs-link_reference, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-link_reference { - color: #b58900; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-preprocessor, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-preprocessor, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-preprocessor .hljs-keyword, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-preprocessor .hljs-keyword, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-pragma, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-pragma, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-shebang, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-shebang, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-symbol, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-symbol, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-symbol .hljs-string, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-symbol .hljs-string, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .diff .hljs-change, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .diff .hljs-change, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-special, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-special, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-attr_selector, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-attr_selector, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-subst, 
-.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-subst, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-cdata, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-cdata, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .clojure .hljs-title, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .clojure .hljs-title, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .css .hljs-pseudo, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .css .hljs-pseudo, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-header, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-header { - color: #cb4b16; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-deletion, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-deletion, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-important, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-important { - color: #dc322f; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .hljs-link_label, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .hljs-link_label { - color: #6c71c4; -} -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal pre .tex .hljs-formula, -.book.color-theme-1 .book-body .page-wrapper .page-inner section.normal code .tex .hljs-formula { - background: #eee8d5; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code { - /* Tomorrow Night Bright Theme */ - /* Original theme - https://github.com/chriskempson/tomorrow-theme */ - /* 
http://jmblog.github.com/color-themes-for-google-code-highlightjs */ - /* Tomorrow Comment */ - /* Tomorrow Red */ - /* Tomorrow Orange */ - /* Tomorrow Yellow */ - /* Tomorrow Green */ - /* Tomorrow Aqua */ - /* Tomorrow Blue */ - /* Tomorrow Purple */ -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .hljs-comment, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .hljs-comment, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .hljs-title, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .hljs-title { - color: #969896; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .hljs-variable, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .hljs-variable, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .hljs-attribute, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .hljs-attribute, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .hljs-tag, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .hljs-tag, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .hljs-regexp, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .hljs-regexp, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .ruby .hljs-constant, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .ruby .hljs-constant, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .xml .hljs-tag .hljs-title, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .xml .hljs-tag .hljs-title, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .xml .hljs-pi, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .xml .hljs-pi, -.book.color-theme-2 
.book-body .page-wrapper .page-inner section.normal pre .xml .hljs-doctype, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .xml .hljs-doctype, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .html .hljs-doctype, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .html .hljs-doctype, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .css .hljs-id, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .css .hljs-id, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .css .hljs-class, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .css .hljs-class, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .css .hljs-pseudo, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .css .hljs-pseudo { - color: #d54e53; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .hljs-number, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .hljs-number, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .hljs-preprocessor, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .hljs-preprocessor, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .hljs-pragma, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .hljs-pragma, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .hljs-built_in, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .hljs-built_in, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .hljs-literal, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .hljs-literal, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .hljs-params, 
-.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .hljs-params, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .hljs-constant, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .hljs-constant { - color: #e78c45; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .ruby .hljs-class .hljs-title, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .ruby .hljs-class .hljs-title, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .css .hljs-rules .hljs-attribute, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .css .hljs-rules .hljs-attribute { - color: #e7c547; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .hljs-string, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .hljs-string, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .hljs-value, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .hljs-value, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .hljs-inheritance, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .hljs-inheritance, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .hljs-header, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .hljs-header, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .ruby .hljs-symbol, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .ruby .hljs-symbol, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .xml .hljs-cdata, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .xml .hljs-cdata { - color: #b9ca4a; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .css 
.hljs-hexcolor, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .css .hljs-hexcolor { - color: #70c0b1; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .hljs-function, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .hljs-function, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .python .hljs-decorator, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .python .hljs-decorator, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .python .hljs-title, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .python .hljs-title, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .ruby .hljs-function .hljs-title, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .ruby .hljs-function .hljs-title, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .ruby .hljs-title .hljs-keyword, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .ruby .hljs-title .hljs-keyword, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .perl .hljs-sub, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .perl .hljs-sub, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .javascript .hljs-title, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .javascript .hljs-title, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .coffeescript .hljs-title, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .coffeescript .hljs-title { - color: #7aa6da; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .hljs-keyword, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .hljs-keyword, 
-.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .javascript .hljs-function, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .javascript .hljs-function { - color: #c397d8; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .hljs, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .hljs { - display: block; - background: black; - color: #eaeaea; - padding: 0.5em; -} -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .coffeescript .javascript, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .coffeescript .javascript, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .javascript .xml, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .javascript .xml, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .tex .hljs-formula, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .tex .hljs-formula, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .xml .javascript, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .xml .javascript, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .xml .vbscript, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .xml .vbscript, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .xml .css, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .xml .css, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal pre .xml .hljs-cdata, -.book.color-theme-2 .book-body .page-wrapper .page-inner section.normal code .xml .hljs-cdata { - opacity: 0.5; -} diff --git a/_book/libs/gitbook-2.6.7/css/plugin-search.css b/_book/libs/gitbook-2.6.7/css/plugin-search.css deleted file mode 100644 index 
c85e557aa..000000000 --- a/_book/libs/gitbook-2.6.7/css/plugin-search.css +++ /dev/null @@ -1,31 +0,0 @@ -.book .book-summary .book-search { - padding: 6px; - background: transparent; - position: absolute; - top: -50px; - left: 0px; - right: 0px; - transition: top 0.5s ease; -} -.book .book-summary .book-search input, -.book .book-summary .book-search input:focus, -.book .book-summary .book-search input:hover { - width: 100%; - background: transparent; - border: 1px solid #ccc; - box-shadow: none; - outline: none; - line-height: 22px; - padding: 7px 4px; - color: inherit; - box-sizing: border-box; -} -.book.with-search .book-summary .book-search { - top: 0px; -} -.book.with-search .book-summary ul.summary { - top: 50px; -} -.with-search .summary li[data-level] a[href*=".html#"] { - display: none; -} diff --git a/_book/libs/gitbook-2.6.7/css/plugin-table.css b/_book/libs/gitbook-2.6.7/css/plugin-table.css deleted file mode 100644 index 7fba1b9fb..000000000 --- a/_book/libs/gitbook-2.6.7/css/plugin-table.css +++ /dev/null @@ -1 +0,0 @@ -.book .book-body .page-wrapper .page-inner section.normal table{display:table;width:100%;border-collapse:collapse;border-spacing:0;overflow:auto}.book .book-body .page-wrapper .page-inner section.normal table td,.book .book-body .page-wrapper .page-inner section.normal table th{padding:6px 13px;border:1px solid #ddd}.book .book-body .page-wrapper .page-inner section.normal table tr{background-color:#fff;border-top:1px solid #ccc}.book .book-body .page-wrapper .page-inner section.normal table tr:nth-child(2n){background-color:#f8f8f8}.book .book-body .page-wrapper .page-inner section.normal table th{font-weight:700} diff --git a/_book/libs/gitbook-2.6.7/css/style.css b/_book/libs/gitbook-2.6.7/css/style.css deleted file mode 100644 index cba69b23b..000000000 --- a/_book/libs/gitbook-2.6.7/css/style.css +++ /dev/null @@ -1,13 +0,0 @@ -/*! 
normalize.css v2.1.0 | MIT License | git.io/normalize */img,legend{border:0}*{-webkit-font-smoothing:antialiased}sub,sup{position:relative}.book .book-body .page-wrapper .page-inner section.normal hr:after,.book-langs-index .inner .languages:after,.buttons:after,.dropdown-menu .buttons:after{clear:both}body,html{-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}article,aside,details,figcaption,figure,footer,header,hgroup,main,nav,section,summary{display:block}audio,canvas,video{display:inline-block}.hidden,[hidden]{display:none}audio:not([controls]){display:none;height:0}html{font-family:sans-serif}body,figure{margin:0}a:focus{outline:dotted thin}a:active,a:hover{outline:0}h1{font-size:2em;margin:.67em 0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}dfn{font-style:italic}hr{-moz-box-sizing:content-box;box-sizing:content-box;height:0}mark{background:#ff0;color:#000}code,kbd,pre,samp{font-family:monospace,serif;font-size:1em}pre{white-space:pre-wrap}q{quotes:"\201C" "\201D" "\2018" "\2019"}small{font-size:80%}sub,sup{font-size:75%;line-height:0;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}svg:not(:root){overflow:hidden}fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em .75em}legend{padding:0}button,input,select,textarea{font-family:inherit;font-size:100%;margin:0}button,input{line-height:normal}button,select{text-transform:none}button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}input[type=checkbox],input[type=radio]{box-sizing:border-box;padding:0}input[type=search]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}input[type=search]::-webkit-search-cancel-button{margin-right:10px;}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}textarea{overflow:auto;vertical-align:top}table{border-collapse:collapse;border-spacing:0}/*! 
- * Preboot v2 - * - * Open sourced under MIT license by @mdo. - * Some variables and mixins from Bootstrap (Apache 2 license). - */.link-inherit,.link-inherit:focus,.link-inherit:hover{color:inherit}/*! - * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome - * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) - */@font-face{font-family:'FontAwesome';src:url('./fontawesome/fontawesome-webfont.ttf?v=4.7.0') format('truetype');font-weight:normal;font-style:normal}.fa{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571429em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14285714em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14285714em;width:2.14285714em;top:.14285714em;text-align:center}.fa-li.fa-lg{left:-1.85714286em}.fa-border{padding:.2em .25em .15em;border:solid .08em #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left{margin-right:.3em}.fa.fa-pull-right{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left{margin-right:.3em}.fa.pull-right{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}.fa-pulse{-webkit-animation:fa-spin 1s infinite steps(8);animation:fa-spin 1s infinite steps(8)}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes 
fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scale(-1, 1);-ms-transform:scale(-1, 1);transform:scale(-1, 1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scale(1, -1);-ms-transform:scale(1, -1);transform:scale(1, -1)}:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270,:root .fa-flip-horizontal,:root 
.fa-flip-vertical{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:"\f000"}.fa-music:before{content:"\f001"}.fa-search:before{content:"\f002"}.fa-envelope-o:before{content:"\f003"}.fa-heart:before{content:"\f004"}.fa-star:before{content:"\f005"}.fa-star-o:before{content:"\f006"}.fa-user:before{content:"\f007"}.fa-film:before{content:"\f008"}.fa-th-large:before{content:"\f009"}.fa-th:before{content:"\f00a"}.fa-th-list:before{content:"\f00b"}.fa-check:before{content:"\f00c"}.fa-remove:before,.fa-close:before,.fa-times:before{content:"\f00d"}.fa-search-plus:before{content:"\f00e"}.fa-search-minus:before{content:"\f010"}.fa-power-off:before{content:"\f011"}.fa-signal:before{content:"\f012"}.fa-gear:before,.fa-cog:before{content:"\f013"}.fa-trash-o:before{content:"\f014"}.fa-home:before{content:"\f015"}.fa-file-o:before{content:"\f016"}.fa-clock-o:before{content:"\f017"}.fa-road:before{content:"\f018"}.fa-download:before{content:"\f019"}.fa-arrow-circle-o-down:before{content:"\f01a"}.fa-arrow-circle-o-up:before{content:"\f01b"}.fa-inbox:before{content:"\f01c"}.fa-play-circle-o:before{content:"\f01d"}.fa-rotate-right:before,.fa-repeat:before{content:"\f01e"}.fa-refresh:before{content:"\f021"}.fa-list-alt:before{content:"\f022"}.fa-lock:before{content:"\f023"}.fa-flag:before{content:"\f024"}.fa-headphones:before{content:"\f025"}.fa-volume-off:before{content:"\f026"}.fa-volume-down:before{content:"\f027"}.fa-volume-up:before{content:"\f028"}.fa-qrcode:before{content:"\f029"}.fa-barcode:before{content:"\f02a"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-book:before{content:"\f02d"}.fa-bookmark:before{content:"\f02e"}.fa-print:before{content:"\f02f"}.fa-camera:before{content:"\f030"}.fa-font:before{c
ontent:"\f031"}.fa-bold:before{content:"\f032"}.fa-italic:before{content:"\f033"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-align-left:before{content:"\f036"}.fa-align-center:before{content:"\f037"}.fa-align-right:before{content:"\f038"}.fa-align-justify:before{content:"\f039"}.fa-list:before{content:"\f03a"}.fa-dedent:before,.fa-outdent:before{content:"\f03b"}.fa-indent:before{content:"\f03c"}.fa-video-camera:before{content:"\f03d"}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:"\f03e"}.fa-pencil:before{content:"\f040"}.fa-map-marker:before{content:"\f041"}.fa-adjust:before{content:"\f042"}.fa-tint:before{content:"\f043"}.fa-edit:before,.fa-pencil-square-o:before{content:"\f044"}.fa-share-square-o:before{content:"\f045"}.fa-check-square-o:before{content:"\f046"}.fa-arrows:before{content:"\f047"}.fa-step-backward:before{content:"\f048"}.fa-fast-backward:before{content:"\f049"}.fa-backward:before{content:"\f04a"}.fa-play:before{content:"\f04b"}.fa-pause:before{content:"\f04c"}.fa-stop:before{content:"\f04d"}.fa-forward:before{content:"\f04e"}.fa-fast-forward:before{content:"\f050"}.fa-step-forward:before{content:"\f051"}.fa-eject:before{content:"\f052"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-plus-circle:before{content:"\f055"}.fa-minus-circle:before{content:"\f056"}.fa-times-circle:before{content:"\f057"}.fa-check-circle:before{content:"\f058"}.fa-question-circle:before{content:"\f059"}.fa-info-circle:before{content:"\f05a"}.fa-crosshairs:before{content:"\f05b"}.fa-times-circle-o:before{content:"\f05c"}.fa-check-circle-o:before{content:"\f05d"}.fa-ban:before{content:"\f05e"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrow-down:before{content:"\f063"}.fa-mail-forward:before,.fa-share:before{content:"\f064"}.fa-expand:before{content:"\f065"}.fa-compress:before{content:"\f066"}.fa-plus:before{content
:"\f067"}.fa-minus:before{content:"\f068"}.fa-asterisk:before{content:"\f069"}.fa-exclamation-circle:before{content:"\f06a"}.fa-gift:before{content:"\f06b"}.fa-leaf:before{content:"\f06c"}.fa-fire:before{content:"\f06d"}.fa-eye:before{content:"\f06e"}.fa-eye-slash:before{content:"\f070"}.fa-warning:before,.fa-exclamation-triangle:before{content:"\f071"}.fa-plane:before{content:"\f072"}.fa-calendar:before{content:"\f073"}.fa-random:before{content:"\f074"}.fa-comment:before{content:"\f075"}.fa-magnet:before{content:"\f076"}.fa-chevron-up:before{content:"\f077"}.fa-chevron-down:before{content:"\f078"}.fa-retweet:before{content:"\f079"}.fa-shopping-cart:before{content:"\f07a"}.fa-folder:before{content:"\f07b"}.fa-folder-open:before{content:"\f07c"}.fa-arrows-v:before{content:"\f07d"}.fa-arrows-h:before{content:"\f07e"}.fa-bar-chart-o:before,.fa-bar-chart:before{content:"\f080"}.fa-twitter-square:before{content:"\f081"}.fa-facebook-square:before{content:"\f082"}.fa-camera-retro:before{content:"\f083"}.fa-key:before{content:"\f084"}.fa-gears:before,.fa-cogs:before{content:"\f085"}.fa-comments:before{content:"\f086"}.fa-thumbs-o-up:before{content:"\f087"}.fa-thumbs-o-down:before{content:"\f088"}.fa-star-half:before{content:"\f089"}.fa-heart-o:before{content:"\f08a"}.fa-sign-out:before{content:"\f08b"}.fa-linkedin-square:before{content:"\f08c"}.fa-thumb-tack:before{content:"\f08d"}.fa-external-link:before{content:"\f08e"}.fa-sign-in:before{content:"\f090"}.fa-trophy:before{content:"\f091"}.fa-github-square:before{content:"\f092"}.fa-upload:before{content:"\f093"}.fa-lemon-o:before{content:"\f094"}.fa-phone:before{content:"\f095"}.fa-square-o:before{content:"\f096"}.fa-bookmark-o:before{content:"\f097"}.fa-phone-square:before{content:"\f098"}.fa-twitter:before{content:"\f099"}.fa-facebook-f:before,.fa-facebook:before{content:"\f09a"}.fa-github:before{content:"\f09b"}.fa-unlock:before{content:"\f09c"}.fa-credit-card:before{content:"\f09d"}.fa-feed:before,.fa-rss:before{conten
t:"\f09e"}.fa-hdd-o:before{content:"\f0a0"}.fa-bullhorn:before{content:"\f0a1"}.fa-bell:before{content:"\f0f3"}.fa-certificate:before{content:"\f0a3"}.fa-hand-o-right:before{content:"\f0a4"}.fa-hand-o-left:before{content:"\f0a5"}.fa-hand-o-up:before{content:"\f0a6"}.fa-hand-o-down:before{content:"\f0a7"}.fa-arrow-circle-left:before{content:"\f0a8"}.fa-arrow-circle-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-globe:before{content:"\f0ac"}.fa-wrench:before{content:"\f0ad"}.fa-tasks:before{content:"\f0ae"}.fa-filter:before{content:"\f0b0"}.fa-briefcase:before{content:"\f0b1"}.fa-arrows-alt:before{content:"\f0b2"}.fa-group:before,.fa-users:before{content:"\f0c0"}.fa-chain:before,.fa-link:before{content:"\f0c1"}.fa-cloud:before{content:"\f0c2"}.fa-flask:before{content:"\f0c3"}.fa-cut:before,.fa-scissors:before{content:"\f0c4"}.fa-copy:before,.fa-files-o:before{content:"\f0c5"}.fa-paperclip:before{content:"\f0c6"}.fa-save:before,.fa-floppy-o:before{content:"\f0c7"}.fa-square:before{content:"\f0c8"}.fa-navicon:before,.fa-reorder:before,.fa-bars:before{content:"\f0c9"}.fa-list-ul:before{content:"\f0ca"}.fa-list-ol:before{content:"\f0cb"}.fa-strikethrough:before{content:"\f0cc"}.fa-underline:before{content:"\f0cd"}.fa-table:before{content:"\f0ce"}.fa-magic:before{content:"\f0d0"}.fa-truck:before{content:"\f0d1"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-square:before{content:"\f0d3"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-plus:before{content:"\f0d5"}.fa-money:before{content:"\f0d6"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-up:before{content:"\f0d8"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-columns:before{content:"\f0db"}.fa-unsorted:before,.fa-sort:before{content:"\f0dc"}.fa-sort-down:before,.fa-sort-desc:before{content:"\f0dd"}.fa-sort-up:before,.fa-sort-asc:before{content:"\f0de"}.fa-envelope:before{content:"\f0e0"}.fa-linkedin:b
efore{content:"\f0e1"}.fa-rotate-left:before,.fa-undo:before{content:"\f0e2"}.fa-legal:before,.fa-gavel:before{content:"\f0e3"}.fa-dashboard:before,.fa-tachometer:before{content:"\f0e4"}.fa-comment-o:before{content:"\f0e5"}.fa-comments-o:before{content:"\f0e6"}.fa-flash:before,.fa-bolt:before{content:"\f0e7"}.fa-sitemap:before{content:"\f0e8"}.fa-umbrella:before{content:"\f0e9"}.fa-paste:before,.fa-clipboard:before{content:"\f0ea"}.fa-lightbulb-o:before{content:"\f0eb"}.fa-exchange:before{content:"\f0ec"}.fa-cloud-download:before{content:"\f0ed"}.fa-cloud-upload:before{content:"\f0ee"}.fa-user-md:before{content:"\f0f0"}.fa-stethoscope:before{content:"\f0f1"}.fa-suitcase:before{content:"\f0f2"}.fa-bell-o:before{content:"\f0a2"}.fa-coffee:before{content:"\f0f4"}.fa-cutlery:before{content:"\f0f5"}.fa-file-text-o:before{content:"\f0f6"}.fa-building-o:before{content:"\f0f7"}.fa-hospital-o:before{content:"\f0f8"}.fa-ambulance:before{content:"\f0f9"}.fa-medkit:before{content:"\f0fa"}.fa-fighter-jet:before{content:"\f0fb"}.fa-beer:before{content:"\f0fc"}.fa-h-square:before{content:"\f0fd"}.fa-plus-square:before{content:"\f0fe"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content:"\f102"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angle-down:before{content:"\f107"}.fa-desktop:before{content:"\f108"}.fa-laptop:before{content:"\f109"}.fa-tablet:before{content:"\f10a"}.fa-mobile-phone:before,.fa-mobile:before{content:"\f10b"}.fa-circle-o:before{content:"\f10c"}.fa-quote-left:before{content:"\f10d"}.fa-quote-right:before{content:"\f10e"}.fa-spinner:before{content:"\f110"}.fa-circle:before{content:"\f111"}.fa-mail-reply:before,.fa-reply:before{content:"\f112"}.fa-github-alt:before{content:"\f113"}.fa-folder-o:before{content:"\f114"}.fa-folder-open-o:before{content:"\f115"}.fa-smile-o:before{c
ontent:"\f118"}.fa-frown-o:before{content:"\f119"}.fa-meh-o:before{content:"\f11a"}.fa-gamepad:before{content:"\f11b"}.fa-keyboard-o:before{content:"\f11c"}.fa-flag-o:before{content:"\f11d"}.fa-flag-checkered:before{content:"\f11e"}.fa-terminal:before{content:"\f120"}.fa-code:before{content:"\f121"}.fa-mail-reply-all:before,.fa-reply-all:before{content:"\f122"}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:"\f123"}.fa-location-arrow:before{content:"\f124"}.fa-crop:before{content:"\f125"}.fa-code-fork:before{content:"\f126"}.fa-unlink:before,.fa-chain-broken:before{content:"\f127"}.fa-question:before{content:"\f128"}.fa-info:before{content:"\f129"}.fa-exclamation:before{content:"\f12a"}.fa-superscript:before{content:"\f12b"}.fa-subscript:before{content:"\f12c"}.fa-eraser:before{content:"\f12d"}.fa-puzzle-piece:before{content:"\f12e"}.fa-microphone:before{content:"\f130"}.fa-microphone-slash:before{content:"\f131"}.fa-shield:before{content:"\f132"}.fa-calendar-o:before{content:"\f133"}.fa-fire-extinguisher:before{content:"\f134"}.fa-rocket:before{content:"\f135"}.fa-maxcdn:before{content:"\f136"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{content:"\f138"}.fa-chevron-circle-up:before{content:"\f139"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-html5:before{content:"\f13b"}.fa-css3:before{content:"\f13c"}.fa-anchor:before{content:"\f13d"}.fa-unlock-alt:before{content:"\f13e"}.fa-bullseye:before{content:"\f140"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-rss-square:before{content:"\f143"}.fa-play-circle:before{content:"\f144"}.fa-ticket:before{content:"\f145"}.fa-minus-square:before{content:"\f146"}.fa-minus-square-o:before{content:"\f147"}.fa-level-up:before{content:"\f148"}.fa-level-down:before{content:"\f149"}.fa-check-square:before{content:"\f14a"}.fa-pencil-square:before{content:"\f14b"}.fa-external-link-square:before{content:"\f14c"}.fa-share-square:bef
ore{content:"\f14d"}.fa-compass:before{content:"\f14e"}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:"\f150"}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:"\f151"}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:"\f152"}.fa-euro:before,.fa-eur:before{content:"\f153"}.fa-gbp:before{content:"\f154"}.fa-dollar:before,.fa-usd:before{content:"\f155"}.fa-rupee:before,.fa-inr:before{content:"\f156"}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:"\f157"}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:"\f158"}.fa-won:before,.fa-krw:before{content:"\f159"}.fa-bitcoin:before,.fa-btc:before{content:"\f15a"}.fa-file:before{content:"\f15b"}.fa-file-text:before{content:"\f15c"}.fa-sort-alpha-asc:before{content:"\f15d"}.fa-sort-alpha-desc:before{content:"\f15e"}.fa-sort-amount-asc:before{content:"\f160"}.fa-sort-amount-desc:before{content:"\f161"}.fa-sort-numeric-asc:before{content:"\f162"}.fa-sort-numeric-desc:before{content:"\f163"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbs-down:before{content:"\f165"}.fa-youtube-square:before{content:"\f166"}.fa-youtube:before{content:"\f167"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-youtube-play:before{content:"\f16a"}.fa-dropbox:before{content:"\f16b"}.fa-stack-overflow:before{content:"\f16c"}.fa-instagram:before{content:"\f16d"}.fa-flickr:before{content:"\f16e"}.fa-adn:before{content:"\f170"}.fa-bitbucket:before{content:"\f171"}.fa-bitbucket-square:before{content:"\f172"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-long-arrow-down:before{content:"\f175"}.fa-long-arrow-up:before{content:"\f176"}.fa-long-arrow-left:before{content:"\f177"}.fa-long-arrow-right:before{content:"\f178"}.fa-apple:before{content:"\f179"}.fa-windows:before{content:"\f17a"}.fa-android:before{content:"\f17b"}.fa-linux:before{content:"\f17c"}.fa-dribbble:before{content:"\f17d"}.fa-skype:before{content:"\f17e"}.fa-foursquare:befo
re{content:"\f180"}.fa-trello:before{content:"\f181"}.fa-female:before{content:"\f182"}.fa-male:before{content:"\f183"}.fa-gittip:before,.fa-gratipay:before{content:"\f184"}.fa-sun-o:before{content:"\f185"}.fa-moon-o:before{content:"\f186"}.fa-archive:before{content:"\f187"}.fa-bug:before{content:"\f188"}.fa-vk:before{content:"\f189"}.fa-weibo:before{content:"\f18a"}.fa-renren:before{content:"\f18b"}.fa-pagelines:before{content:"\f18c"}.fa-stack-exchange:before{content:"\f18d"}.fa-arrow-circle-o-right:before{content:"\f18e"}.fa-arrow-circle-o-left:before{content:"\f190"}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:"\f191"}.fa-dot-circle-o:before{content:"\f192"}.fa-wheelchair:before{content:"\f193"}.fa-vimeo-square:before{content:"\f194"}.fa-turkish-lira:before,.fa-try:before{content:"\f195"}.fa-plus-square-o:before{content:"\f196"}.fa-space-shuttle:before{content:"\f197"}.fa-slack:before{content:"\f198"}.fa-envelope-square:before{content:"\f199"}.fa-wordpress:before{content:"\f19a"}.fa-openid:before{content:"\f19b"}.fa-institution:before,.fa-bank:before,.fa-university:before{content:"\f19c"}.fa-mortar-board:before,.fa-graduation-cap:before{content:"\f19d"}.fa-yahoo:before{content:"\f19e"}.fa-google:before{content:"\f1a0"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-square:before{content:"\f1a2"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-stumbleupon:before{content:"\f1a4"}.fa-delicious:before{content:"\f1a5"}.fa-digg:before{content:"\f1a6"}.fa-pied-piper-pp:before{content:"\f1a7"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-drupal:before{content:"\f1a9"}.fa-joomla:before{content:"\f1aa"}.fa-language:before{content:"\f1ab"}.fa-fax:before{content:"\f1ac"}.fa-building:before{content:"\f1ad"}.fa-child:before{content:"\f1ae"}.fa-paw:before{content:"\f1b0"}.fa-spoon:before{content:"\f1b1"}.fa-cube:before{content:"\f1b2"}.fa-cubes:before{content:"\f1b3"}.fa-behance:before{content:"\f1b4"}.fa-behance-square:before{content:"\f1b5"}.fa-steam:before{co
ntent:"\f1b6"}.fa-steam-square:before{content:"\f1b7"}.fa-recycle:before{content:"\f1b8"}.fa-automobile:before,.fa-car:before{content:"\f1b9"}.fa-cab:before,.fa-taxi:before{content:"\f1ba"}.fa-tree:before{content:"\f1bb"}.fa-spotify:before{content:"\f1bc"}.fa-deviantart:before{content:"\f1bd"}.fa-soundcloud:before{content:"\f1be"}.fa-database:before{content:"\f1c0"}.fa-file-pdf-o:before{content:"\f1c1"}.fa-file-word-o:before{content:"\f1c2"}.fa-file-excel-o:before{content:"\f1c3"}.fa-file-powerpoint-o:before{content:"\f1c4"}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:"\f1c5"}.fa-file-zip-o:before,.fa-file-archive-o:before{content:"\f1c6"}.fa-file-sound-o:before,.fa-file-audio-o:before{content:"\f1c7"}.fa-file-movie-o:before,.fa-file-video-o:before{content:"\f1c8"}.fa-file-code-o:before{content:"\f1c9"}.fa-vine:before{content:"\f1ca"}.fa-codepen:before{content:"\f1cb"}.fa-jsfiddle:before{content:"\f1cc"}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:"\f1cd"}.fa-circle-o-notch:before{content:"\f1ce"}.fa-ra:before,.fa-resistance:before,.fa-rebel:before{content:"\f1d0"}.fa-ge:before,.fa-empire:before{content:"\f1d1"}.fa-git-square:before{content:"\f1d2"}.fa-git:before{content:"\f1d3"}.fa-y-combinator-square:before,.fa-yc-square:before,.fa-hacker-news:before{content:"\f1d4"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-qq:before{content:"\f1d6"}.fa-wechat:before,.fa-weixin:before{content:"\f1d7"}.fa-send:before,.fa-paper-plane:before{content:"\f1d8"}.fa-send-o:before,.fa-paper-plane-o:before{content:"\f1d9"}.fa-history:before{content:"\f1da"}.fa-circle-thin:before{content:"\f1db"}.fa-header:before{content:"\f1dc"}.fa-paragraph:before{content:"\f1dd"}.fa-sliders:before{content:"\f1de"}.fa-share-alt:before{content:"\f1e0"}.fa-share-alt-square:before{content:"\f1e1"}.fa-bomb:before{content:"\f1e2"}.fa-soccer-ball-o:before,.fa-futbol-o:before{content:"\f1e3"}.fa-tty:before{c
ontent:"\f1e4"}.fa-binoculars:before{content:"\f1e5"}.fa-plug:before{content:"\f1e6"}.fa-slideshare:before{content:"\f1e7"}.fa-twitch:before{content:"\f1e8"}.fa-yelp:before{content:"\f1e9"}.fa-newspaper-o:before{content:"\f1ea"}.fa-wifi:before{content:"\f1eb"}.fa-calculator:before{content:"\f1ec"}.fa-paypal:before{content:"\f1ed"}.fa-google-wallet:before{content:"\f1ee"}.fa-cc-visa:before{content:"\f1f0"}.fa-cc-mastercard:before{content:"\f1f1"}.fa-cc-discover:before{content:"\f1f2"}.fa-cc-amex:before{content:"\f1f3"}.fa-cc-paypal:before{content:"\f1f4"}.fa-cc-stripe:before{content:"\f1f5"}.fa-bell-slash:before{content:"\f1f6"}.fa-bell-slash-o:before{content:"\f1f7"}.fa-trash:before{content:"\f1f8"}.fa-copyright:before{content:"\f1f9"}.fa-at:before{content:"\f1fa"}.fa-eyedropper:before{content:"\f1fb"}.fa-paint-brush:before{content:"\f1fc"}.fa-birthday-cake:before{content:"\f1fd"}.fa-area-chart:before{content:"\f1fe"}.fa-pie-chart:before{content:"\f200"}.fa-line-chart:before{content:"\f201"}.fa-lastfm:before{content:"\f202"}.fa-lastfm-square:before{content:"\f203"}.fa-toggle-off:before{content:"\f204"}.fa-toggle-on:before{content:"\f205"}.fa-bicycle:before{content:"\f206"}.fa-bus:before{content:"\f207"}.fa-ioxhost:before{content:"\f208"}.fa-angellist:before{content:"\f209"}.fa-cc:before{content:"\f20a"}.fa-shekel:before,.fa-sheqel:before,.fa-ils:before{content:"\f20b"}.fa-meanpath:before{content:"\f20c"}.fa-buysellads:before{content:"\f20d"}.fa-connectdevelop:before{content:"\f20e"}.fa-dashcube:before{content:"\f210"}.fa-forumbee:before{content:"\f211"}.fa-leanpub:before{content:"\f212"}.fa-sellsy:before{content:"\f213"}.fa-shirtsinbulk:before{content:"\f214"}.fa-simplybuilt:before{content:"\f215"}.fa-skyatlas:before{content:"\f216"}.fa-cart-plus:before{content:"\f217"}.fa-cart-arrow-down:before{content:"\f218"}.fa-diamond:before{content:"\f219"}.fa-ship:before{content:"\f21a"}.fa-user-secret:before{content:"\f21b"}.fa-motorcycle:before{content:"\f21c"}.fa-street-vi
ew:before{content:"\f21d"}.fa-heartbeat:before{content:"\f21e"}.fa-venus:before{content:"\f221"}.fa-mars:before{content:"\f222"}.fa-mercury:before{content:"\f223"}.fa-intersex:before,.fa-transgender:before{content:"\f224"}.fa-transgender-alt:before{content:"\f225"}.fa-venus-double:before{content:"\f226"}.fa-mars-double:before{content:"\f227"}.fa-venus-mars:before{content:"\f228"}.fa-mars-stroke:before{content:"\f229"}.fa-mars-stroke-v:before{content:"\f22a"}.fa-mars-stroke-h:before{content:"\f22b"}.fa-neuter:before{content:"\f22c"}.fa-genderless:before{content:"\f22d"}.fa-facebook-official:before{content:"\f230"}.fa-pinterest-p:before{content:"\f231"}.fa-whatsapp:before{content:"\f232"}.fa-server:before{content:"\f233"}.fa-user-plus:before{content:"\f234"}.fa-user-times:before{content:"\f235"}.fa-hotel:before,.fa-bed:before{content:"\f236"}.fa-viacoin:before{content:"\f237"}.fa-train:before{content:"\f238"}.fa-subway:before{content:"\f239"}.fa-medium:before{content:"\f23a"}.fa-yc:before,.fa-y-combinator:before{content:"\f23b"}.fa-optin-monster:before{content:"\f23c"}.fa-opencart:before{content:"\f23d"}.fa-expeditedssl:before{content:"\f23e"}.fa-battery-4:before,.fa-battery:before,.fa-battery-full:before{content:"\f240"}.fa-battery-3:before,.fa-battery-three-quarters:before{content:"\f241"}.fa-battery-2:before,.fa-battery-half:before{content:"\f242"}.fa-battery-1:before,.fa-battery-quarter:before{content:"\f243"}.fa-battery-0:before,.fa-battery-empty:before{content:"\f244"}.fa-mouse-pointer:before{content:"\f245"}.fa-i-cursor:before{content:"\f246"}.fa-object-group:before{content:"\f247"}.fa-object-ungroup:before{content:"\f248"}.fa-sticky-note:before{content:"\f249"}.fa-sticky-note-o:before{content:"\f24a"}.fa-cc-jcb:before{content:"\f24b"}.fa-cc-diners-club:before{content:"\f24c"}.fa-clone:before{content:"\f24d"}.fa-balance-scale:before{content:"\f24e"}.fa-hourglass-o:before{content:"\f250"}.fa-hourglass-1:before,.fa-hourglass-start:before{content:"\f251"}.fa-hourg
lass-2:before,.fa-hourglass-half:before{content:"\f252"}.fa-hourglass-3:before,.fa-hourglass-end:before{content:"\f253"}.fa-hourglass:before{content:"\f254"}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:"\f255"}.fa-hand-stop-o:before,.fa-hand-paper-o:before{content:"\f256"}.fa-hand-scissors-o:before{content:"\f257"}.fa-hand-lizard-o:before{content:"\f258"}.fa-hand-spock-o:before{content:"\f259"}.fa-hand-pointer-o:before{content:"\f25a"}.fa-hand-peace-o:before{content:"\f25b"}.fa-trademark:before{content:"\f25c"}.fa-registered:before{content:"\f25d"}.fa-creative-commons:before{content:"\f25e"}.fa-gg:before{content:"\f260"}.fa-gg-circle:before{content:"\f261"}.fa-tripadvisor:before{content:"\f262"}.fa-odnoklassniki:before{content:"\f263"}.fa-odnoklassniki-square:before{content:"\f264"}.fa-get-pocket:before{content:"\f265"}.fa-wikipedia-w:before{content:"\f266"}.fa-safari:before{content:"\f267"}.fa-chrome:before{content:"\f268"}.fa-firefox:before{content:"\f269"}.fa-opera:before{content:"\f26a"}.fa-internet-explorer:before{content:"\f26b"}.fa-tv:before,.fa-television:before{content:"\f26c"}.fa-contao:before{content:"\f26d"}.fa-500px:before{content:"\f26e"}.fa-amazon:before{content:"\f270"}.fa-calendar-plus-o:before{content:"\f271"}.fa-calendar-minus-o:before{content:"\f272"}.fa-calendar-times-o:before{content:"\f273"}.fa-calendar-check-o:before{content:"\f274"}.fa-industry:before{content:"\f275"}.fa-map-pin:before{content:"\f276"}.fa-map-signs:before{content:"\f277"}.fa-map-o:before{content:"\f278"}.fa-map:before{content:"\f279"}.fa-commenting:before{content:"\f27a"}.fa-commenting-o:before{content:"\f27b"}.fa-houzz:before{content:"\f27c"}.fa-vimeo:before{content:"\f27d"}.fa-black-tie:before{content:"\f27e"}.fa-fonticons:before{content:"\f280"}.fa-reddit-alien:before{content:"\f281"}.fa-edge:before{content:"\f282"}.fa-credit-card-alt:before{content:"\f283"}.fa-codiepie:before{content:"\f284"}.fa-modx:before{content:"\f285"}.fa-fort-awesome:before{content:"\f286"
}.fa-usb:before{content:"\f287"}.fa-product-hunt:before{content:"\f288"}.fa-mixcloud:before{content:"\f289"}.fa-scribd:before{content:"\f28a"}.fa-pause-circle:before{content:"\f28b"}.fa-pause-circle-o:before{content:"\f28c"}.fa-stop-circle:before{content:"\f28d"}.fa-stop-circle-o:before{content:"\f28e"}.fa-shopping-bag:before{content:"\f290"}.fa-shopping-basket:before{content:"\f291"}.fa-hashtag:before{content:"\f292"}.fa-bluetooth:before{content:"\f293"}.fa-bluetooth-b:before{content:"\f294"}.fa-percent:before{content:"\f295"}.fa-gitlab:before{content:"\f296"}.fa-wpbeginner:before{content:"\f297"}.fa-wpforms:before{content:"\f298"}.fa-envira:before{content:"\f299"}.fa-universal-access:before{content:"\f29a"}.fa-wheelchair-alt:before{content:"\f29b"}.fa-question-circle-o:before{content:"\f29c"}.fa-blind:before{content:"\f29d"}.fa-audio-description:before{content:"\f29e"}.fa-volume-control-phone:before{content:"\f2a0"}.fa-braille:before{content:"\f2a1"}.fa-assistive-listening-systems:before{content:"\f2a2"}.fa-asl-interpreting:before,.fa-american-sign-language-interpreting:before{content:"\f2a3"}.fa-deafness:before,.fa-hard-of-hearing:before,.fa-deaf:before{content:"\f2a4"}.fa-glide:before{content:"\f2a5"}.fa-glide-g:before{content:"\f2a6"}.fa-signing:before,.fa-sign-language:before{content:"\f2a7"}.fa-low-vision:before{content:"\f2a8"}.fa-viadeo:before{content:"\f2a9"}.fa-viadeo-square:before{content:"\f2aa"}.fa-snapchat:before{content:"\f2ab"}.fa-snapchat-ghost:before{content:"\f2ac"}.fa-snapchat-square:before{content:"\f2ad"}.fa-pied-piper:before{content:"\f2ae"}.fa-first-order:before{content:"\f2b0"}.fa-yoast:before{content:"\f2b1"}.fa-themeisle:before{content:"\f2b2"}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:"\f2b3"}.fa-fa:before,.fa-font-awesome:before{content:"\f2b4"}.fa-handshake-o:before{content:"\f2b5"}.fa-envelope-open:before{content:"\f2b6"}.fa-envelope-open-o:before{content:"\f2b7"}.fa-linode:before{content:"\f2b8"}.fa-address
-book:before{content:"\f2b9"}.fa-address-book-o:before{content:"\f2ba"}.fa-vcard:before,.fa-address-card:before{content:"\f2bb"}.fa-vcard-o:before,.fa-address-card-o:before{content:"\f2bc"}.fa-user-circle:before{content:"\f2bd"}.fa-user-circle-o:before{content:"\f2be"}.fa-user-o:before{content:"\f2c0"}.fa-id-badge:before{content:"\f2c1"}.fa-drivers-license:before,.fa-id-card:before{content:"\f2c2"}.fa-drivers-license-o:before,.fa-id-card-o:before{content:"\f2c3"}.fa-quora:before{content:"\f2c4"}.fa-free-code-camp:before{content:"\f2c5"}.fa-telegram:before{content:"\f2c6"}.fa-thermometer-4:before,.fa-thermometer:before,.fa-thermometer-full:before{content:"\f2c7"}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:"\f2c8"}.fa-thermometer-2:before,.fa-thermometer-half:before{content:"\f2c9"}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:"\f2ca"}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:"\f2cb"}.fa-shower:before{content:"\f2cc"}.fa-bathtub:before,.fa-s15:before,.fa-bath:before{content:"\f2cd"}.fa-podcast:before{content:"\f2ce"}.fa-window-maximize:before{content:"\f2d0"}.fa-window-minimize:before{content:"\f2d1"}.fa-window-restore:before{content:"\f2d2"}.fa-times-rectangle:before,.fa-window-close:before{content:"\f2d3"}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:"\f2d4"}.fa-bandcamp:before{content:"\f2d5"}.fa-grav:before{content:"\f2d6"}.fa-etsy:before{content:"\f2d7"}.fa-imdb:before{content:"\f2d8"}.fa-ravelry:before{content:"\f2d9"}.fa-eercast:before{content:"\f2da"}.fa-microchip:before{content:"\f2db"}.fa-snowflake-o:before{content:"\f2dc"}.fa-superpowers:before{content:"\f2dd"}.fa-wpexplorer:before{content:"\f2de"}.fa-meetup:before{content:"\f2e0"}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0, 0, 0, 0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto} 
-.book .book-header,.book .book-summary{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif}.book-langs-index{width:100%;height:100%;padding:40px 0;margin:0;overflow:auto}@media (max-width:600px){.book-langs-index{padding:0}}.book-langs-index .inner{max-width:600px;width:100%;margin:0 auto;padding:30px;background:#fff;border-radius:3px}.book-langs-index .inner h3{margin:0}.book-langs-index .inner .languages{list-style:none;padding:20px 30px;margin-top:20px;border-top:1px solid #eee}.book-langs-index .inner .languages:after,.book-langs-index .inner .languages:before{content:" ";display:table;line-height:0}.book-langs-index .inner .languages li{width:50%;float:left;padding:10px 5px;font-size:16px}@media (max-width:600px){.book-langs-index .inner .languages li{width:100%;max-width:100%}}.book .book-header{overflow:visible;height:50px;padding:0 8px;z-index:2;font-size:.85em;color:#7e888b;background:0 0}.book .book-header .btn{display:block;height:50px;padding:0 15px;border-bottom:none;color:#ccc;text-transform:uppercase;line-height:50px;-webkit-box-shadow:none!important;box-shadow:none!important;position:relative;font-size:14px}.book .book-header .btn:hover{position:relative;text-decoration:none;color:#444;background:0 0}.book .book-header h1{margin:0;font-size:20px;font-weight:200;text-align:center;line-height:50px;opacity:0;padding-left:200px;padding-right:200px;-webkit-transition:opacity .2s ease;-moz-transition:opacity .2s ease;-o-transition:opacity .2s ease;transition:opacity .2s ease;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.book .book-header h1 a,.book .book-header h1 a:hover{color:inherit;text-decoration:none}@media (max-width:1000px){.book .book-header h1{display:none}}.book .book-header h1 i{display:none}.book .book-header:hover h1{opacity:1}.book.is-loading .book-header h1 i{display:inline-block}.book.is-loading .book-header h1 
a{display:none}.dropdown{position:relative}.dropdown-menu{position:absolute;top:100%;left:0;z-index:100;display:none;float:left;min-width:160px;padding:0;margin:2px 0 0;list-style:none;font-size:14px;background-color:#fafafa;border:1px solid rgba(0,0,0,.07);border-radius:1px;-webkit-box-shadow:0 6px 12px rgba(0,0,0,.175);box-shadow:0 6px 12px rgba(0,0,0,.175);background-clip:padding-box}.dropdown-menu.open{display:block}.dropdown-menu.dropdown-left{left:auto;right:4%}.dropdown-menu.dropdown-left .dropdown-caret{right:14px;left:auto}.dropdown-menu .dropdown-caret{position:absolute;top:-8px;left:14px;width:18px;height:10px;float:left;overflow:hidden}.dropdown-menu .dropdown-caret .caret-inner,.dropdown-menu .dropdown-caret .caret-outer{display:inline-block;top:0;border-left:9px solid transparent;border-right:9px solid transparent;position:absolute}.dropdown-menu .dropdown-caret .caret-outer{border-bottom:9px solid rgba(0,0,0,.1);height:auto;left:0;width:auto;margin-left:-1px}.dropdown-menu .dropdown-caret .caret-inner{margin-top:-1px;top:1px;border-bottom:9px solid #fafafa}.dropdown-menu .buttons{border-bottom:1px solid rgba(0,0,0,.07)}.dropdown-menu .buttons:after,.dropdown-menu .buttons:before{content:" ";display:table;line-height:0}.dropdown-menu .buttons:last-child{border-bottom:none}.dropdown-menu .buttons .button{border:0;background-color:transparent;color:#a6a6a6;width:100%;text-align:center;float:left;line-height:1.42857143;padding:8px 4px}.alert,.dropdown-menu .buttons .button:hover{color:#444}.dropdown-menu .buttons .button:focus,.dropdown-menu .buttons .button:hover{outline:0}.dropdown-menu .buttons .button.size-2{width:50%}.dropdown-menu .buttons .button.size-3{width:33%}.alert{padding:15px;margin-bottom:20px;background:#eee;border-bottom:5px solid 
#ddd}.alert-success{background:#dff0d8;border-color:#d6e9c6;color:#3c763d}.alert-info{background:#d9edf7;border-color:#bce8f1;color:#31708f}.alert-danger{background:#f2dede;border-color:#ebccd1;color:#a94442}.alert-warning{background:#fcf8e3;border-color:#faebcc;color:#8a6d3b}.book .book-summary{position:absolute;top:0;left:-300px;bottom:0;z-index:1;width:300px;color:#364149;background:#fafafa;border-right:1px solid rgba(0,0,0,.07);-webkit-transition:left 250ms ease;-moz-transition:left 250ms ease;-o-transition:left 250ms ease;transition:left 250ms ease}.book .book-summary ul.summary{position:absolute;top:0;left:0;right:0;bottom:0;overflow-y:auto;list-style:none;margin:0;padding:0;-webkit-transition:top .5s ease;-moz-transition:top .5s ease;-o-transition:top .5s ease;transition:top .5s ease}.book .book-summary ul.summary li{list-style:none}.book .book-summary ul.summary li.divider{height:1px;margin:7px 0;overflow:hidden;background:rgba(0,0,0,.07)}.book .book-summary ul.summary li i.fa-check{display:none;position:absolute;right:9px;top:16px;font-size:9px;color:#3c3}.book .book-summary ul.summary li.done>a{color:#364149;font-weight:400}.book .book-summary ul.summary li.done>a i{display:inline}.book .book-summary ul.summary li a,.book .book-summary ul.summary li span{display:block;padding:10px 15px;border-bottom:none;color:#364149;background:0 0;text-overflow:ellipsis;overflow:hidden;white-space:nowrap;position:relative}.book .book-summary ul.summary li span{cursor:not-allowed;opacity:.3;filter:alpha(opacity=30)}.book .book-summary ul.summary li a:hover,.book .book-summary ul.summary li.active>a{color:#008cff;background:0 0;text-decoration:none}.book .book-summary ul.summary li ul{padding-left:20px}@media (max-width:600px){.book .book-summary{width:calc(100% - 60px);bottom:0;left:-100%}}.book.with-summary .book-summary{left:0}.book.without-animation 
.book-summary{-webkit-transition:none!important;-moz-transition:none!important;-o-transition:none!important;transition:none!important}.book{position:relative;width:100%;height:100%}.book .book-body,.book .book-body .body-inner{position:absolute;top:0;left:0;overflow-y:auto;bottom:0;right:0}.book .book-body{color:#000;background:#fff;-webkit-transition:left 250ms ease;-moz-transition:left 250ms ease;-o-transition:left 250ms ease;transition:left 250ms ease}.book .book-body .page-wrapper{position:relative;outline:0}.book .book-body .page-wrapper .page-inner{max-width:800px;margin:0 auto;padding:20px 0 40px}.book .book-body .page-wrapper .page-inner section{margin:0;padding:5px 15px;background:#fff;border-radius:2px;line-height:1.7;font-size:1.6rem}.book .book-body .page-wrapper .page-inner .btn-group .btn{border-radius:0;background:#eee;border:0}@media (max-width:1240px){.book .book-body{-webkit-transition:-webkit-transform 250ms ease;-moz-transition:-moz-transform 250ms ease;-o-transition:-o-transform 250ms ease;transition:transform 250ms ease;padding-bottom:20px}.book .book-body .body-inner{position:static;min-height:calc(100% - 50px)}}@media (min-width:600px){.book.with-summary .book-body{left:300px}}@media (max-width:600px){.book.with-summary{overflow:hidden}.book.with-summary .book-body{-webkit-transform:translate(calc(100% - 60px),0);-moz-transform:translate(calc(100% - 60px),0);-ms-transform:translate(calc(100% - 60px),0);-o-transform:translate(calc(100% - 60px),0);transform:translate(calc(100% - 60px),0)}}.book.without-animation .book-body{-webkit-transition:none!important;-moz-transition:none!important;-o-transition:none!important;transition:none!important}.buttons:after,.buttons:before{content:" ";display:table;line-height:0}.button{border:0;background:#eee;color:#666;width:100%;text-align:center;float:left;line-height:1.42857143;padding:8px 
4px}.button:hover{color:#444}.button:focus,.button:hover{outline:0}.button.size-2{width:50%}.button.size-3{width:33%}.book .book-body .page-wrapper .page-inner section{display:none}.book .book-body .page-wrapper .page-inner section.normal{display:block;word-wrap:break-word;overflow:hidden;color:#333;line-height:1.7;text-size-adjust:100%;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%;-moz-text-size-adjust:100%}.book .book-body .page-wrapper .page-inner section.normal *{box-sizing:border-box;-webkit-box-sizing:border-box;}.book .book-body .page-wrapper .page-inner section.normal>:first-child{margin-top:0!important}.book .book-body .page-wrapper .page-inner section.normal>:last-child{margin-bottom:0!important}.book .book-body .page-wrapper .page-inner section.normal blockquote,.book .book-body .page-wrapper .page-inner section.normal code,.book .book-body .page-wrapper .page-inner section.normal figure,.book .book-body .page-wrapper .page-inner section.normal img,.book .book-body .page-wrapper .page-inner section.normal pre,.book .book-body .page-wrapper .page-inner section.normal table,.book .book-body .page-wrapper .page-inner section.normal tr{page-break-inside:avoid}.book .book-body .page-wrapper .page-inner section.normal h2,.book .book-body .page-wrapper .page-inner section.normal h3,.book .book-body .page-wrapper .page-inner section.normal h4,.book .book-body .page-wrapper .page-inner section.normal h5,.book .book-body .page-wrapper .page-inner section.normal p{orphans:3;widows:3}.book .book-body .page-wrapper .page-inner section.normal h1,.book .book-body .page-wrapper .page-inner section.normal h2,.book .book-body .page-wrapper .page-inner section.normal h3,.book .book-body .page-wrapper .page-inner section.normal h4,.book .book-body .page-wrapper .page-inner section.normal h5{page-break-after:avoid}.book .book-body .page-wrapper .page-inner section.normal b,.book .book-body .page-wrapper .page-inner section.normal strong{font-weight:700}.book 
.book-body .page-wrapper .page-inner section.normal em{font-style:italic}.book .book-body .page-wrapper .page-inner section.normal blockquote,.book .book-body .page-wrapper .page-inner section.normal dl,.book .book-body .page-wrapper .page-inner section.normal ol,.book .book-body .page-wrapper .page-inner section.normal p,.book .book-body .page-wrapper .page-inner section.normal table,.book .book-body .page-wrapper .page-inner section.normal ul{margin-top:0;margin-bottom:.85em}.book .book-body .page-wrapper .page-inner section.normal a{color:#4183c4;text-decoration:none;background:0 0}.book .book-body .page-wrapper .page-inner section.normal a:active,.book .book-body .page-wrapper .page-inner section.normal a:focus,.book .book-body .page-wrapper .page-inner section.normal a:hover{outline:0;text-decoration:underline}.book .book-body .page-wrapper .page-inner section.normal img{border:0;max-width:100%}.book .book-body .page-wrapper .page-inner section.normal hr{height:4px;padding:0;margin:1.7em 0;overflow:hidden;background-color:#e7e7e7;border:none}.book .book-body .page-wrapper .page-inner section.normal hr:after,.book .book-body .page-wrapper .page-inner section.normal hr:before{display:table;content:" "}.book .book-body .page-wrapper .page-inner section.normal h1,.book .book-body .page-wrapper .page-inner section.normal h2,.book .book-body .page-wrapper .page-inner section.normal h3,.book .book-body .page-wrapper .page-inner section.normal h4,.book .book-body .page-wrapper .page-inner section.normal h5,.book .book-body .page-wrapper .page-inner section.normal h6{margin-top:1.275em;margin-bottom:.85em;}.book .book-body .page-wrapper .page-inner section.normal h1{font-size:2em}.book .book-body .page-wrapper .page-inner section.normal h2{font-size:1.75em}.book .book-body .page-wrapper .page-inner section.normal h3{font-size:1.5em}.book .book-body .page-wrapper .page-inner section.normal h4{font-size:1.25em}.book .book-body .page-wrapper .page-inner section.normal 
h5{font-size:1em}.book .book-body .page-wrapper .page-inner section.normal h6{font-size:1em;color:#777}.book .book-body .page-wrapper .page-inner section.normal code,.book .book-body .page-wrapper .page-inner section.normal pre{font-family:Consolas,"Liberation Mono",Menlo,Courier,monospace;direction:ltr;border:none;color:inherit}.book .book-body .page-wrapper .page-inner section.normal pre{overflow:auto;word-wrap:normal;margin:0 0 1.275em;padding:.85em 1em;background:#f7f7f7}.book .book-body .page-wrapper .page-inner section.normal pre>code{display:inline;max-width:initial;padding:0;margin:0;overflow:initial;line-height:inherit;font-size:.85em;white-space:pre;background:0 0}.book .book-body .page-wrapper .page-inner section.normal pre>code:after,.book .book-body .page-wrapper .page-inner section.normal pre>code:before{content:normal}.book .book-body .page-wrapper .page-inner section.normal code{padding:.2em;margin:0;font-size:.85em;background-color:#f7f7f7}.book .book-body .page-wrapper .page-inner section.normal code:after,.book .book-body .page-wrapper .page-inner section.normal code:before{letter-spacing:-.2em;content:"\00a0"}.book .book-body .page-wrapper .page-inner section.normal ol,.book .book-body .page-wrapper .page-inner section.normal ul{padding:0 0 0 2em;margin:0 0 .85em}.book .book-body .page-wrapper .page-inner section.normal ol ol,.book .book-body .page-wrapper .page-inner section.normal ol ul,.book .book-body .page-wrapper .page-inner section.normal ul ol,.book .book-body .page-wrapper .page-inner section.normal ul ul{margin-top:0;margin-bottom:0}.book .book-body .page-wrapper .page-inner section.normal ol ol{list-style-type:lower-roman}.book .book-body .page-wrapper .page-inner section.normal blockquote{margin:0 0 .85em;padding:0 15px;opacity:0.75;border-left:4px solid #dcdcdc}.book .book-body .page-wrapper .page-inner section.normal blockquote:first-child{margin-top:0}.book .book-body .page-wrapper .page-inner section.normal 
blockquote:last-child{margin-bottom:0}.book .book-body .page-wrapper .page-inner section.normal dl{padding:0}.book .book-body .page-wrapper .page-inner section.normal dl dt{padding:0;margin-top:.85em;font-style:italic;font-weight:700}.book .book-body .page-wrapper .page-inner section.normal dl dd{padding:0 .85em;margin-bottom:.85em}.book .book-body .page-wrapper .page-inner section.normal dd{margin-left:0}.book .book-body .page-wrapper .page-inner section.normal .glossary-term{cursor:help;text-decoration:underline}.book .book-body .navigation{position:absolute;top:50px;bottom:0;margin:0;max-width:150px;min-width:90px;display:flex;justify-content:center;align-content:center;flex-direction:column;font-size:40px;color:#ccc;text-align:center;-webkit-transition:all 350ms ease;-moz-transition:all 350ms ease;-o-transition:all 350ms ease;transition:all 350ms ease}.book .book-body .navigation:hover{text-decoration:none;color:#444}.book .book-body .navigation.navigation-next{right:0}.book .book-body .navigation.navigation-prev{left:0}@media (max-width:1240px){.book .book-body .navigation{position:static;top:auto;max-width:50%;width:50%;display:inline-block;float:left}.book .book-body .navigation.navigation-unique{max-width:100%;width:100%}}.book .book-body .page-wrapper .page-inner section.glossary{margin-bottom:40px}.book .book-body .page-wrapper .page-inner section.glossary h2 a,.book .book-body .page-wrapper .page-inner section.glossary h2 a:hover{color:inherit;text-decoration:none}.book .book-body .page-wrapper .page-inner section.glossary .glossary-index{list-style:none;margin:0;padding:0}.book .book-body .page-wrapper .page-inner section.glossary .glossary-index li{display:inline;margin:0 
8px;white-space:nowrap}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;-webkit-overflow-scrolling:auto;-webkit-tap-highlight-color:transparent;-webkit-text-size-adjust:none;-webkit-touch-callout:none}a{text-decoration:none}body,html{height:100%}html{font-size:62.5%}body{text-rendering:optimizeLegibility;font-smoothing:antialiased;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;letter-spacing:.2px;text-size-adjust:100%} -.book .book-summary ul.summary li a span {display:inline;padding:initial;overflow:visible;cursor:auto;opacity:1;} -/* show arrow before summary tag as in bootstrap */ -details > summary {display:list-item;cursor:pointer;} diff --git a/_book/libs/gitbook-2.6.7/js/app.min.js b/_book/libs/gitbook-2.6.7/js/app.min.js deleted file mode 100644 index 643f1f983..000000000 --- a/_book/libs/gitbook-2.6.7/js/app.min.js +++ /dev/null @@ -1 +0,0 @@ -(function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o"'`]/g,reHasEscapedHtml=RegExp(reEscapedHtml.source),reHasUnescapedHtml=RegExp(reUnescapedHtml.source);var reEscape=/<%-([\s\S]+?)%>/g,reEvaluate=/<%([\s\S]+?)%>/g,reInterpolate=/<%=([\s\S]+?)%>/g;var reIsDeepProp=/\.|\[(?:[^[\]]*|(["'])(?:(?!\1)[^\n\\]|\\.)*?\1)\]/,reIsPlainProp=/^\w*$/,rePropName=/[^.[\]]+|\[(?:(-?\d+(?:\.\d+)?)|(["'])((?:(?!\2)[^\n\\]|\\.)*?)\2)\]/g;var reRegExpChars=/^[:!,]|[\\^$.*+?()[\]{}|\/]|(^[0-9a-fA-Fnrtuvx])|([\n\r\u2028\u2029])/g,reHasRegExpChars=RegExp(reRegExpChars.source);var reComboMark=/[\u0300-\u036f\ufe20-\ufe23]/g;var reEscapeChar=/\\(\\)?/g;var reEsTemplate=/\$\{([^\\}]*(?:\\.[^\\}]*)*)\}/g;var reFlags=/\w*$/;var 
reHasHexPrefix=/^0[xX]/;var reIsHostCtor=/^\[object .+?Constructor\]$/;var reIsUint=/^\d+$/;var reLatin1=/[\xc0-\xd6\xd8-\xde\xdf-\xf6\xf8-\xff]/g;var reNoMatch=/($^)/;var reUnescapedString=/['\n\r\u2028\u2029\\]/g;var reWords=function(){var upper="[A-Z\\xc0-\\xd6\\xd8-\\xde]",lower="[a-z\\xdf-\\xf6\\xf8-\\xff]+";return RegExp(upper+"+(?="+upper+lower+")|"+upper+"?"+lower+"|"+upper+"+|[0-9]+","g")}();var contextProps=["Array","ArrayBuffer","Date","Error","Float32Array","Float64Array","Function","Int8Array","Int16Array","Int32Array","Math","Number","Object","RegExp","Set","String","_","clearTimeout","isFinite","parseFloat","parseInt","setTimeout","TypeError","Uint8Array","Uint8ClampedArray","Uint16Array","Uint32Array","WeakMap"];var templateCounter=-1;var typedArrayTags={};typedArrayTags[float32Tag]=typedArrayTags[float64Tag]=typedArrayTags[int8Tag]=typedArrayTags[int16Tag]=typedArrayTags[int32Tag]=typedArrayTags[uint8Tag]=typedArrayTags[uint8ClampedTag]=typedArrayTags[uint16Tag]=typedArrayTags[uint32Tag]=true;typedArrayTags[argsTag]=typedArrayTags[arrayTag]=typedArrayTags[arrayBufferTag]=typedArrayTags[boolTag]=typedArrayTags[dateTag]=typedArrayTags[errorTag]=typedArrayTags[funcTag]=typedArrayTags[mapTag]=typedArrayTags[numberTag]=typedArrayTags[objectTag]=typedArrayTags[regexpTag]=typedArrayTags[setTag]=typedArrayTags[stringTag]=typedArrayTags[weakMapTag]=false;var cloneableTags={};cloneableTags[argsTag]=cloneableTags[arrayTag]=cloneableTags[arrayBufferTag]=cloneableTags[boolTag]=cloneableTags[dateTag]=cloneableTags[float32Tag]=cloneableTags[float64Tag]=cloneableTags[int8Tag]=cloneableTags[int16Tag]=cloneableTags[int32Tag]=cloneableTags[numberTag]=cloneableTags[objectTag]=cloneableTags[regexpTag]=cloneableTags[stringTag]=cloneableTags[uint8Tag]=cloneableTags[uint8ClampedTag]=cloneableTags[uint16Tag]=cloneableTags[uint32Tag]=true;cloneableTags[errorTag]=cloneableTags[funcTag]=cloneableTags[mapTag]=cloneableTags[setTag]=cloneableTags[weakMapTag]=false;var 
deburredLetters={"À":"A","Á":"A","Â":"A","Ã":"A","Ä":"A","Å":"A","à":"a","á":"a","â":"a","ã":"a","ä":"a","å":"a","Ç":"C","ç":"c","Ð":"D","ð":"d","È":"E","É":"E","Ê":"E","Ë":"E","è":"e","é":"e","ê":"e","ë":"e","Ì":"I","Í":"I","Î":"I","Ï":"I","ì":"i","í":"i","î":"i","ï":"i","Ñ":"N","ñ":"n","Ò":"O","Ó":"O","Ô":"O","Õ":"O","Ö":"O","Ø":"O","ò":"o","ó":"o","ô":"o","õ":"o","ö":"o","ø":"o","Ù":"U","Ú":"U","Û":"U","Ü":"U","ù":"u","ú":"u","û":"u","ü":"u","Ý":"Y","ý":"y","ÿ":"y","Æ":"Ae","æ":"ae","Þ":"Th","þ":"th","ß":"ss"};var htmlEscapes={"&":"&","<":"<",">":">",'"':""","'":"'","`":"`"};var htmlUnescapes={"&":"&","<":"<",">":">",""":'"',"'":"'","`":"`"};var objectTypes={function:true,object:true};var regexpEscapes={0:"x30",1:"x31",2:"x32",3:"x33",4:"x34",5:"x35",6:"x36",7:"x37",8:"x38",9:"x39",A:"x41",B:"x42",C:"x43",D:"x44",E:"x45",F:"x46",a:"x61",b:"x62",c:"x63",d:"x64",e:"x65",f:"x66",n:"x6e",r:"x72",t:"x74",u:"x75",v:"x76",x:"x78"};var stringEscapes={"\\":"\\","'":"'","\n":"n","\r":"r","\u2028":"u2028","\u2029":"u2029"};var freeExports=objectTypes[typeof exports]&&exports&&!exports.nodeType&&exports;var freeModule=objectTypes[typeof module]&&module&&!module.nodeType&&module;var freeGlobal=freeExports&&freeModule&&typeof global=="object"&&global&&global.Object&&global;var freeSelf=objectTypes[typeof self]&&self&&self.Object&&self;var freeWindow=objectTypes[typeof window]&&window&&window.Object&&window;var moduleExports=freeModule&&freeModule.exports===freeExports&&freeExports;var root=freeGlobal||freeWindow!==(this&&this.window)&&freeWindow||freeSelf||this;function baseCompareAscending(value,other){if(value!==other){var valIsNull=value===null,valIsUndef=value===undefined,valIsReflexive=value===value;var othIsNull=other===null,othIsUndef=other===undefined,othIsReflexive=other===other;if(value>other&&!othIsNull||!valIsReflexive||valIsNull&&!othIsUndef&&othIsReflexive||valIsUndef&&othIsReflexive){return 1}if(value-1){}return index}function charsRightIndex(string,chars){var 
index=string.length;while(index--&&chars.indexOf(string.charAt(index))>-1){}return index}function compareAscending(object,other){return baseCompareAscending(object.criteria,other.criteria)||object.index-other.index}function compareMultiple(object,other,orders){var index=-1,objCriteria=object.criteria,othCriteria=other.criteria,length=objCriteria.length,ordersLength=orders.length;while(++index=ordersLength){return result}var order=orders[index];return result*(order==="asc"||order===true?1:-1)}}return object.index-other.index}function deburrLetter(letter){return deburredLetters[letter]}function escapeHtmlChar(chr){return htmlEscapes[chr]}function escapeRegExpChar(chr,leadingChar,whitespaceChar){if(leadingChar){chr=regexpEscapes[chr]}else if(whitespaceChar){chr=stringEscapes[chr]}return"\\"+chr}function escapeStringChar(chr){return"\\"+stringEscapes[chr]}function indexOfNaN(array,fromIndex,fromRight){var length=array.length,index=fromIndex+(fromRight?0:-1);while(fromRight?index--:++index=9&&charCode<=13)||charCode==32||charCode==160||charCode==5760||charCode==6158||charCode>=8192&&(charCode<=8202||charCode==8232||charCode==8233||charCode==8239||charCode==8287||charCode==12288||charCode==65279)}function replaceHolders(array,placeholder){var index=-1,length=array.length,resIndex=-1,result=[];while(++index>>1;var MAX_SAFE_INTEGER=9007199254740991;var metaMap=WeakMap&&new WeakMap;var realNames={};function lodash(value){if(isObjectLike(value)&&!isArray(value)&&!(value instanceof LazyWrapper)){if(value instanceof LodashWrapper){return value}if(hasOwnProperty.call(value,"__chain__")&&hasOwnProperty.call(value,"__wrapped__")){return wrapperClone(value)}}return new LodashWrapper(value)}function baseLodash(){}function LodashWrapper(value,chainAll,actions){this.__wrapped__=value;this.__actions__=actions||[];this.__chain__=!!chainAll}var 
support=lodash.support={};lodash.templateSettings={escape:reEscape,evaluate:reEvaluate,interpolate:reInterpolate,variable:"",imports:{_:lodash}};function LazyWrapper(value){this.__wrapped__=value;this.__actions__=[];this.__dir__=1;this.__filtered__=false;this.__iteratees__=[];this.__takeCount__=POSITIVE_INFINITY;this.__views__=[]}function lazyClone(){var result=new LazyWrapper(this.__wrapped__);result.__actions__=arrayCopy(this.__actions__);result.__dir__=this.__dir__;result.__filtered__=this.__filtered__;result.__iteratees__=arrayCopy(this.__iteratees__);result.__takeCount__=this.__takeCount__;result.__views__=arrayCopy(this.__views__);return result}function lazyReverse(){if(this.__filtered__){var result=new LazyWrapper(this);result.__dir__=-1;result.__filtered__=true}else{result=this.clone();result.__dir__*=-1}return result}function lazyValue(){var array=this.__wrapped__.value(),dir=this.__dir__,isArr=isArray(array),isRight=dir<0,arrLength=isArr?array.length:0,view=getView(0,arrLength,this.__views__),start=view.start,end=view.end,length=end-start,index=isRight?end:start-1,iteratees=this.__iteratees__,iterLength=iteratees.length,resIndex=0,takeCount=nativeMin(length,this.__takeCount__);if(!isArr||arrLength=LARGE_ARRAY_SIZE?createCache(values):null,valuesLength=values.length;if(cache){indexOf=cacheIndexOf;isCommon=false;values=cache}outer:while(++indexlength?0:length+start}end=end===undefined||end>length?length:+end||0;if(end<0){end+=length}length=start>end?0:end>>>0;start>>>=0;while(startlength?0:length+start}end=end===undefined||end>length?length:+end||0;if(end<0){end+=length}length=start>end?0:end-start>>>0;start>>>=0;var 
result=Array(length);while(++index=LARGE_ARRAY_SIZE,seen=isLarge?createCache():null,result=[];if(seen){indexOf=cacheIndexOf;isCommon=false}else{isLarge=false;seen=iteratee?[]:result}outer:while(++index>>1,computed=array[mid];if((retHighest?computed<=value:computed2?sources[length-2]:undefined,guard=length>2?sources[2]:undefined,thisArg=length>1?sources[length-1]:undefined;if(typeof customizer=="function"){customizer=bindCallback(customizer,thisArg,5);length-=2}else{customizer=typeof thisArg=="function"?thisArg:undefined;length-=customizer?1:0}if(guard&&isIterateeCall(sources[0],sources[1],guard)){customizer=length<3?undefined:customizer;length=1}while(++index-1?collection[index]:undefined}return baseFind(collection,predicate,eachFunc)}}function createFindIndex(fromRight){return function(array,predicate,thisArg){if(!(array&&array.length)){return-1}predicate=getCallback(predicate,thisArg,3);return baseFindIndex(array,predicate,fromRight)}}function createFindKey(objectFunc){return function(object,predicate,thisArg){predicate=getCallback(predicate,thisArg,3);return baseFind(object,predicate,objectFunc,true)}}function createFlow(fromRight){return function(){var wrapper,length=arguments.length,index=fromRight?length:-1,leftIndex=0,funcs=Array(length);while(fromRight?index--:++index=LARGE_ARRAY_SIZE){return wrapper.plant(value).value()}var index=0,result=length?funcs[index].apply(this,args):value;while(++index=length||!nativeIsFinite(length)){return""}var padLength=length-strLength;chars=chars==null?" 
":chars+"";return repeat(chars,nativeCeil(padLength/chars.length)).slice(0,padLength)}function createPartialWrapper(func,bitmask,thisArg,partials){var isBind=bitmask&BIND_FLAG,Ctor=createCtorWrapper(func);function wrapper(){var argsIndex=-1,argsLength=arguments.length,leftIndex=-1,leftLength=partials.length,args=Array(leftLength+argsLength);while(++leftIndexarrLength)){return false}while(++index-1&&value%1==0&&value-1&&value%1==0&&value<=MAX_SAFE_INTEGER}function isStrictComparable(value){return value===value&&!isObject(value)}function mergeData(data,source){var bitmask=data[1],srcBitmask=source[1],newBitmask=bitmask|srcBitmask,isCommon=newBitmask0){if(++count>=HOT_COUNT){return key}}else{count=0}return baseSetData(key,value)}}();function shimKeys(object){var props=keysIn(object),propsLength=props.length,length=propsLength&&object.length;var allowIndexes=!!length&&isLength(length)&&(isArray(object)||isArguments(object));var index=-1,result=[];while(++index=120?createCache(othIndex&&value):null}var array=arrays[0],index=-1,length=array?array.length:0,seen=caches[0];outer:while(++index-1){splice.call(array,fromIndex,1)}}return array}var pullAt=restParam(function(array,indexes){indexes=baseFlatten(indexes);var result=baseAt(array,indexes);basePullAt(array,indexes.sort(baseCompareAscending));return result});function remove(array,predicate,thisArg){var result=[];if(!(array&&array.length)){return result}var index=-1,indexes=[],length=array.length;predicate=getCallback(predicate,thisArg,3);while(++index2?arrays[length-2]:undefined,thisArg=length>1?arrays[length-1]:undefined;if(length>2&&typeof iteratee=="function"){length-=2}else{iteratee=length>1&&typeof thisArg=="function"?(--length,thisArg):undefined;thisArg=undefined}arrays.length=length;return unzipWith(arrays,iteratee,thisArg)});function chain(value){var result=lodash(value);result.__chain__=true;return result}function tap(value,interceptor,thisArg){interceptor.call(thisArg,value);return value}function 
thru(value,interceptor,thisArg){return interceptor.call(thisArg,value)}function wrapperChain(){return chain(this)}function wrapperCommit(){return new LodashWrapper(this.value(),this.__chain__)}var wrapperConcat=restParam(function(values){values=baseFlatten(values);return this.thru(function(array){return arrayConcat(isArray(array)?array:[toObject(array)],values)})});function wrapperPlant(value){var result,parent=this;while(parent instanceof baseLodash){var clone=wrapperClone(parent);if(result){previous.__wrapped__=clone}else{result=clone}var previous=clone;parent=parent.__wrapped__}previous.__wrapped__=value;return result}function wrapperReverse(){var value=this.__wrapped__;var interceptor=function(value){return wrapped&&wrapped.__dir__<0?value:value.reverse()};if(value instanceof LazyWrapper){var wrapped=value;if(this.__actions__.length){wrapped=new LazyWrapper(this)}wrapped=wrapped.reverse();wrapped.__actions__.push({func:thru,args:[interceptor],thisArg:undefined});return new LodashWrapper(wrapped,this.__chain__)}return this.thru(interceptor)}function wrapperToString(){return this.value()+""}function wrapperValue(){return baseWrapperValue(this.__wrapped__,this.__actions__)}var at=restParam(function(collection,props){return baseAt(collection,baseFlatten(props))});var countBy=createAggregator(function(result,value,key){hasOwnProperty.call(result,key)?++result[key]:result[key]=1});function every(collection,predicate,thisArg){var func=isArray(collection)?arrayEvery:baseEvery;if(thisArg&&isIterateeCall(collection,predicate,thisArg)){predicate=undefined}if(typeof predicate!="function"||thisArg!==undefined){predicate=getCallback(predicate,thisArg,3)}return func(collection,predicate)}function filter(collection,predicate,thisArg){var func=isArray(collection)?arrayFilter:baseFilter;predicate=getCallback(predicate,thisArg,3);return func(collection,predicate)}var find=createFind(baseEach);var findLast=createFind(baseEachRight,true);function findWhere(collection,source){return 
find(collection,baseMatches(source))}var forEach=createForEach(arrayEach,baseEach);var forEachRight=createForEach(arrayEachRight,baseEachRight);var groupBy=createAggregator(function(result,value,key){if(hasOwnProperty.call(result,key)){result[key].push(value)}else{result[key]=[value]}});function includes(collection,target,fromIndex,guard){var length=collection?getLength(collection):0;if(!isLength(length)){collection=values(collection);length=collection.length}if(typeof fromIndex!="number"||guard&&isIterateeCall(target,fromIndex,guard)){fromIndex=0}else{fromIndex=fromIndex<0?nativeMax(length+fromIndex,0):fromIndex||0}return typeof collection=="string"||!isArray(collection)&&isString(collection)?fromIndex<=length&&collection.indexOf(target,fromIndex)>-1:!!length&&getIndexOf(collection,target,fromIndex)>-1}var indexBy=createAggregator(function(result,value,key){result[key]=value});var invoke=restParam(function(collection,path,args){var index=-1,isFunc=typeof path=="function",isProp=isKey(path),result=isArrayLike(collection)?Array(collection.length):[];baseEach(collection,function(value){var func=isFunc?path:isProp&&value!=null?value[path]:undefined;result[++index]=func?func.apply(value,args):invokePath(value,path,args)});return result});function map(collection,iteratee,thisArg){var func=isArray(collection)?arrayMap:baseMap;iteratee=getCallback(iteratee,thisArg,3);return func(collection,iteratee)}var partition=createAggregator(function(result,value,key){result[key?0:1].push(value)},function(){return[[],[]]});function pluck(collection,path){return map(collection,property(path))}var reduce=createReduce(arrayReduce,baseEach);var reduceRight=createReduce(arrayReduceRight,baseEachRight);function reject(collection,predicate,thisArg){var func=isArray(collection)?arrayFilter:baseFilter;predicate=getCallback(predicate,thisArg,3);return func(collection,function(value,index,collection){return!predicate(value,index,collection)})}function 
sample(collection,n,guard){if(guard?isIterateeCall(collection,n,guard):n==null){collection=toIterable(collection);var length=collection.length;return length>0?collection[baseRandom(0,length-1)]:undefined}var index=-1,result=toArray(collection),length=result.length,lastIndex=length-1;n=nativeMin(n<0?0:+n||0,length);while(++index0){result=func.apply(this,arguments)}if(n<=1){func=undefined}return result}}var bind=restParam(function(func,thisArg,partials){var bitmask=BIND_FLAG;if(partials.length){var holders=replaceHolders(partials,bind.placeholder);bitmask|=PARTIAL_FLAG}return createWrapper(func,bitmask,thisArg,partials,holders)});var bindAll=restParam(function(object,methodNames){methodNames=methodNames.length?baseFlatten(methodNames):functions(object);var index=-1,length=methodNames.length;while(++indexwait){complete(trailingCall,maxTimeoutId)}else{timeoutId=setTimeout(delayed,remaining)}}function maxDelayed(){complete(trailing,timeoutId)}function debounced(){args=arguments;stamp=now();thisArg=this;trailingCall=trailing&&(timeoutId||!leading);if(maxWait===false){var leadingCall=leading&&!timeoutId}else{if(!maxTimeoutId&&!leading){lastCalled=stamp}var remaining=maxWait-(stamp-lastCalled),isCalled=remaining<=0||remaining>maxWait;if(isCalled){if(maxTimeoutId){maxTimeoutId=clearTimeout(maxTimeoutId)}lastCalled=stamp;result=func.apply(thisArg,args)}else if(!maxTimeoutId){maxTimeoutId=setTimeout(maxDelayed,remaining)}}if(isCalled&&timeoutId){timeoutId=clearTimeout(timeoutId)}else if(!timeoutId&&wait!==maxWait){timeoutId=setTimeout(delayed,wait)}if(leadingCall){isCalled=true;result=func.apply(thisArg,args)}if(isCalled&&!timeoutId&&!maxTimeoutId){args=thisArg=undefined}return result}debounced.cancel=cancel;return debounced}var defer=restParam(function(func,args){return baseDelay(func,1,args)});var delay=restParam(function(func,wait,args){return baseDelay(func,wait,args)});var flow=createFlow();var flowRight=createFlow(true);function memoize(func,resolver){if(typeof 
func!="function"||resolver&&typeof resolver!="function"){throw new TypeError(FUNC_ERROR_TEXT)}var memoized=function(){var args=arguments,key=resolver?resolver.apply(this,args):args[0],cache=memoized.cache;if(cache.has(key)){return cache.get(key)}var result=func.apply(this,args);memoized.cache=cache.set(key,result);return result};memoized.cache=new memoize.Cache;return memoized}var modArgs=restParam(function(func,transforms){transforms=baseFlatten(transforms);if(typeof func!="function"||!arrayEvery(transforms,baseIsFunction)){throw new TypeError(FUNC_ERROR_TEXT)}var length=transforms.length;return restParam(function(args){var index=nativeMin(args.length,length);while(index--){args[index]=transforms[index](args[index])}return func.apply(this,args)})});function negate(predicate){if(typeof predicate!="function"){throw new TypeError(FUNC_ERROR_TEXT)}return function(){return!predicate.apply(this,arguments)}}function once(func){return before(2,func)}var partial=createPartial(PARTIAL_FLAG);var partialRight=createPartial(PARTIAL_RIGHT_FLAG);var rearg=restParam(function(func,indexes){return createWrapper(func,REARG_FLAG,undefined,undefined,undefined,baseFlatten(indexes))});function restParam(func,start){if(typeof func!="function"){throw new TypeError(FUNC_ERROR_TEXT)}start=nativeMax(start===undefined?func.length-1:+start||0,0);return function(){var args=arguments,index=-1,length=nativeMax(args.length-start,0),rest=Array(length);while(++indexother}function gte(value,other){return value>=other}function isArguments(value){return isObjectLike(value)&&isArrayLike(value)&&hasOwnProperty.call(value,"callee")&&!propertyIsEnumerable.call(value,"callee")}var isArray=nativeIsArray||function(value){return isObjectLike(value)&&isLength(value.length)&&objToString.call(value)==arrayTag};function isBoolean(value){return value===true||value===false||isObjectLike(value)&&objToString.call(value)==boolTag}function isDate(value){return 
isObjectLike(value)&&objToString.call(value)==dateTag}function isElement(value){return!!value&&value.nodeType===1&&isObjectLike(value)&&!isPlainObject(value)}function isEmpty(value){if(value==null){return true}if(isArrayLike(value)&&(isArray(value)||isString(value)||isArguments(value)||isObjectLike(value)&&isFunction(value.splice))){return!value.length}return!keys(value).length}function isEqual(value,other,customizer,thisArg){customizer=typeof customizer=="function"?bindCallback(customizer,thisArg,3):undefined;var result=customizer?customizer(value,other):undefined;return result===undefined?baseIsEqual(value,other,customizer):!!result}function isError(value){return isObjectLike(value)&&typeof value.message=="string"&&objToString.call(value)==errorTag}function isFinite(value){return typeof value=="number"&&nativeIsFinite(value)}function isFunction(value){return isObject(value)&&objToString.call(value)==funcTag}function isObject(value){var type=typeof value;return!!value&&(type=="object"||type=="function")}function isMatch(object,source,customizer,thisArg){customizer=typeof customizer=="function"?bindCallback(customizer,thisArg,3):undefined;return baseIsMatch(object,getMatchData(source),customizer)}function isNaN(value){return isNumber(value)&&value!=+value}function isNative(value){if(value==null){return false}if(isFunction(value)){return reIsNative.test(fnToString.call(value))}return isObjectLike(value)&&reIsHostCtor.test(value)}function isNull(value){return value===null}function isNumber(value){return typeof value=="number"||isObjectLike(value)&&objToString.call(value)==numberTag}function isPlainObject(value){var Ctor;if(!(isObjectLike(value)&&objToString.call(value)==objectTag&&!isArguments(value))||!hasOwnProperty.call(value,"constructor")&&(Ctor=value.constructor,typeof Ctor=="function"&&!(Ctor instanceof Ctor))){return false}var result;baseForIn(value,function(subValue,key){result=key});return result===undefined||hasOwnProperty.call(value,result)}function 
isRegExp(value){return isObject(value)&&objToString.call(value)==regexpTag}function isString(value){return typeof value=="string"||isObjectLike(value)&&objToString.call(value)==stringTag}function isTypedArray(value){return isObjectLike(value)&&isLength(value.length)&&!!typedArrayTags[objToString.call(value)]}function isUndefined(value){return value===undefined}function lt(value,other){return value0;while(++index=nativeMin(start,end)&&value=0&&string.indexOf(target,position)==position}function escape(string){string=baseToString(string);return string&&reHasUnescapedHtml.test(string)?string.replace(reUnescapedHtml,escapeHtmlChar):string}function escapeRegExp(string){string=baseToString(string);return string&&reHasRegExpChars.test(string)?string.replace(reRegExpChars,escapeRegExpChar):string||"(?:)"}var kebabCase=createCompounder(function(result,word,index){return result+(index?"-":"")+word.toLowerCase()});function pad(string,length,chars){string=baseToString(string);length=+length;var strLength=string.length;if(strLength>=length||!nativeIsFinite(length)){return string}var mid=(length-strLength)/2,leftLength=nativeFloor(mid),rightLength=nativeCeil(mid);chars=createPadding("",rightLength,chars);return chars.slice(0,leftLength)+string+chars}var padLeft=createPadDir();var padRight=createPadDir(true);function parseInt(string,radix,guard){if(guard?isIterateeCall(string,radix,guard):radix==null){radix=0}else if(radix){radix=+radix}string=trim(string);return nativeParseInt(string,radix||(reHasHexPrefix.test(string)?16:10))}function repeat(string,n){var result="";string=baseToString(string);n=+n;if(n<1||!string||!nativeIsFinite(n)){return result}do{if(n%2){result+=string}n=nativeFloor(n/2);string+=string}while(n);return result}var snakeCase=createCompounder(function(result,word,index){return result+(index?"_":"")+word.toLowerCase()});var startCase=createCompounder(function(result,word,index){return result+(index?" 
":"")+(word.charAt(0).toUpperCase()+word.slice(1))});function startsWith(string,target,position){string=baseToString(string);position=position==null?0:nativeMin(position<0?0:+position||0,string.length);return string.lastIndexOf(target,position)==position}function template(string,options,otherOptions){var settings=lodash.templateSettings;if(otherOptions&&isIterateeCall(string,options,otherOptions)){options=otherOptions=undefined}string=baseToString(string);options=assignWith(baseAssign({},otherOptions||options),settings,assignOwnDefaults);var imports=assignWith(baseAssign({},options.imports),settings.imports,assignOwnDefaults),importsKeys=keys(imports),importsValues=baseValues(imports,importsKeys);var isEscaping,isEvaluating,index=0,interpolate=options.interpolate||reNoMatch,source="__p += '";var reDelimiters=RegExp((options.escape||reNoMatch).source+"|"+interpolate.source+"|"+(interpolate===reInterpolate?reEsTemplate:reNoMatch).source+"|"+(options.evaluate||reNoMatch).source+"|$","g");var sourceURL="//# sourceURL="+("sourceURL"in options?options.sourceURL:"lodash.templateSources["+ ++templateCounter+"]")+"\n";string.replace(reDelimiters,function(match,escapeValue,interpolateValue,esTemplateValue,evaluateValue,offset){interpolateValue||(interpolateValue=esTemplateValue);source+=string.slice(index,offset).replace(reUnescapedString,escapeStringChar);if(escapeValue){isEscaping=true;source+="' +\n__e("+escapeValue+") +\n'"}if(evaluateValue){isEvaluating=true;source+="';\n"+evaluateValue+";\n__p += '"}if(interpolateValue){source+="' +\n((__t = ("+interpolateValue+")) == null ? 
'' : __t) +\n'"}index=offset+match.length;return match});source+="';\n";var variable=options.variable;if(!variable){source="with (obj) {\n"+source+"\n}\n"}source=(isEvaluating?source.replace(reEmptyStringLeading,""):source).replace(reEmptyStringMiddle,"$1").replace(reEmptyStringTrailing,"$1;");source="function("+(variable||"obj")+") {\n"+(variable?"":"obj || (obj = {});\n")+"var __t, __p = ''"+(isEscaping?", __e = _.escape":"")+(isEvaluating?", __j = Array.prototype.join;\n"+"function print() { __p += __j.call(arguments, '') }\n":";\n")+source+"return __p\n}";var result=attempt(function(){return Function(importsKeys,sourceURL+"return "+source).apply(undefined,importsValues)});result.source=source;if(isError(result)){throw result}return result}function trim(string,chars,guard){var value=string;string=baseToString(string);if(!string){return string}if(guard?isIterateeCall(value,chars,guard):chars==null){return string.slice(trimmedLeftIndex(string),trimmedRightIndex(string)+1)}chars=chars+"";return string.slice(charsLeftIndex(string,chars),charsRightIndex(string,chars)+1)}function trimLeft(string,chars,guard){var value=string;string=baseToString(string);if(!string){return string}if(guard?isIterateeCall(value,chars,guard):chars==null){return string.slice(trimmedLeftIndex(string))}return string.slice(charsLeftIndex(string,chars+""))}function trimRight(string,chars,guard){var value=string;string=baseToString(string);if(!string){return string}if(guard?isIterateeCall(value,chars,guard):chars==null){return string.slice(0,trimmedRightIndex(string)+1)}return string.slice(0,charsRightIndex(string,chars+"")+1)}function trunc(string,options,guard){if(guard&&isIterateeCall(string,options,guard)){options=undefined}var length=DEFAULT_TRUNC_LENGTH,omission=DEFAULT_TRUNC_OMISSION;if(options!=null){if(isObject(options)){var separator="separator"in options?options.separator:separator;length="length"in options?+options.length||0:length;omission="omission"in 
options?baseToString(options.omission):omission}else{length=+options||0}}string=baseToString(string);if(length>=string.length){return string}var end=length-omission.length;if(end<1){return omission}var result=string.slice(0,end);if(separator==null){return result+omission}if(isRegExp(separator)){if(string.slice(end).search(separator)){var match,newEnd,substring=string.slice(0,end);if(!separator.global){separator=RegExp(separator.source,(reFlags.exec(separator)||"")+"g")}separator.lastIndex=0;while(match=separator.exec(substring)){newEnd=match.index}result=result.slice(0,newEnd==null?end:newEnd)}}else if(string.indexOf(separator,end)!=end){var index=result.lastIndexOf(separator);if(index>-1){result=result.slice(0,index)}}return result+omission}function unescape(string){string=baseToString(string);return string&&reHasEscapedHtml.test(string)?string.replace(reEscapedHtml,unescapeHtmlChar):string}function words(string,pattern,guard){if(guard&&isIterateeCall(string,pattern,guard)){pattern=undefined}string=baseToString(string);return string.match(pattern||reWords)||[]}var attempt=restParam(function(func,args){try{return func.apply(undefined,args)}catch(e){return isError(e)?e:new Error(e)}});function callback(func,thisArg,guard){if(guard&&isIterateeCall(func,thisArg,guard)){thisArg=undefined}return isObjectLike(func)?matches(func):baseCallback(func,thisArg)}function constant(value){return function(){return value}}function identity(value){return value}function matches(source){return baseMatches(baseClone(source,true))}function matchesProperty(path,srcValue){return baseMatchesProperty(path,baseClone(srcValue,true))}var method=restParam(function(path,args){return function(object){return invokePath(object,path,args)}});var methodOf=restParam(function(object,args){return function(path){return invokePath(object,path,args)}});function mixin(object,source,options){if(options==null){var 
isObj=isObject(source),props=isObj?keys(source):undefined,methodNames=props&&props.length?baseFunctions(source,props):undefined;if(!(methodNames?methodNames.length:isObj)){methodNames=false;options=source;source=object;object=this}}if(!methodNames){methodNames=baseFunctions(source,keys(source))}var chain=true,index=-1,isFunc=isFunction(object),length=methodNames.length;if(options===false){chain=false}else if(isObject(options)&&"chain"in options){chain=options.chain}while(++index0||end<0)){return new LazyWrapper(result)}if(start<0){result=result.takeRight(-start)}else if(start){result=result.drop(start)}if(end!==undefined){end=+end||0;result=end<0?result.dropRight(-end):result.take(end-start)}return result};LazyWrapper.prototype.takeRightWhile=function(predicate,thisArg){return this.reverse().takeWhile(predicate,thisArg).reverse()};LazyWrapper.prototype.toArray=function(){return this.take(POSITIVE_INFINITY)};baseForOwn(LazyWrapper.prototype,function(func,methodName){var checkIteratee=/^(?:filter|map|reject)|While$/.test(methodName),retUnwrapped=/^(?:first|last)$/.test(methodName),lodashFunc=lodash[retUnwrapped?"take"+(methodName=="last"?"Right":""):methodName];if(!lodashFunc){return}lodash.prototype[methodName]=function(){var args=retUnwrapped?[1]:arguments,chainAll=this.__chain__,value=this.__wrapped__,isHybrid=!!this.__actions__.length,isLazy=value instanceof LazyWrapper,iteratee=args[0],useLazy=isLazy||isArray(value);if(useLazy&&checkIteratee&&typeof iteratee=="function"&&iteratee.length!=1){isLazy=useLazy=false}var interceptor=function(value){return retUnwrapped&&chainAll?lodashFunc(value,1)[0]:lodashFunc.apply(undefined,arrayPush([value],args))};var action={func:thru,args:[interceptor],thisArg:undefined},onlyLazy=isLazy&&!isHybrid;if(retUnwrapped&&!chainAll){if(onlyLazy){value=value.clone();value.__actions__.push(action);return func.call(value)}return lodashFunc.call(undefined,this.value())[0]}if(!retUnwrapped&&useLazy){value=onlyLazy?value:new 
LazyWrapper(this);var result=func.apply(value,args);result.__actions__.push(action);return new LodashWrapper(result,chainAll)}return this.thru(interceptor)}});arrayEach(["join","pop","push","replace","shift","sort","splice","split","unshift"],function(methodName){var func=(/^(?:replace|split)$/.test(methodName)?stringProto:arrayProto)[methodName],chainName=/^(?:push|sort|unshift)$/.test(methodName)?"tap":"thru",retUnwrapped=/^(?:join|pop|replace|shift)$/.test(methodName);lodash.prototype[methodName]=function(){var args=arguments;if(retUnwrapped&&!this.__chain__){return func.apply(this.value(),args)}return this[chainName](function(value){return func.apply(value,args)})}});baseForOwn(LazyWrapper.prototype,function(func,methodName){var lodashFunc=lodash[methodName];if(lodashFunc){var key=lodashFunc.name,names=realNames[key]||(realNames[key]=[]);names.push({name:methodName,func:lodashFunc})}});realNames[createHybridWrapper(undefined,BIND_KEY_FLAG).name]=[{name:"wrapper",func:undefined}];LazyWrapper.prototype.clone=lazyClone;LazyWrapper.prototype.reverse=lazyReverse;LazyWrapper.prototype.value=lazyValue;lodash.prototype.chain=wrapperChain;lodash.prototype.commit=wrapperCommit;lodash.prototype.concat=wrapperConcat;lodash.prototype.plant=wrapperPlant;lodash.prototype.reverse=wrapperReverse;lodash.prototype.toString=wrapperToString;lodash.prototype.run=lodash.prototype.toJSON=lodash.prototype.valueOf=lodash.prototype.value=wrapperValue;lodash.prototype.collect=lodash.prototype.map;lodash.prototype.head=lodash.prototype.first;lodash.prototype.select=lodash.prototype.filter;lodash.prototype.tail=lodash.prototype.rest;return lodash}var _=runInContext();if(typeof define=="function"&&typeof define.amd=="object"&&define.amd){root._=_;define(function(){return _})}else if(freeExports&&freeModule){if(moduleExports){(freeModule.exports=_)._=_}else{freeExports._=_}}else{root._=_}}).call(this)}).call(this,typeof global!=="undefined"?global:typeof self!=="undefined"?self:typeof 
window!=="undefined"?window:{})},{}],3:[function(require,module,exports){(function(window,document,undefined){var _MAP={8:"backspace",9:"tab",13:"enter",16:"shift",17:"ctrl",18:"alt",20:"capslock",27:"esc",32:"space",33:"pageup",34:"pagedown",35:"end",36:"home",37:"left",38:"up",39:"right",40:"down",45:"ins",46:"del",91:"meta",93:"meta",224:"meta"};var _KEYCODE_MAP={106:"*",107:"+",109:"-",110:".",111:"/",186:";",187:"=",188:",",189:"-",190:".",191:"/",192:"`",219:"[",220:"\\",221:"]",222:"'"};var _SHIFT_MAP={"~":"`","!":"1","@":"2","#":"3",$:"4","%":"5","^":"6","&":"7","*":"8","(":"9",")":"0",_:"-","+":"=",":":";",'"':"'","<":",",">":".","?":"/","|":"\\"};var _SPECIAL_ALIASES={option:"alt",command:"meta",return:"enter",escape:"esc",plus:"+",mod:/Mac|iPod|iPhone|iPad/.test(navigator.platform)?"meta":"ctrl"};var _REVERSE_MAP;for(var i=1;i<20;++i){_MAP[111+i]="f"+i}for(i=0;i<=9;++i){_MAP[i+96]=i}function _addEvent(object,type,callback){if(object.addEventListener){object.addEventListener(type,callback,false);return}object.attachEvent("on"+type,callback)}function _characterFromEvent(e){if(e.type=="keypress"){var character=String.fromCharCode(e.which);if(!e.shiftKey){character=character.toLowerCase()}return character}if(_MAP[e.which]){return _MAP[e.which]}if(_KEYCODE_MAP[e.which]){return _KEYCODE_MAP[e.which]}return String.fromCharCode(e.which).toLowerCase()}function _modifiersMatch(modifiers1,modifiers2){return modifiers1.sort().join(",")===modifiers2.sort().join(",")}function _eventModifiers(e){var modifiers=[];if(e.shiftKey){modifiers.push("shift")}if(e.altKey){modifiers.push("alt")}if(e.ctrlKey){modifiers.push("ctrl")}if(e.metaKey){modifiers.push("meta")}return modifiers}function _preventDefault(e){if(e.preventDefault){e.preventDefault();return}e.returnValue=false}function _stopPropagation(e){if(e.stopPropagation){e.stopPropagation();return}e.cancelBubble=true}function _isModifier(key){return key=="shift"||key=="ctrl"||key=="alt"||key=="meta"}function 
_getReverseMap(){if(!_REVERSE_MAP){_REVERSE_MAP={};for(var key in _MAP){if(key>95&&key<112){continue}if(_MAP.hasOwnProperty(key)){_REVERSE_MAP[_MAP[key]]=key}}}return _REVERSE_MAP}function _pickBestAction(key,modifiers,action){if(!action){action=_getReverseMap()[key]?"keydown":"keypress"}if(action=="keypress"&&modifiers.length){action="keydown"}return action}function _keysFromString(combination){if(combination==="+"){return["+"]}combination=combination.replace(/\+{2}/g,"+plus");return combination.split("+")}function _getKeyInfo(combination,action){var keys;var key;var i;var modifiers=[];keys=_keysFromString(combination);for(i=0;i1){_bindSequence(combination,sequence,callback,action);return}info=_getKeyInfo(combination,action);self._callbacks[info.key]=self._callbacks[info.key]||[];_getMatches(info.key,info.modifiers,{type:info.action},sequenceName,combination,level);self._callbacks[info.key][sequenceName?"unshift":"push"]({callback:callback,modifiers:info.modifiers,action:info.action,seq:sequenceName,level:level,combo:combination})}self._bindMultiple=function(combinations,callback,action){for(var i=0;i-1){return false}if(_belongsTo(element,self.target)){return false}return element.tagName=="INPUT"||element.tagName=="SELECT"||element.tagName=="TEXTAREA"||element.isContentEditable};Mousetrap.prototype.handleKey=function(){var self=this;return self._handleKey.apply(self,arguments)};Mousetrap.init=function(){var documentMousetrap=Mousetrap(document);for(var method in documentMousetrap){if(method.charAt(0)!=="_"){Mousetrap[method]=function(method){return function(){return documentMousetrap[method].apply(documentMousetrap,arguments)}}(method)}}};Mousetrap.init();window.Mousetrap=Mousetrap;if(typeof module!=="undefined"&&module.exports){module.exports=Mousetrap}if(typeof define==="function"&&define.amd){define(function(){return Mousetrap})}})(window,document)},{}],4:[function(require,module,exports){(function(process){function normalizeArray(parts,allowAboveRoot){var 
up=0;for(var i=parts.length-1;i>=0;i--){var last=parts[i];if(last==="."){parts.splice(i,1)}else if(last===".."){parts.splice(i,1);up++}else if(up){parts.splice(i,1);up--}}if(allowAboveRoot){for(;up--;up){parts.unshift("..")}}return parts}var splitPathRe=/^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/;var splitPath=function(filename){return splitPathRe.exec(filename).slice(1)};exports.resolve=function(){var resolvedPath="",resolvedAbsolute=false;for(var i=arguments.length-1;i>=-1&&!resolvedAbsolute;i--){var path=i>=0?arguments[i]:process.cwd();if(typeof path!=="string"){throw new TypeError("Arguments to path.resolve must be strings")}else if(!path){continue}resolvedPath=path+"/"+resolvedPath;resolvedAbsolute=path.charAt(0)==="/"}resolvedPath=normalizeArray(filter(resolvedPath.split("/"),function(p){return!!p}),!resolvedAbsolute).join("/");return(resolvedAbsolute?"/":"")+resolvedPath||"."};exports.normalize=function(path){var isAbsolute=exports.isAbsolute(path),trailingSlash=substr(path,-1)==="/";path=normalizeArray(filter(path.split("/"),function(p){return!!p}),!isAbsolute).join("/");if(!path&&!isAbsolute){path="."}if(path&&trailingSlash){path+="/"}return(isAbsolute?"/":"")+path};exports.isAbsolute=function(path){return path.charAt(0)==="/"};exports.join=function(){var paths=Array.prototype.slice.call(arguments,0);return exports.normalize(filter(paths,function(p,index){if(typeof p!=="string"){throw new TypeError("Arguments to path.join must be strings")}return p}).join("/"))};exports.relative=function(from,to){from=exports.resolve(from).substr(1);to=exports.resolve(to).substr(1);function trim(arr){var start=0;for(;start=0;end--){if(arr[end]!=="")break}if(start>end)return[];return arr.slice(start,end-start+1)}var fromParts=trim(from.split("/"));var toParts=trim(to.split("/"));var length=Math.min(fromParts.length,toParts.length);var samePartsLength=length;for(var i=0;i1){for(var i=1;i= 0x80 (not a basic code point)","invalid-input":"Invalid 
input"},baseMinusTMin=base-tMin,floor=Math.floor,stringFromCharCode=String.fromCharCode,key;function error(type){throw RangeError(errors[type])}function map(array,fn){var length=array.length;var result=[];while(length--){result[length]=fn(array[length])}return result}function mapDomain(string,fn){var parts=string.split("@");var result="";if(parts.length>1){result=parts[0]+"@";string=parts[1]}string=string.replace(regexSeparators,".");var labels=string.split(".");var encoded=map(labels,fn).join(".");return result+encoded}function ucs2decode(string){var output=[],counter=0,length=string.length,value,extra;while(counter=55296&&value<=56319&&counter65535){value-=65536;output+=stringFromCharCode(value>>>10&1023|55296);value=56320|value&1023}output+=stringFromCharCode(value);return output}).join("")}function basicToDigit(codePoint){if(codePoint-48<10){return codePoint-22}if(codePoint-65<26){return codePoint-65}if(codePoint-97<26){return codePoint-97}return base}function digitToBasic(digit,flag){return digit+22+75*(digit<26)-((flag!=0)<<5)}function adapt(delta,numPoints,firstTime){var k=0;delta=firstTime?floor(delta/damp):delta>>1;delta+=floor(delta/numPoints);for(;delta>baseMinusTMin*tMax>>1;k+=base){delta=floor(delta/baseMinusTMin)}return floor(k+(baseMinusTMin+1)*delta/(delta+skew))}function decode(input){var 
output=[],inputLength=input.length,out,i=0,n=initialN,bias=initialBias,basic,j,index,oldi,w,k,digit,t,baseMinusT;basic=input.lastIndexOf(delimiter);if(basic<0){basic=0}for(j=0;j=128){error("not-basic")}output.push(input.charCodeAt(j))}for(index=basic>0?basic+1:0;index=inputLength){error("invalid-input")}digit=basicToDigit(input.charCodeAt(index++));if(digit>=base||digit>floor((maxInt-i)/w)){error("overflow")}i+=digit*w;t=k<=bias?tMin:k>=bias+tMax?tMax:k-bias;if(digitfloor(maxInt/baseMinusT)){error("overflow")}w*=baseMinusT}out=output.length+1;bias=adapt(i-oldi,out,oldi==0);if(floor(i/out)>maxInt-n){error("overflow")}n+=floor(i/out);i%=out;output.splice(i++,0,n)}return ucs2encode(output)}function encode(input){var n,delta,handledCPCount,basicLength,bias,j,m,q,k,t,currentValue,output=[],inputLength,handledCPCountPlusOne,baseMinusT,qMinusT;input=ucs2decode(input);inputLength=input.length;n=initialN;delta=0;bias=initialBias;for(j=0;j=n&¤tValuefloor((maxInt-delta)/handledCPCountPlusOne)){error("overflow")}delta+=(m-n)*handledCPCountPlusOne;n=m;for(j=0;jmaxInt){error("overflow")}if(currentValue==n){for(q=delta,k=base;;k+=base){t=k<=bias?tMin:k>=bias+tMax?tMax:k-bias;if(q0&&len>maxKeys){len=maxKeys}for(var i=0;i=0){kstr=x.substr(0,idx);vstr=x.substr(idx+1)}else{kstr=x;vstr=""}k=decodeURIComponent(kstr);v=decodeURIComponent(vstr);if(!hasOwnProperty(obj,k)){obj[k]=v}else if(isArray(obj[k])){obj[k].push(v)}else{obj[k]=[obj[k],v]}}return obj};var isArray=Array.isArray||function(xs){return Object.prototype.toString.call(xs)==="[object Array]"}},{}],8:[function(require,module,exports){"use strict";var stringifyPrimitive=function(v){switch(typeof v){case"string":return v;case"boolean":return v?"true":"false";case"number":return isFinite(v)?v:"";default:return""}};module.exports=function(obj,sep,eq,name){sep=sep||"&";eq=eq||"=";if(obj===null){obj=undefined}if(typeof obj==="object"){return map(objectKeys(obj),function(k){var 
ks=encodeURIComponent(stringifyPrimitive(k))+eq;if(isArray(obj[k])){return map(obj[k],function(v){return ks+encodeURIComponent(stringifyPrimitive(v))}).join(sep)}else{return ks+encodeURIComponent(stringifyPrimitive(obj[k]))}}).join(sep)}if(!name)return"";return encodeURIComponent(stringifyPrimitive(name))+eq+encodeURIComponent(stringifyPrimitive(obj))};var isArray=Array.isArray||function(xs){return Object.prototype.toString.call(xs)==="[object Array]"};function map(xs,f){if(xs.map)return xs.map(f);var res=[];for(var i=0;i",'"',"`"," ","\r","\n","\t"],unwise=["{","}","|","\\","^","`"].concat(delims),autoEscape=["'"].concat(unwise),nonHostChars=["%","/","?",";","#"].concat(autoEscape),hostEndingChars=["/","?","#"],hostnameMaxLen=255,hostnamePartPattern=/^[a-z0-9A-Z_-]{0,63}$/,hostnamePartStart=/^([a-z0-9A-Z_-]{0,63})(.*)$/,unsafeProtocol={javascript:true,"javascript:":true},hostlessProtocol={javascript:true,"javascript:":true},slashedProtocol={http:true,https:true,ftp:true,gopher:true,file:true,"http:":true,"https:":true,"ftp:":true,"gopher:":true,"file:":true},querystring=require("querystring");function urlParse(url,parseQueryString,slashesDenoteHost){if(url&&isObject(url)&&url instanceof Url)return url;var u=new Url;u.parse(url,parseQueryString,slashesDenoteHost);return u}Url.prototype.parse=function(url,parseQueryString,slashesDenoteHost){if(!isString(url)){throw new TypeError("Parameter 'url' must be a string, not "+typeof url)}var rest=url;rest=rest.trim();var proto=protocolPattern.exec(rest);if(proto){proto=proto[0];var lowerProto=proto.toLowerCase();this.protocol=lowerProto;rest=rest.substr(proto.length)}if(slashesDenoteHost||proto||rest.match(/^\/\/[^@\/]+@[^@\/]+/)){var slashes=rest.substr(0,2)==="//";if(slashes&&!(proto&&hostlessProtocol[proto])){rest=rest.substr(2);this.slashes=true}}if(!hostlessProtocol[proto]&&(slashes||proto&&!slashedProtocol[proto])){var hostEnd=-1;for(var 
i=0;i127){newpart+="x"}else{newpart+=part[j]}}if(!newpart.match(hostnamePartPattern)){var validParts=hostparts.slice(0,i);var notHost=hostparts.slice(i+1);var bit=part.match(hostnamePartStart);if(bit){validParts.push(bit[1]);notHost.unshift(bit[2])}if(notHost.length){rest="/"+notHost.join(".")+rest}this.hostname=validParts.join(".");break}}}}if(this.hostname.length>hostnameMaxLen){this.hostname=""}else{this.hostname=this.hostname.toLowerCase()}if(!ipv6Hostname){var domainArray=this.hostname.split(".");var newOut=[];for(var i=0;i0?result.host.split("@"):false;if(authInHost){result.auth=authInHost.shift();result.host=result.hostname=authInHost.shift()}}result.search=relative.search;result.query=relative.query;if(!isNull(result.pathname)||!isNull(result.search)){result.path=(result.pathname?result.pathname:"")+(result.search?result.search:"")}result.href=result.format();return result}if(!srcPath.length){result.pathname=null;if(result.search){result.path="/"+result.search}else{result.path=null}result.href=result.format();return result}var last=srcPath.slice(-1)[0];var hasTrailingSlash=(result.host||relative.host)&&(last==="."||last==="..")||last==="";var up=0;for(var i=srcPath.length;i>=0;i--){last=srcPath[i];if(last=="."){srcPath.splice(i,1)}else if(last===".."){srcPath.splice(i,1);up++}else if(up){srcPath.splice(i,1);up--}}if(!mustEndAbs&&!removeAllDots){for(;up--;up){srcPath.unshift("..")}}if(mustEndAbs&&srcPath[0]!==""&&(!srcPath[0]||srcPath[0].charAt(0)!=="/")){srcPath.unshift("")}if(hasTrailingSlash&&srcPath.join("/").substr(-1)!=="/"){srcPath.push("")}var isAbsolute=srcPath[0]===""||srcPath[0]&&srcPath[0].charAt(0)==="/";if(psychotic){result.hostname=result.host=isAbsolute?"":srcPath.length?srcPath.shift():"";var 
authInHost=result.host&&result.host.indexOf("@")>0?result.host.split("@"):false;if(authInHost){result.auth=authInHost.shift();result.host=result.hostname=authInHost.shift()}}mustEndAbs=mustEndAbs||result.host&&srcPath.length;if(mustEndAbs&&!isAbsolute){srcPath.unshift("")}if(!srcPath.length){result.pathname=null;result.path=null}else{result.pathname=srcPath.join("/")}if(!isNull(result.pathname)||!isNull(result.search)){result.path=(result.pathname?result.pathname:"")+(result.search?result.search:"")}result.auth=relative.auth||result.auth;result.slashes=result.slashes||relative.slashes;result.href=result.format();return result};Url.prototype.parseHost=function(){var host=this.host;var port=portPattern.exec(host);if(port){port=port[0];if(port!==":"){this.port=port.substr(1)}host=host.substr(0,host.length-port.length)}if(host)this.hostname=host};function isString(arg){return typeof arg==="string"}function isObject(arg){return typeof arg==="object"&&arg!==null}function isNull(arg){return arg===null}function isNullOrUndefined(arg){return arg==null}},{punycode:6,querystring:9}],11:[function(require,module,exports){var $=require("jquery");function toggleDropdown(e){var $dropdown=$(e.currentTarget).parent().find(".dropdown-menu");$dropdown.toggleClass("open");e.stopPropagation();e.preventDefault()}function closeDropdown(e){$(".dropdown-menu").removeClass("open")}function init(){$(document).on("click",".toggle-dropdown",toggleDropdown);$(document).on("click",".dropdown-menu",function(e){e.stopPropagation()});$(document).on("click",closeDropdown)}module.exports={init:init}},{jquery:1}],12:[function(require,module,exports){var $=require("jquery");module.exports=$({})},{jquery:1}],13:[function(require,module,exports){var $=require("jquery");var _=require("lodash");var storage=require("./storage");var dropdown=require("./dropdown");var events=require("./events");var state=require("./state");var keyboard=require("./keyboard");var navigation=require("./navigation");var 
sidebar=require("./sidebar");var toolbar=require("./toolbar");function start(config){sidebar.init();keyboard.init();dropdown.init();navigation.init();toolbar.createButton({index:0,icon:"fa fa-align-justify",label:"Toggle Sidebar",onClick:function(e){e.preventDefault();sidebar.toggle()}});events.trigger("start",config);navigation.notify()}var gitbook={start:start,events:events,state:state,toolbar:toolbar,sidebar:sidebar,storage:storage,keyboard:keyboard};var MODULES={gitbook:gitbook,jquery:$,lodash:_};window.gitbook=gitbook;window.$=$;window.jQuery=$;gitbook.require=function(mods,fn){mods=_.map(mods,function(mod){mod=mod.toLowerCase();if(!MODULES[mod]){throw new Error("GitBook module "+mod+" doesn't exist")}return MODULES[mod]});fn.apply(null,mods)};module.exports={}},{"./dropdown":11,"./events":12,"./keyboard":14,"./navigation":16,"./sidebar":18,"./state":19,"./storage":20,"./toolbar":21,jquery:1,lodash:2}],14:[function(require,module,exports){var Mousetrap=require("mousetrap");var navigation=require("./navigation");var sidebar=require("./sidebar");function bindShortcut(keys,fn){Mousetrap.bind(keys,function(e){fn();return false})}function init(){bindShortcut(["right"],function(e){navigation.goNext()});bindShortcut(["left"],function(e){navigation.goPrev()});bindShortcut(["s"],function(e){sidebar.toggle()})}module.exports={init:init,bind:bindShortcut}},{"./navigation":16,"./sidebar":18,mousetrap:3}],15:[function(require,module,exports){var state=require("./state");function showLoading(p){state.$book.addClass("is-loading");p.always(function(){state.$book.removeClass("is-loading")});return p}module.exports={show:showLoading}},{"./state":19}],16:[function(require,module,exports){var $=require("jquery");var url=require("url");var events=require("./events");var state=require("./state");var loading=require("./loading");var usePushState=typeof history.pushState!=="undefined";function handleNavigation(relativeUrl,push){var 
uri=url.resolve(window.location.pathname,relativeUrl);notifyPageChange();location.href=relativeUrl;return}function updateNavigationPosition(){var bodyInnerWidth,pageWrapperWidth;bodyInnerWidth=parseInt($(".body-inner").css("width"),10);pageWrapperWidth=parseInt($(".page-wrapper").css("width"),10);$(".navigation-next").css("margin-right",bodyInnerWidth-pageWrapperWidth+"px")}function notifyPageChange(){events.trigger("page.change")}function preparePage(notify){var $bookBody=$(".book-body");var $bookInner=$bookBody.find(".body-inner");var $pageWrapper=$bookInner.find(".page-wrapper");updateNavigationPosition();$bookInner.scrollTop(0);$bookBody.scrollTop(0);if(notify!==false)notifyPageChange()}function isLeftClickEvent(e){return e.button===0}function isModifiedEvent(e){return!!(e.metaKey||e.altKey||e.ctrlKey||e.shiftKey)}function handlePagination(e){if(isModifiedEvent(e)||!isLeftClickEvent(e)){return}e.stopPropagation();e.preventDefault();var url=$(this).attr("href");if(url)handleNavigation(url,true)}function goNext(){var url=$(".navigation-next").attr("href");if(url)handleNavigation(url,true)}function goPrev(){var url=$(".navigation-prev").attr("href");if(url)handleNavigation(url,true)}function init(){$.ajaxSetup({});if(location.protocol!=="file:"){history.replaceState({path:window.location.href},"")}window.onpopstate=function(event){if(event.state===null){return}return handleNavigation(event.state.path,false)};$(document).on("click",".navigation-prev",handlePagination);$(document).on("click",".navigation-next",handlePagination);$(document).on("click",".summary [data-path] a",handlePagination);$(window).resize(updateNavigationPosition);preparePage(false)}module.exports={init:init,goNext:goNext,goPrev:goPrev,notify:notifyPageChange}},{"./events":12,"./loading":15,"./state":19,jquery:1,url:10}],17:[function(require,module,exports){module.exports={isMobile:function(){return document.body.clientWidth<=600}}},{}],18:[function(require,module,exports){var 
$=require("jquery");var _=require("lodash");var storage=require("./storage");var platform=require("./platform");var state=require("./state");function toggleSidebar(_state,animation){if(state!=null&&isOpen()==_state)return;if(animation==null)animation=true;state.$book.toggleClass("without-animation",!animation);state.$book.toggleClass("with-summary",_state);storage.set("sidebar",isOpen())}function isOpen(){return state.$book.hasClass("with-summary")}function init(){if(platform.isMobile()){toggleSidebar(false,false)}else{toggleSidebar(storage.get("sidebar",true),false)}$(document).on("click",".book-summary li.chapter a",function(e){if(platform.isMobile())toggleSidebar(false,false)})}function filterSummary(paths){var $summary=$(".book-summary");$summary.find("li").each(function(){var path=$(this).data("path");var st=paths==null||_.contains(paths,path);$(this).toggle(st);if(st)$(this).parents("li").show()})}module.exports={init:init,isOpen:isOpen,toggle:toggleSidebar,filter:filterSummary}},{"./platform":17,"./state":19,"./storage":20,jquery:1,lodash:2}],19:[function(require,module,exports){var $=require("jquery");var url=require("url");var path=require("path");var state={};state.update=function(dom){var $book=$(dom.find(".book"));state.$book=$book;state.level=$book.data("level");state.basePath=$book.data("basepath");state.innerLanguage=$book.data("innerlanguage");state.revision=$book.data("revision");state.filepath=$book.data("filepath");state.chapterTitle=$book.data("chapter-title");state.root=url.resolve(location.protocol+"//"+location.host,path.dirname(path.resolve(location.pathname.replace(/\/$/,"/index.html"),state.basePath))).replace(/\/?$/,"/");state.bookRoot=state.innerLanguage?url.resolve(state.root,".."):state.root};state.update($);module.exports=state},{jquery:1,path:4,url:10}],20:[function(require,module,exports){var 
baseKey="";module.exports={setBaseKey:function(key){baseKey=key},set:function(key,value){key=baseKey+":"+key;try{sessionStorage[key]=JSON.stringify(value)}catch(e){}},get:function(key,def){key=baseKey+":"+key;if(sessionStorage[key]===undefined)return def;try{var v=JSON.parse(sessionStorage[key]);return v==null?def:v}catch(err){return sessionStorage[key]||def}},remove:function(key){key=baseKey+":"+key;sessionStorage.removeItem(key)}}},{}],21:[function(require,module,exports){var $=require("jquery");var _=require("lodash");var events=require("./events");var buttons=[];function insertAt(parent,selector,index,element){var lastIndex=parent.children(selector).length;if(index<0){index=Math.max(0,lastIndex+1+index)}parent.append(element);if(index",{class:"dropdown-menu",html:''});if(_.isString(dropdown)){$menu.append(dropdown)}else{var groups=_.map(dropdown,function(group){if(_.isArray(group))return group;else return[group]});_.each(groups,function(group){var $group=$("
",{class:"buttons"});var sizeClass="size-"+group.length;_.each(group,function(btn){btn=_.defaults(btn||{},{text:"",className:"",onClick:defaultOnClick});var $btn=$("'; - var clipboard; - - gitbook.events.bind("page.change", function() { - - if (!ClipboardJS.isSupported()) return; - - // the page.change event is thrown twice: before and after the page changes - if (clipboard) { - // clipboard is already defined but we are on the same page - if (clipboard._prevPage === window.location.pathname) return; - // clipboard is already defined and url path change - // we can deduct that we are before page changes - clipboard.destroy(); // destroy the previous events listeners - clipboard = undefined; // reset the clipboard object - return; - } - - $(copyButton).prependTo("div.sourceCode"); - - clipboard = new ClipboardJS(".copy-to-clipboard-button", { - text: function(trigger) { - return trigger.parentNode.textContent; - } - }); - - clipboard._prevPage = window.location.pathname - - }); - -}); diff --git a/_book/libs/gitbook-2.6.7/js/plugin-fontsettings.js b/_book/libs/gitbook-2.6.7/js/plugin-fontsettings.js deleted file mode 100644 index a70f0fb37..000000000 --- a/_book/libs/gitbook-2.6.7/js/plugin-fontsettings.js +++ /dev/null @@ -1,152 +0,0 @@ -gitbook.require(["gitbook", "lodash", "jQuery"], function(gitbook, _, $) { - var fontState; - - var THEMES = { - "white": 0, - "sepia": 1, - "night": 2 - }; - - var FAMILY = { - "serif": 0, - "sans": 1 - }; - - // Save current font settings - function saveFontSettings() { - gitbook.storage.set("fontState", fontState); - update(); - } - - // Increase font size - function enlargeFontSize(e) { - e.preventDefault(); - if (fontState.size >= 4) return; - - fontState.size++; - saveFontSettings(); - }; - - // Decrease font size - function reduceFontSize(e) { - e.preventDefault(); - if (fontState.size <= 0) return; - - fontState.size--; - saveFontSettings(); - }; - - // Change font family - function changeFontFamily(index, e) { - 
e.preventDefault(); - - fontState.family = index; - saveFontSettings(); - }; - - // Change type of color - function changeColorTheme(index, e) { - e.preventDefault(); - - var $book = $(".book"); - - if (fontState.theme !== 0) - $book.removeClass("color-theme-"+fontState.theme); - - fontState.theme = index; - if (fontState.theme !== 0) - $book.addClass("color-theme-"+fontState.theme); - - saveFontSettings(); - }; - - function update() { - var $book = gitbook.state.$book; - - $(".font-settings .font-family-list li").removeClass("active"); - $(".font-settings .font-family-list li:nth-child("+(fontState.family+1)+")").addClass("active"); - - $book[0].className = $book[0].className.replace(/\bfont-\S+/g, ''); - $book.addClass("font-size-"+fontState.size); - $book.addClass("font-family-"+fontState.family); - - if(fontState.theme !== 0) { - $book[0].className = $book[0].className.replace(/\bcolor-theme-\S+/g, ''); - $book.addClass("color-theme-"+fontState.theme); - } - }; - - function init(config) { - var $bookBody, $book; - - //Find DOM elements. 
- $book = gitbook.state.$book; - $bookBody = $book.find(".book-body"); - - // Instantiate font state object - fontState = gitbook.storage.get("fontState", { - size: config.size || 2, - family: FAMILY[config.family || "sans"], - theme: THEMES[config.theme || "white"] - }); - - update(); - }; - - - gitbook.events.bind("start", function(e, config) { - var opts = config.fontsettings; - if (!opts) return; - - // Create buttons in toolbar - gitbook.toolbar.createButton({ - icon: 'fa fa-font', - label: 'Font Settings', - className: 'font-settings', - dropdown: [ - [ - { - text: 'A', - className: 'font-reduce', - onClick: reduceFontSize - }, - { - text: 'A', - className: 'font-enlarge', - onClick: enlargeFontSize - } - ], - [ - { - text: 'Serif', - onClick: _.partial(changeFontFamily, 0) - }, - { - text: 'Sans', - onClick: _.partial(changeFontFamily, 1) - } - ], - [ - { - text: 'White', - onClick: _.partial(changeColorTheme, 0) - }, - { - text: 'Sepia', - onClick: _.partial(changeColorTheme, 1) - }, - { - text: 'Night', - onClick: _.partial(changeColorTheme, 2) - } - ] - ] - }); - - - // Init current settings - init(opts); - }); -}); - - diff --git a/_book/libs/gitbook-2.6.7/js/plugin-search.js b/_book/libs/gitbook-2.6.7/js/plugin-search.js deleted file mode 100644 index 747fccebd..000000000 --- a/_book/libs/gitbook-2.6.7/js/plugin-search.js +++ /dev/null @@ -1,270 +0,0 @@ -gitbook.require(["gitbook", "lodash", "jQuery"], function(gitbook, _, $) { - var index = null; - var fuse = null; - var _search = {engine: 'lunr', opts: {}}; - var $searchInput, $searchLabel, $searchForm; - var $highlighted = [], hi, hiOpts = { className: 'search-highlight' }; - var collapse = false, toc_visible = []; - - function init(config) { - // Instantiate search settings - _search = gitbook.storage.get("search", { - engine: config.search.engine || 'lunr', - opts: config.search.options || {}, - }); - }; - - // Save current search settings - function saveSearchSettings() { - 
gitbook.storage.set("search", _search); - } - - // Use a specific index - function loadIndex(data) { - // [Yihui] In bookdown, I use a character matrix to store the chapter - // content, and the index is dynamically built on the client side. - // Gitbook prebuilds the index data instead: https://github.com/GitbookIO/plugin-search - // We can certainly do that via R packages V8 and jsonlite, but let's - // see how slow it really is before improving it. On the other hand, - // lunr cannot handle non-English text very well, e.g. the default - // tokenizer cannot deal with Chinese text, so we may want to replace - // lunr with a dumb simple text matching approach. - if (_search.engine === 'lunr') { - index = lunr(function () { - this.ref('url'); - this.field('title', { boost: 10 }); - this.field('body'); - }); - data.map(function(item) { - index.add({ - url: item[0], - title: item[1], - body: item[2] - }); - }); - return; - } - fuse = new Fuse(data.map((_data => { - return { - url: _data[0], - title: _data[1], - body: _data[2] - }; - })), Object.assign( - { - includeScore: true, - threshold: 0.1, - ignoreLocation: true, - keys: ["title", "body"] - }, - _search.opts - )); - } - - // Fetch the search index - function fetchIndex() { - return $.getJSON(gitbook.state.basePath+"/search_index.json") - .then(loadIndex); // [Yihui] we need to use this object later - } - - // Search for a term and return results - function search(q) { - let results = []; - switch (_search.engine) { - case 'fuse': - if (!fuse) return; - results = fuse.search(q).map(function(result) { - var parts = result.item.url.split('#'); - return { - path: parts[0], - hash: parts[1] - }; - }); - break; - case 'lunr': - default: - if (!index) return; - results = _.chain(index.search(q)).map(function(result) { - var parts = result.ref.split("#"); - return { - path: parts[0], - hash: parts[1] - }; - }) - .value(); - } - - // [Yihui] Highlight the search keyword on current page - $highlighted = $('.page-inner') - 
.unhighlight(hiOpts).highlight(q, hiOpts).find('span.search-highlight'); - scrollToHighlighted(0); - - return results; - } - - // [Yihui] Scroll the chapter body to the i-th highlighted string - function scrollToHighlighted(d) { - var n = $highlighted.length; - hi = hi === undefined ? 0 : hi + d; - // navignate to the previous/next page in the search results if reached the top/bottom - var b = hi < 0; - if (d !== 0 && (b || hi >= n)) { - var path = currentPath(), n2 = toc_visible.length; - if (n2 === 0) return; - for (var i = b ? 0 : n2; (b && i < n2) || (!b && i >= 0); i += b ? 1 : -1) { - if (toc_visible.eq(i).data('path') === path) break; - } - i += b ? -1 : 1; - if (i < 0) i = n2 - 1; - if (i >= n2) i = 0; - var lnk = toc_visible.eq(i).find('a[href$=".html"]'); - if (lnk.length) lnk[0].click(); - return; - } - if (n === 0) return; - var $p = $highlighted.eq(hi); - $p[0].scrollIntoView(); - $highlighted.css('background-color', ''); - // an orange background color on the current item and removed later - $p.css('background-color', 'orange'); - setTimeout(function() { - $p.css('background-color', ''); - }, 2000); - } - - function currentPath() { - var href = window.location.pathname; - href = href.substr(href.lastIndexOf('/') + 1); - return href === '' ? 'index.html' : href; - } - - // Create search form - function createForm(value) { - if ($searchForm) $searchForm.remove(); - if ($searchLabel) $searchLabel.remove(); - if ($searchInput) $searchInput.remove(); - - $searchForm = $('
', { - 'class': 'book-search', - 'role': 'search' - }); - - $searchLabel = $('",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+S+"-]").length||v.push("~="),(t=C.createElement("input")).setAttribute("name",""),e.appendChild(t),e.querySelectorAll("[name='']").length||v.push("\\["+M+"*name"+M+"*="+M+"*(?:''|\"\")"),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+S+"+*").length||v.push(".#.+[+~]"),e.querySelectorAll("\\\f"),v.push("[\\r\\n\\f]")}),ce(function(e){e.innerHTML="";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",F)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},j=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return 
n||(1&(n=(e.ownerDocument||e)==(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e==C||e.ownerDocument==p&&y(p,e)?-1:t==C||t.ownerDocument==p&&y(p,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e==C?-1:t==C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]==p?-1:s[r]==p?1:0}),C},se.matches=function(e,t){return se(e,null,null,t)},se.matchesSelector=function(e,t){if(T(e),d.matchesSelector&&E&&!N[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){N(t,!0)}return 0":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=m[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&m(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return 
null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,n,r){return m(n)?S.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?S.grep(e,function(e){return e===n!==r}):"string"!=typeof n?S.grep(e,function(e){return-1)[^>]*|#([\w-]+))$/;(S.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||D,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:q.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof S?t[0]:t,S.merge(this,S.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),N.test(r[1])&&S.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(S):S.makeArray(e,this)}).prototype=S.fn,D=S(E);var L=/^(?:parents|prev(?:Until|All))/,H={children:!0,contents:!0,next:!0,prev:!0};function O(e,t){while((e=e[t])&&1!==e.nodeType);return e}S.fn.extend({has:function(e){var t=S(e,this),n=t.length;return this.filter(function(){for(var e=0;e\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i;ce=E.createDocumentFragment().appendChild(E.createElement("div")),(fe=E.createElement("input")).setAttribute("type","radio"),fe.setAttribute("checked","checked"),fe.setAttribute("name","t"),ce.appendChild(fe),y.checkClone=ce.cloneNode(!0).cloneNode(!0).lastChild.checked,ce.innerHTML="",y.noCloneChecked=!!ce.cloneNode(!0).lastChild.defaultValue,ce.innerHTML="",y.option=!!ce.lastChild;var ge={thead:[1,"","
"],col:[2,"","
"],tr:[2,"","
"],td:[3,"","
"],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?S.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;n",""]);var me=/<|&#?\w+;/;function xe(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d\s*$/g;function je(e,t){return A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&S(e).children("tbody")[0]||e}function De(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function qe(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Le(e,t){var n,r,i,o,a,s;if(1===t.nodeType){if(Y.hasData(e)&&(s=Y.get(e).events))for(i in Y.remove(t,"handle events"),s)for(n=0,r=s[i].length;n").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var _t,zt=[],Ut=/(=)\?(?=&|$)|\?\?/;S.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=zt.pop()||S.expando+"_"+wt.guid++;return this[e]=!0,e}}),S.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Ut.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Ut.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Ut,"$1"+r):!1!==e.jsonp&&(e.url+=(Tt.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||S.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?S(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,zt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((_t=E.implementation.createHTMLDocument("").body).innerHTML="
",2===_t.childNodes.length),S.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=N.exec(e))?[t.createElement(i[1])]:(i=xe([e],t,o),o&&o.length&&S(o).remove(),S.merge([],i.childNodes)));var r,i,o},S.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1").append(S.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},S.expr.pseudos.animated=function(t){return S.grep(S.timers,function(e){return t===e.elem}).length},S.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=S.css(e,"position"),c=S(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=S.css(e,"top"),u=S.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,S.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},S.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){S.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var 
e,t,n,r=this[0],i={top:0,left:0};if("fixed"===S.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===S.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=S(e).offset()).top+=S.css(e,"borderTopWidth",!0),i.left+=S.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-S.css(r,"marginTop",!0),left:t.left-i.left-S.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===S.css(e,"position"))e=e.offsetParent;return e||re})}}),S.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;S.fn[t]=function(e){return $(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),S.each(["top","left"],function(e,n){S.cssHooks[n]=Fe(y.pixelPosition,function(e,t){if(t)return t=We(e,n),Pe.test(t)?S(e).position()[n]+"px":t})}),S.each({Height:"height",Width:"width"},function(a,s){S.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){S.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return $(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?S.css(e,t,i):S.style(e,t,n,i)},s,n?e:void 0,n)}})}),S.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){S.fn[t]=function(e){return this.on(t,e)}}),S.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 
1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},hover:function(e,t){return this.mouseenter(e).mouseleave(t||e)}}),S.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){S.fn[n]=function(e,t){return 0 - - - - - - 4 Matrices and arrays | The comprehensive TMB documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
- -
-
- - -
-
- -
-
-

4 Matrices and arrays

-
-

4.1 Relationship to R

-

In R you can apply both matrix multiplication (%*%) and elementwise -multiplication (*) to objects of type “matrix,” i.e. it is the -operator that determines the operation. In TMB we instead have two -different types of objects, while the multiplication operator (*) is -the same:

-
    -
  • matrix: linear algebra
  • -
  • array: elementwise operations; () and [] style indexing.
  • -
  • vector: can be used in linear algebra with matrix, but at the -same time admits R style element-wise operations.
  • -
-

See the file matrix_arrays.cpp for examples of use.

-
-
-

4.2 Relationship to Eigen

-

The TMB types matrix and array (in dimension 2) inherits -directly from the the Eigen types Matrix and Array. The advanced user -of TMB will benefit from familiarity with the -Eigen documentation. -Note that arrays of dimension 3 or higher are specially implemented in -TMB, i.e. are not directly inherited from Eigen.

-
-
-
- -
-
-
- - -
-
- - - - - - - - - - - - - - - diff --git a/_book/search_index.json b/_book/search_index.json deleted file mode 100644 index 02f24a83d..000000000 --- a/_book/search_index.json +++ /dev/null @@ -1 +0,0 @@ -[["Introduction.html", "The comprehensive TMB documentation 1 Introduction", " The comprehensive TMB documentation 1 Introduction TMB (Template Model Builder) is an R package for fitting statistical latent variable models to data. It is strongly inspired by ADMB. Unlike most other R packages the model is formulated in C++. This provides great flexibility, but requires some familiarity with the C/C++ programming language. TMB can calculate first and second order derivatives of the likelihood function by AD, or any objective function written in C++. The objective function (and its derivatives) can be called from R. Hence, parameter estimation via e.g. nlminb() is easy. The user can specify that the Laplace approximation should be applied to any subset of the function arguments. Yields marginal likelihood in latent variable model. Standard deviations of any parameter, or derived parameter, obtained by the ‘delta method’. Pre and post-processing of data done in R. TMB is based on state-of-the art software: CppAD, Eigen, … A more general introduction including the underlying theory used in TMB can be found in this paper. "],["Tutorial.html", "2 Tutorial 2.1 Obtaining data and parameter values from R 2.2 An extended C++ language 2.3 Statistical modelling", " 2 Tutorial A TMB project consists of an R file (.R) and a C++ file (.cpp). The R file does pre- and post processing of data in addition to maximizing the log-likelihood contained in *.cpp. See Examples for more details. All R functions are documented within the standard help system in R. This tutorial describes how to write the C++ file, and assumes familiarity with C++ and to some extent with R. The purpose of the C++ program is to evaluate the objective function, i.e. the negative log-likelihood of the model. 
The program is compiled and called from R, where it can be fed to a function minimizer like nlminb(). The objective function should be of the following C++ type: #include <TMB.hpp> template<class Type> Type objective_function<Type>::operator() () { .... Here goes your C++ code ..... } The first line includes the source code for the whole TMB package (and all its dependencies). The objective function is a templated class where <Type> is the data type of both the input values and the return value of the objective function. This allows us to evaluate both the objective function and its derivatives using the same chunk of C++ code (via the AD package CppAD). The technical aspects of this are hidden from the user. There is however one aspect that surprises the new TMB user. When a constant like “1.2” is used in a calculation that affects the return value it must be “cast” to Type: Type nll; // Define variable that holds the return value (neg. log. lik) nll = Type(1.2); // Assign value 1.2; a cast is needed. 2.1 Obtaining data and parameter values from R Obviously, we will need to pass both data and parameter values to the objective function. This is done through a set of macros that TMB defines for us. 2.1.1 List of data macros DATA_ARRAY(), DATA_FACTOR(), DATA_IARRAY(), DATA_IMATRIX(), DATA_INTEGER(), DATA_IVECTOR(), DATA_MATRIX(), DATA_SCALAR(), DATA_SPARSE_MATRIX(), DATA_STRING(), DATA_STRUCT(), DATA_UPDATE(), DATA_VECTOR() 2.1.2 List of parameter macros PARAMETER(), PARAMETER_ARRAY(), PARAMETER_MATRIX(), PARAMETER_VECTOR() To see which macros are available start typing DATA_ or PARAMETER_ in the Doxygen search field of your browser (you may need to refresh the browser window between each time you make a new search). A simple example if you want to read a vector of numbers (doubles) is the following DATA_VECTOR(x); // Vector x(0),x(1),...,x(n-1), where n is the length of x Note that all vectors and matrices in TMB uses a zero-based indexing scheme. 
It is not necessary to explicitly pass the dimension of x, as it can be retrieved inside the C++ program: int n = x.size(); 2.2 An extended C++ language TMB extends C++ with functionality that is important for formulating likelihood functions. You have different toolboxes available: Standard C++ used for infrastructure like loops etc. Vector, matrix and array library (see Matrices and arrays) Probability distributions (see Densities and R style distributions) In addition to the variables defined through the DATA_ or PARAMETER_ macros there can be “local” variables, for which ordinary C++ scoping rules apply. There must also be a variable that holds the return value (neg. log. likelihood). DATA_VECTOR(x); // Vector x(0), x(1), ..., x(n-1) Type tmp = x(1); Type nll = tmp * tmp; As in ordinary C++ local variable tmp must be assigned a value before it can enter into a calculation. 2.3 Statistical modelling TMB can handle complex statistical problems with hierarchical structure (latent random variables) and multiple data sources. Latent random variables must be continuous (discrete distributions are not handled). The PARAMETER_ macros are used to pass two types of parameters. Parameters: to be estimated by maximum likelihood. These include fixed effects and variance components in the mixed model literature. They will also correspond to hyper parameters with non-informative priors in the Bayesian literature. Latent random variables: to be integrated out of the likelihood using a Laplace approximation. Which of these are chosen is controlled from R, via the random argument to the function MakeADFun. However, on the C++ side it is usually necessary to assign a probability distribution to the parameter. The purpose of the C++ program is to calculate the (negative) joint density of data and latent random variables. 
Each datum and individual latent random effect gives a contribution to log likelihood, which may be though of as a “distribution assignment” by users familiar with software in the BUGS family. PARAMETER_VECTOR(u); // Latent random variable Type nll = Type(0); // Return value nll -= dnorm(u(0),0,1,true) // Distributional assignment: u(0) ~ N(0,1) The following rules apply: Distribution assignments do not need to take place before the latent variable is used in a calculation. More complicated distributional assignments are allowed, say u(0)-u(1) ~ N(0,1), but this requires the user to have a deeper understanding of the probabilistic aspects of the model. For latent variables only normal distributions should be used (otherwise the Laplace approximation will perform poorly). For response variables all probability distributions (discrete or continuous) are allowed. If a non-gaussian latent is needed the “transformation trick” can be used. The namespaces R style distributions and Densities contain many probability distributions, including multivariate normal distributions. For probability distributions not available from these libraries, the user can use raw C++ code: DATA_VECTOR(y); // Data vector Type nll = Type(0); // Return value nll -= sum(-log(Type(1.0)+y*y)); // y are i.i.d. Cauchy distributed See Toolbox for more about statistical modelling. "],["Structure_TMB.html", "3 The structure of TMB", " 3 The structure of TMB This documentation only covers the TMB specific code, not CppAD or Eigen These packages have their own documentation, which may be relevant. In particular, some of the standard functions like sin() and cos() are part of CppAD, and are hence not documented through TMB. TMB components "],["matrix_arrays.html", "4 Matrices and arrays 4.1 Relationship to R 4.2 Relationship to Eigen", " 4 Matrices and arrays 4.1 Relationship to R In R you can apply both matrix multiplication (%*%) and elementwise multiplication (*) to objects of type “matrix,” i.e. 
it is the operator that determines the operation. In TMB we instead have two different types of objects, while the multiplication operator (*) is the same: matrix: linear algebra array: elementwise operations; () and [] style indexing. vector: can be used in linear algebra with matrix, but at the same time admits R style element-wise operations. See the file matrix_arrays.cpp for examples of use. 4.2 Relationship to Eigen The TMB types matrix and array (in dimension 2) inherits directly from the the Eigen types Matrix and Array. The advanced user of TMB will benefit from familiarity with the Eigen documentation. Note that arrays of dimension 3 or higher are specially implemented in TMB, i.e. are not directly inherited from Eigen. "],["R_style_distribution.html", "5 R style probability distributions", " 5 R style probability distributions Attempts have been made to make the interface (function name and arguments) as close as possible to that of R. The densities (d...) are provided both in the discrete and continuous case, cumulative distributions (p...) and inverse cumulative distributions (q...) are provided only for continuous distributions. Scalar and vector arguments (in combination) are supported, but not array or matrix arguments. The last argument (of type int) corresponds to the log argument in R: 1=logaritm, 0=ordinary scale. true (logaritm) and false (ordinary scale) can also be used. Vector arguments must all be of the same length (no recycling of elements). If vectors of different lengths are used an “out of range” error will occur, which can be picked up by the debugger. DATA_IVECTOR() and DATA_INTEGER() cannot be used with probability distributions, except possibly for the last (log) argument. 
An example: DATA_SCALAR(y); DATA_VECTOR(x); vector<Type> rate(10); matrix<Type> rate_matrix(10, 10); dexp(y, rate, true); // OK, returns vector of length 10 of log-densities dexp(x, rate, true); // OK if x is length 10 dexp(x, rate_matrix, true); // Error, matrix arguments not allowed To sum over elements in the vector returned use sum(dexp(x,rate)); "],["Densities.html", "6 Multivariate distributions 6.1 Multivariate normal distributions 6.2 Autoregressive processes 6.3 Gaussian Markov random fields (GMRF) 6.4 Separable construction of covariance (precision) matrices", " 6 Multivariate distributions The namespace using namespace density; gives access to a variety of multivariate normal distributions: Multivariate normal distributions specified via a covariance matrix (structured or unstructured). Autoregressive (AR) processes. Gaussian Markov random fields (GMRF) defined on regular grids or defined via a (sparse) precision matrix. Separable covariance functions, i.e. time-space separability. These seemingly unrelated concepts are all implemented via the notion of a distribution, which explains why they are placed in the same namespace. You can combine two distributions, and this lets you build up complex multivariate distributions using extremely compact notation. Due to the flexibility of the approach it is more abstract than other parts of TMB, but here it will be explained from scratch. Before looking at the different categories of multivariate distributions we note the following which is of practical importance: All members in the density namespace return the negative log density, opposed to the univariate densities in R style distributions. 
6.1 Multivariate normal distributions Consider a zero-mean multivariate normal distribution with covariance matrix Sigma (symmetric positive definite), that we want to evaluate at x: int n = 10; vector<Type> x(n); // Evaluation point x.fill(0.0); // Point of evaluation: x = (0,0,...,0) The negative log-normal density is evaluated as follows: using namespace density; matrix<Type> Sigma(n,n); // Covariance matrix // ..... User must assign value to Sigma here res = MVNORM(Sigma)(x); // Evaluate negative log likelihood In the last line MVNORM(Sigma) should be interpreted as a multivariate density, which via the last parenthesis (x) is evaluated at x. A less compact way of expressing this is MVNORM_t<Type> your_dmnorm(Sigma); res = your_dmnorm(x); in which your_dmnorm is a variable that holds the “density.” Note, that the latter way (using the MVNORM_t) is more efficient when you need to evaluate the density more than once, i.e. for different values of x. Sigma can be parameterized in different ways. Due to the symmetry of Sigma there are at most n(n+1)/2 free parameters (n variances and n(n-1)/2 correlation parameters). If you want to estimate all of these freely (modulo the positive definite constraint) you can use UNSTRUCTURED_CORR() to specify the correlation matrix, and VECSCALE() to specify variances. UNSTRUCTURED_CORR() takes as input a vector of dummy parameters that internally is used to build the correlation matrix via its Cholesky factor. using namespace density; int n = 10; vector<Type> unconstrained_params(n*(n-1)/2); // Dummy parameterization of correlation matrix vector<Type> sds(n); // Standard deviations res = VECSCALE(UNSTRUCTURED_CORR(unconstrained_params),sds)(x); If all elements of dummy_params are estimated we are in effect estimating a full correlation matrix without any constraints on its elements (except for the mandatory positive definiteness). 
The actual value of the correlation matrix, but not the full covariance matrix, can easily be assessed using the .cov() operator matrix<Type> Sigma(n,n); Sigma = UNSTRUCTURED_CORR(unconstrained_params).cov(); REPORT(Sigma); // Report back to R session 6.2 Autoregressive processes Consider a stationary univariate Gaussian AR1 process x(t),t=0,…,n-1. The stationary distribution is chosen so that: x(t) has mean 0 and variance 1 (for all t). The multivariate density of the vector x can be evaluated as follows int n = 10; using namespace density; vector<Type> x(n); // Evaluation point x.fill(0.0); // Point of evaluation: x = (0,0,...,0) Type rho = 0.2; // Correlation parameter res = AR1(rho)(x); // Evaluate negative log-density of AR1 process at point x Due to the assumed stationarity the correlation parameter must satisfy: Stationarity constraint: -1 < rho < 1 Note that cor[x(t),x(t-1)] = rho. The SCALE() function can be used to set the standard deviation. Type sigma = 2.1; // standard deviation of x res = SCALE(AR1(rho),sigma)(x); Now, var[x(t)] = sigma^2. Because all elements of x are scaled by the same constant we use SCALE rather than VECSCALE. 6.2.0.1 Multivariate AR1 processes This is the first real illustration of how distributions can be used as building blocks to obtain more complex distributions. Consider the p dimensional AR1 process int n = 10; // Number of time steps int p=3; // dim(x) array<Type> x(p,n); // Evaluation point The columns in x refer to the different time points. We then evaluate the (negative log) joint density of the time series. MVNORM_t<Type> your_dmnorm(Sigma); // Density of x(t) Type phi; // Correlation parameter res = AR1(phi,your_dmnorm)(x); Note the following: We have introduced an intermediate variable your_dmnorm, which holds the p-dim marginal density of x(t). This is a zero-mean normal density with covariance matrix Sigma. All p univariate time series have the same serial correlation phi. 
The multivariate process x(t) is stationary in the same sense as the univariate AR1 process described above. 6.2.0.2 Higher order AR processes There also exists ARk_t of arbitrary autoregressive order. 6.3 Gaussian Markov random fields (GMRF) GMRF may be defined in two ways: Via a (sparse) precision matrix Q. Via a d-dimensional lattice. For further details please see GMRF_t. Under 1) a sparse Q corresponding to a Matern covariance function can be obtained via the R_inla namespace. 6.4 Separable construction of covariance (precision) matrices A typical use of separability is to create space-time models with a sparse precision matrix. Details are given in SEPARABLE_t. Here we give a simple example. Assume that we study a quantity x that changes both in space and time. For simplicity we consider only a one-dimensional space. We discretize space and time using equidistant grids, and assume that the distance between grid points is 1 in both dimensions. We then define an AR1(rho_s) process in space and one in time AR1(rho_t). The separable assumption is that two points x1 and x2, separated in space by a distance ds and in time by a distance dt, have correlation given by rho_s^ds*rho_t^dt This is implemented as using namespace density; int n_s = 10; // Number of grid points in space int n_t = 10; // Number of grid points in time Type rho_s = 0.2; // Correlation in space Type rho_t = 0.4; // Correlation in time array<Type> x(n_s,n_t); x.setZero(); // x = 0 res = SEPARABLE(AR1(rho_t),AR1(rho_s))(x); Note that the arguments to SEPARABLE() are given in the opposite order to the dimensions of x. "],["Examples.html", "7 Example collection 7.1 Example overview", " 7 Example collection A list of all examples is found on the “Examples” tab on the top of the page. Locations of example files: adcomp/tmb_examples and adcomp/TMB/inst/examples. For each example there is both a .cpp and a .R file. 
Take for instance the linear regression example: C++ template // Simple linear regression. #include <TMB.hpp> template<class Type> Type objective_function<Type>::operator() () { DATA_VECTOR(Y); DATA_VECTOR(x); PARAMETER(a); PARAMETER(b); PARAMETER(logSigma); ADREPORT(exp(2*logSigma)); Type nll = -sum(dnorm(Y, a+b*x, exp(logSigma), true)); return nll; } Controlling R code library(TMB) compile("linreg.cpp") dyn.load(dynlib("linreg")) set.seed(123) data <- list(Y = rnorm(10) + 1:10, x=1:10) parameters <- list(a=0, b=0, logSigma=0) obj <- MakeADFun(data, parameters, DLL="linreg") obj$hessian <- TRUE opt <- do.call("optim", obj) opt opt$hessian ## <-- FD hessian from optim obj$he() ## <-- Analytical hessian sdreport(obj) To run this example use the R command source("linreg.R") 7.1 Example overview Example Description adaptive_integration.cpp Adaptive integration using ‘tiny_ad’ ar1_4D.cpp Separable covariance on 4D lattice with AR1 structure in each direction. compois.cpp Conway-Maxwell-Poisson distribution fft.cpp Multivariate normal distribution with circulant covariance hmm.cpp Inference in a ‘double-well’ stochastic differential equation using HMM filter. laplace.cpp Laplace approximation from scratch demonstrated on ‘spatial’ example. linreg_parallel.cpp Parallel linear regression. linreg.cpp Simple linear regression. longlinreg.cpp Linear regression - 10^6 observations. lr_test.cpp Illustrate map feature of TMB to perform likelihood ratio tests on a ragged array dataset. matern.cpp Gaussian process with Matern covariance. mvrw_sparse.cpp Identical with random walk example. Utilizing sparse block structure so efficient when the number of states is high. mvrw.cpp Random walk with multivariate correlated increments and measurement noise. 
nmix.cpp nmix example from https://groups.nceas.ucsb.edu/non-linear-modeling/projects/nmix orange_big.cpp Scaled up version of the Orange Tree example (5000 latent random variables) register_atomic_parallel.cpp Parallel version of ‘register_atomic’ register_atomic.cpp Similar to example ‘adaptive_integration’ using CppAD Romberg integration. REGISTER_ATOMIC is used to reduce tape size. sam.cpp State space assessment model from Nielsen and Berg 2014, Fisheries Research. sde_linear.cpp Inference in a linear scalar stochastic differential equation. sdv_multi_compact.cpp Compact version of sdv_multi sdv_multi.cpp Multivariate SV model from Skaug and Yu 2013, Comp. Stat & data Analysis (to appear) socatt.cpp socatt from ADMB example collection. spatial.cpp Spatial poisson GLMM on a grid, with exponentially decaying correlation function spde_aniso_speedup.cpp Speedup “spde_aniso.cpp” by moving normalization out of the template. spde_aniso.cpp Anisotropic version of “spde.cpp.” spde.cpp Illustration of the SPDE/INLA approach to spatial modelling via Matern correlation function thetalog.cpp Theta logistic population model from Pedersen et al 2012, Ecol. Modelling. TMBad/interpol.cpp Demonstrate 2D interpolation operator TMBad/sam.cpp State space assessment model from Nielsen and Berg 2014, Fisheries Research. TMBad/solver.cpp Demonstrate adaptive solver of TMBad TMBad/spa_gauss.cpp Demonstrate saddlepoint approximation (SPA) TMBad/spatial.cpp Spatial poisson GLMM on a grid, with exponentially decaying correlation function TMBad/spde_epsilon.cpp Low-level demonstration of fast epsilon bias correction using ‘sparse plus lowrank’ hessian TMBad/thetalog.cpp Theta logistic population model from Pedersen et al 2012, Ecol. Modelling. transform_parallel.cpp Parallel version of transform transform.cpp Gamma distributed random effects using copulas. transform2.cpp Beta distributed random effects using copulas. tweedie.cpp Estimating parameters in a Tweedie distribution. 
validation/MVRandomWalkValidation.cpp Estimate and validate a multivariate random walk model with correlated increments and correlated observations. validation/randomwalkvalidation.cpp Estimate and validate a random walk model with and without drift validation/rickervalidation.cpp Estimate and validate a Ricker model based on data simulated from the logistic map "],["Errors.html", "8 Compilation and run time errors 8.1 Compilation errors 8.2 Run time errors", " 8 Compilation and run time errors The R interface to the debugger (gdb) is documented as part of the R help system, i.e. you can type ?gdbsource in R to get info. The current document only addresses issues that relate to C++. 8.1 Compilation errors It may be hard to understand the compilation errors for the following reasons The Eigen libraries use templated C++ which generate non-intuitive error messages. 8.2 Run time errors Run time errors are broadly speaking of two types: Out-of-bounds (you are “walking out of an array”) Floating point exceptions You can use the debugger to locate both types of errors, but the procedure is a little bit different in the two cases. The following assumes that you have the GNU debugger gdb installed. 8.2.1 Out-of-bounds error An example is: vector<Type> y(4); y(5); // 5 is not a valid index value here This will cause TMB and R to crash with the following error message: TMB has received an error from Eigen. The following condition was not met: index >= 0 && index < size() Please check your matrix-vector bounds etc., or run your program through a debugger. Aborted (core dumped) So, you must restart R and give the commands library(TMB) gdbsource("my_project.R") #5 objective_function::operator() (this=) at nan_error_ex.cpp:11 and you can see that the debugger points to line number 11 in the .cpp file. gdbsource() is an R function that is part of TMB. 
8.2.2 Floating point exception If you on the other hand perform an illegal mathematical operation, such as Type f = sqrt(-1.); R will not crash, but the objective function will return a NaN value. However, you will not know in which part of your C++ code the error occurred. By including the fenv.h library (part of many C++ compilers, but can otherwise be downloaded from http://www.scs.stanford.edu/histar/src/uinc/fenv.h) nan_error_ex.cpp: // Illustrates how to make the debugger catch a floating point error. #include <TMB.hpp> #include <fenv.h> // Extra line needed template<class Type> Type objective_function<Type>::operator() () { feenableexcept(FE_INVALID | FE_OVERFLOW | FE_DIVBYZERO | FE_UNDERFLOW); // Extra line needed DATA_SCALAR(lambda); PARAMETER(x); Type f; f = sqrt(-1.); // FE_INVALID ( sqrt(-1.) returns NaN ) //f = 1./0.; // FE_DIVBYZERO ( division by zero ) //f = exp(100000.); // FE_OVERFLOW ( exp(100000.) returns Inf ) [Does not work on all platforms] //f = exp(-100000.); // FE_UNDERFLOW ( exp(-100000.) returns 0 ) return f; } a floating point exception will be turned into an actual error that can be picked up by the debugger. There are only two extra lines that need to be included (“//Extra line needed” in the above example). When we try to run this program in the usual way, the program crashes: source("nan_error_ex.R") Floating point exception (core dumped) tmp3> At this stage you should run the debugger to find out that the floating point exception occurs at line number 14: library(TMB) gdbsource("nan_error_ex.R") #1 0x00007ffff0e7eb09 in objective_function::operator() (this=) at nan_error_ex.cpp:14 This enabling of floating point errors applies to R as well as the TMB program. For more elaborate R-scripts it may therefore happen that a NaN occurs in the R-script before the floating point exception in the TMB program (i.e. the problem of interest) happens. 
To circumvent this problem one can run without NaN debugging enabled and save the parameter vector that gave the floating point exception (e.g. badpar <- obj$env$last.par after the NaN evaluation), then enable NaN debugging, re-compile, and evaluate obj$env$f( badpar, type=\"double\"). 8.2.3 Missing casts for vectorized functions TMB vectorized functions cannot be called directly with expressions, for example the following will fail to compile: DATA_VECTOR(x); // Don't do this! Doesn't compile vector<Type> out = lgamma(x + 1); error: could not convert ‘atomic::D_lgamma(const CppAD::vector&) … from ‘double’ to ‘Eigen::CwiseBinaryOp<Eigen::internal::scalar_sum_op<double, double>, … >’ Eigen lazy-evaluates expressions, and the templating of lgamma means we expect to return a “x + y”-typed object, which it obviously can’t do. To work around this, cast the input: DATA_VECTOR(x); vector<Type> out = lgamma(vector<Type>(x + 1)); "],["Toolbox.html", "9 Toolbox", " 9 Toolbox First read the Statistical Modelling section of Tutorial. 9.0.1 Non-normal latent variables (random effects) The underlying latent random variables in TMB must be Gaussian for the Laplace approximation to be accurate. To obtain other distributions, say a gamma distribution, the “transformation trick” can be used. We start out with normally distributed variables u and transform these into new variables w via the pnorm and qgamma functions as follows: PARAMETER_VECTOR(u); // Underlying latent random variables Type nll=Type(0.0); nll -= sum(dnorm(u,Type(0),Type(1),true)); // Assign N(0,1) distribution u vector<Type> v = pnorm(u,Type(0),Type(1)); // Uniformly distributed variables (on [0,1]) vector<Type> w = qgamma(v,shape,scale); w now has a gamma distribution. 9.0.2 Discrete latent variables The Laplace approximation can not be applied to discrete latent variables that occur in mixture models and HMMs (Hidden Markov models). However, such likelihoods have analytic expressions, and may be coded up in TMB. 
TMB would still calculate the exact gradient of the HMM likelihood. 9.0.3 Mixture models Although mixture models are a special case of discrete latent variable models, they do deserve special attention. Consider the case that we want a mixture of two zero-mean normal distributions (with different standard deviations). This can be implemented as: DATA_VECTOR(x); PARAMETER_VECTOR(sigma); // sigma0 and sigma1 PARAMETER(p); // Mixture proportion of model 0 Type nll=Type(0.0); nll -= sum( log( p * dnorm(x, Type(0), sigma(0), false) + (1.0-p) * dnorm(x, Type(0), sigma(1), false) ) ); 9.0.4 Time series Autoregressive (AR) processes may be implemented using the compact notation of section Densities. The resulting AR process may be applied both in the observational part and in the distribution of a latent variable. Nonlinear time series must be implemented from scratch, as in the example thetalog.cpp 9.0.5 Spatial models TMB has strong support for spatial models and space-time models via the GMRF() and SEPARABLE() functions, and the notion of a distribution. The reader is referred to section Densities for details and examples. "],["CppTutorial.html", "10 C++ tutorial", " 10 C++ tutorial 10.0.1 I know R but not C++ Summary of how syntax differs between R and C++: R code C++/TMB code Comments # // // Comment symbol Constants 3.4 Type(3.4); // Explicit casting recommended in TMB Scalar x = 5.2 Type x = Type(5.2); // Variables must have type Arrays x = numeric(10) vector<Type> x(10); // C++ code here does NOT initialize to 0 Indexing x[1]+x[10] x(0)+x(9); // C++ indexing is zero-based Loops for(i in 1:10) for(int i=1;i<=10;i++) // Integer i must be declared in C++ Increments x[1] = x[1] + 3 x(0) += 3.0; // += -= *= /= incremental operators in C++ It is important to note the following difference compared to R: Vectors, matrices and arrays are not zero-initialized in C++. 
A zero initialized object is created using Eigen's setZero(): matrix<Type> m(4,5); m.setZero(); 10.0.2 I know C++ TMB specific C++ include: - You should not use if(x) statements where x is a PARAMETER, or is derived from a variable of type PARAMETER. (It is OK to use if on DATA types, however.) TMB will remove the if(x) statement, so the code will produce unexpected results. "],["ModelObject.html", "11 Model object", " 11 Model object The TMB core model object is the object returned by MakeADFun(). A number of options can be passed to MakeADFun to control the model. The current section walks you through all the options. Additionally we demonstrate some of the methods that can be applied to a fitted model object. We shall see how to: Fix and collect parameters using the map argument. Switch parameters back and forth between the inner and outer optimization problem using the arguments random and profile. Set options for the inner optimization problem. sdreporting a fitted object. Bias correction of random effect estimates. Likelihood profiling a fitted object. FIXME: NOT DONE YET ! "],["Sparsity.html", "12 Sparsity 12.1 Conditional independence graphs and DAGs 12.2 The theta logistic example", " 12 Sparsity Large random effect models require sparsity in order to work in TMB. In this section we will discuss: What exactly we mean by sparsity. How to formulate sparse models (the same model can sometimes be formulated as both dense and sparse). How to calculate the sparsity pattern of a given TMB model. How to visualize sparsity either as a matrix or a graph. How to use sparsity for general optimization problems (not just random effects). 12.1 Conditional independence graphs and DAGs 12.1.1 Conditional independence There are various graph representations that are commonly used to visualize probabilistic structure. One such is the conditional independence graph. 
Say we have a model of four random variables \\(X_1,...,X_4\\) for which the joint density is: \\[p(x_1,x_2,x_3,x_4) \\propto f_1(x_1,x_2)f_2(x_2,x_3)f_3(x_3,x_4)f_4(x_4,x_1)\\] The separability of factors on the right hand side implies some conditional independence properties. For instance if \\(x_1\\) and \\(x_3\\) are held constant then \\(x_2\\) and \\(x_4\\) vary independently. We say that \\(x_2\\) and \\(x_4\\) are conditionally independent given \\(x_1\\) and \\(x_3\\). The conditional independence graph is defined by drawing undirected edges between variables occurring in the same factor \\(f_i\\): Equivalently the graph may be visualized via its adjacency matrix: This is the sparsity pattern of the model. The sparsity pattern visualizes the conditional independence structure of the random effects in the model. 12.1.2 Node removal properties Important probabilistic properties can be deduced directly from the graph. This is due to the following node removal properties. The conditional distribution given node \\(X_i\\) is found by removing \\(X_i\\) and its edges from the graph. For instance conditional on \\(X_4\\) we get the following graph: The marginal distribution wrt. a node \\(X_i\\) is found by removing \\(X_i\\) from the graph and connecting all \\(X_i\\)’s neighbors. For instance when integrating \\(X_4\\) out of the joint density we get the following graph for the remaining nodes: Conditioning preserves sparseness. Marginalizing tends to destroy sparseness by adding more edges to the graph. 12.1.3 Directed acyclic graph When building models in TMB it is often more natural to specify processes in incremental steps - i.e. through the successive conditional distributions. The previous example could be simulated by drawing the variables \\(X_1,X_2,X_3,X_4\\) one by one in the given order as illustrated by the following directed graph: The graph shows dependencies of any specific node given past nodes. 
The edge from \\(X_1\\) to \\(X_3\\) was not in the original (undirected) graph. This is a so-called fill-in. Order matters. The DAG is different from the conditional independence graph. 12.1.4 The effect of adding data It is convenient to use a box-shape for nodes that represent data. For instance if we pretend that \\(X_4\\) is a data point we would illustrate it by: Here there are only three variables left. The conditional independence structure of the variables is: which is the same graph as was previously found by integrating \\(X_4\\) out of the joint distribution. Data nodes destroy sparsity the same way as marginalization. To avoid this, try to associate each data point with a single random effect. 12.2 The theta logistic example Consider the ``theta logistic’’ population model (Pedersen et al. 2011). This is a state-space model with state equation \\[u_t = u_{t-1} + r_0\\left(1-\\left(\\frac{\\exp(u_{t-1})}{K}\\right)^\\psi\\right) + e_t\\] and observation equation \\[y_t = u_t + v_t\\] where \\(e_t \\sim N(0,Q)\\), \\(v_t \\sim N(0,R)\\) and \\(t\\in \\{0,...,n-1\\}\\). A uniform prior is implicitly assigned to \\(u_0\\). The joint negative log-likelihood of state vector \\(u\\) and measurements \\(y\\) is implemented in the C++ template thetalog.cpp. The example can be run by: runExample("thetalog", exfolder="adcomp/tmb_examples") We demonstrate it in the case \\(n=5\\). Here is the DAG This is a standard hidden Markov structure. Each data node is bound to a single random effect - hence the data does not introduce additional edges in the random effect structure. We can use the image function from the Matrix package to plot the random effect structure (we must first load the Matrix package): library(Matrix) obj <- MakeADFun(data, parameters, random=c("X"), DLL="thetalog") image(obj$env$spHess(random=TRUE)) FIXME: NOT DONE YET ! 
References "],["Simulation.html", "13 Simulation 13.1 Overview of simulation methods in TMB 13.2 Simulation blocks", " 13 Simulation When building models in TMB it is generally recommended to test the implementation on simulated data. Obviously, data can be simulated from R and passed to the C++ template. In practice this amounts to implementing the model twice and is thus a strong way to validate the implementation of the model. However, with increased model complexity it becomes inconvenient to maintain two separate implementations. Therefore, TMB allows the user to write the simulation code as an integrated part of the C++ model template. 13.1 Overview of simulation methods in TMB 13.1.1 Standard generators The TMB simulation routines use the same naming convention as the R simulators. For instance rnorm() is used to simulate from a normal distribution. However, the argument convention is slightly different: rnorm(n, mu, sd) draws n simulations from a normal distribution. Unlike R this works for scalar parameters only. rnorm(mu, sd) is a TMB specific variant that works for mixed scalar and vector input. Output length follows the length of the longest input (no re-cycling) hence is consistent with dnorm(mu, sd). Currently the following simulators are implemented: rnorm(), rpois(), runif(), rbinom(), rgamma(), rexp(), rbeta(), rf(), rlogis(), rt(), rweibull(), rcompois(), rtweedie(), rnbinom(), rnbinom2() 13.1.2 Generators for density objects Objects from the density namespace have their own simulate() method. Taking the multivariate normal distribution as example we have the following ways to draw a simulation: MVNORM(Sigma).simulate() returns a vector with a simulation from the multivariate normal distribution. The void argument version is only available when there is no ambiguity in the dimension of the output. In the MVNORM case the dimension of the output is known from the dimension of Sigma. In other cases e.g. 
AR1(phi) the dimension of the output is not known hence the void argument version is not available. MVNORM(Sigma).simulate(x) pass x by reference and writes the simulation directly to x without returning anything. This version is available for all the classes because the dimension of the simulation can always be deduced from x. 13.1.3 Controlling the random seed All TMB simulation methods are based on R’s random number generator. It follows that the random seed can be controlled from R the usual way using set.seed even though the simulation is performed on the C++ side. 13.2 Simulation blocks Simulation functions can be called from anywhere in the C++ program. However, usually one should put the simulation code inside specialized simulation blocks that allows the code to only be executed when requested from R. 13.2.1 A linear regression example A complete example extending the example linreg.cpp with simulation code is: #include <TMB.hpp> template<class Type> Type objective_function<Type>::operator() () { DATA_VECTOR(y); DATA_VECTOR(x); PARAMETER(a); PARAMETER(b); PARAMETER(sd); vector<Type> mu = a + b * x; Type nll = -sum(dnorm(y, mu, sd, true)); SIMULATE { y = rnorm(mu, sd); // Simulate response REPORT(y); // Report the simulation } return nll; } The SIMULATE block marks the simulation and is not executed by default. We compile the C++-file and the model object is constructed as usual: obj <- MakeADFun(data, parameters, DLL="linreg") Now a simulation can be generated with set.seed(1) ## optional obj$simulate() ## $y ## [1] -0.6264538 0.1836433 -0.8356286 1.5952808 0.3295078 -0.8204684 ## [7] 0.4874291 0.7383247 0.5757814 -0.3053884 This only includes the simulated response - not the rest of the data. 
A complete dataset can be generated by: set.seed(1) ## optional - Note: same y as previous obj$simulate(complete=TRUE) ## $y ## [1] -0.6264538 0.1836433 -0.8356286 1.5952808 0.3295078 -0.8204684 ## [7] 0.4874291 0.7383247 0.5757814 -0.3053884 ## ## $x ## [1] 1 2 3 4 5 6 7 8 9 10 ## ## attr(,"check.passed") ## [1] TRUE Here we did not explicitly state the parameter values to use with the simulation. The simulate method takes an additional argument par that can be used for this. The default parameter values used for the simulation are obj$env$last.par. 13.2.2 A simulation study Simulating datasets from known parameters and re-estimating those parameters can be done generically by: sim <- replicate(50, { simdata <- obj$simulate(par=obj$par, complete=TRUE) obj2 <- MakeADFun(simdata, parameters, DLL="linreg", silent=TRUE) nlminb(obj2$par, obj2$fn, obj2$gr)$par }) We reshape and plot the result: library(lattice) df <- data.frame(estimate=as.vector(sim), parameter=names(obj$par)[row(sim)]) densityplot( ~ estimate | parameter, data=df, layout=c(3,1)) Compare with the true parameter values of the simulation: obj$par ## a b sd ## 0 0 1 13.2.3 Advanced examples The examples sam.cpp and ar1_4D.cpp includes more advanced simulation code. The latter demonstrates how to simulate from the density objects: // Separable covariance on 4D lattice with AR1 structure in each direction. 
#include <TMB.hpp> /* Parameter transform */ template <class Type> Type f(Type x){return Type(2)/(Type(1) + exp(-Type(2) * x)) - Type(1);} template<class Type> Type objective_function<Type>::operator() () { DATA_VECTOR(N) PARAMETER_ARRAY(eta); PARAMETER(transf_phi); /* fastest running dim */ Type phi=f(transf_phi); ADREPORT(phi); using namespace density; Type res=0; res+=AR1(phi,AR1(phi,AR1(phi,AR1(phi))))(eta); // logdpois = N log lam - lam for(int i=0;i<N.size();i++)res-=N[i]*eta[i]-exp(eta[i]); SIMULATE { AR1(phi,AR1(phi,AR1(phi,AR1(phi)))).simulate(eta); vector<Type> lam = exp(eta); N = rpois(lam); REPORT(eta); REPORT(N); } return res; } In this example the 4D-array eta is passed to the simulator by reference. Thereby the simulator knows the dimension of eta and can fill eta with a simulation. 13.2.4 Further notes The above example only used one simulation block. In general there is no limitation on the number of simulation blocks that can be used in a model and simulation blocks can use temporaries calculated outside the blocks (as demonstrated in the linear regression example). For clarity reasons, it is often a good idea to add a simulation block after each likelihood contribution. However, note that simulation blocks are in general not commutative (unlike likelihood accumulation). It is therefore further recommended to add likelihood contributions of random effects in the natural hierarchical order. "],["Validation.html", "14 Validation 14.1 Residuals 14.2 Checking the Laplace approximation", " 14 Validation 14.1 Residuals The underlying framework is the same for all cases listed in this section. [ Description of general framework FIXME ] For models that does not include random effects the calculations can be simplified greatly. 14.1.1 Models without random effects 14.1.1.1 Normal distribution (Pearson residuals) . 
This example shows how standardized residuals can be calculated within the template code and reported back to R using the REPORT function in TMB. // linear regression with reporting of residuals #include <TMB.hpp> template<class Type> Type objective_function<Type>::operator() () { DATA_VECTOR(Y); DATA_VECTOR(x); PARAMETER(a); PARAMETER(b); PARAMETER(logSigma); Type sigma = exp(logSigma); Vector<Type> pred = a + b*x; Type nll = -sum(dnorm(Y, a+b*x, sigma, true)); Vector<Type> residuals = (Y - pred)/sigma; REPORT(residuals); return nll; } Assuming that the model parameters have been fitted, and the model object is called obj, the standardized residuals can now be extracted from the model object usinig the report() function and inspected for normality as follows: ... rep <- obj$report() qqnorm(rep$residuals) abline(0,1) 14.1.1.2 Non-normal distributions 14.1.1.2.1 Continuous We now consider situations where the error distribution is continuous but not Gaussian. Residuals that are standard normal distributed given that the model is correct, can be obtained be using the “transformation trick,” here illustrated using a model that fits a gamma distribution. #include <TMB.hpp> template<class Type> Type objective_function<Type>::operator() () { DATA_VECTOR(Y); PARAMETER(shape); PARAMETER(scale); Type nll=-dgamma(Y,shape,scale,true).sum(); vector<Type> residuals = qnorm( pgamma(Y,shape,scale) ); REPORT(residuals); return nll; } 14.1.1.2.2 Discrete For discrete probability distributions the transformation trick can also be used, but an element of randomization must be added in order to obtain residuals that are truly Gaussian. Assume that you have a series of observed counts y and you have fitted some TMB model using a Poisson likelihood, and the predicted values from that model have been reported and saved in a vector called mu. ... 
a <- ppois(y - 1, mu) b <- ppois(y, mu) u <- runif(n = length(y), min = a, max = b) residuals <- qnorm(u) 14.1.2 Models with random effects Model validation using residuals is considerably more complicated for random effect models. Further information can be found in (Thygesen et al. 2017) FIXME: not generating reference. 14.1.2.1 One-step-ahead residuals Other names are one step prediction errors, forecast pseudo-residuals, and recursive residuals. These residuals can be computed using the oneStepPredict function. There are several methods available within this function, and it is the responsibility of the user to ensure that an appropriate method is chosen for a given model. The following examples of its use are availabe in the tmb_examples/validation folder. Example Description validation/MVRandomWalkValidation.cpp Estimate and validate a multivariate random walk model with correlated increments and correlated observations. validation/randomwalkvalidation.cpp Estimate and validate a random walk model with and without drift validation/rickervalidation.cpp Estimate and validate a Ricker model based on data simulated from the logistic map 14.1.2.2 One sample from the posterior An alternative (and faster) method is based on a single sample of the random effects from the their posterior distribution given the data. For state space models we can derive both process- and observation errors from the single sample and the observations, and compare these with the assumptions in the model. An example can be found at the end of the randomwalkvalidation.R file in the tmb_examples/validation folder 14.2 Checking the Laplace approximation FIXME: References "],["AtomicFunctions.html", "15 Atomic functions 15.1 Reverse mode differentiation 15.2 Example: Adding new primitive function with known derivatives 15.3 Other approaches", " 15 Atomic functions Custom functions and derivatives can be added to the TMB library. This may be necessary for the following reasons: Adaptive (e.g. 
iterative) algorithms cannot be represented by a fixed computational graph and thus cannot be directly differentiated using TMB. Algorithms that use parameter dependent if-else branching are examples of such functions. Some functions have so many floating point operations that it is infeasible to expand the computational graph. Memory usage may be greatly reduced in such cases by collapsing the computational graph to a singe node with multiple inputs and outputs. 15.1 Reverse mode differentiation TMB uses CppAD as its engine for reverse mode derivatives. In order to add a new primitive function \\[f: R^n \\rightarrow R^m\\] we must inform CppAD how to calculate derivatives of this function in reverse mode. That is, for any range space vector \\(w \\in R^m\\) we must calculate the gradient of the function \\(R^n \\rightarrow R\\) given by \\[ x \\rightarrow \\text{sum}( f(x) \\odot w ) \\] where ‘\\(\\odot\\)’ is pointwise multiplication. 15.2 Example: Adding new primitive function with known derivatives As an example consider the Lambert W function defined implicitly by \\[y = W(y e^y)\\] Here, we only consider \\(W\\) as defined on the positive reals. It follows, by differentiating the above identity, that \\[ W'(x) = \\frac{1}{ \\exp\\left(W(x)\\right) \\left(1 + W(x)\\right) } \\] When coding reverse-mode derivatives we can assume that the function value \\(W(x)\\) has already been computed during a forward pass. For efficiency reasons we should use this intermediate calculation rather than re-calculating \\(W(x)\\) in the reverse pass. We’ll assume that a plain C++ function (taking double types as input/output) is available to calculate \\(W(x)\\). 
It doesn’t matter whether you have the source code of an implementation or just the header with linkage to an external library: double LambertW(double x); The macro TMB_ATOMIC_VECTOR_FUNCTION() is used to declare our new primitive Lambert \\(W\\) function: TMB_ATOMIC_VECTOR_FUNCTION( // ATOMIC_NAME LambertW , // OUTPUT_DIM 1, // ATOMIC_DOUBLE ty[0] = LambertW(tx[0]); // Call the 'double' version , // ATOMIC_REVERSE Type W = ty[0]; // Function value from forward pass Type DW = 1. / (exp(W) * (1. + W)); // Derivative px[0] = DW * py[0]; // Reverse mode chain rule ) Let’s explain in detail what is going on. The macro takes four arguments: ATOMIC_NAME: Name of new primitive function taking CppAD::vector as input and output. OUTPUT_DIM: Dimension of the CppAD::vector which is the function output. ATOMIC_DOUBLE: Specifies how to evaluate the primitive function for the ordinary double type. tx denotes the input vector and ty the output vector of the function \\(f: R^n \\rightarrow R^m\\). In this case both have dimension one. ATOMIC_REVERSE: How to calculate the reverse mode derivatives for a general Type. Again tx and ty denote function input and output but now ty has been computed and is available as an intermediate value. The vectors px and py denote partial derivatives of the end result with respect to \\(x\\) and \\(y\\) respectively. py is given and we must calculate px using the chain rule. This first order derivative rule is automatically expanded up to higher orders required when using TMB’s random effects calculations. 
To make the function work like other TMB functions it is convenient to define scalar and a vectorized versions that call the atomic function: // Scalar version template<class Type> Type LambertW(Type x){ CppAD::vector<Type> tx(1); tx[0] = x; return LambertW(tx)[0]; } // Vectorized version VECTORIZE_1t(LambertW) 15.2.1 Testing the primitive function Here is a complete example using Newton’s method to calculate the Lambert \\(W\\) function (there are more sophisticated algorithms such as the one by Fukushima (2013), but that doesn’t matter for this example): #include <TMB.hpp> // Double version of Lambert W function double LambertW(double x) { double logx = log(x); double y = (logx > 0 ? logx : 0); int niter = 100, i=0; for (; i < niter; i++) { if ( fabs( logx - log(y) - y) < 1e-9) break; y -= (y - exp(logx - y)) / (1 + y); } if (i == niter) Rf_warning("W: failed convergence"); return y; } TMB_ATOMIC_VECTOR_FUNCTION( // ATOMIC_NAME LambertW , // OUTPUT_DIM 1, // ATOMIC_DOUBLE ty[0] = LambertW(tx[0]); // Call the 'double' version , // ATOMIC_REVERSE Type W = ty[0]; // Function value from forward pass Type DW = 1. / (exp(W) * (1. 
+ W)); // Derivative px[0] = DW * py[0]; // Reverse mode chain rule ) // Scalar version template<class Type> Type LambertW(Type x){ CppAD::vector<Type> tx(1); tx[0] = x; return LambertW(tx)[0]; } // Vectorized version VECTORIZE1_t(LambertW) template<class Type> Type objective_function<Type>::operator() () { PARAMETER_VECTOR(x); Type f = LambertW(x).sum(); return f; } And from R compile("lambert.cpp") dyn.load(dynlib("lambert")) 15.2.1.1 Checking function value and derivatives Check definition of the function: obj <- MakeADFun(data=list(), parameters=list(x=1), DLL="lambert") obj$fn(7 * exp(7)) ## [1] 7 Check derivatives using the numDeriv package: numDeriv::grad(obj$fn, 7) ## [1] 0.08626538 obj$gr(7) ## [,1] ## [1,] 0.08626538 Also try second order derivatives: numDeriv::hessian(obj$fn, 7) ## [,1] ## [1,] -0.01038959 obj$he(7) ## [,1] ## [1,] -0.01038969 15.3 Other approaches For the Lambert \\(W\\) function we know how to calculate the derivatives. There are cases for which the derivatives are impossible (or difficult) to write down. If you’re in this situation you may want to try using forward mode AD to help in defining an atomic function. A full worked out example is available here: adaptive_integration.cpp. Derivatives are calculated automatically and if-else branching is allowed. The main downside with this approach is that it is limited to functions with very few inputs. Checkpointing is another useful technique. It is demonstrated in the example register_atomic.cpp. It does not work for adaptive algorithms but is otherwise automatic. It is useful to reduce AD memory usage in cases where the same sequence of operations is being applied many times. 
"],["Appendix.html", "16 Appendix 16.1 Notation 16.2 Profiling the inner problem 16.3 Theory underlying sdreport", " 16 Appendix 16.1 Notation We use the following notation Notation Explanation \\(u\\) The random effects vector \\(\\theta\\) Parameter vector (first part) \\(\\beta\\) Parameter vector (second part) \\(f(u,\\beta,\\theta)\\) Joint negative log likelihood \\(x\\) Data \\(E(u|x)\\) Conditional expectation of random effect given data \\(\\hat u\\) The posterior mode \\(\\arg \\min_{u} f(u,\\beta,\\theta)\\) 16.2 Profiling the inner problem This section describes the underlying theory of the argument profile to MakeADFun intended to speedup and robustify linear mixed effect models with a large number of fixed effects. With a few common model properties (Assumption 1 and 2 below), which must be checked by the user, one can apply the profile argument to move outer parameters to the inner problem without affecting the model result. Theorem 1 (Profiling inner problem) Assume that for any \\(\\beta\\) and \\(\\theta\\) Assumption 1 The partial derivative \\(\\partial_{\\beta} f(u,\\beta,\\theta)\\) is a linear function of u. Assumption 2 The posterior mean is equal to the posterior mode: \\(E(u|x)=\\hat u\\) Then the MLE \\[\\hat \\beta := \\arg \\max_{\\beta} \\left( \\int \\exp(-f(u,\\beta,\\theta)) \\: du \\right) \\] is a solution to the augmented system \\[ \\begin{split} \\partial_{u} f(u,\\beta,\\theta) &= 0 \\\\ \\partial_{\\beta} f(u,\\beta,\\theta) &= 0 \\end{split} \\] The augmented system defines \\(\\hat \\beta\\) implicitly as function of the posterior mode \\(\\hat u\\). 
Proof Differentiation of the negative log marginal likelihood gives \\[ \\begin{split} \\partial_{\\beta} \\left( -\\log \\int \\exp(-f(u,\\beta,\\theta)) \\: du \\right) &= E(\\partial_{\\beta}f(u,\\beta,\\theta) |x) \\\\ &= \\partial_{\\beta} f(u,\\beta,\\theta)_{|u=\\hat u(\\beta,\\theta)} \\end{split} \\] where the first equality holds in general and the second equality follows from assumptions (1) and (2). \\(\\square\\) 16.2.1 Example The standard situation for which assumption 1 holds is when the \\(\\beta\\)s are the linear fixed effects of a mixed model. In this case the joint negative log density takes the form \\[ f(u,\\beta,\\theta) = \\frac{1}{2}(u-A\\beta)'\\Sigma_{\\theta}^{-1}(u-A\\beta) + ... \\] for some design matrix \\(A\\) where ’ \\(...\\) ’ does not depend on \\(\\beta\\). The derivative \\[ \\partial_{\\beta} f(u,\\beta,\\theta) = A'\\Sigma_{\\theta}^{-1}(u-A\\beta) \\] is thus a linear function of the random effect \\(u\\). In general assumption 2 holds exact for models with a symmetric (e.g. Gaussian) posterior distribution. 16.3 Theory underlying sdreport This section supplements the documentation of ?sdreport by adding some missing details. As previously, we consider a general latent variable model with parameter vector \\(\\theta\\), random effect vector \\(u\\) and observation vector \\(x\\). The TMB estimation procedure works as follows: The MLE \\(\\hat\\theta=\\hat\\theta(x)\\) is calculated and used as estimator of \\(\\theta\\). Denote by \\(\\hat u(\\theta,x)\\) the random effect mode depending on \\(\\theta\\) and \\(x\\). Now, plug in the MLE, and we get our estimator \\(\\hat u\\left(\\hat\\theta(x),x\\right)\\) of \\(u\\). In general, we assume that \\(\\hat\\theta\\) is a consistent estimator of \\(\\theta\\). However, we do not in general require \\(\\hat u\\) to be consistent for \\(u\\). 
The purpose of sdreport is, for a given realization of the pair \\((u,x)\\), to quantify the joint uncertainty of \\((\\hat u,\\hat\\theta)\\) as estimator of \\((u,\\theta)\\). That is, we are interested in the variance matrix of the difference \\[D:=\\begin{pmatrix}\\hat u\\left(\\hat\\theta(x),x\\right) - u\\\\ \\hat\\theta(x) - \\theta\\end{pmatrix}\\] An important point of the uncertainty quantification is to account for plugging in \\(\\hat\\theta\\) rather than using the true \\(\\theta\\). We calculate the variance using the standard formula: \\[V[D]=E(V(D|x))+V(E(D|x))\\] Consider \\(D\\) conditionally on \\(x\\). The second component does not depend on \\(u\\) and \\(\\hat u\\) is constant given \\(x\\): \\[V[D|x]=\\begin{pmatrix}V[u|x] & 0 \\\\ 0 & 0 \\end{pmatrix}\\] It follows that \\[E(V[D|x])=\\begin{pmatrix}E(V[u|x]) & 0 \\\\ 0 & 0 \\end{pmatrix}\\] As central estimator of \\(E(V[u|x])\\) we use \\(V[u|x]\\) which is approximated by the inverse random effect Hessian \\(H_{uu}^{-1}\\) based on the assumption that \\(u|x\\) is well approximated by a Gaussian distribution (a reasonable assumption given that we are using the Laplace approximation). 
This explains the first term of variance formula in ?sdreport: \\[E(V[D|x]) \\approx \\begin{pmatrix} H_{uu}^{-1} & 0 \\\\ 0 & 0 \\end{pmatrix}\\] Likewise, \\[E[D|x]=\\begin{pmatrix}\\hat u\\left(\\hat\\theta(x),x\\right) - E(u|x)\\\\ \\hat\\theta(x) - \\theta\\end{pmatrix}\\] Again, asuming a Gaussian approximation of \\(u|x\\), it follows that \\(E(u|x) \\approx \\hat u(\\theta,x)\\): \\[E[D|x]=\\begin{pmatrix}\\hat u\\left(\\hat\\theta(x),x\\right) - \\hat u(\\theta,x)\\\\ \\hat\\theta(x) - \\theta\\end{pmatrix}\\] We approximate the expectation using linerization of \\(\\theta \\rightarrow \\hat u(\\theta,x)\\) around \\(\\hat\\theta(x)\\) \\[E[D|x]=J_x \\cdot (\\hat\\theta(x) - \\theta)\\] We now have the second term of the variance formula in ?sdreport: \\[V(E[D|x]) \\approx J_x V(\\hat\\theta(x)) J_x'\\] This term becomes negligible if the amount of data is high because of the assumed asymptotic consistency of \\(\\hat\\theta\\). "],["404.html", "Page not found", " Page not found The page you requested cannot be found (perhaps it was moved or renamed). You may want to try searching to find the page's new location, or use the table of contents to find the page you are looking for. "]] diff --git a/ad__blas_8hpp_source.html b/ad__blas_8hpp_source.html index ce6780751..15b972d0e 100644 --- a/ad__blas_8hpp_source.html +++ b/ad__blas_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
@@ -73,7 +73,7 @@
ad_blas.hpp
-
1 #ifndef HAVE_AD_BLAS_HPP
2 #define HAVE_AD_BLAS_HPP
3 // Autogenerated - do not edit by hand !
4 #include <Eigen/Dense>
5 #include "global.hpp"
6 
7 namespace TMBad {
8 
22 template <class Matrix>
24  bool yes = true;
25  Index j_previous = -1;
26  for (size_t i = 0; i < (size_t)x.size(); i++) {
27  if (!x(i).on_some_tape()) {
28  yes = false;
29  break;
30  }
31  Index j = ad_plain(x(i)).index;
32  if (i > 0) {
33  if (j != j_previous + 1) {
34  yes = false;
35  break;
36  }
37  }
38  j_previous = j;
39  }
40  if (yes) {
41  return global::ad_segment(ad_plain(x(0)), x.rows(), x.cols());
42  }
43 
44  ad_plain ans;
45  for (size_t i = 0; i < (size_t)x.size(); i++) {
46  ad_plain xi_cpy = x(i).copy();
47 
48  x(i).override_by(xi_cpy);
49  if (i == 0) ans = xi_cpy;
50  }
51  return global::ad_segment(ans, x.rows(), x.cols());
52 }
53 
54 using Eigen::Dynamic;
55 using Eigen::Map;
56 using Eigen::Matrix;
57 typedef Matrix<double, Dynamic, Dynamic> dmatrix;
58 typedef Matrix<global::Replay, Dynamic, Dynamic> vmatrix;
59 
60 template <class Target>
61 void fill(Target &y, const global::ad_segment x) {
62  TMBAD_ASSERT((size_t)y.size() == (size_t)x.size());
63  for (size_t i = 0; i < (size_t)y.size(); i++) {
64  y(i) = x[i];
65  }
66 }
67 
68 template <bool XT, bool YT, bool ZT, bool UP>
69 struct MatMul;
70 template <bool XT, bool YT, bool ZT, bool UP>
71 void matmul(const vmatrix &x, const vmatrix &y, Map<vmatrix> z) {
74  if (!UP) {
75  global::ad_segment out =
76  get_glob()->add_to_stack<MatMul<XT, YT, ZT, UP> >(xc, yc);
77  fill(z, out);
78  } else {
80  get_glob()->add_to_stack<MatMul<XT, YT, ZT, UP> >(xc, yc, zc);
81  }
82 }
83 
85 vmatrix matmul(const vmatrix &x, const vmatrix &y);
86 
88 dmatrix matmul(const dmatrix &x, const dmatrix &y);
89 
91 template <bool XT, bool YT, bool ZT, bool UP>
92 void matmul(Map<const dmatrix> x, Map<const dmatrix> y, Map<dmatrix> z) {
93  if (!UP) {
94  if (XT && YT && ZT) z.transpose() = x.transpose() * y.transpose();
95  if (!XT && YT && ZT) z.transpose() = x * y.transpose();
96  if (XT && !YT && ZT) z.transpose() = x.transpose() * y;
97  if (XT && YT && !ZT) z = x.transpose() * y.transpose();
98  if (!XT && !YT && ZT) z.transpose() = x * y;
99  if (XT && !YT && !ZT) z = x.transpose() * y;
100  if (!XT && YT && !ZT) z = x * y.transpose();
101  if (!XT && !YT && !ZT) z = x * y;
102  }
103  if (UP) {
104  if (XT && YT && ZT) z.transpose() += x.transpose() * y.transpose();
105  if (!XT && YT && ZT) z.transpose() += x * y.transpose();
106  if (XT && !YT && ZT) z.transpose() += x.transpose() * y;
107  if (XT && YT && !ZT) z += x.transpose() * y.transpose();
108  if (!XT && !YT && ZT) z.transpose() += x * y;
109  if (XT && !YT && !ZT) z += x.transpose() * y;
110  if (!XT && YT && !ZT) z += x * y.transpose();
111  if (!XT && !YT && !ZT) z += x * y;
112  }
113 }
114 
115 template <bool XT, bool YT, bool ZT, bool UP>
116 struct MatMul : global::Operator<2 + UP, -1> {
117  static const bool dynamic = true;
118  static const int max_fuse_depth = 0;
119  int n1, n2, n3;
120  static const int ninput = 2 + UP;
122  set_dim(X.rows(), X.cols(), Y.rows(), Y.cols());
123  }
124  MatMul(int n1, int n2, int n3) : n1(n1), n2(n2), n3(n3) {}
125  Index input_size() const { return 2 + UP; }
126  Index output_size() const {
127  if (UP) return 0;
128  int Xrows, Xcols, Yrows, Ycols, Zrows, Zcols;
129  get_dim(Xrows, Xcols, Yrows, Ycols, Zrows, Zcols);
130  return Zrows * Zcols;
131  }
132  static const bool have_input_size_output_size = true;
133  void set_dim(int Xrows, int Xcols, int Yrows, int Ycols) {
134  n1 = Xrows;
135  n2 = Xcols;
136  n3 = (YT ? Yrows : Ycols);
137  }
138  void get_dim(int &Xrows, int &Xcols, int &Yrows, int &Ycols, int &Zrows,
139  int &Zcols) const {
140  Xrows = n1;
141  Xcols = n2;
142 
143  int Xop_rows = Xrows, Xop_cols = Xcols;
144  if (XT) std::swap(Xop_rows, Xop_cols);
145 
146  int Yop_rows = Xop_cols, Yop_cols = n3;
147 
148  Yrows = Yop_rows;
149  Ycols = Yop_cols;
150  if (YT) std::swap(Yrows, Ycols);
151 
152  int Zop_rows = Xop_rows, Zop_cols = Yop_cols;
153 
154  Zrows = Zop_rows;
155  Zcols = Zop_cols;
156  if (ZT) std::swap(Zrows, Zcols);
157  }
158  template <class Type>
159  void forward(ForwardArgs<Type> &args) {
160  int Xrows, Xcols, Yrows, Ycols, Zrows, Zcols;
161  get_dim(Xrows, Xcols, Yrows, Ycols, Zrows, Zcols);
162  typedef Map<Matrix<Type, Dynamic, Dynamic> > MapMatrix;
163  typedef Map<const Matrix<Type, Dynamic, Dynamic> > ConstMapMatrix;
164  Type *zp = (UP ? args.x_ptr(2) : args.y_ptr(0));
165  ConstMapMatrix X(args.x_ptr(0), Xrows, Xcols);
166  ConstMapMatrix Y(args.x_ptr(1), Yrows, Ycols);
167  MapMatrix Z(zp, Zrows, Zcols);
168  matmul<XT, YT, ZT, UP>(X, Y, Z);
169  }
170  template <class Type>
171  void reverse(ReverseArgs<Type> &args) {
172  int Xrows, Xcols, Yrows, Ycols, Zrows, Zcols;
173  get_dim(Xrows, Xcols, Yrows, Ycols, Zrows, Zcols);
174  typedef Map<Matrix<Type, Dynamic, Dynamic> > MapMatrix;
175  typedef Map<const Matrix<Type, Dynamic, Dynamic> > ConstMapMatrix;
176  Type *dzp = (UP ? args.dx_ptr(2) : args.dy_ptr(0));
177  ConstMapMatrix X(args.x_ptr(0), Xrows, Xcols);
178  ConstMapMatrix Y(args.x_ptr(1), Yrows, Ycols);
179  ConstMapMatrix W(dzp, Zrows, Zcols);
180  MapMatrix DX(args.dx_ptr(0), Xrows, Xcols);
181  MapMatrix DY(args.dx_ptr(1), Yrows, Ycols);
182 
183  matmul<ZT, !YT, XT, true>(W, Y, DX);
184  matmul<!XT, ZT, YT, true>(X, W, DY);
185  }
186 
187  void dependencies(Args<> &args, Dependencies &dep) const {
188  int Xrows, Xcols, Yrows, Ycols, Zrows, Zcols;
189  get_dim(Xrows, Xcols, Yrows, Ycols, Zrows, Zcols);
190  dep.add_segment(args.input(0), Xrows * Xcols);
191  dep.add_segment(args.input(1), Yrows * Ycols);
192  }
193 
194  void dependencies_updating(Args<> &args, Dependencies &dep) const {
195  int Xrows, Xcols, Yrows, Ycols, Zrows, Zcols;
196  get_dim(Xrows, Xcols, Yrows, Ycols, Zrows, Zcols);
197  if (UP) {
198  dep.add_segment(args.input(2), Zrows * Zcols);
199  }
200  }
201  static const bool have_dependencies = true;
203  static const bool implicit_dependencies = true;
205  static const bool allow_remap = false;
207  static const bool updating = true;
208 
209  void forward(ForwardArgs<Writer> &args) { TMBAD_ASSERT(false); }
210  void reverse(ReverseArgs<Writer> &args) { TMBAD_ASSERT(false); }
211  const char *op_name() { return "MatMul"; }
212 };
213 
214 } // namespace TMBad
215 #endif // HAVE_AD_BLAS_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:153
+
1 #ifndef HAVE_AD_BLAS_HPP
2 #define HAVE_AD_BLAS_HPP
3 // Autogenerated - do not edit by hand !
4 #include <Eigen/Dense>
5 #include "global.hpp"
6 
7 namespace TMBad {
8 
22 template <class Matrix>
24  bool yes = true;
25  Index j_previous = -1;
26  for (size_t i = 0; i < (size_t)x.size(); i++) {
27  if (!x(i).on_some_tape()) {
28  yes = false;
29  break;
30  }
31  Index j = ad_plain(x(i)).index;
32  if (i > 0) {
33  if (j != j_previous + 1) {
34  yes = false;
35  break;
36  }
37  }
38  j_previous = j;
39  }
40  if (yes) {
41  return global::ad_segment(ad_plain(x(0)), x.rows(), x.cols());
42  }
43 
44  ad_plain ans;
45  for (size_t i = 0; i < (size_t)x.size(); i++) {
46  ad_plain xi_cpy = x(i).copy();
47 
48  x(i).override_by(xi_cpy);
49  if (i == 0) ans = xi_cpy;
50  }
51  return global::ad_segment(ans, x.rows(), x.cols());
52 }
53 
54 using Eigen::Dynamic;
55 using Eigen::Map;
56 using Eigen::Matrix;
57 typedef Matrix<double, Dynamic, Dynamic> dmatrix;
58 typedef Matrix<global::Replay, Dynamic, Dynamic> vmatrix;
59 
60 template <class Target>
61 void fill(Target &y, const global::ad_segment x) {
62  TMBAD_ASSERT((size_t)y.size() == (size_t)x.size());
63  for (size_t i = 0; i < (size_t)y.size(); i++) {
64  y(i) = x[i];
65  }
66 }
67 
68 template <bool XT, bool YT, bool ZT, bool UP>
69 struct MatMul;
70 template <bool XT, bool YT, bool ZT, bool UP>
71 void matmul(const vmatrix &x, const vmatrix &y, Map<vmatrix> z) {
74  if (!UP) {
75  global::ad_segment out =
76  get_glob()->add_to_stack<MatMul<XT, YT, ZT, UP> >(xc, yc);
77  fill(z, out);
78  } else {
80  get_glob()->add_to_stack<MatMul<XT, YT, ZT, UP> >(xc, yc, zc);
81  }
82 }
83 
85 vmatrix matmul(const vmatrix &x, const vmatrix &y);
86 
88 dmatrix matmul(const dmatrix &x, const dmatrix &y);
89 
91 template <bool XT, bool YT, bool ZT, bool UP>
92 void matmul(Map<const dmatrix> x, Map<const dmatrix> y, Map<dmatrix> z) {
93  if (!UP) {
94  if (XT && YT && ZT) z.transpose() = x.transpose() * y.transpose();
95  if (!XT && YT && ZT) z.transpose() = x * y.transpose();
96  if (XT && !YT && ZT) z.transpose() = x.transpose() * y;
97  if (XT && YT && !ZT) z = x.transpose() * y.transpose();
98  if (!XT && !YT && ZT) z.transpose() = x * y;
99  if (XT && !YT && !ZT) z = x.transpose() * y;
100  if (!XT && YT && !ZT) z = x * y.transpose();
101  if (!XT && !YT && !ZT) z = x * y;
102  }
103  if (UP) {
104  if (XT && YT && ZT) z.transpose() += x.transpose() * y.transpose();
105  if (!XT && YT && ZT) z.transpose() += x * y.transpose();
106  if (XT && !YT && ZT) z.transpose() += x.transpose() * y;
107  if (XT && YT && !ZT) z += x.transpose() * y.transpose();
108  if (!XT && !YT && ZT) z.transpose() += x * y;
109  if (XT && !YT && !ZT) z += x.transpose() * y;
110  if (!XT && YT && !ZT) z += x * y.transpose();
111  if (!XT && !YT && !ZT) z += x * y;
112  }
113 }
114 
115 template <bool XT, bool YT, bool ZT, bool UP>
116 struct MatMul : global::Operator<2 + UP, -1> {
117  static const bool dynamic = true;
118  static const int max_fuse_depth = 0;
119  int n1, n2, n3;
120  static const int ninput = 2 + UP;
122  set_dim(X.rows(), X.cols(), Y.rows(), Y.cols());
123  }
124  MatMul(int n1, int n2, int n3) : n1(n1), n2(n2), n3(n3) {}
125  Index input_size() const { return 2 + UP; }
126  Index output_size() const {
127  if (UP) return 0;
128  int Xrows, Xcols, Yrows, Ycols, Zrows, Zcols;
129  get_dim(Xrows, Xcols, Yrows, Ycols, Zrows, Zcols);
130  return Zrows * Zcols;
131  }
132  static const bool have_input_size_output_size = true;
133  void set_dim(int Xrows, int Xcols, int Yrows, int Ycols) {
134  n1 = Xrows;
135  n2 = Xcols;
136  n3 = (YT ? Yrows : Ycols);
137  }
138  void get_dim(int &Xrows, int &Xcols, int &Yrows, int &Ycols, int &Zrows,
139  int &Zcols) const {
140  Xrows = n1;
141  Xcols = n2;
142 
143  int Xop_rows = Xrows, Xop_cols = Xcols;
144  if (XT) std::swap(Xop_rows, Xop_cols);
145 
146  int Yop_rows = Xop_cols, Yop_cols = n3;
147 
148  Yrows = Yop_rows;
149  Ycols = Yop_cols;
150  if (YT) std::swap(Yrows, Ycols);
151 
152  int Zop_rows = Xop_rows, Zop_cols = Yop_cols;
153 
154  Zrows = Zop_rows;
155  Zcols = Zop_cols;
156  if (ZT) std::swap(Zrows, Zcols);
157  }
158  template <class Type>
159  void forward(ForwardArgs<Type> &args) {
160  int Xrows, Xcols, Yrows, Ycols, Zrows, Zcols;
161  get_dim(Xrows, Xcols, Yrows, Ycols, Zrows, Zcols);
162  typedef Map<Matrix<Type, Dynamic, Dynamic> > MapMatrix;
163  typedef Map<const Matrix<Type, Dynamic, Dynamic> > ConstMapMatrix;
164  Type *zp = (UP ? args.x_ptr(2) : args.y_ptr(0));
165  ConstMapMatrix X(args.x_ptr(0), Xrows, Xcols);
166  ConstMapMatrix Y(args.x_ptr(1), Yrows, Ycols);
167  MapMatrix Z(zp, Zrows, Zcols);
168  matmul<XT, YT, ZT, UP>(X, Y, Z);
169  }
170  template <class Type>
171  void reverse(ReverseArgs<Type> &args) {
172  int Xrows, Xcols, Yrows, Ycols, Zrows, Zcols;
173  get_dim(Xrows, Xcols, Yrows, Ycols, Zrows, Zcols);
174  typedef Map<Matrix<Type, Dynamic, Dynamic> > MapMatrix;
175  typedef Map<const Matrix<Type, Dynamic, Dynamic> > ConstMapMatrix;
176  Type *dzp = (UP ? args.dx_ptr(2) : args.dy_ptr(0));
177  ConstMapMatrix X(args.x_ptr(0), Xrows, Xcols);
178  ConstMapMatrix Y(args.x_ptr(1), Yrows, Ycols);
179  ConstMapMatrix W(dzp, Zrows, Zcols);
180  MapMatrix DX(args.dx_ptr(0), Xrows, Xcols);
181  MapMatrix DY(args.dx_ptr(1), Yrows, Ycols);
182 
183  matmul<ZT, !YT, XT, true>(W, Y, DX);
184  matmul<!XT, ZT, YT, true>(X, W, DY);
185  }
186 
187  void dependencies(Args<> &args, Dependencies &dep) const {
188  int Xrows, Xcols, Yrows, Ycols, Zrows, Zcols;
189  get_dim(Xrows, Xcols, Yrows, Ycols, Zrows, Zcols);
190  dep.add_segment(args.input(0), Xrows * Xcols);
191  dep.add_segment(args.input(1), Yrows * Ycols);
192  }
193 
194  void dependencies_updating(Args<> &args, Dependencies &dep) const {
195  int Xrows, Xcols, Yrows, Ycols, Zrows, Zcols;
196  get_dim(Xrows, Xcols, Yrows, Ycols, Zrows, Zcols);
197  if (UP) {
198  dep.add_segment(args.input(2), Zrows * Zcols);
199  }
200  }
201  static const bool have_dependencies = true;
203  static const bool implicit_dependencies = true;
205  static const bool allow_remap = false;
207  static const bool updating = true;
208 
209  void forward(ForwardArgs<Writer> &args) { TMBAD_ASSERT(false); }
210  void reverse(ReverseArgs<Writer> &args) { TMBAD_ASSERT(false); }
211  const char *op_name() { return "MatMul"; }
212 };
213 
214 } // namespace TMBad
215 #endif // HAVE_AD_BLAS_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:157
Operator with input/output dimension known at compile time.
Definition: global.hpp:1491
global * get_glob()
Get pointer to current global AD context (or NULL if no context is active).
Definition: TMBad.cpp:690
ad_plain add_to_stack(Scalar result=0)
Add nullary operator to the stack based on its result
Definition: global.hpp:2448
diff --git a/adaptive_integration_8cpp-example.html b/adaptive_integration_8cpp-example.html index 4381252df..af9004a6b 100644 --- a/adaptive_integration_8cpp-example.html +++ b/adaptive_integration_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/annotated.html b/annotated.html index 6d8f7a805..6a3f7cd2e 100644 --- a/annotated.html +++ b/annotated.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/ar1_4D_8cpp-example.html b/ar1_4D_8cpp-example.html index 1cdff5a47..4211a5572 100644 --- a/ar1_4D_8cpp-example.html +++ b/ar1_4D_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/array_8hpp_source.html b/array_8hpp_source.html index f6121f723..3329519ab 100644 --- a/array_8hpp_source.html +++ b/array_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/atomic_8hpp.html b/atomic_8hpp.html index 78826a444..2f9b1949b 100644 --- a/atomic_8hpp.html +++ b/atomic_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/atomic_8hpp_source.html b/atomic_8hpp_source.html index 31bd632c6..6fb4b2d73 100644 --- a/atomic_8hpp_source.html +++ b/atomic_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/atomic__convolve_8hpp_source.html b/atomic__convolve_8hpp_source.html index e281a546b..74b9584b5 100644 --- a/atomic__convolve_8hpp_source.html +++ b/atomic__convolve_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/atomic__macro_8hpp_source.html b/atomic__macro_8hpp_source.html index fc8b4c0e7..e3acd4b71 100644 --- a/atomic__macro_8hpp_source.html +++ b/atomic__macro_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
@@ -74,7 +74,7 @@
1 // Copyright (C) 2013-2015 Kasper Kristensen
2 // License: GPL-2
3 
4 /* Flag to detect if any atomic functions have been created */
5 TMB_EXTERN bool atomicFunctionGenerated CSKIP(= false;)
6 
8 #define TMB_ATOMIC_VECTOR_FUNCTION(ATOMIC_NAME, OUTPUT_DIM, ATOMIC_DOUBLE, \
9  ATOMIC_REVERSE) \
10  \
11  template<class Double> \
12  void ATOMIC_NAME(const CppAD::vector<Double>& tx, \
13  CppAD::vector<Double>& ty) CSKIP_ATOMIC({ \
14  ATOMIC_DOUBLE; \
15  }) \
16  template<class Double> \
17  CppAD::vector<double> \
18  ATOMIC_NAME(const CppAD::vector<Double>& tx) CSKIP_ATOMIC({ \
19  CppAD::vector<double> ty(OUTPUT_DIM); \
20  ATOMIC_NAME(tx, ty); \
21  return ty; \
22  }) \
23  IF_TMB_PRECOMPILE_ATOMICS( \
24  template \
25  void ATOMIC_NAME<double>(const CppAD::vector<double>& tx, \
26  CppAD::vector<double>& ty); \
27  template \
28  CppAD::vector<double> ATOMIC_NAME<double>(const CppAD::vector<double>& tx); \
29  ) \
30  template <class Type> \
31  void ATOMIC_NAME(const CppAD::vector<AD<Type> >& tx, \
32  CppAD::vector<AD<Type> >& ty); \
33  template <class Type> \
34  CppAD::vector<AD<Type> > ATOMIC_NAME(const CppAD::vector<AD<Type> >& tx); \
35  template <class Type> \
36  class atomic##ATOMIC_NAME : public CppAD::atomic_base<Type> { \
37  public: \
38  atomic##ATOMIC_NAME(const char* name) : CppAD::atomic_base<Type>(name) { \
39  atomic::atomicFunctionGenerated = true; \
40  if (config.trace.atomic) \
41  std::cout << "Constructing atomic " << #ATOMIC_NAME << "\n"; \
42  this->option(CppAD::atomic_base<Type>::bool_sparsity_enum); \
43  } \
44  \
45  private: \
46  virtual bool forward(size_t p, size_t q, const CppAD::vector<bool>& vx, \
47  CppAD::vector<bool>& vy, \
48  const CppAD::vector<Type>& tx, \
49  CppAD::vector<Type>& ty) { \
50  if (q > 0) \
51  Rf_error("Atomic '" #ATOMIC_NAME "' order not implemented.\n"); \
52  if (vx.size() > 0) { \
53  bool anyvx = false; \
54  for (size_t i = 0; i < vx.size(); i++) anyvx |= vx[i]; \
55  for (size_t i = 0; i < vy.size(); i++) vy[i] = anyvx; \
56  } \
57  ATOMIC_NAME(tx, ty); \
58  return true; \
59  } \
60  virtual bool reverse(size_t q, const CppAD::vector<Type>& tx, \
61  const CppAD::vector<Type>& ty, \
62  CppAD::vector<Type>& px, \
63  const CppAD::vector<Type>& py) { \
64  if (q > 0) \
65  Rf_error("Atomic '" #ATOMIC_NAME "' order not implemented.\n"); \
66  ATOMIC_REVERSE; \
67  return true; \
68  } \
69  virtual bool rev_sparse_jac(size_t q, const CppAD::vector<bool>& rt, \
70  CppAD::vector<bool>& st) { \
71  bool anyrt = false; \
72  for (size_t i = 0; i < rt.size(); i++) anyrt |= rt[i]; \
73  for (size_t i = 0; i < st.size(); i++) st[i] = anyrt; \
74  return true; \
75  } \
76  virtual bool rev_sparse_jac(size_t q, \
77  const CppAD::vector<std::set<size_t> >& rt, \
78  CppAD::vector<std::set<size_t> >& st) { \
79  Rf_error("Should not be called"); \
80  } \
81  }; \
82  template <class Type> \
83  void ATOMIC_NAME(const CppAD::vector<AD<Type> >& tx, \
84  CppAD::vector<AD<Type> >& ty) { \
85  static atomic##ATOMIC_NAME<Type> afun##ATOMIC_NAME( \
86  "atomic_" #ATOMIC_NAME); \
87  afun##ATOMIC_NAME(tx, ty); \
88  } \
89  template <class Type> \
90  CppAD::vector<AD<Type> > ATOMIC_NAME(const CppAD::vector<AD<Type> >& tx) { \
91  CppAD::vector<AD<Type> > ty(OUTPUT_DIM); \
92  ATOMIC_NAME(tx, ty); \
93  return ty; \
94  }
95 
96 #define TMB_ATOMIC_STATIC_FUNCTION( \
97  ATOMIC_NAME, \
98  INPUT_SIZE, \
99  ATOMIC_DOUBLE, \
100  ATOMIC_REVERSE \
101 ) \
102 template<class dummy=void> \
103 double ATOMIC_NAME (const double *tx) { \
104  double ty[1]; \
105  ATOMIC_DOUBLE; \
106  return ty[0]; \
107 } \
108 template <class Type> \
109 CppAD::vector<AD<Type> > ATOMIC_NAME(const CppAD::vector<AD<Type> >& tx);\
110 template<class Type> \
111 Type ATOMIC_NAME (const Type *tx) { \
112  CppAD::vector<Type> tx_(INPUT_SIZE); \
113  for (size_t i=0; i<INPUT_SIZE; i++) tx_[i]=tx[i]; \
114  return ATOMIC_NAME(tx_)[0]; \
115 } \
116 TMB_ATOMIC_VECTOR_FUNCTION( \
117  ATOMIC_NAME, \
118  1, \
119  ATOMIC_DOUBLE, \
120  ATOMIC_REVERSE \
121 )
122 // Helper to forward declare atomic
123 #define TMB_ATOMIC_VECTOR_FUNCTION_DECLARE(ATOMIC_NAME) \
124 template<class T> \
125 CppAD::vector<AD<T> > ATOMIC_NAME(const CppAD::vector<AD<T> > &x); \
126 template<class Double> \
127 CppAD::vector<double> ATOMIC_NAME(const CppAD::vector<Double > &x);
128 // Helper to forward define atomic
129 #define TMB_ATOMIC_VECTOR_FUNCTION_DEFINE(ATOMIC_NAME, \
130  OUTPUT_DIM, \
131  ATOMIC_DOUBLE, \
132  ATOMIC_REVERSE) \
133 TMB_ATOMIC_VECTOR_FUNCTION(ATOMIC_NAME, \
134  OUTPUT_DIM, \
135  ATOMIC_DOUBLE, \
136  ATOMIC_REVERSE)
Namespace with special functions and derivatives.
-
Definition: TMB.hpp:132
+
Definition: TMB.hpp:136
Type rt(Type df)
Simulate from a Student&#39;s t distribution.
#define TMB_ATOMIC_VECTOR_FUNCTION( ATOMIC_NAME, OUTPUT_DIM, ATOMIC_DOUBLE, ATOMIC_REVERSE)
Construct atomic vector function based on known derivatives.
bool atomic
Trace construction of atomic functions.
Definition: config.hpp:31
diff --git a/atomic__math_8hpp_source.html b/atomic__math_8hpp_source.html index bc77f2a4e..c0315ca4b 100644 --- a/atomic__math_8hpp_source.html +++ b/atomic__math_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/autodiff_8hpp.html b/autodiff_8hpp.html index 471641793..1111241e5 100644 --- a/autodiff_8hpp.html +++ b/autodiff_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/autodiff_8hpp_source.html b/autodiff_8hpp_source.html index f44cb5215..33cf8299b 100644 --- a/autodiff_8hpp_source.html +++ b/autodiff_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/bessel_2undefs_8h_source.html b/bessel_2undefs_8h_source.html index f0e4934e4..6218afbd9 100644 --- a/bessel_2undefs_8h_source.html +++ b/bessel_2undefs_8h_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/bessel_8h_source.html b/bessel_8h_source.html index ddf4c575e..a27693525 100644 --- a/bessel_8h_source.html +++ b/bessel_8h_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/bessel_8hpp_source.html b/bessel_8hpp_source.html index ad5b50e63..afd49e120 100644 --- a/bessel_8hpp_source.html +++ b/bessel_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/bessel__i_8cpp_source.html b/bessel__i_8cpp_source.html index 2434b3f9f..32ec24e88 100644 --- a/bessel__i_8cpp_source.html +++ b/bessel__i_8cpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/bessel__j_8cpp_source.html b/bessel__j_8cpp_source.html index c96230551..c9bac6521 100644 --- a/bessel__j_8cpp_source.html +++ b/bessel__j_8cpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/bessel__k_8cpp_source.html b/bessel__k_8cpp_source.html index c9e367451..432bc4338 100644 --- a/bessel__k_8cpp_source.html +++ b/bessel__k_8cpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/bessel__y_8cpp_source.html b/bessel__y_8cpp_source.html index 495458041..661279ce3 100644 --- a/bessel__y_8cpp_source.html +++ b/bessel__y_8cpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/beta_2undefs_8h_source.html b/beta_2undefs_8h_source.html index f2d5d262b..b5c92fc80 100644 --- a/beta_2undefs_8h_source.html +++ b/beta_2undefs_8h_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/chebyshev_8cpp_source.html b/chebyshev_8cpp_source.html index 15d22328f..2369b9904 100644 --- a/chebyshev_8cpp_source.html +++ b/chebyshev_8cpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/checkpoint_8hpp_source.html b/checkpoint_8hpp_source.html index a3e742e01..2f565f183 100644 --- a/checkpoint_8hpp_source.html +++ b/checkpoint_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
@@ -73,7 +73,7 @@
checkpoint.hpp
-
1 #ifndef HAVE_CHECKPOINT_HPP
2 #define HAVE_CHECKPOINT_HPP
3 // Autogenerated - do not edit by hand !
4 #include <memory>
5 #include "global.hpp"
6 #include "vectorize.hpp"
7 
8 namespace TMBad {
9 
11 template <class ADFun, bool packed_ = false>
12 struct standard_derivative_table : std::vector<ADFun> {
13  static const bool packed = packed_;
15  void requireOrder(size_t n) {
16  while ((*this).size() <= n) {
17  (*this).push_back((*this).back().WgtJacFun());
18  }
19  }
21  void retape(ForwardArgs<Scalar> &args) {}
23  standard_derivative_table(const ADFun &F) : std::vector<ADFun>(1, F) {}
24 };
25 
32  static const bool packed = false;
33  std::vector<Scalar> x_prev;
34  bool operator()(const std::vector<Scalar> &x);
35 };
36 
38 template <class Functor, class ADFun, class Test = ParametersChanged,
39  bool packed_ = false>
41  Functor F;
42  Test test;
46  size_t n = (*this)[0].Domain();
47  std::vector<Scalar> x = args.x_segment(0, n);
48  bool change = test(x);
49  if (change) {
50  (*this).resize(1);
51  (*this)[0] = ADFun(F, x);
52  }
53  }
56  template <class V>
57  retaping_derivative_table(const Functor &F, const V &x, Test test = Test())
58  : standard_derivative_table<ADFun, packed_>(ADFun(F, x)),
59  F(F),
60  test(test) {}
61 };
62 
94 template <class T>
96  typedef std::shared_ptr<T> Base;
97  Base sp;
98  std::shared_ptr<std::vector<std::weak_ptr<T> > > weak_refs;
99 
100  omp_shared_ptr(const Base &x)
101  : sp(x), weak_refs(std::make_shared<std::vector<std::weak_ptr<T> > >()) {
102  (*weak_refs).resize(TMBAD_MAX_NUM_THREADS);
103  (*weak_refs)[TMBAD_THREAD_NUM] = x;
104  }
105  omp_shared_ptr(const omp_shared_ptr &other) : weak_refs(other.weak_refs) {
106  if ((*weak_refs)[TMBAD_THREAD_NUM].expired()) {
107  sp = std::make_shared<T>(*other);
108 
109  (*weak_refs)[TMBAD_THREAD_NUM] = sp;
110  } else {
111  sp = (*weak_refs)[TMBAD_THREAD_NUM].lock();
112  }
113  }
114  omp_shared_ptr() {}
115  T &operator*() const { return *sp; }
116  T *operator->() const { return sp.get(); }
117  explicit operator bool() const { return (bool)sp; }
118 };
119 
166 template <class DerivativeTable>
167 struct AtomOp : global::DynamicOperator<-1, -1> {
168  static const bool have_input_size_output_size = true;
169  static const bool add_forward_replay_copy = true;
170 
171  TMBAD_SHARED_PTR<DerivativeTable> dtab;
172 
173  int order;
174 
175  template <class T1>
176  AtomOp(const T1 &F) : dtab(std::make_shared<DerivativeTable>(F)), order(0) {}
177  template <class T1, class T2>
178  AtomOp(const T1 &F, const T2 &x)
179  : dtab(std::make_shared<DerivativeTable>(F, x)), order(0) {}
180  template <class T1, class T2, class T3>
181  AtomOp(const T1 &F, const T2 &x, const T3 &t)
182  : dtab(std::make_shared<DerivativeTable>(F, x, t)), order(0) {}
183 
184  Index input_size() const { return (*dtab)[order].Domain(); }
185  Index output_size() const { return (*dtab)[order].Range(); }
186 
187  void forward(ForwardArgs<Scalar> &args) {
188  (*dtab).retape(args);
189 
190  (*dtab).requireOrder(order);
191 
192  size_t n = input_size();
193  size_t m = output_size();
194 
195  auto x = args.x_segment(0, n);
196 
197  args.y_segment(0, m) = (*dtab)[order](x);
198  }
199 
200  void reverse(ReverseArgs<Scalar> &args) {
201  size_t n = input_size();
202  size_t m = output_size();
203 
204  auto x = args.x_segment(0, n);
205  auto w = args.dy_segment(0, m);
206 
207  args.dx_segment(0, n) += (*dtab)[order].Jacobian(x, w);
208  }
209 
210  void reverse(ReverseArgs<global::Replay> &args) {
211  size_t n = input_size();
212  size_t m = output_size();
213 
214  std::vector<global::Replay> x = args.x_segment(0, n);
215  if (DerivativeTable::packed) x = repack(x);
216  std::vector<global::Replay> w = args.dy_segment(0, m);
217  std::vector<global::Replay> xw;
218  xw.insert(xw.end(), x.begin(), x.end());
219  xw.insert(xw.end(), w.begin(), w.end());
220 
221  (*dtab).requireOrder(order + 1);
222  AtomOp cpy(*this);
223  cpy.order++;
224  args.dx_segment(0, n) += global::Complete<AtomOp>(cpy)(xw);
225  }
226 
227  template <class T>
228  void forward(ForwardArgs<T> &args) {
229  TMBAD_ASSERT(false);
230  }
231  void reverse(ReverseArgs<Writer> &args) { TMBAD_ASSERT(false); }
232 
233  const char *op_name() { return "AtomOp"; }
234 
235  void print(global::print_config cfg) {
236  Rcout << cfg.prefix;
237  Rcout << "order=" << order << " ";
238  Rcout << "(*dtab).size()=" << (*dtab).size() << " ";
239  Rcout << "dtab=" << &(*dtab) << "\n";
240  (*dtab)[order].print(cfg);
241  }
242 };
243 
252 template <class Functor>
253 struct PackWrap {
254  Functor F;
255  PackWrap(const Functor &F) : F(F) {}
258  template <class T>
259  std::vector<T> operator()(const std::vector<T> &xp) {
260  Index K = ScalarPack<SegmentRef>::size;
261  size_t n = xp.size() / K;
262  TMBAD_ASSERT2(n * K == xp.size(), "Invalid packed arguments");
263  std::vector<ad_segment> x(n);
264  for (size_t i = 0; i < n; i++) x[i] = unpack(xp, i);
265  ad_segment y = F(x);
266  ad_segment yp = pack(y);
267  std::vector<T> ans = concat(std::vector<ad_segment>(1, yp));
268  return ans;
269  }
272  bool operator()(const std::vector<Scalar> &xp) {
273  Index K = ScalarPack<SegmentRef>::size;
274  size_t n = xp.size() / K;
275  TMBAD_ASSERT2(n * K == xp.size(), "Invalid packed arguments");
276  std::vector<Scalar *> x(n);
277  for (size_t i = 0; i < n; i++) x[i] = unpack(xp, i);
278  return F(x);
279  }
280 };
281 
282 } // namespace TMBad
283 #endif // HAVE_CHECKPOINT_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:153
+
1 #ifndef HAVE_CHECKPOINT_HPP
2 #define HAVE_CHECKPOINT_HPP
3 // Autogenerated - do not edit by hand !
4 #include <memory>
5 #include "global.hpp"
6 #include "vectorize.hpp"
7 
8 namespace TMBad {
9 
11 template <class ADFun, bool packed_ = false>
12 struct standard_derivative_table : std::vector<ADFun> {
13  static const bool packed = packed_;
15  void requireOrder(size_t n) {
16  while ((*this).size() <= n) {
17  (*this).push_back((*this).back().WgtJacFun());
18  }
19  }
21  void retape(ForwardArgs<Scalar> &args) {}
23  standard_derivative_table(const ADFun &F) : std::vector<ADFun>(1, F) {}
24 };
25 
32  static const bool packed = false;
33  std::vector<Scalar> x_prev;
34  bool operator()(const std::vector<Scalar> &x);
35 };
36 
38 template <class Functor, class ADFun, class Test = ParametersChanged,
39  bool packed_ = false>
41  Functor F;
42  Test test;
46  size_t n = (*this)[0].Domain();
47  std::vector<Scalar> x = args.x_segment(0, n);
48  bool change = test(x);
49  if (change) {
50  (*this).resize(1);
51  (*this)[0] = ADFun(F, x);
52  }
53  }
56  template <class V>
57  retaping_derivative_table(const Functor &F, const V &x, Test test = Test())
58  : standard_derivative_table<ADFun, packed_>(ADFun(F, x)),
59  F(F),
60  test(test) {}
61 };
62 
94 template <class T>
96  typedef std::shared_ptr<T> Base;
97  Base sp;
98  std::shared_ptr<std::vector<std::weak_ptr<T> > > weak_refs;
99 
100  omp_shared_ptr(const Base &x)
101  : sp(x), weak_refs(std::make_shared<std::vector<std::weak_ptr<T> > >()) {
102  (*weak_refs).resize(TMBAD_MAX_NUM_THREADS);
103  (*weak_refs)[TMBAD_THREAD_NUM] = x;
104  }
105  omp_shared_ptr(const omp_shared_ptr &other) : weak_refs(other.weak_refs) {
106  if ((*weak_refs)[TMBAD_THREAD_NUM].expired()) {
107  sp = std::make_shared<T>(*other);
108 
109  (*weak_refs)[TMBAD_THREAD_NUM] = sp;
110  } else {
111  sp = (*weak_refs)[TMBAD_THREAD_NUM].lock();
112  }
113  }
114  omp_shared_ptr() {}
115  T &operator*() const { return *sp; }
116  T *operator->() const { return sp.get(); }
117  explicit operator bool() const { return (bool)sp; }
118 };
119 
166 template <class DerivativeTable>
167 struct AtomOp : global::DynamicOperator<-1, -1> {
168  static const bool have_input_size_output_size = true;
169  static const bool add_forward_replay_copy = true;
170 
171  TMBAD_SHARED_PTR<DerivativeTable> dtab;
172 
173  int order;
174 
175  template <class T1>
176  AtomOp(const T1 &F) : dtab(std::make_shared<DerivativeTable>(F)), order(0) {}
177  template <class T1, class T2>
178  AtomOp(const T1 &F, const T2 &x)
179  : dtab(std::make_shared<DerivativeTable>(F, x)), order(0) {}
180  template <class T1, class T2, class T3>
181  AtomOp(const T1 &F, const T2 &x, const T3 &t)
182  : dtab(std::make_shared<DerivativeTable>(F, x, t)), order(0) {}
183 
184  Index input_size() const { return (*dtab)[order].Domain(); }
185  Index output_size() const { return (*dtab)[order].Range(); }
186 
187  void forward(ForwardArgs<Scalar> &args) {
188  (*dtab).retape(args);
189 
190  (*dtab).requireOrder(order);
191 
192  size_t n = input_size();
193  size_t m = output_size();
194 
195  auto x = args.x_segment(0, n);
196 
197  args.y_segment(0, m) = (*dtab)[order](x);
198  }
199 
200  void reverse(ReverseArgs<Scalar> &args) {
201  size_t n = input_size();
202  size_t m = output_size();
203 
204  auto x = args.x_segment(0, n);
205  auto w = args.dy_segment(0, m);
206 
207  args.dx_segment(0, n) += (*dtab)[order].Jacobian(x, w);
208  }
209 
210  void reverse(ReverseArgs<global::Replay> &args) {
211  size_t n = input_size();
212  size_t m = output_size();
213 
214  std::vector<global::Replay> x = args.x_segment(0, n);
215  if (DerivativeTable::packed) x = repack(x);
216  std::vector<global::Replay> w = args.dy_segment(0, m);
217  std::vector<global::Replay> xw;
218  xw.insert(xw.end(), x.begin(), x.end());
219  xw.insert(xw.end(), w.begin(), w.end());
220 
221  (*dtab).requireOrder(order + 1);
222  AtomOp cpy(*this);
223  cpy.order++;
224  args.dx_segment(0, n) += global::Complete<AtomOp>(cpy)(xw);
225  }
226 
227  template <class T>
228  void forward(ForwardArgs<T> &args) {
229  TMBAD_ASSERT(false);
230  }
231  void reverse(ReverseArgs<Writer> &args) { TMBAD_ASSERT(false); }
232 
233  const char *op_name() { return "AtomOp"; }
234 
235  void print(global::print_config cfg) {
236  Rcout << cfg.prefix;
237  Rcout << "order=" << order << " ";
238  Rcout << "(*dtab).size()=" << (*dtab).size() << " ";
239  Rcout << "dtab=" << &(*dtab) << "\n";
240  (*dtab)[order].print(cfg);
241  }
242 };
243 
252 template <class Functor>
253 struct PackWrap {
254  Functor F;
255  PackWrap(const Functor &F) : F(F) {}
258  template <class T>
259  std::vector<T> operator()(const std::vector<T> &xp) {
260  Index K = ScalarPack<SegmentRef>::size;
261  size_t n = xp.size() / K;
262  TMBAD_ASSERT2(n * K == xp.size(), "Invalid packed arguments");
263  std::vector<ad_segment> x(n);
264  for (size_t i = 0; i < n; i++) x[i] = unpack(xp, i);
265  ad_segment y = F(x);
266  ad_segment yp = pack(y);
267  std::vector<T> ans = concat(std::vector<ad_segment>(1, yp));
268  return ans;
269  }
272  bool operator()(const std::vector<Scalar> &xp) {
273  Index K = ScalarPack<SegmentRef>::size;
274  size_t n = xp.size() / K;
275  TMBAD_ASSERT2(n * K == xp.size(), "Invalid packed arguments");
276  std::vector<Scalar *> x(n);
277  for (size_t i = 0; i < n; i++) x[i] = unpack(xp, i);
278  return F(x);
279  }
280 };
281 
282 } // namespace TMBad
283 #endif // HAVE_CHECKPOINT_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:157
segment_ref< ReverseArgs, dx_write > dx_segment(Index from, Index size)
segment version
Definition: global.hpp:344
Vector class used by TMB.
Definition: vector.hpp:17
segment_ref< ReverseArgs, x_read > x_segment(Index from, Index size)
segment version
Definition: global.hpp:336
diff --git a/checkpoint__macro_8hpp_source.html b/checkpoint__macro_8hpp_source.html index a1222a091..f22f97195 100644 --- a/checkpoint__macro_8hpp_source.html +++ b/checkpoint__macro_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classdensity_1_1AR1__t-members.html b/classdensity_1_1AR1__t-members.html index 6fd23f5a8..24538414d 100644 --- a/classdensity_1_1AR1__t-members.html +++ b/classdensity_1_1AR1__t-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classdensity_1_1AR1__t.html b/classdensity_1_1AR1__t.html index 3822381dc..977051cb9 100644 --- a/classdensity_1_1AR1__t.html +++ b/classdensity_1_1AR1__t.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classdensity_1_1ARk__t-members.html b/classdensity_1_1ARk__t-members.html index a1eb46b41..5687d09e3 100644 --- a/classdensity_1_1ARk__t-members.html +++ b/classdensity_1_1ARk__t-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classdensity_1_1ARk__t.html b/classdensity_1_1ARk__t.html index 0606b6a4c..838c2481a 100644 --- a/classdensity_1_1ARk__t.html +++ b/classdensity_1_1ARk__t.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classdensity_1_1GMRF__t-members.html b/classdensity_1_1GMRF__t-members.html index 4d715420c..dab4d372f 100644 --- a/classdensity_1_1GMRF__t-members.html +++ b/classdensity_1_1GMRF__t-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classdensity_1_1GMRF__t.html b/classdensity_1_1GMRF__t.html index 4d1af7570..dea33287e 100644 --- a/classdensity_1_1GMRF__t.html +++ b/classdensity_1_1GMRF__t.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classdensity_1_1MVNORM__t-members.html b/classdensity_1_1MVNORM__t-members.html index daf90c38c..8d1b1027a 100644 --- a/classdensity_1_1MVNORM__t-members.html +++ b/classdensity_1_1MVNORM__t-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classdensity_1_1MVNORM__t.html b/classdensity_1_1MVNORM__t.html index b3097fa2b..517612e6e 100644 --- a/classdensity_1_1MVNORM__t.html +++ b/classdensity_1_1MVNORM__t.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classdensity_1_1N01-members.html b/classdensity_1_1N01-members.html index 3dcd9b375..50f7a19e1 100644 --- a/classdensity_1_1N01-members.html +++ b/classdensity_1_1N01-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classdensity_1_1N01.html b/classdensity_1_1N01.html index af66603c2..978bd064c 100644 --- a/classdensity_1_1N01.html +++ b/classdensity_1_1N01.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classdensity_1_1PROJ__t-members.html b/classdensity_1_1PROJ__t-members.html index 05a7db235..ee1bb22ae 100644 --- a/classdensity_1_1PROJ__t-members.html +++ b/classdensity_1_1PROJ__t-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classdensity_1_1PROJ__t.html b/classdensity_1_1PROJ__t.html index 0c52458a0..9ac719ff1 100644 --- a/classdensity_1_1PROJ__t.html +++ b/classdensity_1_1PROJ__t.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classdensity_1_1SCALE__t-members.html b/classdensity_1_1SCALE__t-members.html index addad626a..f5febbbb6 100644 --- a/classdensity_1_1SCALE__t-members.html +++ b/classdensity_1_1SCALE__t-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classdensity_1_1SCALE__t.html b/classdensity_1_1SCALE__t.html index 2448838ff..e279a4523 100644 --- a/classdensity_1_1SCALE__t.html +++ b/classdensity_1_1SCALE__t.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classdensity_1_1SEPARABLE__t-members.html b/classdensity_1_1SEPARABLE__t-members.html index e871c9199..2d4f8708c 100644 --- a/classdensity_1_1SEPARABLE__t-members.html +++ b/classdensity_1_1SEPARABLE__t-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classdensity_1_1SEPARABLE__t.html b/classdensity_1_1SEPARABLE__t.html index 52f2322a4..593167776 100644 --- a/classdensity_1_1SEPARABLE__t.html +++ b/classdensity_1_1SEPARABLE__t.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classdensity_1_1UNSTRUCTURED__CORR__t-members.html b/classdensity_1_1UNSTRUCTURED__CORR__t-members.html index 1f9a751e6..3d4f0af6c 100644 --- a/classdensity_1_1UNSTRUCTURED__CORR__t-members.html +++ b/classdensity_1_1UNSTRUCTURED__CORR__t-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classdensity_1_1UNSTRUCTURED__CORR__t.html b/classdensity_1_1UNSTRUCTURED__CORR__t.html index dbe9fe525..527c5366c 100644 --- a/classdensity_1_1UNSTRUCTURED__CORR__t.html +++ b/classdensity_1_1UNSTRUCTURED__CORR__t.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classdensity_1_1VECSCALE__t-members.html b/classdensity_1_1VECSCALE__t-members.html index f8b9c44ea..2b6c1740f 100644 --- a/classdensity_1_1VECSCALE__t-members.html +++ b/classdensity_1_1VECSCALE__t-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classdensity_1_1VECSCALE__t.html b/classdensity_1_1VECSCALE__t.html index 8cf70f091..b436f2346 100644 --- a/classdensity_1_1VECSCALE__t.html +++ b/classdensity_1_1VECSCALE__t.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classdensity_1_1contAR2__t-members.html b/classdensity_1_1contAR2__t-members.html index c196ab4f3..01ddb6673 100644 --- a/classdensity_1_1contAR2__t-members.html +++ b/classdensity_1_1contAR2__t-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classdensity_1_1contAR2__t.html b/classdensity_1_1contAR2__t.html index e56983753..8e7052cd2 100644 --- a/classdensity_1_1contAR2__t.html +++ b/classdensity_1_1contAR2__t.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classes.html b/classes.html index f30b57223..6298d2621 100644 --- a/classes.html +++ b/classes.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classtmbutils_1_1order-members.html b/classtmbutils_1_1order-members.html index e14552cd2..717b5a220 100644 --- a/classtmbutils_1_1order-members.html +++ b/classtmbutils_1_1order-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classtmbutils_1_1order.html b/classtmbutils_1_1order.html index afc38b9f3..5cfd000f9 100644 --- a/classtmbutils_1_1order.html +++ b/classtmbutils_1_1order.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classtmbutils_1_1splinefun-members.html b/classtmbutils_1_1splinefun-members.html index 8aea837a0..db60a2879 100644 --- a/classtmbutils_1_1splinefun-members.html +++ b/classtmbutils_1_1splinefun-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/classtmbutils_1_1splinefun.html b/classtmbutils_1_1splinefun.html index 273c4bc8a..ada545c64 100644 --- a/classtmbutils_1_1splinefun.html +++ b/classtmbutils_1_1splinefun.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/code__generator_8hpp_source.html b/code__generator_8hpp_source.html index 3e0f4aad9..84a023c80 100644 --- a/code__generator_8hpp_source.html +++ b/code__generator_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
@@ -73,6 +73,6 @@
code_generator.hpp
-
1 #ifndef HAVE_CODE_GENERATOR_HPP
2 #define HAVE_CODE_GENERATOR_HPP
3 // Autogenerated - do not edit by hand !
4 #include <fstream>
5 #include <iostream>
6 #include <sstream>
7 #include "global.hpp"
8 
9 namespace TMBad {
10 
11 void searchReplace(std::string& str, const std::string& oldStr,
12  const std::string& newStr);
13 
14 struct code_config {
15  bool asm_comments;
16  bool gpu;
17  std::string indent;
18  std::string header_comment;
19  std::string float_str;
20  std::ostream* cout;
21  std::string float_ptr();
22  std::string void_str();
23  void init_code();
24  void write_header_comment();
25  code_config();
26 };
27 
28 void write_common(std::ostringstream& buffer, code_config cfg, size_t node);
29 
30 void write_forward(global& glob, code_config cfg = code_config());
31 
32 void write_reverse(global& glob, code_config cfg = code_config());
33 
34 void write_all(global glob, code_config cfg = code_config());
35 
36 } // namespace TMBad
37 #endif // HAVE_CODE_GENERATOR_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:153
+
1 #ifndef HAVE_CODE_GENERATOR_HPP
2 #define HAVE_CODE_GENERATOR_HPP
3 // Autogenerated - do not edit by hand !
4 #include <fstream>
5 #include <iostream>
6 #include <sstream>
7 #include "global.hpp"
8 
9 namespace TMBad {
10 
11 void searchReplace(std::string& str, const std::string& oldStr,
12  const std::string& newStr);
13 
14 struct code_config {
15  bool asm_comments;
16  bool gpu;
17  std::string indent;
18  std::string header_comment;
19  std::string float_str;
20  std::ostream* cout;
21  std::string float_ptr();
22  std::string void_str();
23  void init_code();
24  void write_header_comment();
25  code_config();
26 };
27 
28 void write_common(std::ostringstream& buffer, code_config cfg, size_t node);
29 
30 void write_forward(global& glob, code_config cfg = code_config());
31 
32 void write_reverse(global& glob, code_config cfg = code_config());
33 
34 void write_all(global glob, code_config cfg = code_config());
35 
36 } // namespace TMBad
37 #endif // HAVE_CODE_GENERATOR_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:157
License: GPL v2 diff --git a/compile_8hpp_source.html b/compile_8hpp_source.html index 9aa22c5f9..d9582f1bc 100644 --- a/compile_8hpp_source.html +++ b/compile_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
@@ -73,6 +73,6 @@
compile.hpp
-
1 #ifndef _WIN32
2 #ifndef HAVE_COMPILE_HPP
3 #define HAVE_COMPILE_HPP
4 // Autogenerated - do not edit by hand !
5 #include <dlfcn.h>
6 #include <stdlib.h>
7 #include "code_generator.hpp"
8 #include "global.hpp"
9 
10 namespace TMBad {
11 
12 void compile(global &glob, code_config cfg = code_config());
13 }
14 #endif // HAVE_COMPILE_HPP
15 #endif
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:153
+
1 #ifndef _WIN32
2 #ifndef HAVE_COMPILE_HPP
3 #define HAVE_COMPILE_HPP
4 // Autogenerated - do not edit by hand !
5 #include <dlfcn.h>
6 #include <stdlib.h>
7 #include "code_generator.hpp"
8 #include "global.hpp"
9 
10 namespace TMBad {
11 
12 void compile(global &glob, code_config cfg = code_config());
13 }
14 #endif // HAVE_COMPILE_HPP
15 #endif
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:157
License: GPL v2 diff --git a/compois_8cpp-example.html b/compois_8cpp-example.html index 8be5566cc..ada5cc81e 100644 --- a/compois_8cpp-example.html +++ b/compois_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/compois_8hpp_source.html b/compois_8hpp_source.html index 53b615e5d..0571b73ad 100644 --- a/compois_8hpp_source.html +++ b/compois_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/compression_8hpp_source.html b/compression_8hpp_source.html index 498bab483..43d2ae54c 100644 --- a/compression_8hpp_source.html +++ b/compression_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
@@ -73,7 +73,7 @@
compression.hpp
-
1 #ifndef HAVE_COMPRESSION_HPP
2 #define HAVE_COMPRESSION_HPP
3 // Autogenerated - do not edit by hand !
4 #include "global.hpp"
5 #include "graph_transform.hpp" // subset
6 #include "radix.hpp" // first_occurance
7 
8 namespace TMBad {
9 
11 struct period {
13  size_t begin;
15  size_t size;
17  size_t rep;
18 };
19 
20 std::ostream &operator<<(std::ostream &os, const period &x);
21 
40 template <class T>
41 struct periodic {
42  const std::vector<T> &x;
47  periodic(const std::vector<T> &x, size_t max_period_size,
48  size_t min_period_rep = 2)
49  : x(x),
50  max_period_size(max_period_size),
51  min_period_rep(min_period_rep) {}
56  bool test_period(size_t start, size_t p) {
57  if (start + (p - 1) + p >= x.size()) return false;
58  for (size_t i = 0; i < p; i++) {
59  if (x[start + i] != x[start + i + p]) return false;
60  }
61  return true;
62  }
68  size_t numrep_period(size_t start, size_t p) {
69  size_t n = 1;
70  while (test_period(start, p)) {
71  n++;
72  start += p;
73  }
74  return n;
75  }
87  period find_best_period(size_t start) {
88  size_t p_best = -1, rep_best = 0;
89  for (size_t p = 1; p < max_period_size; p++) {
90  size_t rep = numrep_period(start, p);
91  if (rep > rep_best) {
92  p_best = p;
93  rep_best = rep;
94  p = p * rep;
95  }
96  }
97  period ans = {start, p_best, rep_best};
98  return ans;
99  }
100  std::vector<period> find_all() {
101  std::vector<period> ans;
102  for (size_t i = 0; i < x.size();) {
103  period result = find_best_period(i);
104  if (result.rep >= min_period_rep) {
105  ans.push_back(result);
106  i += result.size * result.rep;
107  } else {
108  i++;
109  }
110  }
111  return ans;
112  }
113 };
114 
115 template <class T>
116 struct matrix_view {
117  const T *x;
118  size_t nrow, ncol;
119  matrix_view(const T *x, size_t nrow, size_t ncol)
120  : x(x), nrow(nrow), ncol(ncol) {}
121  T operator()(size_t i, size_t j) const { return x[i + j * nrow]; }
122  size_t rows() const { return nrow; }
123  size_t cols() const { return ncol; }
124  template <class Diff_T>
125  std::vector<Diff_T> row_diff(size_t i) {
126  size_t nd = (cols() >= 1 ? cols() - 1 : 0);
127  std::vector<Diff_T> xd(nd);
128  for (size_t j = 1; j < cols(); j++)
129  xd[j - 1] = (Diff_T)(*this)(i, j) - (Diff_T)(*this)(i, j - 1);
130  return xd;
131  }
132 };
133 
147 std::vector<period> split_period(global *glob, period p,
148  size_t max_period_size);
149 
150 struct compressed_input {
151  typedef std::ptrdiff_t ptrdiff_t;
152 
153  mutable std::vector<ptrdiff_t> increment_pattern;
154  std::vector<Index> which_periodic;
155  std::vector<Index> period_sizes;
156  std::vector<Index> period_offsets;
157  std::vector<ptrdiff_t> period_data;
158 
159  Index n, m;
160  Index nrep;
161  Index np;
162 
163  mutable Index counter;
164  mutable std::vector<Index> inputs;
165  std::vector<Index> input_diff;
166  size_t input_size() const;
167  void update_increment_pattern() const;
168 
169  void increment(Args<> &args) const;
170 
171  void decrement(Args<> &args) const;
172  void forward_init(Args<> &args) const;
173  void reverse_init(Args<> &args);
174  void dependencies_intervals(Args<> &args, std::vector<Index> &lower,
175  std::vector<Index> &upper) const;
176 
177  size_t max_period_size;
178 
179  bool test_period(std::vector<ptrdiff_t> &x, size_t p);
180 
181  size_t find_shortest(std::vector<ptrdiff_t> &x);
182  compressed_input();
183  compressed_input(std::vector<Index> &x, size_t offset, size_t nrow, size_t m,
184  size_t ncol, size_t max_period_size);
185 };
186 
187 template <class T1, class T2>
188 struct compare_types {
189  const static bool equal = false;
190 };
191 template <class T>
192 struct compare_types<T, T> {
193  const static bool equal = true;
194 };
195 
196 void compress(global &glob, size_t max_period_size);
197 struct StackOp : global::SharedDynamicOperator {
198  typedef std::ptrdiff_t ptrdiff_t;
199  global::operation_stack opstack;
200  compressed_input ci;
201  StackOp(global *glob, period p, IndexPair ptr, size_t max_period_size);
203  StackOp(const StackOp &x);
204  void print(global::print_config cfg);
205  Index input_size() const;
206  Index output_size() const;
207  static const bool have_input_size_output_size = true;
214  template <class Type>
215  void forward(ForwardArgs<Type> args) {
216  ci.forward_init(args);
217 
218  size_t opstack_size = opstack.size();
219  for (size_t i = 0; i < ci.nrep; i++) {
220  for (size_t j = 0; j < opstack_size; j++) {
221  opstack[j]->forward_incr(args);
222  }
223  ci.increment(args);
224  }
225  if (compare_types<Type, Replay>::equal) {
226  compress(*get_glob(), ci.max_period_size);
227  }
228  }
229  void forward(ForwardArgs<Writer> &args);
236  template <class Type>
237  void reverse(ReverseArgs<Type> args) {
238  ci.reverse_init(args);
239  size_t opstack_size = opstack.size();
240  for (size_t i = 0; i < ci.nrep; i++) {
241  ci.decrement(args);
242 
243  for (size_t j = opstack_size; j > 0;) {
244  j--;
245  opstack[j]->reverse_decr(args);
246  }
247  }
248  if (compare_types<Type, Replay>::equal) {
249  compress(*get_glob(), ci.max_period_size);
250  }
251  }
252  void reverse(ReverseArgs<Writer> &args);
257  void dependencies(Args<> args, Dependencies &dep) const;
259  static const bool have_dependencies = true;
261  static const bool implicit_dependencies = true;
263  static const bool allow_remap = false;
264  const char *op_name();
265 };
266 
267 template <class T>
268 void trim(std::vector<T> &v, const T &elt) {
269  v.erase(std::remove(v.begin(), v.end(), elt), v.end());
270 }
271 
272 template <class T>
273 struct toposort_remap {
274  std::vector<T> &remap;
275  T i;
276  toposort_remap(std::vector<T> &remap, T i) : remap(remap), i(i) {}
277  void operator()(Index k) {
278  if (remap[k] >= remap[i]) {
279  remap[i] = i;
280  }
281  }
282 };
283 
308 void reorder_sub_expressions(global &glob);
309 
310 template <class T>
311 struct temporaries_remap {
312  std::vector<T> &remap;
313  T i;
314  temporaries_remap(std::vector<T> &remap, T i) : remap(remap), i(i) {}
315  void operator()(Index k) {
316  if (remap[k] == T(-1)) {
317  if (i > k + 1) remap[k] = i;
318  return;
319  }
320 
321  remap[k] = k;
322  }
323 };
324 
332 void reorder_temporaries(global &glob);
333 
334 template <class T>
335 struct dfs_add_to_stack {
336  std::vector<T> &stack;
337  std::vector<bool> &visited;
338  std::vector<T> &v2o;
339  dfs_add_to_stack(std::vector<T> &stack, std::vector<bool> &visited,
340  std::vector<T> &v2o)
341  : stack(stack), visited(visited), v2o(v2o) {}
342  void operator()(T var) {
343  Index op = v2o[var];
344  if (!visited[op]) {
345  stack.push_back(op);
346  visited[op] = true;
347  }
348  }
349 };
350 
354 void reorder_depth_first(global &glob);
355 
356 void compress(global &glob, size_t max_period_size = 1024);
357 
358 } // namespace TMBad
359 #endif // HAVE_COMPRESSION_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:153
+
1 #ifndef HAVE_COMPRESSION_HPP
2 #define HAVE_COMPRESSION_HPP
3 // Autogenerated - do not edit by hand !
4 #include "global.hpp"
5 #include "graph_transform.hpp" // subset
6 #include "radix.hpp" // first_occurance
7 
8 namespace TMBad {
9 
11 struct period {
13  size_t begin;
15  size_t size;
17  size_t rep;
18 };
19 
20 std::ostream &operator<<(std::ostream &os, const period &x);
21 
40 template <class T>
41 struct periodic {
42  const std::vector<T> &x;
47  periodic(const std::vector<T> &x, size_t max_period_size,
48  size_t min_period_rep = 2)
49  : x(x),
50  max_period_size(max_period_size),
51  min_period_rep(min_period_rep) {}
56  bool test_period(size_t start, size_t p) {
57  if (start + (p - 1) + p >= x.size()) return false;
58  for (size_t i = 0; i < p; i++) {
59  if (x[start + i] != x[start + i + p]) return false;
60  }
61  return true;
62  }
68  size_t numrep_period(size_t start, size_t p) {
69  size_t n = 1;
70  while (test_period(start, p)) {
71  n++;
72  start += p;
73  }
74  return n;
75  }
87  period find_best_period(size_t start) {
88  size_t p_best = -1, rep_best = 0;
89  for (size_t p = 1; p < max_period_size; p++) {
90  size_t rep = numrep_period(start, p);
91  if (rep > rep_best) {
92  p_best = p;
93  rep_best = rep;
94  p = p * rep;
95  }
96  }
97  period ans = {start, p_best, rep_best};
98  return ans;
99  }
100  std::vector<period> find_all() {
101  std::vector<period> ans;
102  for (size_t i = 0; i < x.size();) {
103  period result = find_best_period(i);
104  if (result.rep >= min_period_rep) {
105  ans.push_back(result);
106  i += result.size * result.rep;
107  } else {
108  i++;
109  }
110  }
111  return ans;
112  }
113 };
114 
115 template <class T>
116 struct matrix_view {
117  const T *x;
118  size_t nrow, ncol;
119  matrix_view(const T *x, size_t nrow, size_t ncol)
120  : x(x), nrow(nrow), ncol(ncol) {}
121  T operator()(size_t i, size_t j) const { return x[i + j * nrow]; }
122  size_t rows() const { return nrow; }
123  size_t cols() const { return ncol; }
124  template <class Diff_T>
125  std::vector<Diff_T> row_diff(size_t i) {
126  size_t nd = (cols() >= 1 ? cols() - 1 : 0);
127  std::vector<Diff_T> xd(nd);
128  for (size_t j = 1; j < cols(); j++)
129  xd[j - 1] = (Diff_T)(*this)(i, j) - (Diff_T)(*this)(i, j - 1);
130  return xd;
131  }
132 };
133 
147 std::vector<period> split_period(global *glob, period p,
148  size_t max_period_size);
149 
150 struct compressed_input {
151  typedef std::ptrdiff_t ptrdiff_t;
152 
153  mutable std::vector<ptrdiff_t> increment_pattern;
154  std::vector<Index> which_periodic;
155  std::vector<Index> period_sizes;
156  std::vector<Index> period_offsets;
157  std::vector<ptrdiff_t> period_data;
158 
159  Index n, m;
160  Index nrep;
161  Index np;
162 
163  mutable Index counter;
164  mutable std::vector<Index> inputs;
165  std::vector<Index> input_diff;
166  size_t input_size() const;
167  void update_increment_pattern() const;
168 
169  void increment(Args<> &args) const;
170 
171  void decrement(Args<> &args) const;
172  void forward_init(Args<> &args) const;
173  void reverse_init(Args<> &args);
174  void dependencies_intervals(Args<> &args, std::vector<Index> &lower,
175  std::vector<Index> &upper) const;
176 
177  size_t max_period_size;
178 
179  bool test_period(std::vector<ptrdiff_t> &x, size_t p);
180 
181  size_t find_shortest(std::vector<ptrdiff_t> &x);
182  compressed_input();
183  compressed_input(std::vector<Index> &x, size_t offset, size_t nrow, size_t m,
184  size_t ncol, size_t max_period_size);
185 };
186 
187 template <class T1, class T2>
188 struct compare_types {
189  const static bool equal = false;
190 };
191 template <class T>
192 struct compare_types<T, T> {
193  const static bool equal = true;
194 };
195 
196 void compress(global &glob, size_t max_period_size);
197 struct StackOp : global::SharedDynamicOperator {
198  typedef std::ptrdiff_t ptrdiff_t;
199  global::operation_stack opstack;
200  compressed_input ci;
201  StackOp(global *glob, period p, IndexPair ptr, size_t max_period_size);
203  StackOp(const StackOp &x);
204  void print(global::print_config cfg);
205  Index input_size() const;
206  Index output_size() const;
207  static const bool have_input_size_output_size = true;
214  template <class Type>
215  void forward(ForwardArgs<Type> args) {
216  ci.forward_init(args);
217 
218  size_t opstack_size = opstack.size();
219  for (size_t i = 0; i < ci.nrep; i++) {
220  for (size_t j = 0; j < opstack_size; j++) {
221  opstack[j]->forward_incr(args);
222  }
223  ci.increment(args);
224  }
225  if (compare_types<Type, Replay>::equal) {
226  compress(*get_glob(), ci.max_period_size);
227  }
228  }
229  void forward(ForwardArgs<Writer> &args);
236  template <class Type>
237  void reverse(ReverseArgs<Type> args) {
238  ci.reverse_init(args);
239  size_t opstack_size = opstack.size();
240  for (size_t i = 0; i < ci.nrep; i++) {
241  ci.decrement(args);
242 
243  for (size_t j = opstack_size; j > 0;) {
244  j--;
245  opstack[j]->reverse_decr(args);
246  }
247  }
248  if (compare_types<Type, Replay>::equal) {
249  compress(*get_glob(), ci.max_period_size);
250  }
251  }
252  void reverse(ReverseArgs<Writer> &args);
257  void dependencies(Args<> args, Dependencies &dep) const;
259  static const bool have_dependencies = true;
261  static const bool implicit_dependencies = true;
263  static const bool allow_remap = false;
264  const char *op_name();
265 };
266 
267 template <class T>
268 void trim(std::vector<T> &v, const T &elt) {
269  v.erase(std::remove(v.begin(), v.end(), elt), v.end());
270 }
271 
272 template <class T>
273 struct toposort_remap {
274  std::vector<T> &remap;
275  T i;
276  toposort_remap(std::vector<T> &remap, T i) : remap(remap), i(i) {}
277  void operator()(Index k) {
278  if (remap[k] >= remap[i]) {
279  remap[i] = i;
280  }
281  }
282 };
283 
308 void reorder_sub_expressions(global &glob);
309 
310 template <class T>
311 struct temporaries_remap {
312  std::vector<T> &remap;
313  T i;
314  temporaries_remap(std::vector<T> &remap, T i) : remap(remap), i(i) {}
315  void operator()(Index k) {
316  if (remap[k] == T(-1)) {
317  if (i > k + 1) remap[k] = i;
318  return;
319  }
320 
321  remap[k] = k;
322  }
323 };
324 
332 void reorder_temporaries(global &glob);
333 
334 template <class T>
335 struct dfs_add_to_stack {
336  std::vector<T> &stack;
337  std::vector<bool> &visited;
338  std::vector<T> &v2o;
339  dfs_add_to_stack(std::vector<T> &stack, std::vector<bool> &visited,
340  std::vector<T> &v2o)
341  : stack(stack), visited(visited), v2o(v2o) {}
342  void operator()(T var) {
343  Index op = v2o[var];
344  if (!visited[op]) {
345  stack.push_back(op);
346  visited[op] = true;
347  }
348  }
349 };
350 
354 void reorder_depth_first(global &glob);
355 
356 void compress(global &glob, size_t max_period_size = 1024);
357 
358 } // namespace TMBad
359 #endif // HAVE_COMPRESSION_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:157
void reorder_temporaries(global &glob)
Re-order computational graph to make it more compressible.
Definition: TMBad.cpp:567
size_t rep
Number of consecutive period replicates.
Definition: compression.hpp:17
size_t size
Size of the period.
Definition: compression.hpp:15
diff --git a/concat_8hpp.html b/concat_8hpp.html index 15c68cc7e..b68ad3f56 100644 --- a/concat_8hpp.html +++ b/concat_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/concat_8hpp_source.html b/concat_8hpp_source.html index 3ab828306..0ab753e66 100644 --- a/concat_8hpp_source.html +++ b/concat_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/config_8hpp.html b/config_8hpp.html index 941875cf8..feb9348e6 100644 --- a/config_8hpp.html +++ b/config_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/config_8hpp_source.html b/config_8hpp_source.html index 89b15399f..15c2966d1 100644 --- a/config_8hpp_source.html +++ b/config_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/convenience_8hpp.html b/convenience_8hpp.html index 805f7d7d5..8b68b7da0 100644 --- a/convenience_8hpp.html +++ b/convenience_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/convenience_8hpp_source.html b/convenience_8hpp_source.html index a181721a5..137ca6a2e 100644 --- a/convenience_8hpp_source.html +++ b/convenience_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/convert_8hpp.html b/convert_8hpp.html index 6737e1aa7..86f408262 100644 --- a/convert_8hpp.html +++ b/convert_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/convert_8hpp_source.html b/convert_8hpp_source.html index 81e0edbc6..402b4df48 100644 --- a/convert_8hpp_source.html +++ b/convert_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/d1mach_8cpp_source.html b/d1mach_8cpp_source.html index 51671587e..a14e2fd07 100644 --- a/d1mach_8cpp_source.html +++ b/d1mach_8cpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/density_8hpp.html b/density_8hpp.html index 60a8db978..4a36a25f1 100644 --- a/density_8hpp.html +++ b/density_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/density_8hpp_source.html b/density_8hpp_source.html index 76048f2a0..d58eb996b 100644 --- a/density_8hpp_source.html +++ b/density_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/dir_0ed320c20f1ad77c05be36888a08764e.html b/dir_0ed320c20f1ad77c05be36888a08764e.html index a61fa504b..81ceff374 100644 --- a/dir_0ed320c20f1ad77c05be36888a08764e.html +++ b/dir_0ed320c20f1ad77c05be36888a08764e.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/dir_210882393b2bcaa5854358e0a3bca581.html b/dir_210882393b2bcaa5854358e0a3bca581.html index 4fbce7227..d97f2a95d 100644 --- a/dir_210882393b2bcaa5854358e0a3bca581.html +++ b/dir_210882393b2bcaa5854358e0a3bca581.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/dir_37a1aac862973e81aabd6d4befa678e7.html b/dir_37a1aac862973e81aabd6d4befa678e7.html index 299b835e7..62fb5fc5f 100644 --- a/dir_37a1aac862973e81aabd6d4befa678e7.html +++ b/dir_37a1aac862973e81aabd6d4befa678e7.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/dir_3b792911f0128263c695c07e98433ff2.html b/dir_3b792911f0128263c695c07e98433ff2.html index 63518d0b4..ba574208d 100644 --- a/dir_3b792911f0128263c695c07e98433ff2.html +++ b/dir_3b792911f0128263c695c07e98433ff2.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/dir_5093d667102b00c01eb804fef6752bc8.html b/dir_5093d667102b00c01eb804fef6752bc8.html index 892967571..04592b71b 100644 --- a/dir_5093d667102b00c01eb804fef6752bc8.html +++ b/dir_5093d667102b00c01eb804fef6752bc8.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/dir_56cff852b5ffbcd99fdf7006be8eac05.html b/dir_56cff852b5ffbcd99fdf7006be8eac05.html index 9e588499c..5349f670a 100644 --- a/dir_56cff852b5ffbcd99fdf7006be8eac05.html +++ b/dir_56cff852b5ffbcd99fdf7006be8eac05.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/dir_7986f491c1def81944ebfd5732acd304.html b/dir_7986f491c1def81944ebfd5732acd304.html index 8df17ade6..9f1103fdf 100644 --- a/dir_7986f491c1def81944ebfd5732acd304.html +++ b/dir_7986f491c1def81944ebfd5732acd304.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/dir_83072381524e36aa65dff0229ec85225.html b/dir_83072381524e36aa65dff0229ec85225.html index 359bf0a99..da5ce56c6 100644 --- a/dir_83072381524e36aa65dff0229ec85225.html +++ b/dir_83072381524e36aa65dff0229ec85225.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/dir_a4729cf25b10cc47faf5469f3ab485c8.html b/dir_a4729cf25b10cc47faf5469f3ab485c8.html index e5b8e4c4b..9877cda95 100644 --- a/dir_a4729cf25b10cc47faf5469f3ab485c8.html +++ b/dir_a4729cf25b10cc47faf5469f3ab485c8.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/dir_af6748d7eaf25926ed9b20125d2a0e4f.html b/dir_af6748d7eaf25926ed9b20125d2a0e4f.html index 56074ae36..4d9087e23 100644 --- a/dir_af6748d7eaf25926ed9b20125d2a0e4f.html +++ b/dir_af6748d7eaf25926ed9b20125d2a0e4f.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/dir_c71304b49dff604a28ae32b79c530cf0.html b/dir_c71304b49dff604a28ae32b79c530cf0.html index 1f3a3daf1..67824de50 100644 --- a/dir_c71304b49dff604a28ae32b79c530cf0.html +++ b/dir_c71304b49dff604a28ae32b79c530cf0.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/dir_d1dccbcb6e2b278e28674b581d2efc15.html b/dir_d1dccbcb6e2b278e28674b581d2efc15.html index 4f136c3c5..44032f60a 100644 --- a/dir_d1dccbcb6e2b278e28674b581d2efc15.html +++ b/dir_d1dccbcb6e2b278e28674b581d2efc15.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/dir_da5438f8d8a9fd8231ec2bcfd3201752.html b/dir_da5438f8d8a9fd8231ec2bcfd3201752.html index 31976d81c..da6d6b201 100644 --- a/dir_da5438f8d8a9fd8231ec2bcfd3201752.html +++ b/dir_da5438f8d8a9fd8231ec2bcfd3201752.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/dir_e38e55e7e7264875fa8223b9800d88d2.html b/dir_e38e55e7e7264875fa8223b9800d88d2.html index 4d0401cc2..bcc3eb60c 100644 --- a/dir_e38e55e7e7264875fa8223b9800d88d2.html +++ b/dir_e38e55e7e7264875fa8223b9800d88d2.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/distributions_8hpp_source.html b/distributions_8hpp_source.html index d0ebb0d01..d8cd1e47f 100644 --- a/distributions_8hpp_source.html +++ b/distributions_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/distributions__R_8hpp.html b/distributions__R_8hpp.html index 46ebc4c68..43e33b011 100644 --- a/distributions__R_8hpp.html +++ b/distributions__R_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/distributions__R_8hpp_source.html b/distributions__R_8hpp_source.html index 64afb1203..a842ea949 100644 --- a/distributions__R_8hpp_source.html +++ b/distributions__R_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/dnorm_8hpp.html b/dnorm_8hpp.html index d141cf5de..d3c7067ab 100644 --- a/dnorm_8hpp.html +++ b/dnorm_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/dnorm_8hpp_source.html b/dnorm_8hpp_source.html index 194677eae..ec1a3c9d8 100644 --- a/dnorm_8hpp_source.html +++ b/dnorm_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/dox_2gamma_8cpp_source.html b/dox_2gamma_8cpp_source.html index d0c513fde..7aeef6b3c 100644 --- a/dox_2gamma_8cpp_source.html +++ b/dox_2gamma_8cpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/dpq_8h_source.html b/dpq_8h_source.html index 6a9f66ae8..af5c1e89f 100644 --- a/dpq_8h_source.html +++ b/dpq_8h_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/dynamic__data_8hpp_source.html b/dynamic__data_8hpp_source.html index 7e880465f..ce1eb4886 100644 --- a/dynamic__data_8hpp_source.html +++ b/dynamic__data_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/eigen__numtraits_8hpp_source.html b/eigen__numtraits_8hpp_source.html index db666bd82..733e951b7 100644 --- a/eigen__numtraits_8hpp_source.html +++ b/eigen__numtraits_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
@@ -73,7 +73,7 @@
eigen_numtraits.hpp
-
1 #ifndef HAVE_EIGEN_NUMTRAITS_HPP
2 #define HAVE_EIGEN_NUMTRAITS_HPP
3 // Autogenerated - do not edit by hand !
4 #include <Eigen/Core>
5 #include "global.hpp"
6 
7 namespace Eigen {
8 
9 template <>
10 struct NumTraits<TMBad::ad_aug> : NumTraits<TMBad::Scalar> {
11  typedef TMBad::ad_aug Real;
12  typedef TMBad::ad_aug NonInteger;
13  typedef TMBad::ad_aug Nested;
14 };
15 
16 template <>
17 struct NumTraits<TMBad::ad_adapt> : NumTraits<TMBad::Scalar> {
18  typedef TMBad::ad_adapt Real;
19  typedef TMBad::ad_adapt NonInteger;
20  typedef TMBad::ad_adapt Nested;
21 };
22 
23 template <typename BinOp>
24 struct ScalarBinaryOpTraits<TMBad::ad_aug, TMBad::Scalar, BinOp> {
25  typedef TMBad::ad_aug ReturnType;
26 };
27 template <typename BinOp>
28 struct ScalarBinaryOpTraits<TMBad::Scalar, TMBad::ad_aug, BinOp> {
29  typedef TMBad::ad_aug ReturnType;
30 };
31 
32 } // namespace Eigen
33 #endif // HAVE_EIGEN_NUMTRAITS_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:153
+
1 #ifndef HAVE_EIGEN_NUMTRAITS_HPP
2 #define HAVE_EIGEN_NUMTRAITS_HPP
3 // Autogenerated - do not edit by hand !
4 #include <Eigen/Core>
5 #include "global.hpp"
6 
7 namespace Eigen {
8 
9 template <>
10 struct NumTraits<TMBad::ad_aug> : NumTraits<TMBad::Scalar> {
11  typedef TMBad::ad_aug Real;
12  typedef TMBad::ad_aug NonInteger;
13  typedef TMBad::ad_aug Nested;
14 };
15 
16 template <>
17 struct NumTraits<TMBad::ad_adapt> : NumTraits<TMBad::Scalar> {
18  typedef TMBad::ad_adapt Real;
19  typedef TMBad::ad_adapt NonInteger;
20  typedef TMBad::ad_adapt Nested;
21 };
22 
23 template <typename BinOp>
24 struct ScalarBinaryOpTraits<TMBad::ad_aug, TMBad::Scalar, BinOp> {
25  typedef TMBad::ad_aug ReturnType;
26 };
27 template <typename BinOp>
28 struct ScalarBinaryOpTraits<TMBad::Scalar, TMBad::ad_aug, BinOp> {
29  typedef TMBad::ad_aug ReturnType;
30 };
31 
32 } // namespace Eigen
33 #endif // HAVE_EIGEN_NUMTRAITS_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:157
Enable weak comparison operators of an ad type.
Definition: global.hpp:2969
Augmented AD type.
Definition: global.hpp:2831
diff --git a/examples.html b/examples.html index 55b821b50..ba7cc5d22 100644 --- a/examples.html +++ b/examples.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/expm_8hpp_source.html b/expm_8hpp_source.html index 3cb8f38b2..5634e25ab 100644 --- a/expm_8hpp_source.html +++ b/expm_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/fft_8cpp-example.html b/fft_8cpp-example.html index 4c2be34fe..a89a76bc0 100644 --- a/fft_8cpp-example.html +++ b/fft_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/fft_8hpp_source.html b/fft_8hpp_source.html index 6590b4d00..c1b8bfddb 100644 --- a/fft_8hpp_source.html +++ b/fft_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/files.html b/files.html index b571a3a40..716e9208c 100644 --- a/files.html +++ b/files.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions.html b/functions.html index 16747f2aa..ab9ce22ca 100644 --- a/functions.html +++ b/functions.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_b.html b/functions_b.html index cf2e8a299..8fc886637 100644 --- a/functions_b.html +++ b/functions_b.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_c.html b/functions_c.html index 1cde7a58a..676cd0633 100644 --- a/functions_c.html +++ b/functions_c.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_d.html b/functions_d.html index 1be01174b..1d682eff2 100644 --- a/functions_d.html +++ b/functions_d.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_e.html b/functions_e.html index 7268982b8..aa8d6e703 100644 --- a/functions_e.html +++ b/functions_e.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_enum.html b/functions_enum.html index 604e14292..1de057396 100644 --- a/functions_enum.html +++ b/functions_enum.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_eval.html b/functions_eval.html index d9bd718bf..b55626086 100644 --- a/functions_eval.html +++ b/functions_eval.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_f.html b/functions_f.html index bd59aa580..fb3244317 100644 --- a/functions_f.html +++ b/functions_f.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func.html b/functions_func.html index fcfacf3c3..9618b4d57 100644 --- a/functions_func.html +++ b/functions_func.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func_b.html b/functions_func_b.html index 0c7da15f0..bd882e778 100644 --- a/functions_func_b.html +++ b/functions_func_b.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func_c.html b/functions_func_c.html index 39b3e8404..18a096546 100644 --- a/functions_func_c.html +++ b/functions_func_c.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func_d.html b/functions_func_d.html index 0745e7716..e89bafa3a 100644 --- a/functions_func_d.html +++ b/functions_func_d.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func_e.html b/functions_func_e.html index b1c1096e3..e955a9278 100644 --- a/functions_func_e.html +++ b/functions_func_e.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func_f.html b/functions_func_f.html index 15ddcc67a..3670fd612 100644 --- a/functions_func_f.html +++ b/functions_func_f.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func_g.html b/functions_func_g.html index 68c946833..b496c9c8f 100644 --- a/functions_func_g.html +++ b/functions_func_g.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func_h.html b/functions_func_h.html index 2b450297d..fbff8f514 100644 --- a/functions_func_h.html +++ b/functions_func_h.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func_i.html b/functions_func_i.html index d1b5f73b1..d51bcedbd 100644 --- a/functions_func_i.html +++ b/functions_func_i.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func_j.html b/functions_func_j.html index 7805fab6a..0288b822d 100644 --- a/functions_func_j.html +++ b/functions_func_j.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func_l.html b/functions_func_l.html index 479bb0030..8bc4fde2a 100644 --- a/functions_func_l.html +++ b/functions_func_l.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func_m.html b/functions_func_m.html index f85fb3151..f3de24e59 100644 --- a/functions_func_m.html +++ b/functions_func_m.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func_n.html b/functions_func_n.html index 75ff51c47..59d5b656f 100644 --- a/functions_func_n.html +++ b/functions_func_n.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func_o.html b/functions_func_o.html index de87b75b0..faec6cb1c 100644 --- a/functions_func_o.html +++ b/functions_func_o.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func_p.html b/functions_func_p.html index 22d56028b..0a24937e3 100644 --- a/functions_func_p.html +++ b/functions_func_p.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func_r.html b/functions_func_r.html index ccc4f6673..fd0415da5 100644 --- a/functions_func_r.html +++ b/functions_func_r.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func_s.html b/functions_func_s.html index 4d6adbfff..058ae57ea 100644 --- a/functions_func_s.html +++ b/functions_func_s.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func_t.html b/functions_func_t.html index bf83e5315..6799ca028 100644 --- a/functions_func_t.html +++ b/functions_func_t.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func_u.html b/functions_func_u.html index 2467769e4..6067e5996 100644 --- a/functions_func_u.html +++ b/functions_func_u.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func_v.html b/functions_func_v.html index 002e52ac9..6793ad2a9 100644 --- a/functions_func_v.html +++ b/functions_func_v.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func_w.html b/functions_func_w.html index 1024fcfac..f18acddae 100644 --- a/functions_func_w.html +++ b/functions_func_w.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func_x.html b/functions_func_x.html index 83734ade5..6751a427b 100644 --- a/functions_func_x.html +++ b/functions_func_x.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_func_y.html b/functions_func_y.html index ecd0650f2..0223a3487 100644 --- a/functions_func_y.html +++ b/functions_func_y.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_g.html b/functions_g.html index e8fe8f625..100634ee4 100644 --- a/functions_g.html +++ b/functions_g.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_h.html b/functions_h.html index f8a63a80c..67a130c3b 100644 --- a/functions_h.html +++ b/functions_h.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_i.html b/functions_i.html index 356543c26..f8198ece3 100644 --- a/functions_i.html +++ b/functions_i.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_j.html b/functions_j.html index 7f30808cb..5f3723421 100644 --- a/functions_j.html +++ b/functions_j.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_k.html b/functions_k.html index 586603c0b..0155ebb4b 100644 --- a/functions_k.html +++ b/functions_k.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_l.html b/functions_l.html index 17a5be2d5..d2b3b3f0e 100644 --- a/functions_l.html +++ b/functions_l.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_m.html b/functions_m.html index 72c1022c3..5730ff207 100644 --- a/functions_m.html +++ b/functions_m.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_n.html b/functions_n.html index 803523081..4537f1ef0 100644 --- a/functions_n.html +++ b/functions_n.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_o.html b/functions_o.html index 92e28f37e..9c799405e 100644 --- a/functions_o.html +++ b/functions_o.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_p.html b/functions_p.html index 10f04db63..3977d0ed6 100644 --- a/functions_p.html +++ b/functions_p.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_r.html b/functions_r.html index cd26b4fa8..e75a58690 100644 --- a/functions_r.html +++ b/functions_r.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_s.html b/functions_s.html index bed2c1ee3..5df6f9d1e 100644 --- a/functions_s.html +++ b/functions_s.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_t.html b/functions_t.html index efd14ea55..f51794dd1 100644 --- a/functions_t.html +++ b/functions_t.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_type.html b/functions_type.html index d227f751d..3f8776ecb 100644 --- a/functions_type.html +++ b/functions_type.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_u.html b/functions_u.html index d94279afd..21b3ca5be 100644 --- a/functions_u.html +++ b/functions_u.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_v.html b/functions_v.html index 8007d59cf..87798fd47 100644 --- a/functions_v.html +++ b/functions_v.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_vars.html b/functions_vars.html index 9a73018e0..686d6b514 100644 --- a/functions_vars.html +++ b/functions_vars.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_w.html b/functions_w.html index 734951da7..85ca85e38 100644 --- a/functions_w.html +++ b/functions_w.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_x.html b/functions_x.html index 535f4278c..63057ddf3 100644 --- a/functions_x.html +++ b/functions_x.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/functions_y.html b/functions_y.html index c8ad1bc12..b75defa7d 100644 --- a/functions_y.html +++ b/functions_y.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/gamma_2undefs_8h_source.html b/gamma_2undefs_8h_source.html index ce856b6a7..be6a80d44 100644 --- a/gamma_2undefs_8h_source.html +++ b/gamma_2undefs_8h_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/gamma_8hpp_source.html b/gamma_8hpp_source.html index e0a02b5ef..c130f23f8 100644 --- a/gamma_8hpp_source.html +++ b/gamma_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/gamma__cody_8cpp_source.html b/gamma__cody_8cpp_source.html index bc401df8a..6c536e7e7 100644 --- a/gamma__cody_8cpp_source.html +++ b/gamma__cody_8cpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/getListElement_8hpp_source.html b/getListElement_8hpp_source.html index 79abdf132..b266cf3f2 100644 --- a/getListElement_8hpp_source.html +++ b/getListElement_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
@@ -73,5 +73,5 @@
getListElement.hpp
-
1 /* Helpers, to check that data and parameters are of the right types.
2  "RObjectTester" denotes the type of a pointer to a test function.
3  Examples of test functions are "isMatrix", "Rf_isArray", "isNumeric",
4  etc (see Rinternals.h).
5 */
6 typedef Rboolean (*RObjectTester)(SEXP);
7 #ifdef WITH_LIBTMB
8 void RObjectTestExpectedType(SEXP x, RObjectTester expectedtype, const char *nam);
9 Rboolean isValidSparseMatrix(SEXP x);
10 Rboolean isNumericScalar(SEXP x);
11 #else
12 void RObjectTestExpectedType(SEXP x, RObjectTester expectedtype, const char *nam){
13  if(expectedtype != NULL){
14  if(!expectedtype(x)){
15  if(Rf_isNull(x)){
16  Rf_warning("Expected object. Got NULL.");
17  }
18  Rf_error("Error when reading the variable: '%s'. Please check data and parameters.",nam);
19  }
20  }
21 }
22 Rboolean isValidSparseMatrix(SEXP x){
23  if(!Rf_inherits(x,"dgTMatrix"))Rf_warning("Expected sparse matrix of class 'dgTMatrix'.");
24  return Rf_inherits(x,"dgTMatrix");
25 }
26 Rboolean isNumericScalar(SEXP x){
27  if(LENGTH(x)!=1){
28  Rf_warning("Expected scalar. Got length=%i",LENGTH(x));
29  return FALSE;
30  }
31  return Rf_isNumeric(x);
32 }
33 #endif
34 
36 #ifdef WITH_LIBTMB
37 SEXP getListElement(SEXP list, const char *str, RObjectTester expectedtype=NULL);
38 int getListInteger(SEXP list, const char *str, int default_value = 0);
39 #else
40 SEXP getListElement(SEXP list, const char *str, RObjectTester expectedtype=NULL)
41 {
42  if(config.debug.getListElement)std::cout << "getListElement: " << str << " ";
43  SEXP elmt = R_NilValue, names = Rf_getAttrib(list, R_NamesSymbol);
44  int i;
45  for (i = 0; i < Rf_length(list); i++)
46  if(strcmp(CHAR(STRING_ELT(names, i)), str) == 0)
47  {
48  elmt = VECTOR_ELT(list, i);
49  break;
50  }
51  if(config.debug.getListElement)std::cout << "Length: " << LENGTH(elmt) << " ";
52  if(config.debug.getListElement)std::cout << "\n";
53  RObjectTestExpectedType(elmt, expectedtype, str);
54  return elmt;
55 }
56 int getListInteger(SEXP list, const char *str, int default_value = 0) {
57  SEXP tmp = getListElement(list, str);
58  if ( tmp == R_NilValue ) {
59  Rf_warning("Missing integer variable '%s'. Using default: %d. (Perhaps you are using a model object created with an old TMB version?)", str, default_value);
60  return default_value;
61  }
62  return INTEGER(tmp)[0];
63 }
64 #endif
+
1 /* Helpers, to check that data and parameters are of the right types.
2  "RObjectTester" denotes the type of a pointer to a test function.
3  Examples of test functions are "isMatrix", "Rf_isArray", "isNumeric",
4  etc (see Rinternals.h).
5 */
6 typedef Rboolean (*RObjectTester)(SEXP);
7 #ifdef WITH_LIBTMB
8 void RObjectTestExpectedType(SEXP x, RObjectTester expectedtype, const char *nam);
9 Rboolean isValidSparseMatrix(SEXP x);
10 Rboolean isNumericScalar(SEXP x);
11 #else
12 void RObjectTestExpectedType(SEXP x, RObjectTester expectedtype, const char *nam){
13  if(expectedtype != NULL){
14  if(!expectedtype(x)){
15  if(Rf_isNull(x)){
16  Rf_warning("Expected object. Got NULL.");
17  }
18  if(Rf_isNumeric(x) && !Rf_isReal(x)) {
19  Rf_warning("NOTE: 'storage.mode(%s)' must be 'double' when attribute 'check.passed' is set for 'data'.",nam);
20  }
21  Rf_error("Error when reading the variable: '%s'. Please check data and parameters.",nam);
22  }
23  }
24 }
25 Rboolean isValidSparseMatrix(SEXP x){
26  if(!Rf_inherits(x,"dgTMatrix"))Rf_warning("Expected sparse matrix of class 'dgTMatrix'.");
27  return Rf_inherits(x,"dgTMatrix");
28 }
29 Rboolean isNumericScalar(SEXP x){
30  if(LENGTH(x)!=1){
31  Rf_warning("Expected scalar. Got length=%i",LENGTH(x));
32  return FALSE;
33  }
34  return Rf_isReal(x);
35 }
36 #endif
37 
39 #ifdef WITH_LIBTMB
40 SEXP getListElement(SEXP list, const char *str, RObjectTester expectedtype=NULL);
41 int getListInteger(SEXP list, const char *str, int default_value = 0);
42 #else
43 SEXP getListElement(SEXP list, const char *str, RObjectTester expectedtype=NULL)
44 {
45  if(config.debug.getListElement)std::cout << "getListElement: " << str << " ";
46  SEXP elmt = R_NilValue, names = Rf_getAttrib(list, R_NamesSymbol);
47  int i;
48  for (i = 0; i < Rf_length(list); i++)
49  if(strcmp(CHAR(STRING_ELT(names, i)), str) == 0)
50  {
51  elmt = VECTOR_ELT(list, i);
52  break;
53  }
54  if(config.debug.getListElement)std::cout << "Length: " << LENGTH(elmt) << " ";
55  if(config.debug.getListElement)std::cout << "\n";
56  RObjectTestExpectedType(elmt, expectedtype, str);
57  return elmt;
58 }
59 int getListInteger(SEXP list, const char *str, int default_value = 0) {
60  SEXP tmp = getListElement(list, str);
61  if ( tmp == R_NilValue ) {
62  Rf_warning("Missing integer variable '%s'. Using default: %d. (Perhaps you are using a model object created with an old TMB version?)", str, default_value);
63  return default_value;
64  }
65  return INTEGER(tmp)[0];
66 }
67 #endif
License: GPL v2 diff --git a/global_8hpp_source.html b/global_8hpp_source.html index 7352b2662..3e493f6f5 100644 --- a/global_8hpp_source.html +++ b/global_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
@@ -73,7 +73,7 @@
global.hpp
-
1 #ifndef HAVE_GLOBAL_HPP
2 #define HAVE_GLOBAL_HPP
3 // Autogenerated - do not edit by hand !
4 #include <algorithm>
5 #include <cmath>
6 #include <ctime>
7 #include <iomanip>
8 #include <iostream>
9 #include <limits>
10 #include <set>
11 #include <sstream>
12 #include <valarray>
13 #include <vector>
14 #include "config.hpp"
15 #include "radix.hpp"
16 
20 namespace TMBad {
21 
22 typedef TMBAD_HASH_TYPE hash_t;
23 typedef TMBAD_INDEX_TYPE Index;
24 typedef TMBAD_SCALAR_TYPE Scalar;
25 typedef std::pair<Index, Index> IndexPair;
26 typedef TMBAD_INDEX_VECTOR IndexVector;
27 
28 struct global;
31 global *get_glob();
32 
33 template <class T>
34 std::ostream &operator<<(std::ostream &out, const std::vector<T> &v) {
35  out << "{";
36  size_t last = v.size() - 1;
37  for (size_t i = 0; i < v.size(); ++i) {
38  out << v[i];
39  if (i != last) out << ", ";
40  }
41  out << "}";
42  return out;
43 }
44 
46 template <class T>
47 struct intervals {
48  struct ep : std::pair<T, bool> {
49  bool left() const { return !this->second; }
50  ep(T x, bool type) : std::pair<T, bool>(x, type) {}
51  operator T() { return this->first; }
52  };
53  std::set<ep> x;
54  typedef typename std::set<ep>::iterator iterator;
58  bool insert(T a, T b) {
59  ep x1(a, false);
60  ep x2(b, true);
61  iterator it1 = x.upper_bound(x1);
62  iterator it2 = x.lower_bound(x2);
63 
64  bool insert_x1 = (it1 == x.end()) || it1->left();
65  bool insert_x2 = (it2 == x.end()) || it2->left();
66 
67  bool change = (it1 != it2) || insert_x1;
68 
69  if (it1 != it2) {
70  x.erase(it1, it2);
71  }
72 
73  if (insert_x1) x.insert(x1);
74  if (insert_x2) x.insert(x2);
75  return change;
76  }
78  template <class F>
79  F &apply(F &f) const {
80  for (iterator it = x.begin(); it != x.end();) {
81  ep a = *it;
82  ++it;
83  ep b = *it;
84  ++it;
85  f(a, b);
86  }
87  return f;
88  }
89  struct print_interval {
90  void operator()(T a, T b) { Rcout << "[ " << a << " , " << b << " ] "; }
91  };
92  void print() {
93  print_interval f;
94  this->apply(f);
95  Rcout << "\n";
96  }
97 };
98 
99 struct Dependencies : std::vector<Index> {
100  typedef std::vector<Index> Base;
101  std::vector<std::pair<Index, Index> > I;
102  Dependencies();
103  void clear();
104  void add_interval(Index a, Index b);
105  void add_segment(Index start, Index size);
106 
107  void monotone_transform_inplace(const std::vector<Index> &x);
108 
109  template <class F>
110  F &apply(F &f) {
111  for (size_t i = 0; i < this->size(); i++) f((*this)[i]);
112  for (size_t i = 0; i < I.size(); i++) {
113  for (Index j = I[i].first; j <= I[i].second; j++) {
114  f(j);
115  }
116  }
117  return f;
118  }
119 
120  template <class F>
121  F &apply_if_not_visited(F &f, intervals<Index> &visited) {
122  for (size_t i = 0; i < this->size(); i++) f((*this)[i]);
123  for (size_t i = 0; i < I.size(); i++) {
124  if (visited.insert(I[i].first, I[i].second)) {
125  for (Index j = I[i].first; j <= I[i].second; j++) {
126  f(j);
127  }
128  }
129  }
130  return f;
131  }
132 
133  bool any(const std::vector<bool> &x) const;
134 };
135 
138 enum ArrayAccess { x_read, y_read, y_write, dx_read, dx_write, dy_read };
139 template <class Args, ArrayAccess What>
140 struct Accessor {};
141 template <class Args>
142 struct Accessor<Args, x_read> {
143  typename Args::value_type operator()(const Args &args, Index j) const {
144  return args.x(j);
145  }
146 };
147 template <class Args>
148 struct Accessor<Args, y_read> {
149  typename Args::value_type operator()(const Args &args, Index j) const {
150  return args.y(j);
151  }
152 };
153 template <class Args>
154 struct Accessor<Args, y_write> {
155  typename Args::value_type &operator()(Args &args, Index j) {
156  return args.y(j);
157  }
158 };
159 template <class Args>
160 struct Accessor<Args, dx_read> {
161  typename Args::value_type operator()(const Args &args, Index j) const {
162  return args.dx(j);
163  }
164 };
165 template <class Args>
166 struct Accessor<Args, dx_write> {
167  typename Args::value_type &operator()(Args &args, Index j) {
168  return args.dx(j);
169  }
170 };
171 template <class Args>
172 struct Accessor<Args, dy_read> {
173  typename Args::value_type operator()(const Args &args, Index j) const {
174  return args.dy(j);
175  }
176 };
177 
183 template <class T>
185  const std::vector<T> &x;
186  const std::vector<Index> &i;
187  IndirectAccessor(const std::vector<T> &x, const std::vector<Index> &i)
188  : x(x), i(i) {}
189  T operator[](size_t j) const { return x[i[j]]; }
190  size_t size() const { return i.size(); }
191  operator std::vector<T>() const {
192  std::vector<T> ans(i.size());
193  for (size_t j = 0; j < ans.size(); j++) ans[j] = (*this)[j];
194  return ans;
195  }
196 };
197 
205 template <class Args, ArrayAccess What>
206 struct segment_ref {
207  typedef typename Args::value_type Type;
208  Accessor<Args, What> element_access;
209  Args args;
210  Index from, n;
211  segment_ref(const Args &args, Index from, Index n)
212  : args(args), from(from), n(n) {}
213  template <class Other>
214  operator Other() {
215  Other ans(n);
216  for (size_t i = 0; i < n; i++) {
217  ans[i] = element_access(args, from + i);
218  }
219  return ans;
220  }
221  Type operator[](Index i) const { return element_access(args, from + i); }
222  size_t size() const { return n; }
223  template <class Other>
224  segment_ref &operator=(const Other &other) {
225  for (size_t i = 0; i < n; i++) {
226  element_access(args, from + i) = other[i];
227  }
228  return *this;
229  }
230  template <class Other>
231  segment_ref &operator+=(const Other &other) {
232  for (size_t i = 0; i < n; i++) {
233  element_access(args, from + i) += other[i];
234  }
235  return *this;
236  }
237  template <class Other>
238  segment_ref &operator-=(const Other &other) {
239  for (size_t i = 0; i < n; i++) {
240  element_access(args, from + i) -= other[i];
241  }
242  return *this;
243  }
244 };
245 
255 template <class dummy = void>
256 struct Args {
258  const Index *inputs;
263  IndexPair ptr;
265  Index input(Index j) const { return inputs[ptr.first + j]; }
267  Index output(Index j) const { return ptr.second + j; }
268  Args(const IndexVector &inputs) : inputs(inputs.data()) {
269  ptr.first = 0;
270  ptr.second = 0;
271  }
272 };
278 template <class Type>
279 struct ForwardArgs : Args<> {
280  typedef std::vector<Type> TypeVector;
281  typedef Type value_type;
282  Type *values;
283  global *glob_ptr;
285  Type x(Index j) const { return values[input(j)]; }
287  Type &y(Index j) { return values[output(j)]; }
289  Type *x_ptr(Index j) { return &values[input(j)]; }
291  Type *y_ptr(Index j) { return &values[output(j)]; }
293  segment_ref<ForwardArgs, x_read> x_segment(Index from, Index size) {
294  return segment_ref<ForwardArgs, x_read>(*this, from, size);
295  }
298  return segment_ref<ForwardArgs, y_write>(*this, from, size);
299  }
300  ForwardArgs(const IndexVector &inputs, TypeVector &values,
301  global *glob_ptr = NULL)
302  : Args<>(inputs), values(values.data()), glob_ptr(glob_ptr) {}
303 };
310 template <class Type>
311 struct ReverseArgs : Args<> {
312  typedef std::vector<Type> TypeVector;
313  typedef Type value_type;
314  Type *values;
315  Type *derivs;
316  global *glob_ptr;
318  Type x(Index j) const { return values[input(j)]; }
320  Type y(Index j) const { return values[output(j)]; }
323  Type &dx(Index j) { return derivs[input(j)]; }
326  Type dy(Index j) const { return derivs[output(j)]; }
328  Type *x_ptr(Index j) { return &values[input(j)]; }
330  Type *y_ptr(Index j) { return &values[output(j)]; }
332  Type *dx_ptr(Index j) { return &derivs[input(j)]; }
334  Type *dy_ptr(Index j) { return &derivs[output(j)]; }
336  segment_ref<ReverseArgs, x_read> x_segment(Index from, Index size) {
337  return segment_ref<ReverseArgs, x_read>(*this, from, size);
338  }
340  segment_ref<ReverseArgs, y_read> y_segment(Index from, Index size) {
341  return segment_ref<ReverseArgs, y_read>(*this, from, size);
342  }
345  return segment_ref<ReverseArgs, dx_write>(*this, from, size);
346  }
349  return segment_ref<ReverseArgs, dy_read>(*this, from, size);
350  }
351  ReverseArgs(const IndexVector &inputs, TypeVector &values, TypeVector &derivs,
352  global *glob_ptr = NULL)
353  : Args<>(inputs),
354  values(values.data()),
355  derivs(derivs.data()),
356  glob_ptr(glob_ptr) {
357  ptr.first = (Index)inputs.size();
358  ptr.second = (Index)values.size();
359  }
360 };
361 
362 template <>
363 struct ForwardArgs<bool> : Args<> {
364  typedef std::vector<bool> BoolVector;
365  BoolVector &values;
366  intervals<Index> &marked_intervals;
367  bool x(Index j) { return values[input(j)]; }
368  BoolVector::reference y(Index j) { return values[output(j)]; }
369  ForwardArgs(const IndexVector &inputs, BoolVector &values,
370  intervals<Index> &marked_intervals)
371  : Args<>(inputs), values(values), marked_intervals(marked_intervals) {}
373  template <class Operator>
374  bool any_marked_input(const Operator &op) {
375  if (Operator::implicit_dependencies) {
376  Dependencies dep;
377  op.dependencies(*this, dep);
378  return dep.any(values);
379  } else {
380  Index ninput = op.input_size();
381  for (Index j = 0; j < ninput; j++)
382  if (x(j)) return true;
383  }
384  return false;
385  }
387  template <class Operator>
388  void mark_all_output(const Operator &op) {
389  if (Operator::updating && op.output_size() == 0) {
390  Dependencies dep;
391  op.dependencies_updating(*this, dep);
392 
393  for (size_t i = 0; i < dep.size(); i++) values[dep[i]] = true;
394 
395  for (size_t i = 0; i < dep.I.size(); i++) {
396  Index a = dep.I[i].first;
397  Index b = dep.I[i].second;
398  bool insert = marked_intervals.insert(a, b);
399  if (insert) {
400  for (Index j = a; j <= b; j++) {
401  values[j] = true;
402  }
403  }
404  }
405  } else {
406  Index noutput = op.output_size();
407  for (Index j = 0; j < noutput; j++) y(j) = true;
408  }
409  }
411  template <class Operator>
412  bool mark_dense(const Operator &op) {
413  if (any_marked_input(op)) {
414  mark_all_output(op);
415  return true;
416  }
417  return false;
418  }
419 };
420 
421 template <>
422 struct ReverseArgs<bool> : Args<> {
423  typedef std::vector<bool> BoolVector;
424  BoolVector &values;
425  intervals<Index> &marked_intervals;
426  BoolVector::reference x(Index j) { return values[input(j)]; }
427  bool y(Index j) { return values[output(j)]; }
428  ReverseArgs(IndexVector &inputs, BoolVector &values,
429  intervals<Index> &marked_intervals)
430  : Args<>(inputs), values(values), marked_intervals(marked_intervals) {
431  ptr.first = (Index)inputs.size();
432  ptr.second = (Index)values.size();
433  }
435  template <class Operator>
436  bool any_marked_output(const Operator &op) {
437  if (Operator::elimination_protected) return true;
438  if (Operator::updating && op.output_size() == 0) {
439  Dependencies dep;
440  op.dependencies_updating(*this, dep);
441  return dep.any(values);
442  } else {
443  Index noutput = op.output_size();
444  for (Index j = 0; j < noutput; j++)
445  if (y(j)) return true;
446  }
447  return false;
448  }
450  template <class Operator>
451  void mark_all_input(const Operator &op) {
452  if (Operator::implicit_dependencies) {
453  Dependencies dep;
454  op.dependencies(*this, dep);
455 
456  for (size_t i = 0; i < dep.size(); i++) values[dep[i]] = true;
457 
458  for (size_t i = 0; i < dep.I.size(); i++) {
459  Index a = dep.I[i].first;
460  Index b = dep.I[i].second;
461  bool insert = marked_intervals.insert(a, b);
462  if (insert) {
463  for (Index j = a; j <= b; j++) {
464  values[j] = true;
465  }
466  }
467  }
468  } else {
469  Index ninput = op.input_size();
470  for (Index j = 0; j < ninput; j++) x(j) = true;
471  }
472  }
474  template <class Operator>
475  bool mark_dense(const Operator &op) {
476  if (any_marked_output(op)) {
477  mark_all_input(op);
478  return true;
479  }
480  return false;
481  }
482 };
483 
484 std::string tostr(const Index &x);
485 
486 std::string tostr(const Scalar &x);
487 
488 struct Writer : std::string {
489  static std::ostream *cout;
490  Writer(std::string str);
491  Writer(Scalar x);
492  Writer();
493 
494  template <class V>
495  std::string vinit(const V &x) {
496  std::string y = "{";
497  for (size_t i = 0; i < x.size(); i++)
498  y = y + (i == 0 ? "" : ",") + tostr(x[i]);
499  y = y + "}";
500  return y;
501  }
502 
503  std::string p(std::string x);
504  Writer operator+(const Writer &other);
505  Writer operator-(const Writer &other);
506  Writer operator-();
507  Writer operator*(const Writer &other);
508  Writer operator/(const Writer &other);
509 
510  Writer operator*(const Scalar &other);
511  Writer operator+(const Scalar &other);
512 
513  void operator=(const Writer &other);
514  void operator+=(const Writer &other);
515  void operator-=(const Writer &other);
516  void operator*=(const Writer &other);
517  void operator/=(const Writer &other);
518 
519  template <class T>
520  friend Writer &operator<<(Writer &w, const T &v) {
521  *cout << v;
522  return w;
523  }
524  template <class T>
525  friend Writer &operator<<(Writer &w, const std::valarray<T> &x) {
526  *cout << w.vinit(x);
527  return w;
528  }
529 };
530 
531 template <>
532 struct ForwardArgs<Writer> : ForwardArgs<Scalar> {
533  typedef std::vector<Scalar> ScalarVector;
534  typedef ForwardArgs<Scalar> Base;
536  bool const_literals;
538  bool indirect;
539  void set_indirect() {
540  indirect = true;
541  ptr.first = 0;
542  ptr.second = 0;
543  }
544  Writer xd(Index j) { return "v[" + tostr(input(j)) + "]"; }
545  Writer yd(Index j) { return "v[" + tostr(output(j)) + "]"; }
546  Writer xi(Index j) { return "v[i[" + tostr(Index(ptr.first + j)) + "]]"; }
547  Writer yi(Index j) { return "v[o[" + tostr(Index(ptr.second + j)) + "]]"; }
548  Writer x(Index j) { return (indirect ? xi(j) : xd(j)); }
549  Writer y(Index j) { return (indirect ? yi(j) : yd(j)); }
550  Writer y_const(Index j) {
551  TMBAD_ASSERT2(!indirect, "Attempt to write constants within loop?");
552  return tostr(Base::y(j));
553  }
554  ForwardArgs(IndexVector &inputs, ScalarVector &values)
555  : ForwardArgs<Scalar>(inputs, values) {
556  const_literals = false;
557  indirect = false;
558  }
559 };
560 
561 template <>
562 struct ReverseArgs<Writer> : Args<> {
563  typedef std::vector<Scalar> ScalarVector;
565  bool const_literals;
567  bool indirect;
568  void set_indirect() {
569  indirect = true;
570  ptr.first = 0;
571  ptr.second = 0;
572  }
573  Writer dxd(Index j) { return "d[" + tostr(input(j)) + "]"; }
574  Writer dyd(Index j) { return "d[" + tostr(output(j)) + "]"; }
575  Writer xd(Index j) { return "v[" + tostr(input(j)) + "]"; }
576  Writer yd(Index j) { return "v[" + tostr(output(j)) + "]"; }
577  Writer dxi(Index j) { return "d[i[" + tostr(Index(ptr.first + j)) + "]]"; }
578  Writer dyi(Index j) { return "d[o[" + tostr(Index(ptr.second + j)) + "]]"; }
579  Writer xi(Index j) { return "v[i[" + tostr(Index(ptr.first + j)) + "]]"; }
580  Writer yi(Index j) { return "v[o[" + tostr(Index(ptr.second + j)) + "]]"; }
581  Writer x(Index j) { return (indirect ? xi(j) : xd(j)); }
582  Writer y(Index j) { return (indirect ? yi(j) : yd(j)); }
583  Writer dx(Index j) { return (indirect ? dxi(j) : dxd(j)); }
584  Writer dy(Index j) { return (indirect ? dyi(j) : dyd(j)); }
585 
586  ReverseArgs(IndexVector &inputs, ScalarVector &values) : Args<>(inputs) {
587  const_literals = false;
588  indirect = false;
589  ptr.first = (Index)inputs.size();
590  ptr.second = (Index)values.size();
591  }
592 };
593 
594 struct Position {
595  Position(Index node, Index first, Index second);
596  Position();
597  Index node;
598  IndexPair ptr;
599  bool operator<(const Position &other) const;
600 };
601 
603 template <class T>
604 void sort_inplace(std::vector<T> &x) {
605  std::sort(x.begin(), x.end());
606 }
607 
609 template <class T>
610 void sort_unique_inplace(std::vector<T> &x) {
611  std::sort(x.begin(), x.end());
612  typename std::vector<T>::iterator last = std::unique(x.begin(), x.end());
613  x.erase(last, x.end());
614 }
615 
617 struct graph {
618  std::vector<Index> j;
619  std::vector<Index> p;
620  graph();
621  size_t num_neighbors(Index node);
622  Index *neighbors(Index node);
623  bool empty();
624  size_t num_nodes();
625  void print();
628  std::vector<bool> mark;
630  std::vector<Index> inv2op;
632  std::vector<Index> dep2op;
634  std::vector<Index> rowcounts();
636  std::vector<Index> colcounts();
646  void bfs(const std::vector<Index> &start, std::vector<bool> &visited,
647  std::vector<Index> &result);
660  void search(std::vector<Index> &start, bool sort_input = true,
661  bool sort_output = true);
669  void search(std::vector<Index> &start, std::vector<bool> &visited,
670  bool sort_input = true, bool sort_output = true);
676  std::vector<Index> boundary(const std::vector<Index> &subgraph);
681  graph(size_t num_nodes, const std::vector<IndexPair> &edges);
682 };
683 
684 namespace {
685 template <class CompleteOperator, bool dynamic>
686 struct constructOperator {};
687 template <class CompleteOperator>
688 struct constructOperator<CompleteOperator, false> {
689  CompleteOperator *operator()() {
690  static CompleteOperator *pOp = new CompleteOperator();
691  return pOp;
692  }
693 };
694 template <class CompleteOperator>
695 struct constructOperator<CompleteOperator, true> {
696  CompleteOperator *operator()() {
697  CompleteOperator *pOp = new CompleteOperator();
698  return pOp;
699  }
700 
701  template <class T1>
702  CompleteOperator *operator()(const T1 &x1) {
703  CompleteOperator *pOp = new CompleteOperator(x1);
704  return pOp;
705  }
706 
707  template <class T1, class T2>
708  CompleteOperator *operator()(const T1 &x1, const T2 &x2) {
709  CompleteOperator *pOp = new CompleteOperator(x1, x2);
710  return pOp;
711  }
712 
713  template <class T1, class T2, class T3>
714  CompleteOperator *operator()(const T1 &x1, const T2 &x2, const T3 &x3) {
715  CompleteOperator *pOp = new CompleteOperator(x1, x2, x3);
716  return pOp;
717  }
718 
719  template <class T1, class T2, class T3, class T4>
720  CompleteOperator *operator()(const T1 &x1, const T2 &x2, const T3 &x3,
721  const T4 &x4) {
722  CompleteOperator *pOp = new CompleteOperator(x1, x2, x3, x4);
723  return pOp;
724  }
725 };
726 } // namespace
727 
732 struct op_info {
734  typedef int IntRep;
736  IntRep code;
738  enum op_flag {
758  op_flag_count
759  };
760  template <class T>
761  IntRep get_flags(T op) {
762  return
763 
764  (op.dynamic * (1 << dynamic)) |
765  (op.smart_pointer * (1 << smart_pointer)) |
766  (op.is_linear * (1 << is_linear)) |
767  (op.is_constant * (1 << is_constant)) |
768  (op.independent_variable * (1 << independent_variable)) |
769  (op.dependent_variable * (1 << dependent_variable)) |
770  (op.allow_remap * (1 << allow_remap)) |
771  (op.elimination_protected * (1 << elimination_protected)) |
772  (op.updating * (1 << updating));
773  }
774  op_info();
775  op_info(op_flag f);
776 
777  template <class T>
778  op_info(T op) : code(get_flags(op)) {}
780  bool test(op_flag f) const;
781  op_info &operator|=(const op_info &other);
782  op_info &operator&=(const op_info &other);
783 };
784 
797 struct global {
798  struct ad_plain;
799  struct ad_aug;
800  typedef TMBAD_REPLAY_TYPE Replay;
801  struct ad_segment;
802  struct print_config;
811  struct OperatorPure {
814  virtual void increment(IndexPair &ptr) = 0;
817  virtual void decrement(IndexPair &ptr) = 0;
819  virtual void forward(ForwardArgs<Scalar> &args) = 0;
821  virtual void reverse(ReverseArgs<Scalar> &args) = 0;
823  virtual void forward_incr(ForwardArgs<Scalar> &args) = 0;
825  virtual void reverse_decr(ReverseArgs<Scalar> &args) = 0;
827  virtual Index input_size() = 0;
829  virtual Index output_size() = 0;
834  virtual void forward(ForwardArgs<bool> &args) = 0;
839  virtual void reverse(ReverseArgs<bool> &args) = 0;
841  virtual void forward_incr(ForwardArgs<bool> &args) = 0;
843  virtual void reverse_decr(ReverseArgs<bool> &args) = 0;
845  virtual void forward_incr_mark_dense(ForwardArgs<bool> &args) = 0;
859  virtual void dependencies(Args<> &args, Dependencies &dep) = 0;
863  virtual void dependencies_updating(Args<> &args, Dependencies &dep) = 0;
865  virtual void forward(ForwardArgs<Replay> &args) = 0;
867  virtual void reverse(ReverseArgs<Replay> &args) = 0;
869  virtual void forward_incr(ForwardArgs<Replay> &args) = 0;
871  virtual void reverse_decr(ReverseArgs<Replay> &args) = 0;
873  virtual void forward(ForwardArgs<Writer> &args) = 0;
875  virtual void reverse(ReverseArgs<Writer> &args) = 0;
877  virtual void forward_incr(ForwardArgs<Writer> &args) = 0;
879  virtual void reverse_decr(ReverseArgs<Writer> &args) = 0;
881  virtual const char *op_name() { return "NoName"; }
885  virtual OperatorPure *self_fuse() = 0;
889  virtual OperatorPure *other_fuse(OperatorPure *other) = 0;
891  virtual OperatorPure *copy() = 0;
893  virtual void deallocate() = 0;
895  virtual op_info info() = 0;
897  virtual void *operator_data() = 0;
902  virtual void *identifier() = 0;
904  virtual void print(print_config cfg) = 0;
907  virtual void *incomplete() = 0;
908  virtual ~OperatorPure() {}
909  };
910 
917  struct operation_stack : std::vector<OperatorPure *> {
918  typedef std::vector<OperatorPure *> Base;
922  operation_stack();
924  operation_stack(const operation_stack &other);
927  void push_back(OperatorPure *x);
929  operation_stack &operator=(const operation_stack &other);
930  ~operation_stack();
932  void clear();
933  void copy_from(const operation_stack &other);
934  };
935 
940  std::vector<Scalar> values;
943  std::vector<Scalar> derivs;
945  IndexVector inputs;
948  std::vector<Index> inv_index;
951  std::vector<Index> dep_index;
952 
953  mutable std::vector<IndexPair> subgraph_ptr;
954  std::vector<Index> subgraph_seq;
956  void (*forward_compiled)(Scalar *);
958  void (*reverse_compiled)(Scalar *, Scalar *);
959 
960  global();
963  void clear();
964 
980  void shrink_to_fit(double tol = .9);
981 
985  void clear_deriv(Position start = Position(0, 0, 0));
986 
988  Scalar &value_inv(Index i);
990  Scalar &deriv_inv(Index i);
992  Scalar &value_dep(Index i);
994  Scalar &deriv_dep(Index i);
996  Position begin();
998  Position end();
999 
1001  struct no_filter {
1002  CONSTEXPR bool operator[](size_t i) const;
1003  };
1009  template <class ForwardArgs, class NodeFilter>
1010  void forward_loop(ForwardArgs &args, size_t begin,
1011  const NodeFilter &node_filter) const {
1012  for (size_t i = begin; i < opstack.size(); i++) {
1013  if (node_filter[i])
1014  opstack[i]->forward_incr(args);
1015  else
1016  opstack[i]->increment(args.ptr);
1017  }
1018  }
1020  template <class ForwardArgs>
1021  void forward_loop(ForwardArgs &args, size_t begin = 0) const {
1022  forward_loop(args, begin, no_filter());
1023  }
1028  template <class ReverseArgs, class NodeFilter>
1029  void reverse_loop(ReverseArgs &args, size_t begin,
1030  const NodeFilter &node_filter) const {
1031  for (size_t i = opstack.size(); i > begin;) {
1032  i--;
1033  if (node_filter[i])
1034  opstack[i]->reverse_decr(args);
1035  else
1036  opstack[i]->decrement(args.ptr);
1037  }
1038  }
1040  template <class ReverseArgs>
1041  void reverse_loop(ReverseArgs &args, size_t begin = 0) const {
1042  reverse_loop(args, begin, no_filter());
1043  }
1045  template <class ForwardArgs>
1047  subgraph_cache_ptr();
1048  for (size_t j = 0; j < subgraph_seq.size(); j++) {
1049  Index i = subgraph_seq[j];
1050  args.ptr = subgraph_ptr[i];
1051  opstack[i]->forward(args);
1052  }
1053  }
1055  template <class ReverseArgs>
1057  subgraph_cache_ptr();
1058  for (size_t j = subgraph_seq.size(); j > 0;) {
1059  j--;
1060  Index i = subgraph_seq[j];
1061  args.ptr = subgraph_ptr[i];
1062  opstack[i]->reverse(args);
1063  }
1064  }
1075  template <class Vector>
1077  typename Vector::value_type value =
1078  typename Vector::value_type(0)) const {
1079  if (array.size() != values.size()) {
1080  array.resize(values.size());
1081  std::fill(array.begin(), array.end(), value);
1082  return;
1083  }
1084  subgraph_cache_ptr();
1085  for (size_t j = 0; j < subgraph_seq.size(); j++) {
1086  Index i = subgraph_seq[j];
1087  size_t noutput = opstack[i]->output_size();
1088  for (size_t k = 0; k < noutput; k++)
1089  array[subgraph_ptr[i].second + k] = value;
1090  }
1091  }
1092 
1097  void forward(Position start = Position(0, 0, 0));
1105  void reverse(Position start = Position(0, 0, 0));
1107  void forward_sub();
1109  void reverse_sub();
1110 
1112  void forward(std::vector<bool> &marks);
1114  void reverse(std::vector<bool> &marks);
1119  void forward_sub(std::vector<bool> &marks,
1120  const std::vector<bool> &node_filter = std::vector<bool>());
1125  void reverse_sub(std::vector<bool> &marks,
1126  const std::vector<bool> &node_filter = std::vector<bool>());
1135  void forward_dense(std::vector<bool> &marks);
1136 
1137  intervals<Index> updating_intervals() const;
1138 
1139  intervals<Index> updating_intervals_sub() const;
1140 
1141  struct replay {
1143  std::vector<Replay> values;
1146  std::vector<Replay> derivs;
1148  const global &orig;
1150  global &target;
1152  global *parent_glob;
1154  Replay &value_inv(Index i);
1156  Replay &deriv_inv(Index i);
1158  Replay &value_dep(Index i);
1160  Replay &deriv_dep(Index i);
1164  replay(const global &orig, global &target);
1173  void start();
1178  void stop();
1180  void add_updatable_derivs(const intervals<Index> &I);
1182  void clear_deriv();
1189  void forward(bool inv_tags = true, bool dep_tags = true,
1190  Position start = Position(0, 0, 0),
1191  const std::vector<bool> &node_filter = std::vector<bool>());
1199  void reverse(bool dep_tags = true, bool inv_tags = false,
1200  Position start = Position(0, 0, 0),
1201  const std::vector<bool> &node_filter = std::vector<bool>());
1203  void forward_sub();
1205  void reverse_sub();
1207  void clear_deriv_sub();
1208  };
1209 
1214  void forward_replay(bool inv_tags = true, bool dep_tags = true);
1215 
1221  void subgraph_cache_ptr() const;
1229  void set_subgraph(const std::vector<bool> &marks, bool append = false);
1231  void mark_subgraph(std::vector<bool> &marks);
1233  void unmark_subgraph(std::vector<bool> &marks);
1235  void subgraph_trivial();
1241  void clear_deriv_sub();
1274  global extract_sub(std::vector<Index> &var_remap, global new_glob = global());
1279  void extract_sub_inplace(std::vector<bool> marks);
1283  global extract_sub();
1284 
1293  std::vector<Index> var2op();
1299  std::vector<bool> var2op(const std::vector<bool> &values);
1301  std::vector<Index> op2var(const std::vector<Index> &seq);
1303  std::vector<bool> op2var(const std::vector<bool> &seq_mark);
1312  std::vector<Index> op2idx(const std::vector<Index> &var_subset,
1313  Index NA = (Index)-1);
1315  std::vector<bool> mark_space(size_t n, const std::vector<Index> ind);
1317  std::vector<bool> inv_marks();
1319  std::vector<bool> dep_marks();
1321  std::vector<bool> subgraph_marks();
1322 
1323  struct append_edges {
1324  size_t &i;
1325  const std::vector<bool> &keep_var;
1326  std::vector<Index> &var2op;
1327  std::vector<IndexPair> &edges;
1328 
1329  std::vector<bool> op_marks;
1330  size_t pos;
1331  append_edges(size_t &i, size_t num_nodes, const std::vector<bool> &keep_var,
1332  std::vector<Index> &var2op, std::vector<IndexPair> &edges);
1333  void operator()(Index dep_j);
1334 
1335  void start_iteration();
1336 
1337  void end_iteration();
1338  };
1347  graph build_graph(bool transpose, const std::vector<bool> &keep_var);
1351  graph forward_graph(std::vector<bool> keep_var = std::vector<bool>(0));
1355  graph reverse_graph(std::vector<bool> keep_var = std::vector<bool>(0));
1356 
1361  bool identical(const global &other) const;
1362 
1364  template <class T>
1365  void hash(hash_t &h, T x) const {
1366  static const size_t n =
1367  (sizeof(T) / sizeof(hash_t)) + (sizeof(T) % sizeof(hash_t) != 0);
1368  hash_t buffer[n];
1369  std::fill(buffer, buffer + n, 0);
1370  for (size_t i = 0; i < sizeof(x); i++)
1371  ((char *)buffer)[i] = ((char *)&x)[i];
1372  hash_t A = 54059;
1373  hash_t B = 76963;
1374  for (size_t i = 0; i < n; i++) h = (A * h) ^ (B * buffer[i]);
1375  }
1376 
1385  hash_t hash() const;
1386 
1388  struct hash_config {
1398  bool reduce;
1402  std::vector<Index> inv_seed;
1403  };
1404 
1459  std::vector<hash_t> hash_sweep(hash_config cfg) const;
1461  std::vector<hash_t> hash_sweep(bool weak = true) const;
1462 
1476  void eliminate();
1477 
1479  struct print_config {
1480  std::string prefix, mark;
1481  int depth;
1482  print_config();
1483  };
1485  void print(print_config cfg);
1487  void print();
1488 
1490  template <int ninput_, int noutput_ = 1>
1491  struct Operator {
1493  static const bool dynamic = false;
1495  static const int ninput = ninput_;
1497  static const int noutput = noutput_;
1499  static const int independent_variable = false;
1501  static const int dependent_variable = false;
1503  static const bool have_input_size_output_size = false;
1505  static const bool have_increment_decrement = false;
1507  static const bool have_forward_reverse = true;
1509  static const bool have_forward_incr_reverse_decr = false;
1511  static const bool have_forward_mark_reverse_mark = false;
1513  static const bool have_dependencies = false;
1519  static const bool allow_remap = true;
1530  static const bool implicit_dependencies = false;
1532  static const bool add_static_identifier = false;
1535  static const bool add_forward_replay_copy = false;
1538  static const bool have_eval = false;
1540  static const int max_fuse_depth = 2;
1542  static const bool is_linear = false;
1544  static const bool is_constant = false;
1546  static const bool smart_pointer = false;
1548  static const bool elimination_protected = false;
1574  static const bool updating = false;
1577  void dependencies_updating(Args<> &args, Dependencies &dep) const {}
1580  return NULL;
1581  }
1583  void *operator_data() { return NULL; }
1585  void print(print_config cfg) {}
1586  };
1589  template <int ninput, int noutput>
1590  struct DynamicOperator : Operator<ninput, noutput> {
1592  static const bool dynamic = true;
1594  static const int max_fuse_depth = 0;
1595  };
1598  template <int ninput>
1599  struct DynamicOutputOperator : Operator<ninput, -1> {
1601  static const bool dynamic = true;
1603  static const int max_fuse_depth = 0;
1604  Index noutput;
1605  };
1606  template <int noutput = 1>
1607  struct DynamicInputOperator : Operator<-1, noutput> {
1609  static const bool dynamic = true;
1611  static const int max_fuse_depth = 0;
1612  Index ninput;
1613  };
1614  struct DynamicInputOutputOperator : Operator<-1, -1> {
1616  static const bool dynamic = true;
1618  static const int max_fuse_depth = 0;
1619  Index ninput_, noutput_;
1620  DynamicInputOutputOperator(Index ninput, Index noutput);
1621  Index input_size() const;
1622  Index output_size() const;
1623  static const bool have_input_size_output_size = true;
1624  };
1625  struct UniqueDynamicOperator : Operator<-1, -1> {
1627  static const bool dynamic = true;
1629  static const int max_fuse_depth = 0;
1631  static const bool smart_pointer = false;
1634  static const bool have_input_size_output_size = true;
1635  };
1636  struct SharedDynamicOperator : UniqueDynamicOperator {
1638  static const bool smart_pointer = true;
1639  };
1640 
1643  template <class OperatorBase>
1644  struct AddInputSizeOutputSize : OperatorBase {
1645  INHERIT_CTOR(AddInputSizeOutputSize, OperatorBase)
1646  Index input_size() const { return this->ninput; }
1647  Index output_size() const { return this->noutput; }
1648  static const bool have_input_size_output_size = true;
1649  };
1650 
1653  template <class OperatorBase>
1654  struct AddIncrementDecrement : OperatorBase {
1655  INHERIT_CTOR(AddIncrementDecrement, OperatorBase)
1656  void increment(IndexPair &ptr) {
1657  ptr.first += this->input_size();
1658  ptr.second += this->output_size();
1659  }
1660  void decrement(IndexPair &ptr) {
1661  ptr.first -= this->input_size();
1662  ptr.second -= this->output_size();
1663  }
1664  static const bool have_increment_decrement = true;
1665  };
1666 
1670  template <class OperatorBase>
1671  struct AddForwardReverse : OperatorBase {
1672  INHERIT_CTOR(AddForwardReverse, OperatorBase)
1673 
1674  template <class Type>
1675  void forward(ForwardArgs<Type> &args) {
1676  ForwardArgs<Type> args_cpy(args);
1677  OperatorBase::forward_incr(args_cpy);
1678  }
1679  template <class Type>
1680  void reverse(ReverseArgs<Type> &args) {
1681  ReverseArgs<Type> args_cpy(args);
1682  OperatorBase::increment(args_cpy.ptr);
1683  OperatorBase::reverse_decr(args_cpy);
1684  }
1685  static const bool have_forward_reverse = true;
1686  };
1687 
1691  template <class OperatorBase>
1692  struct AddForwardIncrReverseDecr : OperatorBase {
1693  INHERIT_CTOR(AddForwardIncrReverseDecr, OperatorBase)
1694 
1695  template <class Type>
1696  void forward_incr(ForwardArgs<Type> &args) {
1697  OperatorBase::forward(args);
1698  OperatorBase::increment(args.ptr);
1699  }
1700 
1701  template <class Type>
1702  void reverse_decr(ReverseArgs<Type> &args) {
1703  OperatorBase::decrement(args.ptr);
1704  OperatorBase::reverse(args);
1705  }
1706  static const bool have_forward_incr_reverse_decr = true;
1707  };
1708 
1711  template <class OperatorBase>
1712  struct AddForwardMarkReverseMark : OperatorBase {
1713  INHERIT_CTOR(AddForwardMarkReverseMark, OperatorBase)
1714 
1715  template <class Type>
1716  void forward(ForwardArgs<Type> &args) {
1717  OperatorBase::forward(args);
1718  }
1719  template <class Type>
1720  void reverse(ReverseArgs<Type> &args) {
1721  OperatorBase::reverse(args);
1722  }
1723 
1724  void forward(ForwardArgs<bool> &args) { args.mark_dense(*this); }
1725  void reverse(ReverseArgs<bool> &args) { args.mark_dense(*this); }
1726  static const bool have_forward_mark_reverse_mark = true;
1727  };
1728 
1731  template <class OperatorBase>
1732  struct AddDependencies : OperatorBase {
1733  INHERIT_CTOR(AddDependencies, OperatorBase)
1734  void dependencies(Args<> &args, Dependencies &dep) const {
1735  Index ninput_ = this->input_size();
1736  for (Index j = 0; j < ninput_; j++) dep.push_back(args.input(j));
1737  }
1738  static const bool have_dependencies = true;
1739  };
1740 
1743  template <class OperatorBase, int ninput>
1744  struct AddForwardFromEval : OperatorBase {};
1746  template <class OperatorBase>
1747  struct AddForwardFromEval<OperatorBase, 1> : OperatorBase {
1748  INHERIT_CTOR(AddForwardFromEval, OperatorBase)
1749  template <class Type>
1750  void forward(ForwardArgs<Type> &args) {
1751  args.y(0) = this->eval(args.x(0));
1752  }
1753  };
1755  template <class OperatorBase>
1756  struct AddForwardFromEval<OperatorBase, 2> : OperatorBase {
1757  INHERIT_CTOR(AddForwardFromEval, OperatorBase)
1758  template <class Type>
1759  void forward(ForwardArgs<Type> &args) {
1760  args.y(0) = this->eval(args.x(0), args.x(1));
1761  }
1762  };
1763 
1765  template <bool flag, class dummy>
1767  void increment() {}
1768  void decrement() {}
1769  size_t operator()() const { return 0; }
1770  };
1771  template <class dummy>
1772  struct ReferenceCounter<true, dummy> {
1773  size_t counter;
1774  ReferenceCounter() : counter(0) {}
1775  void increment() { counter++; }
1776  void decrement() { counter--; }
1777  size_t operator()() const { return counter; }
1778  };
1779 
1781  template <bool flag, class Yes, class No>
1782  struct if_else {};
1783  template <class Yes, class No>
1784  struct if_else<true, Yes, No> {
1785  typedef Yes type;
1786  };
1787  template <class Yes, class No>
1788  struct if_else<false, Yes, No> {
1789  typedef No type;
1790  };
1791 
1793  template <class OperatorBase>
1794  struct CPL {
1795  static const bool test1 = !OperatorBase::have_eval;
1797  typedef typename if_else<
1798  test1, OperatorBase,
1800 
1801  static const bool test2 = Result1::have_input_size_output_size;
1803  typedef
1806 
1807  static const bool test3 = !Result2::have_dependencies;
1809  typedef typename if_else<test3, AddDependencies<Result2>, Result2>::type
1811 
1812  static const bool test4 = Result3::have_increment_decrement;
1814  typedef
1817 
1818  static const bool test5 = Result4::have_forward_mark_reverse_mark;
1820  typedef typename if_else<test5, Result4,
1822 
1823  static const bool test6 = Result5::have_forward_reverse &&
1824  !Result5::have_forward_incr_reverse_decr;
1827  Result5>::type Result6;
1828 
1829  static const bool test7 = Result6::have_forward_incr_reverse_decr &&
1830  !Result6::have_forward_reverse;
1832  typedef typename if_else<test7, AddForwardReverse<Result6>, Result6>::type
1834 
1835  typedef Result7 type;
1836  };
1837 
1839  template <class Operator1, class Operator2>
1840  struct Fused : Operator<Operator1::ninput + Operator2::ninput,
1841  Operator1::noutput + Operator2::noutput> {
1842  typename CPL<Operator1>::type Op1;
1843  typename CPL<Operator2>::type Op2;
1845  static const int independent_variable =
1846  Operator1::independent_variable && Operator2::independent_variable;
1848  static const int dependent_variable =
1849  Operator1::dependent_variable && Operator2::dependent_variable;
1851  static const int max_fuse_depth =
1852  (Operator1::max_fuse_depth < Operator2::max_fuse_depth
1853  ? Operator1::max_fuse_depth - 1
1854  : Operator2::max_fuse_depth - 1);
1856  static const bool is_linear = Operator1::is_linear && Operator2::is_linear;
1857  template <class Type>
1858  void forward_incr(ForwardArgs<Type> &args) {
1859  Op1.forward_incr(args);
1860  Op2.forward_incr(args);
1861  }
1862  template <class Type>
1863  void reverse_decr(ReverseArgs<Type> &args) {
1864  Op2.reverse_decr(args);
1865  Op1.reverse_decr(args);
1866  }
1868  static const bool have_forward_incr_reverse_decr = true;
1870  static const bool have_forward_reverse = false;
1871  const char *op_name() { return "Fused"; }
1872  };
1881  template <class Operator1>
1882  struct Rep : DynamicOperator<-1, -1> {
1883  typename CPL<Operator1>::type Op;
1885  static const int independent_variable = Operator1::independent_variable;
1887  static const int dependent_variable = Operator1::dependent_variable;
1889  static const bool is_linear = Operator1::is_linear;
1890  Index n;
1891  Rep(Index n) : n(n) {}
1892  Index input_size() const { return Operator1::ninput * n; }
1893  Index output_size() const { return Operator1::noutput * n; }
1895  static const bool have_input_size_output_size = true;
1896  template <class Type>
1897  void forward_incr(ForwardArgs<Type> &args) {
1898  for (size_t i = 0; i < (size_t)n; i++) Op.forward_incr(args);
1899  }
1900  template <class Type>
1901  void reverse_decr(ReverseArgs<Type> &args) {
1902  for (size_t i = 0; i < (size_t)n; i++) Op.reverse_decr(args);
1903  }
1905  static const bool have_forward_incr_reverse_decr = true;
1907  static const bool have_forward_reverse = false;
1914  TMBAD_ASSERT(false);
1915  std::vector<Index> &inputs = get_glob()->inputs;
1916  size_t k = Op.input_size();
1917  size_t start = inputs.size() - k * n;
1918  std::valarray<Index> increment(k);
1919  if (k > 0) {
1920  for (size_t i = 0; i < (size_t)n - 1; i++) {
1921  std::valarray<Index> v1(&inputs[start + i * k], k);
1922  std::valarray<Index> v2(&inputs[start + (i + 1) * k], k);
1923  if (i == 0) {
1924  increment = v2 - v1;
1925  } else {
1926  bool ok = (increment == (v2 - v1)).min();
1927  if (!ok) return NULL;
1928  }
1929  }
1930  }
1931 
1932  size_t reduction = (n - 1) * k;
1933  inputs.resize(inputs.size() - reduction);
1934  return get_glob()->getOperator<RepCompress<Operator1> >(n, increment);
1935  }
1936  OperatorPure *other_fuse(OperatorPure *self, OperatorPure *other) {
1937  OperatorPure *op1 = get_glob()->getOperator<Operator1>();
1938  if (op1 == other) {
1939  this->n++;
1940  return self;
1941  }
1942  return NULL;
1943  }
1944  const char *op_name() { return "Rep"; }
1945  };
1956  template <class Operator1>
1957  struct RepCompress : DynamicOperator<-1, -1> {
1959  static const int independent_variable = Operator1::independent_variable;
1961  static const int dependent_variable = Operator1::dependent_variable;
1963  static const bool is_linear = Operator1::is_linear;
1964  typename CPL<Operator1>::type Op;
1965  Index n;
1966 
1967  std::valarray<Index> increment_pattern;
1968  RepCompress(Index n, std::valarray<Index> v) : n(n), increment_pattern(v) {}
1969  Index input_size() const { return Operator1::ninput; }
1970  Index output_size() const { return Operator1::noutput * n; }
1972  static const bool have_input_size_output_size = true;
1974  template <class Type>
1976  std::valarray<Index> inputs(input_size());
1977  for (size_t i = 0; i < inputs.size(); i++) inputs[i] = args.input(i);
1978  ForwardArgs<Type> args_cpy = args;
1979  args_cpy.inputs = &inputs[0];
1980  args_cpy.ptr.first = 0;
1981  for (size_t i = 0; i < (size_t)n; i++) {
1982  Op.forward(args_cpy);
1983  inputs += this->increment_pattern;
1984  args_cpy.ptr.second += Op.output_size();
1985  }
1986  }
1988  template <class Type>
1990  std::valarray<Index> inputs(input_size());
1991  for (size_t i = 0; i < inputs.size(); i++) inputs[i] = args.input(i);
1992  inputs += n * this->increment_pattern;
1993  ReverseArgs<Type> args_cpy = args;
1994  args_cpy.inputs = &inputs[0];
1995  args_cpy.ptr.first = 0;
1996  args_cpy.ptr.second += n * Op.output_size();
1997  for (size_t i = 0; i < (size_t)n; i++) {
1998  inputs -= this->increment_pattern;
1999  args_cpy.ptr.second -= Op.output_size();
2000  Op.reverse(args_cpy);
2001  }
2002  }
2004  void dependencies(Args<> &args, Dependencies &dep) const {
2005  std::valarray<Index> inputs(input_size());
2006  for (size_t i = 0; i < inputs.size(); i++) inputs[i] = args.input(i);
2007  for (size_t i = 0; i < (size_t)n; i++) {
2008  dep.insert(dep.end(), &inputs[0], &inputs[0] + inputs.size());
2009  inputs += this->increment_pattern;
2010  }
2011  }
2012  static const bool have_dependencies = true;
2013  void forward(ForwardArgs<Writer> &args) {
2014  std::valarray<Index> inputs(Op.input_size());
2015  for (size_t i = 0; i < (size_t)Op.input_size(); i++)
2016  inputs[i] = args.input(i);
2017  std::valarray<Index> outputs(Op.output_size());
2018  for (size_t i = 0; i < (size_t)Op.output_size(); i++)
2019  outputs[i] = args.output(i);
2020  Writer w;
2021  int ninp = Op.input_size();
2022  int nout = Op.output_size();
2023 
2024  w << "for (int count = 0, "
2025  << "i[" << ninp << "]=" << inputs << ", "
2026  << "di[" << ninp << "]=" << increment_pattern << ", "
2027  << "o[" << nout << "]=" << outputs << "; "
2028  << "count < " << n << "; count++) {\n";
2029 
2030  w << " ";
2031  ForwardArgs<Writer> args_cpy = args;
2032  args_cpy.set_indirect();
2033  Op.forward(args_cpy);
2034  w << "\n";
2035 
2036  w << " ";
2037  w << "for (int k=0; k<" << ninp << "; k++) i[k] += di[k];\n";
2038  w << " ";
2039  w << "for (int k=0; k<" << nout << "; k++) o[k] += " << nout << ";\n";
2040 
2041  w << " ";
2042  w << "}";
2043  }
2044  void reverse(ReverseArgs<Writer> &args) {
2045  std::valarray<Index> inputs(Op.input_size());
2046  for (size_t i = 0; i < (size_t)Op.input_size(); i++)
2047  inputs[i] = args.input(i);
2048  inputs += n * increment_pattern;
2049  std::valarray<Index> outputs(Op.output_size());
2050  for (size_t i = 0; i < (size_t)Op.output_size(); i++)
2051  outputs[i] = args.output(i);
2052  outputs += n * Op.output_size();
2053  Writer w;
2054  int ninp = Op.input_size();
2055  int nout = Op.output_size();
2056 
2057  w << "for (int count = 0, "
2058  << "i[" << ninp << "]=" << inputs << ", "
2059  << "di[" << ninp << "]=" << increment_pattern << ", "
2060  << "o[" << nout << "]=" << outputs << "; "
2061  << "count < " << n << "; count++) {\n";
2062 
2063  w << " ";
2064  w << "for (int k=0; k<" << ninp << "; k++) i[k] -= di[k];\n";
2065  w << " ";
2066  w << "for (int k=0; k<" << nout << "; k++) o[k] -= " << nout << ";\n";
2067 
2068  w << " ";
2069  ReverseArgs<Writer> args_cpy = args;
2070  args_cpy.set_indirect();
2071  Op.reverse(args_cpy);
2072  w << "\n";
2073 
2074  w << " ";
2075  w << "}";
2076  }
2078  static const bool have_forward_incr_reverse_decr = false;
2080  static const bool have_forward_reverse = true;
2082  static const bool have_forward_mark_reverse_mark = true;
2083  const char *op_name() { return "CRep"; }
2084 
2085  struct operator_data_t {
2086  OperatorPure *Op;
2087  Index n;
2088  std::valarray<Index> ip;
2089  operator_data_t(const RepCompress &x)
2090  : Op(get_glob()->getOperator<Operator1>()),
2091  n(x.n),
2092  ip(x.increment_pattern) {}
2093  ~operator_data_t() { Op->deallocate(); }
2094  bool operator==(const operator_data_t &other) {
2095  return (Op == other.Op) && (ip.size() == other.ip.size()) &&
2096  ((ip - other.ip).min() == 0);
2097  }
2098  };
2099  void *operator_data() { return new operator_data_t(*this); }
2100  OperatorPure *other_fuse(OperatorPure *self, OperatorPure *other) {
2101  if (this->op_name() == other->op_name()) {
2102  operator_data_t *p1 =
2103  static_cast<operator_data_t *>(self->operator_data());
2104  operator_data_t *p2 =
2105  static_cast<operator_data_t *>(other->operator_data());
2106  bool match = (*p1 == *p2);
2107  int other_n = p2->n;
2108  delete p1;
2109  delete p2;
2110  if (match) {
2111  std::vector<Index> &inputs = get_glob()->inputs;
2112  size_t reduction = increment_pattern.size();
2113  inputs.resize(inputs.size() - reduction);
2114  this->n += other_n;
2115  other->deallocate();
2116  return self;
2117  }
2118  }
2119  return NULL;
2120  }
2121  };
2122 
2128  template <class OperatorBase>
2130  typename CPL<OperatorBase>::type Op;
2131  INHERIT_CTOR(Complete, Op)
2132  ~Complete() {}
2133  void forward(ForwardArgs<Scalar> &args) { Op.forward(args); }
2134  void reverse(ReverseArgs<Scalar> &args) { Op.reverse(args); }
2135  void forward_incr(ForwardArgs<Scalar> &args) { Op.forward_incr(args); }
2136  void reverse_decr(ReverseArgs<Scalar> &args) { Op.reverse_decr(args); }
2137 
2139  if (Op.add_forward_replay_copy)
2140  forward_replay_copy(args);
2141  else
2142  Op.forward(args);
2143  }
2144  void reverse(ReverseArgs<Replay> &args) { Op.reverse(args); }
2146  if (Op.add_forward_replay_copy) {
2147  forward_replay_copy(args);
2148  increment(args.ptr);
2149  } else
2150  Op.forward_incr(args);
2151  }
2152  void reverse_decr(ReverseArgs<Replay> &args) { Op.reverse_decr(args); }
2153 
2154  void forward(ForwardArgs<bool> &args) { Op.forward(args); }
2155  void reverse(ReverseArgs<bool> &args) { Op.reverse(args); }
2156  void forward_incr(ForwardArgs<bool> &args) { Op.forward_incr(args); }
2157  void reverse_decr(ReverseArgs<bool> &args) { Op.reverse_decr(args); }
2159  args.mark_dense(Op);
2160  Op.increment(args.ptr);
2161  };
2162 
2163  void forward(ForwardArgs<Writer> &args) { Op.forward(args); }
2164  void reverse(ReverseArgs<Writer> &args) { Op.reverse(args); }
2165  void forward_incr(ForwardArgs<Writer> &args) { Op.forward_incr(args); }
2166  void reverse_decr(ReverseArgs<Writer> &args) { Op.reverse_decr(args); }
2171  std::vector<ad_plain> operator()(const std::vector<ad_plain> &x) {
2172  TMBAD_ASSERT2(OperatorBase::dynamic,
2173  "Stack to heap copy only allowed for dynamic operators");
2174  Complete *pOp = new Complete(*this);
2175  TMBAD_ASSERT2(pOp->ref_count() == 0, "Operator already on the heap");
2176  pOp->ref_count.increment();
2177  return get_glob()->add_to_stack<OperatorBase>(pOp, x);
2178  }
2179  ad_segment operator()(const ad_segment &x) {
2180  TMBAD_ASSERT2(OperatorBase::dynamic,
2181  "Stack to heap copy only allowed for dynamic operators");
2182  Complete *pOp = new Complete(*this);
2183  TMBAD_ASSERT2(pOp->ref_count() == 0, "Operator already on the heap");
2184  pOp->ref_count.increment();
2185  return get_glob()->add_to_stack<OperatorBase>(pOp, x);
2186  }
2187  ad_segment operator()(const ad_segment &x, const ad_segment &y) {
2188  TMBAD_ASSERT2(OperatorBase::dynamic,
2189  "Stack to heap copy only allowed for dynamic operators");
2190  Complete *pOp = new Complete(*this);
2191  TMBAD_ASSERT2(pOp->ref_count() == 0, "Operator already on the heap");
2192  pOp->ref_count.increment();
2193  return get_glob()->add_to_stack<OperatorBase>(pOp, x, y);
2194  }
2195  template <class T>
2196  std::vector<T> operator()(const std::vector<T> &x) {
2197  std::vector<ad_plain> x_(x.begin(), x.end());
2198  std::vector<ad_plain> y_ = (*this)(x_);
2199  std::vector<T> y(y_.begin(), y_.end());
2200  return y;
2201  }
2202  void forward_replay_copy(ForwardArgs<Replay> &args) {
2203  std::vector<ad_plain> x(Op.input_size());
2204  for (size_t i = 0; i < x.size(); i++) x[i] = args.x(i);
2205  std::vector<ad_plain> y =
2206  get_glob()->add_to_stack<OperatorBase>(this->copy(), x);
2207  for (size_t i = 0; i < y.size(); i++) args.y(i) = y[i];
2208  }
2209  void dependencies(Args<> &args, Dependencies &dep) {
2210  Op.dependencies(args, dep);
2211  }
2212  void dependencies_updating(Args<> &args, Dependencies &dep) {
2213  Op.dependencies_updating(args, dep);
2214  }
2215  void increment(IndexPair &ptr) { Op.increment(ptr); }
2216  void decrement(IndexPair &ptr) { Op.decrement(ptr); }
2217  Index input_size() { return Op.input_size(); }
2218  Index output_size() { return Op.output_size(); }
2219  const char *op_name() { return Op.op_name(); }
2220  void print(print_config cfg) { Op.print(cfg); }
2221 
2222  template <class Operator_, int depth>
2223  struct SelfFuse {
2224  typedef Rep<Operator_> type;
2225  OperatorPure *operator()() {
2226  return get_glob()->template getOperator<type>(2);
2227  }
2228  };
2229  template <class Operator_>
2230  struct SelfFuse<Operator_, 0> {
2231  OperatorPure *operator()() { return NULL; }
2232  };
2234  return SelfFuse<OperatorBase, OperatorBase::max_fuse_depth>()();
2235  }
2237  return Op.other_fuse(this, other);
2238  }
2241  if (Op.smart_pointer) {
2242  ref_count.increment();
2243  return this;
2244  } else if (Op.dynamic)
2245  return new Complete(*this);
2246  else
2247  return this;
2248  }
2249  void deallocate() {
2250  if (!Op.dynamic) return;
2251  if (Op.smart_pointer) {
2252  if (ref_count() > 1) {
2253  ref_count.decrement();
2254  return;
2255  }
2256  }
2257  delete this;
2258  }
2260  op_info info(Op);
2261  return info;
2262  }
2263  void *identifier() {
2264  if (Op.add_static_identifier) {
2265  static void *id = new char();
2266  return id;
2267  } else
2268  return (void *)this;
2269  }
2270  void *operator_data() { return Op.operator_data(); }
2271  void *incomplete() { return &Op; }
2272  };
2273 
2274  template <class OperatorBase>
2275  Complete<OperatorBase> *getOperator() const {
2276  return constructOperator<Complete<OperatorBase>, OperatorBase::dynamic>()();
2277  }
2278  template <class OperatorBase, class T1>
2279  Complete<OperatorBase> *getOperator(const T1 &x1) const {
2280  return constructOperator<Complete<OperatorBase>, OperatorBase::dynamic>()(
2281  x1);
2282  }
2283  template <class OperatorBase, class T1, class T2>
2284  Complete<OperatorBase> *getOperator(const T1 &x1, const T2 &x2) const {
2285  return constructOperator<Complete<OperatorBase>, OperatorBase::dynamic>()(
2286  x1, x2);
2287  }
2288  template <class OperatorBase, class T1, class T2, class T3>
2289  Complete<OperatorBase> *getOperator(const T1 &x1, const T2 &x2,
2290  const T3 &x3) const {
2291  return constructOperator<Complete<OperatorBase>, OperatorBase::dynamic>()(
2292  x1, x2, x3);
2293  }
2294  template <class OperatorBase, class T1, class T2, class T3, class T4>
2295  Complete<OperatorBase> *getOperator(const T1 &x1, const T2 &x2, const T3 &x3,
2296  const T4 &x4) const {
2297  return constructOperator<Complete<OperatorBase>, OperatorBase::dynamic>()(
2298  x1, x2, x3, x4);
2299  }
2300  struct InvOp : Operator<0> {
2301  static const int independent_variable = true;
2302  template <class Type>
2303  void forward(ForwardArgs<Type> &args) {}
2304  template <class Type>
2305  void reverse(ReverseArgs<Type> &args) {}
2306  const char *op_name();
2307  };
2308 
2309  struct DepOp : Operator<1> {
2310  static const bool is_linear = true;
2311  static const int dependent_variable = true;
2312  static const bool have_eval = true;
2313  template <class Type>
2314  Type eval(Type x0) {
2315  return x0;
2316  }
2317  template <class Type>
2318  void reverse(ReverseArgs<Type> &args) {
2319  args.dx(0) += args.dy(0);
2320  }
2321  const char *op_name();
2322  };
2323 
2324  struct ConstOp : Operator<0, 1> {
2325  static const bool is_linear = true;
2326  static const bool is_constant = true;
2327  template <class Type>
2328  void forward(ForwardArgs<Type> &args) {}
2329  void forward(ForwardArgs<Replay> &args);
2330  template <class Type>
2331  void reverse(ReverseArgs<Type> &args) {}
2332  const char *op_name();
2333  void forward(ForwardArgs<Writer> &args);
2334  };
2335  struct DataOp : DynamicOutputOperator<0> {
2336  typedef DynamicOutputOperator<0> Base;
2337  static const bool is_linear = true;
2338  DataOp(Index n);
2339  template <class Type>
2340  void forward(ForwardArgs<Type> &args) {}
2341  template <class Type>
2342  void reverse(ReverseArgs<Type> &args) {}
2343  const char *op_name();
2344  void forward(ForwardArgs<Writer> &args);
2345  };
2356  static const bool add_forward_replay_copy = true;
2357  ZeroOp(Index n);
2358  template <class Type>
2359  void forward(ForwardArgs<Type> &args) {
2360  for (Index i = 0; i < Base::noutput; i++) args.y(i) = Type(0);
2361  }
2362  template <class Type>
2363  void reverse(ReverseArgs<Type> &args) {}
2364  const char *op_name();
2365  void forward(ForwardArgs<Writer> &args);
2368  void operator()(Replay *x, Index n);
2369  };
// Do-nothing operator: zero inputs, zero outputs, no-op sweeps.
struct NullOp : Operator<0, 0> {
  NullOp();
  const char *op_name();
  template <class T>
  void forward(ForwardArgs<T> &args) {}
  template <class T>
  void reverse(ReverseArgs<T> &args) {}
};
// Do-nothing operator with runtime-chosen arity — presumably a
// placeholder that preserves input/output counts when an operator is
// erased from the stack (TODO confirm with callers).
struct NullOp2 : DynamicInputOutputOperator {
  NullOp2(Index ninput, Index noutput);
  const char *op_name();
  template <class T>
  void forward(ForwardArgs<T> &args) {}
  template <class T>
  void reverse(ReverseArgs<T> &args) {}
};
2408  struct RefOp : DynamicOperator<0, 1> {
2409  static const bool dynamic = true;
2410  global *glob;
2411  Index i;
2412  RefOp(global *glob, Index i);
2414  void forward(ForwardArgs<Scalar> &args);
2416  void forward(ForwardArgs<Replay> &args);
2419  template <class Type>
2421  TMBAD_ASSERT2(false,
2422  "Reverse mode updates are forbidden until all references "
2423  "are resolved");
2424  }
2426  void reverse(ReverseArgs<Replay> &args);
2427  const char *op_name();
2428  };
2429 
// Convenience aliases for the common operator arities.
typedef Operator<1> UnaryOperator;
typedef Operator<2> BinaryOperator;

// Attempt to combine two consecutive operators into a single fused
// operator; NULL result presumably means "cannot fuse" (see other_fuse).
OperatorPure *Fuse(OperatorPure *Op1, OperatorPure *Op2);

// Global toggle controlling whether operator fusion is attempted.
static bool fuse;

// Enable or disable operator fusion (sets 'fuse').
void set_fuse(bool flag);

// Push an operator onto this tape's operator stack.
void add_to_opstack(OperatorPure *pOp);
// Record a nullary operator: allocate one new value slot on the tape and
// push a (shared) OperatorBase instance.  'result' is the initial value
// of the new variable.  Returns a handle to the new tape variable.
template <class OperatorBase>
ad_plain add_to_stack(Scalar result = 0) {
  ad_plain ans;
  // The new variable lives at the current end of the value array.
  ans.index = this->values.size();

  this->values.push_back(result);

  // Operator instance comes from the tape's operator factory.
  Complete<OperatorBase> *pOp = this->template getOperator<OperatorBase>();
  add_to_opstack(pOp);

  TMBAD_ASSERT(!TMBAD_INDEX_OVERFLOW(values.size()));
  return ans;
}
// Record a unary operator application on the tape.  The forward value is
// computed eagerly via OperatorBase::eval and the input's value index is
// stored so the operation can be replayed.
template <class OperatorBase>
ad_plain add_to_stack(const ad_plain &x) {
  ad_plain ans;
  ans.index = this->values.size();

  // Eager forward evaluation of the result value.
  this->values.push_back(OperatorBase().eval(x.Value()));

  this->inputs.push_back(x.index);

  Complete<OperatorBase> *pOp = this->template getOperator<OperatorBase>();
  add_to_opstack(pOp);

  TMBAD_ASSERT(!TMBAD_INDEX_OVERFLOW(values.size()));
  TMBAD_ASSERT(!TMBAD_INDEX_OVERFLOW(inputs.size()));
  return ans;
}
// Record a binary operator application on the tape (eager evaluation,
// two stored input indices).  See the unary overload above.
template <class OperatorBase>
ad_plain add_to_stack(const ad_plain &x, const ad_plain &y) {
  ad_plain ans;
  ans.index = this->values.size();

  this->values.push_back(OperatorBase().eval(x.Value(), y.Value()));

  this->inputs.push_back(x.index);
  this->inputs.push_back(y.index);

  Complete<OperatorBase> *pOp = this->template getOperator<OperatorBase>();
  add_to_opstack(pOp);

  TMBAD_ASSERT(!TMBAD_INDEX_OVERFLOW(values.size()));
  TMBAD_ASSERT(!TMBAD_INDEX_OVERFLOW(inputs.size()));
  return ans;
}
// Record a vector (segment) operator with two or three segment inputs.
// Unlike the scalar overloads, the forward values are computed by the
// operator's own forward() after the output slots have been allocated.
template <class OperatorBase>
ad_segment add_to_stack(ad_segment lhs, ad_segment rhs,
                        ad_segment more = ad_segment()) {
  // Remember where this operator's inputs and outputs begin.
  IndexPair ptr((Index)inputs.size(), (Index)values.size());
  Complete<OperatorBase> *pOp =
      this->template getOperator<OperatorBase>(lhs, rhs);
  size_t n = pOp->output_size();
  ad_segment ans(values.size(), n);
  inputs.push_back(lhs.index());
  inputs.push_back(rhs.index());
  // 'more' is optional; an empty segment contributes no input.
  if (more.size() > 0) inputs.push_back(more.index());
  opstack.push_back(pOp);
  values.resize(values.size() + n);
  // Run the forward sweep for just this operator to fill the outputs.
  ForwardArgs<Scalar> args(inputs, values, this);
  args.ptr = ptr;
  pOp->forward(args);

  TMBAD_ASSERT(!TMBAD_INDEX_OVERFLOW(values.size()));
  TMBAD_ASSERT(!TMBAD_INDEX_OVERFLOW(inputs.size()));
  return ans;
}
2516 
// Record a caller-constructed (stateful, dynamic) operator instance.
// lhs/rhs may be empty segments; only non-empty segments become inputs.
template <class OperatorBase>
ad_segment add_to_stack(Complete<OperatorBase> *pOp, ad_segment lhs,
                        ad_segment rhs = ad_segment()) {
  static_assert(
      OperatorBase::dynamic,
      "Unlikely that you want to use this method for static operators?");
  static_assert(
      OperatorBase::ninput == 0 || OperatorBase::implicit_dependencies,
      "Operators with pointer inputs should always implement "
      "'implicit_dependencies'");

  IndexPair ptr((Index)inputs.size(), (Index)values.size());
  size_t n = pOp->output_size();
  ad_segment ans(values.size(), n);
  // Number of non-empty segments must match the operator's arity.
  TMBAD_ASSERT((Index)(lhs.size() > 0) + (Index)(rhs.size() > 0) ==
               pOp->input_size());
  if (lhs.size() > 0) inputs.push_back(lhs.index());
  if (rhs.size() > 0) inputs.push_back(rhs.index());
  opstack.push_back(pOp);
  values.resize(values.size() + n);
  // Fill the freshly allocated outputs via the operator's forward pass.
  ForwardArgs<Scalar> args(inputs, values, this);
  args.ptr = ptr;
  pOp->forward(args);

  TMBAD_ASSERT(!TMBAD_INDEX_OVERFLOW(values.size()));
  TMBAD_ASSERT(!TMBAD_INDEX_OVERFLOW(inputs.size()));
  return ans;
}
// Record an operator taking/returning plain vectors of scalars.  Input
// and output counts are taken from the operator itself; returns one
// ad_plain handle per output value.
template <class OperatorBase>
std::vector<ad_plain> add_to_stack(OperatorPure *pOp,
                                   const std::vector<ad_plain> &x) {
  IndexPair ptr((Index)inputs.size(), (Index)values.size());
  size_t m = pOp->input_size();
  size_t n = pOp->output_size();
  ad_segment ans(values.size(), n);
  for (size_t i = 0; i < m; i++) inputs.push_back(x[i].index);
  opstack.push_back(pOp);
  values.resize(values.size() + n);
  ForwardArgs<Scalar> args(inputs, values, this);
  args.ptr = ptr;
  pOp->forward(args);

  TMBAD_ASSERT(!TMBAD_INDEX_OVERFLOW(values.size()));
  TMBAD_ASSERT(!TMBAD_INDEX_OVERFLOW(inputs.size()));
  // Outputs are contiguous starting at ans.index().
  std::vector<ad_plain> out(n);
  for (size_t i = 0; i < n; i++) out[i].index = ans.index() + i;
  return out;
}
2567 
/** Plain AD scalar: just an index into the current tape's value array. */
struct ad_plain {
  Index index;
  // Sentinel meaning "not initialized / not on any tape".
  static const Index NA = (Index)-1;
  bool initialized() const;
  bool on_some_tape() const;
  // Write this value to the active tape (presumably a no-op if already
  // there — TODO confirm in implementation).
  void addToTape() const;
  global *glob() const;
  void override_by(const ad_plain &x) const;

  ad_plain();

  ad_plain(Scalar x);
  ad_plain(ad_aug x);

// Identity operator used to copy a variable to a new tape location.
// Reverse mode forwards the derivative unchanged (d(copy)/dx = 1).
struct CopyOp : Operator<1> {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x0) {
    return x0;
  }
  Replay eval(Replay x0);
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) += args.dy(0);
  }
  const char *op_name();
};
// Copy of this variable as a new tape variable (via CopyOp).
ad_plain copy() const;
2625  struct ValOp : Operator<1> {
2626  static const bool have_dependencies = true;
2627  static const bool have_eval = true;
2629  template <class Type>
2630  Type eval(Type x0) {
2631  return x0;
2632  }
2633  Replay eval(Replay x0);
2635  template <class Type>
2643  void dependencies(Args<> &args, Dependencies &dep) const;
2644  const char *op_name();
2645  };
2649  ad_plain copy0() const;
2650 
// Addition.  The bool template flags tell reverse mode which operands
// are variables (constants receive no derivative).
template <bool left_var, bool right_var>
struct AddOp_ : BinaryOperator {
  static const bool is_linear = true;
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x0, Type x1) {
    return x0 + x1;
  }
  // d(x0+x1)/dx0 = d(x0+x1)/dx1 = 1.
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    if (left_var) args.dx(0) += args.dy(0);
    if (right_var) args.dx(1) += args.dy(0);
  }
  const char *op_name() { return "AddOp"; }
  // Fusion hook: an AddOp followed by a MulOp can be merged into a
  // single fused operator; NULL means "no fusion available".
  OperatorPure *other_fuse(OperatorPure *self, OperatorPure *other) {
    if (other == get_glob()->getOperator<MulOp>()) {
      return get_glob()->getOperator<Fused<AddOp_, MulOp> >();
    }
    return NULL;
  }
};
typedef AddOp_<true, true> AddOp;
ad_plain operator+(const ad_plain &other) const;
2674 
// Subtraction; see AddOp_ for the meaning of the template flags.
template <bool left_var, bool right_var>
struct SubOp_ : BinaryOperator {
  static const bool is_linear = true;
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x0, Type x1) {
    return x0 - x1;
  }
  // d(x0-x1)/dx0 = 1, d(x0-x1)/dx1 = -1.
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    if (left_var) args.dx(0) += args.dy(0);
    if (right_var) args.dx(1) -= args.dy(0);
  }
  const char *op_name() { return "SubOp"; }
};
typedef SubOp_<true, true> SubOp;
ad_plain operator-(const ad_plain &other) const;
2692 
// Multiplication; linear only when at least one operand is constant.
template <bool left_var, bool right_var>
struct MulOp_ : BinaryOperator {
  static const bool have_eval = true;
  static const bool is_linear = !left_var || !right_var;
  template <class Type>
  Type eval(Type x0, Type x1) {
    return x0 * x1;
  }
  // d(x0*x1)/dx0 = x1, d(x0*x1)/dx1 = x0.
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    if (left_var) args.dx(0) += args.x(1) * args.dy(0);
    if (right_var) args.dx(1) += args.x(0) * args.dy(0);
  }
  const char *op_name() { return "MulOp"; }
};
typedef MulOp_<true, true> MulOp;
ad_plain operator*(const ad_plain &other) const;
ad_plain operator*(const Scalar &other) const;
2711 
// Division.
template <bool left_var, bool right_var>
struct DivOp_ : BinaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x0, Type x1) {
    return x0 / x1;
  }
  // d(x0/x1)/dx0 = 1/x1; d(x0/x1)/dx1 = -x0/x1^2 = -y/x1
  // (reuses the stored forward result y to avoid recomputation).
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    Type tmp0 = args.dy(0) / args.x(1);
    if (left_var) args.dx(0) += tmp0;
    if (right_var) args.dx(1) -= args.y(0) * tmp0;
  }
  const char *op_name() { return "DivOp"; }
};
typedef DivOp_<true, true> DivOp;
ad_plain operator/(const ad_plain &other) const;
2729 
// Unary minus.
struct NegOp : UnaryOperator {
  static const bool is_linear = true;
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x0) {
    return -x0;
  }
  // d(-x)/dx = -1.
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) -= args.dy(0);
  }
  const char *op_name();
};
ad_plain operator-() const;

ad_plain &operator+=(const ad_plain &other);
ad_plain &operator-=(const ad_plain &other);
ad_plain &operator*=(const ad_plain &other);
ad_plain &operator/=(const ad_plain &other);

// Mark this variable as a tape output.
void Dependent();

// Mark this variable as a tape input.
void Independent();
Scalar &Value();
Scalar Value() const;
// Value as stored on a specific tape.
Scalar Value(global *glob) const;
Scalar &Deriv();
};
// Whether this tape is currently in use as a recording context
// (presumably set by ad_start / cleared by ad_stop — TODO confirm).
bool in_use;
// Begin recording AD operations on this tape.
void ad_start();
// Stop recording AD operations on this tape.
void ad_stop();
// Mark all elements of x as tape inputs.
void Independent(std::vector<ad_plain> &x);
/** Contiguous block of tape variables: a starting element plus a size,
    optionally shaped as an r-by-c matrix.  Argument/result type of the
    vectorized tape operators. */
struct ad_segment {
  ad_plain x;  // first element; its index is the segment offset
  size_t n;    // total number of elements
  size_t c;    // number of columns (see the (x, r, c) constructor)
  ad_segment();
  ad_segment(ad_plain x, size_t n);
  ad_segment(ad_aug x);
  ad_segment(Scalar x);
  ad_segment(Index idx, size_t n);
  ad_segment(ad_plain x, size_t r, size_t c);
  // Build from a raw array of Replay values; zero_check presumably
  // enables the all_zero shortcut — TODO confirm in implementation.
  ad_segment(Replay *x, size_t n, bool zero_check = false);
  bool identicalZero();
  bool all_on_active_tape(Replay *x, size_t n);
  bool is_contiguous(Replay *x, size_t n);
  bool all_zero(Replay *x, size_t n);
  bool all_constant(Replay *x, size_t n);
  size_t size() const;
  size_t rows() const;
  size_t cols() const;

  // i'th element of the segment.
  ad_plain operator[](size_t i) const;
  ad_plain offset() const;
  Index index() const;
};
/** Augmented AD scalar.  Appears to hold either a constant payload
    (data.value) or a reference to a taped variable (taped_value +
    data.glob) — NOTE(review): exact invariant not visible here; confirm
    against the implementation. */
struct ad_aug {
  // Tape location once the value has been written to a tape.
  mutable ad_plain taped_value;
  TMBAD_UNION_OR_STRUCT {
    Scalar value;
    mutable global *glob;
  }
  data;
  bool on_some_tape() const;
  bool on_active_tape() const;
  bool ontape() const;
  bool constant() const;
  Index index() const;
  global *glob() const;
  Scalar Value() const;
  ad_aug();
  ad_aug(Scalar x);
  ad_aug(ad_plain x);
  // Force this value onto the current tape.
  void addToTape() const;
  void override_by(const ad_plain &x) const;
  bool in_context_stack(global *glob) const;
  // Copies via CopyOp / ValOp respectively (see ad_plain).
  ad_aug copy() const;
  ad_aug copy0() const;
  bool identicalZero() const;
  bool identicalOne() const;
  // True if both this and other are constants.
  bool bothConstant(const ad_aug &other) const;
  bool identical(const ad_aug &other) const;
  ad_aug operator+(const ad_aug &other) const;
  ad_aug operator-(const ad_aug &other) const;
  ad_aug operator-() const;
  ad_aug operator*(const ad_aug &other) const;
  ad_aug operator/(const ad_aug &other) const;
  ad_aug &operator+=(const ad_aug &other);
  ad_aug &operator-=(const ad_aug &other);
  ad_aug &operator*=(const ad_aug &other);
  ad_aug &operator/=(const ad_aug &other);
  void Dependent();
  void Independent();
  Scalar &Value();
  Scalar &Deriv();
};
// Mark all elements of x as tape inputs.
void Independent(std::vector<ad_aug> &x);
};  // end of enclosing struct (presumably 'global')
2948 
// Stream a std::pair in the form "(first, second)".
template <class S, class T>
std::ostream &operator<<(std::ostream &os, const std::pair<S, T> &x) {
  return os << "(" << x.first << ", " << x.second << ")";
}
2954 
// Debug printers for the two AD scalar types (defined out of line).
std::ostream &operator<<(std::ostream &os, const global::ad_plain &x);
std::ostream &operator<<(std::ostream &os, const global::ad_aug &x);
2957 
2968 template <class T>
2969 struct adaptive : T {
2970  INHERIT_CTOR(adaptive, T)
2971  bool operator==(const T &other) const {
2972  return this->Value() == other.Value();
2973  }
2974  bool operator!=(const T &other) const {
2975  return this->Value() != other.Value();
2976  }
2977  bool operator>=(const T &other) const {
2978  return this->Value() >= other.Value();
2979  }
2980  bool operator<=(const T &other) const {
2981  return this->Value() <= other.Value();
2982  }
2983  bool operator<(const T &other) const { return this->Value() < other.Value(); }
2984  bool operator>(const T &other) const { return this->Value() > other.Value(); }
2985 
2986  adaptive operator+(const T &other) const {
2987  return adaptive(T(*this) + other);
2988  }
2989  adaptive operator-(const T &other) const {
2990  return adaptive(T(*this) - other);
2991  }
2992  adaptive operator*(const T &other) const {
2993  return adaptive(T(*this) * other);
2994  }
2995  adaptive operator/(const T &other) const {
2996  return adaptive(T(*this) / other);
2997  }
2998 
2999  adaptive operator-() const { return adaptive(-(T(*this))); }
3000 };
3001 
// Short namespace-level aliases for the nested tape types.
typedef global::ad_plain ad_plain;
typedef global::ad_aug ad_aug;
typedef global::Replay Replay;
// ad_aug with value-based comparison operators enabled (see 'adaptive').
typedef adaptive<ad_aug> ad_adapt;
// ad_plain constructed directly from a raw value index on the tape.
struct ad_plain_index : ad_plain {
  ad_plain_index(const Index &i);
  ad_plain_index(const ad_plain &x);
};
// ad_aug constructed directly from a raw value index on the tape.
struct ad_aug_index : ad_aug {
  ad_aug_index(const Index &i);
  ad_aug_index(const ad_aug &x);
  ad_aug_index(const ad_plain &x);
};
3023 
// Mark every element of x as an independent (input) variable.
template <class T>
void Independent(std::vector<T> &x) {
  for (auto &xi : x) xi.Independent();
}
// Mark every element of x as a dependent (output) variable.
template <class T>
void Dependent(std::vector<T> &x) {
  for (auto &xi : x) xi.Dependent();
}
// Extract the scalar value of any AD type; the non-template overload
// below makes this a passthrough for plain Scalar.
template <class T>
Scalar Value(T x) {
  return x.Value();
}
Scalar Value(Scalar x);
3037 
3044 template <class V>
3045 bool isContiguous(V &x) {
3046  bool ok = true;
3047  Index j_previous;
3048  for (size_t i = 0; i < (size_t)x.size(); i++) {
3049  if (!x[i].on_some_tape()) {
3050  ok = false;
3051  break;
3052  }
3053  Index j = ad_plain(x[i]).index;
3054  if (i > 0) {
3055  if (j != j_previous + 1) {
3056  ok = false;
3057  break;
3058  }
3059  }
3060  j_previous = j;
3061  }
3062  return ok;
3063 }
3070 template <class V>
3071 V getContiguous(const V &x) {
3072  V y(x.size());
3073  for (size_t i = 0; i < (size_t)x.size(); i++) y[i] = x[i].copy();
3074  return y;
3075 }
// Ensure x occupies a contiguous block on the tape, re-taping a copy
// in place when it does not.
template <class V>
void forceContiguous(V &x) {
  if (isContiguous(x)) return;
  x = getContiguous(x);
}
// Scalar-on-the-left arithmetic overloads for ad_aug.
ad_aug operator+(const double &x, const ad_aug &y);
ad_aug operator-(const double &x, const ad_aug &y);
ad_aug operator*(const double &x, const ad_aug &y);
ad_aug operator/(const double &x, const ad_aug &y);

// Scalar-on-the-left comparisons (ad_adapt only — comparisons are
// value-based; see 'adaptive').
bool operator<(const double &x, const ad_adapt &y);
bool operator<=(const double &x, const ad_adapt &y);
bool operator>(const double &x, const ad_adapt &y);
bool operator>=(const double &x, const ad_adapt &y);
bool operator==(const double &x, const ad_adapt &y);
bool operator!=(const double &x, const ad_adapt &y);
// Bring the plain-double rounding functions into scope so the AD
// overloads below extend (rather than hide) them.
using ::round;
using ::trunc;
using std::ceil;
using std::floor;
Writer floor(const Writer &x);
// floor(x): piecewise constant, so the derivative is zero almost
// everywhere and reverse() is intentionally empty.
struct FloorOp : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return floor(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {}
  const char *op_name();
};
ad_plain floor(const ad_plain &x);
ad_aug floor(const ad_aug &x);
Writer ceil(const Writer &x);
// ceil(x): zero derivative a.e. (see FloorOp).
struct CeilOp : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return ceil(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {}
  const char *op_name();
};
ad_plain ceil(const ad_plain &x);
ad_aug ceil(const ad_aug &x);
Writer trunc(const Writer &x);
// trunc(x): zero derivative a.e.
struct TruncOp : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return trunc(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {}
  const char *op_name();
};
ad_plain trunc(const ad_plain &x);
ad_aug trunc(const ad_aug &x);
Writer round(const Writer &x);
// round(x): zero derivative a.e.
struct RoundOp : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return round(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {}
  const char *op_name();
};
ad_plain round(const ad_plain &x);
ad_aug round(const ad_aug &x);
3153 
double sign(const double &x);
Writer sign(const Writer &x);
// sign(x): piecewise constant, zero derivative a.e.
struct SignOp : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return sign(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {}
  const char *op_name();
};
ad_plain sign(const ad_plain &x);
ad_aug sign(const ad_aug &x);
3168 
// Indicator helpers: ge0(x) for (x >= 0) and lt0(x) for (x < 0);
// used e.g. by MaxOp/MinOp to select the subgradient branch.
double ge0(const double &x);
double lt0(const double &x);
Writer ge0(const Writer &x);
// Indicator of x >= 0; piecewise constant, zero derivative a.e.
struct Ge0Op : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return ge0(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {}
  const char *op_name();
};
ad_plain ge0(const ad_plain &x);
ad_aug ge0(const ad_aug &x);
Writer lt0(const Writer &x);
// Indicator of x < 0; piecewise constant, zero derivative a.e.
struct Lt0Op : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return lt0(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {}
  const char *op_name();
};
ad_plain lt0(const ad_plain &x);
ad_aug lt0(const ad_aug &x);
// Import the plain-double math functions so the AD overloads defined
// below extend (rather than hide) them in this namespace.
using ::expm1;
using ::fabs;
using ::log1p;
using std::acos;
using std::acosh;
using std::asin;
using std::asinh;
using std::atan;
using std::atanh;
using std::cos;
using std::cosh;
using std::exp;
using std::log;
using std::sin;
using std::sinh;
using std::sqrt;
using std::tan;
using std::tanh;

Writer fabs(const Writer &x);
// |x|: reverse uses the subgradient sign(x).
struct AbsOp : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return fabs(x);
  }
  // d|x|/dx = sign(x) (subgradient; 0 at x = 0 via sign).
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) += args.dy(0) * sign(args.x(0));
  }
  void reverse(ReverseArgs<Scalar> &args);
  const char *op_name();
};
ad_plain fabs(const ad_plain &x);
ad_aug fabs(const ad_aug &x);
ad_adapt fabs(const ad_adapt &x);
Writer cos(const Writer &x);
ad_aug cos(const ad_aug &x);
Writer sin(const Writer &x);
// sin(x): d/dx = cos(x).
struct SinOp : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return sin(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) += args.dy(0) * cos(args.x(0));
  }
  void reverse(ReverseArgs<Scalar> &args);
  const char *op_name();
};
ad_plain sin(const ad_plain &x);
ad_aug sin(const ad_aug &x);
ad_adapt sin(const ad_adapt &x);
// (duplicate declaration kept as-is)
Writer cos(const Writer &x);
// cos(x): d/dx = -sin(x).
struct CosOp : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return cos(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) += args.dy(0) * -sin(args.x(0));
  }
  void reverse(ReverseArgs<Scalar> &args);
  const char *op_name();
};
ad_plain cos(const ad_plain &x);
ad_aug cos(const ad_aug &x);
ad_adapt cos(const ad_adapt &x);
Writer exp(const Writer &x);
// exp(x): d/dx = exp(x) = y (reuses the stored forward result).
struct ExpOp : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return exp(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) += args.dy(0) * args.y(0);
  }
  void reverse(ReverseArgs<Scalar> &args);
  const char *op_name();
};
ad_plain exp(const ad_plain &x);
ad_aug exp(const ad_aug &x);
ad_adapt exp(const ad_adapt &x);
Writer log(const Writer &x);
// log(x): d/dx = 1/x.
struct LogOp : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return log(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) += args.dy(0) * Type(1.) / args.x(0);
  }
  void reverse(ReverseArgs<Scalar> &args);
  const char *op_name();
};
ad_plain log(const ad_plain &x);
ad_aug log(const ad_aug &x);
ad_adapt log(const ad_adapt &x);
Writer sqrt(const Writer &x);
// sqrt(x): d/dx = 1/(2*sqrt(x)) = 0.5/y (reuses forward result).
struct SqrtOp : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return sqrt(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) += args.dy(0) * Type(0.5) / args.y(0);
  }
  void reverse(ReverseArgs<Scalar> &args);
  const char *op_name();
};
ad_plain sqrt(const ad_plain &x);
ad_aug sqrt(const ad_aug &x);
ad_adapt sqrt(const ad_adapt &x);
Writer tan(const Writer &x);
// tan(x): d/dx = 1/cos(x)^2.
struct TanOp : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return tan(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) += args.dy(0) * Type(1.) / (cos(args.x(0)) * cos(args.x(0)));
  }
  void reverse(ReverseArgs<Scalar> &args);
  const char *op_name();
};
ad_plain tan(const ad_plain &x);
ad_aug tan(const ad_aug &x);
ad_adapt tan(const ad_adapt &x);
Writer cosh(const Writer &x);
ad_aug cosh(const ad_aug &x);
Writer sinh(const Writer &x);
// sinh(x): d/dx = cosh(x).
struct SinhOp : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return sinh(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) += args.dy(0) * cosh(args.x(0));
  }
  void reverse(ReverseArgs<Scalar> &args);
  const char *op_name();
};
ad_plain sinh(const ad_plain &x);
ad_aug sinh(const ad_aug &x);
ad_adapt sinh(const ad_adapt &x);
// (duplicate declaration kept as-is)
Writer cosh(const Writer &x);
// cosh(x): d/dx = sinh(x).
struct CoshOp : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return cosh(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) += args.dy(0) * sinh(args.x(0));
  }
  void reverse(ReverseArgs<Scalar> &args);
  const char *op_name();
};
ad_plain cosh(const ad_plain &x);
ad_aug cosh(const ad_aug &x);
ad_adapt cosh(const ad_adapt &x);
Writer tanh(const Writer &x);
// tanh(x): d/dx = 1/cosh(x)^2.
struct TanhOp : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return tanh(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) += args.dy(0) * Type(1.) / (cosh(args.x(0)) * cosh(args.x(0)));
  }
  void reverse(ReverseArgs<Scalar> &args);
  const char *op_name();
};
ad_plain tanh(const ad_plain &x);
ad_aug tanh(const ad_aug &x);
ad_adapt tanh(const ad_adapt &x);
3390 Writer expm1(const Writer &x);
3391 struct Expm1 : global::UnaryOperator {
3392  static const bool have_eval = true;
3393  template <class Type>
3394  Type eval(Type x) {
3395  return expm1(x);
3396  }
3397  template <class Type>
3398  void reverse(ReverseArgs<Type> &args) {
3399  args.dx(0) += args.dy(0) * args.y(0) + Type(1.);
3400  }
3401  void reverse(ReverseArgs<Scalar> &args);
3402  const char *op_name();
3403 };
3404 ad_plain expm1(const ad_plain &x);
3405 ad_aug expm1(const ad_aug &x);
3406 ad_adapt expm1(const ad_adapt &x);
Writer log1p(const Writer &x);
// log1p(x) = log(1 + x): d/dx = 1/(x + 1).
struct Log1p : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return log1p(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) += args.dy(0) * Type(1.) / (args.x(0) + Type(1.));
  }
  void reverse(ReverseArgs<Scalar> &args);
  const char *op_name();
};
ad_plain log1p(const ad_plain &x);
ad_aug log1p(const ad_aug &x);
ad_adapt log1p(const ad_adapt &x);
Writer asin(const Writer &x);
// asin(x): d/dx = 1/sqrt(1 - x^2).
struct AsinOp : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return asin(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) +=
        args.dy(0) * Type(1.) / sqrt(Type(1.) - args.x(0) * args.x(0));
  }
  void reverse(ReverseArgs<Scalar> &args);
  const char *op_name();
};
ad_plain asin(const ad_plain &x);
ad_aug asin(const ad_aug &x);
ad_adapt asin(const ad_adapt &x);
Writer acos(const Writer &x);
// acos(x): d/dx = -1/sqrt(1 - x^2).
struct AcosOp : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return acos(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) +=
        args.dy(0) * Type(-1.) / sqrt(Type(1.) - args.x(0) * args.x(0));
  }
  void reverse(ReverseArgs<Scalar> &args);
  const char *op_name();
};
ad_plain acos(const ad_plain &x);
ad_aug acos(const ad_aug &x);
ad_adapt acos(const ad_adapt &x);
Writer atan(const Writer &x);
// atan(x): d/dx = 1/(1 + x^2).
struct AtanOp : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return atan(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) += args.dy(0) * Type(1.) / (Type(1.) + args.x(0) * args.x(0));
  }
  void reverse(ReverseArgs<Scalar> &args);
  const char *op_name();
};
ad_plain atan(const ad_plain &x);
ad_aug atan(const ad_aug &x);
ad_adapt atan(const ad_adapt &x);
Writer asinh(const Writer &x);
// asinh(x): d/dx = 1/sqrt(x^2 + 1).
struct AsinhOp : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return asinh(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) +=
        args.dy(0) * Type(1.) / sqrt(args.x(0) * args.x(0) + Type(1.));
  }
  void reverse(ReverseArgs<Scalar> &args);
  const char *op_name();
};
ad_plain asinh(const ad_plain &x);
ad_aug asinh(const ad_aug &x);
ad_adapt asinh(const ad_adapt &x);
Writer acosh(const Writer &x);
// acosh(x): d/dx = 1/sqrt(x^2 - 1).
struct AcoshOp : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return acosh(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) +=
        args.dy(0) * Type(1.) / sqrt(args.x(0) * args.x(0) - Type(1.));
  }
  void reverse(ReverseArgs<Scalar> &args);
  const char *op_name();
};
ad_plain acosh(const ad_plain &x);
ad_aug acosh(const ad_aug &x);
ad_adapt acosh(const ad_adapt &x);
Writer atanh(const Writer &x);
// atanh(x): d/dx = 1/(1 - x^2).
struct AtanhOp : global::UnaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x) {
    return atanh(x);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) += args.dy(0) * Type(1.) / (Type(1) - args.x(0) * args.x(0));
  }
  void reverse(ReverseArgs<Scalar> &args);
  const char *op_name();
};
ad_plain atanh(const ad_plain &x);
ad_aug atanh(const ad_aug &x);
ad_adapt atanh(const ad_adapt &x);
3530 
// Generic absolute value: forwards to fabs so that the AD overloads of
// fabs participate via overload resolution.
template <class T>
T abs(const T &arg) {
  return fabs(arg);
}
using std::pow;
Writer pow(const Writer &x1, const Writer &x2);
// pow(x1, x2):
//   d/dx1 = x2 * x1^(x2-1)
//   d/dx2 = y * log(x1)   (reuses the stored forward result y = x1^x2)
struct PowOp : global::BinaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x1, Type x2) {
    return pow(x1, x2);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) += args.dy(0) * args.x(1) * pow(args.x(0), args.x(1) - Type(1.));
    args.dx(1) += args.dy(0) * args.y(0) * log(args.x(0));
  }
  const char *op_name();
};
ad_plain pow(const ad_plain &x1, const ad_plain &x2);
ad_aug pow(const ad_aug &x1, const ad_aug &x2);
ad_adapt pow(const ad_adapt &x1, const ad_adapt &x2);
using std::atan2;
Writer atan2(const Writer &x1, const Writer &x2);
// atan2(x1, x2):
//   d/dx1 =  x2 / (x1^2 + x2^2)
//   d/dx2 = -x1 / (x1^2 + x2^2)
struct Atan2 : global::BinaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x1, Type x2) {
    return atan2(x1, x2);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) += args.dy(0) * args.x(1) /
                  (args.x(0) * args.x(0) + args.x(1) * args.x(1));
    args.dx(1) += args.dy(0) * -args.x(0) /
                  (args.x(0) * args.x(0) + args.x(1) * args.x(1));
  }
  const char *op_name();
};
ad_plain atan2(const ad_plain &x1, const ad_plain &x2);
ad_aug atan2(const ad_aug &x1, const ad_aug &x2);
ad_adapt atan2(const ad_adapt &x1, const ad_adapt &x2);
using std::max;
Writer max(const Writer &x1, const Writer &x2);
// max(x1, x2): the derivative routes to whichever argument is larger,
// using the ge0/lt0 indicators of (x1 - x2); ties go to x1.
struct MaxOp : global::BinaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x1, Type x2) {
    return max(x1, x2);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) += args.dy(0) * ge0(args.x(0) - args.x(1));
    args.dx(1) += args.dy(0) * lt0(args.x(0) - args.x(1));
  }
  const char *op_name();
};
ad_plain max(const ad_plain &x1, const ad_plain &x2);
ad_aug max(const ad_aug &x1, const ad_aug &x2);
ad_adapt max(const ad_adapt &x1, const ad_adapt &x2);
3591 
using std::min;
Writer min(const Writer &x1, const Writer &x2);
// min(x1, x2): derivative routes to the smaller argument via the
// ge0/lt0 indicators of (x2 - x1); ties go to x1.
struct MinOp : global::BinaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x1, Type x2) {
    return min(x1, x2);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) += args.dy(0) * ge0(args.x(1) - args.x(0));
    args.dx(1) += args.dy(0) * lt0(args.x(1) - args.x(0));
  }
  const char *op_name();
};
ad_plain min(const ad_plain &x1, const ad_plain &x2);
ad_aug min(const ad_aug &x1, const ad_aug &x2);
ad_adapt min(const ad_adapt &x1, const ad_adapt &x2);
3610 Replay CondExpEq(const Replay &x0, const Replay &x1, const Replay &x2,
3611  const Replay &x3);
3612 struct CondExpEqOp : global::Operator<4, 1> {
3613  void forward(ForwardArgs<Scalar> &args);
3614  void reverse(ReverseArgs<Scalar> &args);
3615  void forward(ForwardArgs<Replay> &args);
3616  void reverse(ReverseArgs<Replay> &args);
3617  void forward(ForwardArgs<Writer> &args);
3618  void reverse(ReverseArgs<Writer> &args);
3619  template <class Type>
3620  void forward(ForwardArgs<Type> &args) {
3621  TMBAD_ASSERT(false);
3622  }
3623  template <class Type>
3624  void reverse(ReverseArgs<Type> &args) {
3625  TMBAD_ASSERT(false);
3626  }
3627  const char *op_name();
3628 };
3629 Scalar CondExpEq(const Scalar &x0, const Scalar &x1, const Scalar &x2,
3630  const Scalar &x3);
3631 ad_plain CondExpEq(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
3632  const ad_plain &x3);
3633 ad_aug CondExpEq(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
3634  const ad_aug &x3);
3635 Replay CondExpNe(const Replay &x0, const Replay &x1, const Replay &x2,
3636  const Replay &x3);
3637 struct CondExpNeOp : global::Operator<4, 1> {
3638  void forward(ForwardArgs<Scalar> &args);
3639  void reverse(ReverseArgs<Scalar> &args);
3640  void forward(ForwardArgs<Replay> &args);
3641  void reverse(ReverseArgs<Replay> &args);
3642  void forward(ForwardArgs<Writer> &args);
3643  void reverse(ReverseArgs<Writer> &args);
3644  template <class Type>
3645  void forward(ForwardArgs<Type> &args) {
3646  TMBAD_ASSERT(false);
3647  }
3648  template <class Type>
3649  void reverse(ReverseArgs<Type> &args) {
3650  TMBAD_ASSERT(false);
3651  }
3652  const char *op_name();
3653 };
3654 Scalar CondExpNe(const Scalar &x0, const Scalar &x1, const Scalar &x2,
3655  const Scalar &x3);
3656 ad_plain CondExpNe(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
3657  const ad_plain &x3);
3658 ad_aug CondExpNe(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
3659  const ad_aug &x3);
3660 Replay CondExpGt(const Replay &x0, const Replay &x1, const Replay &x2,
3661  const Replay &x3);
3662 struct CondExpGtOp : global::Operator<4, 1> {
3663  void forward(ForwardArgs<Scalar> &args);
3664  void reverse(ReverseArgs<Scalar> &args);
3665  void forward(ForwardArgs<Replay> &args);
3666  void reverse(ReverseArgs<Replay> &args);
3667  void forward(ForwardArgs<Writer> &args);
3668  void reverse(ReverseArgs<Writer> &args);
3669  template <class Type>
3670  void forward(ForwardArgs<Type> &args) {
3671  TMBAD_ASSERT(false);
3672  }
3673  template <class Type>
3674  void reverse(ReverseArgs<Type> &args) {
3675  TMBAD_ASSERT(false);
3676  }
3677  const char *op_name();
3678 };
3679 Scalar CondExpGt(const Scalar &x0, const Scalar &x1, const Scalar &x2,
3680  const Scalar &x3);
3681 ad_plain CondExpGt(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
3682  const ad_plain &x3);
3683 ad_aug CondExpGt(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
3684  const ad_aug &x3);
3685 Replay CondExpLt(const Replay &x0, const Replay &x1, const Replay &x2,
3686  const Replay &x3);
3687 struct CondExpLtOp : global::Operator<4, 1> {
3688  void forward(ForwardArgs<Scalar> &args);
3689  void reverse(ReverseArgs<Scalar> &args);
3690  void forward(ForwardArgs<Replay> &args);
3691  void reverse(ReverseArgs<Replay> &args);
3692  void forward(ForwardArgs<Writer> &args);
3693  void reverse(ReverseArgs<Writer> &args);
3694  template <class Type>
3695  void forward(ForwardArgs<Type> &args) {
3696  TMBAD_ASSERT(false);
3697  }
3698  template <class Type>
3699  void reverse(ReverseArgs<Type> &args) {
3700  TMBAD_ASSERT(false);
3701  }
3702  const char *op_name();
3703 };
3704 Scalar CondExpLt(const Scalar &x0, const Scalar &x1, const Scalar &x2,
3705  const Scalar &x3);
3706 ad_plain CondExpLt(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
3707  const ad_plain &x3);
3708 ad_aug CondExpLt(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
3709  const ad_aug &x3);
3710 Replay CondExpGe(const Replay &x0, const Replay &x1, const Replay &x2,
3711  const Replay &x3);
3712 struct CondExpGeOp : global::Operator<4, 1> {
3713  void forward(ForwardArgs<Scalar> &args);
3714  void reverse(ReverseArgs<Scalar> &args);
3715  void forward(ForwardArgs<Replay> &args);
3716  void reverse(ReverseArgs<Replay> &args);
3717  void forward(ForwardArgs<Writer> &args);
3718  void reverse(ReverseArgs<Writer> &args);
3719  template <class Type>
3720  void forward(ForwardArgs<Type> &args) {
3721  TMBAD_ASSERT(false);
3722  }
3723  template <class Type>
3724  void reverse(ReverseArgs<Type> &args) {
3725  TMBAD_ASSERT(false);
3726  }
3727  const char *op_name();
3728 };
3729 Scalar CondExpGe(const Scalar &x0, const Scalar &x1, const Scalar &x2,
3730  const Scalar &x3);
3731 ad_plain CondExpGe(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
3732  const ad_plain &x3);
3733 ad_aug CondExpGe(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
3734  const ad_aug &x3);
3735 Replay CondExpLe(const Replay &x0, const Replay &x1, const Replay &x2,
3736  const Replay &x3);
3737 struct CondExpLeOp : global::Operator<4, 1> {
3738  void forward(ForwardArgs<Scalar> &args);
3739  void reverse(ReverseArgs<Scalar> &args);
3740  void forward(ForwardArgs<Replay> &args);
3741  void reverse(ReverseArgs<Replay> &args);
3742  void forward(ForwardArgs<Writer> &args);
3743  void reverse(ReverseArgs<Writer> &args);
3744  template <class Type>
3745  void forward(ForwardArgs<Type> &args) {
3746  TMBAD_ASSERT(false);
3747  }
3748  template <class Type>
3749  void reverse(ReverseArgs<Type> &args) {
3750  TMBAD_ASSERT(false);
3751  }
3752  const char *op_name();
3753 };
3754 Scalar CondExpLe(const Scalar &x0, const Scalar &x1, const Scalar &x2,
3755  const Scalar &x3);
3756 ad_plain CondExpLe(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
3757  const ad_plain &x3);
3758 ad_aug CondExpLe(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
3759  const ad_aug &x3);
3760 
3761 template <class Info>
3762 struct InfoOp : global::DynamicOperator<-1, 0> {
3763  Index n;
3764  Info info;
3765  InfoOp(Index n, Info info) : n(n), info(info) {}
3766  static const bool elimination_protected = true;
3767  static const bool add_forward_replay_copy = true;
3768  static const bool have_input_size_output_size = true;
3769  template <class Type>
3770  void forward(ForwardArgs<Type> &args) {}
3771  template <class Type>
3772  void reverse(ReverseArgs<Type> &args) {}
3773  Index input_size() const { return n; }
3774  Index output_size() const { return 0; }
3775  const char *op_name() { return "InfoOp"; }
3776  void print(global::print_config cfg) {
3777  Rcout << cfg.prefix << info << std::endl;
3778  }
3779  void *operator_data() { return &info; }
3780 };
3781 template <class Info>
3782 void addInfo(const std::vector<ad_aug> &x, const Info &info) {
3783  global::Complete<InfoOp<Info> >(x.size(), info)(x);
3784 }
3785 template <class Info>
3786 void addInfo(const std::vector<double> &x, const Info &info) {}
3787 
3788 struct SumOp : global::DynamicOperator<-1, 1> {
3789  static const bool is_linear = true;
3790  static const bool have_input_size_output_size = true;
3791  static const bool add_forward_replay_copy = true;
3792  size_t n;
3793  Index input_size() const;
3794  Index output_size() const;
3795  SumOp(size_t n);
3796  template <class Type>
3797  void forward(ForwardArgs<Type> &args) {
3798  args.y(0) = 0;
3799  for (size_t i = 0; i < n; i++) {
3800  args.y(0) += args.x(i);
3801  }
3802  }
3803  template <class Type>
3804  void reverse(ReverseArgs<Type> &args) {
3805  for (size_t i = 0; i < n; i++) {
3806  args.dx(i) += args.dy(0);
3807  }
3808  }
3809  const char *op_name();
3810 };
3811 template <class T>
3812 T sum(const std::vector<T> &x) {
3813  return global::Complete<SumOp>(x.size())(x)[0];
3814 }
3815 
3816 ad_plain logspace_sum(const std::vector<ad_plain> &x);
3817 struct LogSpaceSumOp : global::DynamicOperator<-1, 1> {
3818  size_t n;
3819  static const bool have_input_size_output_size = true;
3820  Index input_size() const;
3821  Index output_size() const;
3822  LogSpaceSumOp(size_t n);
3823  void forward(ForwardArgs<Scalar> &args);
3824  void forward(ForwardArgs<Replay> &args);
3825  template <class Type>
3826  void reverse(ReverseArgs<Type> &args) {
3827  for (size_t i = 0; i < n; i++) {
3828  args.dx(i) += exp(args.x(i) - args.y(0)) * args.dy(0);
3829  }
3830  }
3831  const char *op_name();
3832 };
3833 ad_plain logspace_sum(const std::vector<ad_plain> &x);
3834 template <class T>
3835 T logspace_sum(const std::vector<T> &x_) {
3836  std::vector<ad_plain> x(x_.begin(), x_.end());
3837  return logspace_sum(x);
3838 }
3839 
3840 ad_plain logspace_sum_stride(const std::vector<ad_plain> &x,
3841  const std::vector<Index> &stride, size_t n);
3842 struct LogSpaceSumStrideOp : global::DynamicOperator<-1, 1> {
3843  std::vector<Index> stride;
3844  size_t n;
3845  static const bool have_input_size_output_size = true;
3846 
3847  Index number_of_terms() const;
3848  template <class Type>
3849  Type &entry(Type **px, size_t i, size_t j) const {
3850  return px[j][0 + i * stride[j]];
3851  }
3852  template <class Type>
3853  Type rowsum(Type **px, size_t i) const {
3854  size_t m = stride.size();
3855  Type s = (Scalar)(0);
3856  for (size_t j = 0; j < m; j++) {
3857  s += entry(px, i, j);
3858  }
3859  return s;
3860  }
3861  Index input_size() const;
3862  Index output_size() const;
3863  LogSpaceSumStrideOp(std::vector<Index> stride, size_t n);
3864  void forward(ForwardArgs<Scalar> &args);
3865  void forward(ForwardArgs<Replay> &args);
3866  template <class Type>
3867  void reverse(ReverseArgs<Type> &args) {
3868  size_t m = stride.size();
3869  std::vector<Type *> wrk1(m);
3870  std::vector<Type *> wrk2(m);
3871  Type **px = &(wrk1[0]);
3872  Type **pdx = &(wrk2[0]);
3873  for (size_t i = 0; i < m; i++) {
3874  px[i] = args.x_ptr(i);
3875  pdx[i] = args.dx_ptr(i);
3876  }
3877  for (size_t i = 0; i < n; i++) {
3878  Type s = rowsum(px, i);
3879  Type tmp = exp(s - args.y(0)) * args.dy(0);
3880  for (size_t j = 0; j < m; j++) {
3881  entry(pdx, i, j) += tmp;
3882  }
3883  }
3884  }
3889  void dependencies(Args<> &args, Dependencies &dep) const;
3891  static const bool have_dependencies = true;
3893  static const bool implicit_dependencies = true;
3895  static const bool allow_remap = false;
3896  const char *op_name();
3897 
3898  void forward(ForwardArgs<Writer> &args);
3899  void reverse(ReverseArgs<Writer> &args);
3900 };
3901 ad_plain logspace_sum_stride(const std::vector<ad_plain> &x,
3902  const std::vector<Index> &stride, size_t n);
3903 template <class T>
3904 T logspace_sum_stride(const std::vector<T> &x_,
3905  const std::vector<Index> &stride, size_t n) {
3906  std::vector<ad_plain> x(x_.begin(), x_.end());
3907  return logspace_sum_stride(x, stride, n);
3908 }
3909 } // namespace TMBad
3910 #endif // HAVE_GLOBAL_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:153
+
1 #ifndef HAVE_GLOBAL_HPP
2 #define HAVE_GLOBAL_HPP
3 // Autogenerated - do not edit by hand !
4 #include <algorithm>
5 #include <cmath>
6 #include <ctime>
7 #include <iomanip>
8 #include <iostream>
9 #include <limits>
10 #include <set>
11 #include <sstream>
12 #include <valarray>
13 #include <vector>
14 #include "config.hpp"
15 #include "radix.hpp"
16 
20 namespace TMBad {
21 
22 typedef TMBAD_HASH_TYPE hash_t;
23 typedef TMBAD_INDEX_TYPE Index;
24 typedef TMBAD_SCALAR_TYPE Scalar;
25 typedef std::pair<Index, Index> IndexPair;
26 typedef TMBAD_INDEX_VECTOR IndexVector;
27 
28 struct global;
31 global *get_glob();
32 
33 template <class T>
34 std::ostream &operator<<(std::ostream &out, const std::vector<T> &v) {
35  out << "{";
36  size_t last = v.size() - 1;
37  for (size_t i = 0; i < v.size(); ++i) {
38  out << v[i];
39  if (i != last) out << ", ";
40  }
41  out << "}";
42  return out;
43 }
44 
46 template <class T>
47 struct intervals {
48  struct ep : std::pair<T, bool> {
49  bool left() const { return !this->second; }
50  ep(T x, bool type) : std::pair<T, bool>(x, type) {}
51  operator T() { return this->first; }
52  };
53  std::set<ep> x;
54  typedef typename std::set<ep>::iterator iterator;
58  bool insert(T a, T b) {
59  ep x1(a, false);
60  ep x2(b, true);
61  iterator it1 = x.upper_bound(x1);
62  iterator it2 = x.lower_bound(x2);
63 
64  bool insert_x1 = (it1 == x.end()) || it1->left();
65  bool insert_x2 = (it2 == x.end()) || it2->left();
66 
67  bool change = (it1 != it2) || insert_x1;
68 
69  if (it1 != it2) {
70  x.erase(it1, it2);
71  }
72 
73  if (insert_x1) x.insert(x1);
74  if (insert_x2) x.insert(x2);
75  return change;
76  }
78  template <class F>
79  F &apply(F &f) const {
80  for (iterator it = x.begin(); it != x.end();) {
81  ep a = *it;
82  ++it;
83  ep b = *it;
84  ++it;
85  f(a, b);
86  }
87  return f;
88  }
89  struct print_interval {
90  void operator()(T a, T b) { Rcout << "[ " << a << " , " << b << " ] "; }
91  };
92  void print() {
93  print_interval f;
94  this->apply(f);
95  Rcout << "\n";
96  }
97 };
98 
99 struct Dependencies : std::vector<Index> {
100  typedef std::vector<Index> Base;
101  std::vector<std::pair<Index, Index> > I;
102  Dependencies();
103  void clear();
104  void add_interval(Index a, Index b);
105  void add_segment(Index start, Index size);
106 
107  void monotone_transform_inplace(const std::vector<Index> &x);
108 
109  template <class F>
110  F &apply(F &f) {
111  for (size_t i = 0; i < this->size(); i++) f((*this)[i]);
112  for (size_t i = 0; i < I.size(); i++) {
113  for (Index j = I[i].first; j <= I[i].second; j++) {
114  f(j);
115  }
116  }
117  return f;
118  }
119 
120  template <class F>
121  F &apply_if_not_visited(F &f, intervals<Index> &visited) {
122  for (size_t i = 0; i < this->size(); i++) f((*this)[i]);
123  for (size_t i = 0; i < I.size(); i++) {
124  if (visited.insert(I[i].first, I[i].second)) {
125  for (Index j = I[i].first; j <= I[i].second; j++) {
126  f(j);
127  }
128  }
129  }
130  return f;
131  }
132 
133  bool any(const std::vector<bool> &x) const;
134 };
135 
138 enum ArrayAccess { x_read, y_read, y_write, dx_read, dx_write, dy_read };
139 template <class Args, ArrayAccess What>
140 struct Accessor {};
141 template <class Args>
142 struct Accessor<Args, x_read> {
143  typename Args::value_type operator()(const Args &args, Index j) const {
144  return args.x(j);
145  }
146 };
147 template <class Args>
148 struct Accessor<Args, y_read> {
149  typename Args::value_type operator()(const Args &args, Index j) const {
150  return args.y(j);
151  }
152 };
153 template <class Args>
154 struct Accessor<Args, y_write> {
155  typename Args::value_type &operator()(Args &args, Index j) {
156  return args.y(j);
157  }
158 };
159 template <class Args>
160 struct Accessor<Args, dx_read> {
161  typename Args::value_type operator()(const Args &args, Index j) const {
162  return args.dx(j);
163  }
164 };
165 template <class Args>
166 struct Accessor<Args, dx_write> {
167  typename Args::value_type &operator()(Args &args, Index j) {
168  return args.dx(j);
169  }
170 };
171 template <class Args>
172 struct Accessor<Args, dy_read> {
173  typename Args::value_type operator()(const Args &args, Index j) const {
174  return args.dy(j);
175  }
176 };
177 
183 template <class T>
185  const std::vector<T> &x;
186  const std::vector<Index> &i;
187  IndirectAccessor(const std::vector<T> &x, const std::vector<Index> &i)
188  : x(x), i(i) {}
189  T operator[](size_t j) const { return x[i[j]]; }
190  size_t size() const { return i.size(); }
191  operator std::vector<T>() const {
192  std::vector<T> ans(i.size());
193  for (size_t j = 0; j < ans.size(); j++) ans[j] = (*this)[j];
194  return ans;
195  }
196 };
197 
205 template <class Args, ArrayAccess What>
206 struct segment_ref {
207  typedef typename Args::value_type Type;
208  Accessor<Args, What> element_access;
209  Args args;
210  Index from, n;
211  segment_ref(const Args &args, Index from, Index n)
212  : args(args), from(from), n(n) {}
213  template <class Other>
214  operator Other() {
215  Other ans(n);
216  for (size_t i = 0; i < n; i++) {
217  ans[i] = element_access(args, from + i);
218  }
219  return ans;
220  }
221  Type operator[](Index i) const { return element_access(args, from + i); }
222  size_t size() const { return n; }
223  template <class Other>
224  segment_ref &operator=(const Other &other) {
225  for (size_t i = 0; i < n; i++) {
226  element_access(args, from + i) = other[i];
227  }
228  return *this;
229  }
230  template <class Other>
231  segment_ref &operator+=(const Other &other) {
232  for (size_t i = 0; i < n; i++) {
233  element_access(args, from + i) += other[i];
234  }
235  return *this;
236  }
237  template <class Other>
238  segment_ref &operator-=(const Other &other) {
239  for (size_t i = 0; i < n; i++) {
240  element_access(args, from + i) -= other[i];
241  }
242  return *this;
243  }
244 };
245 
255 template <class dummy = void>
256 struct Args {
258  const Index *inputs;
263  IndexPair ptr;
265  Index input(Index j) const { return inputs[ptr.first + j]; }
267  Index output(Index j) const { return ptr.second + j; }
268  Args(const IndexVector &inputs) : inputs(inputs.data()) {
269  ptr.first = 0;
270  ptr.second = 0;
271  }
272 };
278 template <class Type>
279 struct ForwardArgs : Args<> {
280  typedef std::vector<Type> TypeVector;
281  typedef Type value_type;
282  Type *values;
283  global *glob_ptr;
285  Type x(Index j) const { return values[input(j)]; }
287  Type &y(Index j) { return values[output(j)]; }
289  Type *x_ptr(Index j) { return &values[input(j)]; }
291  Type *y_ptr(Index j) { return &values[output(j)]; }
293  segment_ref<ForwardArgs, x_read> x_segment(Index from, Index size) {
294  return segment_ref<ForwardArgs, x_read>(*this, from, size);
295  }
298  return segment_ref<ForwardArgs, y_write>(*this, from, size);
299  }
300  ForwardArgs(const IndexVector &inputs, TypeVector &values,
301  global *glob_ptr = NULL)
302  : Args<>(inputs), values(values.data()), glob_ptr(glob_ptr) {}
303 };
310 template <class Type>
311 struct ReverseArgs : Args<> {
312  typedef std::vector<Type> TypeVector;
313  typedef Type value_type;
314  Type *values;
315  Type *derivs;
316  global *glob_ptr;
318  Type x(Index j) const { return values[input(j)]; }
320  Type y(Index j) const { return values[output(j)]; }
323  Type &dx(Index j) { return derivs[input(j)]; }
326  Type dy(Index j) const { return derivs[output(j)]; }
328  Type *x_ptr(Index j) { return &values[input(j)]; }
330  Type *y_ptr(Index j) { return &values[output(j)]; }
332  Type *dx_ptr(Index j) { return &derivs[input(j)]; }
334  Type *dy_ptr(Index j) { return &derivs[output(j)]; }
336  segment_ref<ReverseArgs, x_read> x_segment(Index from, Index size) {
337  return segment_ref<ReverseArgs, x_read>(*this, from, size);
338  }
340  segment_ref<ReverseArgs, y_read> y_segment(Index from, Index size) {
341  return segment_ref<ReverseArgs, y_read>(*this, from, size);
342  }
345  return segment_ref<ReverseArgs, dx_write>(*this, from, size);
346  }
349  return segment_ref<ReverseArgs, dy_read>(*this, from, size);
350  }
351  ReverseArgs(const IndexVector &inputs, TypeVector &values, TypeVector &derivs,
352  global *glob_ptr = NULL)
353  : Args<>(inputs),
354  values(values.data()),
355  derivs(derivs.data()),
356  glob_ptr(glob_ptr) {
357  ptr.first = (Index)inputs.size();
358  ptr.second = (Index)values.size();
359  }
360 };
361 
362 template <>
363 struct ForwardArgs<bool> : Args<> {
364  typedef std::vector<bool> BoolVector;
365  BoolVector &values;
366  intervals<Index> &marked_intervals;
367  bool x(Index j) { return values[input(j)]; }
368  BoolVector::reference y(Index j) { return values[output(j)]; }
369  ForwardArgs(const IndexVector &inputs, BoolVector &values,
370  intervals<Index> &marked_intervals)
371  : Args<>(inputs), values(values), marked_intervals(marked_intervals) {}
373  template <class Operator>
374  bool any_marked_input(const Operator &op) {
375  if (Operator::implicit_dependencies) {
376  Dependencies dep;
377  op.dependencies(*this, dep);
378  return dep.any(values);
379  } else {
380  Index ninput = op.input_size();
381  for (Index j = 0; j < ninput; j++)
382  if (x(j)) return true;
383  }
384  return false;
385  }
387  template <class Operator>
388  void mark_all_output(const Operator &op) {
389  if (Operator::updating && op.output_size() == 0) {
390  Dependencies dep;
391  op.dependencies_updating(*this, dep);
392 
393  for (size_t i = 0; i < dep.size(); i++) values[dep[i]] = true;
394 
395  for (size_t i = 0; i < dep.I.size(); i++) {
396  Index a = dep.I[i].first;
397  Index b = dep.I[i].second;
398  bool insert = marked_intervals.insert(a, b);
399  if (insert) {
400  for (Index j = a; j <= b; j++) {
401  values[j] = true;
402  }
403  }
404  }
405  } else {
406  Index noutput = op.output_size();
407  for (Index j = 0; j < noutput; j++) y(j) = true;
408  }
409  }
411  template <class Operator>
412  bool mark_dense(const Operator &op) {
413  if (any_marked_input(op)) {
414  mark_all_output(op);
415  return true;
416  }
417  return false;
418  }
419 };
420 
421 template <>
422 struct ReverseArgs<bool> : Args<> {
423  typedef std::vector<bool> BoolVector;
424  BoolVector &values;
425  intervals<Index> &marked_intervals;
426  BoolVector::reference x(Index j) { return values[input(j)]; }
427  bool y(Index j) { return values[output(j)]; }
428  ReverseArgs(IndexVector &inputs, BoolVector &values,
429  intervals<Index> &marked_intervals)
430  : Args<>(inputs), values(values), marked_intervals(marked_intervals) {
431  ptr.first = (Index)inputs.size();
432  ptr.second = (Index)values.size();
433  }
435  template <class Operator>
436  bool any_marked_output(const Operator &op) {
437  if (Operator::elimination_protected) return true;
438  if (Operator::updating && op.output_size() == 0) {
439  Dependencies dep;
440  op.dependencies_updating(*this, dep);
441  return dep.any(values);
442  } else {
443  Index noutput = op.output_size();
444  for (Index j = 0; j < noutput; j++)
445  if (y(j)) return true;
446  }
447  return false;
448  }
450  template <class Operator>
451  void mark_all_input(const Operator &op) {
452  if (Operator::implicit_dependencies) {
453  Dependencies dep;
454  op.dependencies(*this, dep);
455 
456  for (size_t i = 0; i < dep.size(); i++) values[dep[i]] = true;
457 
458  for (size_t i = 0; i < dep.I.size(); i++) {
459  Index a = dep.I[i].first;
460  Index b = dep.I[i].second;
461  bool insert = marked_intervals.insert(a, b);
462  if (insert) {
463  for (Index j = a; j <= b; j++) {
464  values[j] = true;
465  }
466  }
467  }
468  } else {
469  Index ninput = op.input_size();
470  for (Index j = 0; j < ninput; j++) x(j) = true;
471  }
472  }
474  template <class Operator>
475  bool mark_dense(const Operator &op) {
476  if (any_marked_output(op)) {
477  mark_all_input(op);
478  return true;
479  }
480  return false;
481  }
482 };
483 
484 std::string tostr(const Index &x);
485 
486 std::string tostr(const Scalar &x);
487 
488 struct Writer : std::string {
489  static std::ostream *cout;
490  Writer(std::string str);
491  Writer(Scalar x);
492  Writer();
493 
494  template <class V>
495  std::string vinit(const V &x) {
496  std::string y = "{";
497  for (size_t i = 0; i < x.size(); i++)
498  y = y + (i == 0 ? "" : ",") + tostr(x[i]);
499  y = y + "}";
500  return y;
501  }
502 
503  std::string p(std::string x);
504  Writer operator+(const Writer &other);
505  Writer operator-(const Writer &other);
506  Writer operator-();
507  Writer operator*(const Writer &other);
508  Writer operator/(const Writer &other);
509 
510  Writer operator*(const Scalar &other);
511  Writer operator+(const Scalar &other);
512 
513  void operator=(const Writer &other);
514  void operator+=(const Writer &other);
515  void operator-=(const Writer &other);
516  void operator*=(const Writer &other);
517  void operator/=(const Writer &other);
518 
519  template <class T>
520  friend Writer &operator<<(Writer &w, const T &v) {
521  *cout << v;
522  return w;
523  }
524  template <class T>
525  friend Writer &operator<<(Writer &w, const std::valarray<T> &x) {
526  *cout << w.vinit(x);
527  return w;
528  }
529 };
530 
531 template <>
532 struct ForwardArgs<Writer> : ForwardArgs<Scalar> {
533  typedef std::vector<Scalar> ScalarVector;
534  typedef ForwardArgs<Scalar> Base;
536  bool const_literals;
538  bool indirect;
539  void set_indirect() {
540  indirect = true;
541  ptr.first = 0;
542  ptr.second = 0;
543  }
544  Writer xd(Index j) { return "v[" + tostr(input(j)) + "]"; }
545  Writer yd(Index j) { return "v[" + tostr(output(j)) + "]"; }
546  Writer xi(Index j) { return "v[i[" + tostr(Index(ptr.first + j)) + "]]"; }
547  Writer yi(Index j) { return "v[o[" + tostr(Index(ptr.second + j)) + "]]"; }
548  Writer x(Index j) { return (indirect ? xi(j) : xd(j)); }
549  Writer y(Index j) { return (indirect ? yi(j) : yd(j)); }
550  Writer y_const(Index j) {
551  TMBAD_ASSERT2(!indirect, "Attempt to write constants within loop?");
552  return tostr(Base::y(j));
553  }
554  ForwardArgs(IndexVector &inputs, ScalarVector &values)
555  : ForwardArgs<Scalar>(inputs, values) {
556  const_literals = false;
557  indirect = false;
558  }
559 };
560 
561 template <>
562 struct ReverseArgs<Writer> : Args<> {
563  typedef std::vector<Scalar> ScalarVector;
565  bool const_literals;
567  bool indirect;
568  void set_indirect() {
569  indirect = true;
570  ptr.first = 0;
571  ptr.second = 0;
572  }
573  Writer dxd(Index j) { return "d[" + tostr(input(j)) + "]"; }
574  Writer dyd(Index j) { return "d[" + tostr(output(j)) + "]"; }
575  Writer xd(Index j) { return "v[" + tostr(input(j)) + "]"; }
576  Writer yd(Index j) { return "v[" + tostr(output(j)) + "]"; }
577  Writer dxi(Index j) { return "d[i[" + tostr(Index(ptr.first + j)) + "]]"; }
578  Writer dyi(Index j) { return "d[o[" + tostr(Index(ptr.second + j)) + "]]"; }
579  Writer xi(Index j) { return "v[i[" + tostr(Index(ptr.first + j)) + "]]"; }
580  Writer yi(Index j) { return "v[o[" + tostr(Index(ptr.second + j)) + "]]"; }
581  Writer x(Index j) { return (indirect ? xi(j) : xd(j)); }
582  Writer y(Index j) { return (indirect ? yi(j) : yd(j)); }
583  Writer dx(Index j) { return (indirect ? dxi(j) : dxd(j)); }
584  Writer dy(Index j) { return (indirect ? dyi(j) : dyd(j)); }
585 
586  ReverseArgs(IndexVector &inputs, ScalarVector &values) : Args<>(inputs) {
587  const_literals = false;
588  indirect = false;
589  ptr.first = (Index)inputs.size();
590  ptr.second = (Index)values.size();
591  }
592 };
593 
594 struct Position {
595  Position(Index node, Index first, Index second);
596  Position();
597  Index node;
598  IndexPair ptr;
599  bool operator<(const Position &other) const;
600 };
601 
603 template <class T>
604 void sort_inplace(std::vector<T> &x) {
605  std::sort(x.begin(), x.end());
606 }
607 
609 template <class T>
610 void sort_unique_inplace(std::vector<T> &x) {
611  std::sort(x.begin(), x.end());
612  typename std::vector<T>::iterator last = std::unique(x.begin(), x.end());
613  x.erase(last, x.end());
614 }
615 
617 struct graph {
618  std::vector<Index> j;
619  std::vector<Index> p;
620  graph();
621  size_t num_neighbors(Index node);
622  Index *neighbors(Index node);
623  bool empty();
624  size_t num_nodes();
625  void print();
628  std::vector<bool> mark;
630  std::vector<Index> inv2op;
632  std::vector<Index> dep2op;
634  std::vector<Index> rowcounts();
636  std::vector<Index> colcounts();
646  void bfs(const std::vector<Index> &start, std::vector<bool> &visited,
647  std::vector<Index> &result);
660  void search(std::vector<Index> &start, bool sort_input = true,
661  bool sort_output = true);
669  void search(std::vector<Index> &start, std::vector<bool> &visited,
670  bool sort_input = true, bool sort_output = true);
676  std::vector<Index> boundary(const std::vector<Index> &subgraph);
681  graph(size_t num_nodes, const std::vector<IndexPair> &edges);
682 };
683 
684 namespace {
685 template <class CompleteOperator, bool dynamic>
686 struct constructOperator {};
687 template <class CompleteOperator>
688 struct constructOperator<CompleteOperator, false> {
689  CompleteOperator *operator()() {
690  static CompleteOperator *pOp = new CompleteOperator();
691  return pOp;
692  }
693 };
694 template <class CompleteOperator>
695 struct constructOperator<CompleteOperator, true> {
696  CompleteOperator *operator()() {
697  CompleteOperator *pOp = new CompleteOperator();
698  return pOp;
699  }
700 
701  template <class T1>
702  CompleteOperator *operator()(const T1 &x1) {
703  CompleteOperator *pOp = new CompleteOperator(x1);
704  return pOp;
705  }
706 
707  template <class T1, class T2>
708  CompleteOperator *operator()(const T1 &x1, const T2 &x2) {
709  CompleteOperator *pOp = new CompleteOperator(x1, x2);
710  return pOp;
711  }
712 
713  template <class T1, class T2, class T3>
714  CompleteOperator *operator()(const T1 &x1, const T2 &x2, const T3 &x3) {
715  CompleteOperator *pOp = new CompleteOperator(x1, x2, x3);
716  return pOp;
717  }
718 
719  template <class T1, class T2, class T3, class T4>
720  CompleteOperator *operator()(const T1 &x1, const T2 &x2, const T3 &x3,
721  const T4 &x4) {
722  CompleteOperator *pOp = new CompleteOperator(x1, x2, x3, x4);
723  return pOp;
724  }
725 };
726 } // namespace
727 
732 struct op_info {
734  typedef int IntRep;
736  IntRep code;
738  enum op_flag {
758  op_flag_count
759  };
760  template <class T>
761  IntRep get_flags(T op) {
762  return
763 
764  (op.dynamic * (1 << dynamic)) |
765  (op.smart_pointer * (1 << smart_pointer)) |
766  (op.is_linear * (1 << is_linear)) |
767  (op.is_constant * (1 << is_constant)) |
768  (op.independent_variable * (1 << independent_variable)) |
769  (op.dependent_variable * (1 << dependent_variable)) |
770  (op.allow_remap * (1 << allow_remap)) |
771  (op.elimination_protected * (1 << elimination_protected)) |
772  (op.updating * (1 << updating));
773  }
774  op_info();
775  op_info(op_flag f);
776 
777  template <class T>
778  op_info(T op) : code(get_flags(op)) {}
780  bool test(op_flag f) const;
781  op_info &operator|=(const op_info &other);
782  op_info &operator&=(const op_info &other);
783 };
784 
797 struct global {
798  struct ad_plain;
799  struct ad_aug;
800  typedef TMBAD_REPLAY_TYPE Replay;
801  struct ad_segment;
802  struct print_config;
// Type-erased interface to one node on the tape. The tape
// (operation_stack below) stores OperatorPure pointers and drives them
// through one overload set of virtual sweeps per evaluation type:
//   Scalar -> numerical forward/reverse sweep
//   bool   -> sparsity / mark propagation sweep
//   Replay -> re-taping sweep (see struct replay)
//   Writer -> source-code emitting sweep
811  struct OperatorPure {
// Advance / rewind the (input, output) stream pointer past this node
// without evaluating it.
814  virtual void increment(IndexPair &ptr) = 0;
817  virtual void decrement(IndexPair &ptr) = 0;
819  virtual void forward(ForwardArgs<Scalar> &args) = 0;
821  virtual void reverse(ReverseArgs<Scalar> &args) = 0;
// *_incr / *_decr variants evaluate AND move args.ptr.
823  virtual void forward_incr(ForwardArgs<Scalar> &args) = 0;
825  virtual void reverse_decr(ReverseArgs<Scalar> &args) = 0;
827  virtual Index input_size() = 0;
829  virtual Index output_size() = 0;
834  virtual void forward(ForwardArgs<bool> &args) = 0;
839  virtual void reverse(ReverseArgs<bool> &args) = 0;
841  virtual void forward_incr(ForwardArgs<bool> &args) = 0;
843  virtual void reverse_decr(ReverseArgs<bool> &args) = 0;
845  virtual void forward_incr_mark_dense(ForwardArgs<bool> &args) = 0;
// Report the variables this node reads; dependencies_updating reports
// the subset it updates in place (cf. the 'updating' op_info flag).
859  virtual void dependencies(Args<> &args, Dependencies &dep) = 0;
863  virtual void dependencies_updating(Args<> &args, Dependencies &dep) = 0;
865  virtual void forward(ForwardArgs<Replay> &args) = 0;
867  virtual void reverse(ReverseArgs<Replay> &args) = 0;
869  virtual void forward_incr(ForwardArgs<Replay> &args) = 0;
871  virtual void reverse_decr(ReverseArgs<Replay> &args) = 0;
873  virtual void forward(ForwardArgs<Writer> &args) = 0;
875  virtual void reverse(ReverseArgs<Writer> &args) = 0;
877  virtual void forward_incr(ForwardArgs<Writer> &args) = 0;
879  virtual void reverse_decr(ReverseArgs<Writer> &args) = 0;
881  virtual const char *op_name() { return "NoName"; }
// Fusion hooks: self_fuse() may collapse a run of identical nodes,
// other_fuse() may merge with a neighbouring node; both may return a
// replacement operator or signal "no fusion" (see Rep/RepCompress).
885  virtual OperatorPure *self_fuse() = 0;
889  virtual OperatorPure *other_fuse(OperatorPure *other) = 0;
891  virtual OperatorPure *copy() = 0;
// Lifetime goes through deallocate(), never plain delete: shared
// (reference-counted) operators decide themselves when to die.
893  virtual void deallocate() = 0;
895  virtual op_info info() = 0;
897  virtual void *operator_data() = 0;
902  virtual void *identifier() = 0;
904  virtual void print(print_config cfg) = 0;
907  virtual void *incomplete() = 0;
908  virtual ~OperatorPure() {}
909  };
910 
// The tape itself: a sequence of owning OperatorPure pointers.
// Copy/assign/destroy/clear are user-declared because the elements are
// owning pointers — presumably deep-copied via copy_from and released
// through OperatorPure::deallocate (definitions not in view; confirm).
917  struct operation_stack : std::vector<OperatorPure *> {
918  typedef std::vector<OperatorPure *> Base;
922  operation_stack();
924  operation_stack(const operation_stack &other);
927  void push_back(OperatorPure *x);
929  operation_stack &operator=(const operation_stack &other);
930  ~operation_stack();
932  void clear();
933  void copy_from(const operation_stack &other);
934  };
935 
940  std::vector<Scalar> values;
943  std::vector<Scalar> derivs;
945  IndexVector inputs;
948  std::vector<Index> inv_index;
951  std::vector<Index> dep_index;
952 
953  mutable std::vector<IndexPair> subgraph_ptr;
954  std::vector<Index> subgraph_seq;
956  void (*forward_compiled)(Scalar *);
958  void (*reverse_compiled)(Scalar *, Scalar *);
959 
960  global();
963  void clear();
964 
980  void shrink_to_fit(double tol = .9);
981 
985  void clear_deriv(Position start = Position(0, 0, 0));
986 
988  Scalar &value_inv(Index i);
990  Scalar &deriv_inv(Index i);
992  Scalar &value_dep(Index i);
994  Scalar &deriv_dep(Index i);
996  Position begin();
998  Position end();
999 
// Trivial node filter used as the default in forward_loop/reverse_loop;
// presumably accepts every index (definition not in view — confirm).
1001  struct no_filter {
1002  CONSTEXPR bool operator[](size_t i) const;
1003  };
// One forward sweep over the operator stack starting at node `begin`.
// Filtered-out nodes are not evaluated, but args.ptr is still advanced
// past them so later nodes see the correct value offsets.
1009  template <class ForwardArgs, class NodeFilter>
1010  void forward_loop(ForwardArgs &args, size_t begin,
1011  const NodeFilter &node_filter) const {
1012  for (size_t i = begin; i < opstack.size(); i++) {
1013  if (node_filter[i])
1014  opstack[i]->forward_incr(args);
1015  else
1016  opstack[i]->increment(args.ptr);
1017  }
1018  }
// Unfiltered forward sweep: evaluate every node from `begin` onwards.
1020  template <class ForwardArgs>
1021  void forward_loop(ForwardArgs &args, size_t begin = 0) const {
1022  forward_loop(args, begin, no_filter());
1023  }
// One reverse sweep, visiting nodes in opposite order down to `begin`.
// Skipped nodes still rewind args.ptr so offsets stay consistent.
1028  template <class ReverseArgs, class NodeFilter>
1029  void reverse_loop(ReverseArgs &args, size_t begin,
1030  const NodeFilter &node_filter) const {
1031  for (size_t i = opstack.size(); i > begin;) {
1032  i--;
1033  if (node_filter[i])
1034  opstack[i]->reverse_decr(args);
1035  else
1036  opstack[i]->decrement(args.ptr);
1037  }
1038  }
// Unfiltered reverse sweep over the whole tail of the tape.
1040  template <class ReverseArgs>
1041  void reverse_loop(ReverseArgs &args, size_t begin = 0) const {
1042  reverse_loop(args, begin, no_filter());
1043  }
1045  template <class ForwardArgs>
1047  subgraph_cache_ptr();
1048  for (size_t j = 0; j < subgraph_seq.size(); j++) {
1049  Index i = subgraph_seq[j];
1050  args.ptr = subgraph_ptr[i];
1051  opstack[i]->forward(args);
1052  }
1053  }
1055  template <class ReverseArgs>
1057  subgraph_cache_ptr();
1058  for (size_t j = subgraph_seq.size(); j > 0;) {
1059  j--;
1060  Index i = subgraph_seq[j];
1061  args.ptr = subgraph_ptr[i];
1062  opstack[i]->reverse(args);
1063  }
1064  }
1075  template <class Vector>
1077  typename Vector::value_type value =
1078  typename Vector::value_type(0)) const {
1079  if (array.size() != values.size()) {
1080  array.resize(values.size());
1081  std::fill(array.begin(), array.end(), value);
1082  return;
1083  }
1084  subgraph_cache_ptr();
1085  for (size_t j = 0; j < subgraph_seq.size(); j++) {
1086  Index i = subgraph_seq[j];
1087  size_t noutput = opstack[i]->output_size();
1088  for (size_t k = 0; k < noutput; k++)
1089  array[subgraph_ptr[i].second + k] = value;
1090  }
1091  }
1092 
1097  void forward(Position start = Position(0, 0, 0));
1105  void reverse(Position start = Position(0, 0, 0));
1107  void forward_sub();
1109  void reverse_sub();
1110 
1112  void forward(std::vector<bool> &marks);
1114  void reverse(std::vector<bool> &marks);
1119  void forward_sub(std::vector<bool> &marks,
1120  const std::vector<bool> &node_filter = std::vector<bool>());
1125  void reverse_sub(std::vector<bool> &marks,
1126  const std::vector<bool> &node_filter = std::vector<bool>());
1135  void forward_dense(std::vector<bool> &marks);
1136 
1137  intervals<Index> updating_intervals() const;
1138 
1139  intervals<Index> updating_intervals_sub() const;
1140 
// Helper that replays the recorded tape `orig` into another tape
// `target`, using Replay-typed workspaces parallel to orig's
// values/derivs arrays.
1141  struct replay {
// Replay-typed value and derivative workspaces.
1143  std::vector<Replay> values;
1146  std::vector<Replay> derivs;
// Source tape (read-only) and destination tape of the replay.
1148  const global &orig;
1150  global &target;
1152  global *parent_glob;
// Slot accessors for independent (inv) / dependent (dep) variables.
1154  Replay &value_inv(Index i);
1156  Replay &deriv_inv(Index i);
1158  Replay &value_dep(Index i);
1160  Replay &deriv_dep(Index i);
1164  replay(const global &orig, global &target);
// start()/stop() bracket a replay session (definitions not in view).
1173  void start();
1178  void stop();
1180  void add_updatable_derivs(const intervals<Index> &I);
1182  void clear_deriv();
// Replay sweeps; note the tag defaults differ: forward tags independent
// variables by default, reverse tags dependent variables by default.
1189  void forward(bool inv_tags = true, bool dep_tags = true,
1190  Position start = Position(0, 0, 0),
1191  const std::vector<bool> &node_filter = std::vector<bool>());
1199  void reverse(bool dep_tags = true, bool inv_tags = false,
1200  Position start = Position(0, 0, 0),
1201  const std::vector<bool> &node_filter = std::vector<bool>());
// Subgraph-restricted variants.
1203  void forward_sub();
1205  void reverse_sub();
1207  void clear_deriv_sub();
1208  };
1209 
1214  void forward_replay(bool inv_tags = true, bool dep_tags = true);
1215 
1221  void subgraph_cache_ptr() const;
1229  void set_subgraph(const std::vector<bool> &marks, bool append = false);
1231  void mark_subgraph(std::vector<bool> &marks);
1233  void unmark_subgraph(std::vector<bool> &marks);
1235  void subgraph_trivial();
1241  void clear_deriv_sub();
1274  global extract_sub(std::vector<Index> &var_remap, global new_glob = global());
1279  void extract_sub_inplace(std::vector<bool> marks);
1283  global extract_sub();
1284 
1293  std::vector<Index> var2op();
1299  std::vector<bool> var2op(const std::vector<bool> &values);
1301  std::vector<Index> op2var(const std::vector<Index> &seq);
1303  std::vector<bool> op2var(const std::vector<bool> &seq_mark);
1312  std::vector<Index> op2idx(const std::vector<Index> &var_subset,
1313  Index NA = (Index)-1);
1315  std::vector<bool> mark_space(size_t n, const std::vector<Index> ind);
1317  std::vector<bool> inv_marks();
1319  std::vector<bool> dep_marks();
1321  std::vector<bool> subgraph_marks();
1322 
// Functor used by build_graph: operator() is invoked once per
// dependency variable `dep_j` of the current node `i` and appends the
// corresponding (node, node) edge. op_marks presumably de-duplicates
// edges per iteration (definition not in view — confirm).
1323  struct append_edges {
// Index of the node currently being processed (shared by reference).
1324  size_t &i;
1325  const std::vector<bool> &keep_var;
// Maps variable index -> producing operator index.
1326  std::vector<Index> &var2op;
// Output edge list being built.
1327  std::vector<IndexPair> &edges;
1328 
1329  std::vector<bool> op_marks;
1330  size_t pos;
1331  append_edges(size_t &i, size_t num_nodes, const std::vector<bool> &keep_var,
1332  std::vector<Index> &var2op, std::vector<IndexPair> &edges);
1333  void operator()(Index dep_j);
1334 
// Hooks called before/after processing each node.
1335  void start_iteration();
1336 
1337  void end_iteration();
1338  };
1347  graph build_graph(bool transpose, const std::vector<bool> &keep_var);
1351  graph forward_graph(std::vector<bool> keep_var = std::vector<bool>(0));
1355  graph reverse_graph(std::vector<bool> keep_var = std::vector<bool>(0));
1356 
1361  bool identical(const global &other) const;
1362 
// Fold an arbitrary value `x` into the running hash `h`. The bytes of x
// are first copied into a zero-initialized hash_t buffer so a trailing
// partial word hashes deterministically, then mixed word-by-word with
// the multiplicative constants A and B.
1364  template <class T>
1365  void hash(hash_t &h, T x) const {
// Number of hash_t words needed to cover sizeof(T), rounded up.
1366  static const size_t n =
1367  (sizeof(T) / sizeof(hash_t)) + (sizeof(T) % sizeof(hash_t) != 0);
1368  hash_t buffer[n];
1369  std::fill(buffer, buffer + n, 0);
1370  for (size_t i = 0; i < sizeof(x); i++)
1371  ((char *)buffer)[i] = ((char *)&x)[i];
1372  hash_t A = 54059;
1373  hash_t B = 76963;
1374  for (size_t i = 0; i < n; i++) h = (A * h) ^ (B * buffer[i]);
1375  }
1376 
1385  hash_t hash() const;
1386 
// Options for hash_sweep() (exact semantics documented at the
// definition, which is not in view).
1388  struct hash_config {
// NOTE(review): presumably collapses per-variable hashes when set.
1398  bool reduce;
// NOTE(review): presumably seeds hashing of selected independent
// variables — confirm against hash_sweep's definition.
1402  std::vector<Index> inv_seed;
1403  };
1404 
1459  std::vector<hash_t> hash_sweep(hash_config cfg) const;
1461  std::vector<hash_t> hash_sweep(bool weak = true) const;
1462 
1476  void eliminate();
1477 
// Formatting options for global::print(): a line prefix, a marker
// string, and a recursion depth.
1479  struct print_config {
1480  std::string prefix, mark;
1481  int depth;
1482  print_config();
1483  };
1485  void print(print_config cfg);
1487  void print();
1488 
1490  template <int ninput_, int noutput_ = 1>
1491  struct Operator {
1493  static const bool dynamic = false;
1495  static const int ninput = ninput_;
1497  static const int noutput = noutput_;
1499  static const int independent_variable = false;
1501  static const int dependent_variable = false;
1503  static const bool have_input_size_output_size = false;
1505  static const bool have_increment_decrement = false;
1507  static const bool have_forward_reverse = true;
1509  static const bool have_forward_incr_reverse_decr = false;
1511  static const bool have_forward_mark_reverse_mark = false;
1513  static const bool have_dependencies = false;
1519  static const bool allow_remap = true;
1530  static const bool implicit_dependencies = false;
1532  static const bool add_static_identifier = false;
1535  static const bool add_forward_replay_copy = false;
1538  static const bool have_eval = false;
1540  static const int max_fuse_depth = 2;
1542  static const bool is_linear = false;
1544  static const bool is_constant = false;
1546  static const bool smart_pointer = false;
1548  static const bool elimination_protected = false;
1574  static const bool updating = false;
1577  void dependencies_updating(Args<> &args, Dependencies &dep) const {}
1580  return NULL;
1581  }
1583  void *operator_data() { return NULL; }
1585  void print(print_config cfg) {}
1586  };
// Base for heap-allocated ("dynamic") operators with compile-time
// input/output counts; fusion is disabled (max_fuse_depth = 0).
1589  template <int ninput, int noutput>
1590  struct DynamicOperator : Operator<ninput, noutput> {
1592  static const bool dynamic = true;
1594  static const int max_fuse_depth = 0;
1595  };
// Dynamic operator whose output count is a runtime member (the -1
// template argument marks noutput as dynamic).
1598  template <int ninput>
1599  struct DynamicOutputOperator : Operator<ninput, -1> {
1601  static const bool dynamic = true;
1603  static const int max_fuse_depth = 0;
1604  Index noutput;
1605  };
// Dynamic operator whose input count is a runtime member (the -1
// template argument marks ninput as dynamic).
1606  template <int noutput = 1>
1607  struct DynamicInputOperator : Operator<-1, noutput> {
1609  static const bool dynamic = true;
1611  static const int max_fuse_depth = 0;
1612  Index ninput;
1613  };
// Dynamic operator with both input and output counts chosen at runtime;
// provides the corresponding input_size()/output_size() accessors.
1614  struct DynamicInputOutputOperator : Operator<-1, -1> {
1616  static const bool dynamic = true;
1618  static const int max_fuse_depth = 0;
1619  Index ninput_, noutput_;
1620  DynamicInputOutputOperator(Index ninput, Index noutput);
1621  Index input_size() const;
1622  Index output_size() const;
1623  static const bool have_input_size_output_size = true;
1624  };
// Fully dynamic operator with unique (non-reference-counted) ownership:
// each tape node owns its own copy.
1625  struct UniqueDynamicOperator : Operator<-1, -1> {
1627  static const bool dynamic = true;
1629  static const int max_fuse_depth = 0;
1631  static const bool smart_pointer = false;
1634  static const bool have_input_size_output_size = true;
1635  };
// As UniqueDynamicOperator, but instances are reference-counted and
// shared between tape nodes (smart_pointer = true).
1636  struct SharedDynamicOperator : UniqueDynamicOperator {
1638  static const bool smart_pointer = true;
1639  };
1640 
// Mixin: derive runtime input_size()/output_size() from the static
// ninput/noutput members of OperatorBase.
1643  template <class OperatorBase>
1644  struct AddInputSizeOutputSize : OperatorBase {
1645  INHERIT_CTOR(AddInputSizeOutputSize, OperatorBase)
1646  Index input_size() const { return this->ninput; }
1647  Index output_size() const { return this->noutput; }
1648  static const bool have_input_size_output_size = true;
1649  };
1650 
// Mixin: default stream-pointer movement — advance/rewind the input and
// output cursors by this node's input_size()/output_size().
1653  template <class OperatorBase>
1654  struct AddIncrementDecrement : OperatorBase {
1655  INHERIT_CTOR(AddIncrementDecrement, OperatorBase)
1656  void increment(IndexPair &ptr) {
1657  ptr.first += this->input_size();
1658  ptr.second += this->output_size();
1659  }
1660  void decrement(IndexPair &ptr) {
1661  ptr.first -= this->input_size();
1662  ptr.second -= this->output_size();
1663  }
1664  static const bool have_increment_decrement = true;
1665  };
1666 
// Mixin: synthesize the non-moving forward()/reverse() sweeps from the
// pointer-moving *_incr/*_decr variants, working on a copy of args so
// the caller's cursor state is left untouched.
1670  template <class OperatorBase>
1671  struct AddForwardReverse : OperatorBase {
1672  INHERIT_CTOR(AddForwardReverse, OperatorBase)
1673 
1674  template <class Type>
1675  void forward(ForwardArgs<Type> &args) {
1676  ForwardArgs<Type> args_cpy(args);
1677  OperatorBase::forward_incr(args_cpy);
1678  }
1679  template <class Type>
1680  void reverse(ReverseArgs<Type> &args) {
1681  ReverseArgs<Type> args_cpy(args);
// reverse_decr expects the cursor positioned AFTER this node, so the
// copied cursor is advanced first.
1682  OperatorBase::increment(args_cpy.ptr);
1683  OperatorBase::reverse_decr(args_cpy);
1684  }
1685  static const bool have_forward_reverse = true;
1686  };
1687 
// Mixin (converse of AddForwardReverse): synthesize the pointer-moving
// *_incr/*_decr sweeps from plain forward()/reverse() plus
// increment()/decrement().
1691  template <class OperatorBase>
1692  struct AddForwardIncrReverseDecr : OperatorBase {
1693  INHERIT_CTOR(AddForwardIncrReverseDecr, OperatorBase)
1694 
1695  template <class Type>
1696  void forward_incr(ForwardArgs<Type> &args) {
1697  OperatorBase::forward(args);
1698  OperatorBase::increment(args.ptr);
1699  }
1700 
1701  template <class Type>
1702  void reverse_decr(ReverseArgs<Type> &args) {
// Rewind first: reverse() expects the cursor at this node's start.
1703  OperatorBase::decrement(args.ptr);
1704  OperatorBase::reverse(args);
1705  }
1706  static const bool have_forward_incr_reverse_decr = true;
1707  };
1708 
// Mixin: route the bool (sparsity-mark) sweeps to args.mark_dense —
// i.e. conservative dense marking — while all other scalar types fall
// through to the base implementation.
1711  template <class OperatorBase>
1712  struct AddForwardMarkReverseMark : OperatorBase {
1713  INHERIT_CTOR(AddForwardMarkReverseMark, OperatorBase)
1714 
1715  template <class Type>
1716  void forward(ForwardArgs<Type> &args) {
1717  OperatorBase::forward(args);
1718  }
1719  template <class Type>
1720  void reverse(ReverseArgs<Type> &args) {
1721  OperatorBase::reverse(args);
1722  }
1723 
// Non-template overloads take precedence for ForwardArgs<bool>.
1724  void forward(ForwardArgs<bool> &args) { args.mark_dense(*this); }
1725  void reverse(ReverseArgs<bool> &args) { args.mark_dense(*this); }
1726  static const bool have_forward_mark_reverse_mark = true;
1727  };
1728 
// Mixin: default dependency report — every input variable of the node
// is listed as a dependency.
1731  template <class OperatorBase>
1732  struct AddDependencies : OperatorBase {
1733  INHERIT_CTOR(AddDependencies, OperatorBase)
1734  void dependencies(Args<> &args, Dependencies &dep) const {
1735  Index ninput_ = this->input_size();
1736  for (Index j = 0; j < ninput_; j++) dep.push_back(args.input(j));
1737  }
1738  static const bool have_dependencies = true;
1739  };
1740 
// Mixin family: synthesize forward() from a scalar eval() method for
// operators with exactly one or two inputs. The unspecialized template
// (other arities) adds nothing.
1743  template <class OperatorBase, int ninput>
1744  struct AddForwardFromEval : OperatorBase {};
// Unary case: y(0) = eval(x(0)).
1746  template <class OperatorBase>
1747  struct AddForwardFromEval<OperatorBase, 1> : OperatorBase {
1748  INHERIT_CTOR(AddForwardFromEval, OperatorBase)
1749  template <class Type>
1750  void forward(ForwardArgs<Type> &args) {
1751  args.y(0) = this->eval(args.x(0));
1752  }
1753  };
// Binary case: y(0) = eval(x(0), x(1)).
1755  template <class OperatorBase>
1756  struct AddForwardFromEval<OperatorBase, 2> : OperatorBase {
1757  INHERIT_CTOR(AddForwardFromEval, OperatorBase)
1758  template <class Type>
1759  void forward(ForwardArgs<Type> &args) {
1760  args.y(0) = this->eval(args.x(0), args.x(1));
1761  }
1762  };
1763 
1765  template <bool flag, class dummy>
1767  void increment() {}
1768  void decrement() {}
1769  size_t operator()() const { return 0; }
1770  };
// Active specialization: a real reference count starting at zero.
// (The flag==false primary template, partially outside this view, is a
// stateless no-op with the same interface.)
1771  template <class dummy>
1772  struct ReferenceCounter<true, dummy> {
1773  size_t counter;
1774  ReferenceCounter() : counter(0) {}
1775  void increment() { counter++; }
1776  void decrement() { counter--; }
// operator() reads the current count.
1777  size_t operator()() const { return counter; }
1778  };
1779 
// Minimal compile-time type selector (same role as std::conditional):
// if_else<flag, Yes, No>::type is Yes when flag is true, else No.
1781  template <bool flag, class Yes, class No>
1782  struct if_else {};
1783  template <class Yes, class No>
1784  struct if_else<true, Yes, No> {
1785  typedef Yes type;
1786  };
1787  template <class Yes, class No>
1788  struct if_else<false, Yes, No> {
1789  typedef No type;
1790  };
1791 
1793  template <class OperatorBase>
1794  struct CPL {
1795  static const bool test1 = !OperatorBase::have_eval;
1797  typedef typename if_else<
1798  test1, OperatorBase,
1800 
1801  static const bool test2 = Result1::have_input_size_output_size;
1803  typedef
1806 
1807  static const bool test3 = !Result2::have_dependencies;
1809  typedef typename if_else<test3, AddDependencies<Result2>, Result2>::type
1811 
1812  static const bool test4 = Result3::have_increment_decrement;
1814  typedef
1817 
1818  static const bool test5 = Result4::have_forward_mark_reverse_mark;
1820  typedef typename if_else<test5, Result4,
1822 
1823  static const bool test6 = Result5::have_forward_reverse &&
1824  !Result5::have_forward_incr_reverse_decr;
1827  Result5>::type Result6;
1828 
1829  static const bool test7 = Result6::have_forward_incr_reverse_decr &&
1830  !Result6::have_forward_reverse;
1832  typedef typename if_else<test7, AddForwardReverse<Result6>, Result6>::type
1834 
1835  typedef Result7 type;
1836  };
1837 
// Operator formed by concatenating Operator1 followed by Operator2.
// Input/output counts are the sums of the operands'; only the
// pointer-moving sweeps are provided, and the reverse sweep visits Op2
// before Op1 (adjoints propagate in opposite order).
1839  template <class Operator1, class Operator2>
1840  struct Fused : Operator<Operator1::ninput + Operator2::ninput,
1841  Operator1::noutput + Operator2::noutput> {
// Fully completed (CPL) instances of the two operands.
1842  typename CPL<Operator1>::type Op1;
1843  typename CPL<Operator2>::type Op2;
// Flags combine conjunctively: the fusion is an independent/dependent
// variable or linear only if both operands are.
1845  static const int independent_variable =
1846  Operator1::independent_variable && Operator2::independent_variable;
1848  static const int dependent_variable =
1849  Operator1::dependent_variable && Operator2::dependent_variable;
// Each fusion step consumes one unit of the shallower operand's depth,
// bounding how deep fusion can nest.
1851  static const int max_fuse_depth =
1852  (Operator1::max_fuse_depth < Operator2::max_fuse_depth
1853  ? Operator1::max_fuse_depth - 1
1854  : Operator2::max_fuse_depth - 1);
1856  static const bool is_linear = Operator1::is_linear && Operator2::is_linear;
1857  template <class Type>
1858  void forward_incr(ForwardArgs<Type> &args) {
1859  Op1.forward_incr(args);
1860  Op2.forward_incr(args);
1861  }
1862  template <class Type>
1863  void reverse_decr(ReverseArgs<Type> &args) {
1864  Op2.reverse_decr(args);
1865  Op1.reverse_decr(args);
1866  }
1868  static const bool have_forward_incr_reverse_decr = true;
1870  static const bool have_forward_reverse = false;
1871  const char *op_name() { return "Fused"; }
1872  };
1881  template <class Operator1>
1882  struct Rep : DynamicOperator<-1, -1> {
1883  typename CPL<Operator1>::type Op;
1885  static const int independent_variable = Operator1::independent_variable;
1887  static const int dependent_variable = Operator1::dependent_variable;
1889  static const bool is_linear = Operator1::is_linear;
1890  Index n;
1891  Rep(Index n) : n(n) {}
1892  Index input_size() const { return Operator1::ninput * n; }
1893  Index output_size() const { return Operator1::noutput * n; }
1895  static const bool have_input_size_output_size = true;
1896  template <class Type>
1897  void forward_incr(ForwardArgs<Type> &args) {
1898  for (size_t i = 0; i < (size_t)n; i++) Op.forward_incr(args);
1899  }
1900  template <class Type>
1901  void reverse_decr(ReverseArgs<Type> &args) {
1902  for (size_t i = 0; i < (size_t)n; i++) Op.reverse_decr(args);
1903  }
1905  static const bool have_forward_incr_reverse_decr = true;
1907  static const bool have_forward_reverse = false;
1914  TMBAD_ASSERT(false);
1915  std::vector<Index> &inputs = get_glob()->inputs;
1916  size_t k = Op.input_size();
1917  size_t start = inputs.size() - k * n;
1918  std::valarray<Index> increment(k);
1919  if (k > 0) {
1920  for (size_t i = 0; i < (size_t)n - 1; i++) {
1921  std::valarray<Index> v1(&inputs[start + i * k], k);
1922  std::valarray<Index> v2(&inputs[start + (i + 1) * k], k);
1923  if (i == 0) {
1924  increment = v2 - v1;
1925  } else {
1926  bool ok = (increment == (v2 - v1)).min();
1927  if (!ok) return NULL;
1928  }
1929  }
1930  }
1931 
1932  size_t reduction = (n - 1) * k;
1933  inputs.resize(inputs.size() - reduction);
1934  return get_glob()->getOperator<RepCompress<Operator1> >(n, increment);
1935  }
1936  OperatorPure *other_fuse(OperatorPure *self, OperatorPure *other) {
1937  OperatorPure *op1 = get_glob()->getOperator<Operator1>();
1938  if (op1 == other) {
1939  this->n++;
1940  return self;
1941  }
1942  return NULL;
1943  }
1944  const char *op_name() { return "Rep"; }
1945  };
1956  template <class Operator1>
1957  struct RepCompress : DynamicOperator<-1, -1> {
1959  static const int independent_variable = Operator1::independent_variable;
1961  static const int dependent_variable = Operator1::dependent_variable;
1963  static const bool is_linear = Operator1::is_linear;
1964  typename CPL<Operator1>::type Op;
1965  Index n;
1966 
1967  std::valarray<Index> increment_pattern;
1968  RepCompress(Index n, std::valarray<Index> v) : n(n), increment_pattern(v) {}
1969  Index input_size() const { return Operator1::ninput; }
1970  Index output_size() const { return Operator1::noutput * n; }
1972  static const bool have_input_size_output_size = true;
1974  template <class Type>
1976  std::valarray<Index> inputs(input_size());
1977  for (size_t i = 0; i < inputs.size(); i++) inputs[i] = args.input(i);
1978  ForwardArgs<Type> args_cpy = args;
1979  args_cpy.inputs = &inputs[0];
1980  args_cpy.ptr.first = 0;
1981  for (size_t i = 0; i < (size_t)n; i++) {
1982  Op.forward(args_cpy);
1983  inputs += this->increment_pattern;
1984  args_cpy.ptr.second += Op.output_size();
1985  }
1986  }
1988  template <class Type>
1990  std::valarray<Index> inputs(input_size());
1991  for (size_t i = 0; i < inputs.size(); i++) inputs[i] = args.input(i);
1992  inputs += n * this->increment_pattern;
1993  ReverseArgs<Type> args_cpy = args;
1994  args_cpy.inputs = &inputs[0];
1995  args_cpy.ptr.first = 0;
1996  args_cpy.ptr.second += n * Op.output_size();
1997  for (size_t i = 0; i < (size_t)n; i++) {
1998  inputs -= this->increment_pattern;
1999  args_cpy.ptr.second -= Op.output_size();
2000  Op.reverse(args_cpy);
2001  }
2002  }
2004  void dependencies(Args<> &args, Dependencies &dep) const {
2005  std::valarray<Index> inputs(input_size());
2006  for (size_t i = 0; i < inputs.size(); i++) inputs[i] = args.input(i);
2007  for (size_t i = 0; i < (size_t)n; i++) {
2008  dep.insert(dep.end(), &inputs[0], &inputs[0] + inputs.size());
2009  inputs += this->increment_pattern;
2010  }
2011  }
2012  static const bool have_dependencies = true;
2013  void forward(ForwardArgs<Writer> &args) {
2014  std::valarray<Index> inputs(Op.input_size());
2015  for (size_t i = 0; i < (size_t)Op.input_size(); i++)
2016  inputs[i] = args.input(i);
2017  std::valarray<Index> outputs(Op.output_size());
2018  for (size_t i = 0; i < (size_t)Op.output_size(); i++)
2019  outputs[i] = args.output(i);
2020  Writer w;
2021  int ninp = Op.input_size();
2022  int nout = Op.output_size();
2023 
2024  w << "for (int count = 0, "
2025  << "i[" << ninp << "]=" << inputs << ", "
2026  << "di[" << ninp << "]=" << increment_pattern << ", "
2027  << "o[" << nout << "]=" << outputs << "; "
2028  << "count < " << n << "; count++) {\n";
2029 
2030  w << " ";
2031  ForwardArgs<Writer> args_cpy = args;
2032  args_cpy.set_indirect();
2033  Op.forward(args_cpy);
2034  w << "\n";
2035 
2036  w << " ";
2037  w << "for (int k=0; k<" << ninp << "; k++) i[k] += di[k];\n";
2038  w << " ";
2039  w << "for (int k=0; k<" << nout << "; k++) o[k] += " << nout << ";\n";
2040 
2041  w << " ";
2042  w << "}";
2043  }
2044  void reverse(ReverseArgs<Writer> &args) {
2045  std::valarray<Index> inputs(Op.input_size());
2046  for (size_t i = 0; i < (size_t)Op.input_size(); i++)
2047  inputs[i] = args.input(i);
2048  inputs += n * increment_pattern;
2049  std::valarray<Index> outputs(Op.output_size());
2050  for (size_t i = 0; i < (size_t)Op.output_size(); i++)
2051  outputs[i] = args.output(i);
2052  outputs += n * Op.output_size();
2053  Writer w;
2054  int ninp = Op.input_size();
2055  int nout = Op.output_size();
2056 
2057  w << "for (int count = 0, "
2058  << "i[" << ninp << "]=" << inputs << ", "
2059  << "di[" << ninp << "]=" << increment_pattern << ", "
2060  << "o[" << nout << "]=" << outputs << "; "
2061  << "count < " << n << "; count++) {\n";
2062 
2063  w << " ";
2064  w << "for (int k=0; k<" << ninp << "; k++) i[k] -= di[k];\n";
2065  w << " ";
2066  w << "for (int k=0; k<" << nout << "; k++) o[k] -= " << nout << ";\n";
2067 
2068  w << " ";
2069  ReverseArgs<Writer> args_cpy = args;
2070  args_cpy.set_indirect();
2071  Op.reverse(args_cpy);
2072  w << "\n";
2073 
2074  w << " ";
2075  w << "}";
2076  }
2078  static const bool have_forward_incr_reverse_decr = false;
2080  static const bool have_forward_reverse = true;
2082  static const bool have_forward_mark_reverse_mark = true;
2083  const char *op_name() { return "CRep"; }
2084 
2085  struct operator_data_t {
2086  OperatorPure *Op;
2087  Index n;
2088  std::valarray<Index> ip;
2089  operator_data_t(const RepCompress &x)
2090  : Op(get_glob()->getOperator<Operator1>()),
2091  n(x.n),
2092  ip(x.increment_pattern) {}
2093  ~operator_data_t() { Op->deallocate(); }
2094  bool operator==(const operator_data_t &other) {
2095  return (Op == other.Op) && (ip.size() == other.ip.size()) &&
2096  ((ip - other.ip).min() == 0);
2097  }
2098  };
2099  void *operator_data() { return new operator_data_t(*this); }
2100  OperatorPure *other_fuse(OperatorPure *self, OperatorPure *other) {
2101  if (this->op_name() == other->op_name()) {
2102  operator_data_t *p1 =
2103  static_cast<operator_data_t *>(self->operator_data());
2104  operator_data_t *p2 =
2105  static_cast<operator_data_t *>(other->operator_data());
2106  bool match = (*p1 == *p2);
2107  int other_n = p2->n;
2108  delete p1;
2109  delete p2;
2110  if (match) {
2111  std::vector<Index> &inputs = get_glob()->inputs;
2112  size_t reduction = increment_pattern.size();
2113  inputs.resize(inputs.size() - reduction);
2114  this->n += other_n;
2115  other->deallocate();
2116  return self;
2117  }
2118  }
2119  return NULL;
2120  }
2121  };
2122 
2128  template <class OperatorBase>
2130  typename CPL<OperatorBase>::type Op;
2131  INHERIT_CTOR(Complete, Op)
2132  ~Complete() {}
2133  void forward(ForwardArgs<Scalar> &args) { Op.forward(args); }
2134  void reverse(ReverseArgs<Scalar> &args) { Op.reverse(args); }
2135  void forward_incr(ForwardArgs<Scalar> &args) { Op.forward_incr(args); }
2136  void reverse_decr(ReverseArgs<Scalar> &args) { Op.reverse_decr(args); }
2137 
2139  if (Op.add_forward_replay_copy)
2140  forward_replay_copy(args);
2141  else
2142  Op.forward(args);
2143  }
2144  void reverse(ReverseArgs<Replay> &args) { Op.reverse(args); }
2146  if (Op.add_forward_replay_copy) {
2147  forward_replay_copy(args);
2148  increment(args.ptr);
2149  } else
2150  Op.forward_incr(args);
2151  }
2152  void reverse_decr(ReverseArgs<Replay> &args) { Op.reverse_decr(args); }
2153 
2154  void forward(ForwardArgs<bool> &args) { Op.forward(args); }
2155  void reverse(ReverseArgs<bool> &args) { Op.reverse(args); }
2156  void forward_incr(ForwardArgs<bool> &args) { Op.forward_incr(args); }
2157  void reverse_decr(ReverseArgs<bool> &args) { Op.reverse_decr(args); }
2159  args.mark_dense(Op);
2160  Op.increment(args.ptr);
2161  };
2162 
2163  void forward(ForwardArgs<Writer> &args) { Op.forward(args); }
2164  void reverse(ReverseArgs<Writer> &args) { Op.reverse(args); }
2165  void forward_incr(ForwardArgs<Writer> &args) { Op.forward_incr(args); }
2166  void reverse_decr(ReverseArgs<Writer> &args) { Op.reverse_decr(args); }
2171  std::vector<ad_plain> operator()(const std::vector<ad_plain> &x) {
2172  TMBAD_ASSERT2(OperatorBase::dynamic,
2173  "Stack to heap copy only allowed for dynamic operators");
2174  Complete *pOp = new Complete(*this);
2175  TMBAD_ASSERT2(pOp->ref_count() == 0, "Operator already on the heap");
2176  pOp->ref_count.increment();
2177  return get_glob()->add_to_stack<OperatorBase>(pOp, x);
2178  }
2179  ad_segment operator()(const ad_segment &x) {
2180  TMBAD_ASSERT2(OperatorBase::dynamic,
2181  "Stack to heap copy only allowed for dynamic operators");
2182  Complete *pOp = new Complete(*this);
2183  TMBAD_ASSERT2(pOp->ref_count() == 0, "Operator already on the heap");
2184  pOp->ref_count.increment();
2185  return get_glob()->add_to_stack<OperatorBase>(pOp, x);
2186  }
2187  ad_segment operator()(const ad_segment &x, const ad_segment &y) {
2188  TMBAD_ASSERT2(OperatorBase::dynamic,
2189  "Stack to heap copy only allowed for dynamic operators");
2190  Complete *pOp = new Complete(*this);
2191  TMBAD_ASSERT2(pOp->ref_count() == 0, "Operator already on the heap");
2192  pOp->ref_count.increment();
2193  return get_glob()->add_to_stack<OperatorBase>(pOp, x, y);
2194  }
2195  template <class T>
2196  std::vector<T> operator()(const std::vector<T> &x) {
2197  std::vector<ad_plain> x_(x.begin(), x.end());
2198  std::vector<ad_plain> y_ = (*this)(x_);
2199  std::vector<T> y(y_.begin(), y_.end());
2200  return y;
2201  }
2202  void forward_replay_copy(ForwardArgs<Replay> &args) {
2203  std::vector<ad_plain> x(Op.input_size());
2204  for (size_t i = 0; i < x.size(); i++) x[i] = args.x(i);
2205  std::vector<ad_plain> y =
2206  get_glob()->add_to_stack<OperatorBase>(this->copy(), x);
2207  for (size_t i = 0; i < y.size(); i++) args.y(i) = y[i];
2208  }
2209  void dependencies(Args<> &args, Dependencies &dep) {
2210  Op.dependencies(args, dep);
2211  }
2212  void dependencies_updating(Args<> &args, Dependencies &dep) {
2213  Op.dependencies_updating(args, dep);
2214  }
2215  void increment(IndexPair &ptr) { Op.increment(ptr); }
2216  void decrement(IndexPair &ptr) { Op.decrement(ptr); }
2217  Index input_size() { return Op.input_size(); }
2218  Index output_size() { return Op.output_size(); }
2219  const char *op_name() { return Op.op_name(); }
2220  void print(print_config cfg) { Op.print(cfg); }
2221 
2222  template <class Operator_, int depth>
2223  struct SelfFuse {
2224  typedef Rep<Operator_> type;
2225  OperatorPure *operator()() {
2226  return get_glob()->template getOperator<type>(2);
2227  }
2228  };
2229  template <class Operator_>
2230  struct SelfFuse<Operator_, 0> {
2231  OperatorPure *operator()() { return NULL; }
2232  };
2234  return SelfFuse<OperatorBase, OperatorBase::max_fuse_depth>()();
2235  }
2237  return Op.other_fuse(this, other);
2238  }
2241  if (Op.smart_pointer) {
2242  ref_count.increment();
2243  return this;
2244  } else if (Op.dynamic)
2245  return new Complete(*this);
2246  else
2247  return this;
2248  }
2249  void deallocate() {
2250  if (!Op.dynamic) return;
2251  if (Op.smart_pointer) {
2252  if (ref_count() > 1) {
2253  ref_count.decrement();
2254  return;
2255  }
2256  }
2257  delete this;
2258  }
2260  op_info info(Op);
2261  return info;
2262  }
2263  void *identifier() {
2264  if (Op.add_static_identifier) {
2265  static void *id = new char();
2266  return id;
2267  } else
2268  return (void *)this;
2269  }
2270  void *operator_data() { return Op.operator_data(); }
2271  void *incomplete() { return &Op; }
2272  };
2273 
// Factory overload set: obtain the Complete<OperatorBase> wrapper for an
// operator, forwarding 0-4 constructor arguments. Dispatches on
// OperatorBase::dynamic through constructOperator (not in view) —
// presumably a shared static instance for non-dynamic operators and a
// fresh heap instance for dynamic ones; confirm at its definition.
2274  template <class OperatorBase>
2275  Complete<OperatorBase> *getOperator() const {
2276  return constructOperator<Complete<OperatorBase>, OperatorBase::dynamic>()();
2277  }
2278  template <class OperatorBase, class T1>
2279  Complete<OperatorBase> *getOperator(const T1 &x1) const {
2280  return constructOperator<Complete<OperatorBase>, OperatorBase::dynamic>()(
2281  x1);
2282  }
2283  template <class OperatorBase, class T1, class T2>
2284  Complete<OperatorBase> *getOperator(const T1 &x1, const T2 &x2) const {
2285  return constructOperator<Complete<OperatorBase>, OperatorBase::dynamic>()(
2286  x1, x2);
2287  }
2288  template <class OperatorBase, class T1, class T2, class T3>
2289  Complete<OperatorBase> *getOperator(const T1 &x1, const T2 &x2,
2290  const T3 &x3) const {
2291  return constructOperator<Complete<OperatorBase>, OperatorBase::dynamic>()(
2292  x1, x2, x3);
2293  }
2294  template <class OperatorBase, class T1, class T2, class T3, class T4>
2295  Complete<OperatorBase> *getOperator(const T1 &x1, const T2 &x2, const T3 &x3,
2296  const T4 &x4) const {
2297  return constructOperator<Complete<OperatorBase>, OperatorBase::dynamic>()(
2298  x1, x2, x3, x4);
2299  }
// Independent-variable placeholder: zero inputs, one output, and both
// sweeps are no-ops (the value lives in the tape's value array).
2300  struct InvOp : Operator<0> {
2301  static const int independent_variable = true;
2302  template <class Type>
2303  void forward(ForwardArgs<Type> &args) {}
2304  template <class Type>
2305  void reverse(ReverseArgs<Type> &args) {}
2306  const char *op_name();
2307  };
2308 
// Dependent-variable marker: identity on its single input (forward is
// synthesized from eval via have_eval); reverse accumulates the output
// adjoint into the input adjoint.
2309  struct DepOp : Operator<1> {
2310  static const bool is_linear = true;
2311  static const int dependent_variable = true;
2312  static const bool have_eval = true;
2313  template <class Type>
2314  Type eval(Type x0) {
2315  return x0;
2316  }
2317  template <class Type>
2318  void reverse(ReverseArgs<Type> &args) {
2319  args.dx(0) += args.dy(0);
2320  }
2321  const char *op_name();
2322  };
2323 
// Constant node: zero inputs, one output; generic sweeps are no-ops.
// The Replay and Writer forward sweeps are declared separately (defined
// elsewhere) since a constant must be re-emitted when replaying/writing.
2324  struct ConstOp : Operator<0, 1> {
2325  static const bool is_linear = true;
2326  static const bool is_constant = true;
2327  template <class Type>
2328  void forward(ForwardArgs<Type> &args) {}
2329  void forward(ForwardArgs<Replay> &args);
2330  template <class Type>
2331  void reverse(ReverseArgs<Type> &args) {}
2332  const char *op_name();
2333  void forward(ForwardArgs<Writer> &args);
2334  };
  // Data block: zero inputs and a run-time chosen number of outputs
  // (DynamicOutputOperator). No derivatives flow through (both sweeps are
  // no-ops in the generic case).
  struct DataOp : DynamicOutputOperator<0> {
    typedef DynamicOutputOperator<0> Base;
    static const bool is_linear = true;
    // n: number of output value slots this operator owns
    DataOp(Index n);
    template <class Type>
    void forward(ForwardArgs<Type> &args) {}
    template <class Type>
    void reverse(ReverseArgs<Type> &args) {}
    const char *op_name();
    void forward(ForwardArgs<Writer> &args);
  };
2356  static const bool add_forward_replay_copy = true;
2357  ZeroOp(Index n);
2358  template <class Type>
2359  void forward(ForwardArgs<Type> &args) {
2360  for (Index i = 0; i < Base::noutput; i++) args.y(i) = Type(0);
2361  }
2362  template <class Type>
2363  void reverse(ReverseArgs<Type> &args) {}
2364  const char *op_name();
2365  void forward(ForwardArgs<Writer> &args);
2368  void operator()(Replay *x, Index n);
2369  };
  // No-op operator: zero inputs, zero outputs, does nothing in either sweep.
  struct NullOp : Operator<0, 0> {
    NullOp();
    const char *op_name();
    template <class T>
    void forward(ForwardArgs<T> &args) {}
    template <class T>
    void reverse(ReverseArgs<T> &args) {}
  };
  // No-op operator with run-time chosen input/output counts; occupies tape
  // slots but performs no work in either sweep.
  struct NullOp2 : DynamicInputOutputOperator {
    NullOp2(Index ninput, Index noutput);
    const char *op_name();
    template <class T>
    void forward(ForwardArgs<T> &args) {}
    template <class T>
    void reverse(ReverseArgs<T> &args) {}
  };
2408  struct RefOp : DynamicOperator<0, 1> {
2409  static const bool dynamic = true;
2410  global *glob;
2411  Index i;
2412  RefOp(global *glob, Index i);
2414  void forward(ForwardArgs<Scalar> &args);
2416  void forward(ForwardArgs<Replay> &args);
2419  template <class Type>
2421  TMBAD_ASSERT2(false,
2422  "Reverse mode updates are forbidden until all references "
2423  "are resolved");
2424  }
2426  void reverse(ReverseArgs<Replay> &args);
2427  const char *op_name();
2428  };
2429 
2430  typedef Operator<1> UnaryOperator;
2431  typedef Operator<2> BinaryOperator;
2432 
2433  OperatorPure *Fuse(OperatorPure *Op1, OperatorPure *Op2);
2434 
2435  static bool fuse;
2436 
2441  void set_fuse(bool flag);
2442 
2445  void add_to_opstack(OperatorPure *pOp);
  // Push a nullary operator onto this tape: reserve one value slot
  // initialized with 'result' and register the operator. Returns an
  // ad_plain referring to the new slot.
  template <class OperatorBase>
  ad_plain add_to_stack(Scalar result = 0) {
    ad_plain ans;
    ans.index = this->values.size();  // output occupies the next value slot

    this->values.push_back(result);

    Complete<OperatorBase> *pOp = this->template getOperator<OperatorBase>();
    add_to_opstack(pOp);

    TMBAD_ASSERT(!TMBAD_INDEX_OVERFLOW(values.size()));
    return ans;
  }
  // Push a unary operator: the output value is computed eagerly via the
  // operator's scalar eval() on x's current value; x's index is recorded
  // as the single input.
  template <class OperatorBase>
  ad_plain add_to_stack(const ad_plain &x) {
    ad_plain ans;
    ans.index = this->values.size();

    // Eager forward value
    this->values.push_back(OperatorBase().eval(x.Value()));

    this->inputs.push_back(x.index);

    Complete<OperatorBase> *pOp = this->template getOperator<OperatorBase>();
    add_to_opstack(pOp);

    TMBAD_ASSERT(!TMBAD_INDEX_OVERFLOW(values.size()));
    TMBAD_ASSERT(!TMBAD_INDEX_OVERFLOW(inputs.size()));
    return ans;
  }
  // Push a binary operator: output computed eagerly from the two current
  // input values; both input indices are recorded in order (x first).
  template <class OperatorBase>
  ad_plain add_to_stack(const ad_plain &x, const ad_plain &y) {
    ad_plain ans;
    ans.index = this->values.size();

    // Eager forward value
    this->values.push_back(OperatorBase().eval(x.Value(), y.Value()));

    this->inputs.push_back(x.index);
    this->inputs.push_back(y.index);

    Complete<OperatorBase> *pOp = this->template getOperator<OperatorBase>();
    add_to_opstack(pOp);

    TMBAD_ASSERT(!TMBAD_INDEX_OVERFLOW(values.size()));
    TMBAD_ASSERT(!TMBAD_INDEX_OVERFLOW(inputs.size()));
    return ans;
  }
  // Push a vector operator taking two (optionally three) contiguous
  // segments. The operator instance (constructed from lhs/rhs) reports its
  // own output size; outputs are filled immediately by running the scalar
  // forward sweep for just this operator.
  template <class OperatorBase>
  ad_segment add_to_stack(ad_segment lhs, ad_segment rhs,
                          ad_segment more = ad_segment()) {
    // Where this operator's inputs and outputs begin on the tape
    IndexPair ptr((Index)inputs.size(), (Index)values.size());
    Complete<OperatorBase> *pOp =
        this->template getOperator<OperatorBase>(lhs, rhs);
    size_t n = pOp->output_size();
    ad_segment ans(values.size(), n);
    inputs.push_back(lhs.index());
    inputs.push_back(rhs.index());
    if (more.size() > 0) inputs.push_back(more.index());
    opstack.push_back(pOp);
    values.resize(values.size() + n);
    // Single-operator scalar forward pass to populate the reserved outputs
    ForwardArgs<Scalar> args(inputs, values, this);
    args.ptr = ptr;
    pOp->forward(args);

    TMBAD_ASSERT(!TMBAD_INDEX_OVERFLOW(values.size()));
    TMBAD_ASSERT(!TMBAD_INDEX_OVERFLOW(inputs.size()));
    return ans;
  }
2516 
  // Push an externally constructed dynamic operator. lhs/rhs are optional
  // contiguous inputs; each non-empty segment contributes exactly one input
  // index, and the count must match the operator's reported input arity.
  template <class OperatorBase>
  ad_segment add_to_stack(Complete<OperatorBase> *pOp, ad_segment lhs,
                          ad_segment rhs = ad_segment()) {
    static_assert(
        OperatorBase::dynamic,
        "Unlikely that you want to use this method for static operators?");
    static_assert(
        OperatorBase::ninput == 0 || OperatorBase::implicit_dependencies,
        "Operators with pointer inputs should always implement "
        "'implicit_dependencies'");

    // Where this operator's inputs and outputs begin on the tape
    IndexPair ptr((Index)inputs.size(), (Index)values.size());
    size_t n = pOp->output_size();
    ad_segment ans(values.size(), n);
    TMBAD_ASSERT((Index)(lhs.size() > 0) + (Index)(rhs.size() > 0) ==
                 pOp->input_size());
    if (lhs.size() > 0) inputs.push_back(lhs.index());
    if (rhs.size() > 0) inputs.push_back(rhs.index());
    opstack.push_back(pOp);
    values.resize(values.size() + n);
    // Single-operator scalar forward pass to populate the reserved outputs
    ForwardArgs<Scalar> args(inputs, values, this);
    args.ptr = ptr;
    pOp->forward(args);

    TMBAD_ASSERT(!TMBAD_INDEX_OVERFLOW(values.size()));
    TMBAD_ASSERT(!TMBAD_INDEX_OVERFLOW(inputs.size()));
    return ans;
  }
  // Push a prebuilt operator whose scalar inputs are listed explicitly.
  // Returns one ad_plain per output value.
  template <class OperatorBase>
  std::vector<ad_plain> add_to_stack(OperatorPure *pOp,
                                     const std::vector<ad_plain> &x) {
    IndexPair ptr((Index)inputs.size(), (Index)values.size());
    size_t m = pOp->input_size();
    size_t n = pOp->output_size();
    ad_segment ans(values.size(), n);
    // NOTE(review): assumes x.size() >= m; only the first m entries are used
    for (size_t i = 0; i < m; i++) inputs.push_back(x[i].index);
    opstack.push_back(pOp);
    values.resize(values.size() + n);
    // Single-operator scalar forward pass to populate the reserved outputs
    ForwardArgs<Scalar> args(inputs, values, this);
    args.ptr = ptr;
    pOp->forward(args);

    TMBAD_ASSERT(!TMBAD_INDEX_OVERFLOW(values.size()));
    TMBAD_ASSERT(!TMBAD_INDEX_OVERFLOW(inputs.size()));
    // Translate the contiguous output segment back to individual variables
    std::vector<ad_plain> out(n);
    for (size_t i = 0; i < n; i++) out[i].index = ans.index() + i;
    return out;
  }
2567 
2568  struct ad_plain {
2569  Index index;
2570  static const Index NA = (Index)-1;
2571  bool initialized() const;
2572  bool on_some_tape() const;
2574  void addToTape() const;
2576  global *glob() const;
2580  void override_by(const ad_plain &x) const;
2581 
2586  ad_plain();
2587 
2589  ad_plain(Scalar x);
2591  ad_plain(ad_aug x);
2592 
    // Identity copy operator (y = x0); used to duplicate a variable into a
    // new tape position. Adjoint passes straight through.
    struct CopyOp : Operator<1> {
      static const bool have_eval = true;
      template <class Type>
      Type eval(Type x0) {
        return x0;
      }
      Replay eval(Replay x0);
      // d/dx0 = 1
      template <class Type>
      void reverse(ReverseArgs<Type> &args) {
        args.dx(0) += args.dy(0);
      }
      const char *op_name();
    };
2614  ad_plain copy() const;
2625  struct ValOp : Operator<1> {
2626  static const bool have_dependencies = true;
2627  static const bool have_eval = true;
2629  template <class Type>
2630  Type eval(Type x0) {
2631  return x0;
2632  }
2633  Replay eval(Replay x0);
2635  template <class Type>
2643  void dependencies(Args<> &args, Dependencies &dep) const;
2644  const char *op_name();
2645  };
2649  ad_plain copy0() const;
2650 
    // Addition y = x0 + x1. The bool template flags mark which operands are
    // variables; a constant operand receives no adjoint contribution.
    template <bool left_var, bool right_var>
    struct AddOp_ : BinaryOperator {
      static const bool is_linear = true;
      static const bool have_eval = true;
      template <class Type>
      Type eval(Type x0, Type x1) {
        return x0 + x1;
      }
      // d/dx0 = d/dx1 = 1
      template <class Type>
      void reverse(ReverseArgs<Type> &args) {
        if (left_var) args.dx(0) += args.dy(0);
        if (right_var) args.dx(1) += args.dy(0);
      }
      const char *op_name() { return "AddOp"; }
      // Peephole fusion hook: when paired with a MulOp, offer the combined
      // Fused<AddOp_, MulOp> operator; otherwise decline (NULL).
      OperatorPure *other_fuse(OperatorPure *self, OperatorPure *other) {
        if (other == get_glob()->getOperator<MulOp>()) {
          return get_glob()->getOperator<Fused<AddOp_, MulOp> >();
        }
        return NULL;
      }
    };
2672  typedef AddOp_<true, true> AddOp;
2673  ad_plain operator+(const ad_plain &other) const;
2674 
2675  template <bool left_var, bool right_var>
2676  struct SubOp_ : BinaryOperator {
2677  static const bool is_linear = true;
2678  static const bool have_eval = true;
2679  template <class Type>
2680  Type eval(Type x0, Type x1) {
2681  return x0 - x1;
2682  }
2683  template <class Type>
2684  void reverse(ReverseArgs<Type> &args) {
2685  if (left_var) args.dx(0) += args.dy(0);
2686  if (right_var) args.dx(1) -= args.dy(0);
2687  }
2688  const char *op_name() { return "SubOp"; }
2689  };
2690  typedef SubOp_<true, true> SubOp;
2691  ad_plain operator-(const ad_plain &other) const;
2692 
2693  template <bool left_var, bool right_var>
2694  struct MulOp_ : BinaryOperator {
2695  static const bool have_eval = true;
2696  static const bool is_linear = !left_var || !right_var;
2697  template <class Type>
2698  Type eval(Type x0, Type x1) {
2699  return x0 * x1;
2700  }
2701  template <class Type>
2702  void reverse(ReverseArgs<Type> &args) {
2703  if (left_var) args.dx(0) += args.x(1) * args.dy(0);
2704  if (right_var) args.dx(1) += args.x(0) * args.dy(0);
2705  }
2706  const char *op_name() { return "MulOp"; }
2707  };
2708  typedef MulOp_<true, true> MulOp;
2709  ad_plain operator*(const ad_plain &other) const;
2710  ad_plain operator*(const Scalar &other) const;
2711 
2712  template <bool left_var, bool right_var>
2713  struct DivOp_ : BinaryOperator {
2714  static const bool have_eval = true;
2715  template <class Type>
2716  Type eval(Type x0, Type x1) {
2717  return x0 / x1;
2718  }
2719  template <class Type>
2720  void reverse(ReverseArgs<Type> &args) {
2721  Type tmp0 = args.dy(0) / args.x(1);
2722  if (left_var) args.dx(0) += tmp0;
2723  if (right_var) args.dx(1) -= args.y(0) * tmp0;
2724  }
2725  const char *op_name() { return "DivOp"; }
2726  };
2727  typedef DivOp_<true, true> DivOp;
2728  ad_plain operator/(const ad_plain &other) const;
2729 
2730  struct NegOp : UnaryOperator {
2731  static const bool is_linear = true;
2732  static const bool have_eval = true;
2733  template <class Type>
2734  Type eval(Type x0) {
2735  return -x0;
2736  }
2737  template <class Type>
2738  void reverse(ReverseArgs<Type> &args) {
2739  args.dx(0) -= args.dy(0);
2740  }
2741  const char *op_name();
2742  };
2743  ad_plain operator-() const;
2744 
2745  ad_plain &operator+=(const ad_plain &other);
2746  ad_plain &operator-=(const ad_plain &other);
2747  ad_plain &operator*=(const ad_plain &other);
2748  ad_plain &operator/=(const ad_plain &other);
2749 
2750  void Dependent();
2751 
2752  void Independent();
2753  Scalar &Value();
2754  Scalar Value() const;
2755  Scalar Value(global *glob) const;
2756  Scalar &Deriv();
2757  };
2765  bool in_use;
2769  void ad_start();
2771  void ad_stop();
2772  void Independent(std::vector<ad_plain> &x);
2780  struct ad_segment {
2781  ad_plain x;
2782  size_t n;
2783  size_t c;
2785  ad_segment();
2787  ad_segment(ad_plain x, size_t n);
2789  ad_segment(ad_aug x);
2791  ad_segment(Scalar x);
2793  ad_segment(Index idx, size_t n);
2795  ad_segment(ad_plain x, size_t r, size_t c);
2798  ad_segment(Replay *x, size_t n, bool zero_check = false);
2799  bool identicalZero();
2800  bool all_on_active_tape(Replay *x, size_t n);
2801  bool is_contiguous(Replay *x, size_t n);
2802  bool all_zero(Replay *x, size_t n);
2803  bool all_constant(Replay *x, size_t n);
2804  size_t size() const;
2805  size_t rows() const;
2806  size_t cols() const;
2807 
2808  ad_plain operator[](size_t i) const;
2809  ad_plain offset() const;
2810  Index index() const;
2811  };
2831  struct ad_aug {
2834  mutable ad_plain taped_value;
2838  TMBAD_UNION_OR_STRUCT {
2839  Scalar value;
2840  mutable global *glob;
2841  }
2842  data;
2844  bool on_some_tape() const;
2846  bool on_active_tape() const;
2848  bool ontape() const;
2852  bool constant() const;
2853  Index index() const;
2859  global *glob() const;
2861  Scalar Value() const;
2865  ad_aug();
2869  ad_aug(Scalar x);
2871  ad_aug(ad_plain x);
2876  void addToTape() const;
2880  void override_by(const ad_plain &x) const;
2882  bool in_context_stack(global *glob) const;
2885  ad_aug copy() const;
2887  ad_aug copy0() const;
2890  bool identicalZero() const;
2893  bool identicalOne() const;
2897  bool bothConstant(const ad_aug &other) const;
2901  bool identical(const ad_aug &other) const;
2906  ad_aug operator+(const ad_aug &other) const;
2912  ad_aug operator-(const ad_aug &other) const;
2914  ad_aug operator-() const;
2921  ad_aug operator*(const ad_aug &other) const;
2926  ad_aug operator/(const ad_aug &other) const;
2929  ad_aug &operator+=(const ad_aug &other);
2932  ad_aug &operator-=(const ad_aug &other);
2935  ad_aug &operator*=(const ad_aug &other);
2938  ad_aug &operator/=(const ad_aug &other);
2940  void Dependent();
2942  void Independent();
2943  Scalar &Value();
2944  Scalar &Deriv();
2945  };
2946  void Independent(std::vector<ad_aug> &x);
2947 };
2948 
2949 template <class S, class T>
2950 std::ostream &operator<<(std::ostream &os, const std::pair<S, T> &x) {
2951  os << "(" << x.first << ", " << x.second << ")";
2952  return os;
2953 }
2954 
2955 std::ostream &operator<<(std::ostream &os, const global::ad_plain &x);
2956 std::ostream &operator<<(std::ostream &os, const global::ad_aug &x);
2957 
// Wrapper that augments an AD scalar type T with ordinary comparison
// operators evaluated on the current numeric values (this->Value()).
// NOTE(review): value-based comparisons make branches depend on the point
// at which the tape was recorded - intended for "adaptive" code paths.
template <class T>
struct adaptive : T {
  INHERIT_CTOR(adaptive, T)
  // Comparisons forward to the underlying scalar values
  bool operator==(const T &other) const {
    return this->Value() == other.Value();
  }
  bool operator!=(const T &other) const {
    return this->Value() != other.Value();
  }
  bool operator>=(const T &other) const {
    return this->Value() >= other.Value();
  }
  bool operator<=(const T &other) const {
    return this->Value() <= other.Value();
  }
  bool operator<(const T &other) const { return this->Value() < other.Value(); }
  bool operator>(const T &other) const { return this->Value() > other.Value(); }

  // Arithmetic delegates to T and re-wraps the result so chains of
  // operations stay within the adaptive type
  adaptive operator+(const T &other) const {
    return adaptive(T(*this) + other);
  }
  adaptive operator-(const T &other) const {
    return adaptive(T(*this) - other);
  }
  adaptive operator*(const T &other) const {
    return adaptive(T(*this) * other);
  }
  adaptive operator/(const T &other) const {
    return adaptive(T(*this) / other);
  }

  adaptive operator-() const { return adaptive(-(T(*this))); }
};
3001 
3002 typedef global::ad_plain ad_plain;
3003 typedef global::ad_aug ad_aug;
3004 typedef global::Replay Replay;
3005 typedef adaptive<ad_aug> ad_adapt;
3014 struct ad_plain_index : ad_plain {
3015  ad_plain_index(const Index &i);
3016  ad_plain_index(const ad_plain &x);
3017 };
3018 struct ad_aug_index : ad_aug {
3019  ad_aug_index(const Index &i);
3020  ad_aug_index(const ad_aug &x);
3021  ad_aug_index(const ad_plain &x);
3022 };
3023 
3024 template <class T>
3025 void Independent(std::vector<T> &x) {
3026  for (size_t i = 0; i < x.size(); i++) x[i].Independent();
3027 }
3028 template <class T>
3029 void Dependent(std::vector<T> &x) {
3030  for (size_t i = 0; i < x.size(); i++) x[i].Dependent();
3031 }
3032 template <class T>
3033 Scalar Value(T x) {
3034  return x.Value();
3035 }
3036 Scalar Value(Scalar x);
3037 
3044 template <class V>
3045 bool isContiguous(V &x) {
3046  bool ok = true;
3047  Index j_previous;
3048  for (size_t i = 0; i < (size_t)x.size(); i++) {
3049  if (!x[i].on_some_tape()) {
3050  ok = false;
3051  break;
3052  }
3053  Index j = ad_plain(x[i]).index;
3054  if (i > 0) {
3055  if (j != j_previous + 1) {
3056  ok = false;
3057  break;
3058  }
3059  }
3060  j_previous = j;
3061  }
3062  return ok;
3063 }
3070 template <class V>
3071 V getContiguous(const V &x) {
3072  V y(x.size());
3073  for (size_t i = 0; i < (size_t)x.size(); i++) y[i] = x[i].copy();
3074  return y;
3075 }
// Ensure x is stored contiguously on the tape, replacing it by a fresh
// contiguous copy only when needed.
template <class V>
void forceContiguous(V &x) {
  if (isContiguous(x)) return;
  x = getContiguous(x);
}
3086 ad_aug operator+(const double &x, const ad_aug &y);
3087 ad_aug operator-(const double &x, const ad_aug &y);
3088 ad_aug operator*(const double &x, const ad_aug &y);
3089 ad_aug operator/(const double &x, const ad_aug &y);
3090 
3091 bool operator<(const double &x, const ad_adapt &y);
3092 bool operator<=(const double &x, const ad_adapt &y);
3093 bool operator>(const double &x, const ad_adapt &y);
3094 bool operator>=(const double &x, const ad_adapt &y);
3095 bool operator==(const double &x, const ad_adapt &y);
3096 bool operator!=(const double &x, const ad_adapt &y);
3097 using ::round;
3098 using ::trunc;
3099 using std::ceil;
3100 using std::floor;
3101 Writer floor(const Writer &x);
3102 struct FloorOp : global::UnaryOperator {
3103  static const bool have_eval = true;
3104  template <class Type>
3105  Type eval(Type x) {
3106  return floor(x);
3107  }
3108  template <class Type>
3109  void reverse(ReverseArgs<Type> &args) {}
3110  const char *op_name();
3111 };
3112 ad_plain floor(const ad_plain &x);
3113 ad_aug floor(const ad_aug &x);
3114 Writer ceil(const Writer &x);
3115 struct CeilOp : global::UnaryOperator {
3116  static const bool have_eval = true;
3117  template <class Type>
3118  Type eval(Type x) {
3119  return ceil(x);
3120  }
3121  template <class Type>
3122  void reverse(ReverseArgs<Type> &args) {}
3123  const char *op_name();
3124 };
3125 ad_plain ceil(const ad_plain &x);
3126 ad_aug ceil(const ad_aug &x);
3127 Writer trunc(const Writer &x);
3128 struct TruncOp : global::UnaryOperator {
3129  static const bool have_eval = true;
3130  template <class Type>
3131  Type eval(Type x) {
3132  return trunc(x);
3133  }
3134  template <class Type>
3135  void reverse(ReverseArgs<Type> &args) {}
3136  const char *op_name();
3137 };
3138 ad_plain trunc(const ad_plain &x);
3139 ad_aug trunc(const ad_aug &x);
3140 Writer round(const Writer &x);
3141 struct RoundOp : global::UnaryOperator {
3142  static const bool have_eval = true;
3143  template <class Type>
3144  Type eval(Type x) {
3145  return round(x);
3146  }
3147  template <class Type>
3148  void reverse(ReverseArgs<Type> &args) {}
3149  const char *op_name();
3150 };
3151 ad_plain round(const ad_plain &x);
3152 ad_aug round(const ad_aug &x);
3153 
3154 double sign(const double &x);
3155 Writer sign(const Writer &x);
3156 struct SignOp : global::UnaryOperator {
3157  static const bool have_eval = true;
3158  template <class Type>
3159  Type eval(Type x) {
3160  return sign(x);
3161  }
3162  template <class Type>
3163  void reverse(ReverseArgs<Type> &args) {}
3164  const char *op_name();
3165 };
3166 ad_plain sign(const ad_plain &x);
3167 ad_aug sign(const ad_aug &x);
3168 
3169 double ge0(const double &x);
3170 double lt0(const double &x);
3171 Writer ge0(const Writer &x);
3172 struct Ge0Op : global::UnaryOperator {
3173  static const bool have_eval = true;
3174  template <class Type>
3175  Type eval(Type x) {
3176  return ge0(x);
3177  }
3178  template <class Type>
3179  void reverse(ReverseArgs<Type> &args) {}
3180  const char *op_name();
3181 };
3182 ad_plain ge0(const ad_plain &x);
3183 ad_aug ge0(const ad_aug &x);
3184 Writer lt0(const Writer &x);
3185 struct Lt0Op : global::UnaryOperator {
3186  static const bool have_eval = true;
3187  template <class Type>
3188  Type eval(Type x) {
3189  return lt0(x);
3190  }
3191  template <class Type>
3192  void reverse(ReverseArgs<Type> &args) {}
3193  const char *op_name();
3194 };
3195 ad_plain lt0(const ad_plain &x);
3196 ad_aug lt0(const ad_aug &x);
3197 using ::expm1;
3198 using ::fabs;
3199 using ::log1p;
3200 using std::acos;
3201 using std::acosh;
3202 using std::asin;
3203 using std::asinh;
3204 using std::atan;
3205 using std::atanh;
3206 using std::cos;
3207 using std::cosh;
3208 using std::exp;
3209 using std::log;
3210 using std::sin;
3211 using std::sinh;
3212 using std::sqrt;
3213 using std::tan;
3214 using std::tanh;
3215 
3216 Writer fabs(const Writer &x);
3217 struct AbsOp : global::UnaryOperator {
3218  static const bool have_eval = true;
3219  template <class Type>
3220  Type eval(Type x) {
3221  return fabs(x);
3222  }
3223  template <class Type>
3224  void reverse(ReverseArgs<Type> &args) {
3225  args.dx(0) += args.dy(0) * sign(args.x(0));
3226  }
3227  void reverse(ReverseArgs<Scalar> &args);
3228  const char *op_name();
3229 };
3230 ad_plain fabs(const ad_plain &x);
3231 ad_aug fabs(const ad_aug &x);
3232 ad_adapt fabs(const ad_adapt &x);
3233 Writer cos(const Writer &x);
3234 ad_aug cos(const ad_aug &x);
3235 Writer sin(const Writer &x);
3236 struct SinOp : global::UnaryOperator {
3237  static const bool have_eval = true;
3238  template <class Type>
3239  Type eval(Type x) {
3240  return sin(x);
3241  }
3242  template <class Type>
3243  void reverse(ReverseArgs<Type> &args) {
3244  args.dx(0) += args.dy(0) * cos(args.x(0));
3245  }
3246  void reverse(ReverseArgs<Scalar> &args);
3247  const char *op_name();
3248 };
3249 ad_plain sin(const ad_plain &x);
3250 ad_aug sin(const ad_aug &x);
3251 ad_adapt sin(const ad_adapt &x);
3252 Writer cos(const Writer &x);
3253 struct CosOp : global::UnaryOperator {
3254  static const bool have_eval = true;
3255  template <class Type>
3256  Type eval(Type x) {
3257  return cos(x);
3258  }
3259  template <class Type>
3260  void reverse(ReverseArgs<Type> &args) {
3261  args.dx(0) += args.dy(0) * -sin(args.x(0));
3262  }
3263  void reverse(ReverseArgs<Scalar> &args);
3264  const char *op_name();
3265 };
3266 ad_plain cos(const ad_plain &x);
3267 ad_aug cos(const ad_aug &x);
3268 ad_adapt cos(const ad_adapt &x);
3269 Writer exp(const Writer &x);
3270 struct ExpOp : global::UnaryOperator {
3271  static const bool have_eval = true;
3272  template <class Type>
3273  Type eval(Type x) {
3274  return exp(x);
3275  }
3276  template <class Type>
3277  void reverse(ReverseArgs<Type> &args) {
3278  args.dx(0) += args.dy(0) * args.y(0);
3279  }
3280  void reverse(ReverseArgs<Scalar> &args);
3281  const char *op_name();
3282 };
3283 ad_plain exp(const ad_plain &x);
3284 ad_aug exp(const ad_aug &x);
3285 ad_adapt exp(const ad_adapt &x);
3286 Writer log(const Writer &x);
3287 struct LogOp : global::UnaryOperator {
3288  static const bool have_eval = true;
3289  template <class Type>
3290  Type eval(Type x) {
3291  return log(x);
3292  }
3293  template <class Type>
3294  void reverse(ReverseArgs<Type> &args) {
3295  args.dx(0) += args.dy(0) * Type(1.) / args.x(0);
3296  }
3297  void reverse(ReverseArgs<Scalar> &args);
3298  const char *op_name();
3299 };
3300 ad_plain log(const ad_plain &x);
3301 ad_aug log(const ad_aug &x);
3302 ad_adapt log(const ad_adapt &x);
3303 Writer sqrt(const Writer &x);
3304 struct SqrtOp : global::UnaryOperator {
3305  static const bool have_eval = true;
3306  template <class Type>
3307  Type eval(Type x) {
3308  return sqrt(x);
3309  }
3310  template <class Type>
3311  void reverse(ReverseArgs<Type> &args) {
3312  args.dx(0) += args.dy(0) * Type(0.5) / args.y(0);
3313  }
3314  void reverse(ReverseArgs<Scalar> &args);
3315  const char *op_name();
3316 };
3317 ad_plain sqrt(const ad_plain &x);
3318 ad_aug sqrt(const ad_aug &x);
3319 ad_adapt sqrt(const ad_adapt &x);
3320 Writer tan(const Writer &x);
3321 struct TanOp : global::UnaryOperator {
3322  static const bool have_eval = true;
3323  template <class Type>
3324  Type eval(Type x) {
3325  return tan(x);
3326  }
3327  template <class Type>
3328  void reverse(ReverseArgs<Type> &args) {
3329  args.dx(0) += args.dy(0) * Type(1.) / (cos(args.x(0)) * cos(args.x(0)));
3330  }
3331  void reverse(ReverseArgs<Scalar> &args);
3332  const char *op_name();
3333 };
3334 ad_plain tan(const ad_plain &x);
3335 ad_aug tan(const ad_aug &x);
3336 ad_adapt tan(const ad_adapt &x);
3337 Writer cosh(const Writer &x);
3338 ad_aug cosh(const ad_aug &x);
3339 Writer sinh(const Writer &x);
3340 struct SinhOp : global::UnaryOperator {
3341  static const bool have_eval = true;
3342  template <class Type>
3343  Type eval(Type x) {
3344  return sinh(x);
3345  }
3346  template <class Type>
3347  void reverse(ReverseArgs<Type> &args) {
3348  args.dx(0) += args.dy(0) * cosh(args.x(0));
3349  }
3350  void reverse(ReverseArgs<Scalar> &args);
3351  const char *op_name();
3352 };
3353 ad_plain sinh(const ad_plain &x);
3354 ad_aug sinh(const ad_aug &x);
3355 ad_adapt sinh(const ad_adapt &x);
3356 Writer cosh(const Writer &x);
3357 struct CoshOp : global::UnaryOperator {
3358  static const bool have_eval = true;
3359  template <class Type>
3360  Type eval(Type x) {
3361  return cosh(x);
3362  }
3363  template <class Type>
3364  void reverse(ReverseArgs<Type> &args) {
3365  args.dx(0) += args.dy(0) * sinh(args.x(0));
3366  }
3367  void reverse(ReverseArgs<Scalar> &args);
3368  const char *op_name();
3369 };
3370 ad_plain cosh(const ad_plain &x);
3371 ad_aug cosh(const ad_aug &x);
3372 ad_adapt cosh(const ad_adapt &x);
3373 Writer tanh(const Writer &x);
3374 struct TanhOp : global::UnaryOperator {
3375  static const bool have_eval = true;
3376  template <class Type>
3377  Type eval(Type x) {
3378  return tanh(x);
3379  }
3380  template <class Type>
3381  void reverse(ReverseArgs<Type> &args) {
3382  args.dx(0) += args.dy(0) * Type(1.) / (cosh(args.x(0)) * cosh(args.x(0)));
3383  }
3384  void reverse(ReverseArgs<Scalar> &args);
3385  const char *op_name();
3386 };
3387 ad_plain tanh(const ad_plain &x);
3388 ad_aug tanh(const ad_aug &x);
3389 ad_adapt tanh(const ad_adapt &x);
3390 Writer expm1(const Writer &x);
3391 struct Expm1 : global::UnaryOperator {
3392  static const bool have_eval = true;
3393  template <class Type>
3394  Type eval(Type x) {
3395  return expm1(x);
3396  }
3397  template <class Type>
3398  void reverse(ReverseArgs<Type> &args) {
3399  args.dx(0) += args.dy(0) * args.y(0) + Type(1.);
3400  }
3401  void reverse(ReverseArgs<Scalar> &args);
3402  const char *op_name();
3403 };
3404 ad_plain expm1(const ad_plain &x);
3405 ad_aug expm1(const ad_aug &x);
3406 ad_adapt expm1(const ad_adapt &x);
3407 Writer log1p(const Writer &x);
3408 struct Log1p : global::UnaryOperator {
3409  static const bool have_eval = true;
3410  template <class Type>
3411  Type eval(Type x) {
3412  return log1p(x);
3413  }
3414  template <class Type>
3415  void reverse(ReverseArgs<Type> &args) {
3416  args.dx(0) += args.dy(0) * Type(1.) / (args.x(0) + Type(1.));
3417  }
3418  void reverse(ReverseArgs<Scalar> &args);
3419  const char *op_name();
3420 };
3421 ad_plain log1p(const ad_plain &x);
3422 ad_aug log1p(const ad_aug &x);
3423 ad_adapt log1p(const ad_adapt &x);
3424 Writer asin(const Writer &x);
3425 struct AsinOp : global::UnaryOperator {
3426  static const bool have_eval = true;
3427  template <class Type>
3428  Type eval(Type x) {
3429  return asin(x);
3430  }
3431  template <class Type>
3432  void reverse(ReverseArgs<Type> &args) {
3433  args.dx(0) +=
3434  args.dy(0) * Type(1.) / sqrt(Type(1.) - args.x(0) * args.x(0));
3435  }
3436  void reverse(ReverseArgs<Scalar> &args);
3437  const char *op_name();
3438 };
3439 ad_plain asin(const ad_plain &x);
3440 ad_aug asin(const ad_aug &x);
3441 ad_adapt asin(const ad_adapt &x);
3442 Writer acos(const Writer &x);
3443 struct AcosOp : global::UnaryOperator {
3444  static const bool have_eval = true;
3445  template <class Type>
3446  Type eval(Type x) {
3447  return acos(x);
3448  }
3449  template <class Type>
3450  void reverse(ReverseArgs<Type> &args) {
3451  args.dx(0) +=
3452  args.dy(0) * Type(-1.) / sqrt(Type(1.) - args.x(0) * args.x(0));
3453  }
3454  void reverse(ReverseArgs<Scalar> &args);
3455  const char *op_name();
3456 };
3457 ad_plain acos(const ad_plain &x);
3458 ad_aug acos(const ad_aug &x);
3459 ad_adapt acos(const ad_adapt &x);
3460 Writer atan(const Writer &x);
3461 struct AtanOp : global::UnaryOperator {
3462  static const bool have_eval = true;
3463  template <class Type>
3464  Type eval(Type x) {
3465  return atan(x);
3466  }
3467  template <class Type>
3468  void reverse(ReverseArgs<Type> &args) {
3469  args.dx(0) += args.dy(0) * Type(1.) / (Type(1.) + args.x(0) * args.x(0));
3470  }
3471  void reverse(ReverseArgs<Scalar> &args);
3472  const char *op_name();
3473 };
3474 ad_plain atan(const ad_plain &x);
3475 ad_aug atan(const ad_aug &x);
3476 ad_adapt atan(const ad_adapt &x);
3477 Writer asinh(const Writer &x);
3478 struct AsinhOp : global::UnaryOperator {
3479  static const bool have_eval = true;
3480  template <class Type>
3481  Type eval(Type x) {
3482  return asinh(x);
3483  }
3484  template <class Type>
3485  void reverse(ReverseArgs<Type> &args) {
3486  args.dx(0) +=
3487  args.dy(0) * Type(1.) / sqrt(args.x(0) * args.x(0) + Type(1.));
3488  }
3489  void reverse(ReverseArgs<Scalar> &args);
3490  const char *op_name();
3491 };
3492 ad_plain asinh(const ad_plain &x);
3493 ad_aug asinh(const ad_aug &x);
3494 ad_adapt asinh(const ad_adapt &x);
3495 Writer acosh(const Writer &x);
3496 struct AcoshOp : global::UnaryOperator {
3497  static const bool have_eval = true;
3498  template <class Type>
3499  Type eval(Type x) {
3500  return acosh(x);
3501  }
3502  template <class Type>
3503  void reverse(ReverseArgs<Type> &args) {
3504  args.dx(0) +=
3505  args.dy(0) * Type(1.) / sqrt(args.x(0) * args.x(0) - Type(1.));
3506  }
3507  void reverse(ReverseArgs<Scalar> &args);
3508  const char *op_name();
3509 };
3510 ad_plain acosh(const ad_plain &x);
3511 ad_aug acosh(const ad_aug &x);
3512 ad_adapt acosh(const ad_adapt &x);
3513 Writer atanh(const Writer &x);
3514 struct AtanhOp : global::UnaryOperator {
3515  static const bool have_eval = true;
3516  template <class Type>
3517  Type eval(Type x) {
3518  return atanh(x);
3519  }
3520  template <class Type>
3521  void reverse(ReverseArgs<Type> &args) {
3522  args.dx(0) += args.dy(0) * Type(1.) / (Type(1) - args.x(0) * args.x(0));
3523  }
3524  void reverse(ReverseArgs<Scalar> &args);
3525  const char *op_name();
3526 };
3527 ad_plain atanh(const ad_plain &x);
3528 ad_aug atanh(const ad_aug &x);
3529 ad_adapt atanh(const ad_adapt &x);
3530 
// Generic absolute value expressed via fabs() so all AD overloads apply.
template <class V>
V abs(const V &x) {
  return fabs(x);
}
3535 using std::pow;
3536 Writer pow(const Writer &x1, const Writer &x2);
// pow(x1, x2) with partial derivatives
//   d/dx1 = x2 * pow(x1, x2 - 1)
//   d/dx2 = pow(x1, x2) * log(x1)
// NOTE(review): the log(x1) term is only finite for x1 > 0 - confirm
// callers with a variable exponent keep the base positive.
struct PowOp : global::BinaryOperator {
  static const bool have_eval = true;
  template <class Type>
  Type eval(Type x1, Type x2) {
    return pow(x1, x2);
  }
  template <class Type>
  void reverse(ReverseArgs<Type> &args) {
    args.dx(0) += args.dy(0) * args.x(1) * pow(args.x(0), args.x(1) - Type(1.));
    args.dx(1) += args.dy(0) * args.y(0) * log(args.x(0));
  }
  const char *op_name();
};
3550 ad_plain pow(const ad_plain &x1, const ad_plain &x2);
3551 ad_aug pow(const ad_aug &x1, const ad_aug &x2);
3552 ad_adapt pow(const ad_adapt &x1, const ad_adapt &x2);
3553 using std::atan2;
3554 Writer atan2(const Writer &x1, const Writer &x2);
3555 struct Atan2 : global::BinaryOperator {
3556  static const bool have_eval = true;
3557  template <class Type>
3558  Type eval(Type x1, Type x2) {
3559  return atan2(x1, x2);
3560  }
3561  template <class Type>
3562  void reverse(ReverseArgs<Type> &args) {
3563  args.dx(0) += args.dy(0) * args.x(1) /
3564  (args.x(0) * args.x(0) + args.x(1) * args.x(1));
3565  args.dx(1) += args.dy(0) * -args.x(0) /
3566  (args.x(0) * args.x(0) + args.x(1) * args.x(1));
3567  }
3568  const char *op_name();
3569 };
3570 ad_plain atan2(const ad_plain &x1, const ad_plain &x2);
3571 ad_aug atan2(const ad_aug &x1, const ad_aug &x2);
3572 ad_adapt atan2(const ad_adapt &x1, const ad_adapt &x2);
3573 using std::max;
3574 Writer max(const Writer &x1, const Writer &x2);
3575 struct MaxOp : global::BinaryOperator {
3576  static const bool have_eval = true;
3577  template <class Type>
3578  Type eval(Type x1, Type x2) {
3579  return max(x1, x2);
3580  }
3581  template <class Type>
3582  void reverse(ReverseArgs<Type> &args) {
3583  args.dx(0) += args.dy(0) * ge0(args.x(0) - args.x(1));
3584  args.dx(1) += args.dy(0) * lt0(args.x(0) - args.x(1));
3585  }
3586  const char *op_name();
3587 };
3588 ad_plain max(const ad_plain &x1, const ad_plain &x2);
3589 ad_aug max(const ad_aug &x1, const ad_aug &x2);
3590 ad_adapt max(const ad_adapt &x1, const ad_adapt &x2);
3591 
3592 using std::min;
3593 Writer min(const Writer &x1, const Writer &x2);
3594 struct MinOp : global::BinaryOperator {
3595  static const bool have_eval = true;
3596  template <class Type>
3597  Type eval(Type x1, Type x2) {
3598  return min(x1, x2);
3599  }
3600  template <class Type>
3601  void reverse(ReverseArgs<Type> &args) {
3602  args.dx(0) += args.dy(0) * ge0(args.x(1) - args.x(0));
3603  args.dx(1) += args.dy(0) * lt0(args.x(1) - args.x(0));
3604  }
3605  const char *op_name();
3606 };
3607 ad_plain min(const ad_plain &x1, const ad_plain &x2);
3608 ad_aug min(const ad_aug &x1, const ad_aug &x2);
3609 ad_adapt min(const ad_adapt &x1, const ad_adapt &x2);
3610 Replay CondExpEq(const Replay &x0, const Replay &x1, const Replay &x2,
3611  const Replay &x3);
3612 struct CondExpEqOp : global::Operator<4, 1> {
3613  void forward(ForwardArgs<Scalar> &args);
3614  void reverse(ReverseArgs<Scalar> &args);
3615  void forward(ForwardArgs<Replay> &args);
3616  void reverse(ReverseArgs<Replay> &args);
3617  void forward(ForwardArgs<Writer> &args);
3618  void reverse(ReverseArgs<Writer> &args);
3619  template <class Type>
3620  void forward(ForwardArgs<Type> &args) {
3621  TMBAD_ASSERT(false);
3622  }
3623  template <class Type>
3624  void reverse(ReverseArgs<Type> &args) {
3625  TMBAD_ASSERT(false);
3626  }
3627  const char *op_name();
3628 };
3629 Scalar CondExpEq(const Scalar &x0, const Scalar &x1, const Scalar &x2,
3630  const Scalar &x3);
3631 ad_plain CondExpEq(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
3632  const ad_plain &x3);
3633 ad_aug CondExpEq(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
3634  const ad_aug &x3);
3635 Replay CondExpNe(const Replay &x0, const Replay &x1, const Replay &x2,
3636  const Replay &x3);
3637 struct CondExpNeOp : global::Operator<4, 1> {
3638  void forward(ForwardArgs<Scalar> &args);
3639  void reverse(ReverseArgs<Scalar> &args);
3640  void forward(ForwardArgs<Replay> &args);
3641  void reverse(ReverseArgs<Replay> &args);
3642  void forward(ForwardArgs<Writer> &args);
3643  void reverse(ReverseArgs<Writer> &args);
3644  template <class Type>
3645  void forward(ForwardArgs<Type> &args) {
3646  TMBAD_ASSERT(false);
3647  }
3648  template <class Type>
3649  void reverse(ReverseArgs<Type> &args) {
3650  TMBAD_ASSERT(false);
3651  }
3652  const char *op_name();
3653 };
3654 Scalar CondExpNe(const Scalar &x0, const Scalar &x1, const Scalar &x2,
3655  const Scalar &x3);
3656 ad_plain CondExpNe(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
3657  const ad_plain &x3);
3658 ad_aug CondExpNe(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
3659  const ad_aug &x3);
3660 Replay CondExpGt(const Replay &x0, const Replay &x1, const Replay &x2,
3661  const Replay &x3);
3662 struct CondExpGtOp : global::Operator<4, 1> {
3663  void forward(ForwardArgs<Scalar> &args);
3664  void reverse(ReverseArgs<Scalar> &args);
3665  void forward(ForwardArgs<Replay> &args);
3666  void reverse(ReverseArgs<Replay> &args);
3667  void forward(ForwardArgs<Writer> &args);
3668  void reverse(ReverseArgs<Writer> &args);
3669  template <class Type>
3670  void forward(ForwardArgs<Type> &args) {
3671  TMBAD_ASSERT(false);
3672  }
3673  template <class Type>
3674  void reverse(ReverseArgs<Type> &args) {
3675  TMBAD_ASSERT(false);
3676  }
3677  const char *op_name();
3678 };
3679 Scalar CondExpGt(const Scalar &x0, const Scalar &x1, const Scalar &x2,
3680  const Scalar &x3);
3681 ad_plain CondExpGt(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
3682  const ad_plain &x3);
3683 ad_aug CondExpGt(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
3684  const ad_aug &x3);
3685 Replay CondExpLt(const Replay &x0, const Replay &x1, const Replay &x2,
3686  const Replay &x3);
3687 struct CondExpLtOp : global::Operator<4, 1> {
3688  void forward(ForwardArgs<Scalar> &args);
3689  void reverse(ReverseArgs<Scalar> &args);
3690  void forward(ForwardArgs<Replay> &args);
3691  void reverse(ReverseArgs<Replay> &args);
3692  void forward(ForwardArgs<Writer> &args);
3693  void reverse(ReverseArgs<Writer> &args);
3694  template <class Type>
3695  void forward(ForwardArgs<Type> &args) {
3696  TMBAD_ASSERT(false);
3697  }
3698  template <class Type>
3699  void reverse(ReverseArgs<Type> &args) {
3700  TMBAD_ASSERT(false);
3701  }
3702  const char *op_name();
3703 };
3704 Scalar CondExpLt(const Scalar &x0, const Scalar &x1, const Scalar &x2,
3705  const Scalar &x3);
3706 ad_plain CondExpLt(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
3707  const ad_plain &x3);
3708 ad_aug CondExpLt(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
3709  const ad_aug &x3);
3710 Replay CondExpGe(const Replay &x0, const Replay &x1, const Replay &x2,
3711  const Replay &x3);
3712 struct CondExpGeOp : global::Operator<4, 1> {
3713  void forward(ForwardArgs<Scalar> &args);
3714  void reverse(ReverseArgs<Scalar> &args);
3715  void forward(ForwardArgs<Replay> &args);
3716  void reverse(ReverseArgs<Replay> &args);
3717  void forward(ForwardArgs<Writer> &args);
3718  void reverse(ReverseArgs<Writer> &args);
3719  template <class Type>
3720  void forward(ForwardArgs<Type> &args) {
3721  TMBAD_ASSERT(false);
3722  }
3723  template <class Type>
3724  void reverse(ReverseArgs<Type> &args) {
3725  TMBAD_ASSERT(false);
3726  }
3727  const char *op_name();
3728 };
3729 Scalar CondExpGe(const Scalar &x0, const Scalar &x1, const Scalar &x2,
3730  const Scalar &x3);
3731 ad_plain CondExpGe(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
3732  const ad_plain &x3);
3733 ad_aug CondExpGe(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
3734  const ad_aug &x3);
3735 Replay CondExpLe(const Replay &x0, const Replay &x1, const Replay &x2,
3736  const Replay &x3);
3737 struct CondExpLeOp : global::Operator<4, 1> {
3738  void forward(ForwardArgs<Scalar> &args);
3739  void reverse(ReverseArgs<Scalar> &args);
3740  void forward(ForwardArgs<Replay> &args);
3741  void reverse(ReverseArgs<Replay> &args);
3742  void forward(ForwardArgs<Writer> &args);
3743  void reverse(ReverseArgs<Writer> &args);
3744  template <class Type>
3745  void forward(ForwardArgs<Type> &args) {
3746  TMBAD_ASSERT(false);
3747  }
3748  template <class Type>
3749  void reverse(ReverseArgs<Type> &args) {
3750  TMBAD_ASSERT(false);
3751  }
3752  const char *op_name();
3753 };
3754 Scalar CondExpLe(const Scalar &x0, const Scalar &x1, const Scalar &x2,
3755  const Scalar &x3);
3756 ad_plain CondExpLe(const ad_plain &x0, const ad_plain &x1, const ad_plain &x2,
3757  const ad_plain &x3);
3758 ad_aug CondExpLe(const ad_aug &x0, const ad_aug &x1, const ad_aug &x2,
3759  const ad_aug &x3);
3760 
3761 template <class Info>
3762 struct InfoOp : global::DynamicOperator<-1, 0> {
3763  Index n;
3764  Info info;
3765  InfoOp(Index n, Info info) : n(n), info(info) {}
3766  static const bool elimination_protected = true;
3767  static const bool add_forward_replay_copy = true;
3768  static const bool have_input_size_output_size = true;
3769  template <class Type>
3770  void forward(ForwardArgs<Type> &args) {}
3771  template <class Type>
3772  void reverse(ReverseArgs<Type> &args) {}
3773  Index input_size() const { return n; }
3774  Index output_size() const { return 0; }
3775  const char *op_name() { return "InfoOp"; }
3776  void print(global::print_config cfg) {
3777  Rcout << cfg.prefix << info << std::endl;
3778  }
3779  void *operator_data() { return &info; }
3780 };
3781 template <class Info>
3782 void addInfo(const std::vector<ad_aug> &x, const Info &info) {
3783  global::Complete<InfoOp<Info> >(x.size(), info)(x);
3784 }
3785 template <class Info>
3786 void addInfo(const std::vector<double> &x, const Info &info) {}
3787 
3788 struct SumOp : global::DynamicOperator<-1, 1> {
3789  static const bool is_linear = true;
3790  static const bool have_input_size_output_size = true;
3791  static const bool add_forward_replay_copy = true;
3792  size_t n;
3793  Index input_size() const;
3794  Index output_size() const;
3795  SumOp(size_t n);
3796  template <class Type>
3797  void forward(ForwardArgs<Type> &args) {
3798  args.y(0) = 0;
3799  for (size_t i = 0; i < n; i++) {
3800  args.y(0) += args.x(i);
3801  }
3802  }
3803  template <class Type>
3804  void reverse(ReverseArgs<Type> &args) {
3805  for (size_t i = 0; i < n; i++) {
3806  args.dx(i) += args.dy(0);
3807  }
3808  }
3809  const char *op_name();
3810 };
3811 template <class T>
3812 T sum(const std::vector<T> &x) {
3813  return global::Complete<SumOp>(x.size())(x)[0];
3814 }
3815 
3816 ad_plain logspace_sum(const std::vector<ad_plain> &x);
3817 struct LogSpaceSumOp : global::DynamicOperator<-1, 1> {
3818  size_t n;
3819  static const bool have_input_size_output_size = true;
3820  Index input_size() const;
3821  Index output_size() const;
3822  LogSpaceSumOp(size_t n);
3823  void forward(ForwardArgs<Scalar> &args);
3824  void forward(ForwardArgs<Replay> &args);
3825  template <class Type>
3826  void reverse(ReverseArgs<Type> &args) {
3827  for (size_t i = 0; i < n; i++) {
3828  args.dx(i) += exp(args.x(i) - args.y(0)) * args.dy(0);
3829  }
3830  }
3831  const char *op_name();
3832 };
3833 ad_plain logspace_sum(const std::vector<ad_plain> &x);
3834 template <class T>
3835 T logspace_sum(const std::vector<T> &x_) {
3836  std::vector<ad_plain> x(x_.begin(), x_.end());
3837  return logspace_sum(x);
3838 }
3839 
3840 ad_plain logspace_sum_stride(const std::vector<ad_plain> &x,
3841  const std::vector<Index> &stride, size_t n);
3842 struct LogSpaceSumStrideOp : global::DynamicOperator<-1, 1> {
3843  std::vector<Index> stride;
3844  size_t n;
3845  static const bool have_input_size_output_size = true;
3846 
3847  Index number_of_terms() const;
3848  template <class Type>
3849  Type &entry(Type **px, size_t i, size_t j) const {
3850  return px[j][0 + i * stride[j]];
3851  }
3852  template <class Type>
3853  Type rowsum(Type **px, size_t i) const {
3854  size_t m = stride.size();
3855  Type s = (Scalar)(0);
3856  for (size_t j = 0; j < m; j++) {
3857  s += entry(px, i, j);
3858  }
3859  return s;
3860  }
3861  Index input_size() const;
3862  Index output_size() const;
3863  LogSpaceSumStrideOp(std::vector<Index> stride, size_t n);
3864  void forward(ForwardArgs<Scalar> &args);
3865  void forward(ForwardArgs<Replay> &args);
3866  template <class Type>
3867  void reverse(ReverseArgs<Type> &args) {
3868  size_t m = stride.size();
3869  std::vector<Type *> wrk1(m);
3870  std::vector<Type *> wrk2(m);
3871  Type **px = &(wrk1[0]);
3872  Type **pdx = &(wrk2[0]);
3873  for (size_t i = 0; i < m; i++) {
3874  px[i] = args.x_ptr(i);
3875  pdx[i] = args.dx_ptr(i);
3876  }
3877  for (size_t i = 0; i < n; i++) {
3878  Type s = rowsum(px, i);
3879  Type tmp = exp(s - args.y(0)) * args.dy(0);
3880  for (size_t j = 0; j < m; j++) {
3881  entry(pdx, i, j) += tmp;
3882  }
3883  }
3884  }
3889  void dependencies(Args<> &args, Dependencies &dep) const;
3891  static const bool have_dependencies = true;
3893  static const bool implicit_dependencies = true;
3895  static const bool allow_remap = false;
3896  const char *op_name();
3897 
3898  void forward(ForwardArgs<Writer> &args);
3899  void reverse(ReverseArgs<Writer> &args);
3900 };
3901 ad_plain logspace_sum_stride(const std::vector<ad_plain> &x,
3902  const std::vector<Index> &stride, size_t n);
3903 template <class T>
3904 T logspace_sum_stride(const std::vector<T> &x_,
3905  const std::vector<Index> &stride, size_t n) {
3906  std::vector<ad_plain> x(x_.begin(), x_.end());
3907  return logspace_sum_stride(x, stride, n);
3908 }
3909 } // namespace TMBad
3910 #endif // HAVE_GLOBAL_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:157
segment_ref< ReverseArgs, dx_write > dx_segment(Index from, Index size)
segment version
Definition: global.hpp:344
Add zero allocated workspace to the tape.
Definition: global.hpp:2354
void reverse_decr(ReverseArgs< Writer > &args)
Source code writer.
Definition: global.hpp:2166
diff --git a/globals.html b/globals.html index 7d1baa588..036a58d2f 100644 --- a/globals.html +++ b/globals.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/globals_defs.html b/globals_defs.html index b60d7f22b..79b752a3d 100644 --- a/globals_defs.html +++ b/globals_defs.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/globals_func.html b/globals_func.html index b76dd5e74..cc2109d1a 100644 --- a/globals_func.html +++ b/globals_func.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/graph2dot_8hpp_source.html b/graph2dot_8hpp_source.html index 80f933d4a..da9eed3b4 100644 --- a/graph2dot_8hpp_source.html +++ b/graph2dot_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
@@ -73,6 +73,6 @@
graph2dot.hpp
-
1 #ifndef HAVE_GRAPH2DOT_HPP
2 #define HAVE_GRAPH2DOT_HPP
3 // Autogenerated - do not edit by hand !
4 #include <fstream>
5 #include <iostream>
6 #include "global.hpp"
7 
8 namespace TMBad {
9 
10 void graph2dot(global glob, graph G, bool show_id = false,
11  std::ostream& cout = Rcout);
12 
13 void graph2dot(global glob, bool show_id = false, std::ostream& cout = Rcout);
14 
15 void graph2dot(const char* filename, global glob, graph G,
16  bool show_id = false);
17 
18 void graph2dot(const char* filename, global glob, bool show_id = false);
19 
20 } // namespace TMBad
21 #endif // HAVE_GRAPH2DOT_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:153
+
1 #ifndef HAVE_GRAPH2DOT_HPP
2 #define HAVE_GRAPH2DOT_HPP
3 // Autogenerated - do not edit by hand !
4 #include <fstream>
5 #include <iostream>
6 #include "global.hpp"
7 
8 namespace TMBad {
9 
10 void graph2dot(global glob, graph G, bool show_id = false,
11  std::ostream& cout = Rcout);
12 
13 void graph2dot(global glob, bool show_id = false, std::ostream& cout = Rcout);
14 
15 void graph2dot(const char* filename, global glob, graph G,
16  bool show_id = false);
17 
18 void graph2dot(const char* filename, global glob, bool show_id = false);
19 
20 } // namespace TMBad
21 #endif // HAVE_GRAPH2DOT_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:157
License: GPL v2 diff --git a/graph__transform_8hpp_source.html b/graph__transform_8hpp_source.html index cee9b1b1b..4d31d870b 100644 --- a/graph__transform_8hpp_source.html +++ b/graph__transform_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
@@ -73,7 +73,7 @@
graph_transform.hpp
-
1 #ifndef HAVE_GRAPH_TRANSFORM_HPP
2 #define HAVE_GRAPH_TRANSFORM_HPP
3 // Autogenerated - do not edit by hand !
4 #include <cstring>
5 #include <list>
6 #include <map>
7 #include "checkpoint.hpp"
8 #include "global.hpp"
9 #include "integrate.hpp"
10 #include "radix.hpp"
11 
12 namespace TMBad {
13 
18 template <class T>
19 std::vector<bool> lmatch(const std::vector<T> &x, const std::vector<T> &y) {
20  std::vector<bool> ans(x.size(), false);
21  for (size_t i = 0; i < x.size(); i++)
22  for (size_t j = 0; j < y.size(); j++) ans[i] = ans[i] || (x[i] == y[j]);
23  return ans;
24 }
25 
27 template <class I>
28 std::vector<I> which(const std::vector<bool> &x) {
29  std::vector<I> y;
30  for (size_t i = 0; i < x.size(); i++)
31  if (x[i]) y.push_back(i);
32  return y;
33 }
34 
36 std::vector<size_t> which(const std::vector<bool> &x);
37 
39 template <class T>
40 std::vector<T> subset(const std::vector<T> &x, const std::vector<bool> &y) {
41  TMBAD_ASSERT(x.size() == y.size());
42  std::vector<T> ans;
43  for (size_t i = 0; i < x.size(); i++)
44  if (y[i]) ans.push_back(x[i]);
45  return ans;
46 }
47 
49 template <class T, class I>
50 std::vector<T> subset(const std::vector<T> &x, const std::vector<I> &ind) {
51  std::vector<T> ans(ind.size());
52  for (size_t i = 0; i < ind.size(); i++) ans[i] = x[ind[i]];
53  return ans;
54 }
55 
67 template <class T, class I>
68 void make_space_inplace(std::vector<T> &x, std::vector<I> &i, T space = T(0)) {
69  std::vector<bool> mark(x.size(), false);
70  for (size_t k = 0; k < i.size(); k++) {
71  TMBAD_ASSERT(!mark[i[k]]);
72  mark[i[k]] = true;
73  }
74  std::vector<T> x_new;
75  std::vector<I> i_new;
76  for (size_t k = 0; k < x.size(); k++) {
77  if (mark[k]) {
78  x_new.push_back(space);
79  i_new.push_back(x_new.size());
80  }
81  x_new.push_back(x[k]);
82  }
83  std::swap(x, x_new);
84  std::swap(i, i_new);
85 }
86 
88 template <class T>
89 std::vector<T> invperm(const std::vector<T> &perm) {
90  std::vector<T> iperm(perm.size());
91  for (size_t i = 0; i < perm.size(); i++) iperm[perm[i]] = i;
92  return iperm;
93 }
94 
96 template <class T>
97 std::vector<size_t> match(const std::vector<T> &x, const std::vector<T> &y) {
98  return which(lmatch(x, y));
99 }
100 
102 size_t prod_int(const std::vector<size_t> &x);
103 
116 template <class T>
117 std::vector<size_t> order(std::vector<T> x) {
118  std::vector<std::pair<T, size_t> > y(x.size());
119  for (size_t i = 0; i < x.size(); i++) {
120  y[i].first = x[i];
121  y[i].second = i;
122  }
123  sort_inplace(y);
124  std::vector<size_t> z(x.size());
125  for (size_t i = 0; i < x.size(); i++) {
126  z[i] = y[i].second;
127  }
128  return z;
129 }
130 
132 std::vector<bool> reverse_boundary(global &glob, const std::vector<bool> &vars);
133 
141 std::vector<Index> get_accumulation_tree(global &glob, bool boundary = false);
142 
144 std::vector<Index> find_op_by_name(global &glob, const char *name);
145 
149 std::vector<Index> substitute(global &glob, const std::vector<Index> &seq,
150  bool inv_tags = true, bool dep_tags = true);
151 
153 std::vector<Index> substitute(global &glob, const char *name,
154  bool inv_tags = true, bool dep_tags = true);
155 
163 global accumulation_tree_split(global glob, bool sum_ = false);
164 
171 void aggregate(global &glob, int sign = 1);
172 
177 struct old_state {
178  std::vector<Index> dep_index;
179  size_t opstack_size;
180  global &glob;
181  old_state(global &glob);
182  void restore();
183 };
184 
185 std::vector<Index> remap_identical_sub_expressions(
186  global &glob, std::vector<Index> inv_remap);
187 struct term_info {
188  global &glob;
189  std::vector<Index> id;
190  std::vector<size_t> count;
191  term_info(global &glob, bool do_init = true);
192  void initialize(std::vector<Index> inv_remap = std::vector<Index>(0));
193 };
194 
195 struct gk_config {
196  bool debug;
197  bool adaptive;
198  bool nan2zero;
203  double ytol;
204  double dx;
205  gk_config();
206 };
207 
208 template <class Float = ad_adapt>
209 struct logIntegrate_t {
210  typedef Float Scalar;
211  global glob;
212  double mu, sigma, f_mu;
213  gk_config cfg;
214  double f(double x) {
215  Index k = glob.inv_index.size();
216  glob.value_inv(k - 1) = x;
217  glob.forward();
218  return glob.value_dep(0);
219  }
220  double g(double x) {
221  return (f(x + .5 * cfg.dx) - f(x - .5 * cfg.dx)) / cfg.dx;
222  }
223  double h(double x) {
224  return (g(x + .5 * cfg.dx) - g(x - .5 * cfg.dx)) / cfg.dx;
225  }
232  void rescale_integrand(const std::vector<ad_aug> &x) {
233  TMBAD_ASSERT(x.size() + 1 == glob.inv_index.size());
234  if (cfg.debug) Rcout << "rescale integrand:\n";
235  for (size_t i = 0; i < x.size(); i++) glob.value_inv(i) = x[i].Value();
236  mu = glob.value_inv(x.size());
237  f_mu = f(mu);
238  int i = 0;
239  for (; i < 100; i++) {
240  double g_mu = g(mu);
241  double h_mu = h(mu);
242  if (std::isfinite(f_mu) && !std::isfinite(h_mu)) {
243  cfg.dx = cfg.dx * .5;
244  continue;
245  }
246  double mu_new;
247  if (h_mu < 0)
248  mu_new = mu - g_mu / h_mu;
249  else
250  mu_new = mu + (g_mu > 0 ? cfg.dx : -cfg.dx);
251  double f_mu_new = f(mu_new);
252  if (cfg.debug) {
253  Rcout << "mu=" << mu << " mu_new=" << mu_new << " g_mu=" << g_mu
254  << " h_mu=" << h_mu << " f_mu=" << f_mu
255  << " f_mu_new=" << f_mu_new << "\n";
256  }
257  if (f_mu_new > f_mu + cfg.ytol) {
258  mu = mu_new;
259  f_mu = f_mu_new;
260  } else {
261  break;
262  }
263  }
264  sigma = 1. / sqrt(-h(mu));
265  if (!std::isfinite(sigma)) sigma = 10000;
266  if (cfg.debug)
267  Rcout << "==> i=" << i << " mu=" << mu << " f_mu=" << f_mu
268  << " sigma=" << sigma << "\n";
269  }
270 
271  logIntegrate_t(global &glob, gk_config cfg)
272  : glob(glob), mu(0), sigma(1), f_mu(0), cfg(cfg) {
273  TMBAD_ASSERT(glob.inv_index.size() >= 1);
274  TMBAD_ASSERT(glob.dep_index.size() == 1);
275  }
276  logIntegrate_t() {}
277  global::replay *p_replay;
278 
279  Float operator()(Float u) {
280  Index k = glob.inv_index.size();
281  p_replay->value_inv(k - 1) = sigma * u + mu;
282  p_replay->forward(false, false);
283  Float ans = exp(p_replay->value_dep(0) - f_mu);
284  if (cfg.nan2zero && ans != ans) ans = 0;
285  return ans;
286  }
287 
288  std::vector<ad_aug> operator()(const std::vector<ad_aug> &x) {
289  rescale_integrand(x);
290  global::replay replay(this->glob, *get_glob());
291  p_replay = &replay;
292  replay.start();
293  Index k = glob.inv_index.size();
294  for (Index i = 0; i < k - 1; i++) replay.value_inv(i) = x[i];
295  Float I = integrate(*this);
296  Float ans = log(I) + log(sigma) + f_mu;
297  replay.stop();
298  return std::vector<ad_aug>(1, ans);
299  }
300 };
301 
302 template <class ADFun>
303 struct integrate_subgraph {
304  global &glob;
305  std::vector<Index> random;
306  graph forward_graph;
307  graph reverse_graph;
308  std::vector<Index> var_remap;
309  std::vector<bool> mark;
310  gk_config cfg;
314  integrate_subgraph(global &glob, std::vector<Index> random,
315  gk_config cfg = gk_config())
316  : glob(glob),
317  random(random),
318  forward_graph(glob.forward_graph()),
319  reverse_graph(glob.reverse_graph()),
320  cfg(cfg) {
321  glob.subgraph_cache_ptr();
322  mark.resize(glob.opstack.size(), false);
323  }
327  global &try_integrate_variable(Index i) {
328  const std::vector<Index> &inv2op = forward_graph.inv2op;
329 
330  Index start_node = inv2op[i];
331  glob.subgraph_seq.resize(0);
332  glob.subgraph_seq.push_back(start_node);
333  forward_graph.search(glob.subgraph_seq);
334 
335  if (glob.subgraph_seq.size() == 1) return glob;
336 
337  bool any_marked = false;
338  for (Index i = 0; i < glob.subgraph_seq.size(); i++) {
339  any_marked |= mark[glob.subgraph_seq[i]];
340  if (any_marked) {
341  return glob;
342  }
343  }
344 
345  for (Index i = 0; i < glob.subgraph_seq.size(); i++) {
346  mark[glob.subgraph_seq[i]] = true;
347  }
348 
349  std::vector<Index> boundary = reverse_graph.boundary(glob.subgraph_seq);
350 
351  global new_glob;
352  var_remap.resize(glob.values.size());
353  new_glob.ad_start();
354  Index total_boundary_vars = 0;
355  std::vector<ad_plain> boundary_vars;
356  OperatorPure *constant = glob.getOperator<global::ConstOp>();
357  for (Index i = 0; i < boundary.size(); i++) {
358  Index m = glob.opstack[boundary[i]]->output_size();
359  for (Index j = 0; j < m; j++) {
360  Index boundary_var = glob.subgraph_ptr[boundary[i]].second + j;
361  var_remap[boundary_var] = total_boundary_vars;
362  total_boundary_vars++;
363  if (glob.opstack[boundary[i]] != constant) {
364  ad_plain().Independent();
365  ad_plain tmp;
366  tmp.index = boundary_var;
367  boundary_vars.push_back(tmp);
368  } else {
369  ad_plain(glob.values[boundary_var]);
370  }
371  }
372  }
373  new_glob.ad_stop();
374 
375  new_glob = glob.extract_sub(var_remap, new_glob);
376 
377  aggregate(new_glob);
378 
379  logIntegrate_t<> taped_integral(new_glob, cfg);
380 
381  glob.ad_start();
382  std::vector<ad_aug> boundary_vars2(boundary_vars.begin(),
383  boundary_vars.end());
384  if (cfg.adaptive) {
386  global::Complete<AtomOp<DTab> > taped_integral_operator(taped_integral,
387  boundary_vars2);
388  taped_integral_operator(boundary_vars)[0].Dependent();
389  } else {
390  taped_integral(boundary_vars2)[0].Dependent();
391  }
392  glob.ad_stop();
393  return glob;
394  }
395  global &gk() {
396  for (Index i = 0; i < random.size(); i++) {
397  try_integrate_variable(random[i]);
398  }
399 
400  std::vector<bool> keep_node = mark;
401  keep_node.flip();
402 
403  keep_node.resize(glob.opstack.size(), true);
404 
405  std::vector<Index> v2o = glob.var2op();
406  for (Index i = 0; i < glob.inv_index.size(); i++) {
407  keep_node[v2o[glob.inv_index[i]]] = true;
408  }
409 
410  glob.subgraph_seq.resize(0);
411  for (Index i = 0; i < keep_node.size(); i++) {
412  if (keep_node[i]) glob.subgraph_seq.push_back(i);
413  }
414 
415  glob = glob.extract_sub();
416  return glob;
417  }
418 };
419 
436  private:
437  std::vector<size_t> x;
438  std::vector<bool> mask_;
439  size_t pointer;
440  std::vector<size_t> bound;
441 
442  public:
447  size_t count();
453  multivariate_index(size_t bound_, size_t dim, bool flag = true);
458  multivariate_index(std::vector<size_t> bound, bool flag = true);
460  void flip();
462  multivariate_index &operator++();
464  operator size_t();
466  size_t index(size_t i);
468  std::vector<size_t> index();
470  std::vector<bool>::reference mask(size_t i);
472  void set_mask(const std::vector<bool> &mask);
473 };
474 
480 struct clique {
482  std::vector<Index> indices;
484  std::vector<ad_aug> logsum;
486  std::vector<size_t> dim;
487  size_t clique_size();
488  clique();
489  void subset_inplace(const std::vector<bool> &mask);
490  void logsum_init();
491  bool empty() const;
492  bool contains(Index i);
510  void get_stride(const clique &super, Index ind, std::vector<ad_plain> &offset,
511  Index &stride);
512 };
513 
520 struct sr_grid {
521  std::vector<Scalar> x;
522  std::vector<Scalar> w;
523  sr_grid();
524 
525  sr_grid(Scalar a, Scalar b, size_t n);
526 
527  sr_grid(size_t n);
528  size_t size();
529 
530  std::vector<ad_plain> logw;
531  ad_plain logw_offset();
532 };
533 
582  std::list<clique> cliques;
583  std::vector<sr_grid> grid;
584  std::vector<Index> inv2grid;
585  global &glob;
586  global new_glob;
587  std::vector<Index> random;
588  global::replay replay;
589  std::vector<bool> mark;
590  graph forward_graph;
591  graph reverse_graph;
592  std::vector<Index> var_remap;
593  const static Index NA = -1;
594  std::vector<Index> op2inv_idx;
595  std::vector<Index> op2dep_idx;
596  std::vector<bool> terms_done;
597  term_info tinfo;
598  std::map<size_t, std::vector<ad_aug> > cache;
615  sequential_reduction(global &glob, std::vector<Index> random,
616  std::vector<sr_grid> grid =
617  std::vector<sr_grid>(1, sr_grid(-20, 20, 200)),
618  std::vector<Index> random2grid = std::vector<Index>(0),
619  bool perm = true);
626  void reorder_random();
627 
628  std::vector<size_t> get_grid_bounds(std::vector<Index> inv_index);
629 
630  std::vector<sr_grid *> get_grid(std::vector<Index> inv_index);
642  std::vector<ad_aug> tabulate(std::vector<Index> inv_index, Index dep_index);
643 
665  void merge(Index i);
666 
678  void update(Index i);
679  void show_cliques();
680  void update_all();
681  ad_aug get_result();
682  global marginal();
683 };
684 
718 struct autopar {
719  global &glob;
720  graph reverse_graph;
721  size_t num_threads;
727  std::vector<std::vector<Index> > node_split;
729  std::vector<std::vector<Index> > inv_idx;
731  std::vector<std::vector<Index> > dep_idx;
733  std::vector<global> vglob;
734  autopar(global &glob, size_t num_threads);
737  std::vector<size_t> max_tree_depth();
738 
739  template <class T>
740  size_t which_min(const std::vector<T> &x) {
741  return std::min_element(x.begin(), x.end()) - x.begin();
742  }
743 
744  void run();
746  void extract();
748  size_t input_size() const;
750  size_t output_size() const;
751 };
752 
757  static const bool have_input_size_output_size = true;
759  std::vector<global> vglob;
761  std::vector<std::vector<Index> > inv_idx;
764  std::vector<std::vector<Index> > dep_idx;
765 
766  Index n, m;
767  Index input_size() const;
768  Index output_size() const;
769  ParalOp(const autopar &ap);
770 
771  template <class T>
772  void reverse(ReverseArgs<T> &args) {
773  bool parallel_operator_used_with_valid_type = false;
774  TMBAD_ASSERT(parallel_operator_used_with_valid_type);
775  }
776  static const bool add_forward_replay_copy = true;
777  template <class T>
778  void forward(ForwardArgs<T> &args) {
779  bool parallel_operator_used_with_valid_type = false;
780  TMBAD_ASSERT(parallel_operator_used_with_valid_type);
781  }
782 
783  void forward(ForwardArgs<Scalar> &args);
784  void reverse(ReverseArgs<Scalar> &args);
785  const char *op_name();
786  void print(global::print_config cfg);
787 };
788 
789 std::vector<Index> get_likely_expression_duplicates(
790  const global &glob, std::vector<Index> inv_remap);
791 
796 bool all_allow_remap(const global &glob);
797 
799 template <class T>
800 struct forbid_remap {
801  T &remap;
802  forbid_remap(T &remap) : remap(remap) {}
803  void operator()(Index a, Index b) {
804  bool ok = true;
805  for (Index i = a + 1; i <= b; i++) {
806  ok &= (remap[i] - remap[i - 1] == 1);
807  }
808  if (ok) {
809  return;
810  } else {
811  for (Index i = a; i <= b; i++) {
812  remap[i] = i;
813  }
814  }
815  }
816 };
817 
874 std::vector<Index> remap_identical_sub_expressions(
875  global &glob, std::vector<Index> inv_remap);
876 
878 
879 std::vector<Position> inv_positions(global &glob);
880 
886 void reorder_graph(global &glob, std::vector<Index> inv_idx);
887 
888 } // namespace TMBad
889 #endif // HAVE_GRAPH_TRANSFORM_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:153
+
1 #ifndef HAVE_GRAPH_TRANSFORM_HPP
2 #define HAVE_GRAPH_TRANSFORM_HPP
3 // Autogenerated - do not edit by hand !
4 #include <cstring>
5 #include <list>
6 #include <map>
7 #include "checkpoint.hpp"
8 #include "global.hpp"
9 #include "integrate.hpp"
10 #include "radix.hpp"
11 
12 namespace TMBad {
13 
18 template <class T>
19 std::vector<bool> lmatch(const std::vector<T> &x, const std::vector<T> &y) {
20  std::vector<bool> ans(x.size(), false);
21  for (size_t i = 0; i < x.size(); i++)
22  for (size_t j = 0; j < y.size(); j++) ans[i] = ans[i] || (x[i] == y[j]);
23  return ans;
24 }
25 
27 template <class I>
28 std::vector<I> which(const std::vector<bool> &x) {
29  std::vector<I> y;
30  for (size_t i = 0; i < x.size(); i++)
31  if (x[i]) y.push_back(i);
32  return y;
33 }
34 
36 std::vector<size_t> which(const std::vector<bool> &x);
37 
39 template <class T>
40 std::vector<T> subset(const std::vector<T> &x, const std::vector<bool> &y) {
41  TMBAD_ASSERT(x.size() == y.size());
42  std::vector<T> ans;
43  for (size_t i = 0; i < x.size(); i++)
44  if (y[i]) ans.push_back(x[i]);
45  return ans;
46 }
47 
49 template <class T, class I>
50 std::vector<T> subset(const std::vector<T> &x, const std::vector<I> &ind) {
51  std::vector<T> ans(ind.size());
52  for (size_t i = 0; i < ind.size(); i++) ans[i] = x[ind[i]];
53  return ans;
54 }
55 
67 template <class T, class I>
68 void make_space_inplace(std::vector<T> &x, std::vector<I> &i, T space = T(0)) {
69  std::vector<bool> mark(x.size(), false);
70  for (size_t k = 0; k < i.size(); k++) {
71  TMBAD_ASSERT(!mark[i[k]]);
72  mark[i[k]] = true;
73  }
74  std::vector<T> x_new;
75  std::vector<I> i_new;
76  for (size_t k = 0; k < x.size(); k++) {
77  if (mark[k]) {
78  x_new.push_back(space);
79  i_new.push_back(x_new.size());
80  }
81  x_new.push_back(x[k]);
82  }
83  std::swap(x, x_new);
84  std::swap(i, i_new);
85 }
86 
88 template <class T>
89 std::vector<T> invperm(const std::vector<T> &perm) {
90  std::vector<T> iperm(perm.size());
91  for (size_t i = 0; i < perm.size(); i++) iperm[perm[i]] = i;
92  return iperm;
93 }
94 
96 template <class T>
97 std::vector<size_t> match(const std::vector<T> &x, const std::vector<T> &y) {
98  return which(lmatch(x, y));
99 }
100 
102 size_t prod_int(const std::vector<size_t> &x);
103 
116 template <class T>
117 std::vector<size_t> order(std::vector<T> x) {
118  std::vector<std::pair<T, size_t> > y(x.size());
119  for (size_t i = 0; i < x.size(); i++) {
120  y[i].first = x[i];
121  y[i].second = i;
122  }
123  sort_inplace(y);
124  std::vector<size_t> z(x.size());
125  for (size_t i = 0; i < x.size(); i++) {
126  z[i] = y[i].second;
127  }
128  return z;
129 }
130 
132 std::vector<bool> reverse_boundary(global &glob, const std::vector<bool> &vars);
133 
141 std::vector<Index> get_accumulation_tree(global &glob, bool boundary = false);
142 
144 std::vector<Index> find_op_by_name(global &glob, const char *name);
145 
149 std::vector<Index> substitute(global &glob, const std::vector<Index> &seq,
150  bool inv_tags = true, bool dep_tags = true);
151 
153 std::vector<Index> substitute(global &glob, const char *name,
154  bool inv_tags = true, bool dep_tags = true);
155 
163 global accumulation_tree_split(global glob, bool sum_ = false);
164 
171 void aggregate(global &glob, int sign = 1);
172 
177 struct old_state {
178  std::vector<Index> dep_index;
179  size_t opstack_size;
180  global &glob;
181  old_state(global &glob);
182  void restore();
183 };
184 
185 std::vector<Index> remap_identical_sub_expressions(
186  global &glob, std::vector<Index> inv_remap);
187 struct term_info {
188  global &glob;
189  std::vector<Index> id;
190  std::vector<size_t> count;
191  term_info(global &glob, bool do_init = true);
192  void initialize(std::vector<Index> inv_remap = std::vector<Index>(0));
193 };
194 
195 struct gk_config {
196  bool debug;
197  bool adaptive;
198  bool nan2zero;
203  double ytol;
204  double dx;
205  gk_config();
206 };
207 
208 template <class Float = ad_adapt>
209 struct logIntegrate_t {
210  typedef Float Scalar;
211  global glob;
212  double mu, sigma, f_mu;
213  gk_config cfg;
214  double f(double x) {
215  Index k = glob.inv_index.size();
216  glob.value_inv(k - 1) = x;
217  glob.forward();
218  return glob.value_dep(0);
219  }
220  double g(double x) {
221  return (f(x + .5 * cfg.dx) - f(x - .5 * cfg.dx)) / cfg.dx;
222  }
223  double h(double x) {
224  return (g(x + .5 * cfg.dx) - g(x - .5 * cfg.dx)) / cfg.dx;
225  }
232  void rescale_integrand(const std::vector<ad_aug> &x) {
233  TMBAD_ASSERT(x.size() + 1 == glob.inv_index.size());
234  if (cfg.debug) Rcout << "rescale integrand:\n";
235  for (size_t i = 0; i < x.size(); i++) glob.value_inv(i) = x[i].Value();
236  mu = glob.value_inv(x.size());
237  f_mu = f(mu);
238  int i = 0;
239  for (; i < 100; i++) {
240  double g_mu = g(mu);
241  double h_mu = h(mu);
242  if (std::isfinite(f_mu) && !std::isfinite(h_mu)) {
243  cfg.dx = cfg.dx * .5;
244  continue;
245  }
246  double mu_new;
247  if (h_mu < 0)
248  mu_new = mu - g_mu / h_mu;
249  else
250  mu_new = mu + (g_mu > 0 ? cfg.dx : -cfg.dx);
251  double f_mu_new = f(mu_new);
252  if (cfg.debug) {
253  Rcout << "mu=" << mu << " mu_new=" << mu_new << " g_mu=" << g_mu
254  << " h_mu=" << h_mu << " f_mu=" << f_mu
255  << " f_mu_new=" << f_mu_new << "\n";
256  }
257  if (f_mu_new > f_mu + cfg.ytol) {
258  mu = mu_new;
259  f_mu = f_mu_new;
260  } else {
261  break;
262  }
263  }
264  sigma = 1. / sqrt(-h(mu));
265  if (!std::isfinite(sigma)) sigma = 10000;
266  if (cfg.debug)
267  Rcout << "==> i=" << i << " mu=" << mu << " f_mu=" << f_mu
268  << " sigma=" << sigma << "\n";
269  }
270 
271  logIntegrate_t(global &glob, gk_config cfg)
272  : glob(glob), mu(0), sigma(1), f_mu(0), cfg(cfg) {
273  TMBAD_ASSERT(glob.inv_index.size() >= 1);
274  TMBAD_ASSERT(glob.dep_index.size() == 1);
275  }
276  logIntegrate_t() {}
277  global::replay *p_replay;
278 
279  Float operator()(Float u) {
280  Index k = glob.inv_index.size();
281  p_replay->value_inv(k - 1) = sigma * u + mu;
282  p_replay->forward(false, false);
283  Float ans = exp(p_replay->value_dep(0) - f_mu);
284  if (cfg.nan2zero && ans != ans) ans = 0;
285  return ans;
286  }
287 
288  std::vector<ad_aug> operator()(const std::vector<ad_aug> &x) {
289  rescale_integrand(x);
290  global::replay replay(this->glob, *get_glob());
291  p_replay = &replay;
292  replay.start();
293  Index k = glob.inv_index.size();
294  for (Index i = 0; i < k - 1; i++) replay.value_inv(i) = x[i];
295  Float I = integrate(*this);
296  Float ans = log(I) + log(sigma) + f_mu;
297  replay.stop();
298  return std::vector<ad_aug>(1, ans);
299  }
300 };
301 
302 template <class ADFun>
303 struct integrate_subgraph {
304  global &glob;
305  std::vector<Index> random;
306  graph forward_graph;
307  graph reverse_graph;
308  std::vector<Index> var_remap;
309  std::vector<bool> mark;
310  gk_config cfg;
314  integrate_subgraph(global &glob, std::vector<Index> random,
315  gk_config cfg = gk_config())
316  : glob(glob),
317  random(random),
318  forward_graph(glob.forward_graph()),
319  reverse_graph(glob.reverse_graph()),
320  cfg(cfg) {
321  glob.subgraph_cache_ptr();
322  mark.resize(glob.opstack.size(), false);
323  }
327  global &try_integrate_variable(Index i) {
328  const std::vector<Index> &inv2op = forward_graph.inv2op;
329 
330  Index start_node = inv2op[i];
331  glob.subgraph_seq.resize(0);
332  glob.subgraph_seq.push_back(start_node);
333  forward_graph.search(glob.subgraph_seq);
334 
335  if (glob.subgraph_seq.size() == 1) return glob;
336 
337  bool any_marked = false;
338  for (Index i = 0; i < glob.subgraph_seq.size(); i++) {
339  any_marked |= mark[glob.subgraph_seq[i]];
340  if (any_marked) {
341  return glob;
342  }
343  }
344 
345  for (Index i = 0; i < glob.subgraph_seq.size(); i++) {
346  mark[glob.subgraph_seq[i]] = true;
347  }
348 
349  std::vector<Index> boundary = reverse_graph.boundary(glob.subgraph_seq);
350 
351  global new_glob;
352  var_remap.resize(glob.values.size());
353  new_glob.ad_start();
354  Index total_boundary_vars = 0;
355  std::vector<ad_plain> boundary_vars;
356  OperatorPure *constant = glob.getOperator<global::ConstOp>();
357  for (Index i = 0; i < boundary.size(); i++) {
358  Index m = glob.opstack[boundary[i]]->output_size();
359  for (Index j = 0; j < m; j++) {
360  Index boundary_var = glob.subgraph_ptr[boundary[i]].second + j;
361  var_remap[boundary_var] = total_boundary_vars;
362  total_boundary_vars++;
363  if (glob.opstack[boundary[i]] != constant) {
364  ad_plain().Independent();
365  ad_plain tmp;
366  tmp.index = boundary_var;
367  boundary_vars.push_back(tmp);
368  } else {
369  ad_plain(glob.values[boundary_var]);
370  }
371  }
372  }
373  new_glob.ad_stop();
374 
375  new_glob = glob.extract_sub(var_remap, new_glob);
376 
377  aggregate(new_glob);
378 
379  logIntegrate_t<> taped_integral(new_glob, cfg);
380 
381  glob.ad_start();
382  std::vector<ad_aug> boundary_vars2(boundary_vars.begin(),
383  boundary_vars.end());
384  if (cfg.adaptive) {
386  global::Complete<AtomOp<DTab> > taped_integral_operator(taped_integral,
387  boundary_vars2);
388  taped_integral_operator(boundary_vars)[0].Dependent();
389  } else {
390  taped_integral(boundary_vars2)[0].Dependent();
391  }
392  glob.ad_stop();
393  return glob;
394  }
395  global &gk() {
396  for (Index i = 0; i < random.size(); i++) {
397  try_integrate_variable(random[i]);
398  }
399 
400  std::vector<bool> keep_node = mark;
401  keep_node.flip();
402 
403  keep_node.resize(glob.opstack.size(), true);
404 
405  std::vector<Index> v2o = glob.var2op();
406  for (Index i = 0; i < glob.inv_index.size(); i++) {
407  keep_node[v2o[glob.inv_index[i]]] = true;
408  }
409 
410  glob.subgraph_seq.resize(0);
411  for (Index i = 0; i < keep_node.size(); i++) {
412  if (keep_node[i]) glob.subgraph_seq.push_back(i);
413  }
414 
415  glob = glob.extract_sub();
416  return glob;
417  }
418 };
419 
436  private:
437  std::vector<size_t> x;
438  std::vector<bool> mask_;
439  size_t pointer;
440  std::vector<size_t> bound;
441 
442  public:
447  size_t count();
453  multivariate_index(size_t bound_, size_t dim, bool flag = true);
458  multivariate_index(std::vector<size_t> bound, bool flag = true);
460  void flip();
462  multivariate_index &operator++();
464  operator size_t();
466  size_t index(size_t i);
468  std::vector<size_t> index();
470  std::vector<bool>::reference mask(size_t i);
472  void set_mask(const std::vector<bool> &mask);
473 };
474 
480 struct clique {
482  std::vector<Index> indices;
484  std::vector<ad_aug> logsum;
486  std::vector<size_t> dim;
487  size_t clique_size();
488  clique();
489  void subset_inplace(const std::vector<bool> &mask);
490  void logsum_init();
491  bool empty() const;
492  bool contains(Index i);
510  void get_stride(const clique &super, Index ind, std::vector<ad_plain> &offset,
511  Index &stride);
512 };
513 
520 struct sr_grid {
521  std::vector<Scalar> x;
522  std::vector<Scalar> w;
523  sr_grid();
524 
525  sr_grid(Scalar a, Scalar b, size_t n);
526 
527  sr_grid(size_t n);
528  size_t size();
529 
530  std::vector<ad_plain> logw;
531  ad_plain logw_offset();
532 };
533 
582  std::list<clique> cliques;
583  std::vector<sr_grid> grid;
584  std::vector<Index> inv2grid;
585  global &glob;
586  global new_glob;
587  std::vector<Index> random;
588  global::replay replay;
589  std::vector<bool> mark;
590  graph forward_graph;
591  graph reverse_graph;
592  std::vector<Index> var_remap;
593  const static Index NA = -1;
594  std::vector<Index> op2inv_idx;
595  std::vector<Index> op2dep_idx;
596  std::vector<bool> terms_done;
597  term_info tinfo;
598  std::map<size_t, std::vector<ad_aug> > cache;
615  sequential_reduction(global &glob, std::vector<Index> random,
616  std::vector<sr_grid> grid =
617  std::vector<sr_grid>(1, sr_grid(-20, 20, 200)),
618  std::vector<Index> random2grid = std::vector<Index>(0),
619  bool perm = true);
626  void reorder_random();
627 
628  std::vector<size_t> get_grid_bounds(std::vector<Index> inv_index);
629 
630  std::vector<sr_grid *> get_grid(std::vector<Index> inv_index);
642  std::vector<ad_aug> tabulate(std::vector<Index> inv_index, Index dep_index);
643 
665  void merge(Index i);
666 
678  void update(Index i);
679  void show_cliques();
680  void update_all();
681  ad_aug get_result();
682  global marginal();
683 };
684 
718 struct autopar {
719  global &glob;
720  graph reverse_graph;
721  size_t num_threads;
727  std::vector<std::vector<Index> > node_split;
729  std::vector<std::vector<Index> > inv_idx;
731  std::vector<std::vector<Index> > dep_idx;
733  std::vector<global> vglob;
734  autopar(global &glob, size_t num_threads);
737  std::vector<size_t> max_tree_depth();
738 
739  template <class T>
740  size_t which_min(const std::vector<T> &x) {
741  return std::min_element(x.begin(), x.end()) - x.begin();
742  }
743 
744  void run();
746  void extract();
748  size_t input_size() const;
750  size_t output_size() const;
751 };
752 
757  static const bool have_input_size_output_size = true;
759  std::vector<global> vglob;
761  std::vector<std::vector<Index> > inv_idx;
764  std::vector<std::vector<Index> > dep_idx;
765 
766  Index n, m;
767  Index input_size() const;
768  Index output_size() const;
769  ParalOp(const autopar &ap);
770 
771  template <class T>
772  void reverse(ReverseArgs<T> &args) {
773  bool parallel_operator_used_with_valid_type = false;
774  TMBAD_ASSERT(parallel_operator_used_with_valid_type);
775  }
776  static const bool add_forward_replay_copy = true;
777  template <class T>
778  void forward(ForwardArgs<T> &args) {
779  bool parallel_operator_used_with_valid_type = false;
780  TMBAD_ASSERT(parallel_operator_used_with_valid_type);
781  }
782 
783  void forward(ForwardArgs<Scalar> &args);
784  void reverse(ReverseArgs<Scalar> &args);
785  const char *op_name();
786  void print(global::print_config cfg);
787 };
788 
789 std::vector<Index> get_likely_expression_duplicates(
790  const global &glob, std::vector<Index> inv_remap);
791 
796 bool all_allow_remap(const global &glob);
797 
799 template <class T>
800 struct forbid_remap {
801  T &remap;
802  forbid_remap(T &remap) : remap(remap) {}
803  void operator()(Index a, Index b) {
804  bool ok = true;
805  for (Index i = a + 1; i <= b; i++) {
806  ok &= (remap[i] - remap[i - 1] == 1);
807  }
808  if (ok) {
809  return;
810  } else {
811  for (Index i = a; i <= b; i++) {
812  remap[i] = i;
813  }
814  }
815  }
816 };
817 
874 std::vector<Index> remap_identical_sub_expressions(
875  global &glob, std::vector<Index> inv_remap);
876 
878 
879 std::vector<Position> inv_positions(global &glob);
880 
886 void reorder_graph(global &glob, std::vector<Index> inv_idx);
887 
888 } // namespace TMBad
889 #endif // HAVE_GRAPH_TRANSFORM_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:157
std::vector< T > subset(const std::vector< T > &x, const std::vector< bool > &y)
Vector subset by boolean mask.
graph reverse_graph(std::vector< bool > keep_var=std::vector< bool >(0))
Construct operator graph with reverse connections.
Definition: TMBad.cpp:1584
size_t prod_int(const std::vector< size_t > &x)
Integer product function.
Definition: TMBad.cpp:3534
diff --git a/graph_legend.html b/graph_legend.html index 5cf9c1396..1ebdf8cea 100644 --- a/graph_legend.html +++ b/graph_legend.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/group__Densities.html b/group__Densities.html index e4453d948..0a5d7da22 100644 --- a/group__Densities.html +++ b/group__Densities.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/group__R__style__distribution.html b/group__R__style__distribution.html index 45f185ac2..b4cf06e59 100644 --- a/group__R__style__distribution.html +++ b/group__R__style__distribution.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/group__macros.html b/group__macros.html index a7d6adf81..b307e860a 100644 --- a/group__macros.html +++ b/group__macros.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
@@ -243,7 +243,7 @@

-Value:
if (!Rf_isNull(getListElement(TMB_OBJECTIVE_PTR -> parameters,#name))){ \
name.fill( TMB_OBJECTIVE_PTR -> fillShape(asVector<Type>( \
TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isNumeric)), \
#name), \
Rf_getAttrib( \
TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isNumeric), \
Rf_install("ord")) ); \
}
Utilities for OSA residuals.
Definition: tmb_core.hpp:441
+Value:
if (!Rf_isNull(getListElement(TMB_OBJECTIVE_PTR -> parameters,#name))){ \
name.fill( TMB_OBJECTIVE_PTR -> fillShape(asVector<Type>( \
TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isReal )), \
#name), \
Rf_getAttrib( \
TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isReal ), \
Rf_install("ord")) ); \
}
Utilities for OSA residuals.
Definition: tmb_core.hpp:441
#define TMB_OBJECTIVE_PTR
Pointer to objective function used by DATA and PARAMETER macros.
Definition: tmb_core.hpp:208

Declare an indicator array 'name' of same shape as 'obs'. By default, the indicator array is filled with ones indicating that all observations are enabled.

@@ -269,7 +269,7 @@

-Value:
vector<int> name(asVector<int>( \
getListElement(TMB_OBJECTIVE_PTR -> data, #name, &Rf_isNumeric)));
Vector class used by TMB.
Definition: vector.hpp:17
+Value:
vector<int> name(asVector<int>( \
getListElement(TMB_OBJECTIVE_PTR -> data, #name, &Rf_isReal )));
Vector class used by TMB.
Definition: vector.hpp:17
#define TMB_OBJECTIVE_PTR
Pointer to objective function used by DATA and PARAMETER macros.
Definition: tmb_core.hpp:208

Get data vector of type "factor" from R and declare it as a zero-based integer vector.

@@ -387,7 +387,7 @@

-Value:
vector<Type> name; \
if (!Rf_isNull(getListElement(TMB_OBJECTIVE_PTR -> parameters,#name))){ \
name = TMB_OBJECTIVE_PTR -> fillShape(asVector<Type>( \
TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isNumeric)), #name); \
} else { \
name = asVector<Type>(getListElement( \
TMB_OBJECTIVE_PTR -> data,#name,&Rf_isNumeric)); \
}
Vector class used by TMB.
Definition: vector.hpp:17
+Value:
vector<Type> name; \
if (!Rf_isNull(getListElement(TMB_OBJECTIVE_PTR -> parameters,#name))){ \
name = TMB_OBJECTIVE_PTR -> fillShape(asVector<Type>( \
TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isReal )), #name); \
} else { \
name = asVector<Type>(getListElement( \
TMB_OBJECTIVE_PTR -> data,#name,&Rf_isReal )); \
}
Vector class used by TMB.
Definition: vector.hpp:17
#define TMB_OBJECTIVE_PTR
Pointer to objective function used by DATA and PARAMETER macros.
Definition: tmb_core.hpp:208

Get data vector from R and declare it as vector<Type>

@@ -423,7 +423,7 @@

-Value:
if (!Rf_isNull(getListElement(TMB_OBJECTIVE_PTR -> parameters,#name))){ \
name.fill( TMB_OBJECTIVE_PTR -> fillShape(asVector<Type>( \
TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isNumeric)), \
#name), \
Rf_getAttrib( \
TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isNumeric), \
Rf_install("ord")) ); \
}
Utilities for OSA residuals.
Definition: tmb_core.hpp:441
+Value:
if (!Rf_isNull(getListElement(TMB_OBJECTIVE_PTR -> parameters,#name))){ \
name.fill( TMB_OBJECTIVE_PTR -> fillShape(asVector<Type>( \
TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isReal )), \
#name), \
Rf_getAttrib( \
TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isReal ), \
Rf_install("ord")) ); \
}
Utilities for OSA residuals.
Definition: tmb_core.hpp:441
#define TMB_OBJECTIVE_PTR
Pointer to objective function used by DATA and PARAMETER macros.
Definition: tmb_core.hpp:208

Declare an indicator vector 'name' of same shape as 'obs'. By default, the indicator vector is filled with ones indicating that all observations are enabled.

diff --git a/group__matrix__functions.html b/group__matrix__functions.html index 2a263b603..57db17378 100644 --- a/group__matrix__functions.html +++ b/group__matrix__functions.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/group__parallel.html b/group__parallel.html index 1e1e186d9..24f210aa4 100644 --- a/group__parallel.html +++ b/group__parallel.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/group__special__functions.html b/group__special__functions.html index 6cd4091a1..f86e2ae7f 100644 --- a/group__special__functions.html +++ b/group__special__functions.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/hierarchy.html b/hierarchy.html index 290c5d89a..4f2e03b1f 100644 --- a/hierarchy.html +++ b/hierarchy.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/hmm_8cpp-example.html b/hmm_8cpp-example.html index ac3fa6143..2fd284102 100644 --- a/hmm_8cpp-example.html +++ b/hmm_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/i1mach_8cpp_source.html b/i1mach_8cpp_source.html index 0d783c06c..e75166e38 100644 --- a/i1mach_8cpp_source.html +++ b/i1mach_8cpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/index.html b/index.html index 19b68e9b8..09b96a7ed 100644 --- a/index.html +++ b/index.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/integrate_8cpp_source.html b/integrate_8cpp_source.html index dcc06bb6b..a6703c391 100644 --- a/integrate_8cpp_source.html +++ b/integrate_8cpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/interpol_8hpp.html b/interpol_8hpp.html index 181f6ae9a..ae856a684 100644 --- a/interpol_8hpp.html +++ b/interpol_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/interpol_8hpp_source.html b/interpol_8hpp_source.html index b87356aa7..86e23d306 100644 --- a/interpol_8hpp_source.html +++ b/interpol_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/kronecker_8hpp.html b/kronecker_8hpp.html index 2cc8afb90..762a73a9d 100644 --- a/kronecker_8hpp.html +++ b/kronecker_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/kronecker_8hpp_source.html b/kronecker_8hpp_source.html index 5547ab390..729926149 100644 --- a/kronecker_8hpp_source.html +++ b/kronecker_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/lambert_8cpp_source.html b/lambert_8cpp_source.html index dff5ff9ff..5c2336c6e 100644 --- a/lambert_8cpp_source.html +++ b/lambert_8cpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/laplace_8cpp-example.html b/laplace_8cpp-example.html index c424dbe66..26810eb29 100644 --- a/laplace_8cpp-example.html +++ b/laplace_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/lgamma_8cpp_source.html b/lgamma_8cpp_source.html index 4dc7cee78..0372ebda0 100644 --- a/lgamma_8cpp_source.html +++ b/lgamma_8cpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/lgamma_8hpp.html b/lgamma_8hpp.html index 7358df707..80a830e47 100644 --- a/lgamma_8hpp.html +++ b/lgamma_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/lgamma_8hpp_source.html b/lgamma_8hpp_source.html index 036d17f6b..33fbdcb9e 100644 --- a/lgamma_8hpp_source.html +++ b/lgamma_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/lgammacor_8cpp_source.html b/lgammacor_8cpp_source.html index 27a02ba6f..8e9b7604a 100644 --- a/lgammacor_8cpp_source.html +++ b/lgammacor_8cpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/linreg_8cpp-example.html b/linreg_8cpp-example.html index 84738891b..e041a4930 100644 --- a/linreg_8cpp-example.html +++ b/linreg_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/linreg_8cpp_source.html b/linreg_8cpp_source.html index e12a172bb..c3ae21a77 100644 --- a/linreg_8cpp_source.html +++ b/linreg_8cpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/linreg_parallel_8cpp-example.html b/linreg_parallel_8cpp-example.html index e1c70fc8e..851dd7ade 100644 --- a/linreg_parallel_8cpp-example.html +++ b/linreg_parallel_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/longlinreg_8cpp-example.html b/longlinreg_8cpp-example.html index e3b914386..353d3ce32 100644 --- a/longlinreg_8cpp-example.html +++ b/longlinreg_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/lr_test_8cpp-example.html b/lr_test_8cpp-example.html index 2b803dcd4..da9b0fa31 100644 --- a/lr_test_8cpp-example.html +++ b/lr_test_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/mainpage_8md_source.html b/mainpage_8md_source.html index 164b1daf3..0afb2aee0 100644 --- a/mainpage_8md_source.html +++ b/mainpage_8md_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/mask_8hpp_source.html b/mask_8hpp_source.html index 11ba8f580..046fc73e6 100644 --- a/mask_8hpp_source.html +++ b/mask_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/matern_8cpp-example.html b/matern_8cpp-example.html index 6c6fb739d..221a473b2 100644 --- a/matern_8cpp-example.html +++ b/matern_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/matexp_8hpp.html b/matexp_8hpp.html index 3f85355cb..32e0745b7 100644 --- a/matexp_8hpp.html +++ b/matexp_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/matexp_8hpp_source.html b/matexp_8hpp_source.html index f2927a214..a45056891 100644 --- a/matexp_8hpp_source.html +++ b/matexp_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/matrix_arrays_8cpp-example.html b/matrix_arrays_8cpp-example.html index fb55597fe..6ecf4f1bc 100644 --- a/matrix_arrays_8cpp-example.html +++ b/matrix_arrays_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/md_04-CovStruct.html b/md_04-CovStruct.html index 0690f7ba0..9c110a956 100644 --- a/md_04-CovStruct.html +++ b/md_04-CovStruct.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/md_08-Parallelization.html b/md_08-Parallelization.html index ee4fddfc1..693cc1a0f 100644 --- a/md_08-Parallelization.html +++ b/md_08-Parallelization.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/modules.html b/modules.html index a8882db14..e45c1fd39 100644 --- a/modules.html +++ b/modules.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/multivariate_distributions_8cpp-example.html b/multivariate_distributions_8cpp-example.html index 592c49702..1828b2739 100644 --- a/multivariate_distributions_8cpp-example.html +++ b/multivariate_distributions_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/mvrw_8cpp-example.html b/mvrw_8cpp-example.html index 6426a6911..4980ac271 100644 --- a/mvrw_8cpp-example.html +++ b/mvrw_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/mvrw_sparse_8cpp-example.html b/mvrw_sparse_8cpp-example.html index f1edcd7b8..54ea418b1 100644 --- a/mvrw_sparse_8cpp-example.html +++ b/mvrw_sparse_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/namespaceR__inla.html b/namespaceR__inla.html index 71d0272da..403aed5ba 100644 --- a/namespaceR__inla.html +++ b/namespaceR__inla.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/namespaceTMBad.html b/namespaceTMBad.html index 5bd149d74..f13c37c3a 100644 --- a/namespaceTMBad.html +++ b/namespaceTMBad.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/namespaceatomic.html b/namespaceatomic.html index 322bc51ca..d4bca9e4e 100644 --- a/namespaceatomic.html +++ b/namespaceatomic.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/namespaceautodiff.html b/namespaceautodiff.html index e85b3f831..499e61d30 100644 --- a/namespaceautodiff.html +++ b/namespaceautodiff.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/namespacedensity.html b/namespacedensity.html index 826d900d5..39fb0c517 100644 --- a/namespacedensity.html +++ b/namespacedensity.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/namespacegauss__kronrod.html b/namespacegauss__kronrod.html index 6f4935a55..441091075 100644 --- a/namespacegauss__kronrod.html +++ b/namespacegauss__kronrod.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/namespacemembers.html b/namespacemembers.html index f0bdd6024..06689f075 100644 --- a/namespacemembers.html +++ b/namespacemembers.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/namespacemembers_enum.html b/namespacemembers_enum.html index d4b9bb799..69c64bcdf 100644 --- a/namespacemembers_enum.html +++ b/namespacemembers_enum.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/namespacemembers_func.html b/namespacemembers_func.html index 7eda5a9ba..b3c628a0f 100644 --- a/namespacemembers_func.html +++ b/namespacemembers_func.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/namespacenewton.html b/namespacenewton.html index 6eaede80e..85dee7da6 100644 --- a/namespacenewton.html +++ b/namespacenewton.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/namespaceradix.html b/namespaceradix.html index 2f0807c59..32b2f7441 100644 --- a/namespaceradix.html +++ b/namespaceradix.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/namespaceromberg.html b/namespaceromberg.html index 1e3c41595..fd44beae8 100644 --- a/namespaceromberg.html +++ b/namespaceromberg.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/namespaces.html b/namespaces.html index b618fdbab..965f9a740 100644 --- a/namespaces.html +++ b/namespaces.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/namespacesparse__matrix__exponential.html b/namespacesparse__matrix__exponential.html index 68c36603a..1bd6fef59 100644 --- a/namespacesparse__matrix__exponential.html +++ b/namespacesparse__matrix__exponential.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/namespacetmbutils.html b/namespacetmbutils.html index 7e3b955da..b35756236 100644 --- a/namespacetmbutils.html +++ b/namespacetmbutils.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/namespacevectorize.html b/namespacevectorize.html index 0494d2010..1dec0f944 100644 --- a/namespacevectorize.html +++ b/namespacevectorize.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/nan_error_ex_8cpp-example.html b/nan_error_ex_8cpp-example.html index 60cb2a4cb..c4ffc77c1 100644 --- a/nan_error_ex_8cpp-example.html +++ b/nan_error_ex_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/newton_8hpp_source.html b/newton_8hpp_source.html index 25a08f3d1..faf9f1986 100644 --- a/newton_8hpp_source.html +++ b/newton_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/nmath_8h_source.html b/nmath_8h_source.html index 7e08e8d6f..a3c0ad136 100644 --- a/nmath_8h_source.html +++ b/nmath_8h_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/nmix_8cpp-example.html b/nmix_8cpp-example.html index 8590abb3d..34fb4a09d 100644 --- a/nmix_8cpp-example.html +++ b/nmix_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/orange_big_8cpp-example.html b/orange_big_8cpp-example.html index 9f5e818c1..122d88445 100644 --- a/orange_big_8cpp-example.html +++ b/orange_big_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/order_8hpp_source.html b/order_8hpp_source.html index a7eb43c1f..3e26c8f2c 100644 --- a/order_8hpp_source.html +++ b/order_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/pages.html b/pages.html index 74f0d1d45..0b2e80744 100644 --- a/pages.html +++ b/pages.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/pbeta_8cpp_source.html b/pbeta_8cpp_source.html index 955d1c699..86b412d03 100644 --- a/pbeta_8cpp_source.html +++ b/pbeta_8cpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/pbeta_8hpp_source.html b/pbeta_8hpp_source.html index 9f77aabb6..e843e1811 100644 --- a/pbeta_8hpp_source.html +++ b/pbeta_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/precompile_8hpp_source.html b/precompile_8hpp_source.html index 0bf9a8759..84a7e3649 100644 --- a/precompile_8hpp_source.html +++ b/precompile_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/radix_8hpp_source.html b/radix_8hpp_source.html index f73db766e..831b9f9df 100644 --- a/radix_8hpp_source.html +++ b/radix_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/register_atomic_8cpp-example.html b/register_atomic_8cpp-example.html index 4987fb56d..d16a9cb96 100644 --- a/register_atomic_8cpp-example.html +++ b/register_atomic_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/register_atomic_parallel_8cpp-example.html b/register_atomic_parallel_8cpp-example.html index 59529dd8f..32e32a983 100644 --- a/register_atomic_parallel_8cpp-example.html +++ b/register_atomic_parallel_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/romberg_8hpp.html b/romberg_8hpp.html index 95317d818..0d4791037 100644 --- a/romberg_8hpp.html +++ b/romberg_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/romberg_8hpp_source.html b/romberg_8hpp_source.html index cd21b5c99..8344fff29 100644 --- a/romberg_8hpp_source.html +++ b/romberg_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/sam_8cpp-example.html b/sam_8cpp-example.html index 6cfe0eaa5..0bb2a4de4 100644 --- a/sam_8cpp-example.html +++ b/sam_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/sde_linear_8cpp-example.html b/sde_linear_8cpp-example.html index 4ba0605db..8592fe7e9 100644 --- a/sde_linear_8cpp-example.html +++ b/sde_linear_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/sdv_multi_8cpp-example.html b/sdv_multi_8cpp-example.html index 65e9442cb..b0510f4f2 100644 --- a/sdv_multi_8cpp-example.html +++ b/sdv_multi_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/sdv_multi_compact_8cpp-example.html b/sdv_multi_compact_8cpp-example.html index 618575523..c81e079b0 100644 --- a/sdv_multi_compact_8cpp-example.html +++ b/sdv_multi_compact_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/simplicial__inverse__subset_8hpp_source.html b/simplicial__inverse__subset_8hpp_source.html index 5f1e8c21b..8f3a66e6c 100644 --- a/simplicial__inverse__subset_8hpp_source.html +++ b/simplicial__inverse__subset_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/socatt_8cpp-example.html b/socatt_8cpp-example.html index 9ff842673..9ab2f9c59 100644 --- a/socatt_8cpp-example.html +++ b/socatt_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/sparse__matrix__exponential_8hpp.html b/sparse__matrix__exponential_8hpp.html index df0619062..3937330f8 100644 --- a/sparse__matrix__exponential_8hpp.html +++ b/sparse__matrix__exponential_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/sparse__matrix__exponential_8hpp_source.html b/sparse__matrix__exponential_8hpp_source.html index 8497c270d..3a3f2ca0d 100644 --- a/sparse__matrix__exponential_8hpp_source.html +++ b/sparse__matrix__exponential_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/spatial_8cpp-example.html b/spatial_8cpp-example.html index 6e13bfd68..cab501abc 100644 --- a/spatial_8cpp-example.html +++ b/spatial_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/spde_8cpp-example.html b/spde_8cpp-example.html index 27f525bcd..2f8558938 100644 --- a/spde_8cpp-example.html +++ b/spde_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/spde_aniso_8cpp-example.html b/spde_aniso_8cpp-example.html index 149fbd5a5..c170dd21c 100644 --- a/spde_aniso_8cpp-example.html +++ b/spde_aniso_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/spde_aniso_speedup_8cpp-example.html b/spde_aniso_speedup_8cpp-example.html index eb3ff689e..86ef13a46 100644 --- a/spde_aniso_speedup_8cpp-example.html +++ b/spde_aniso_speedup_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/splines_8hpp.html b/splines_8hpp.html index 6769496e5..57e3f1aa7 100644 --- a/splines_8hpp.html +++ b/splines_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/splines_8hpp_source.html b/splines_8hpp_source.html index dcc42c6f4..986e64c76 100644 --- a/splines_8hpp_source.html +++ b/splines_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/spmat_8hpp.html b/spmat_8hpp.html index f2473d0cd..c5da01770 100644 --- a/spmat_8hpp.html +++ b/spmat_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/spmat_8hpp_source.html b/spmat_8hpp_source.html index e17f1d0e7..fcef89448 100644 --- a/spmat_8hpp_source.html +++ b/spmat_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/start__parallel_8hpp_source.html b/start__parallel_8hpp_source.html index 5dcc13123..1f2b21e73 100644 --- a/start__parallel_8hpp_source.html +++ b/start__parallel_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/stirlerr_8cpp_source.html b/stirlerr_8cpp_source.html index 1e60ef155..470c0d07c 100644 --- a/stirlerr_8cpp_source.html +++ b/stirlerr_8cpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structEigen_1_1Accessible__CholmodSupernodalLLT-members.html b/structEigen_1_1Accessible__CholmodSupernodalLLT-members.html index c8b44cfe9..dd32e79d9 100644 --- a/structEigen_1_1Accessible__CholmodSupernodalLLT-members.html +++ b/structEigen_1_1Accessible__CholmodSupernodalLLT-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structEigen_1_1Accessible__CholmodSupernodalLLT.html b/structEigen_1_1Accessible__CholmodSupernodalLLT.html index a7ec106a3..6618ed099 100644 --- a/structEigen_1_1Accessible__CholmodSupernodalLLT.html +++ b/structEigen_1_1Accessible__CholmodSupernodalLLT.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structR__inla_1_1spde__aniso__t-members.html b/structR__inla_1_1spde__aniso__t-members.html index 9644e7d95..a30f714ac 100644 --- a/structR__inla_1_1spde__aniso__t-members.html +++ b/structR__inla_1_1spde__aniso__t-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structR__inla_1_1spde__aniso__t.html b/structR__inla_1_1spde__aniso__t.html index cb281002e..f97799ce1 100644 --- a/structR__inla_1_1spde__aniso__t.html +++ b/structR__inla_1_1spde__aniso__t.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structR__inla_1_1spde__t-members.html b/structR__inla_1_1spde__t-members.html index 1394f87a1..f8e546a06 100644 --- a/structR__inla_1_1spde__t-members.html +++ b/structR__inla_1_1spde__t-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structR__inla_1_1spde__t.html b/structR__inla_1_1spde__t.html index 897b1b584..e268e5bb7 100644 --- a/structR__inla_1_1spde__t.html +++ b/structR__inla_1_1spde__t.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1ADFun-members.html b/structTMBad_1_1ADFun-members.html index 14a5683f2..513e1b257 100644 --- a/structTMBad_1_1ADFun-members.html +++ b/structTMBad_1_1ADFun-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1ADFun.html b/structTMBad_1_1ADFun.html index 982db53fe..ac59288f7 100644 --- a/structTMBad_1_1ADFun.html +++ b/structTMBad_1_1ADFun.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1ADFun__packed-members.html b/structTMBad_1_1ADFun__packed-members.html index d6e800e84..e9cec65a7 100644 --- a/structTMBad_1_1ADFun__packed-members.html +++ b/structTMBad_1_1ADFun__packed-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1ADFun__packed.html b/structTMBad_1_1ADFun__packed.html index 109846f04..c62ef2132 100644 --- a/structTMBad_1_1ADFun__packed.html +++ b/structTMBad_1_1ADFun__packed.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1Args-members.html b/structTMBad_1_1Args-members.html index 87376e19f..2910e1bb1 100644 --- a/structTMBad_1_1Args-members.html +++ b/structTMBad_1_1Args-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1Args.html b/structTMBad_1_1Args.html index 2466190fc..8b71ef775 100644 --- a/structTMBad_1_1Args.html +++ b/structTMBad_1_1Args.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1AtomOp-members.html b/structTMBad_1_1AtomOp-members.html index 8d14db5af..dd7354510 100644 --- a/structTMBad_1_1AtomOp-members.html +++ b/structTMBad_1_1AtomOp-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1AtomOp.html b/structTMBad_1_1AtomOp.html index 756935f76..6d00387ad 100644 --- a/structTMBad_1_1AtomOp.html +++ b/structTMBad_1_1AtomOp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1Decomp2-members.html b/structTMBad_1_1Decomp2-members.html index 9d0c60efa..2eaf29081 100644 --- a/structTMBad_1_1Decomp2-members.html +++ b/structTMBad_1_1Decomp2-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1Decomp2.html b/structTMBad_1_1Decomp2.html index 2b73fd4b8..d068596b1 100644 --- a/structTMBad_1_1Decomp2.html +++ b/structTMBad_1_1Decomp2.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1Decomp3-members.html b/structTMBad_1_1Decomp3-members.html index fa692d0b2..0ad55bc29 100644 --- a/structTMBad_1_1Decomp3-members.html +++ b/structTMBad_1_1Decomp3-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1Decomp3.html b/structTMBad_1_1Decomp3.html index a29e67020..368751548 100644 --- a/structTMBad_1_1Decomp3.html +++ b/structTMBad_1_1Decomp3.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1ForwardArgs-members.html b/structTMBad_1_1ForwardArgs-members.html index 5f1acad75..f8f291e06 100644 --- a/structTMBad_1_1ForwardArgs-members.html +++ b/structTMBad_1_1ForwardArgs-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1ForwardArgs.html b/structTMBad_1_1ForwardArgs.html index 9c3c3bc7b..fe41afa41 100644 --- a/structTMBad_1_1ForwardArgs.html +++ b/structTMBad_1_1ForwardArgs.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1IndirectAccessor-members.html b/structTMBad_1_1IndirectAccessor-members.html index 2a0d3d809..e9d9fc0e2 100644 --- a/structTMBad_1_1IndirectAccessor-members.html +++ b/structTMBad_1_1IndirectAccessor-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1IndirectAccessor.html b/structTMBad_1_1IndirectAccessor.html index 473f8d172..6b2e4e72e 100644 --- a/structTMBad_1_1IndirectAccessor.html +++ b/structTMBad_1_1IndirectAccessor.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1Integral-members.html b/structTMBad_1_1Integral-members.html index 6ef84adcc..fc7fc8a35 100644 --- a/structTMBad_1_1Integral-members.html +++ b/structTMBad_1_1Integral-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1Integral.html b/structTMBad_1_1Integral.html index 419993122..3e6ac8843 100644 --- a/structTMBad_1_1Integral.html +++ b/structTMBad_1_1Integral.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1PackOp-members.html b/structTMBad_1_1PackOp-members.html index b63c1d50c..60f97687a 100644 --- a/structTMBad_1_1PackOp-members.html +++ b/structTMBad_1_1PackOp-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1PackOp.html b/structTMBad_1_1PackOp.html index 0225c48e0..5eda1475f 100644 --- a/structTMBad_1_1PackOp.html +++ b/structTMBad_1_1PackOp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1PackWrap-members.html b/structTMBad_1_1PackWrap-members.html index 418511600..ba6d42536 100644 --- a/structTMBad_1_1PackWrap-members.html +++ b/structTMBad_1_1PackWrap-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1PackWrap.html b/structTMBad_1_1PackWrap.html index f45f02d46..2edc16fde 100644 --- a/structTMBad_1_1PackWrap.html +++ b/structTMBad_1_1PackWrap.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1ParalOp-members.html b/structTMBad_1_1ParalOp-members.html index 71210cac4..9cc217ae1 100644 --- a/structTMBad_1_1ParalOp-members.html +++ b/structTMBad_1_1ParalOp-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1ParalOp.html b/structTMBad_1_1ParalOp.html index c5e461f24..00329fb8b 100644 --- a/structTMBad_1_1ParalOp.html +++ b/structTMBad_1_1ParalOp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1ParametersChanged-members.html b/structTMBad_1_1ParametersChanged-members.html index dc2c621cb..c66bfe19c 100644 --- a/structTMBad_1_1ParametersChanged-members.html +++ b/structTMBad_1_1ParametersChanged-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1ParametersChanged.html b/structTMBad_1_1ParametersChanged.html index 0f6e2e4f8..f16c1ed1d 100644 --- a/structTMBad_1_1ParametersChanged.html +++ b/structTMBad_1_1ParametersChanged.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1ReverseArgs-members.html b/structTMBad_1_1ReverseArgs-members.html index 1999da289..462b5a51c 100644 --- a/structTMBad_1_1ReverseArgs-members.html +++ b/structTMBad_1_1ReverseArgs-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1ReverseArgs.html b/structTMBad_1_1ReverseArgs.html index 23d0e65d8..e133651be 100644 --- a/structTMBad_1_1ReverseArgs.html +++ b/structTMBad_1_1ReverseArgs.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1SegmentRef-members.html b/structTMBad_1_1SegmentRef-members.html index 63c35f1d2..d4ca7c435 100644 --- a/structTMBad_1_1SegmentRef-members.html +++ b/structTMBad_1_1SegmentRef-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1SegmentRef.html b/structTMBad_1_1SegmentRef.html index 577eb8982..9d81c94d1 100644 --- a/structTMBad_1_1SegmentRef.html +++ b/structTMBad_1_1SegmentRef.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1SpJacFun__config-members.html b/structTMBad_1_1SpJacFun__config-members.html index aa48ca87e..fd51d2140 100644 --- a/structTMBad_1_1SpJacFun__config-members.html +++ b/structTMBad_1_1SpJacFun__config-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1SpJacFun__config.html b/structTMBad_1_1SpJacFun__config.html index e6ae02e36..452424e4b 100644 --- a/structTMBad_1_1SpJacFun__config.html +++ b/structTMBad_1_1SpJacFun__config.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1StdWrap-members.html b/structTMBad_1_1StdWrap-members.html index 2ea8aaf6f..5a702207a 100644 --- a/structTMBad_1_1StdWrap-members.html +++ b/structTMBad_1_1StdWrap-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1StdWrap.html b/structTMBad_1_1StdWrap.html index d193eb633..7e025c9db 100644 --- a/structTMBad_1_1StdWrap.html +++ b/structTMBad_1_1StdWrap.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1UnpkOp-members.html b/structTMBad_1_1UnpkOp-members.html index 6b88b22e3..2dd87f1e3 100644 --- a/structTMBad_1_1UnpkOp-members.html +++ b/structTMBad_1_1UnpkOp-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1UnpkOp.html b/structTMBad_1_1UnpkOp.html index 73692d7aa..f4fb886c1 100644 --- a/structTMBad_1_1UnpkOp.html +++ b/structTMBad_1_1UnpkOp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1ad__plain__index-members.html b/structTMBad_1_1ad__plain__index-members.html index 66332b640..623331e40 100644 --- a/structTMBad_1_1ad__plain__index-members.html +++ b/structTMBad_1_1ad__plain__index-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1ad__plain__index.html b/structTMBad_1_1ad__plain__index.html index 3371d4f19..e9be344e5 100644 --- a/structTMBad_1_1ad__plain__index.html +++ b/structTMBad_1_1ad__plain__index.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1adaptive-members.html b/structTMBad_1_1adaptive-members.html index df132e42b..479775821 100644 --- a/structTMBad_1_1adaptive-members.html +++ b/structTMBad_1_1adaptive-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1adaptive.html b/structTMBad_1_1adaptive.html index 4d3ad4d29..1f84e5f6a 100644 --- a/structTMBad_1_1adaptive.html +++ b/structTMBad_1_1adaptive.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1autopar-members.html b/structTMBad_1_1autopar-members.html index 3f0ba9dd9..08705ab29 100644 --- a/structTMBad_1_1autopar-members.html +++ b/structTMBad_1_1autopar-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1autopar.html b/structTMBad_1_1autopar.html index dbf15c024..e68cb6589 100644 --- a/structTMBad_1_1autopar.html +++ b/structTMBad_1_1autopar.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1clique-members.html b/structTMBad_1_1clique-members.html index 67853e049..17b7f63a3 100644 --- a/structTMBad_1_1clique-members.html +++ b/structTMBad_1_1clique-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1clique.html b/structTMBad_1_1clique.html index dae7e217e..65b51904d 100644 --- a/structTMBad_1_1clique.html +++ b/structTMBad_1_1clique.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1control-members.html b/structTMBad_1_1control-members.html index 317db116b..c7667638e 100644 --- a/structTMBad_1_1control-members.html +++ b/structTMBad_1_1control-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1control.html b/structTMBad_1_1control.html index 9e8d7416a..3875728d3 100644 --- a/structTMBad_1_1control.html +++ b/structTMBad_1_1control.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1forbid__remap-members.html b/structTMBad_1_1forbid__remap-members.html index 59bc11f59..02ffc9a40 100644 --- a/structTMBad_1_1forbid__remap-members.html +++ b/structTMBad_1_1forbid__remap-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1forbid__remap.html b/structTMBad_1_1forbid__remap.html index 6f5374604..c94fb8029 100644 --- a/structTMBad_1_1forbid__remap.html +++ b/structTMBad_1_1forbid__remap.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global-members.html b/structTMBad_1_1global-members.html index 522567e65..251361043 100644 --- a/structTMBad_1_1global-members.html +++ b/structTMBad_1_1global-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global.html b/structTMBad_1_1global.html index b887e98b0..c58c434a8 100644 --- a/structTMBad_1_1global.html +++ b/structTMBad_1_1global.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1AddDependencies-members.html b/structTMBad_1_1global_1_1AddDependencies-members.html index 40408f596..821dbed21 100644 --- a/structTMBad_1_1global_1_1AddDependencies-members.html +++ b/structTMBad_1_1global_1_1AddDependencies-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1AddDependencies.html b/structTMBad_1_1global_1_1AddDependencies.html index f4b8e5874..a03fac3bf 100644 --- a/structTMBad_1_1global_1_1AddDependencies.html +++ b/structTMBad_1_1global_1_1AddDependencies.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1AddForwardFromEval.html b/structTMBad_1_1global_1_1AddForwardFromEval.html index a44d96b38..0641253c1 100644 --- a/structTMBad_1_1global_1_1AddForwardFromEval.html +++ b/structTMBad_1_1global_1_1AddForwardFromEval.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1AddForwardFromEval_3_01OperatorBase_00_011_01_4-members.html b/structTMBad_1_1global_1_1AddForwardFromEval_3_01OperatorBase_00_011_01_4-members.html index cedd1fccd..7124216f7 100644 --- a/structTMBad_1_1global_1_1AddForwardFromEval_3_01OperatorBase_00_011_01_4-members.html +++ b/structTMBad_1_1global_1_1AddForwardFromEval_3_01OperatorBase_00_011_01_4-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1AddForwardFromEval_3_01OperatorBase_00_011_01_4.html b/structTMBad_1_1global_1_1AddForwardFromEval_3_01OperatorBase_00_011_01_4.html index cb89bf869..2586d4c38 100644 --- a/structTMBad_1_1global_1_1AddForwardFromEval_3_01OperatorBase_00_011_01_4.html +++ b/structTMBad_1_1global_1_1AddForwardFromEval_3_01OperatorBase_00_011_01_4.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1AddForwardFromEval_3_01OperatorBase_00_012_01_4-members.html b/structTMBad_1_1global_1_1AddForwardFromEval_3_01OperatorBase_00_012_01_4-members.html index edb1925e0..3d5d5ce67 100644 --- a/structTMBad_1_1global_1_1AddForwardFromEval_3_01OperatorBase_00_012_01_4-members.html +++ b/structTMBad_1_1global_1_1AddForwardFromEval_3_01OperatorBase_00_012_01_4-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1AddForwardFromEval_3_01OperatorBase_00_012_01_4.html b/structTMBad_1_1global_1_1AddForwardFromEval_3_01OperatorBase_00_012_01_4.html index c2c65ce95..940ab0554 100644 --- a/structTMBad_1_1global_1_1AddForwardFromEval_3_01OperatorBase_00_012_01_4.html +++ b/structTMBad_1_1global_1_1AddForwardFromEval_3_01OperatorBase_00_012_01_4.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1AddForwardIncrReverseDecr-members.html b/structTMBad_1_1global_1_1AddForwardIncrReverseDecr-members.html index 1645cc592..30c051738 100644 --- a/structTMBad_1_1global_1_1AddForwardIncrReverseDecr-members.html +++ b/structTMBad_1_1global_1_1AddForwardIncrReverseDecr-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1AddForwardIncrReverseDecr.html b/structTMBad_1_1global_1_1AddForwardIncrReverseDecr.html index 3b9c79677..cd4792498 100644 --- a/structTMBad_1_1global_1_1AddForwardIncrReverseDecr.html +++ b/structTMBad_1_1global_1_1AddForwardIncrReverseDecr.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1AddForwardMarkReverseMark-members.html b/structTMBad_1_1global_1_1AddForwardMarkReverseMark-members.html index a64496a09..f880bf887 100644 --- a/structTMBad_1_1global_1_1AddForwardMarkReverseMark-members.html +++ b/structTMBad_1_1global_1_1AddForwardMarkReverseMark-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1AddForwardMarkReverseMark.html b/structTMBad_1_1global_1_1AddForwardMarkReverseMark.html index d7b23f56f..7e34be089 100644 --- a/structTMBad_1_1global_1_1AddForwardMarkReverseMark.html +++ b/structTMBad_1_1global_1_1AddForwardMarkReverseMark.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1AddForwardReverse-members.html b/structTMBad_1_1global_1_1AddForwardReverse-members.html index cbe565b2f..e8f1ebd11 100644 --- a/structTMBad_1_1global_1_1AddForwardReverse-members.html +++ b/structTMBad_1_1global_1_1AddForwardReverse-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1AddForwardReverse.html b/structTMBad_1_1global_1_1AddForwardReverse.html index 791fed973..fd955fd0a 100644 --- a/structTMBad_1_1global_1_1AddForwardReverse.html +++ b/structTMBad_1_1global_1_1AddForwardReverse.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1AddIncrementDecrement-members.html b/structTMBad_1_1global_1_1AddIncrementDecrement-members.html index 581d14995..79ad848bc 100644 --- a/structTMBad_1_1global_1_1AddIncrementDecrement-members.html +++ b/structTMBad_1_1global_1_1AddIncrementDecrement-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1AddIncrementDecrement.html b/structTMBad_1_1global_1_1AddIncrementDecrement.html index 276c8d92e..e13ac0d2e 100644 --- a/structTMBad_1_1global_1_1AddIncrementDecrement.html +++ b/structTMBad_1_1global_1_1AddIncrementDecrement.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1AddInputSizeOutputSize-members.html b/structTMBad_1_1global_1_1AddInputSizeOutputSize-members.html index cf1be237e..b92e85e88 100644 --- a/structTMBad_1_1global_1_1AddInputSizeOutputSize-members.html +++ b/structTMBad_1_1global_1_1AddInputSizeOutputSize-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1AddInputSizeOutputSize.html b/structTMBad_1_1global_1_1AddInputSizeOutputSize.html index ce6ff555b..e24e83b5b 100644 --- a/structTMBad_1_1global_1_1AddInputSizeOutputSize.html +++ b/structTMBad_1_1global_1_1AddInputSizeOutputSize.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1CPL-members.html b/structTMBad_1_1global_1_1CPL-members.html index 0c0a95655..e6463a468 100644 --- a/structTMBad_1_1global_1_1CPL-members.html +++ b/structTMBad_1_1global_1_1CPL-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1CPL.html b/structTMBad_1_1global_1_1CPL.html index ca599e67b..e18ce21e0 100644 --- a/structTMBad_1_1global_1_1CPL.html +++ b/structTMBad_1_1global_1_1CPL.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1Complete-members.html b/structTMBad_1_1global_1_1Complete-members.html index 3cfb5226b..65e951a67 100644 --- a/structTMBad_1_1global_1_1Complete-members.html +++ b/structTMBad_1_1global_1_1Complete-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1Complete.html b/structTMBad_1_1global_1_1Complete.html index ed5de7a19..754a1cf25 100644 --- a/structTMBad_1_1global_1_1Complete.html +++ b/structTMBad_1_1global_1_1Complete.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1DynamicOperator-members.html b/structTMBad_1_1global_1_1DynamicOperator-members.html index 8fa45e567..211e84c82 100644 --- a/structTMBad_1_1global_1_1DynamicOperator-members.html +++ b/structTMBad_1_1global_1_1DynamicOperator-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1DynamicOperator.html b/structTMBad_1_1global_1_1DynamicOperator.html index 7bbc3b94a..edb527d3c 100644 --- a/structTMBad_1_1global_1_1DynamicOperator.html +++ b/structTMBad_1_1global_1_1DynamicOperator.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1DynamicOutputOperator-members.html b/structTMBad_1_1global_1_1DynamicOutputOperator-members.html index e11bcc531..e68fd3e86 100644 --- a/structTMBad_1_1global_1_1DynamicOutputOperator-members.html +++ b/structTMBad_1_1global_1_1DynamicOutputOperator-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1DynamicOutputOperator.html b/structTMBad_1_1global_1_1DynamicOutputOperator.html index 52aedea8e..f3e6be493 100644 --- a/structTMBad_1_1global_1_1DynamicOutputOperator.html +++ b/structTMBad_1_1global_1_1DynamicOutputOperator.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1Fused-members.html b/structTMBad_1_1global_1_1Fused-members.html index 554787e3c..da33993da 100644 --- a/structTMBad_1_1global_1_1Fused-members.html +++ b/structTMBad_1_1global_1_1Fused-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1Fused.html b/structTMBad_1_1global_1_1Fused.html index de680669d..8427b0b10 100644 --- a/structTMBad_1_1global_1_1Fused.html +++ b/structTMBad_1_1global_1_1Fused.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1NullOp-members.html b/structTMBad_1_1global_1_1NullOp-members.html index e42bd0f14..608f72269 100644 --- a/structTMBad_1_1global_1_1NullOp-members.html +++ b/structTMBad_1_1global_1_1NullOp-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1NullOp.html b/structTMBad_1_1global_1_1NullOp.html index 397433fe9..0a2b47980 100644 --- a/structTMBad_1_1global_1_1NullOp.html +++ b/structTMBad_1_1global_1_1NullOp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1NullOp2-members.html b/structTMBad_1_1global_1_1NullOp2-members.html index cbde94705..16a1cf7bc 100644 --- a/structTMBad_1_1global_1_1NullOp2-members.html +++ b/structTMBad_1_1global_1_1NullOp2-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1NullOp2.html b/structTMBad_1_1global_1_1NullOp2.html index 56a076b84..88fb32d70 100644 --- a/structTMBad_1_1global_1_1NullOp2.html +++ b/structTMBad_1_1global_1_1NullOp2.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1Operator-members.html b/structTMBad_1_1global_1_1Operator-members.html index 04c0994bf..8d2697214 100644 --- a/structTMBad_1_1global_1_1Operator-members.html +++ b/structTMBad_1_1global_1_1Operator-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1Operator.html b/structTMBad_1_1global_1_1Operator.html index 0d6edb75e..c1dc72b1d 100644 --- a/structTMBad_1_1global_1_1Operator.html +++ b/structTMBad_1_1global_1_1Operator.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1OperatorPure-members.html b/structTMBad_1_1global_1_1OperatorPure-members.html index 7d02f31aa..96435e6bc 100644 --- a/structTMBad_1_1global_1_1OperatorPure-members.html +++ b/structTMBad_1_1global_1_1OperatorPure-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1OperatorPure.html b/structTMBad_1_1global_1_1OperatorPure.html index 24ecf9908..fbbbbf198 100644 --- a/structTMBad_1_1global_1_1OperatorPure.html +++ b/structTMBad_1_1global_1_1OperatorPure.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1RefOp-members.html b/structTMBad_1_1global_1_1RefOp-members.html index 45c40e37b..a82d749a6 100644 --- a/structTMBad_1_1global_1_1RefOp-members.html +++ b/structTMBad_1_1global_1_1RefOp-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1RefOp.html b/structTMBad_1_1global_1_1RefOp.html index 0a453a46b..7815d356f 100644 --- a/structTMBad_1_1global_1_1RefOp.html +++ b/structTMBad_1_1global_1_1RefOp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1ReferenceCounter-members.html b/structTMBad_1_1global_1_1ReferenceCounter-members.html index 553acffcd..b5719e213 100644 --- a/structTMBad_1_1global_1_1ReferenceCounter-members.html +++ b/structTMBad_1_1global_1_1ReferenceCounter-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1ReferenceCounter.html b/structTMBad_1_1global_1_1ReferenceCounter.html index 3afb28f0f..e731f561b 100644 --- a/structTMBad_1_1global_1_1ReferenceCounter.html +++ b/structTMBad_1_1global_1_1ReferenceCounter.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1Rep-members.html b/structTMBad_1_1global_1_1Rep-members.html index 53165bb35..c4e8e6e10 100644 --- a/structTMBad_1_1global_1_1Rep-members.html +++ b/structTMBad_1_1global_1_1Rep-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1Rep.html b/structTMBad_1_1global_1_1Rep.html index e246840cd..a117789b0 100644 --- a/structTMBad_1_1global_1_1Rep.html +++ b/structTMBad_1_1global_1_1Rep.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1RepCompress-members.html b/structTMBad_1_1global_1_1RepCompress-members.html index 435ef5568..42927e4ab 100644 --- a/structTMBad_1_1global_1_1RepCompress-members.html +++ b/structTMBad_1_1global_1_1RepCompress-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1RepCompress.html b/structTMBad_1_1global_1_1RepCompress.html index 66f3a7515..6f452f1d6 100644 --- a/structTMBad_1_1global_1_1RepCompress.html +++ b/structTMBad_1_1global_1_1RepCompress.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1ZeroOp-members.html b/structTMBad_1_1global_1_1ZeroOp-members.html index 7721ec85c..944be80c1 100644 --- a/structTMBad_1_1global_1_1ZeroOp-members.html +++ b/structTMBad_1_1global_1_1ZeroOp-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1ZeroOp.html b/structTMBad_1_1global_1_1ZeroOp.html index 6fa5c4cc9..3d5eb09f9 100644 --- a/structTMBad_1_1global_1_1ZeroOp.html +++ b/structTMBad_1_1global_1_1ZeroOp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1ad__aug-members.html b/structTMBad_1_1global_1_1ad__aug-members.html index 797c1eb0c..4699e70fe 100644 --- a/structTMBad_1_1global_1_1ad__aug-members.html +++ b/structTMBad_1_1global_1_1ad__aug-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1ad__aug.html b/structTMBad_1_1global_1_1ad__aug.html index c5626c83f..93b25c90b 100644 --- a/structTMBad_1_1global_1_1ad__aug.html +++ b/structTMBad_1_1global_1_1ad__aug.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1ad__plain_1_1CopyOp-members.html b/structTMBad_1_1global_1_1ad__plain_1_1CopyOp-members.html index f1f353c33..df4c2a6cc 100644 --- a/structTMBad_1_1global_1_1ad__plain_1_1CopyOp-members.html +++ b/structTMBad_1_1global_1_1ad__plain_1_1CopyOp-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1ad__plain_1_1CopyOp.html b/structTMBad_1_1global_1_1ad__plain_1_1CopyOp.html index 2a9dbb1af..1756d4a82 100644 --- a/structTMBad_1_1global_1_1ad__plain_1_1CopyOp.html +++ b/structTMBad_1_1global_1_1ad__plain_1_1CopyOp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1ad__plain_1_1ValOp-members.html b/structTMBad_1_1global_1_1ad__plain_1_1ValOp-members.html index 0f58fe03e..27899f774 100644 --- a/structTMBad_1_1global_1_1ad__plain_1_1ValOp-members.html +++ b/structTMBad_1_1global_1_1ad__plain_1_1ValOp-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1ad__plain_1_1ValOp.html b/structTMBad_1_1global_1_1ad__plain_1_1ValOp.html index 5cd6928b9..0596807f0 100644 --- a/structTMBad_1_1global_1_1ad__plain_1_1ValOp.html +++ b/structTMBad_1_1global_1_1ad__plain_1_1ValOp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1ad__segment-members.html b/structTMBad_1_1global_1_1ad__segment-members.html index c82d31b1b..2d104bc29 100644 --- a/structTMBad_1_1global_1_1ad__segment-members.html +++ b/structTMBad_1_1global_1_1ad__segment-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1ad__segment.html b/structTMBad_1_1global_1_1ad__segment.html index d303436b6..e602ffeff 100644 --- a/structTMBad_1_1global_1_1ad__segment.html +++ b/structTMBad_1_1global_1_1ad__segment.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1hash__config-members.html b/structTMBad_1_1global_1_1hash__config-members.html index cf8da9051..ff9c64f98 100644 --- a/structTMBad_1_1global_1_1hash__config-members.html +++ b/structTMBad_1_1global_1_1hash__config-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1hash__config.html b/structTMBad_1_1global_1_1hash__config.html index 7b1ec212f..f975ad65e 100644 --- a/structTMBad_1_1global_1_1hash__config.html +++ b/structTMBad_1_1global_1_1hash__config.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1if__else.html b/structTMBad_1_1global_1_1if__else.html index 9293db791..48afec670 100644 --- a/structTMBad_1_1global_1_1if__else.html +++ b/structTMBad_1_1global_1_1if__else.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1no__filter-members.html b/structTMBad_1_1global_1_1no__filter-members.html index da972a62c..0dd7dc1ec 100644 --- a/structTMBad_1_1global_1_1no__filter-members.html +++ b/structTMBad_1_1global_1_1no__filter-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1no__filter.html b/structTMBad_1_1global_1_1no__filter.html index cc27ddd88..8c8e58f8c 100644 --- a/structTMBad_1_1global_1_1no__filter.html +++ b/structTMBad_1_1global_1_1no__filter.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1operation__stack-members.html b/structTMBad_1_1global_1_1operation__stack-members.html index 1fd95a5c8..52fbe86b7 100644 --- a/structTMBad_1_1global_1_1operation__stack-members.html +++ b/structTMBad_1_1global_1_1operation__stack-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1operation__stack.html b/structTMBad_1_1global_1_1operation__stack.html index 56ce7d2ec..5606aaa74 100644 --- a/structTMBad_1_1global_1_1operation__stack.html +++ b/structTMBad_1_1global_1_1operation__stack.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1print__config-members.html b/structTMBad_1_1global_1_1print__config-members.html index 1fcbea4e7..aaf8365ab 100644 --- a/structTMBad_1_1global_1_1print__config-members.html +++ b/structTMBad_1_1global_1_1print__config-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1global_1_1print__config.html b/structTMBad_1_1global_1_1print__config.html index 6f09035f8..edd6aded2 100644 --- a/structTMBad_1_1global_1_1print__config.html +++ b/structTMBad_1_1global_1_1print__config.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1graph-members.html b/structTMBad_1_1graph-members.html index 535d8bee3..37ddfc75d 100644 --- a/structTMBad_1_1graph-members.html +++ b/structTMBad_1_1graph-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1graph.html b/structTMBad_1_1graph.html index d17d773c7..9ae6bafa3 100644 --- a/structTMBad_1_1graph.html +++ b/structTMBad_1_1graph.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1intervals-members.html b/structTMBad_1_1intervals-members.html index 3dfa81dcc..c7e92465b 100644 --- a/structTMBad_1_1intervals-members.html +++ b/structTMBad_1_1intervals-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1intervals.html b/structTMBad_1_1intervals.html index f05300e66..f72ca7ce4 100644 --- a/structTMBad_1_1intervals.html +++ b/structTMBad_1_1intervals.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1multivariate__index-members.html b/structTMBad_1_1multivariate__index-members.html index 65c982f67..f65fdcebc 100644 --- a/structTMBad_1_1multivariate__index-members.html +++ b/structTMBad_1_1multivariate__index-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1multivariate__index.html b/structTMBad_1_1multivariate__index.html index a0dcd47e8..d5d345cbb 100644 --- a/structTMBad_1_1multivariate__index.html +++ b/structTMBad_1_1multivariate__index.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1mvIntegral-members.html b/structTMBad_1_1mvIntegral-members.html index e937726d4..59eed8293 100644 --- a/structTMBad_1_1mvIntegral-members.html +++ b/structTMBad_1_1mvIntegral-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1mvIntegral.html b/structTMBad_1_1mvIntegral.html index 275655ccd..0df574a73 100644 --- a/structTMBad_1_1mvIntegral.html +++ b/structTMBad_1_1mvIntegral.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1old__state-members.html b/structTMBad_1_1old__state-members.html index fa51b63b8..81c784ee9 100644 --- a/structTMBad_1_1old__state-members.html +++ b/structTMBad_1_1old__state-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1old__state.html b/structTMBad_1_1old__state.html index e7350058d..bfaab6403 100644 --- a/structTMBad_1_1old__state.html +++ b/structTMBad_1_1old__state.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1omp__shared__ptr-members.html b/structTMBad_1_1omp__shared__ptr-members.html index 687c062da..244ad0758 100644 --- a/structTMBad_1_1omp__shared__ptr-members.html +++ b/structTMBad_1_1omp__shared__ptr-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1omp__shared__ptr.html b/structTMBad_1_1omp__shared__ptr.html index b3013694a..79b0f4bd2 100644 --- a/structTMBad_1_1omp__shared__ptr.html +++ b/structTMBad_1_1omp__shared__ptr.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1op__info-members.html b/structTMBad_1_1op__info-members.html index d64239db5..cf4d111e7 100644 --- a/structTMBad_1_1op__info-members.html +++ b/structTMBad_1_1op__info-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1op__info.html b/structTMBad_1_1op__info.html index 93a31aeb7..86a082540 100644 --- a/structTMBad_1_1op__info.html +++ b/structTMBad_1_1op__info.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1period-members.html b/structTMBad_1_1period-members.html index 2ae49940d..7b53c01c8 100644 --- a/structTMBad_1_1period-members.html +++ b/structTMBad_1_1period-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1period.html b/structTMBad_1_1period.html index b49ec081d..d9292d41f 100644 --- a/structTMBad_1_1period.html +++ b/structTMBad_1_1period.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1periodic-members.html b/structTMBad_1_1periodic-members.html index 4489ba737..3f8710ebc 100644 --- a/structTMBad_1_1periodic-members.html +++ b/structTMBad_1_1periodic-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1periodic.html b/structTMBad_1_1periodic.html index c59fe13fa..48c7640f1 100644 --- a/structTMBad_1_1periodic.html +++ b/structTMBad_1_1periodic.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1retaping__derivative__table-members.html b/structTMBad_1_1retaping__derivative__table-members.html index cf5e7e306..0bb8b12a4 100644 --- a/structTMBad_1_1retaping__derivative__table-members.html +++ b/structTMBad_1_1retaping__derivative__table-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1retaping__derivative__table.html b/structTMBad_1_1retaping__derivative__table.html index 8776362d5..8093784c1 100644 --- a/structTMBad_1_1retaping__derivative__table.html +++ b/structTMBad_1_1retaping__derivative__table.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1segment__ref-members.html b/structTMBad_1_1segment__ref-members.html index eef20aba5..b4c564dac 100644 --- a/structTMBad_1_1segment__ref-members.html +++ b/structTMBad_1_1segment__ref-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1segment__ref.html b/structTMBad_1_1segment__ref.html index 88ee8a6b5..64b882ee7 100644 --- a/structTMBad_1_1segment__ref.html +++ b/structTMBad_1_1segment__ref.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1sequential__reduction-members.html b/structTMBad_1_1sequential__reduction-members.html index 699fe3e90..61c8a87cc 100644 --- a/structTMBad_1_1sequential__reduction-members.html +++ b/structTMBad_1_1sequential__reduction-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1sequential__reduction.html b/structTMBad_1_1sequential__reduction.html index f4bd473cc..ecd4bbb26 100644 --- a/structTMBad_1_1sequential__reduction.html +++ b/structTMBad_1_1sequential__reduction.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1sr__grid-members.html b/structTMBad_1_1sr__grid-members.html index 9cece093c..fd5bbabd0 100644 --- a/structTMBad_1_1sr__grid-members.html +++ b/structTMBad_1_1sr__grid-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1sr__grid.html b/structTMBad_1_1sr__grid.html index 4ba6a5b9d..9f8489485 100644 --- a/structTMBad_1_1sr__grid.html +++ b/structTMBad_1_1sr__grid.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1standard__derivative__table-members.html b/structTMBad_1_1standard__derivative__table-members.html index 94e9a4cb0..ae3688f6f 100644 --- a/structTMBad_1_1standard__derivative__table-members.html +++ b/structTMBad_1_1standard__derivative__table-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structTMBad_1_1standard__derivative__table.html b/structTMBad_1_1standard__derivative__table.html index 50327cd97..ad6a0642a 100644 --- a/structTMBad_1_1standard__derivative__table.html +++ b/structTMBad_1_1standard__derivative__table.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structarray-members.html b/structarray-members.html index f57e9284c..dc6ed8d73 100644 --- a/structarray-members.html +++ b/structarray-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structarray.html b/structarray.html index 673cb22c1..4c19eb874 100644 --- a/structarray.html +++ b/structarray.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structatomic_1_1AtomicGlobal-members.html b/structatomic_1_1AtomicGlobal-members.html index b63560e87..3fa5d40a7 100644 --- a/structatomic_1_1AtomicGlobal-members.html +++ b/structatomic_1_1AtomicGlobal-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structatomic_1_1AtomicGlobal.html b/structatomic_1_1AtomicGlobal.html index 2be8b3a57..b8789d0ad 100644 --- a/structatomic_1_1AtomicGlobal.html +++ b/structatomic_1_1AtomicGlobal.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structatomic_1_1AtomicLocal-members.html b/structatomic_1_1AtomicLocal-members.html index f10ce6374..1279a9d79 100644 --- a/structatomic_1_1AtomicLocal-members.html +++ b/structatomic_1_1AtomicLocal-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structatomic_1_1AtomicLocal.html b/structatomic_1_1AtomicLocal.html index 03aa7e208..c4d21d082 100644 --- a/structatomic_1_1AtomicLocal.html +++ b/structatomic_1_1AtomicLocal.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structconfig__struct-members.html b/structconfig__struct-members.html index b8d92fc29..a4678beaf 100644 --- a/structconfig__struct-members.html +++ b/structconfig__struct-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structconfig__struct.html b/structconfig__struct.html index e6a1b5abd..812ce0f6e 100644 --- a/structconfig__struct.html +++ b/structconfig__struct.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structdata__indicator-members.html b/structdata__indicator-members.html index 915708082..b734fabc1 100644 --- a/structdata__indicator-members.html +++ b/structdata__indicator-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structdata__indicator.html b/structdata__indicator.html index 6d88315a0..a69faeec8 100644 --- a/structdata__indicator.html +++ b/structdata__indicator.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structgauss__kronrod_1_1Integral-members.html b/structgauss__kronrod_1_1Integral-members.html index 34c6f770f..babee5c49 100644 --- a/structgauss__kronrod_1_1Integral-members.html +++ b/structgauss__kronrod_1_1Integral-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structgauss__kronrod_1_1Integral.html b/structgauss__kronrod_1_1Integral.html index b832b318e..9d359cb09 100644 --- a/structgauss__kronrod_1_1Integral.html +++ b/structgauss__kronrod_1_1Integral.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structgauss__kronrod_1_1control-members.html b/structgauss__kronrod_1_1control-members.html index 330e9cd70..05633edb5 100644 --- a/structgauss__kronrod_1_1control-members.html +++ b/structgauss__kronrod_1_1control-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structgauss__kronrod_1_1control.html b/structgauss__kronrod_1_1control.html index d6e6ceb78..2620f2462 100644 --- a/structgauss__kronrod_1_1control.html +++ b/structgauss__kronrod_1_1control.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structgauss__kronrod_1_1mvIntegral-members.html b/structgauss__kronrod_1_1mvIntegral-members.html index c336472f4..12a97acea 100644 --- a/structgauss__kronrod_1_1mvIntegral-members.html +++ b/structgauss__kronrod_1_1mvIntegral-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structgauss__kronrod_1_1mvIntegral.html b/structgauss__kronrod_1_1mvIntegral.html index fe5aa358b..e8dcd50ef 100644 --- a/structgauss__kronrod_1_1mvIntegral.html +++ b/structgauss__kronrod_1_1mvIntegral.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structmatrix-members.html b/structmatrix-members.html index abe051afb..be179f644 100644 --- a/structmatrix-members.html +++ b/structmatrix-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structmatrix.html b/structmatrix.html index ffa7d84df..be35fd516 100644 --- a/structmatrix.html +++ b/structmatrix.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structnewton_1_1HessianSolveVector-members.html b/structnewton_1_1HessianSolveVector-members.html index 825de769e..274f0c719 100644 --- a/structnewton_1_1HessianSolveVector-members.html +++ b/structnewton_1_1HessianSolveVector-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structnewton_1_1HessianSolveVector.html b/structnewton_1_1HessianSolveVector.html index abab0be01..eaa069c9a 100644 --- a/structnewton_1_1HessianSolveVector.html +++ b/structnewton_1_1HessianSolveVector.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structnewton_1_1NewtonOperator-members.html b/structnewton_1_1NewtonOperator-members.html index 96992a671..d0e1e7cee 100644 --- a/structnewton_1_1NewtonOperator-members.html +++ b/structnewton_1_1NewtonOperator-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structnewton_1_1NewtonOperator.html b/structnewton_1_1NewtonOperator.html index 176ebfb69..46d66c91f 100644 --- a/structnewton_1_1NewtonOperator.html +++ b/structnewton_1_1NewtonOperator.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structnewton_1_1TagOp-members.html b/structnewton_1_1TagOp-members.html index 1b5dde256..af4ea4950 100644 --- a/structnewton_1_1TagOp-members.html +++ b/structnewton_1_1TagOp-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structnewton_1_1TagOp.html b/structnewton_1_1TagOp.html index d078d54fd..9e481b792 100644 --- a/structnewton_1_1TagOp.html +++ b/structnewton_1_1TagOp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structnewton_1_1jacobian__dense__t-members.html b/structnewton_1_1jacobian__dense__t-members.html index 8d90c78a5..35847fd5c 100644 --- a/structnewton_1_1jacobian__dense__t-members.html +++ b/structnewton_1_1jacobian__dense__t-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structnewton_1_1jacobian__dense__t.html b/structnewton_1_1jacobian__dense__t.html index 46326eea9..eb7b5e690 100644 --- a/structnewton_1_1jacobian__dense__t.html +++ b/structnewton_1_1jacobian__dense__t.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structnewton_1_1jacobian__sparse__plus__lowrank__t-members.html b/structnewton_1_1jacobian__sparse__plus__lowrank__t-members.html index 463ac3075..74d9f8eff 100644 --- a/structnewton_1_1jacobian__sparse__plus__lowrank__t-members.html +++ b/structnewton_1_1jacobian__sparse__plus__lowrank__t-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structnewton_1_1jacobian__sparse__plus__lowrank__t.html b/structnewton_1_1jacobian__sparse__plus__lowrank__t.html index a8735d2bf..639111553 100644 --- a/structnewton_1_1jacobian__sparse__plus__lowrank__t.html +++ b/structnewton_1_1jacobian__sparse__plus__lowrank__t.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structnewton_1_1jacobian__sparse__t-members.html b/structnewton_1_1jacobian__sparse__t-members.html index 08d8dfd06..ba6d41417 100644 --- a/structnewton_1_1jacobian__sparse__t-members.html +++ b/structnewton_1_1jacobian__sparse__t-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structnewton_1_1jacobian__sparse__t.html b/structnewton_1_1jacobian__sparse__t.html index 4ff829295..38d7ab90d 100644 --- a/structnewton_1_1jacobian__sparse__t.html +++ b/structnewton_1_1jacobian__sparse__t.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structnewton_1_1newton__config-members.html b/structnewton_1_1newton__config-members.html index f7370a877..0071f189c 100644 --- a/structnewton_1_1newton__config-members.html +++ b/structnewton_1_1newton__config-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structnewton_1_1newton__config.html b/structnewton_1_1newton__config.html index 84b1ca057..fe4f64d00 100644 --- a/structnewton_1_1newton__config.html +++ b/structnewton_1_1newton__config.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structnewton_1_1safe__eval-members.html b/structnewton_1_1safe__eval-members.html index e74f5c74e..292f6cef3 100644 --- a/structnewton_1_1safe__eval-members.html +++ b/structnewton_1_1safe__eval-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structnewton_1_1safe__eval.html b/structnewton_1_1safe__eval.html index 698d79075..bf0b418c5 100644 --- a/structnewton_1_1safe__eval.html +++ b/structnewton_1_1safe__eval.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structparallel__accumulator-members.html b/structparallel__accumulator-members.html index d1146d099..2cb89abad 100644 --- a/structparallel__accumulator-members.html +++ b/structparallel__accumulator-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structparallel__accumulator.html b/structparallel__accumulator.html index 219a6916c..54ae7dc55 100644 --- a/structparallel__accumulator.html +++ b/structparallel__accumulator.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structradix_1_1radix-members.html b/structradix_1_1radix-members.html index d1884e254..0878fc704 100644 --- a/structradix_1_1radix-members.html +++ b/structradix_1_1radix-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structradix_1_1radix.html b/structradix_1_1radix.html index 297ffae81..15b2dba17 100644 --- a/structradix_1_1radix.html +++ b/structradix_1_1radix.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structsparse__matrix__exponential_1_1config-members.html b/structsparse__matrix__exponential_1_1config-members.html index a4ccf81dc..f1fec810a 100644 --- a/structsparse__matrix__exponential_1_1config-members.html +++ b/structsparse__matrix__exponential_1_1config-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structsparse__matrix__exponential_1_1config.html b/structsparse__matrix__exponential_1_1config.html index ed066beb2..dcecd31d9 100644 --- a/structsparse__matrix__exponential_1_1config.html +++ b/structsparse__matrix__exponential_1_1config.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structsparse__matrix__exponential_1_1expm__generator-members.html b/structsparse__matrix__exponential_1_1expm__generator-members.html index 79db7017d..fe9bed5f4 100644 --- a/structsparse__matrix__exponential_1_1expm__generator-members.html +++ b/structsparse__matrix__exponential_1_1expm__generator-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structsparse__matrix__exponential_1_1expm__generator.html b/structsparse__matrix__exponential_1_1expm__generator.html index 7c0d2676b..215882098 100644 --- a/structsparse__matrix__exponential_1_1expm__generator.html +++ b/structsparse__matrix__exponential_1_1expm__generator.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structsparse__matrix__exponential_1_1expm__series-members.html b/structsparse__matrix__exponential_1_1expm__series-members.html index 805c6f1eb..eb2b4a1c0 100644 --- a/structsparse__matrix__exponential_1_1expm__series-members.html +++ b/structsparse__matrix__exponential_1_1expm__series-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structsparse__matrix__exponential_1_1expm__series.html b/structsparse__matrix__exponential_1_1expm__series.html index 9ce79f614..0186f837a 100644 --- a/structsparse__matrix__exponential_1_1expm__series.html +++ b/structsparse__matrix__exponential_1_1expm__series.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structtmbutils_1_1Concat-members.html b/structtmbutils_1_1Concat-members.html index 077255c3e..ebb05f038 100644 --- a/structtmbutils_1_1Concat-members.html +++ b/structtmbutils_1_1Concat-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structtmbutils_1_1Concat.html b/structtmbutils_1_1Concat.html index 8d49e4e1e..a9b460c68 100644 --- a/structtmbutils_1_1Concat.html +++ b/structtmbutils_1_1Concat.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structtmbutils_1_1array-members.html b/structtmbutils_1_1array-members.html index 60fa18052..2d2b8f694 100644 --- a/structtmbutils_1_1array-members.html +++ b/structtmbutils_1_1array-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structtmbutils_1_1array.html b/structtmbutils_1_1array.html index c0bbb392b..42f9fc9c3 100644 --- a/structtmbutils_1_1array.html +++ b/structtmbutils_1_1array.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structtmbutils_1_1interpol2D-members.html b/structtmbutils_1_1interpol2D-members.html index ff36f0817..fcb3a3188 100644 --- a/structtmbutils_1_1interpol2D-members.html +++ b/structtmbutils_1_1interpol2D-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structtmbutils_1_1interpol2D.html b/structtmbutils_1_1interpol2D.html index 527127359..626cfe4aa 100644 --- a/structtmbutils_1_1interpol2D.html +++ b/structtmbutils_1_1interpol2D.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structtmbutils_1_1interpol2D__config-members.html b/structtmbutils_1_1interpol2D__config-members.html index 4b0b98012..fb89dbb4e 100644 --- a/structtmbutils_1_1interpol2D__config-members.html +++ b/structtmbutils_1_1interpol2D__config-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structtmbutils_1_1interpol2D__config.html b/structtmbutils_1_1interpol2D__config.html index 53aeaeb09..f8e4276c6 100644 --- a/structtmbutils_1_1interpol2D__config.html +++ b/structtmbutils_1_1interpol2D__config.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structtmbutils_1_1matexp-members.html b/structtmbutils_1_1matexp-members.html index b869191b4..381832392 100644 --- a/structtmbutils_1_1matexp-members.html +++ b/structtmbutils_1_1matexp-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structtmbutils_1_1matexp.html b/structtmbutils_1_1matexp.html index 831c48213..1e75ef671 100644 --- a/structtmbutils_1_1matexp.html +++ b/structtmbutils_1_1matexp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structtmbutils_1_1matexp_3_01scalartype_00_012_01_4-members.html b/structtmbutils_1_1matexp_3_01scalartype_00_012_01_4-members.html index 895ff1295..fd83d786e 100644 --- a/structtmbutils_1_1matexp_3_01scalartype_00_012_01_4-members.html +++ b/structtmbutils_1_1matexp_3_01scalartype_00_012_01_4-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structtmbutils_1_1matexp_3_01scalartype_00_012_01_4.html b/structtmbutils_1_1matexp_3_01scalartype_00_012_01_4.html index bae11fb84..b4832c26a 100644 --- a/structtmbutils_1_1matexp_3_01scalartype_00_012_01_4.html +++ b/structtmbutils_1_1matexp_3_01scalartype_00_012_01_4.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structtmbutils_1_1matrix-members.html b/structtmbutils_1_1matrix-members.html index bfada254d..137bc21e0 100644 --- a/structtmbutils_1_1matrix-members.html +++ b/structtmbutils_1_1matrix-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structtmbutils_1_1matrix.html b/structtmbutils_1_1matrix.html index b6ebd186c..c112f3dec 100644 --- a/structtmbutils_1_1matrix.html +++ b/structtmbutils_1_1matrix.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structtmbutils_1_1vector-members.html b/structtmbutils_1_1vector-members.html index 766af4bbd..1125701d2 100644 --- a/structtmbutils_1_1vector-members.html +++ b/structtmbutils_1_1vector-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structtmbutils_1_1vector.html b/structtmbutils_1_1vector.html index c10298a5d..71ccf5faa 100644 --- a/structtmbutils_1_1vector.html +++ b/structtmbutils_1_1vector.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structvector-members.html b/structvector-members.html index 3fdfdc276..daf825b56 100644 --- a/structvector-members.html +++ b/structvector-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structvector.html b/structvector.html index 04dd83237..934dc8e31 100644 --- a/structvector.html +++ b/structvector.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structvectorize_1_1vector-members.html b/structvectorize_1_1vector-members.html index ec263dac2..ccf109c0e 100644 --- a/structvectorize_1_1vector-members.html +++ b/structvectorize_1_1vector-members.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/structvectorize_1_1vector.html b/structvectorize_1_1vector.html index 5de92f5ca..7b1e97bf7 100644 --- a/structvectorize_1_1vector.html +++ b/structvectorize_1_1vector.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/supernodal__inverse__subset_8hpp_source.html b/supernodal__inverse__subset_8hpp_source.html index c9176074c..590c72569 100644 --- a/supernodal__inverse__subset_8hpp_source.html +++ b/supernodal__inverse__subset_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/thetalog_8cpp-example.html b/thetalog_8cpp-example.html index 1691cb955..b8f199deb 100644 --- a/thetalog_8cpp-example.html +++ b/thetalog_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/tiny__ad_2integrate_2integrate_8hpp_source.html b/tiny__ad_2integrate_2integrate_8hpp_source.html index 41fb0e528..1f17d0525 100644 --- a/tiny__ad_2integrate_2integrate_8hpp_source.html +++ b/tiny__ad_2integrate_2integrate_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/tiny__ad_8hpp_source.html b/tiny__ad_8hpp_source.html index 05a3849b3..f0402030b 100644 --- a/tiny__ad_8hpp_source.html +++ b/tiny__ad_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/tiny__valarray_8hpp_source.html b/tiny__valarray_8hpp_source.html index be4e2263d..a702fd421 100644 --- a/tiny__valarray_8hpp_source.html +++ b/tiny__valarray_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/tiny__vec_8hpp_source.html b/tiny__vec_8hpp_source.html index 548a452a3..eb8670cfc 100644 --- a/tiny__vec_8hpp_source.html +++ b/tiny__vec_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/tmb__core_8hpp.html b/tmb__core_8hpp.html index de9459aeb..86d3a970e 100644 --- a/tmb__core_8hpp.html +++ b/tmb__core_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/tmb__core_8hpp_source.html b/tmb__core_8hpp_source.html index b740347ef..7ea1c30bb 100644 --- a/tmb__core_8hpp_source.html +++ b/tmb__core_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
@@ -73,7 +73,7 @@
tmb_core.hpp

-Go to the documentation of this file.
1 // Copyright (C) 2013-2015 Kasper Kristensen
2 // License: GPL-2
3 
8 /*
9  Call to external C++ code can potentially result in exeptions that
10  will crash R. However, we do not want R to crash on failed memory
11  allocations. Therefore:
12 
13  * All interface functions (those called with .Call from R) must have
14  TMB_TRY wrapped around CppAD/Eigen code that allocates memory.
15 
16  * Special attention must be payed to parallel code, as each thread
17  is responsible for catching its own exceptions.
18 */
19 
20 #ifndef TMB_TRY
21 #define TMB_TRY try
22 #endif
23 // By default we only accept 'bad_alloc' as a valid exception. Everything else => debugger !
24 // Behaviour can be changed by re-defining this macro.
25 #ifndef TMB_CATCH
26 #define TMB_CATCH catch(std::bad_alloc& excpt)
27 #endif
28 // Inside the TMB_CATCH comes 'cleanup code' followed by this error
29 // call (allowed to depend on the exception 'excpt')
30 // Error message can be changed by re-defining this macro.
31 #ifndef TMB_ERROR_BAD_ALLOC
32 #define TMB_ERROR_BAD_ALLOC \
33 Rf_error("Caught exception '%s' in function '%s'\n", \
34  excpt.what(), \
35  __FUNCTION__)
36 #endif
37 // Error call comes outside TMB_CATCH in OpenMP case (so *cannot*
38 // depend on exception e.g. 'excpt')
39 // Error message can be changed by re-defining this macro.
40 #ifndef TMB_ERROR_BAD_THREAD_ALLOC
41 #define TMB_ERROR_BAD_THREAD_ALLOC \
42 Rf_error("Caught exception '%s' in function '%s'\n", \
43  bad_thread_alloc, \
44  __FUNCTION__)
45 #endif
46 
47 /* Memory manager:
48  Count the number of external pointers alive.
49  When total number is zero it is safe to dyn.unload
50  the library.
51 */
52 #include <set>
53 extern "C" void finalizeDoubleFun(SEXP x);
54 extern "C" void finalizeADFun(SEXP x);
55 extern "C" void finalizeparallelADFun(SEXP x);
56 extern "C" SEXP FreeADFunObject(SEXP f) CSKIP ({
57  SEXP tag = R_ExternalPtrTag(f);
58  if (tag == Rf_install("DoubleFun")) {
59  finalizeDoubleFun(f);
60  }
61  else if (tag == Rf_install("ADFun")) {
62  finalizeADFun(f);
63  }
64  else if (tag == Rf_install("parallelADFun")) {
65  finalizeparallelADFun(f);
66  }
67  else {
68  Rf_error("Unknown external ptr type");
69  }
70  R_ClearExternalPtr(f); // Set pointer to 'nil'
71  return R_NilValue;
72 })
74 struct memory_manager_struct {
75  int counter;
77  std::set<SEXP> alive;
79  void RegisterCFinalizer(SEXP list);
81  void CallCFinalizer(SEXP x);
83  void clear();
84  memory_manager_struct();
85 };
86 #ifndef WITH_LIBTMB
87 void memory_manager_struct::RegisterCFinalizer(SEXP x) {
88  counter++;
89  alive.insert(x);
90 }
91 void memory_manager_struct::CallCFinalizer(SEXP x){
92  counter--;
93  alive.erase(x);
94 }
95 void memory_manager_struct::clear(){
96  std::set<SEXP>::iterator it;
97  while (alive.size() > 0) {
98  FreeADFunObject(*alive.begin());
99  }
100 }
101 memory_manager_struct::memory_manager_struct(){
102  counter=0;
103 }
104 #endif
105 TMB_EXTERN memory_manager_struct memory_manager;
106 
117 #ifdef WITH_LIBTMB
118 SEXP ptrList(SEXP x);
119 #else
120 SEXP ptrList(SEXP x)
121 {
122  SEXP ans,names;
123  PROTECT(ans=Rf_allocVector(VECSXP,1));
124  PROTECT(names=Rf_allocVector(STRSXP,1));
125  SET_VECTOR_ELT(ans,0,x);
126  SET_STRING_ELT(names,0,Rf_mkChar("ptr"));
127  Rf_setAttrib(ans,R_NamesSymbol,names);
128  memory_manager.RegisterCFinalizer(x);
129  UNPROTECT(2);
130  return ans;
131 }
132 #endif
133 
134 extern "C"{
135 #ifdef LIB_UNLOAD
136 #include <R_ext/Rdynload.h>
137  void LIB_UNLOAD(DllInfo *dll)
138  {
139  if(memory_manager.counter>0)Rprintf("Warning: %d external pointers will be removed\n",memory_manager.counter);
140  memory_manager.clear();
141  for(int i=0;i<1000;i++){ // 122 seems to be sufficient.
142  if(memory_manager.counter>0){
143  R_gc();
144  R_RunExitFinalizers();
145  } else break;
146  }
147  if(memory_manager.counter>0)Rf_error("Failed to clean. Please manually clean up before unloading\n");
148  }
149 #endif
150 }
151 
152 #ifdef _OPENMP
153 TMB_EXTERN bool _openmp CSKIP( =true; )
154 #else
155 TMB_EXTERN bool _openmp CSKIP( =false; )
156 #endif
157 
159 template<class ADFunPointer>
160 void optimizeTape(ADFunPointer pf){
161  if(!config.optimize.instantly){
162  /* Drop out */
163  return;
164  }
165  if (!config.optimize.parallel){
166 #ifdef _OPENMP
167 #pragma omp critical
168 #endif
169  { /* Avoid multiple tape optimizations at the same time (to reduce memory) */
170  if(config.trace.optimize)std::cout << "Optimizing tape... ";
171  pf->optimize();
172  if(config.trace.optimize)std::cout << "Done\n";
173  }
174  }
175  else
176  { /* Allow multiple tape optimizations at the same time */
177  if(config.trace.optimize)std::cout << "Optimizing tape... ";
178  pf->optimize();
179  if(config.trace.optimize)std::cout << "Done\n";
180  }
181 }
182 
183 /* Macros to obtain data and parameters from R */
184 
208 #define TMB_OBJECTIVE_PTR \
209 this
210 
213 #define PARAMETER_MATRIX(name) \
214 tmbutils::matrix<Type> name(TMB_OBJECTIVE_PTR -> fillShape( \
215 asMatrix<Type> ( TMB_OBJECTIVE_PTR -> getShape( #name, &Rf_isMatrix) ), \
216 #name) );
217 
220 #define PARAMETER_VECTOR(name) \
221 vector<Type> name(TMB_OBJECTIVE_PTR -> fillShape( \
222 asVector<Type>(TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isNumeric)), \
223 #name));
224 
227 #define PARAMETER(name) \
228 Type name(TMB_OBJECTIVE_PTR -> fillShape( \
229 asVector<Type>(TMB_OBJECTIVE_PTR -> getShape(#name,&isNumericScalar)), \
230 #name)[0]);
231 
236 #define DATA_VECTOR(name) \
237 vector<Type> name; \
238 if (!Rf_isNull(getListElement(TMB_OBJECTIVE_PTR -> parameters,#name))){ \
239  name = TMB_OBJECTIVE_PTR -> fillShape(asVector<Type>( \
240  TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isNumeric)), #name); \
241 } else { \
242  name = asVector<Type>(getListElement( \
243  TMB_OBJECTIVE_PTR -> data,#name,&Rf_isNumeric)); \
244 }
245 
248 #define DATA_MATRIX(name) \
249 matrix<Type> name(asMatrix<Type>( \
250 getListElement(TMB_OBJECTIVE_PTR -> data, #name, &Rf_isMatrix)));
251 
254 #define DATA_SCALAR(name) \
255 Type name(asVector<Type>(getListElement(TMB_OBJECTIVE_PTR -> data, \
256 #name,&isNumericScalar))[0]);
257 
260 #define DATA_INTEGER(name) int name(CppAD::Integer(asVector<Type>( \
261 getListElement(TMB_OBJECTIVE_PTR -> data, \
262 #name, &isNumericScalar))[0]));
263 
281 #define DATA_FACTOR(name) vector<int> name(asVector<int>( \
282 getListElement(TMB_OBJECTIVE_PTR -> data, #name, &Rf_isNumeric)));
283 
287 #define DATA_IVECTOR(name) vector<int> name(asVector<int>( \
288 getListElement(TMB_OBJECTIVE_PTR -> data, #name, &Rf_isNumeric)));
289 
292 #define NLEVELS(name) \
293 LENGTH(Rf_getAttrib(getListElement(TMB_OBJECTIVE_PTR -> data, #name), \
294 Rf_install("levels")))
295 
299 #define DATA_SPARSE_MATRIX(name) \
300 Eigen::SparseMatrix<Type> name(tmbutils::asSparseMatrix<Type>( \
301 getListElement(TMB_OBJECTIVE_PTR -> data, \
302 #name, &isValidSparseMatrix)));
303 
304 // NOTE: REPORT() constructs new SEXP so never report in parallel!
313 #define REPORT(name) \
314 if( isDouble<Type>::value && \
315  TMB_OBJECTIVE_PTR -> current_parallel_region<0 ) \
316 { \
317  SEXP _TMB_temporary_sexp_; \
318  PROTECT( _TMB_temporary_sexp_ = asSEXP(name) ); \
319  Rf_defineVar(Rf_install(#name), \
320  _TMB_temporary_sexp_, TMB_OBJECTIVE_PTR -> report); \
321  UNPROTECT(1); \
322 }
323 
329 #define SIMULATE \
330 if(isDouble<Type>::value && TMB_OBJECTIVE_PTR -> do_simulate)
331 
342 #define ADREPORT(name) \
343 TMB_OBJECTIVE_PTR -> reportvector.push(name, #name);
344 
345 #define PARALLEL_REGION \
346 if( TMB_OBJECTIVE_PTR -> parallel_region() )
347 
352 #define DATA_ARRAY(name) \
353 tmbutils::array<Type> name; \
354 if (!Rf_isNull(getListElement(TMB_OBJECTIVE_PTR -> parameters,#name))){ \
355  name = TMB_OBJECTIVE_PTR -> fillShape(tmbutils::asArray<Type>( \
356  TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isArray)), #name); \
357 } else { \
358  name = tmbutils::asArray<Type>(getListElement( \
359  TMB_OBJECTIVE_PTR -> data, #name, &Rf_isArray)); \
360 }
361 
364 #define PARAMETER_ARRAY(name) \
365 tmbutils::array<Type> name(TMB_OBJECTIVE_PTR -> fillShape( \
366 tmbutils::asArray<Type>(TMB_OBJECTIVE_PTR -> getShape( \
367 #name, &Rf_isArray)), #name));
368 
371 #define DATA_IMATRIX(name) \
372 matrix<int> name(asMatrix<int>( \
373 getListElement(TMB_OBJECTIVE_PTR -> data,#name, &Rf_isMatrix)));
374 
377 #define DATA_IARRAY(name) \
378 tmbutils::array<int> name(tmbutils::asArray<int>( \
379 getListElement(TMB_OBJECTIVE_PTR -> data, #name, &Rf_isArray)));
380 
394 #define DATA_STRING(name) \
395 std::string name = \
396  CHAR(STRING_ELT(getListElement(TMB_OBJECTIVE_PTR -> data, #name), 0));
397 
433 #define DATA_STRUCT(name, struct) \
434 struct<Type> name(getListElement(TMB_OBJECTIVE_PTR -> data, #name));
435 
440 template<class VT, class Type = typename VT::Scalar>
441 struct data_indicator : VT{
449  bool osa_flag;
451  data_indicator() { osa_flag = false; }
456  data_indicator(VT obs, bool init_one = false){
457  VT::operator=(obs);
458  if (init_one) VT::fill(Type(1.0));
459  cdf_lower = obs; cdf_lower.setZero();
460  cdf_upper = obs; cdf_upper.setZero();
461  osa_flag = false;
462  }
464  void fill(vector<Type> p, SEXP ord_){
465  int n = (*this).size();
466  if(p.size() >= n ) VT::operator=(p.segment(0, n));
467  if(p.size() >= 2*n) cdf_lower = p.segment(n, n);
468  if(p.size() >= 3*n) cdf_upper = p.segment(2 * n, n);
469  if(!Rf_isNull(ord_)) {
470  this->ord = asVector<int>(ord_);
471  }
472  for (int i=0; i<p.size(); i++) {
473  osa_flag |= CppAD::Variable(p[i]);
474  }
475  }
478  data_indicator segment(int pos, int n) {
479  data_indicator ans ( VT::segment(pos, n) );
480  ans.cdf_lower = cdf_lower.segment(pos, n);
481  ans.cdf_upper = cdf_upper.segment(pos, n);
482  if (ord.size() != 0) {
483  ans.ord = ord.segment(pos, n);
484  }
485  ans.osa_flag = osa_flag;
486  return ans;
487  }
490  int n = this->size();
491  vector<int> ans(n);
492  if (ord.size() == 0) {
493  for (int i=0; i<n; i++)
494  ans(i) = i;
495  } else {
496  if (ord.size() != n) Rf_error("Unexpected 'ord.size() != n'");
497  std::vector<std::pair<int, int> > y(n);
498  for (int i=0; i<n; i++) {
499  y[i].first = ord[i];
500  y[i].second = i;
501  }
502  std::sort(y.begin(), y.end()); // sort inplace
503  for (int i=0; i<n; i++) {
504  ans[i] = y[i].second;
505  }
506  }
507  return ans;
508  }
510  bool osa_active() { return osa_flag; }
511 };
512 
518 #define DATA_ARRAY_INDICATOR(name, obs) \
519 data_indicator<tmbutils::array<Type> > name(obs, true); \
520 if (!Rf_isNull(getListElement(TMB_OBJECTIVE_PTR -> parameters,#name))){ \
521  name.fill( TMB_OBJECTIVE_PTR -> fillShape(asVector<Type>( \
522  TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isNumeric)), \
523  #name), \
524  Rf_getAttrib( \
525  TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isNumeric), \
526  Rf_install("ord")) ); \
527 }
528 
534 #define DATA_VECTOR_INDICATOR(name, obs) \
535 data_indicator<tmbutils::vector<Type> > name(obs, true); \
536 if (!Rf_isNull(getListElement(TMB_OBJECTIVE_PTR -> parameters,#name))){ \
537  name.fill( TMB_OBJECTIVE_PTR -> fillShape(asVector<Type>( \
538  TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isNumeric)), \
539  #name), \
540  Rf_getAttrib( \
541  TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isNumeric), \
542  Rf_install("ord")) ); \
543 }
544 
545 // kasper: Not sure used anywhere
549 template<class Type>
550 matrix<int> HessianSparsityPattern(ADFun<Type> *pf){
551  int n=pf->Domain();
552  vector<bool> Px(n * n);
553  for(int i = 0; i < n; i++)
554  {
555  for(int j = 0; j < n; j++)
556  Px[ i * n + j ] = false;
557  Px[ i * n + i ] = true;
558  }
559  pf->ForSparseJac(n, Px);
560  vector<bool> Py(1); Py[0]=true;
561  vector<int> tmp = (pf->RevSparseHes(n,Py)).template cast<int>();
562  return asMatrix(tmp, n, n);
563 }
564 
566 void Independent(vector<double> x)CSKIP({})
567 
569 template <class Type>
570 struct report_stack{
571  std::vector<const char*> names;
572  std::vector<vector<int> > namedim;
573  std::vector<Type> result;
574  void clear(){
575  names.resize(0);
576  namedim.resize(0);
577  result.resize(0);
578  }
579  // Get dimension of various object types
580  vector<int> getDim(const matrix<Type> &x) {
581  vector<int> dim(2);
582  dim << x.rows(), x.cols();
583  return dim;
584  }
585  vector<int> getDim(const tmbutils::array<Type> &x) {
586  return x.dim;
587  }
588  template<class Other> // i.e. vector or expression
589  vector<int> getDim(const Other &x) {
590  vector<int> dim(1);
591  dim << x.size();
592  return dim;
593  }
594  // push vector, matrix or array
595  template<class Vector_Matrix_Or_Array>
596  void push(Vector_Matrix_Or_Array x, const char* name) {
597  names.push_back(name);
598  namedim.push_back(getDim(x));
599  Eigen::Array<Type, Eigen::Dynamic, Eigen::Dynamic> xa(x);
600  result.insert(result.end(), xa.data(), xa.data() + x.size());
601  }
602  // push scalar (convert to vector case)
603  void push(Type x, const char* name){
604  vector<Type> xvec(1);
605  xvec[0] = x;
606  push(xvec, name);
607  }
608  // Eval: cast to vector<Type>
609  vector<Type> operator()() {
610  return result;
611  }
612  /* Get names (with replicates) to R */
613  SEXP reportnames()
614  {
615  int n = result.size();
616  SEXP nam;
617  PROTECT( nam = Rf_allocVector(STRSXP, n) );
618  int k = 0;
619  for(size_t i = 0; i < names.size(); i++) {
620  int namelength = namedim[i].prod();
621  for(int j = 0; j < namelength; j++) {
622  SET_STRING_ELT(nam, k, Rf_mkChar(names[i]) );
623  k++;
624  }
625  }
626  UNPROTECT(1);
627  return nam;
628  }
629  /* Get AD reported object dims */
630  SEXP reportdims() {
631  SEXP ans, nam;
632  typedef vector<vector<int> > VVI;
633  PROTECT( ans = asSEXP(VVI(namedim)) );
634  PROTECT( nam = Rf_allocVector(STRSXP, names.size()) );
635  for(size_t i = 0; i < names.size(); i++) {
636  SET_STRING_ELT(nam, i, Rf_mkChar(names[i]));
637  }
638  Rf_setAttrib(ans, R_NamesSymbol, nam);
639  UNPROTECT(2);
640  return ans;
641  }
642  EIGEN_DEFAULT_DENSE_INDEX_TYPE size(){return result.size();}
643 }; // report_stack
644 
645 extern "C" {
646  void GetRNGstate(void);
647  void PutRNGstate(void);
648 }
649 
/** \brief Main TMB driver class.

    Holds the R-side inputs (data list, parameter list, report
    environment), the concatenated parameter vector 'theta', and the
    bookkeeping needed to (a) fill PARAMETER_* objects from theta while
    taping, (b) collect ADREPORT()ed quantities, and (c) split the user
    template into parallel regions for parallel Hessian computation.
    The user's model is the out-of-line member 'operator()'. */
template <class Type>
class objective_function
{
// private:
public:
  SEXP data;        // R list of data objects (as passed from R)
  SEXP parameters;  // R list of parameter objects (numeric vectors/matrices)
  SEXP report;      // R environment used for reporting

  int index;                        // Cursor into 'theta' while filling parameters
  vector<Type> theta;               // unlist(parameters): all parameter values concatenated
  vector<const char*> thetanames;   // Name of the parameter each theta element belongs to
  report_stack<Type> reportvector;  // Accumulates ADREPORT()ed quantities
  bool reversefill; // used to find the parameter order in user template (not anymore - use pushParname instead)
  vector<const char*> parnames;     // One entry per PARAMETER_* statement, in order of appearance
  // Append a parameter name to 'parnames' (called once per fill* call).
  void pushParname(const char* x){
    parnames.conservativeResize(parnames.size()+1);
    parnames[parnames.size()-1]=x;
  }

  /* ================== For parallel Hessian computation
     Need three different parallel evaluation modes:
     (1) *Parallel mode* where a parallel region is evaluated iff
     current_parallel_region == selected_parallel_region
     (2) *Serial mode* where all parallel region tests are evaluated
     to TRUE so that "PARALLEL_REGION" tests are effectively removed.
     A negative value of "current_parallel_region" or "selected_parallel_region"
     is used to select this mode (the default).
     (3) *Count region mode* where statements inside "PARALLEL_REGION{...}"
     are *ignored* and "current_parallel_region" is increased by one each
     time a parallel region is visited.
     NOTE: The macro "PARALLEL_REGION" is supposed to be defined as
     #define PARALLEL_REGION if(this->parallel_region())
     where the function "parallel_region" does the book keeping.
   */
  bool parallel_ignore_statements;
  int current_parallel_region;  /* Identifier of a code-fragment of user template */
  int selected_parallel_region; /* Consider _this_ code-fragment */
  int max_parallel_regions;     /* Max number of parallel region identifiers,
                                   e.g. max_parallel_regions=config.nthreads;
                                   probably best in most cases. */
  bool parallel_region(){ /* Is this the selected parallel region ? */
    bool ans;
    // Serial mode: autopar enabled, or bookkeeping disabled via negative ids.
    if(config.autopar || current_parallel_region<0 || selected_parallel_region<0)return true; /* Serial mode */
    ans = (selected_parallel_region==current_parallel_region) && (!parallel_ignore_statements);
    current_parallel_region++;
    // Wrap the region counter so identifiers stay in [0, max_parallel_regions).
    if(max_parallel_regions>0)current_parallel_region=current_parallel_region % max_parallel_regions;
    return ans;
  }
  /* Note: Some other functions rely on "count_parallel_regions" to run through the users code (!) */
  // Run the user template once in "count region mode" and return how many
  // parallel regions it defines (or the configured maximum).
  int count_parallel_regions(){
    current_parallel_region=0;       /* reset counter */
    selected_parallel_region=0;
    parallel_ignore_statements=true; /* Do not evaluate stuff inside PARALLEL_REGION{...} */
    this->operator()();              /* Run through users code */
    if (config.autopar) return 0;
    if(max_parallel_regions>0)return max_parallel_regions;
    else
    return current_parallel_region;
  }
  void set_parallel_region(int i){ /* Select parallel region (from within openmp loop) */
    current_parallel_region=0;
    selected_parallel_region=i;
    parallel_ignore_statements=false;
  }

  bool do_simulate;   // True while evaluating in simulation mode (obj$simulate())
  void set_simulate(bool do_simulate_) {
    do_simulate = do_simulate_;
  }

  /* data_ and parameters_ are R-lists containing R-vectors or R-matrices.
     report_ is an R-environment.
     The elements of the vector "unlist(parameters_)" are filled into "theta"
     which contains the default parameter-values. This happens during the
     *construction* of the objective_function object.
     The user defined template "objective_function::operator()" is called
     from "MakeADFunObject" which tapes the operations and creates the final
     ADFun-object.
  */
  objective_function(SEXP data, SEXP parameters, SEXP report) :
    data(data), parameters(parameters), report(report), index(0)
  {
    /* Fill theta with the default parameters.
       Pass R-matrices column major. */
    theta.resize(nparms(parameters));
    int length_parlist = Rf_length(parameters);
    for(int i = 0, counter = 0; i < length_parlist; i++) {
      // x = parameters[[i]]
      SEXP x = VECTOR_ELT(parameters, i);
      int nx = Rf_length(x);
      double* px = REAL(x);
      for(int j = 0; j < nx; j++) {
        theta[counter++] = Type( px[j] );
      }
    }
    thetanames.resize(theta.size());
    for(int i=0;i<thetanames.size();i++)thetanames[i]="";
    // Negative region ids select serial mode by default (see parallel_region).
    current_parallel_region=-1;
    selected_parallel_region=-1;
    max_parallel_regions=-1;
#ifdef _OPENMP
    max_parallel_regions = config.nthreads;
#endif
    reversefill=false;
    do_simulate = false;
    GetRNGstate(); /* Read random seed from R. Note: by default we do
                      not write the seed back to R *after*
                      simulation. This ensures that multiple tapes for
                      one model object get the same seed. When in
                      simulation mode (enabled when calling
                      obj$simulate() from R) we *do* write the seed
                      back after simulation in order to get varying
                      replicates. */
  }

  // Re-fetch 'data' by looking up the symbol "data" in the enclosure of the
  // report environment (used when data changed on the R side).
  void sync_data() {
    SEXP env = ENCLOS(this->report);
    this->data = Rf_findVar(Rf_install("data"), env);
  }

  // Build a named numeric R vector holding the default parameter values
  // (theta stripped of its AD layers, named by thetanames).
  SEXP defaultpar()
  {
    int n=theta.size();
    SEXP res;
    SEXP nam;
    PROTECT(res=Rf_allocVector(REALSXP,n));
    PROTECT(nam=Rf_allocVector(STRSXP,n));
    for(int i=0;i<n;i++){
      //REAL(res)[i]=CppAD::Value(theta[i]);
      REAL(res)[i]=value(theta[i]);
      SET_STRING_ELT(nam,i,Rf_mkChar(thetanames[i]));
    }
    Rf_setAttrib(res,R_NamesSymbol,nam);
    UNPROTECT(2);
    return res;
  }

  // Character vector of parameter names in the order the user template
  // declared them (one entry per PARAMETER_* statement).
  SEXP parNames()
  {
    int n=parnames.size();
    SEXP nam;
    PROTECT(nam=Rf_allocVector(STRSXP,n));
    for(int i=0;i<n;i++){
      SET_STRING_ELT(nam,i,Rf_mkChar(parnames[i]));
    }
    UNPROTECT(1);
    return nam;
  }

  /* FIXME: "Value" should be "var2par" I guess
     kasper: Why not use asDouble defined previously? */
  // Overloads stripping 0-3 levels of CppAD (or one level of TMBad) AD
  // wrapping to recover the underlying double.
  double value(double x){return x;}
  double value(AD<double> x){return CppAD::Value(x);}
  double value(AD<AD<double> > x){return CppAD::Value(CppAD::Value(x));}
  double value(AD<AD<AD<double> > > x){return CppAD::Value(CppAD::Value(CppAD::Value(x)));}
#ifdef TMBAD_FRAMEWORK
  double value(TMBad::ad_aug x){return x.Value();}
#endif

  // Total parameter count: sum of lengths of all components of the R list.
  // Errors out if a component is not a numeric (REALSXP) object.
  int nparms(SEXP obj)
  {
    int count=0;
    for(int i=0;i<Rf_length(obj);i++){
      if(!Rf_isReal(VECTOR_ELT(obj,i)))Rf_error("PARAMETER COMPONENT NOT A VECTOR!");
      count+=Rf_length(VECTOR_ELT(obj,i));
    }
    return count;
  }

  /* The "fill functions" are all used to populate parameter vectors,
     arrays, matrices etc with the values of the parameter vector theta. */
  void fill(vector<Type> &x, const char *nam)
  {
    pushParname(nam);
    for(int i=0;i<x.size();i++){
      thetanames[index]=nam;
      if(reversefill)theta[index++]=x[i];else x[i]=theta[index++];
    }
  }
  // Matrix fill: column-major traversal, matching how the constructor
  // flattened R matrices into theta.
  void fill(matrix<Type> &x, const char *nam)
  {
    pushParname(nam);
    for(int j=0;j<x.cols();j++){
      for(int i=0;i<x.rows();i++){
        thetanames[index]=nam;
        if(reversefill)theta[index++]=x(i,j);else x(i,j)=theta[index++];
      }
    }
  }
  template<class ArrayType>
  void fill(ArrayType &x, const char *nam)
  {
    pushParname(nam);
    for(int i=0;i<x.size();i++){
      thetanames[index]=nam;
      if(reversefill)theta[index++]=x[i];else x[i]=theta[index++];
    }
  }

  /* Experiment: new map feature - currently arrays only */
  // Mapped fill: elements of x with map[i] >= 0 share the theta entry at
  // index+map[i]; negative map entries are left untouched (fixed).
  // 'index' advances by the number of distinct levels, not by x.size().
  template<class ArrayType>
  void fillmap(ArrayType &x, const char *nam)
  {
    pushParname(nam);
    SEXP elm=getListElement(parameters,nam);
    int* map=INTEGER(Rf_getAttrib(elm,Rf_install("map")));
    int nlevels=INTEGER(Rf_getAttrib(elm,Rf_install("nlevels")))[0];
    for(int i=0;i<x.size();i++){
      if(map[i]>=0){
        thetanames[index+map[i]]=nam;
        if(reversefill)theta[index+map[i]]=x(i);else x(i)=theta[index+map[i]];
      }
    }
    index+=nlevels;
  }
  // Auto detect whether we are in "map-mode"
  // Returns the "shape" attribute of the parameter if present (map-mode),
  // otherwise the parameter object itself; optionally type-checks it.
  SEXP getShape(const char *nam, RObjectTester expectedtype=NULL){
    SEXP elm=getListElement(parameters,nam);
    SEXP shape=Rf_getAttrib(elm,Rf_install("shape"));
    SEXP ans;
    if(shape==R_NilValue)ans=elm; else ans=shape;
    RObjectTestExpectedType(ans, expectedtype, nam);
    return ans;
  }
  // Dispatch to fill() or fillmap() depending on presence of a "shape"
  // attribute. Note: x is taken by value and returned filled.
  template<class ArrayType>
  //ArrayType fillShape(ArrayType &x, const char *nam){
  ArrayType fillShape(ArrayType x, const char *nam){
    SEXP elm=getListElement(parameters,nam);
    SEXP shape=Rf_getAttrib(elm,Rf_install("shape"));
    if(shape==R_NilValue)fill(x,nam);
    else fillmap(x,nam);
    return x;
  }

  // Scalar parameter fill (PARAMETER(x)).
  void fill(Type &x, char const *nam)
  {
    pushParname(nam);
    thetanames[index]=nam;
    if(reversefill)theta[index++]=x;else x=theta[index++];
  }

  // The user's model definition (defined by the user's .cpp template file).
  Type operator() ();

  // Evaluate the user template; if theta has unused trailing entries they
  // are interpreted as the "epsilon" vector and an inner product with the
  // ADREPORTed quantities is added (epsilon method).
  Type evalUserTemplate(){
    Type ans=this->operator()();
    /* After evaluating the template, "index" should be equal to the length of "theta".
       If not, we assume that the "epsilon method" has been requested from R, I.e.
       that the un-used theta parameters are reserved for an inner product contribution
       with the numbers reported via ADREPORT. */
    if(index != theta.size()){
      PARAMETER_VECTOR( TMB_epsilon_ );
      ans += ( this->reportvector() * TMB_epsilon_ ).sum();
    }
    return ans;
  }

}; // objective_function
926 
/* Accumulator used by parallel user templates: contributions added via
   += / -= are only accumulated when obj->parallel_region() selects the
   current region, so each thread tapes only its own share of the
   likelihood. Implicit conversion back to Type yields the partial sum.
   NOTE(review): the struct declaration line ("struct parallel_accumulator{")
   appears elided in this source view - confirm against upstream. */
template<class Type>
  Type result;                       // Partial sum for the selected region
  objective_function<Type>* obj;     // Owning objective function (region bookkeeping)
  parallel_accumulator(objective_function<Type>* obj_){
    result=Type(0);
    obj=obj_;
#ifdef _OPENMP
    // Under OpenMP, split work into one region per configured thread.
    obj->max_parallel_regions=config.nthreads;
#endif
  }
  inline void operator+=(Type x){
    if(obj->parallel_region())result+=x;
  }
  inline void operator-=(Type x){
    if(obj->parallel_region())result-=x;
  }
  operator Type(){
    return result;
  }
};
977 
978 
979 #ifndef WITH_LIBTMB
980 
981 #ifdef TMBAD_FRAMEWORK
982 template<class ADFunType>
983 SEXP EvalADFunObjectTemplate(SEXP f, SEXP theta, SEXP control)
984 {
985  if(!Rf_isNewList(control))Rf_error("'control' must be a list");
986  ADFunType* pf;
987  pf=(ADFunType*)R_ExternalPtrAddr(f);
988  int data_changed = getListInteger(control, "data_changed", 0);
989  if (data_changed) {
990  pf->force_update();
991  }
992  int set_tail = getListInteger(control, "set_tail", 0) - 1;
993  if (set_tail == -1) {
994  pf -> unset_tail();
995  } else {
996  std::vector<TMBad::Index> r(1, set_tail);
997  pf -> set_tail(r);
998  }
999  PROTECT(theta=Rf_coerceVector(theta,REALSXP));
1000  int n=pf->Domain();
1001  int m=pf->Range();
1002  if(LENGTH(theta)!=n)Rf_error("Wrong parameter length.");
1003  //R-index -> C-index
1004  int rangecomponent = getListInteger(control, "rangecomponent", 1) - 1;
1005  if(!((0<=rangecomponent)&(rangecomponent<=m-1)))
1006  Rf_error("Wrong range component.");
1007  int order = getListInteger(control, "order");
1008  if((order!=0) & (order!=1) & (order!=2) & (order!=3))
1009  Rf_error("order can be 0, 1, 2 or 3");
1010  //int sparsitypattern = getListInteger(control, "sparsitypattern");
1011  //int dumpstack = getListInteger(control, "dumpstack");
1012  SEXP hessiancols; // Hessian columns
1013  PROTECT(hessiancols=getListElement(control,"hessiancols"));
1014  int ncols=Rf_length(hessiancols);
1015  SEXP hessianrows; // Hessian rows
1016  PROTECT(hessianrows=getListElement(control,"hessianrows"));
1017  int nrows=Rf_length(hessianrows);
1018  if((nrows>0)&(nrows!=ncols))Rf_error("hessianrows and hessianrows must have same length");
1019  vector<size_t> cols(ncols);
1020  vector<size_t> cols0(ncols);
1021  vector<size_t> rows(nrows);
1022  if(ncols>0){
1023  for(int i=0;i<ncols;i++){
1024  cols[i]=INTEGER(hessiancols)[i]-1; //R-index -> C-index
1025  cols0[i]=0;
1026  if(nrows>0)rows[i]=INTEGER(hessianrows)[i]-1; //R-index -> C-index
1027  }
1028  }
1029  std::vector<double> x(REAL(theta), REAL(theta) + LENGTH(theta));
1030 
1031  SEXP res=R_NilValue;
1032  SEXP rangeweight=getListElement(control,"rangeweight");
1033  if(rangeweight!=R_NilValue){
1034  if(LENGTH(rangeweight)!=m)Rf_error("rangeweight must have length equal to range dimension");
1035  std::vector<double> w(REAL(rangeweight),
1036  REAL(rangeweight) + LENGTH(rangeweight));
1037  vector<double> ans = pf->Jacobian(x, w);
1038  res = asSEXP(ans);
1039  UNPROTECT(3);
1040  return res;
1041  }
1042  if(order==3){
1043  Rf_error("Not implemented for TMBad");
1044  // vector<double> w(1);
1045  // w[0]=1;
1046  // if((nrows!=1) | (ncols!=1))Rf_error("For 3rd order derivatives a single hessian coordinate must be specified.");
1047  // pf->ForTwo(x,rows,cols); /* Compute forward directions */
1048  // PROTECT(res=asSEXP(asMatrix(pf->Reverse(3,w),n,3)));
1049  }
1050  if(order==0){
1051  //if(dumpstack)CppAD::traceforward0sweep(1);
1052  std::vector<double> ans = pf->operator()(x);
1053  PROTECT(res=asSEXP(ans));
1054  //if(dumpstack)CppAD::traceforward0sweep(0);
1055  SEXP rangenames=Rf_getAttrib(f,Rf_install("range.names"));
1056  if(LENGTH(res)==LENGTH(rangenames)){
1057  Rf_setAttrib(res,R_NamesSymbol,rangenames);
1058  }
1059  }
1060  if(order==1){
1061  std::vector<double> jvec;
1062  SEXP keepx = getListElement(control, "keepx");
1063  if (keepx != R_NilValue && LENGTH(keepx) > 0) {
1064  SEXP keepy = getListElement(control, "keepy");
1065  std::vector<bool> keep_x(pf->Domain(), false);
1066  std::vector<bool> keep_y(pf->Range(), false);
1067  for (int i=0; i<LENGTH(keepx); i++) {
1068  keep_x[INTEGER(keepx)[i] - 1] = true;
1069  }
1070  for (int i=0; i<LENGTH(keepy); i++) {
1071  keep_y[INTEGER(keepy)[i] - 1] = true;
1072  }
1073  n = LENGTH(keepx);
1074  m = LENGTH(keepy);
1075  jvec = pf->Jacobian(x, keep_x, keep_y);
1076  } else {
1077  jvec = pf->Jacobian(x);
1078  }
1079  // if(doforward)pf->Forward(0,x);
1080  matrix<double> jac(m, n);
1081  int k=0;
1082  for (int i=0; i<m; i++) {
1083  for (int j=0; j<n; j++) {
1084  jac(i, j) = jvec[k];
1085  k++;
1086  }
1087  }
1088  PROTECT( res = asSEXP(jac) );
1089  }
1090  //if(order==2)res=asSEXP(pf->Hessian(x,0),1);
1091  if(order==2){
1092  // if(ncols==0){
1093  // if(sparsitypattern){
1094  // PROTECT(res=asSEXP(HessianSparsityPattern(pf)));
1095  // } else {
1096  // PROTECT(res=asSEXP(asMatrix(pf->Hessian(x,rangecomponent),n,n)));
1097  // }
1098  // }
1099  // else if (nrows==0){
1100  // /* Fixme: the cols0 argument should be user changeable */
1101  // PROTECT(res=asSEXP(asMatrix(pf->RevTwo(x,cols0,cols),n,ncols)));
1102  // }
1103  // else PROTECT(res=asSEXP(asMatrix(pf->ForTwo(x,rows,cols),m,ncols)));
1104  }
1105  UNPROTECT(4);
1106  return res;
1107 } // EvalADFunObjectTemplate
1108 #endif
1109 
1110 #ifdef CPPAD_FRAMEWORK
1111 
1145 template<class ADFunType>
1146 SEXP EvalADFunObjectTemplate(SEXP f, SEXP theta, SEXP control)
1147 {
1148  if(!Rf_isNewList(control))Rf_error("'control' must be a list");
1149  ADFunType* pf;
1150  pf=(ADFunType*)R_ExternalPtrAddr(f);
1151  PROTECT(theta=Rf_coerceVector(theta,REALSXP));
1152  int n=pf->Domain();
1153  int m=pf->Range();
1154  if(LENGTH(theta)!=n)Rf_error("Wrong parameter length.");
1155  // Do forwardsweep ?
1156  int doforward = getListInteger(control, "doforward", 1);
1157  //R-index -> C-index
1158  int rangecomponent = getListInteger(control, "rangecomponent", 1) - 1;
1159  if(!((0<=rangecomponent)&(rangecomponent<=m-1)))
1160  Rf_error("Wrong range component.");
1161  int order = getListInteger(control, "order");
1162  if((order!=0) & (order!=1) & (order!=2) & (order!=3))
1163  Rf_error("order can be 0, 1, 2 or 3");
1164  int sparsitypattern = getListInteger(control, "sparsitypattern");
1165  int dumpstack = getListInteger(control, "dumpstack");
1166  SEXP hessiancols; // Hessian columns
1167  PROTECT(hessiancols=getListElement(control,"hessiancols"));
1168  int ncols=Rf_length(hessiancols);
1169  SEXP hessianrows; // Hessian rows
1170  PROTECT(hessianrows=getListElement(control,"hessianrows"));
1171  int nrows=Rf_length(hessianrows);
1172  if((nrows>0)&(nrows!=ncols))Rf_error("hessianrows and hessianrows must have same length");
1173  vector<size_t> cols(ncols);
1174  vector<size_t> cols0(ncols);
1175  vector<size_t> rows(nrows);
1176  if(ncols>0){
1177  for(int i=0;i<ncols;i++){
1178  cols[i]=INTEGER(hessiancols)[i]-1; //R-index -> C-index
1179  cols0[i]=0;
1180  if(nrows>0)rows[i]=INTEGER(hessianrows)[i]-1; //R-index -> C-index
1181  }
1182  }
1183  vector<double> x = asVector<double>(theta);
1184  SEXP res=R_NilValue;
1185  SEXP rangeweight=getListElement(control,"rangeweight");
1186  if(rangeweight!=R_NilValue){
1187  if(LENGTH(rangeweight)!=m)Rf_error("rangeweight must have length equal to range dimension");
1188  if(doforward)pf->Forward(0,x);
1189  res=asSEXP(pf->Reverse(1,asVector<double>(rangeweight)));
1190  UNPROTECT(3);
1191  return res;
1192  }
1193  if(order==3){
1194  vector<double> w(1);
1195  w[0]=1;
1196  if((nrows!=1) | (ncols!=1))Rf_error("For 3rd order derivatives a single hessian coordinate must be specified.");
1197  pf->ForTwo(x,rows,cols); /* Compute forward directions */
1198  PROTECT(res=asSEXP(asMatrix(pf->Reverse(3,w),n,3)));
1199  }
1200  if(order==0){
1201  if(dumpstack)CppAD::traceforward0sweep(1);
1202  PROTECT(res=asSEXP(pf->Forward(0,x)));
1203  if(dumpstack)CppAD::traceforward0sweep(0);
1204  SEXP rangenames=Rf_getAttrib(f,Rf_install("range.names"));
1205  if(LENGTH(res)==LENGTH(rangenames)){
1206  Rf_setAttrib(res,R_NamesSymbol,rangenames);
1207  }
1208  }
1209  if(order==1){
1210  if(doforward)pf->Forward(0,x);
1211  matrix<double> jac(m, n);
1212  vector<double> u(n);
1213  vector<double> v(m);
1214  v.setZero();
1215  for(int i=0; i<m; i++) {
1216  v[i] = 1.0; u = pf->Reverse(1,v);
1217  v[i] = 0.0;
1218  jac.row(i) = u;
1219  }
1220  PROTECT( res = asSEXP(jac) );
1221  }
1222  //if(order==2)res=asSEXP(pf->Hessian(x,0),1);
1223  if(order==2){
1224  if(ncols==0){
1225  if(sparsitypattern){
1226  PROTECT(res=asSEXP(HessianSparsityPattern(pf)));
1227  } else {
1228  PROTECT(res=asSEXP(asMatrix(pf->Hessian(x,rangecomponent),n,n)));
1229  }
1230  }
1231  else if (nrows==0){
1232  /* Fixme: the cols0 argument should be user changeable */
1233  PROTECT(res=asSEXP(asMatrix(pf->RevTwo(x,cols0,cols),n,ncols)));
1234  }
1235  else PROTECT(res=asSEXP(asMatrix(pf->ForTwo(x,rows,cols),m,ncols)));
1236  }
1237  UNPROTECT(4);
1238  return res;
1239 } // EvalADFunObjectTemplate
1240 #endif
1241 
1243 template <class ADFunType>
1244 void finalize(SEXP x)
1245 {
1246  ADFunType* ptr=(ADFunType*)R_ExternalPtrAddr(x);
1247  if(ptr!=NULL)delete ptr;
1248  memory_manager.CallCFinalizer(x);
1249 }
1250 
1251 #ifdef TMBAD_FRAMEWORK
1252 
/* Tape the user template with the TMBad framework and return a new ADFun.
   parallel_region selects which PARALLEL_REGION fragment to tape (-1 =
   serial). If control$report is set, the ADREPORTed vector is taped instead
   of the objective value and 'info' receives its names (serial only).
   Caller owns the returned pointer. */
TMBad::ADFun< TMBad::ad_aug >* MakeADFunObject_(SEXP data, SEXP parameters,
                                                SEXP report, SEXP control, int parallel_region=-1,
                                                SEXP &info=R_NilValue)
{
  typedef TMBad::ad_aug ad;
  typedef TMBad::ADFun<ad> adfun;
  int returnReport = (control!=R_NilValue) && getListInteger(control, "report");
  /* Create objective_function "dummy"-object */
  objective_function< ad > F(data,parameters,report);
  F.set_parallel_region(parallel_region);
  /* Create ADFun pointer.
     We have the option to tape either the value returned by the
     objective_function template or the vector reported using the
     macro "ADREPORT" */
  adfun* pf = new adfun();
  pf->glob.ad_start();
  //TMBad::Independent(F.theta); // In both cases theta is the independent variable
  for (int i=0; i<F.theta.size(); i++) F.theta(i).Independent();
  if(!returnReport){ // Default case: no ad report - parallel run allowed
    vector< ad > y(1);
    y[0] = F.evalUserTemplate();
    //TMBad::Dependent(y);
    for (int i=0; i<y.size(); i++) y[i].Dependent();
  } else { // ad report case
    F(); // Run through user template (modifies reportvector)
    //TMBad::Dependent(F.reportvector.result);
    for (int i=0; i<F.reportvector.size(); i++) F.reportvector.result[i].Dependent();
    info=F.reportvector.reportnames(); // parallel run *not* allowed
  }
  pf->glob.ad_stop();
  return pf;
}
1285 #endif
1286 
1287 #ifdef CPPAD_FRAMEWORK
1288 
/* Tape the user template with the CppAD framework and return a new ADFun.
   parallel_region selects which PARALLEL_REGION fragment to tape (-1 =
   serial). If control$report is set, the ADREPORTed vector is taped instead
   of the objective value and 'info' receives its names (serial only).
   Caller owns the returned pointer. */
ADFun<double>* MakeADFunObject_(SEXP data, SEXP parameters,
                                SEXP report, SEXP control, int parallel_region=-1,
                                SEXP &info=R_NilValue)
{
  int returnReport = getListInteger(control, "report");
  /* Create objective_function "dummy"-object */
  objective_function< AD<double> > F(data,parameters,report);
  F.set_parallel_region(parallel_region);
  /* Create ADFun pointer.
     We have the option to tape either the value returned by the
     objective_function template or the vector reported using the
     macro "ADREPORT" */
  Independent(F.theta); // In both cases theta is the independent variable
  ADFun< double >* pf;
  if(!returnReport){ // Default case: no ad report - parallel run allowed
    vector< AD<double> > y(1);
    y[0]=F.evalUserTemplate();
    pf = new ADFun< double >(F.theta,y);
  } else { // ad report case
    F(); // Run through user template (modifies reportvector)
    pf = new ADFun< double >(F.theta,F.reportvector());
    info=F.reportvector.reportnames(); // parallel run *not* allowed
  }
  return pf;
}
1314 #endif
1315 
1316 extern "C"
1317 {
1318 
1319 #ifdef TMBAD_FRAMEWORK
1320 
  // R finalizer for a serial (TMBad) ADFun external pointer.
  void finalizeADFun(SEXP x)
  {
    finalize<TMBad::ADFun<TMBad::ad_aug> > (x);
  }
  // R finalizer for a parallel ADFun external pointer.
  void finalizeparallelADFun(SEXP x)
  {
    finalize<parallelADFun<double> > (x);
  }
1329 #endif
1330 
1331 #ifdef CPPAD_FRAMEWORK
1332 
  // R finalizer for a serial (CppAD) ADFun external pointer.
  void finalizeADFun(SEXP x)
  {
    finalize<ADFun<double> > (x);
  }
  // R finalizer for a parallel ADFun external pointer.
  void finalizeparallelADFun(SEXP x)
  {
    finalize<parallelADFun<double> > (x);
  }
1341 #endif
1342 
1343  /* --- MakeADFunObject ----------------------------------------------- */
1344 
1345 #ifdef TMBAD_FRAMEWORK
1346 
  /* .Call entry (TMBad): tape the user template and return an R list holding
     an external pointer ("ADFun" or "parallelADFun") with attributes "par"
     (default parameters) and, for report mode, "range.names". Under OpenMP
     one tape per parallel region is created and combined. Returns R_NilValue
     if report mode was requested but the template has no ADREPORT. */
  SEXP MakeADFunObject(SEXP data, SEXP parameters,
                       SEXP report, SEXP control)
  {
    typedef TMBad::ad_aug ad;
    typedef TMBad::ADFun<ad> adfun;

    adfun* pf = NULL;
    /* Some type checking */
    if(!Rf_isNewList(data))Rf_error("'data' must be a list");
    if(!Rf_isNewList(parameters))Rf_error("'parameters' must be a list");
    if(!Rf_isEnvironment(report))Rf_error("'report' must be an environment");
    if(!Rf_isNewList(control))Rf_error("'control' must be a list");
    int returnReport = getListInteger(control, "report");

    /* Get the default parameter vector (tiny overhead) */
    SEXP par,res=NULL,info;
    objective_function< double > F(data,parameters,report);
#ifdef _OPENMP
    int n=F.count_parallel_regions(); // Evaluates user template
#else
    F.count_parallel_regions(); // Evaluates user template
#endif
    if(returnReport && F.reportvector.size()==0){
      /* Told to report, but no ADREPORT in template: Get out quickly */
      return R_NilValue;
    }
    PROTECT(par=F.defaultpar());
    PROTECT(info=R_NilValue); // Important
    
    if(_openmp && !returnReport){ // Parallel mode
#ifdef _OPENMP
      if(config.trace.parallel)
        std::cout << n << " regions found.\n";
      if (n==0) n++; // No explicit parallel accumulation
      start_parallel(); /* FIXME: NOT NEEDED */
      /* parallel test */
      vector< adfun* > pfvec(n);
      const char* bad_thread_alloc = NULL;
      // Tape each parallel region on its own thread; remember the first
      // exception (if any) and re-throw after the parallel section.
#pragma omp parallel for num_threads(config.nthreads) if (config.tape.parallel && n>1)
      for(int i = 0; i < n; i++) {
        TMB_TRY {
          pfvec[i] = NULL;
          pfvec[i] = MakeADFunObject_(data, parameters, report, control, i, info);
          if (config.optimize.instantly) pfvec[i]->optimize();
        }
        TMB_CATCH {
          if (pfvec[i] != NULL) delete pfvec[i];
          bad_thread_alloc = excpt.what();
        }
      }
      if (bad_thread_alloc) {
        TMB_ERROR_BAD_THREAD_ALLOC;
      }

      // FIXME: NOT DONE YET

      parallelADFun<double>* ppf=new parallelADFun<double>(pfvec);
      /* Convert parallel ADFun pointer to R_ExternalPtr */
      PROTECT(res=R_MakeExternalPtr((void*) ppf,Rf_install("parallelADFun"),R_NilValue));
      //R_RegisterCFinalizer(res,finalizeparallelADFun);
#endif
    } else { // Serial mode
      TMB_TRY{
        /* Actual work: tape creation */
        pf = NULL;
        pf = MakeADFunObject_(data, parameters, report, control, -1, info);
        if (config.optimize.instantly) pf->optimize();
      }
      TMB_CATCH {
        if (pf != NULL) delete pf;
        TMB_ERROR_BAD_ALLOC;
      }
      /* Convert ADFun pointer to R_ExternalPtr */
      PROTECT(res=R_MakeExternalPtr((void*) pf,Rf_install("ADFun"),R_NilValue));
      Rf_setAttrib(res,Rf_install("range.names"),info);
    }

    /* Return list of external pointer and default-parameter */
    SEXP ans;
    Rf_setAttrib(res,Rf_install("par"),par);
    PROTECT(ans=ptrList(res));
    UNPROTECT(4); // par, info, res, ans

    return ans;
  } // MakeADFunObject
1431 #endif
1432 
1433 #ifdef CPPAD_FRAMEWORK
1434 
  /* .Call entry (CppAD): tape the user template and return an R list holding
     an external pointer ("ADFun" or "parallelADFun") with attributes "par"
     (default parameters) and, for report mode, "range.names". Under OpenMP
     one tape per parallel region is created and combined. Returns R_NilValue
     if report mode was requested but the template has no ADREPORT. */
  SEXP MakeADFunObject(SEXP data, SEXP parameters,
                       SEXP report, SEXP control)
  {
    ADFun<double>* pf = NULL;
    /* Some type checking */
    if(!Rf_isNewList(data))Rf_error("'data' must be a list");
    if(!Rf_isNewList(parameters))Rf_error("'parameters' must be a list");
    if(!Rf_isEnvironment(report))Rf_error("'report' must be an environment");
    if(!Rf_isNewList(control))Rf_error("'control' must be a list");
    int returnReport = getListInteger(control, "report");

    /* Get the default parameter vector (tiny overhead) */
    SEXP par,res=NULL,info;
    objective_function< double > F(data,parameters,report);
#ifdef _OPENMP
    int n=F.count_parallel_regions(); // Evaluates user template
#else
    F.count_parallel_regions(); // Evaluates user template
#endif
    if(returnReport && F.reportvector.size()==0){
      /* Told to report, but no ADREPORT in template: Get out quickly */
      return R_NilValue;
    }
    PROTECT(par=F.defaultpar());
    PROTECT(info=R_NilValue); // Important
    
    if(_openmp && !returnReport){ // Parallel mode
#ifdef _OPENMP
      if(config.trace.parallel)
        std::cout << n << " regions found.\n";
      if (n==0) n++; // No explicit parallel accumulation
      start_parallel(); /* Start threads */
      vector< ADFun<double>* > pfvec(n);
      const char* bad_thread_alloc = NULL;
      // Tape each parallel region on its own thread; remember the first
      // exception (if any) and re-throw after the parallel section.
#pragma omp parallel for num_threads(config.nthreads) if (config.tape.parallel && n>1)
      for(int i=0;i<n;i++){
        TMB_TRY {
          pfvec[i] = NULL;
          pfvec[i] = MakeADFunObject_(data, parameters, report, control, i, info);
          if (config.optimize.instantly) pfvec[i]->optimize();
        }
        TMB_CATCH {
          if (pfvec[i] != NULL) delete pfvec[i];
          bad_thread_alloc = excpt.what();
        }
      }
      if (bad_thread_alloc) {
        TMB_ERROR_BAD_THREAD_ALLOC;
      }
      parallelADFun<double>* ppf=new parallelADFun<double>(pfvec);
      /* Convert parallel ADFun pointer to R_ExternalPtr */
      PROTECT(res=R_MakeExternalPtr((void*) ppf,Rf_install("parallelADFun"),R_NilValue));
#endif
    } else { // Serial mode
      TMB_TRY{
        /* Actual work: tape creation */
        pf = NULL;
        pf = MakeADFunObject_(data, parameters, report, control, -1, info);
        if (config.optimize.instantly) pf->optimize();
      }
      TMB_CATCH {
        if (pf != NULL) delete pf;
        TMB_ERROR_BAD_ALLOC;
      }
      /* Convert ADFun pointer to R_ExternalPtr */
      PROTECT(res=R_MakeExternalPtr((void*) pf,Rf_install("ADFun"),R_NilValue));
      Rf_setAttrib(res,Rf_install("range.names"),info);
    }

    /* Return list of external pointer and default-parameter */
    SEXP ans;
    Rf_setAttrib(res,Rf_install("par"),par);
    PROTECT(ans=ptrList(res));
    UNPROTECT(4); // par, info, res, ans

    return ans;
  } // MakeADFunObject
1512 #endif
1513 
1514  /* --- TransformADFunObject ----------------------------------------------- */
1515 
1516 #ifdef TMBAD_FRAMEWORK
1517 inline int get_num_tapes(SEXP f) {
1518  if (Rf_isNull(f))
1519  return 0;
1520  SEXP tag = R_ExternalPtrTag(f);
1521  if (tag != Rf_install("parallelADFun"))
1522  return 0;
1523  return
1524  ((parallelADFun<double>*) R_ExternalPtrAddr(f))->ntapes;
1525 }
/* Apply an in-place transformation to a single (non-parallel) TMBad tape.
   control$method selects the transformation (copy, set_compiled, laplace,
   marginal_gk, marginal_sr, parallelize, compress, compress_and_compile,
   accumulation_tree_split, fuse_and_replay, reorder_*, optimize, ...).
   control$random_order gives 1-based indices of random-effect parameters
   where required. Always returns R_NilValue; unknown methods error. */
SEXP TransformADFunObjectTemplate(TMBad::ADFun<TMBad::ad_aug>* pf, SEXP control)
{
  if (pf == NULL)
    Rf_error("Cannot transform '<pointer: (nil)>' (unloaded/reloaded DLL?)");
  typedef TMBad::ad_aug ad;
  typedef TMBad::ADFun<ad> adfun;
  // FIXME: Must require non parallel object !!!
  std::string method =
    CHAR(STRING_ELT(getListElement(control, "method"), 0));
  // Test adfun copy
  if (method == "copy") {
    *pf = adfun(*pf);
    return R_NilValue;
  }
  if (method == "set_compiled") {
    // Install per-thread compiled forward/reverse sweep function pointers.
    int i = 0;
#ifdef _OPENMP
    i = omp_get_thread_num();
#endif
    typedef void(*fct_ptr1)(double*);
    typedef void(*fct_ptr2)(double*,double*);
    pf->glob.forward_compiled =
      (fct_ptr1) R_ExternalPtrAddr(VECTOR_ELT(getListElement(control, "forward_compiled"), i));
    pf->glob.reverse_compiled =
      (fct_ptr2) R_ExternalPtrAddr(VECTOR_ELT(getListElement(control, "reverse_compiled"), i));
    return R_NilValue;
  }
  // Optional vector of random-effect indices (converted to 0-based).
  SEXP random_order = getListElement(control, "random_order");
  int nr = (Rf_isNull(random_order) ? 0 : LENGTH(random_order));
  std::vector<TMBad::Index> random;
  if (nr != 0) {
    random = std::vector<TMBad::Index>(INTEGER(random_order),
                                       INTEGER(random_order) + nr);
    for (size_t i=0; i<random.size(); i++)
      random[i] -= 1 ; // R index -> C index
  }
  TMB_TRY {
    if (method == "remove_random_parameters") {
      // Drop the random parameters from the tape's independent variables.
      std::vector<bool> mask(pf->Domain(), true);
      for (size_t i = 0; i<random.size(); i++)
        mask[random[i]] = false;
      pf->glob.inv_index = TMBad::subset(pf->glob.inv_index, mask);
    }
    else if (method == "laplace") {
      // Replace the tape by its Laplace approximation over 'random'.
      SEXP config = getListElement(control, "config");
      newton::newton_config cfg(config);
      *pf = newton::Laplace_(*pf, random, cfg);
    }
    else if (method == "marginal_gk") {
      // Marginalize 'random' by Gauss-Kronrod quadrature.
      TMBad::gk_config cfg;
      SEXP config = getListElement(control, "config");
      if (!Rf_isNull(config)) {
        cfg.adaptive = getListInteger(config, "adaptive", 0);
        cfg.debug = getListInteger(config, "debug", 0);
      }
      *pf = pf -> marginal_gk(random, cfg);
    }
    else if (method == "marginal_sr") {
      // Marginalize 'random' by sequential reduction on user-supplied grids;
      // random2grid maps each random effect to its grid (1-based from R).
      SEXP config = getListElement(control, "config");
      std::vector<TMBad::sr_grid> grids;
      SEXP grid = getListElement(config, "grid");
      SEXP random2grid = getListElement(config, "random2grid");
      for (int i=0; i<LENGTH(grid); i++) {
        SEXP grid_i = VECTOR_ELT(grid, i);
        SEXP x = getListElement(grid_i, "x");
        SEXP w = getListElement(grid_i, "w");
        if (LENGTH(x) != LENGTH(w))
          Rf_error("Length of grid$x and grid$w must be equal");
        TMBad::sr_grid grid_sr;
        grid_sr.x = std::vector<double>(REAL(x), REAL(x) + LENGTH(x));
        grid_sr.w = std::vector<double>(REAL(w), REAL(w) + LENGTH(w));
        grids.push_back(grid_sr);
      }
      std::vector<TMBad::Index> r2g(INTEGER(random2grid),
                                    INTEGER(random2grid) + LENGTH(random2grid));
      for (size_t i=0; i<r2g.size(); i++)
        r2g[i] -= 1 ; // R index -> C index
      *pf = pf -> marginal_sr(random, grids, r2g, true);
    }
    else if (method == "parallelize")
      *pf = pf -> parallelize(2);
    else if (method == "compress") {
      int max_period_size = getListInteger(control, "max_period_size", 1024);
      TMBad::compress(pf->glob, max_period_size);
    }
    else if (method == "compress_and_compile") {
#ifdef HAVE_COMPILE_HPP
      int max_period_size = getListInteger(control, "max_period_size", 1024);
      TMBad::compress(pf->glob, max_period_size);
      // if (config.optimize.instantly) pf->glob.eliminate();
      TMBad::compile(pf->glob);
#else
      Rf_error("TMBad::compile() is unavailable");
#endif
    }
    else if (method == "accumulation_tree_split")
      pf->glob = accumulation_tree_split(pf->glob, true);
    else if (method == "fuse_and_replay") {
      pf->glob.set_fuse(true);
      pf->replay();
      pf->glob.set_fuse(false);
    }
    else if (method == "reorder_random") {
      pf->reorder(random);
    }
    else if (method == "reorder_sub_expressions") {
      // NOTE(review): branch body appears elided in this source view -
      // presumably TMBad::reorder_sub_expressions(pf->glob); confirm upstream.
    }
    else if (method == "reorder_depth_first") {
      TMBad::reorder_depth_first(pf->glob);
    }
    else if (method == "reorder_temporaries") {
      TMBad::reorder_temporaries(pf->glob);
    }
    else if (method == "parallel_accumulate") {
      // Known method - done elsewhere
    }
    else if (method == "optimize") {
      pf->optimize();
    } else {
      Rf_error("Method unknown: '%s'", method.c_str());
    }
  }
  TMB_CATCH {
    TMB_ERROR_BAD_ALLOC;
  }
  // for (size_t i=0; i<random.size(); i++) random[i] += 1 ; // C index -> R index
  // Rf_setAttrib(f, Rf_install("random_order"), asSEXP(random));
  return R_NilValue;
}
/** \brief Transform an ADFun object (TMBad framework).

    Dispatches on the external pointer tag:
    - "ADFun": apply TransformADFunObjectTemplate() to the single tape.
    - "parallelADFun": apply the method to every component tape, except for
      the special method 'parallel_accumulate' which splits a single tape
      into several chunks and replaces the pointer in-place.

    \param f External pointer tagged "ADFun" or "parallelADFun".
    \param control R list holding at least a character element "method".
    \return R_NilValue (the object behind 'f' is modified in-place).
*/
SEXP TransformADFunObject(SEXP f, SEXP control)
{
  if (Rf_isNull(f))
    Rf_error("Expected external pointer - got NULL");
  SEXP tag = R_ExternalPtrTag(f);
  // Only the two known pointer types are accepted
  if (tag != Rf_install("ADFun"))
    if (tag != Rf_install("parallelADFun"))
      Rf_error("Expected ADFun or parallelADFun pointer");
  typedef TMBad::ad_aug ad;
  typedef TMBad::ADFun<ad> adfun;
  if(tag == Rf_install("ADFun")) {
    adfun* pf = (adfun*) R_ExternalPtrAddr(f);
    TransformADFunObjectTemplate(pf, control);
  } else if (tag == Rf_install("parallelADFun")) {
    // Warning: Most methods are not meaningful for parallel models!:
    // OK : reorder_random etc
    // NOT OK : copy, set_compiled, marginal_sr etc
    parallelADFun<double>* ppf = (parallelADFun<double>*) R_ExternalPtrAddr(f);
    // Apply method for each component except for one special case:
    // 'Parallel accumulate'
    std::string method =
      CHAR(STRING_ELT(getListElement(control, "method"), 0));
    if (method == "parallel_accumulate") {
      int num_threads = getListInteger(control, "num_threads", 2);
      if (num_threads == 1) {
        // No need to parallelize
        return R_NilValue;
      }
      if (get_num_tapes(f) > 1) {
        // Already parallel (via parallel_accumulator or similar)
        return R_NilValue;
      }
      adfun* pf = (ppf->vecpf)[0]; // One tape - get it
      std::vector<adfun> vf = pf->parallel_accumulate(num_threads);
      if (config.trace.parallel) {
        Rcout << "Autopar work split\n";
        for (size_t i=0; i < vf.size(); i++) {
          Rcout << "Chunk " << i << ": ";
          // Fraction of operator stack assigned to this chunk
          Rcout << (double) vf[i].glob.opstack.size() / pf->glob.opstack.size() << "\n";
        }
      }
      // Swap the old parallel object for the split one; the external
      // pointer's address is updated in-place (old object is freed here).
      parallelADFun<double>* new_ppf = new parallelADFun<double>(vf);
      delete ppf;
      R_SetExternalPtrAddr(f, new_ppf);
      return R_NilValue;
    }
#ifdef _OPENMP
#pragma omp parallel for num_threads(config.nthreads)
#endif
    for (int i=0; i<ppf->ntapes; i++) {
      adfun* pf = (ppf->vecpf)[i];
      TransformADFunObjectTemplate(pf, control);
    }
    // Some methods change Domain or Range of individual tapes. This
    // is allowed when there is only one tape.
    if (ppf->ntapes == 1) {
      ppf->domain = (ppf->vecpf)[0]->Domain();
      ppf->range = (ppf->vecpf)[0]->Range();
    }
    // Now, check that it's ok. FIXME: Range() is not checked
    for (int i=0; i<ppf->ntapes; i++) {
      if (ppf->domain != (ppf->vecpf)[i]->Domain())
        Rf_warning("Domain has changed in an invalid way");
    }
  } else {
    Rf_error("Unknown function pointer");
  }
  return R_NilValue;
}
1726 #endif
1727 
1728 #ifdef CPPAD_FRAMEWORK
1729 
1730 SEXP TransformADFunObject(SEXP f, SEXP control)
1731 {
1732  int mustWork = getListInteger(control, "mustWork", 1);
1733  if (mustWork)
1734  Rf_error("Not supported for CPPAD_FRAMEWORK");
1735  return R_NilValue;
1736 }
1737 #endif
1738 
1739  /* --- InfoADFunObject ---------------------------------------------------- */
1740 
1741 #ifdef TMBAD_FRAMEWORK
  /** \brief Collect diagnostics for a TMBad ADFun object.

      Returns a named R list with: activeDomain, opstack_size, values_size,
      inputs_size, Domain and Range. Only supported when the object holds a
      single tape (one thread); a single-tape parallelADFun is unwrapped.
  */
  SEXP InfoADFunObject(SEXP f) {
    typedef TMBad::ad_aug ad;
    typedef TMBad::ADFun<ad> adfun;
    if (Rf_isNull(f)) Rf_error("Expected external pointer - got NULL");
    int num_tapes = get_num_tapes(f);
    if (num_tapes >= 2)
      Rf_error("'InfoADFunObject' is only available for tapes with one thread");
    adfun* pf;
    if (num_tapes == 0)
      pf = (adfun*) R_ExternalPtrAddr(f);
    else {
      // parallelADFun carrying exactly one tape - use that tape
      pf = ( (parallelADFun<double>*) R_ExternalPtrAddr(f) ) -> vecpf[0];
    }
    SEXP ans, names;
    PROTECT(ans = Rf_allocVector(VECSXP, 6));
    PROTECT(names = Rf_allocVector(STRSXP, 6));
    int i = 0;
    // Store EXPR in slot i and use its source text (#EXPR) as the name
#define GET_INFO(EXPR) \
  SET_VECTOR_ELT(ans, i, asSEXP(EXPR)); \
  SET_STRING_ELT(names, i, Rf_mkChar(#EXPR)); \
  i++;
    // begin
    std::vector<bool> a = pf -> activeDomain();
    std::vector<int> ai(a.begin(), a.end());
    vector<int> activeDomain(ai);
    GET_INFO(activeDomain);
    int opstack_size = pf->glob.opstack.size();
    GET_INFO(opstack_size);
    int values_size = pf->glob.values.size();
    GET_INFO(values_size);
    int inputs_size = pf->glob.inputs.size();
    GET_INFO(inputs_size);
    int Domain = pf->Domain();
    GET_INFO(Domain);
    int Range = pf->Range();
    GET_INFO(Range);
    // end
#undef GET_INFO
    Rf_setAttrib(ans,R_NamesSymbol,names);
    UNPROTECT(2);
    return ans;
  }
1784 #endif
1785 
1786 #ifdef CPPAD_FRAMEWORK
  /** \brief Collect diagnostics for a CppAD ADFun object.

      Returns a named R list of 12 integer tape statistics, each obtained
      by calling the correspondingly named CppAD::ADFun member function
      (Domain, Range, size_op, ..., Memory).
  */
  SEXP InfoADFunObject(SEXP f)
  {
    ADFun<double>* pf;
    pf = (ADFun<double>*) R_ExternalPtrAddr(f);
    SEXP ans, names;
    PROTECT(ans = Rf_allocVector(VECSXP, 12));
    PROTECT(names = Rf_allocVector(STRSXP, 12));
    int i = 0;
    // Store pf->MEMBER() in slot i; the member name becomes the list name
#define GET_MORE_INFO(MEMBER) \
  SET_VECTOR_ELT(ans, i, asSEXP(int(pf->MEMBER()))); \
  SET_STRING_ELT(names, i, Rf_mkChar(#MEMBER)); \
  i++;
    GET_MORE_INFO(Domain);
    GET_MORE_INFO(Range);
    GET_MORE_INFO(size_op);
    GET_MORE_INFO(size_op_arg);
    GET_MORE_INFO(size_op_seq);
    GET_MORE_INFO(size_par);
    GET_MORE_INFO(size_order);
    GET_MORE_INFO(size_direction);
    GET_MORE_INFO(size_text);
    GET_MORE_INFO(size_var);
    GET_MORE_INFO(size_VecAD);
    GET_MORE_INFO(Memory);
#undef GET_MORE_INFO
    Rf_setAttrib(ans,R_NamesSymbol,names);
    UNPROTECT(2);
    return ans;
  }
1816 #endif
1817 
1818 #ifdef CPPAD_FRAMEWORK
1819 
1820  SEXP optimizeADFunObject(SEXP f)
1821  {
1822  SEXP tag=R_ExternalPtrTag(f);
1823  if(tag == Rf_install("ADFun")){
1824  ADFun<double>* pf;
1825  pf=(ADFun<double>*)R_ExternalPtrAddr(f);
1826  pf->optimize();
1827  }
1828  if(tag == Rf_install("parallelADFun")){
1829  parallelADFun<double>* pf;
1830  pf=(parallelADFun<double>*)R_ExternalPtrAddr(f);
1831  pf->optimize();
1832  }
1833  return R_NilValue;
1834  }
1835 #endif
1836 
1838  SEXP getTag(SEXP f){
1839  return R_ExternalPtrTag(f);
1840  }
1841 
1842 #ifdef TMBAD_FRAMEWORK
  /** \brief Evaluate a TMBad ADFun object.

      Dispatches on the external pointer tag ("ADFun" / "parallelADFun")
      to the shared EvalADFunObjectTemplate implementation.
      \param f External pointer to the function object.
      \param theta Parameter vector to evaluate at.
      \param control R list interpreted by EvalADFunObjectTemplate.
  */
  SEXP EvalADFunObject(SEXP f, SEXP theta, SEXP control)
  {
    typedef TMBad::ad_aug ad;
    typedef TMBad::ADFun<ad> adfun;
    TMB_TRY {
      if(Rf_isNull(f))Rf_error("Expected external pointer - got NULL");
      SEXP tag=R_ExternalPtrTag(f);
      if(tag == Rf_install("ADFun"))
        return EvalADFunObjectTemplate< adfun >(f,theta,control);
      if(tag == Rf_install("parallelADFun"))
        return EvalADFunObjectTemplate<parallelADFun<double> >(f,theta,control);
      Rf_error("NOT A KNOWN FUNCTION POINTER");
    }
    TMB_CATCH {
      // Translate C++ allocation failure into an R error
      TMB_ERROR_BAD_ALLOC;
    }
  }
1860 #endif
1861 
1862 #ifdef CPPAD_FRAMEWORK
  /** \brief Evaluate a CppAD ADFun object.

      Dispatches on the external pointer tag ("ADFun" / "parallelADFun")
      to the shared EvalADFunObjectTemplate implementation.
  */
  SEXP EvalADFunObject(SEXP f, SEXP theta, SEXP control)
  {
    TMB_TRY {
      if(Rf_isNull(f))Rf_error("Expected external pointer - got NULL");
      SEXP tag=R_ExternalPtrTag(f);
      if(tag == Rf_install("ADFun"))
        return EvalADFunObjectTemplate<ADFun<double> >(f,theta,control);
      if(tag == Rf_install("parallelADFun"))
        return EvalADFunObjectTemplate<parallelADFun<double> >(f,theta,control);
      Rf_error("NOT A KNOWN FUNCTION POINTER");
    }
    TMB_CATCH {
      // Translate C++ allocation failure into an R error
      TMB_ERROR_BAD_ALLOC;
    }
  }
1878 #endif
1879 
/** \brief Get or set TMBad's global pointer.

    With a NULL argument the current pointer is just returned; with a
    non-NULL external pointer (tag "global_ptr") TMBad::global_ptr is first
    replaced by the supplied address. Always returns the (possibly updated)
    pointer wrapped as an external pointer. Returns NULL without TMBad.
*/
SEXP getSetGlobalPtr(SEXP ptr) {
#ifdef TMBAD_FRAMEWORK
  SEXP global_ptr_tag = Rf_install("global_ptr");
  if (!Rf_isNull(ptr)) {
    SEXP tag = R_ExternalPtrTag(ptr);
    if (tag != global_ptr_tag) Rf_error("Invalid pointer type");
    TMBad::global_ptr = (TMBad::global**) R_ExternalPtrAddr(ptr);
  }
  SEXP res = R_MakeExternalPtr( (void*) TMBad::global_ptr, global_ptr_tag, R_NilValue);
  return res;
#else
  return R_NilValue;
#endif
}
1894 
1895  SEXP tmbad_print(SEXP f, SEXP control) {
1896 #ifdef TMBAD_FRAMEWORK
1897  typedef TMBad::ad_aug ad;
1898  typedef TMBad::ADFun<ad> adfun;
1899  int num_tapes = get_num_tapes(f);
1900  adfun* pf;
1901  if (num_tapes == 0)
1902  pf = (adfun*) R_ExternalPtrAddr(f);
1903  else {
1904  int i = getListInteger(control, "i", 0);
1905  pf = ( (parallelADFun<double>*) R_ExternalPtrAddr(f) ) -> vecpf[i];
1906  }
1907  std::string method =
1908  CHAR(STRING_ELT(getListElement(control, "method"), 0));
1909  if (method == "num_tapes") { // Get number of tapes
1910  return Rf_ScalarInteger(num_tapes);
1911  }
1912  else if (method == "tape") { // Print tape
1913  int depth = getListInteger(control, "depth", 1);
1915  cfg.depth = depth;
1916  pf->glob.print(cfg);
1917  }
1918  else if (method == "dot") { // Print dot format
1919  graph2dot(pf->glob, true, Rcout);
1920  }
1921  else if (method == "inv_index") { // Print member
1922  using TMBad::operator<<;
1923  Rcout << pf->glob.inv_index << "\n";
1924  }
1925  else if (method == "dep_index") { // Print member
1926  using TMBad::operator<<;
1927  Rcout << pf->glob.dep_index << "\n";
1928  }
1929  else if (method == "src") { // Print C src code
1930  TMBad::code_config cfg;
1931  cfg.gpu = false;
1932  cfg.asm_comments = false;
1933  cfg.cout = &Rcout;
1934  *cfg.cout << "#include <cmath>" << std::endl;
1935  *cfg.cout
1936  << "template<class T>T sign(const T &x) { return (x > 0) - (x < 0); }"
1937  << std::endl;
1938  TMBad::global glob = pf->glob; // Invoke deep copy
1939  TMBad::compress(glob);
1940  write_forward(glob, cfg);
1941  write_reverse(glob, cfg);
1942  }
1943  else if (method == "op") {
1944  int name = getListInteger(control, "name", 0);
1945  int address = getListInteger(control, "address", 0);
1946  int input_size = getListInteger(control, "input_size", 0);
1947  int output_size = getListInteger(control, "output_size", 0);
1948  size_t n = pf->glob.opstack.size();
1949  SEXP ans = PROTECT(Rf_allocVector(STRSXP, n));
1950  for (size_t i=0; i<n; i++) {
1951  std::stringstream strm;
1952  if (address) strm << (void*) pf->glob.opstack[i] << " ";
1953  if (name) strm << pf->glob.opstack[i]->op_name() << " ";
1954  if (input_size) strm << pf->glob.opstack[i]->input_size();
1955  if (output_size) strm << pf->glob.opstack[i]->output_size();
1956  const std::string& tmp = strm.str();
1957  SET_STRING_ELT(ans, i, Rf_mkChar(tmp.c_str()));
1958  }
1959  UNPROTECT(1);
1960  return ans;
1961  }
1962  else {
1963  Rf_error("Unknown method: %s", method.c_str());
1964  }
1965 #endif
1966  return R_NilValue;
1967  }
1968 
1969 }
1970 
1971 /* Double interface */
1972 extern "C"
1973 {
1974 
1975  /* How to garbage collect a DoubleFun object pointer */
  /* How to garbage collect a DoubleFun object pointer.
     Registered as the R finalizer: frees the C++ objective function and
     notifies the memory manager that this external pointer is gone. */
  void finalizeDoubleFun(SEXP x)
  {
    objective_function<double>* ptr=(objective_function<double>*)R_ExternalPtrAddr(x);
    if(ptr!=NULL)delete ptr;
    memory_manager.CallCFinalizer(x);
  }
1982 
  /** \brief Construct an objective_function<double> and wrap it as an
      external pointer (tag "DoubleFun") inside a ptrList.
      \param data R list of data objects.
      \param parameters R list of parameter objects.
      \param report R environment receiving REPORT() output.
      \param control Unused here.
  */
  SEXP MakeDoubleFunObject(SEXP data, SEXP parameters, SEXP report, SEXP control)
  {
    /* Some type checking */
    if(!Rf_isNewList(data))Rf_error("'data' must be a list");
    if(!Rf_isNewList(parameters))Rf_error("'parameters' must be a list");
    if(!Rf_isEnvironment(report))Rf_error("'report' must be an environment");

    /* Create DoubleFun pointer */
    objective_function<double>* pF = NULL;
    TMB_TRY {
      pF = new objective_function<double>(data,parameters,report);
    }
    TMB_CATCH {
      // Constructor threw: release any partially constructed object
      if (pF != NULL) delete pF;
      TMB_ERROR_BAD_ALLOC;
    }

    /* Convert DoubleFun pointer to R_ExternalPtr */
    SEXP res,ans;
    PROTECT(res=R_MakeExternalPtr((void*) pF,Rf_install("DoubleFun"),R_NilValue));
    PROTECT(ans=ptrList(res));
    UNPROTECT(2);
    return ans;
  }
2007 
2008 
  /** \brief Evaluate the objective function in plain double mode.

      control flags:
      - do_simulate: run with simulation enabled (R RNG state is
        fetched before and written back after the call).
      - get_reportdims: attach dimensions of REPORT()ed objects as the
        "reportdims" attribute of the result.
      \param theta Numeric parameter vector (length must match).
      \return The objective value as an R scalar.
  */
  SEXP EvalDoubleFunObject(SEXP f, SEXP theta, SEXP control)
  {
    TMB_TRY {
      int do_simulate = getListInteger(control, "do_simulate");
      int get_reportdims = getListInteger(control, "get_reportdims");
      objective_function<double>* pf;
      pf = (objective_function<double>*) R_ExternalPtrAddr(f);
      pf -> sync_data();
      PROTECT( theta=Rf_coerceVector(theta,REALSXP) );
      int n = pf->theta.size();
      if (LENGTH(theta)!=n) Rf_error("Wrong parameter length.");
      vector<double> x(n);
      for(int i=0;i<n;i++) x[i] = REAL(theta)[i];
      pf->theta=x;
      /* Since we are actually evaluating objective_function::operator() (not
         an ADFun object) we should remember to initialize parameter-index. */
      pf->index=0;
      pf->parnames.resize(0); // To avoid mem leak.
      pf->reportvector.clear();
      SEXP res;
      GetRNGstate(); /* Get seed from R */
      if(do_simulate) pf->set_simulate( true );
      PROTECT( res = asSEXP( pf->operator()() ) );
      if(do_simulate) {
        pf->set_simulate( false );
        PutRNGstate(); /* Write seed back to R */
      }
      if(get_reportdims) {
        SEXP reportdims;
        PROTECT( reportdims = pf -> reportvector.reportdims() );
        Rf_setAttrib( res, Rf_install("reportdims"), reportdims);
        UNPROTECT(1);
      }
      UNPROTECT(2); // theta, res
      return res;
    }
    TMB_CATCH {
      TMB_ERROR_BAD_ALLOC;
    }
  }
2049 
2053  SEXP getParameterOrder(SEXP data, SEXP parameters, SEXP report, SEXP control)
2054  {
2055  TMB_TRY {
2056  /* Some type checking */
2057  if(!Rf_isNewList(data))Rf_error("'data' must be a list");
2058  if(!Rf_isNewList(parameters))Rf_error("'parameters' must be a list");
2059  if(!Rf_isEnvironment(report))Rf_error("'report' must be an environment");
2060  objective_function<double> F(data,parameters,report);
2061  F(); // Run through user template
2062  return F.parNames();
2063  }
2064  TMB_CATCH {
2065  TMB_ERROR_BAD_ALLOC;
2066  }
2067  }
2068 
2069 } /* Double interface */
2070 
2071 
2072 #ifdef TMBAD_FRAMEWORK
/** \brief Build the gradient tape for one parallel region (TMBad).

    If control$f supplies an existing function tape it is reused (for a
    parallel object, the component for 'parallel_region'); otherwise a new
    tape is created and freed again after differentiation.
    control$random optionally restricts the gradient to a tail of the
    parameter vector (1-based R index of the first random parameter).
    \return Newly allocated gradient tape; caller takes ownership.
*/
TMBad::ADFun< TMBad::ad_aug >* MakeADGradObject_(SEXP data, SEXP parameters, SEXP report, SEXP control, int parallel_region=-1)
{
  typedef TMBad::ad_aug ad;
  typedef TMBad::ADFun<ad> adfun;
  SEXP f = getListElement(control, "f");
  adfun* pf;
  bool allocate_new_pf = ( f == R_NilValue );
  if ( ! allocate_new_pf ) {
    if (parallel_region == -1)
      pf = (adfun*) R_ExternalPtrAddr(f);
    else
      pf = ((parallelADFun<double>*) R_ExternalPtrAddr(f))->vecpf[parallel_region];
  } else {
    SEXP control_adfun = R_NilValue;
    pf = MakeADFunObject_(data, parameters, report, control_adfun, parallel_region);
  }
  // Optionally skip gradient components (only need 'random' part of gradient)
  SEXP random = getListElement(control, "random");
  if (random != R_NilValue) {
    int set_tail = INTEGER(random)[0] - 1; // R index -> C index
    std::vector<TMBad::Index> r(1, set_tail);
    pf -> set_tail(r);
  }
  adfun* pgf = new adfun (pf->JacFun());
  pf -> unset_tail(); // Not really needed
  if (allocate_new_pf) delete pf; // Only free the tape we created ourselves
  return pgf;
}
2101 #endif
2102 
2103 #ifdef CPPAD_FRAMEWORK
/** \brief Build the gradient tape for one parallel region (CppAD).

    Tapes the user template with nested AD types, then tapes the Jacobian
    of that tape to obtain a gradient function R^n -> R^n.
    \return Newly allocated gradient tape; caller takes ownership.
*/
ADFun< double >* MakeADGradObject_(SEXP data, SEXP parameters, SEXP report, SEXP control, int parallel_region=-1)
{
  /* Create ADFun pointer */
  objective_function< AD<AD<double> > > F(data,parameters,report);
  F.set_parallel_region(parallel_region);
  int n=F.theta.size();
  Independent(F.theta);
  vector< AD<AD<double> > > y(1);
  y[0]=F.evalUserTemplate();
  ADFun<AD<double> > tmp(F.theta,y);
  tmp.optimize(); /* Remove 'dead' operations (could result in nan derivatives) */
  // Re-tape one AD level down: record the Jacobian sweep itself
  vector<AD<double> > x(n);
  for(int i=0;i<n;i++)x[i]=CppAD::Value(F.theta[i]);
  vector<AD<double> > yy(n);
  Independent(x);
  yy=tmp.Jacobian(x);
  ADFun< double >* pf = new ADFun< double >(x,yy);
  return pf;
}
2123 #endif
2124 
2125 extern "C"
2126 {
2127 #ifdef TMBAD_FRAMEWORK
2128 
  /** \brief Tape the gradient of the objective function (TMBad).

      In parallel mode one gradient tape is created per parallel region
      and the collection is wrapped as a "parallelADFun" pointer; in serial
      mode a single "ADFun" pointer is returned. The default parameter
      vector is attached as attribute "par"; the result is a ptrList.
  */
  SEXP MakeADGradObject(SEXP data, SEXP parameters, SEXP report, SEXP control)
  {
    typedef TMBad::ad_aug ad;
    typedef TMBad::ADFun<ad> adfun;

    adfun* pf = NULL;
    /* Some type checking */
    if(!Rf_isNewList(data))Rf_error("'data' must be a list");
    if(!Rf_isNewList(parameters))Rf_error("'parameters' must be a list");
    if(!Rf_isEnvironment(report))Rf_error("'report' must be an environment");

    /* Get the default parameter vector (tiny overhead) */
    SEXP par,res=NULL;
    objective_function< double > F(data,parameters,report);
#ifdef _OPENMP
    SEXP f = getListElement(control, "f");
    int n = get_num_tapes(f);
    if (n==0) // No tapes? Count!
      n = F.count_parallel_regions(); // Evaluates user template
#else
    F.count_parallel_regions(); // Evaluates user template
#endif
    PROTECT(par=F.defaultpar());

    if(_openmp){ // Parallel mode
#ifdef _OPENMP
      if(config.trace.parallel)
        std::cout << n << " regions found.\n";
      if (n==0) n++; // No explicit parallel accumulation
      start_parallel(); /* Start threads */
      vector< adfun* > pfvec(n);
      const char* bad_thread_alloc = NULL;
#pragma omp parallel for num_threads(config.nthreads) if (config.tape.parallel && n>1)
      for(int i=0;i<n;i++){
        TMB_TRY {
          pfvec[i] = NULL;
          pfvec[i] = MakeADGradObject_(data, parameters, report, control, i);
          if (config.optimize.instantly) pfvec[i]->optimize();
        }
        TMB_CATCH {
          // Record failure; cannot raise an R error inside an OpenMP region
          if (pfvec[i] != NULL) delete pfvec[i];
          bad_thread_alloc = excpt.what();
        }
      }
      if (bad_thread_alloc) {
        TMB_ERROR_BAD_THREAD_ALLOC;
      }
      parallelADFun<double>* ppf=new parallelADFun<double>(pfvec);
      /* Convert parallel ADFun pointer to R_ExternalPtr */
      PROTECT(res=R_MakeExternalPtr((void*) ppf,Rf_install("parallelADFun"),R_NilValue));
      // R_RegisterCFinalizer(res,finalizeparallelADFun);
#endif
    } else { // Serial mode
      /* Actual work: tape creation */
      TMB_TRY {
        pf = NULL;
        pf = MakeADGradObject_(data, parameters, report, control, -1);
        if(config.optimize.instantly)pf->optimize();
      }
      TMB_CATCH {
        if (pf != NULL) delete pf;
        TMB_ERROR_BAD_ALLOC;
      }
      /* Convert ADFun pointer to R_ExternalPtr */
      PROTECT(res=R_MakeExternalPtr((void*) pf,Rf_install("ADFun"),R_NilValue));
    }

    /* Return ptrList */
    SEXP ans;
    Rf_setAttrib(res,Rf_install("par"),par);
    PROTECT(ans=ptrList(res));
    UNPROTECT(3); // par, res, ans
    return ans;
  } // MakeADGradObject
2203 #endif
2204 
2205 #ifdef CPPAD_FRAMEWORK
2206 
  /** \brief Tape the gradient of the objective function (CppAD).

      Mirrors the TMBad version: per-region gradient tapes wrapped as
      "parallelADFun" in parallel mode, a single "ADFun" pointer in serial
      mode. Default parameters are attached as attribute "par".
  */
  SEXP MakeADGradObject(SEXP data, SEXP parameters, SEXP report, SEXP control)
  {
    ADFun<double>* pf = NULL;
    /* Some type checking */
    if(!Rf_isNewList(data))Rf_error("'data' must be a list");
    if(!Rf_isNewList(parameters))Rf_error("'parameters' must be a list");
    if(!Rf_isEnvironment(report))Rf_error("'report' must be an environment");

    /* Get the default parameter vector (tiny overhead) */
    SEXP par,res=NULL;
    objective_function< double > F(data,parameters,report);
#ifdef _OPENMP
    int n=F.count_parallel_regions(); // Evaluates user template
#else
    F.count_parallel_regions(); // Evaluates user template
#endif
    PROTECT(par=F.defaultpar());

    if(_openmp){ // Parallel mode
#ifdef _OPENMP
      if(config.trace.parallel)
        std::cout << n << " regions found.\n";
      if (n==0) n++; // No explicit parallel accumulation
      start_parallel(); /* Start threads */
      vector< ADFun<double>* > pfvec(n);
      const char* bad_thread_alloc = NULL;
#pragma omp parallel for num_threads(config.nthreads) if (config.tape.parallel && n>1)
      for(int i=0;i<n;i++){
        TMB_TRY {
          pfvec[i] = NULL;
          pfvec[i] = MakeADGradObject_(data, parameters, report, control, i);
          if (config.optimize.instantly) pfvec[i]->optimize();
        }
        TMB_CATCH {
          // Record failure; cannot raise an R error inside an OpenMP region
          if (pfvec[i] != NULL) delete pfvec[i];
          bad_thread_alloc = excpt.what();
        }
      }
      if (bad_thread_alloc) {
        TMB_ERROR_BAD_THREAD_ALLOC;
      }
      parallelADFun<double>* ppf=new parallelADFun<double>(pfvec);
      /* Convert parallel ADFun pointer to R_ExternalPtr */
      PROTECT(res=R_MakeExternalPtr((void*) ppf,Rf_install("parallelADFun"),R_NilValue));
#endif
    } else { // Serial mode
      /* Actual work: tape creation */
      TMB_TRY {
        pf = NULL;
        pf = MakeADGradObject_(data, parameters, report, control, -1);
        if(config.optimize.instantly)pf->optimize();
      }
      TMB_CATCH {
        if (pf != NULL) delete pf;
        TMB_ERROR_BAD_ALLOC;
      }
      /* Convert ADFun pointer to R_ExternalPtr */
      PROTECT(res=R_MakeExternalPtr((void*) pf,Rf_install("ADFun"),R_NilValue));
    }

    /* Return ptrList */
    SEXP ans;
    Rf_setAttrib(res,Rf_install("par"),par);
    PROTECT(ans=ptrList(res));
    UNPROTECT(3); // par, res, ans
    return ans;
  } // MakeADGradObject
2274 #endif
2275 }
2276 
2277 
2284 #ifdef TMBAD_FRAMEWORK
/** \brief Build the sparse Hessian tape for one parallel region (TMBad).

    Starts from a gradient tape (reused from control$gf or created here),
    computes its sparse Jacobian restricted to the non-skipped columns,
    and keeps the lower triangle in column-major order.
    control$skip holds 1-based parameter indices to exclude.
    \return sphess_t holding a newly allocated tape plus (i,j) indices.
*/
sphess_t< TMBad::ADFun< TMBad::ad_aug > > MakeADHessObject2_(SEXP data, SEXP parameters, SEXP report, SEXP control, int parallel_region=-1)
{
  typedef TMBad::ad_aug ad;
  typedef TMBad::ADFun<ad> adfun;
  typedef sphess_t<adfun> sphess;
  SEXP gf = getListElement(control, "gf");
  adfun* pgf;
  bool allocate_new_pgf = ( gf == R_NilValue );
  if ( ! allocate_new_pgf ) {
    if (parallel_region == -1)
      pgf = (adfun*) R_ExternalPtrAddr(gf);
    else
      pgf = ((parallelADFun<double>*) R_ExternalPtrAddr(gf))->vecpf[parallel_region];
  } else {
    SEXP control_adgrad = R_NilValue;
    pgf = MakeADGradObject_(data, parameters, report, control_adgrad, parallel_region);
  }
  if (config.optimize.instantly) pgf->optimize();
  int n = pgf->Domain();
  std::vector<bool> keepcol(n, true);
  SEXP skip = getListElement(control, "skip");
  for(int i=0; i<LENGTH(skip); i++) {
    keepcol[ INTEGER(skip)[i] - 1 ] = false; // skip is R-index !
  }
  TMBad::SpJacFun_config spjacfun_cfg;
  spjacfun_cfg.index_remap = false;
  spjacfun_cfg.compress = config.tmbad.sparse_hessian_compress;
  TMBad::Sparse<adfun> h = pgf->SpJacFun(keepcol, keepcol, spjacfun_cfg);
  if (allocate_new_pgf) delete pgf; // Free only the tape we created here
  // NB: Lower triangle, column major =
  // Transpose of upper triangle, row major
  h.subset_inplace( h.row() <= h.col() ); // Upper triangle, row major
  h.transpose_inplace(); // Lower triangle, col major
  if (config.optimize.instantly) // Optimize now or later ?
    h.optimize();
  adfun* phf = new adfun( h );
  // Convert h.i and h.j to vector<int>
  vector<TMBad::Index> h_i(h.i);
  vector<TMBad::Index> h_j(h.j);
  sphess ans(phf, h_i.cast<int>(), h_j.cast<int>());
  return ans;
} // MakeADHessObject2
2327 #endif
2328 
2335 #ifdef CPPAD_FRAMEWORK
/** \brief Build the sparse Hessian tape for one parallel region (CppAD).

    Three-level taping: (1) tape the user template, (2) tape its Jacobian
    (the gradient), (3) tape reverse sweeps of the gradient to obtain only
    the structurally non-zero lower-triangle Hessian entries.
    control$skip holds 1-based parameter indices to exclude.
    \return sphess holding a newly allocated tape plus (row,col) indices.
*/
sphess MakeADHessObject2_(SEXP data, SEXP parameters, SEXP report, SEXP control, int parallel_region=-1)
{
  /* Some type checking */
  if(!Rf_isNewList(data))Rf_error("'data' must be a list");
  if(!Rf_isNewList(parameters))Rf_error("'parameters' must be a list");
  if(!Rf_isEnvironment(report))Rf_error("'report' must be an environment");

  /* Prepare stuff */
  objective_function< AD<AD<AD<double> > > > F(data,parameters,report);
  F.set_parallel_region(parallel_region);
  int n = F.theta.size();
  SEXP skip = getListElement(control, "skip");
  vector<bool> keepcol(n); // Scatter for fast lookup
  for(int i=0; i<n; i++){
    keepcol[i]=true;
  }
  for(int i=0; i<LENGTH(skip); i++){
    keepcol[INTEGER(skip)[i]-1]=false; // skip is R-index !
  }
  // Keep only lower-triangle entries of non-skipped columns
#define KEEP_COL(col) (keepcol[col])
#define KEEP_ROW(row,col) ( KEEP_COL(row) && (row>=col) )

  /* Tape 1: Function R^n -> R */
  Independent(F.theta);
  vector< AD<AD<AD<double> > > > y(1);
  y[0] = F.evalUserTemplate();
  ADFun<AD<AD<double> > > tape1(F.theta, y);

  /* Tape 2: Gradient R^n -> R^n (and optimize) */
  vector<AD<AD<double> > > xx(n);
  for(int i=0; i<n; i++) xx[i] = CppAD::Value(F.theta[i]);
  vector<AD<AD<double> > > yy(n);
  Independent(xx);
  yy = tape1.Jacobian(xx);
  ADFun<AD<double > > tape2(xx,yy);
  if (config.optimize.instantly) tape2.optimize();

  /* Tape 3: Hessian R^n -> R^m (optimize later) */
  tape2.my_init(keepcol);
  int colisize;
  int m=0; // Count number of non-zeros (m)
  for(int i=0; i<int(tape2.colpattern.size()); i++){
    colisize = tape2.colpattern[i].size();
    if(KEEP_COL(i)){
      for(int j=0; j<colisize; j++){
        m += KEEP_ROW( tape2.colpattern[i][j] , i);
      }
    }
  }
  // Allocate index vectors of non-zero pairs
  vector<int> rowindex(m);
  vector<int> colindex(m);
  // Prepare reverse sweep for Hessian columns
  vector<AD<double> > u(n);
  vector<AD<double> > v(n);
  for(int i = 0; i < n; i++) v[i] = 0.0;
  vector<AD<double> > xxx(n);
  for(int i=0; i<n; i++) xxx[i]=CppAD::Value(CppAD::Value(F.theta[i]));
  vector<AD<double> > yyy(m);
  CppAD::vector<int>* icol;
  // Do sweeps and fill in non-zero index pairs
  Independent(xxx);
  tape2.Forward(0, xxx);
  int k=0;
  for(int i = 0; i < n; i++){
    if (KEEP_COL(i)) {
      // One reverse sweep per kept column gives Hessian column i
      tape2.myReverse(1, v, i /*range comp*/, u /*domain*/);
      icol = &tape2.colpattern[i];
      for(int j=0; j<int(icol->size()); j++){
        if(KEEP_ROW( icol->operator[](j), i )){
          rowindex[k] = icol->operator[](j);
          colindex[k] = i;
          yyy[k] = u[icol->operator[](j)];
          k++;
        }
      }
    }
  }
  ADFun< double >* ptape3 = new ADFun< double >;
  ptape3->Dependent(xxx,yyy);
  sphess ans(ptape3, rowindex, colindex);
  return ans;
} // MakeADHessObject2
2419 #endif
2420 
// kasper: Move to new file e.g. "convert.hpp"
/** \brief Convert a sparse Hessian structure to an R external pointer.

    Wraps H.pf as an external pointer with the given tag and attaches the
    sparsity indices (attributes "i" and "j") plus a NULL "par" attribute.
    \return The pointer wrapped by ptrList().
*/
template <class ADFunType>
SEXP asSEXP(const sphess_t<ADFunType> &H, const char* tag)
{
  SEXP par;
  par=R_NilValue;
  /* Convert ADFun pointer to R_ExternalPtr */
  SEXP res;
  PROTECT( res = R_MakeExternalPtr((void*) H.pf, Rf_install(tag), R_NilValue) );
  /* Return list */
  SEXP ans;
  /* Implicitly protected temporaries */
  SEXP par_symbol = Rf_install("par");
  SEXP i_symbol = Rf_install("i");
  SEXP j_symbol = Rf_install("j");
  Rf_setAttrib(res, par_symbol, par);
  Rf_setAttrib(res, i_symbol, asSEXP(H.i));
  Rf_setAttrib(res, j_symbol, asSEXP(H.j));
  PROTECT(ans=ptrList(res));
  UNPROTECT(2);
  return ans;
}
2444 
2445 
2446 extern "C"
2447 {
2448 
2449 #ifdef TMBAD_FRAMEWORK
2450 #ifdef _OPENMP
  /** \brief Tape the sparse Hessian (TMBad, OpenMP build).
      One sparse Hessian per parallel region, combined into a
      "parallelADFun" external pointer. */
  SEXP MakeADHessObject2(SEXP data, SEXP parameters, SEXP report, SEXP control){
    typedef TMBad::ad_aug ad;
    typedef TMBad::ADFun<ad> adfun;
    typedef sphess_t<adfun> sphess;
    if(config.trace.parallel)
      std::cout << "Count num parallel regions\n";
    objective_function< double > F(data,parameters,report);
    SEXP gf = getListElement(control, "gf");
    int n = get_num_tapes(gf);
    if (n==0) // No tapes? Count!
      n = F.count_parallel_regions(); // Evaluates user template
    if(config.trace.parallel)
      std::cout << n << " regions found.\n";
    if (n==0) n++; // No explicit parallel accumulation
    start_parallel(); /* FIXME: not needed */
    /* parallel test */
    const char* bad_thread_alloc = NULL;
    vector<sphess*> Hvec(n);
#pragma omp parallel for num_threads(config.nthreads) if (config.tape.parallel && n>1)
    for (int i=0; i<n; i++) {
      TMB_TRY {
        Hvec[i] = NULL;
        Hvec[i] = new sphess( MakeADHessObject2_(data, parameters, report, control, i) );
        //optimizeTape( Hvec[i]->pf );
      }
      TMB_CATCH {
        // Record failure; cannot raise an R error inside an OpenMP region
        if (Hvec[i] != NULL) {
          delete Hvec[i]->pf;
          delete Hvec[i];
        }
        bad_thread_alloc = excpt.what();
      }
    }
    if (bad_thread_alloc) {
      TMB_ERROR_BAD_THREAD_ALLOC;
    }
    // NOTE(review): unlike the CppAD variant below, the sphess wrappers in
    // Hvec are not deleted after construction — confirm parallelADFun takes
    // ownership here, otherwise this leaks the small wrapper structs.
    parallelADFun<double>* tmp=new parallelADFun<double>(Hvec);
    return asSEXP(tmp->convert(),"parallelADFun");
  } // MakeADHessObject2
2490 #else
  /** \brief Tape the sparse Hessian (TMBad, serial build).
      Returns a single "ADFun" external pointer with sparsity attached. */
  SEXP MakeADHessObject2(SEXP data, SEXP parameters, SEXP report, SEXP control){
    typedef TMBad::ad_aug ad;
    typedef TMBad::ADFun<ad> adfun;
    typedef sphess_t<adfun> sphess;
    sphess* pH = NULL;
    SEXP ans;
    TMB_TRY {
      pH = new sphess( MakeADHessObject2_(data, parameters, report, control, -1) );
      //optimizeTape( pH->pf );
      ans = asSEXP(*pH, "ADFun"); // Tape ownership moves to the R pointer
    }
    TMB_CATCH {
      if (pH != NULL) {
        delete pH->pf;
        delete pH;
      }
      TMB_ERROR_BAD_ALLOC;
    }
    delete pH; // Wrapper only; the tape now lives behind 'ans'
    return ans;
  } // MakeADHessObject2
2512 #endif
2513 #endif
2514 
2515 #ifdef CPPAD_FRAMEWORK
2516 #ifdef _OPENMP
  /** \brief Tape the sparse Hessian (CppAD, OpenMP build).
      One sparse Hessian per parallel region, combined into a
      "parallelADFun" external pointer (finalizer registered inside). */
  SEXP MakeADHessObject2(SEXP data, SEXP parameters, SEXP report, SEXP control){
    if(config.trace.parallel)
      std::cout << "Count num parallel regions\n";
    objective_function< double > F(data,parameters,report);
    int n=F.count_parallel_regions();
    if(config.trace.parallel)
      std::cout << n << " regions found.\n";
    if (n==0) n++; // No explicit parallel accumulation

    start_parallel(); /* Start threads */

    /* parallel test */
    const char* bad_thread_alloc = NULL;
    vector<sphess*> Hvec(n);
#pragma omp parallel for num_threads(config.nthreads) if (config.tape.parallel && n>1)
    for (int i=0; i<n; i++) {
      TMB_TRY {
        Hvec[i] = NULL;
        Hvec[i] = new sphess( MakeADHessObject2_(data, parameters, report, control, i) );
        optimizeTape( Hvec[i]->pf );
      }
      TMB_CATCH {
        // Record failure; cannot raise an R error inside an OpenMP region
        if (Hvec[i] != NULL) {
          delete Hvec[i]->pf;
          delete Hvec[i];
        }
        bad_thread_alloc = excpt.what();
      }
    }
    if (bad_thread_alloc) {
      TMB_ERROR_BAD_THREAD_ALLOC;
    }
    parallelADFun<double>* tmp=new parallelADFun<double>(Hvec);
    // Free the small wrappers; the tapes themselves now belong to 'tmp'
    for(int i=0; i<n; i++) {
      delete Hvec[i];
    }
    // Adds finalizer for 'tmp' !!! (so, don't delete tmp...)
    SEXP ans = asSEXP(tmp->convert(),"parallelADFun");
    return ans;
  } // MakeADHessObject2
2557 #else
  /** \brief Tape the sparse Hessian (CppAD, serial build).
      Returns a single "ADFun" external pointer with sparsity attached. */
  SEXP MakeADHessObject2(SEXP data, SEXP parameters, SEXP report, SEXP control){
    sphess* pH = NULL;
    SEXP ans;
    TMB_TRY {
      pH = new sphess( MakeADHessObject2_(data, parameters, report, control, -1) );
      optimizeTape( pH->pf );
      ans = asSEXP(*pH, "ADFun"); // Tape ownership moves to the R pointer
    }
    TMB_CATCH {
      if (pH != NULL) {
        delete pH->pf;
        delete pH;
      }
      TMB_ERROR_BAD_ALLOC;
    }
    delete pH; // Wrapper only; the tape now lives behind 'ans'
    return ans;
  } // MakeADHessObject2
2576 #endif
2577 #endif
2578 }
2579 
2580 extern "C"
2581 {
2582 
2583 #ifdef TMBAD_FRAMEWORK
2584  SEXP usingAtomics(){
2585  SEXP ans;
2586  PROTECT(ans = Rf_allocVector(INTSXP,1));
2587  INTEGER(ans)[0] = 1; // TMBAD doesn't benefit from knowing if 'false'
2588  UNPROTECT(1);
2589  return ans;
2590  }
2591 #endif
2592 
2593 #ifdef CPPAD_FRAMEWORK
2594  SEXP usingAtomics(){
2595  SEXP ans;
2596  PROTECT(ans = Rf_allocVector(INTSXP,1));
2597  INTEGER(ans)[0] = atomic::atomicFunctionGenerated;
2598  UNPROTECT(1);
2599  return ans;
2600  }
2601 #endif
2602 
  /** \brief Report the compiled-in AD framework.

      Returns "TMBad", "CppAD" or "Unknown" with attributes:
      - "openmp": logical, whether OpenMP was enabled at compile time.
      - "sizeof(Index)": integer, size of TMBad::Index (TMBad only).
  */
  SEXP getFramework() {
    // ans
    SEXP ans;
#ifdef TMBAD_FRAMEWORK
    ans = Rf_mkString("TMBad");
#elif defined(CPPAD_FRAMEWORK)
    ans = Rf_mkString("CppAD");
#else
    ans = Rf_mkString("Unknown");
#endif
    PROTECT(ans);
    // openmp_sym (Not strictly necessary to PROTECT)
    SEXP openmp_sym = Rf_install("openmp");
    PROTECT(openmp_sym);
    // openmp_res
    SEXP openmp_res;
#ifdef _OPENMP
    openmp_res = Rf_ScalarLogical(1);
#else
    openmp_res = Rf_ScalarLogical(0);
#endif
    PROTECT(openmp_res);
    // Assemble
    Rf_setAttrib(ans, openmp_sym, openmp_res);
    UNPROTECT(2); // openmp_sym, openmp_res
    // Add more stuff
#ifdef TMBAD_FRAMEWORK
    SEXP index_size_sym = Rf_install("sizeof(Index)");
    PROTECT(index_size_sym);
    SEXP index_size = Rf_ScalarInteger(sizeof(TMBad::Index));
    PROTECT(index_size);
    Rf_setAttrib(ans, index_size_sym, index_size);
    UNPROTECT(2); // index_size_sym, index_size
#endif
    UNPROTECT(1); // ans
    return ans;
  }
2640 }
2641 
2642 extern "C"
2643 {
2644  void tmb_forward(SEXP f, const Eigen::VectorXd &x, Eigen::VectorXd &y) {
2645 #ifdef CPPAD_FRAMEWORK
2646  SEXP tag=R_ExternalPtrTag(f);
2647  if(tag == Rf_install("ADFun")) {
2648  ADFun<double>* pf;
2649  pf = (ADFun<double>*) R_ExternalPtrAddr(f);
2650  y = pf->Forward(0, x);
2651  } else
2652  if(tag == Rf_install("parallelADFun")) {
2653  parallelADFun<double>* pf;
2654  pf = (parallelADFun<double>*) R_ExternalPtrAddr(f);
2655  y = pf->Forward(0, x);
2656  } else
2657  Rf_error("Unknown function pointer");
2658 #endif
2659 #ifdef TMBAD_FRAMEWORK
2660  typedef TMBad::ad_aug ad;
2661  typedef TMBad::ADFun<ad> adfun;
2662  SEXP tag=R_ExternalPtrTag(f);
2663  if(tag == Rf_install("ADFun")) {
2664  adfun* pf = (adfun*) R_ExternalPtrAddr(f);
2665  y = pf->forward(x);
2666  } else
2667  if(tag == Rf_install("parallelADFun")) {
2668  parallelADFun<double>* pf;
2669  pf = (parallelADFun<double>*) R_ExternalPtrAddr(f);
2670  y = pf->forward(x);
2671  } else
2672  Rf_error("Unknown function pointer");
2673 #endif
2674  }
2675  void tmb_reverse(SEXP f, const Eigen::VectorXd &v, Eigen::VectorXd &y) {
2676 #ifdef CPPAD_FRAMEWORK
2677  SEXP tag=R_ExternalPtrTag(f);
2678  if(tag == Rf_install("ADFun")) {
2679  ADFun<double>* pf;
2680  pf = (ADFun<double>*) R_ExternalPtrAddr(f);
2681  y = pf->Reverse(1, v);
2682  } else
2683  if(tag == Rf_install("parallelADFun")) {
2684  parallelADFun<double>* pf;
2685  pf = (parallelADFun<double>*) R_ExternalPtrAddr(f);
2686  y = pf->Reverse(1, v);
2687  } else
2688  Rf_error("Unknown function pointer");
2689 #endif
2690 #ifdef TMBAD_FRAMEWORK
2691  typedef TMBad::ad_aug ad;
2692  typedef TMBad::ADFun<ad> adfun;
2693  SEXP tag=R_ExternalPtrTag(f);
2694  if(tag == Rf_install("ADFun")) {
2695  adfun* pf = (adfun*) R_ExternalPtrAddr(f);
2696  y = pf->reverse(v);
2697  } else
2698  if(tag == Rf_install("parallelADFun")) {
2699  parallelADFun<double>* pf;
2700  pf = (parallelADFun<double>*) R_ExternalPtrAddr(f);
2701  y = pf->reverse(v);
2702  } else
2703  Rf_error("Unknown function pointer");
2704 #endif
2705  }
2706 }
2707 
2708 #endif /* #ifndef WITH_LIBTMB */
2709 
2710 
2711 
2712 
2713 
2714 #ifdef WITH_LIBTMB
2715 
2716 template class objective_function<double>;
2717 #ifdef CPPAD_FRAMEWORK
2718 template class objective_function<AD<double> >;
2719 template class objective_function<AD<AD<double> > >;
2720 template class objective_function<AD<AD<AD<double> > > >;
2721 #endif
2722 #ifdef TMBAD_FRAMEWORK
2723 template class objective_function<TMBad::ad_aug>;
2724 #endif
2725 
2726 extern "C"
2727 {
2728  SEXP MakeADFunObject(SEXP data, SEXP parameters, SEXP report, SEXP control);
2729  SEXP InfoADFunObject(SEXP f);
2730  SEXP tmbad_print(SEXP f, SEXP control);
2731  SEXP optimizeADFunObject(SEXP f);
2732  SEXP EvalADFunObject(SEXP f, SEXP theta, SEXP control);
2733  SEXP MakeDoubleFunObject(SEXP data, SEXP parameters, SEXP report, SEXP control);
2734  SEXP EvalDoubleFunObject(SEXP f, SEXP theta, SEXP control);
2735  SEXP getParameterOrder(SEXP data, SEXP parameters, SEXP report, SEXP control);
2736  SEXP MakeADGradObject(SEXP data, SEXP parameters, SEXP report, SEXP control);
2737  SEXP MakeADHessObject2(SEXP data, SEXP parameters, SEXP report, SEXP control);
2738  SEXP usingAtomics();
2739  SEXP getFramework();
2740  SEXP getSetGlobalPtr(SEXP ptr);
2741  SEXP TransformADFunObject(SEXP f, SEXP control);
2742  void tmb_forward(SEXP f, const Eigen::VectorXd &x, Eigen::VectorXd &y);
2743  void tmb_reverse(SEXP f, const Eigen::VectorXd &v, Eigen::VectorXd &y);
2744 }
2745 
2746 #endif /* #ifdef WITH_LIBTMB */
2747 
2748 /* Register native routines (see 'Writing R extensions'). Especially
2749  relevant to avoid symbol lookup overhead for those routines that
2750  are called many times e.g. EvalADFunObject. */
2751 extern "C"{
2752  /* Some string utilities */
2753 #define xstringify(s) stringify(s)
2754 #define stringify(s) #s
2755  /* May be used as part of custom calldef tables */
2756 #define TMB_CALLDEFS \
2757  {"MakeADFunObject", (DL_FUNC) &MakeADFunObject, 4}, \
2758  {"FreeADFunObject", (DL_FUNC) &FreeADFunObject, 1}, \
2759  {"InfoADFunObject", (DL_FUNC) &InfoADFunObject, 1}, \
2760  {"tmbad_print", (DL_FUNC) &tmbad_print, 2}, \
2761  {"EvalADFunObject", (DL_FUNC) &EvalADFunObject, 3}, \
2762  {"TransformADFunObject",(DL_FUNC) &TransformADFunObject,2}, \
2763  {"MakeDoubleFunObject", (DL_FUNC) &MakeDoubleFunObject, 4}, \
2764  {"EvalDoubleFunObject", (DL_FUNC) &EvalDoubleFunObject, 3}, \
2765  {"getParameterOrder", (DL_FUNC) &getParameterOrder, 4}, \
2766  {"MakeADGradObject", (DL_FUNC) &MakeADGradObject, 4}, \
2767  {"MakeADHessObject2", (DL_FUNC) &MakeADHessObject2, 4}, \
2768  {"usingAtomics", (DL_FUNC) &usingAtomics, 0}, \
2769  {"getFramework", (DL_FUNC) &getFramework, 0}, \
2770  {"getSetGlobalPtr", (DL_FUNC) &getSetGlobalPtr, 1}, \
2771  {"TMBconfig", (DL_FUNC) &TMBconfig, 2}
2772  /* May be used as part of custom R_init function
2773  C-callable routines (PACKAGE is 'const char*') */
2774 #define TMB_CCALLABLES(PACKAGE) \
2775  R_RegisterCCallable(PACKAGE, "tmb_forward", (DL_FUNC) &tmb_forward); \
2776  R_RegisterCCallable(PACKAGE, "tmb_reverse", (DL_FUNC) &tmb_reverse);
2777  /* Default (optional) calldef table. */
2778 #ifdef TMB_LIB_INIT
2779 #include <R_ext/Rdynload.h>
2780 static R_CallMethodDef CallEntries[] = {
2781  TMB_CALLDEFS
2782  ,
2783  /* User's R_unload_lib function must also be registered (because we
2784  disable dynamic lookup - see below). The unload function is
2785  mainly useful while developing models in order to clean up
2786  external pointers without restarting R. Should not be used by TMB
2787  dependent packages. */
2788 #ifdef LIB_UNLOAD
2789  {xstringify(LIB_UNLOAD), (DL_FUNC) &LIB_UNLOAD, 1},
2790 #endif
2791  /* End of table */
2792  {NULL, NULL, 0}
2793 };
2794 void TMB_LIB_INIT(DllInfo *dll){
2795  R_registerRoutines(dll, NULL, CallEntries, NULL, NULL);
2796  R_useDynamicSymbols(dll, (Rboolean)FALSE);
2797  // Example: TMB_LIB_INIT = R_init_mypkg
2798  // ^
2799  // +-------+
2800  // ^
2801  TMB_CCALLABLES(&(xstringify(TMB_LIB_INIT)[7]));
2802 }
2803 #endif /* #ifdef TMB_LIB_INIT */
2804 #undef xstringify
2805 #undef stringify
2806 }
VT cdf_upper
Logarithm of upper CDF
Definition: tmb_core.hpp:445
+Go to the documentation of this file.
1 // Copyright (C) 2013-2015 Kasper Kristensen
2 // License: GPL-2
3 
8 /*
9  Call to external C++ code can potentially result in exeptions that
10  will crash R. However, we do not want R to crash on failed memory
11  allocations. Therefore:
12 
13  * All interface functions (those called with .Call from R) must have
14  TMB_TRY wrapped around CppAD/Eigen code that allocates memory.
15 
16  * Special attention must be payed to parallel code, as each thread
17  is responsible for catching its own exceptions.
18 */
19 
20 #ifndef TMB_TRY
21 #define TMB_TRY try
22 #endif
23 // By default we only accept 'bad_alloc' as a valid exception. Everything else => debugger !
24 // Behaviour can be changed by re-defining this macro.
25 #ifndef TMB_CATCH
26 #define TMB_CATCH catch(std::bad_alloc& excpt)
27 #endif
28 // Inside the TMB_CATCH comes 'cleanup code' followed by this error
29 // call (allowed to depend on the exception 'excpt')
30 // Error message can be changed by re-defining this macro.
31 #ifndef TMB_ERROR_BAD_ALLOC
32 #define TMB_ERROR_BAD_ALLOC \
33 Rf_error("Caught exception '%s' in function '%s'\n", \
34  excpt.what(), \
35  __FUNCTION__)
36 #endif
37 // Error call comes outside TMB_CATCH in OpenMP case (so *cannot*
38 // depend on exception e.g. 'excpt')
39 // Error message can be changed by re-defining this macro.
40 #ifndef TMB_ERROR_BAD_THREAD_ALLOC
41 #define TMB_ERROR_BAD_THREAD_ALLOC \
42 Rf_error("Caught exception '%s' in function '%s'\n", \
43  bad_thread_alloc, \
44  __FUNCTION__)
45 #endif
46 
47 /* Memory manager:
48  Count the number of external pointers alive.
49  When total number is zero it is safe to dyn.unload
50  the library.
51 */
52 #include <set>
53 extern "C" void finalizeDoubleFun(SEXP x);
54 extern "C" void finalizeADFun(SEXP x);
55 extern "C" void finalizeparallelADFun(SEXP x);
56 extern "C" SEXP FreeADFunObject(SEXP f) CSKIP ({
57  SEXP tag = R_ExternalPtrTag(f);
58  if (tag == Rf_install("DoubleFun")) {
59  finalizeDoubleFun(f);
60  }
61  else if (tag == Rf_install("ADFun")) {
62  finalizeADFun(f);
63  }
64  else if (tag == Rf_install("parallelADFun")) {
65  finalizeparallelADFun(f);
66  }
67  else {
68  Rf_error("Unknown external ptr type");
69  }
70  R_ClearExternalPtr(f); // Set pointer to 'nil'
71  return R_NilValue;
72 })
74 struct memory_manager_struct {
75  int counter;
77  std::set<SEXP> alive;
79  void RegisterCFinalizer(SEXP list);
81  void CallCFinalizer(SEXP x);
83  void clear();
84  memory_manager_struct();
85 };
86 #ifndef WITH_LIBTMB
87 void memory_manager_struct::RegisterCFinalizer(SEXP x) {
88  counter++;
89  alive.insert(x);
90 }
91 void memory_manager_struct::CallCFinalizer(SEXP x){
92  counter--;
93  alive.erase(x);
94 }
95 void memory_manager_struct::clear(){
96  std::set<SEXP>::iterator it;
97  while (alive.size() > 0) {
98  FreeADFunObject(*alive.begin());
99  }
100 }
101 memory_manager_struct::memory_manager_struct(){
102  counter=0;
103 }
104 #endif
105 TMB_EXTERN memory_manager_struct memory_manager;
106 
117 #ifdef WITH_LIBTMB
118 SEXP ptrList(SEXP x);
119 #else
120 SEXP ptrList(SEXP x)
121 {
122  SEXP ans,names;
123  PROTECT(ans=Rf_allocVector(VECSXP,1));
124  PROTECT(names=Rf_allocVector(STRSXP,1));
125  SET_VECTOR_ELT(ans,0,x);
126  SET_STRING_ELT(names,0,Rf_mkChar("ptr"));
127  Rf_setAttrib(ans,R_NamesSymbol,names);
128  memory_manager.RegisterCFinalizer(x);
129  UNPROTECT(2);
130  return ans;
131 }
132 #endif
133 
134 extern "C"{
135 #ifdef LIB_UNLOAD
136 #include <R_ext/Rdynload.h>
137  void LIB_UNLOAD(DllInfo *dll)
138  {
139  if(memory_manager.counter>0)Rprintf("Warning: %d external pointers will be removed\n",memory_manager.counter);
140  memory_manager.clear();
141  for(int i=0;i<1000;i++){ // 122 seems to be sufficient.
142  if(memory_manager.counter>0){
143  R_gc();
144  R_RunExitFinalizers();
145  } else break;
146  }
147  if(memory_manager.counter>0)Rf_error("Failed to clean. Please manually clean up before unloading\n");
148  }
149 #endif
150 }
151 
152 #ifdef _OPENMP
153 TMB_EXTERN bool _openmp CSKIP( =true; )
154 #else
155 TMB_EXTERN bool _openmp CSKIP( =false; )
156 #endif
157 
159 template<class ADFunPointer>
160 void optimizeTape(ADFunPointer pf){
161  if(!config.optimize.instantly){
162  /* Drop out */
163  return;
164  }
165  if (!config.optimize.parallel){
166 #ifdef _OPENMP
167 #pragma omp critical
168 #endif
169  { /* Avoid multiple tape optimizations at the same time (to reduce memory) */
170  if(config.trace.optimize)std::cout << "Optimizing tape... ";
171  pf->optimize();
172  if(config.trace.optimize)std::cout << "Done\n";
173  }
174  }
175  else
176  { /* Allow multiple tape optimizations at the same time */
177  if(config.trace.optimize)std::cout << "Optimizing tape... ";
178  pf->optimize();
179  if(config.trace.optimize)std::cout << "Done\n";
180  }
181 }
182 
183 /* Macros to obtain data and parameters from R */
184 
208 #define TMB_OBJECTIVE_PTR \
209 this
210 
213 #define PARAMETER_MATRIX(name) \
214 tmbutils::matrix<Type> name(TMB_OBJECTIVE_PTR -> fillShape( \
215 asMatrix<Type> ( TMB_OBJECTIVE_PTR -> getShape( #name, &Rf_isMatrix) ), \
216 #name) );
217 
220 #define PARAMETER_VECTOR(name) \
221 vector<Type> name(TMB_OBJECTIVE_PTR -> fillShape( \
222 asVector<Type>(TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isReal )), \
223 #name));
224 
227 #define PARAMETER(name) \
228 Type name(TMB_OBJECTIVE_PTR -> fillShape( \
229 asVector<Type>(TMB_OBJECTIVE_PTR -> getShape(#name,&isNumericScalar)), \
230 #name)[0]);
231 
236 #define DATA_VECTOR(name) \
237 vector<Type> name; \
238 if (!Rf_isNull(getListElement(TMB_OBJECTIVE_PTR -> parameters,#name))){ \
239  name = TMB_OBJECTIVE_PTR -> fillShape(asVector<Type>( \
240  TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isReal )), #name); \
241 } else { \
242  name = asVector<Type>(getListElement( \
243  TMB_OBJECTIVE_PTR -> data,#name,&Rf_isReal )); \
244 }
245 
248 #define DATA_MATRIX(name) \
249 matrix<Type> name(asMatrix<Type>( \
250 getListElement(TMB_OBJECTIVE_PTR -> data, #name, &Rf_isMatrix)));
251 
254 #define DATA_SCALAR(name) \
255 Type name(asVector<Type>(getListElement(TMB_OBJECTIVE_PTR -> data, \
256 #name,&isNumericScalar))[0]);
257 
260 #define DATA_INTEGER(name) int name(CppAD::Integer(asVector<Type>( \
261 getListElement(TMB_OBJECTIVE_PTR -> data, \
262 #name, &isNumericScalar))[0]));
263 
281 #define DATA_FACTOR(name) vector<int> name(asVector<int>( \
282 getListElement(TMB_OBJECTIVE_PTR -> data, #name, &Rf_isReal )));
283 
287 #define DATA_IVECTOR(name) vector<int> name(asVector<int>( \
288 getListElement(TMB_OBJECTIVE_PTR -> data, #name, &Rf_isReal )));
289 
292 #define NLEVELS(name) \
293 LENGTH(Rf_getAttrib(getListElement(TMB_OBJECTIVE_PTR -> data, #name), \
294 Rf_install("levels")))
295 
299 #define DATA_SPARSE_MATRIX(name) \
300 Eigen::SparseMatrix<Type> name(tmbutils::asSparseMatrix<Type>( \
301 getListElement(TMB_OBJECTIVE_PTR -> data, \
302 #name, &isValidSparseMatrix)));
303 
304 // NOTE: REPORT() constructs new SEXP so never report in parallel!
313 #define REPORT(name) \
314 if( isDouble<Type>::value && \
315  TMB_OBJECTIVE_PTR -> current_parallel_region<0 ) \
316 { \
317  SEXP _TMB_temporary_sexp_; \
318  PROTECT( _TMB_temporary_sexp_ = asSEXP(name) ); \
319  Rf_defineVar(Rf_install(#name), \
320  _TMB_temporary_sexp_, TMB_OBJECTIVE_PTR -> report); \
321  UNPROTECT(1); \
322 }
323 
329 #define SIMULATE \
330 if(isDouble<Type>::value && TMB_OBJECTIVE_PTR -> do_simulate)
331 
342 #define ADREPORT(name) \
343 TMB_OBJECTIVE_PTR -> reportvector.push(name, #name);
344 
345 #define PARALLEL_REGION \
346 if( TMB_OBJECTIVE_PTR -> parallel_region() )
347 
352 #define DATA_ARRAY(name) \
353 tmbutils::array<Type> name; \
354 if (!Rf_isNull(getListElement(TMB_OBJECTIVE_PTR -> parameters,#name))){ \
355  name = TMB_OBJECTIVE_PTR -> fillShape(tmbutils::asArray<Type>( \
356  TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isArray)), #name); \
357 } else { \
358  name = tmbutils::asArray<Type>(getListElement( \
359  TMB_OBJECTIVE_PTR -> data, #name, &Rf_isArray)); \
360 }
361 
364 #define PARAMETER_ARRAY(name) \
365 tmbutils::array<Type> name(TMB_OBJECTIVE_PTR -> fillShape( \
366 tmbutils::asArray<Type>(TMB_OBJECTIVE_PTR -> getShape( \
367 #name, &Rf_isArray)), #name));
368 
371 #define DATA_IMATRIX(name) \
372 matrix<int> name(asMatrix<int>( \
373 getListElement(TMB_OBJECTIVE_PTR -> data,#name, &Rf_isMatrix)));
374 
377 #define DATA_IARRAY(name) \
378 tmbutils::array<int> name(tmbutils::asArray<int>( \
379 getListElement(TMB_OBJECTIVE_PTR -> data, #name, &Rf_isArray)));
380 
394 #define DATA_STRING(name) \
395 std::string name = \
396  CHAR(STRING_ELT(getListElement(TMB_OBJECTIVE_PTR -> data, #name), 0));
397 
433 #define DATA_STRUCT(name, struct) \
434 struct<Type> name(getListElement(TMB_OBJECTIVE_PTR -> data, #name));
435 
440 template<class VT, class Type = typename VT::Scalar>
441 struct data_indicator : VT{
449  bool osa_flag;
451  data_indicator() { osa_flag = false; }
456  data_indicator(VT obs, bool init_one = false){
457  VT::operator=(obs);
458  if (init_one) VT::fill(Type(1.0));
459  cdf_lower = obs; cdf_lower.setZero();
460  cdf_upper = obs; cdf_upper.setZero();
461  osa_flag = false;
462  }
464  void fill(vector<Type> p, SEXP ord_){
465  int n = (*this).size();
466  if(p.size() >= n ) VT::operator=(p.segment(0, n));
467  if(p.size() >= 2*n) cdf_lower = p.segment(n, n);
468  if(p.size() >= 3*n) cdf_upper = p.segment(2 * n, n);
469  if(!Rf_isNull(ord_)) {
470  this->ord = asVector<int>(ord_);
471  }
472  for (int i=0; i<p.size(); i++) {
473  osa_flag |= CppAD::Variable(p[i]);
474  }
475  }
478  data_indicator segment(int pos, int n) {
479  data_indicator ans ( VT::segment(pos, n) );
480  ans.cdf_lower = cdf_lower.segment(pos, n);
481  ans.cdf_upper = cdf_upper.segment(pos, n);
482  if (ord.size() != 0) {
483  ans.ord = ord.segment(pos, n);
484  }
485  ans.osa_flag = osa_flag;
486  return ans;
487  }
490  int n = this->size();
491  vector<int> ans(n);
492  if (ord.size() == 0) {
493  for (int i=0; i<n; i++)
494  ans(i) = i;
495  } else {
496  if (ord.size() != n) Rf_error("Unexpected 'ord.size() != n'");
497  std::vector<std::pair<int, int> > y(n);
498  for (int i=0; i<n; i++) {
499  y[i].first = ord[i];
500  y[i].second = i;
501  }
502  std::sort(y.begin(), y.end()); // sort inplace
503  for (int i=0; i<n; i++) {
504  ans[i] = y[i].second;
505  }
506  }
507  return ans;
508  }
510  bool osa_active() { return osa_flag; }
511 };
512 
518 #define DATA_ARRAY_INDICATOR(name, obs) \
519 data_indicator<tmbutils::array<Type> > name(obs, true); \
520 if (!Rf_isNull(getListElement(TMB_OBJECTIVE_PTR -> parameters,#name))){ \
521  name.fill( TMB_OBJECTIVE_PTR -> fillShape(asVector<Type>( \
522  TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isReal )), \
523  #name), \
524  Rf_getAttrib( \
525  TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isReal ), \
526  Rf_install("ord")) ); \
527 }
528 
534 #define DATA_VECTOR_INDICATOR(name, obs) \
535 data_indicator<tmbutils::vector<Type> > name(obs, true); \
536 if (!Rf_isNull(getListElement(TMB_OBJECTIVE_PTR -> parameters,#name))){ \
537  name.fill( TMB_OBJECTIVE_PTR -> fillShape(asVector<Type>( \
538  TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isReal )), \
539  #name), \
540  Rf_getAttrib( \
541  TMB_OBJECTIVE_PTR -> getShape(#name, &Rf_isReal ), \
542  Rf_install("ord")) ); \
543 }
544 
545 // kasper: Not sure used anywhere
549 template<class Type>
550 matrix<int> HessianSparsityPattern(ADFun<Type> *pf){
551  int n=pf->Domain();
552  vector<bool> Px(n * n);
553  for(int i = 0; i < n; i++)
554  {
555  for(int j = 0; j < n; j++)
556  Px[ i * n + j ] = false;
557  Px[ i * n + i ] = true;
558  }
559  pf->ForSparseJac(n, Px);
560  vector<bool> Py(1); Py[0]=true;
561  vector<int> tmp = (pf->RevSparseHes(n,Py)).template cast<int>();
562  return asMatrix(tmp, n, n);
563 }
564 
566 void Independent(vector<double> x)CSKIP({})
567 
569 template <class Type>
570 struct report_stack{
571  std::vector<const char*> names;
572  std::vector<vector<int> > namedim;
573  std::vector<Type> result;
574  void clear(){
575  names.resize(0);
576  namedim.resize(0);
577  result.resize(0);
578  }
579  // Get dimension of various object types
580  vector<int> getDim(const matrix<Type> &x) {
581  vector<int> dim(2);
582  dim << x.rows(), x.cols();
583  return dim;
584  }
585  vector<int> getDim(const tmbutils::array<Type> &x) {
586  return x.dim;
587  }
588  template<class Other> // i.e. vector or expression
589  vector<int> getDim(const Other &x) {
590  vector<int> dim(1);
591  dim << x.size();
592  return dim;
593  }
594  // push vector, matrix or array
595  template<class Vector_Matrix_Or_Array>
596  void push(Vector_Matrix_Or_Array x, const char* name) {
597  names.push_back(name);
598  namedim.push_back(getDim(x));
599  Eigen::Array<Type, Eigen::Dynamic, Eigen::Dynamic> xa(x);
600  result.insert(result.end(), xa.data(), xa.data() + x.size());
601  }
602  // push scalar (convert to vector case)
603  void push(Type x, const char* name){
604  vector<Type> xvec(1);
605  xvec[0] = x;
606  push(xvec, name);
607  }
608  // Eval: cast to vector<Type>
609  vector<Type> operator()() {
610  return result;
611  }
612  /* Get names (with replicates) to R */
613  SEXP reportnames()
614  {
615  int n = result.size();
616  SEXP nam;
617  PROTECT( nam = Rf_allocVector(STRSXP, n) );
618  int k = 0;
619  for(size_t i = 0; i < names.size(); i++) {
620  int namelength = namedim[i].prod();
621  for(int j = 0; j < namelength; j++) {
622  SET_STRING_ELT(nam, k, Rf_mkChar(names[i]) );
623  k++;
624  }
625  }
626  UNPROTECT(1);
627  return nam;
628  }
629  /* Get AD reported object dims */
630  SEXP reportdims() {
631  SEXP ans, nam;
632  typedef vector<vector<int> > VVI;
633  PROTECT( ans = asSEXP(VVI(namedim)) );
634  PROTECT( nam = Rf_allocVector(STRSXP, names.size()) );
635  for(size_t i = 0; i < names.size(); i++) {
636  SET_STRING_ELT(nam, i, Rf_mkChar(names[i]));
637  }
638  Rf_setAttrib(ans, R_NamesSymbol, nam);
639  UNPROTECT(2);
640  return ans;
641  }
642  EIGEN_DEFAULT_DENSE_INDEX_TYPE size(){return result.size();}
643 }; // report_stack
644 
645 extern "C" {
646  void GetRNGstate(void);
647  void PutRNGstate(void);
648 }
649 
651 template <class Type>
652 class objective_function
653 {
654 // private:
655 public:
656  SEXP data;
657  SEXP parameters;
658  SEXP report;
659 
660  int index;
661  vector<Type> theta;
662  vector<const char*> thetanames;
663  report_stack<Type> reportvector;
664  bool reversefill; // used to find the parameter order in user template (not anymore - use pushParname instead)
665  vector<const char*> parnames;
668  void pushParname(const char* x){
669  parnames.conservativeResize(parnames.size()+1);
670  parnames[parnames.size()-1]=x;
671  }
672 
673  /* ================== For parallel Hessian computation
674  Need three different parallel evaluation modes:
675  (1) *Parallel mode* where a parallel region is evaluated iff
676  current_parallel_region == selected_parallel_region
677  (2) *Serial mode* where all parallel region tests are evaluated
678  to TRUE so that "PARALLEL_REGION" tests are effectively removed.
679  A negative value of "current_parallel_region" or "selected_parallel_region"
680  is used to select this mode (the default).
681  (3) *Count region mode* where statements inside "PARALLEL_REGION{...}"
682  are *ignored* and "current_parallel_region" is increased by one each
683  time a parallel region is visited.
684  NOTE: The macro "PARALLEL_REGION" is supposed to be defined as
685  #define PARALLEL_REGION if(this->parallel_region())
686  where the function "parallel_region" does the book keeping.
687  */
688  bool parallel_ignore_statements;
689  int current_parallel_region; /* Identifier of a code-fragment of user template */
690  int selected_parallel_region; /* Consider _this_ code-fragment */
691  int max_parallel_regions; /* Max number of parallel region identifiers,
692  e.g. max_parallel_regions=config.nthreads;
693  probably best in most cases. */
694  bool parallel_region(){ /* Is this the selected parallel region ? */
695  bool ans;
696  if(config.autopar || current_parallel_region<0 || selected_parallel_region<0)return true; /* Serial mode */
697  ans = (selected_parallel_region==current_parallel_region) && (!parallel_ignore_statements);
698  current_parallel_region++;
699  if(max_parallel_regions>0)current_parallel_region=current_parallel_region % max_parallel_regions;
700  return ans;
701  }
702  /* Note: Some other functions rely on "count_parallel_regions" to run through the users code (!) */
703  int count_parallel_regions(){
704  current_parallel_region=0; /* reset counter */
705  selected_parallel_region=0;
706  parallel_ignore_statements=true; /* Do not evaluate stuff inside PARALLEL_REGION{...} */
707  this->operator()(); /* Run through users code */
708  if (config.autopar) return 0;
709  if(max_parallel_regions>0)return max_parallel_regions;
710  else
711  return current_parallel_region;
712  }
713  void set_parallel_region(int i){ /* Select parallel region (from within openmp loop) */
714  current_parallel_region=0;
715  selected_parallel_region=i;
716  parallel_ignore_statements=false;
717  }
718 
719  bool do_simulate;
720  void set_simulate(bool do_simulate_) {
721  do_simulate = do_simulate_;
722  }
723 
724  /* data_ and parameters_ are R-lists containing R-vectors or R-matrices.
725  report_ is an R-environment.
726  The elements of the vector "unlist(parameters_)" are filled into "theta"
727  which contains the default parameter-values. This happens during the
728  *construction* of the objective_function object.
729  The user defined template "objective_function::operator()" is called
730  from "MakeADFunObject" which tapes the operations and creates the final
731  ADFun-object.
732  */
734  objective_function(SEXP data, SEXP parameters, SEXP report) :
735  data(data), parameters(parameters), report(report), index(0)
736  {
737  /* Fill theta with the default parameters.
738  Pass R-matrices column major. */
739  theta.resize(nparms(parameters));
740  int length_parlist = Rf_length(parameters);
741  for(int i = 0, counter = 0; i < length_parlist; i++) {
742  // x = parameters[[i]]
743  SEXP x = VECTOR_ELT(parameters, i);
744  int nx = Rf_length(x);
745  double* px = REAL(x);
746  for(int j = 0; j < nx; j++) {
747  theta[counter++] = Type( px[j] );
748  }
749  }
750  thetanames.resize(theta.size());
751  for(int i=0;i<thetanames.size();i++)thetanames[i]="";
752  current_parallel_region=-1;
753  selected_parallel_region=-1;
754  max_parallel_regions=-1;
755 #ifdef _OPENMP
756  max_parallel_regions = config.nthreads;
757 #endif
758  reversefill=false;
759  do_simulate = false;
760  GetRNGstate(); /* Read random seed from R. Note: by default we do
761  not write the seed back to R *after*
762  simulation. This ensures that multiple tapes for
763  one model object get the same seed. When in
764  simulation mode (enabled when calling
765  obj$simulate() from R) we *do* write the seed
766  back after simulation in order to get varying
767  replicates. */
768  }
769 
771  void sync_data() {
772  SEXP env = ENCLOS(this->report);
773  this->data = Rf_findVar(Rf_install("data"), env);
774  }
775 
777  SEXP defaultpar()
778  {
779  int n=theta.size();
780  SEXP res;
781  SEXP nam;
782  PROTECT(res=Rf_allocVector(REALSXP,n));
783  PROTECT(nam=Rf_allocVector(STRSXP,n));
784  for(int i=0;i<n;i++){
785  //REAL(res)[i]=CppAD::Value(theta[i]);
786  REAL(res)[i]=value(theta[i]);
787  SET_STRING_ELT(nam,i,Rf_mkChar(thetanames[i]));
788  }
789  Rf_setAttrib(res,R_NamesSymbol,nam);
790  UNPROTECT(2);
791  return res;
792  }
793 
795  SEXP parNames()
796  {
797  int n=parnames.size();
798  SEXP nam;
799  PROTECT(nam=Rf_allocVector(STRSXP,n));
800  for(int i=0;i<n;i++){
801  SET_STRING_ELT(nam,i,Rf_mkChar(parnames[i]));
802  }
803  UNPROTECT(1);
804  return nam;
805  }
806 
807  /* FIXME: "Value" should be "var2par" I guess
808  kasper: Why not use asDouble defined previously? */
818  double value(double x){return x;}
819  double value(AD<double> x){return CppAD::Value(x);}
820  double value(AD<AD<double> > x){return CppAD::Value(CppAD::Value(x));}
821  double value(AD<AD<AD<double> > > x){return CppAD::Value(CppAD::Value(CppAD::Value(x)));}
822 #ifdef TMBAD_FRAMEWORK
823  double value(TMBad::ad_aug x){return x.Value();}
824 #endif
825 
828  int nparms(SEXP obj)
829  {
830  int count=0;
831  for(int i=0;i<Rf_length(obj);i++){
832  if(!Rf_isReal(VECTOR_ELT(obj,i)))Rf_error("PARAMETER COMPONENT NOT A VECTOR!");
833  count+=Rf_length(VECTOR_ELT(obj,i));
834  }
835  return count;
836  }
837 
838  /* The "fill functions" are all used to populate parameter vectors,
839  arrays, matrices etc with the values of the parameter vector theta. */
840  void fill(vector<Type> &x, const char *nam)
841  {
842  pushParname(nam);
843  for(int i=0;i<x.size();i++){
844  thetanames[index]=nam;
845  if(reversefill)theta[index++]=x[i];else x[i]=theta[index++];
846  }
847  }
848  void fill(matrix<Type> &x, const char *nam)
849  {
850  pushParname(nam);
851  for(int j=0;j<x.cols();j++){
852  for(int i=0;i<x.rows();i++){
853  thetanames[index]=nam;
854  if(reversefill)theta[index++]=x(i,j);else x(i,j)=theta[index++];
855  }
856  }
857  }
858  template<class ArrayType>
859  void fill(ArrayType &x, const char *nam)
860  {
861  pushParname(nam);
862  for(int i=0;i<x.size();i++){
863  thetanames[index]=nam;
864  if(reversefill)theta[index++]=x[i];else x[i]=theta[index++];
865  }
866  }
867 
868  /* Experiment: new map feature - currently arrays only */
869  template<class ArrayType>
870  void fillmap(ArrayType &x, const char *nam)
871  {
872  pushParname(nam);
873  SEXP elm=getListElement(parameters,nam);
874  int* map=INTEGER(Rf_getAttrib(elm,Rf_install("map")));
875  int nlevels=INTEGER(Rf_getAttrib(elm,Rf_install("nlevels")))[0];
876  for(int i=0;i<x.size();i++){
877  if(map[i]>=0){
878  thetanames[index+map[i]]=nam;
879  if(reversefill)theta[index+map[i]]=x(i);else x(i)=theta[index+map[i]];
880  }
881  }
882  index+=nlevels;
883  }
884  // Auto detect whether we are in "map-mode"
885  SEXP getShape(const char *nam, RObjectTester expectedtype=NULL){
886  SEXP elm=getListElement(parameters,nam);
887  SEXP shape=Rf_getAttrib(elm,Rf_install("shape"));
888  SEXP ans;
889  if(shape==R_NilValue)ans=elm; else ans=shape;
890  RObjectTestExpectedType(ans, expectedtype, nam);
891  return ans;
892  }
893  template<class ArrayType>
894  //ArrayType fillShape(ArrayType &x, const char *nam){
895  ArrayType fillShape(ArrayType x, const char *nam){
896  SEXP elm=getListElement(parameters,nam);
897  SEXP shape=Rf_getAttrib(elm,Rf_install("shape"));
898  if(shape==R_NilValue)fill(x,nam);
899  else fillmap(x,nam);
900  return x;
901  }
902 
903  void fill(Type &x, char const *nam)
904  {
905  pushParname(nam);
906  thetanames[index]=nam;
907  if(reversefill)theta[index++]=x;else x=theta[index++];
908  }
909 
910  Type operator() ();
911 
912  Type evalUserTemplate(){
913  Type ans=this->operator()();
914  /* After evaluating the template, "index" should be equal to the length of "theta".
915  If not, we assume that the "epsilon method" has been requested from R, I.e.
916  that the un-used theta parameters are reserved for an inner product contribution
917  with the numbers reported via ADREPORT. */
918  if(index != theta.size()){
919  PARAMETER_VECTOR( TMB_epsilon_ );
920  ans += ( this->reportvector() * TMB_epsilon_ ).sum();
921  }
922  return ans;
923  }
924 
925 }; // objective_function
926 
956 template<class Type>
958  Type result;
959  objective_function<Type>* obj;
960  parallel_accumulator(objective_function<Type>* obj_){
961  result=Type(0);
962  obj=obj_;
963 #ifdef _OPENMP
964  obj->max_parallel_regions=config.nthreads;
965 #endif
966  }
967  inline void operator+=(Type x){
968  if(obj->parallel_region())result+=x;
969  }
970  inline void operator-=(Type x){
971  if(obj->parallel_region())result-=x;
972  }
973  operator Type(){
974  return result;
975  }
976 };
977 
978 
979 #ifndef WITH_LIBTMB
980 
981 #ifdef TMBAD_FRAMEWORK
982 template<class ADFunType>
983 SEXP EvalADFunObjectTemplate(SEXP f, SEXP theta, SEXP control)
984 {
985  if(!Rf_isNewList(control))Rf_error("'control' must be a list");
986  ADFunType* pf;
987  pf=(ADFunType*)R_ExternalPtrAddr(f);
988  int data_changed = getListInteger(control, "data_changed", 0);
989  if (data_changed) {
990  pf->force_update();
991  }
992  int set_tail = getListInteger(control, "set_tail", 0) - 1;
993  if (set_tail == -1) {
994  pf -> unset_tail();
995  } else {
996  std::vector<TMBad::Index> r(1, set_tail);
997  pf -> set_tail(r);
998  }
999  PROTECT(theta=Rf_coerceVector(theta,REALSXP));
1000  int n=pf->Domain();
1001  int m=pf->Range();
1002  if(LENGTH(theta)!=n)Rf_error("Wrong parameter length.");
1003  //R-index -> C-index
1004  int rangecomponent = getListInteger(control, "rangecomponent", 1) - 1;
1005  if(!((0<=rangecomponent)&(rangecomponent<=m-1)))
1006  Rf_error("Wrong range component.");
1007  int order = getListInteger(control, "order");
1008  if((order!=0) & (order!=1) & (order!=2) & (order!=3))
1009  Rf_error("order can be 0, 1, 2 or 3");
1010  //int sparsitypattern = getListInteger(control, "sparsitypattern");
1011  //int dumpstack = getListInteger(control, "dumpstack");
1012  SEXP hessiancols; // Hessian columns
1013  PROTECT(hessiancols=getListElement(control,"hessiancols"));
1014  int ncols=Rf_length(hessiancols);
1015  SEXP hessianrows; // Hessian rows
1016  PROTECT(hessianrows=getListElement(control,"hessianrows"));
1017  int nrows=Rf_length(hessianrows);
1018  if((nrows>0)&(nrows!=ncols))Rf_error("hessianrows and hessianrows must have same length");
1019  vector<size_t> cols(ncols);
1020  vector<size_t> cols0(ncols);
1021  vector<size_t> rows(nrows);
1022  if(ncols>0){
1023  for(int i=0;i<ncols;i++){
1024  cols[i]=INTEGER(hessiancols)[i]-1; //R-index -> C-index
1025  cols0[i]=0;
1026  if(nrows>0)rows[i]=INTEGER(hessianrows)[i]-1; //R-index -> C-index
1027  }
1028  }
1029  std::vector<double> x(REAL(theta), REAL(theta) + LENGTH(theta));
1030 
1031  SEXP res=R_NilValue;
1032  SEXP rangeweight=getListElement(control,"rangeweight");
1033  if(rangeweight!=R_NilValue){
1034  if(LENGTH(rangeweight)!=m)Rf_error("rangeweight must have length equal to range dimension");
1035  std::vector<double> w(REAL(rangeweight),
1036  REAL(rangeweight) + LENGTH(rangeweight));
1037  vector<double> ans = pf->Jacobian(x, w);
1038  res = asSEXP(ans);
1039  UNPROTECT(3);
1040  return res;
1041  }
1042  if(order==3){
1043  Rf_error("Not implemented for TMBad");
1044  // vector<double> w(1);
1045  // w[0]=1;
1046  // if((nrows!=1) | (ncols!=1))Rf_error("For 3rd order derivatives a single hessian coordinate must be specified.");
1047  // pf->ForTwo(x,rows,cols); /* Compute forward directions */
1048  // PROTECT(res=asSEXP(asMatrix(pf->Reverse(3,w),n,3)));
1049  }
1050  if(order==0){
1051  //if(dumpstack)CppAD::traceforward0sweep(1);
1052  std::vector<double> ans = pf->operator()(x);
1053  PROTECT(res=asSEXP(ans));
1054  //if(dumpstack)CppAD::traceforward0sweep(0);
1055  SEXP rangenames=Rf_getAttrib(f,Rf_install("range.names"));
1056  if(LENGTH(res)==LENGTH(rangenames)){
1057  Rf_setAttrib(res,R_NamesSymbol,rangenames);
1058  }
1059  }
1060  if(order==1){
1061  std::vector<double> jvec;
1062  SEXP keepx = getListElement(control, "keepx");
1063  if (keepx != R_NilValue && LENGTH(keepx) > 0) {
1064  SEXP keepy = getListElement(control, "keepy");
1065  std::vector<bool> keep_x(pf->Domain(), false);
1066  std::vector<bool> keep_y(pf->Range(), false);
1067  for (int i=0; i<LENGTH(keepx); i++) {
1068  keep_x[INTEGER(keepx)[i] - 1] = true;
1069  }
1070  for (int i=0; i<LENGTH(keepy); i++) {
1071  keep_y[INTEGER(keepy)[i] - 1] = true;
1072  }
1073  n = LENGTH(keepx);
1074  m = LENGTH(keepy);
1075  jvec = pf->Jacobian(x, keep_x, keep_y);
1076  } else {
1077  jvec = pf->Jacobian(x);
1078  }
1079  // if(doforward)pf->Forward(0,x);
1080  matrix<double> jac(m, n);
1081  int k=0;
1082  for (int i=0; i<m; i++) {
1083  for (int j=0; j<n; j++) {
1084  jac(i, j) = jvec[k];
1085  k++;
1086  }
1087  }
1088  PROTECT( res = asSEXP(jac) );
1089  }
1090  //if(order==2)res=asSEXP(pf->Hessian(x,0),1);
1091  if(order==2){
1092  // if(ncols==0){
1093  // if(sparsitypattern){
1094  // PROTECT(res=asSEXP(HessianSparsityPattern(pf)));
1095  // } else {
1096  // PROTECT(res=asSEXP(asMatrix(pf->Hessian(x,rangecomponent),n,n)));
1097  // }
1098  // }
1099  // else if (nrows==0){
1100  // /* Fixme: the cols0 argument should be user changeable */
1101  // PROTECT(res=asSEXP(asMatrix(pf->RevTwo(x,cols0,cols),n,ncols)));
1102  // }
1103  // else PROTECT(res=asSEXP(asMatrix(pf->ForTwo(x,rows,cols),m,ncols)));
1104  }
1105  UNPROTECT(4);
1106  return res;
1107 } // EvalADFunObjectTemplate
1108 #endif
1109 
1110 #ifdef CPPAD_FRAMEWORK
1111 
1145 template<class ADFunType>
1146 SEXP EvalADFunObjectTemplate(SEXP f, SEXP theta, SEXP control)
1147 {
1148  if(!Rf_isNewList(control))Rf_error("'control' must be a list");
1149  ADFunType* pf;
1150  pf=(ADFunType*)R_ExternalPtrAddr(f);
1151  PROTECT(theta=Rf_coerceVector(theta,REALSXP));
1152  int n=pf->Domain();
1153  int m=pf->Range();
1154  if(LENGTH(theta)!=n)Rf_error("Wrong parameter length.");
1155  // Do forwardsweep ?
1156  int doforward = getListInteger(control, "doforward", 1);
1157  //R-index -> C-index
1158  int rangecomponent = getListInteger(control, "rangecomponent", 1) - 1;
1159  if(!((0<=rangecomponent)&(rangecomponent<=m-1)))
1160  Rf_error("Wrong range component.");
1161  int order = getListInteger(control, "order");
1162  if((order!=0) & (order!=1) & (order!=2) & (order!=3))
1163  Rf_error("order can be 0, 1, 2 or 3");
1164  int sparsitypattern = getListInteger(control, "sparsitypattern");
1165  int dumpstack = getListInteger(control, "dumpstack");
1166  SEXP hessiancols; // Hessian columns
1167  PROTECT(hessiancols=getListElement(control,"hessiancols"));
1168  int ncols=Rf_length(hessiancols);
1169  SEXP hessianrows; // Hessian rows
1170  PROTECT(hessianrows=getListElement(control,"hessianrows"));
1171  int nrows=Rf_length(hessianrows);
1172  if((nrows>0)&(nrows!=ncols))Rf_error("hessianrows and hessianrows must have same length");
1173  vector<size_t> cols(ncols);
1174  vector<size_t> cols0(ncols);
1175  vector<size_t> rows(nrows);
1176  if(ncols>0){
1177  for(int i=0;i<ncols;i++){
1178  cols[i]=INTEGER(hessiancols)[i]-1; //R-index -> C-index
1179  cols0[i]=0;
1180  if(nrows>0)rows[i]=INTEGER(hessianrows)[i]-1; //R-index -> C-index
1181  }
1182  }
1183  vector<double> x = asVector<double>(theta);
1184  SEXP res=R_NilValue;
1185  SEXP rangeweight=getListElement(control,"rangeweight");
1186  if(rangeweight!=R_NilValue){
1187  if(LENGTH(rangeweight)!=m)Rf_error("rangeweight must have length equal to range dimension");
1188  if(doforward)pf->Forward(0,x);
1189  res=asSEXP(pf->Reverse(1,asVector<double>(rangeweight)));
1190  UNPROTECT(3);
1191  return res;
1192  }
1193  if(order==3){
1194  vector<double> w(1);
1195  w[0]=1;
1196  if((nrows!=1) | (ncols!=1))Rf_error("For 3rd order derivatives a single hessian coordinate must be specified.");
1197  pf->ForTwo(x,rows,cols); /* Compute forward directions */
1198  PROTECT(res=asSEXP(asMatrix(pf->Reverse(3,w),n,3)));
1199  }
1200  if(order==0){
1201  if(dumpstack)CppAD::traceforward0sweep(1);
1202  PROTECT(res=asSEXP(pf->Forward(0,x)));
1203  if(dumpstack)CppAD::traceforward0sweep(0);
1204  SEXP rangenames=Rf_getAttrib(f,Rf_install("range.names"));
1205  if(LENGTH(res)==LENGTH(rangenames)){
1206  Rf_setAttrib(res,R_NamesSymbol,rangenames);
1207  }
1208  }
1209  if(order==1){
1210  if(doforward)pf->Forward(0,x);
1211  matrix<double> jac(m, n);
1212  vector<double> u(n);
1213  vector<double> v(m);
1214  v.setZero();
1215  for(int i=0; i<m; i++) {
1216  v[i] = 1.0; u = pf->Reverse(1,v);
1217  v[i] = 0.0;
1218  jac.row(i) = u;
1219  }
1220  PROTECT( res = asSEXP(jac) );
1221  }
1222  //if(order==2)res=asSEXP(pf->Hessian(x,0),1);
1223  if(order==2){
1224  if(ncols==0){
1225  if(sparsitypattern){
1226  PROTECT(res=asSEXP(HessianSparsityPattern(pf)));
1227  } else {
1228  PROTECT(res=asSEXP(asMatrix(pf->Hessian(x,rangecomponent),n,n)));
1229  }
1230  }
1231  else if (nrows==0){
1232  /* Fixme: the cols0 argument should be user changeable */
1233  PROTECT(res=asSEXP(asMatrix(pf->RevTwo(x,cols0,cols),n,ncols)));
1234  }
1235  else PROTECT(res=asSEXP(asMatrix(pf->ForTwo(x,rows,cols),m,ncols)));
1236  }
1237  UNPROTECT(4);
1238  return res;
1239 } // EvalADFunObjectTemplate
1240 #endif
1241 
1243 template <class ADFunType>
1244 void finalize(SEXP x)
1245 {
1246  ADFunType* ptr=(ADFunType*)R_ExternalPtrAddr(x);
1247  if(ptr!=NULL)delete ptr;
1248  memory_manager.CallCFinalizer(x);
1249 }
1250 
1251 #ifdef TMBAD_FRAMEWORK
1252 
/** \brief Tape the user template with TMBad and return an owning ADFun pointer.

    Declares F.theta as the independent variables, then records either the
    objective value returned by the user template (default) or the vector
    collected via the ADREPORT macro (control$report != 0).

    @param data            R list of data objects.
    @param parameters      R list of parameter objects.
    @param report          R environment used by the report machinery.
    @param control         R list; only 'report' is read (may be R_NilValue).
    @param parallel_region Region index this tape accumulates (-1: serial/all).
    @param info            Output argument: set to the ADREPORT names in
                           report mode, untouched otherwise.
    @return Heap-allocated tape; caller owns it (deleted by finalizer).
*/
TMBad::ADFun< TMBad::ad_aug >* MakeADFunObject_(SEXP data, SEXP parameters,
                                                SEXP report, SEXP control, int parallel_region=-1,
                                                SEXP &info=R_NilValue)
{
  typedef TMBad::ad_aug ad;
  typedef TMBad::ADFun<ad> adfun;
  int returnReport = (control!=R_NilValue) && getListInteger(control, "report");
  /* Create objective_function "dummy"-object */
  objective_function< ad > F(data,parameters,report);
  F.set_parallel_region(parallel_region);
  /* Create ADFun pointer.
     We have the option to tape either the value returned by the
     objective_function template or the vector reported using the
     macro "ADREPORT" */
  adfun* pf = new adfun();
  pf->glob.ad_start();
  //TMBad::Independent(F.theta); // In both cases theta is the independent variable
  for (int i=0; i<F.theta.size(); i++) F.theta(i).Independent();
  if(!returnReport){ // Default case: no ad report - parallel run allowed
    vector< ad > y(1);
    y[0] = F.evalUserTemplate();
    //TMBad::Dependent(y);
    for (int i=0; i<y.size(); i++) y[i].Dependent();
  } else { // ad report case
    F(); // Run through user template (modifies reportvector)
    //TMBad::Dependent(F.reportvector.result);
    for (int i=0; i<F.reportvector.size(); i++) F.reportvector.result[i].Dependent();
    info=F.reportvector.reportnames(); // parallel run *not* allowed
  }
  pf->glob.ad_stop();
  return pf;
}
1285 #endif
1286 
1287 #ifdef CPPAD_FRAMEWORK
1288 
/** \brief Tape the user template with CppAD and return an owning ADFun pointer.

    Declares F.theta as the independent variables, then records either the
    objective value returned by the user template (default) or the vector
    collected via the ADREPORT macro (control$report != 0).

    @param data            R list of data objects.
    @param parameters      R list of parameter objects.
    @param report          R environment used by the report machinery.
    @param control         R list; only 'report' is read.
    @param parallel_region Region index this tape accumulates (-1: serial/all).
    @param info            Output argument: set to the ADREPORT names in
                           report mode, untouched otherwise.
    @return Heap-allocated tape; caller owns it (deleted by finalizer).
*/
ADFun<double>* MakeADFunObject_(SEXP data, SEXP parameters,
                                SEXP report, SEXP control, int parallel_region=-1,
                                SEXP &info=R_NilValue)
{
  int returnReport = getListInteger(control, "report");
  /* Create objective_function "dummy"-object */
  objective_function< AD<double> > F(data,parameters,report);
  F.set_parallel_region(parallel_region);
  /* Create ADFun pointer.
     We have the option to tape either the value returned by the
     objective_function template or the vector reported using the
     macro "ADREPORT" */
  Independent(F.theta); // In both cases theta is the independent variable
  ADFun< double >* pf;
  if(!returnReport){ // Default case: no ad report - parallel run allowed
    vector< AD<double> > y(1);
    y[0]=F.evalUserTemplate();
    pf = new ADFun< double >(F.theta,y);
  } else { // ad report case
    F(); // Run through user template (modifies reportvector)
    pf = new ADFun< double >(F.theta,F.reportvector());
    info=F.reportvector.reportnames(); // parallel run *not* allowed
  }
  return pf;
}
1314 #endif
1315 
1316 extern "C"
1317 {
1318 
1319 #ifdef TMBAD_FRAMEWORK
1320 
  /* Finalizer for 'ADFun' external pointers (TMBad tape). */
  void finalizeADFun(SEXP x)
  {
    finalize<TMBad::ADFun<TMBad::ad_aug> > (x);
  }
  /* Finalizer for 'parallelADFun' external pointers (TMBad build). */
  void finalizeparallelADFun(SEXP x)
  {
    finalize<parallelADFun<double> > (x);
  }
1329 #endif
1330 
1331 #ifdef CPPAD_FRAMEWORK
1332 
  /* Finalizer for 'ADFun' external pointers (CppAD tape). */
  void finalizeADFun(SEXP x)
  {
    finalize<ADFun<double> > (x);
  }
  /* Finalizer for 'parallelADFun' external pointers (CppAD build). */
  void finalizeparallelADFun(SEXP x)
  {
    finalize<parallelADFun<double> > (x);
  }
1341 #endif
1342 
1343  /* --- MakeADFunObject ----------------------------------------------- */
1344 
1345 #ifdef TMBAD_FRAMEWORK
1346 
  /** \brief .Call entry point: tape the user template with TMBad.

      Returns ptrList(res): an external pointer tagged 'ADFun' (serial mode)
      or 'parallelADFun' (OpenMP parallel mode), with the default parameter
      vector attached as attribute "par" and, in serial mode, the ADREPORT
      names as attribute "range.names". Returns R_NilValue when an ADREPORT
      tape is requested but the template contains no ADREPORT. */
  SEXP MakeADFunObject(SEXP data, SEXP parameters,
                       SEXP report, SEXP control)
  {
    typedef TMBad::ad_aug ad;
    typedef TMBad::ADFun<ad> adfun;

    adfun* pf = NULL;
    /* Some type checking */
    if(!Rf_isNewList(data))Rf_error("'data' must be a list");
    if(!Rf_isNewList(parameters))Rf_error("'parameters' must be a list");
    if(!Rf_isEnvironment(report))Rf_error("'report' must be an environment");
    if(!Rf_isNewList(control))Rf_error("'control' must be a list");
    int returnReport = getListInteger(control, "report");

    /* Get the default parameter vector (tiny overhead) */
    SEXP par,res=NULL,info;
    objective_function< double > F(data,parameters,report);
#ifdef _OPENMP
    int n=F.count_parallel_regions(); // Evaluates user template
#else
    F.count_parallel_regions(); // Evaluates user template
#endif
    if(returnReport && F.reportvector.size()==0){
      /* Told to report, but no ADREPORT in template: Get out quickly */
      return R_NilValue;
    }
    PROTECT(par=F.defaultpar());
    PROTECT(info=R_NilValue); // Important

    if(_openmp && !returnReport){ // Parallel mode
#ifdef _OPENMP
      if(config.trace.parallel)
        std::cout << n << " regions found.\n";
      if (n==0) n++; // No explicit parallel accumulation
      start_parallel(); /* FIXME: NOT NEEDED */
      /* One tape per parallel region; combined into a parallelADFun below */
      vector< adfun* > pfvec(n);
      const char* bad_thread_alloc = NULL;
#pragma omp parallel for num_threads(config.nthreads) if (config.tape.parallel && n>1)
      for(int i = 0; i < n; i++) {
        TMB_TRY {
          pfvec[i] = NULL;
          pfvec[i] = MakeADFunObject_(data, parameters, report, control, i, info);
          if (config.optimize.instantly) pfvec[i]->optimize();
        }
        TMB_CATCH {
          /* Cannot throw across the OpenMP region: record and rethrow after */
          if (pfvec[i] != NULL) delete pfvec[i];
          bad_thread_alloc = excpt.what();
        }
      }
      if (bad_thread_alloc) {
        TMB_ERROR_BAD_THREAD_ALLOC;
      }

      // FIXME: NOT DONE YET

      parallelADFun<double>* ppf=new parallelADFun<double>(pfvec);
      /* Convert parallel ADFun pointer to R_ExternalPtr */
      PROTECT(res=R_MakeExternalPtr((void*) ppf,Rf_install("parallelADFun"),R_NilValue));
      //R_RegisterCFinalizer(res,finalizeparallelADFun);
#endif
    } else { // Serial mode
      TMB_TRY{
        /* Actual work: tape creation */
        pf = NULL;
        pf = MakeADFunObject_(data, parameters, report, control, -1, info);
        if (config.optimize.instantly) pf->optimize();
      }
      TMB_CATCH {
        if (pf != NULL) delete pf;
        TMB_ERROR_BAD_ALLOC;
      }
      /* Convert ADFun pointer to R_ExternalPtr */
      PROTECT(res=R_MakeExternalPtr((void*) pf,Rf_install("ADFun"),R_NilValue));
      Rf_setAttrib(res,Rf_install("range.names"),info);
    }

    /* Return list of external pointer and default-parameter */
    SEXP ans;
    Rf_setAttrib(res,Rf_install("par"),par);
    PROTECT(ans=ptrList(res));
    UNPROTECT(4); // par, info, res, ans

    return ans;
  } // MakeADFunObject
1431 #endif
1432 
1433 #ifdef CPPAD_FRAMEWORK
1434 
  /** \brief .Call entry point: tape the user template with CppAD.

      Returns ptrList(res): an external pointer tagged 'ADFun' (serial mode)
      or 'parallelADFun' (OpenMP parallel mode), with the default parameter
      vector attached as attribute "par" and, in serial mode, the ADREPORT
      names as attribute "range.names". Returns R_NilValue when an ADREPORT
      tape is requested but the template contains no ADREPORT. */
  SEXP MakeADFunObject(SEXP data, SEXP parameters,
                       SEXP report, SEXP control)
  {
    ADFun<double>* pf = NULL;
    /* Some type checking */
    if(!Rf_isNewList(data))Rf_error("'data' must be a list");
    if(!Rf_isNewList(parameters))Rf_error("'parameters' must be a list");
    if(!Rf_isEnvironment(report))Rf_error("'report' must be an environment");
    if(!Rf_isNewList(control))Rf_error("'control' must be a list");
    int returnReport = getListInteger(control, "report");

    /* Get the default parameter vector (tiny overhead) */
    SEXP par,res=NULL,info;
    objective_function< double > F(data,parameters,report);
#ifdef _OPENMP
    int n=F.count_parallel_regions(); // Evaluates user template
#else
    F.count_parallel_regions(); // Evaluates user template
#endif
    if(returnReport && F.reportvector.size()==0){
      /* Told to report, but no ADREPORT in template: Get out quickly */
      return R_NilValue;
    }
    PROTECT(par=F.defaultpar());
    PROTECT(info=R_NilValue); // Important

    if(_openmp && !returnReport){ // Parallel mode
#ifdef _OPENMP
      if(config.trace.parallel)
        std::cout << n << " regions found.\n";
      if (n==0) n++; // No explicit parallel accumulation
      start_parallel(); /* Start threads */
      /* One tape per parallel region; combined into a parallelADFun below */
      vector< ADFun<double>* > pfvec(n);
      const char* bad_thread_alloc = NULL;
#pragma omp parallel for num_threads(config.nthreads) if (config.tape.parallel && n>1)
      for(int i=0;i<n;i++){
        TMB_TRY {
          pfvec[i] = NULL;
          pfvec[i] = MakeADFunObject_(data, parameters, report, control, i, info);
          if (config.optimize.instantly) pfvec[i]->optimize();
        }
        TMB_CATCH {
          /* Cannot throw across the OpenMP region: record and rethrow after */
          if (pfvec[i] != NULL) delete pfvec[i];
          bad_thread_alloc = excpt.what();
        }
      }
      if (bad_thread_alloc) {
        TMB_ERROR_BAD_THREAD_ALLOC;
      }
      parallelADFun<double>* ppf=new parallelADFun<double>(pfvec);
      /* Convert parallel ADFun pointer to R_ExternalPtr */
      PROTECT(res=R_MakeExternalPtr((void*) ppf,Rf_install("parallelADFun"),R_NilValue));
#endif
    } else { // Serial mode
      TMB_TRY{
        /* Actual work: tape creation */
        pf = NULL;
        pf = MakeADFunObject_(data, parameters, report, control, -1, info);
        if (config.optimize.instantly) pf->optimize();
      }
      TMB_CATCH {
        if (pf != NULL) delete pf;
        TMB_ERROR_BAD_ALLOC;
      }
      /* Convert ADFun pointer to R_ExternalPtr */
      PROTECT(res=R_MakeExternalPtr((void*) pf,Rf_install("ADFun"),R_NilValue));
      Rf_setAttrib(res,Rf_install("range.names"),info);
    }

    /* Return list of external pointer and default-parameter */
    SEXP ans;
    Rf_setAttrib(res,Rf_install("par"),par);
    PROTECT(ans=ptrList(res));
    UNPROTECT(4); // par, info, res, ans

    return ans;
  } // MakeADFunObject
1512 #endif
1513 
1514  /* --- TransformADFunObject ----------------------------------------------- */
1515 
1516 #ifdef TMBAD_FRAMEWORK
1517 inline int get_num_tapes(SEXP f) {
1518  if (Rf_isNull(f))
1519  return 0;
1520  SEXP tag = R_ExternalPtrTag(f);
1521  if (tag != Rf_install("parallelADFun"))
1522  return 0;
1523  return
1524  ((parallelADFun<double>*) R_ExternalPtrAddr(f))->ntapes;
1525 }
/** \brief Apply the transformation named by control$method to a single
    TMBad tape, modifying *pf in place. Returns R_NilValue.

    Known methods: copy, set_compiled, remove_random_parameters, laplace,
    marginal_gk, marginal_sr, parallelize, compress, compress_and_compile,
    accumulation_tree_split, fuse_and_replay, reorder_random,
    reorder_sub_expressions, reorder_depth_first, reorder_temporaries,
    parallel_accumulate (handled by the caller), optimize. */
SEXP TransformADFunObjectTemplate(TMBad::ADFun<TMBad::ad_aug>* pf, SEXP control)
{
  if (pf == NULL)
    Rf_error("Cannot transform '<pointer: (nil)>' (unloaded/reloaded DLL?)");
  typedef TMBad::ad_aug ad;
  typedef TMBad::ADFun<ad> adfun;
  // FIXME: Must require non parallel object !!!
  std::string method =
    CHAR(STRING_ELT(getListElement(control, "method"), 0));
  // Test adfun copy
  if (method == "copy") {
    *pf = adfun(*pf);
    return R_NilValue;
  }
  if (method == "set_compiled") {
    // Attach per-thread compiled forward/reverse kernels to the tape
    int i = 0;
#ifdef _OPENMP
    i = omp_get_thread_num();
#endif
    typedef void(*fct_ptr1)(double*);
    typedef void(*fct_ptr2)(double*,double*);
    pf->glob.forward_compiled =
      (fct_ptr1) R_ExternalPtrAddr(VECTOR_ELT(getListElement(control, "forward_compiled"), i));
    pf->glob.reverse_compiled =
      (fct_ptr2) R_ExternalPtrAddr(VECTOR_ELT(getListElement(control, "reverse_compiled"), i));
    return R_NilValue;
  }
  // Optional vector of random-effect parameter indices (1-based from R)
  SEXP random_order = getListElement(control, "random_order");
  int nr = (Rf_isNull(random_order) ? 0 : LENGTH(random_order));
  std::vector<TMBad::Index> random;
  if (nr != 0) {
    random = std::vector<TMBad::Index>(INTEGER(random_order),
                                       INTEGER(random_order) + nr);
    for (size_t i=0; i<random.size(); i++)
      random[i] -= 1 ; // R index -> C index
  }
  TMB_TRY {
    if (method == "remove_random_parameters") {
      // Drop the 'random' entries from the tape's independent variables
      std::vector<bool> mask(pf->Domain(), true);
      for (size_t i = 0; i<random.size(); i++)
        mask[random[i]] = false;
      pf->glob.inv_index = TMBad::subset(pf->glob.inv_index, mask);
    }
    else if (method == "laplace") {
      // Replace tape by its Laplace approximation over 'random'
      SEXP config = getListElement(control, "config");
      newton::newton_config cfg(config);
      *pf = newton::Laplace_(*pf, random, cfg);
    }
    else if (method == "marginal_gk") {
      // Gauss-Kronrod marginalization of the random effects
      TMBad::gk_config cfg;
      SEXP config = getListElement(control, "config");
      if (!Rf_isNull(config)) {
        cfg.adaptive = getListInteger(config, "adaptive", 0);
        cfg.debug = getListInteger(config, "debug", 0);
      }
      *pf = pf -> marginal_gk(random, cfg);
    }
    else if (method == "marginal_sr") {
      // Sequential-reduction marginalization on user supplied grids
      SEXP config = getListElement(control, "config");
      std::vector<TMBad::sr_grid> grids;
      SEXP grid = getListElement(config, "grid");
      SEXP random2grid = getListElement(config, "random2grid");
      for (int i=0; i<LENGTH(grid); i++) {
        SEXP grid_i = VECTOR_ELT(grid, i);
        SEXP x = getListElement(grid_i, "x");
        SEXP w = getListElement(grid_i, "w");
        if (LENGTH(x) != LENGTH(w))
          Rf_error("Length of grid$x and grid$w must be equal");
        TMBad::sr_grid grid_sr;
        grid_sr.x = std::vector<double>(REAL(x), REAL(x) + LENGTH(x));
        grid_sr.w = std::vector<double>(REAL(w), REAL(w) + LENGTH(w));
        grids.push_back(grid_sr);
      }
      std::vector<TMBad::Index> r2g(INTEGER(random2grid),
                                    INTEGER(random2grid) + LENGTH(random2grid));
      for (size_t i=0; i<r2g.size(); i++)
        r2g[i] -= 1 ; // R index -> C index
      *pf = pf -> marginal_sr(random, grids, r2g, true);
    }
    else if (method == "parallelize")
      *pf = pf -> parallelize(2);
    else if (method == "compress") {
      int max_period_size = getListInteger(control, "max_period_size", 1024);
      TMBad::compress(pf->glob, max_period_size);
    }
    else if (method == "compress_and_compile") {
#ifdef HAVE_COMPILE_HPP
      int max_period_size = getListInteger(control, "max_period_size", 1024);
      TMBad::compress(pf->glob, max_period_size);
      // if (config.optimize.instantly) pf->glob.eliminate();
      TMBad::compile(pf->glob);
#else
      Rf_error("TMBad::compile() is unavailable");
#endif
    }
    else if (method == "accumulation_tree_split")
      pf->glob = accumulation_tree_split(pf->glob, true);
    else if (method == "fuse_and_replay") {
      pf->glob.set_fuse(true);
      pf->replay();
      pf->glob.set_fuse(false);
    }
    else if (method == "reorder_random") {
      pf->reorder(random);
    }
    else if (method == "reorder_sub_expressions") {
      // NOTE(review): the body of this branch is not visible in this view —
      // a source line appears to be missing from this extraction. Verify
      // against the upstream file before assuming this branch is a no-op.
    }
    else if (method == "reorder_depth_first") {
      TMBad::reorder_depth_first(pf->glob);
    }
    else if (method == "reorder_temporaries") {
      TMBad::reorder_temporaries(pf->glob);
    }
    else if (method == "parallel_accumulate") {
      // Known method - done elsewhere
    }
    else if (method == "optimize") {
      pf->optimize();
    } else {
      Rf_error("Method unknown: '%s'", method.c_str());
    }
  }
  TMB_CATCH {
    TMB_ERROR_BAD_ALLOC;
  }
  // for (size_t i=0; i<random.size(); i++) random[i] += 1 ; // C index -> R index
  // Rf_setAttrib(f, Rf_install("random_order"), asSEXP(random));
  return R_NilValue;
}
/** \brief .Call entry point: transform an ADFun / parallelADFun external
    pointer according to control$method.

    Serial tapes are forwarded directly to TransformADFunObjectTemplate.
    Parallel objects apply the method to each component tape, except for
    the special 'parallel_accumulate' method which splits a single tape
    into several and replaces the container in place. */
SEXP TransformADFunObject(SEXP f, SEXP control)
{
  if (Rf_isNull(f))
    Rf_error("Expected external pointer - got NULL");
  SEXP tag = R_ExternalPtrTag(f);
  if (tag != Rf_install("ADFun"))
    if (tag != Rf_install("parallelADFun"))
      Rf_error("Expected ADFun or parallelADFun pointer");
  typedef TMBad::ad_aug ad;
  typedef TMBad::ADFun<ad> adfun;
  if(tag == Rf_install("ADFun")) {
    adfun* pf = (adfun*) R_ExternalPtrAddr(f);
    TransformADFunObjectTemplate(pf, control);
  } else if (tag == Rf_install("parallelADFun")) {
    // Warning: most methods are not meaningful for parallel models!:
    // OK : reorder_random etc
    // NOT OK : copy, set_compiled, marginal_sr etc
    parallelADFun<double>* ppf = (parallelADFun<double>*) R_ExternalPtrAddr(f);
    // Apply method for each component except for one special case:
    // 'Parallel accumulate'
    std::string method =
      CHAR(STRING_ELT(getListElement(control, "method"), 0));
    if (method == "parallel_accumulate") {
      int num_threads = getListInteger(control, "num_threads", 2);
      if (num_threads == 1) {
        // No need to parallelize
        return R_NilValue;
      }
      if (get_num_tapes(f) > 1) {
        // Already parallel (via parallel_accumulator or similar)
        return R_NilValue;
      }
      adfun* pf = (ppf->vecpf)[0]; // One tape - get it
      std::vector<adfun> vf = pf->parallel_accumulate(num_threads);
      if (config.trace.parallel) {
        Rcout << "Autopar work split\n";
        for (size_t i=0; i < vf.size(); i++) {
          Rcout << "Chunk " << i << ": ";
          Rcout << (double) vf[i].glob.opstack.size() / pf->glob.opstack.size() << "\n";
        }
      }
      // Replace container: old parallelADFun (and its single tape) is freed
      parallelADFun<double>* new_ppf = new parallelADFun<double>(vf);
      delete ppf;
      R_SetExternalPtrAddr(f, new_ppf);
      return R_NilValue;
    }
#ifdef _OPENMP
#pragma omp parallel for num_threads(config.nthreads)
#endif
    for (int i=0; i<ppf->ntapes; i++) {
      adfun* pf = (ppf->vecpf)[i];
      TransformADFunObjectTemplate(pf, control);
    }
    // Some methods change Domain or Range of individual tapes. This
    // is allowed when there is only one tape.
    if (ppf->ntapes == 1) {
      ppf->domain = (ppf->vecpf)[0]->Domain();
      ppf->range = (ppf->vecpf)[0]->Range();
    }
    // Now, check that it's ok. FIXME: Range() is not checked
    for (int i=0; i<ppf->ntapes; i++) {
      if (ppf->domain != (ppf->vecpf)[i]->Domain())
        Rf_warning("Domain has changed in an invalid way");
    }
  } else {
    Rf_error("Unknown function pointer");
  }
  return R_NilValue;
}
1726 #endif
1727 
1728 #ifdef CPPAD_FRAMEWORK
1729 
1730 SEXP TransformADFunObject(SEXP f, SEXP control)
1731 {
1732  int mustWork = getListInteger(control, "mustWork", 1);
1733  if (mustWork)
1734  Rf_error("Not supported for CPPAD_FRAMEWORK");
1735  return R_NilValue;
1736 }
1737 #endif
1738 
1739  /* --- InfoADFunObject ---------------------------------------------------- */
1740 
1741 #ifdef TMBAD_FRAMEWORK
  /** \brief Collect diagnostics (active domain, stack sizes, Domain, Range)
      from a TMBad tape into a named R list.
      Only valid for serial objects or parallel objects with a single tape. */
  SEXP InfoADFunObject(SEXP f) {
    typedef TMBad::ad_aug ad;
    typedef TMBad::ADFun<ad> adfun;
    if (Rf_isNull(f)) Rf_error("Expected external pointer - got NULL");
    int num_tapes = get_num_tapes(f);
    if (num_tapes >= 2)
      Rf_error("'InfoADFunObject' is only available for tapes with one thread");
    adfun* pf;
    if (num_tapes == 0)
      pf = (adfun*) R_ExternalPtrAddr(f);
    else {
      // parallelADFun with exactly one component tape
      pf = ( (parallelADFun<double>*) R_ExternalPtrAddr(f) ) -> vecpf[0];
    }
    SEXP ans, names;
    PROTECT(ans = Rf_allocVector(VECSXP, 6));
    PROTECT(names = Rf_allocVector(STRSXP, 6));
    int i = 0;
    /* Store the expression value and its source text as the element name */
#define GET_INFO(EXPR)                          \
    SET_VECTOR_ELT(ans, i, asSEXP(EXPR));       \
    SET_STRING_ELT(names, i, Rf_mkChar(#EXPR)); \
    i++;
    // begin
    std::vector<bool> a = pf -> activeDomain();
    std::vector<int> ai(a.begin(), a.end());
    vector<int> activeDomain(ai);
    GET_INFO(activeDomain);
    int opstack_size = pf->glob.opstack.size();
    GET_INFO(opstack_size);
    int values_size = pf->glob.values.size();
    GET_INFO(values_size);
    int inputs_size = pf->glob.inputs.size();
    GET_INFO(inputs_size);
    int Domain = pf->Domain();
    GET_INFO(Domain);
    int Range = pf->Range();
    GET_INFO(Range);
    // end
#undef GET_INFO
    Rf_setAttrib(ans,R_NamesSymbol,names);
    UNPROTECT(2);
    return ans;
  }
1784 #endif
1785 
1786 #ifdef CPPAD_FRAMEWORK
  /** \brief Collect CppAD tape size diagnostics into a named R list.
      Each element is the int result of the corresponding ADFun member
      (Domain, Range, size_op, ..., Memory), named after that member. */
  SEXP InfoADFunObject(SEXP f)
  {
    ADFun<double>* pf;
    pf = (ADFun<double>*) R_ExternalPtrAddr(f);
    SEXP ans, names;
    PROTECT(ans = Rf_allocVector(VECSXP, 12));
    PROTECT(names = Rf_allocVector(STRSXP, 12));
    int i = 0;
    /* Store pf->MEMBER() and use the member name as the element name */
#define GET_MORE_INFO(MEMBER)                           \
    SET_VECTOR_ELT(ans, i, asSEXP(int(pf->MEMBER()))); \
    SET_STRING_ELT(names, i, Rf_mkChar(#MEMBER));      \
    i++;
    GET_MORE_INFO(Domain);
    GET_MORE_INFO(Range);
    GET_MORE_INFO(size_op);
    GET_MORE_INFO(size_op_arg);
    GET_MORE_INFO(size_op_seq);
    GET_MORE_INFO(size_par);
    GET_MORE_INFO(size_order);
    GET_MORE_INFO(size_direction);
    GET_MORE_INFO(size_text);
    GET_MORE_INFO(size_var);
    GET_MORE_INFO(size_VecAD);
    GET_MORE_INFO(Memory);
#undef GET_MORE_INFO
    Rf_setAttrib(ans,R_NamesSymbol,names);
    UNPROTECT(2);
    return ans;
  }
1816 #endif
1817 
1818 #ifdef CPPAD_FRAMEWORK
1819 
1820  SEXP optimizeADFunObject(SEXP f)
1821  {
1822  SEXP tag=R_ExternalPtrTag(f);
1823  if(tag == Rf_install("ADFun")){
1824  ADFun<double>* pf;
1825  pf=(ADFun<double>*)R_ExternalPtrAddr(f);
1826  pf->optimize();
1827  }
1828  if(tag == Rf_install("parallelADFun")){
1829  parallelADFun<double>* pf;
1830  pf=(parallelADFun<double>*)R_ExternalPtrAddr(f);
1831  pf->optimize();
1832  }
1833  return R_NilValue;
1834  }
1835 #endif
1836 
  /* Return the type tag of an external pointer (e.g. "ADFun",
     "parallelADFun", "DoubleFun"). */
  SEXP getTag(SEXP f){
    return R_ExternalPtrTag(f);
  }
1841 
1842 #ifdef TMBAD_FRAMEWORK
1843  SEXP EvalADFunObject(SEXP f, SEXP theta, SEXP control)
1844  {
1845  typedef TMBad::ad_aug ad;
1846  typedef TMBad::ADFun<ad> adfun;
1847  TMB_TRY {
1848  if(Rf_isNull(f))Rf_error("Expected external pointer - got NULL");
1849  SEXP tag=R_ExternalPtrTag(f);
1850  if(tag == Rf_install("ADFun"))
1851  return EvalADFunObjectTemplate< adfun >(f,theta,control);
1852  if(tag == Rf_install("parallelADFun"))
1853  return EvalADFunObjectTemplate<parallelADFun<double> >(f,theta,control);
1854  Rf_error("NOT A KNOWN FUNCTION POINTER");
1855  }
1856  TMB_CATCH {
1857  TMB_ERROR_BAD_ALLOC;
1858  }
1859  }
1860 #endif
1861 
1862 #ifdef CPPAD_FRAMEWORK
1863  SEXP EvalADFunObject(SEXP f, SEXP theta, SEXP control)
1864  {
1865  TMB_TRY {
1866  if(Rf_isNull(f))Rf_error("Expected external pointer - got NULL");
1867  SEXP tag=R_ExternalPtrTag(f);
1868  if(tag == Rf_install("ADFun"))
1869  return EvalADFunObjectTemplate<ADFun<double> >(f,theta,control);
1870  if(tag == Rf_install("parallelADFun"))
1871  return EvalADFunObjectTemplate<parallelADFun<double> >(f,theta,control);
1872  Rf_error("NOT A KNOWN FUNCTION POINTER");
1873  }
1874  TMB_CATCH {
1875  TMB_ERROR_BAD_ALLOC;
1876  }
1877  }
1878 #endif
1879 
1880 SEXP getSetGlobalPtr(SEXP ptr) {
1881 #ifdef TMBAD_FRAMEWORK
1882  SEXP global_ptr_tag = Rf_install("global_ptr");
1883  if (!Rf_isNull(ptr)) {
1884  SEXP tag = R_ExternalPtrTag(ptr);
1885  if (tag != global_ptr_tag) Rf_error("Invalid pointer type");
1886  TMBad::global_ptr = (TMBad::global**) R_ExternalPtrAddr(ptr);
1887  }
1888  SEXP res = R_MakeExternalPtr( (void*) TMBad::global_ptr, global_ptr_tag, R_NilValue);
1889  return res;
1890 #else
1891  return R_NilValue;
1892 #endif
1893 }
1894 
  /** \brief Debug/introspection printer for TMBad tapes.
      control$method selects what to emit: num_tapes, tape, dot, inv_index,
      dep_index, src (C source of the tape) or op (operator listing).
      For parallel objects, control$i selects the component tape.
      Returns R_NilValue except for 'num_tapes' and 'op'. */
  SEXP tmbad_print(SEXP f, SEXP control) {
#ifdef TMBAD_FRAMEWORK
    typedef TMBad::ad_aug ad;
    typedef TMBad::ADFun<ad> adfun;
    int num_tapes = get_num_tapes(f);
    adfun* pf;
    if (num_tapes == 0)
      pf = (adfun*) R_ExternalPtrAddr(f);
    else {
      // Parallel object: pick the requested component tape
      int i = getListInteger(control, "i", 0);
      pf = ( (parallelADFun<double>*) R_ExternalPtrAddr(f) ) -> vecpf[i];
    }
    std::string method =
      CHAR(STRING_ELT(getListElement(control, "method"), 0));
    if (method == "num_tapes") { // Get number of tapes
      return Rf_ScalarInteger(num_tapes);
    }
    else if (method == "tape") { // Print tape
      int depth = getListInteger(control, "depth", 1);
      // NOTE(review): the declaration of 'cfg' is not visible in this view —
      // a source line appears to be missing from this extraction; verify
      // against the upstream file.
      cfg.depth = depth;
      pf->glob.print(cfg);
    }
    else if (method == "dot") { // Print dot format
      graph2dot(pf->glob, true, Rcout);
    }
    else if (method == "inv_index") { // Print member
      using TMBad::operator<<;
      Rcout << pf->glob.inv_index << "\n";
    }
    else if (method == "dep_index") { // Print member
      using TMBad::operator<<;
      Rcout << pf->glob.dep_index << "\n";
    }
    else if (method == "src") { // Print C src code
      TMBad::code_config cfg;
      cfg.gpu = false;
      cfg.asm_comments = false;
      cfg.cout = &Rcout;
      *cfg.cout << "#include <cmath>" << std::endl;
      *cfg.cout
        << "template<class T>T sign(const T &x) { return (x > 0) - (x < 0); }"
        << std::endl;
      TMBad::global glob = pf->glob; // Invoke deep copy
      TMBad::compress(glob);
      write_forward(glob, cfg);
      write_reverse(glob, cfg);
    }
    else if (method == "op") {
      // Operator listing: flags select which fields go into each string
      int name = getListInteger(control, "name", 0);
      int address = getListInteger(control, "address", 0);
      int input_size = getListInteger(control, "input_size", 0);
      int output_size = getListInteger(control, "output_size", 0);
      size_t n = pf->glob.opstack.size();
      SEXP ans = PROTECT(Rf_allocVector(STRSXP, n));
      for (size_t i=0; i<n; i++) {
        std::stringstream strm;
        if (address) strm << (void*) pf->glob.opstack[i] << " ";
        if (name) strm << pf->glob.opstack[i]->op_name() << " ";
        if (input_size) strm << pf->glob.opstack[i]->input_size();
        if (output_size) strm << pf->glob.opstack[i]->output_size();
        const std::string& tmp = strm.str();
        SET_STRING_ELT(ans, i, Rf_mkChar(tmp.c_str()));
      }
      UNPROTECT(1);
      return ans;
    }
    else {
      Rf_error("Unknown method: %s", method.c_str());
    }
#endif
    return R_NilValue;
  }
1968 
1969 }
1970 
1971 /* Double interface */
1972 extern "C"
1973 {
1974 
1975  /* How to garbage collect a DoubleFun object pointer */
1976  void finalizeDoubleFun(SEXP x)
1977  {
1978  objective_function<double>* ptr=(objective_function<double>*)R_ExternalPtrAddr(x);
1979  if(ptr!=NULL)delete ptr;
1980  memory_manager.CallCFinalizer(x);
1981  }
1982 
  /* Construct an objective_function<double> from the R-side 'data' and
     'parameters' lists plus the 'report' environment, and hand it back
     to R as an external pointer tagged "DoubleFun", wrapped in a
     ptrList. 'control' is accepted for interface symmetry but not read
     here. Freed later by finalizeDoubleFun. */
  SEXP MakeDoubleFunObject(SEXP data, SEXP parameters, SEXP report, SEXP control)
  {
    /* Some type checking */
    if(!Rf_isNewList(data))Rf_error("'data' must be a list");
    if(!Rf_isNewList(parameters))Rf_error("'parameters' must be a list");
    if(!Rf_isEnvironment(report))Rf_error("'report' must be an environment");

    /* Create DoubleFun pointer */
    objective_function<double>* pF = NULL;
    TMB_TRY {
      pF = new objective_function<double>(data,parameters,report);
    }
    TMB_CATCH {
      /* Free any partial allocation before signalling the error to R */
      if (pF != NULL) delete pF;
      TMB_ERROR_BAD_ALLOC;
    }

    /* Convert DoubleFun pointer to R_ExternalPtr */
    SEXP res,ans;
    PROTECT(res=R_MakeExternalPtr((void*) pF,Rf_install("DoubleFun"),R_NilValue));
    PROTECT(ans=ptrList(res));
    UNPROTECT(2);
    return ans;
  }
2007 
2008 
  /* Evaluate the objective_function<double> held by external pointer
     'f' at parameter vector 'theta'. Recognised 'control' entries:
       do_simulate     - evaluate with simulation enabled (uses R's RNG);
       get_reportdims  - attach the dimensions of REPORT()ed objects as
                         attribute "reportdims" on the result. */
  SEXP EvalDoubleFunObject(SEXP f, SEXP theta, SEXP control)
  {
    TMB_TRY {
      int do_simulate = getListInteger(control, "do_simulate");
      int get_reportdims = getListInteger(control, "get_reportdims");
      objective_function<double>* pf;
      pf = (objective_function<double>*) R_ExternalPtrAddr(f);
      pf -> sync_data();
      PROTECT( theta=Rf_coerceVector(theta,REALSXP) );
      int n = pf->theta.size();
      if (LENGTH(theta)!=n) Rf_error("Wrong parameter length.");
      vector<double> x(n);
      for(int i=0;i<n;i++) x[i] = REAL(theta)[i];
      pf->theta=x;
      /* Since we are actually evaluating objective_function::operator() (not
         an ADFun object) we should remember to initialize parameter-index. */
      pf->index=0;
      pf->parnames.resize(0); // To avoid mem leak.
      pf->reportvector.clear();
      SEXP res;
      GetRNGstate(); /* Get seed from R */
      if(do_simulate) pf->set_simulate( true );
      PROTECT( res = asSEXP( pf->operator()() ) );
      if(do_simulate) {
        pf->set_simulate( false );
        PutRNGstate(); /* Write seed back to R */
        /* NOTE(review): GetRNGstate() above is unconditional but
           PutRNGstate() only runs when do_simulate is set — confirm this
           asymmetry is intended. */
      }
      if(get_reportdims) {
        SEXP reportdims;
        PROTECT( reportdims = pf -> reportvector.reportdims() );
        Rf_setAttrib( res, Rf_install("reportdims"), reportdims);
        UNPROTECT(1); /* reportdims */
      }
      UNPROTECT(2); /* theta, res */
      return res;
    }
    TMB_CATCH {
      TMB_ERROR_BAD_ALLOC;
    }
  }
2049 
  /* Run once through the user template (double types, no taping) and
     return the parameter names in the order the template requests
     them. 'control' is accepted for interface symmetry but unused. */
  SEXP getParameterOrder(SEXP data, SEXP parameters, SEXP report, SEXP control)
  {
    TMB_TRY {
      /* Some type checking */
      if(!Rf_isNewList(data))Rf_error("'data' must be a list");
      if(!Rf_isNewList(parameters))Rf_error("'parameters' must be a list");
      if(!Rf_isEnvironment(report))Rf_error("'report' must be an environment");
      objective_function<double> F(data,parameters,report);
      F(); // Run through user template
      return F.parNames();
    }
    TMB_CATCH {
      TMB_ERROR_BAD_ALLOC;
    }
  }
2068 
2069 } /* Double interface */
2070 
2071 
2072 #ifdef TMBAD_FRAMEWORK
/* Build a gradient tape (TMBad framework).
   control$f      : optional existing function tape to differentiate;
                    when absent a fresh tape is created here and freed
                    again after differentiation.
   control$random : optional 1-based start index; when present the
                    gradient is restricted via set_tail().
   parallel_region: selects the sub-tape when 'f' is a parallelADFun
                    (-1 means the plain serial tape). */
TMBad::ADFun< TMBad::ad_aug >* MakeADGradObject_(SEXP data, SEXP parameters, SEXP report, SEXP control, int parallel_region=-1)
{
  typedef TMBad::ad_aug ad;
  typedef TMBad::ADFun<ad> adfun;
  SEXP f = getListElement(control, "f");
  adfun* pf;
  bool allocate_new_pf = ( f == R_NilValue );
  if ( ! allocate_new_pf ) {
    if (parallel_region == -1)
      pf = (adfun*) R_ExternalPtrAddr(f);
    else
      pf = ((parallelADFun<double>*) R_ExternalPtrAddr(f))->vecpf[parallel_region];
  } else {
    SEXP control_adfun = R_NilValue;
    pf = MakeADFunObject_(data, parameters, report, control_adfun, parallel_region);
  }
  // Optionally skip gradient components (only need 'random' part of gradient)
  SEXP random = getListElement(control, "random");
  if (random != R_NilValue) {
    int set_tail = INTEGER(random)[0] - 1; // convert R 1-based index
    std::vector<TMBad::Index> r(1, set_tail);
    pf -> set_tail(r);
  }
  adfun* pgf = new adfun (pf->JacFun());
  pf -> unset_tail(); // Not really needed
  if (allocate_new_pf) delete pf;
  return pgf;
}
2101 #endif
2102 
2103 #ifdef CPPAD_FRAMEWORK
/* Build a gradient tape (CppAD framework): tape the objective with
   AD<AD<double>> (tape 1), then tape its Jacobian over AD<double> to
   obtain an ADFun<double> computing the gradient R^n -> R^n. Caller
   owns the returned pointer. */
ADFun< double >* MakeADGradObject_(SEXP data, SEXP parameters, SEXP report, SEXP control, int parallel_region=-1)
{
  /* Create ADFun pointer */
  objective_function< AD<AD<double> > > F(data,parameters,report);
  F.set_parallel_region(parallel_region);
  int n=F.theta.size();
  Independent(F.theta);
  vector< AD<AD<double> > > y(1);
  y[0]=F.evalUserTemplate();
  ADFun<AD<double> > tmp(F.theta,y);
  tmp.optimize(); /* Remove 'dead' operations (could result in nan derivatives) */
  /* Re-tape at one level down: gradient via Jacobian of tape 1 */
  vector<AD<double> > x(n);
  for(int i=0;i<n;i++)x[i]=CppAD::Value(F.theta[i]);
  vector<AD<double> > yy(n);
  Independent(x);
  yy=tmp.Jacobian(x);
  ADFun< double >* pf = new ADFun< double >(x,yy);
  return pf;
}
2123 #endif
2124 
2125 extern "C"
2126 {
2127 #ifdef TMBAD_FRAMEWORK
2128 
  /* R entry point: build the gradient tape(s) (TMBad framework).
     In parallel mode one tape per parallel region is created (possibly
     reusing region tapes supplied via control$f) and combined into a
     parallelADFun; in serial mode a single ADFun is created. Returns a
     ptrList around the external pointer, with the default parameter
     vector attached as attribute "par". */
  SEXP MakeADGradObject(SEXP data, SEXP parameters, SEXP report, SEXP control)
  {
    typedef TMBad::ad_aug ad;
    typedef TMBad::ADFun<ad> adfun;

    adfun* pf = NULL;
    /* Some type checking */
    if(!Rf_isNewList(data))Rf_error("'data' must be a list");
    if(!Rf_isNewList(parameters))Rf_error("'parameters' must be a list");
    if(!Rf_isEnvironment(report))Rf_error("'report' must be an environment");

    /* Get the default parameter vector (tiny overhead) */
    SEXP par,res=NULL;
    objective_function< double > F(data,parameters,report);
#ifdef _OPENMP
    SEXP f = getListElement(control, "f");
    int n = get_num_tapes(f);
    if (n==0) // No tapes? Count!
      n = F.count_parallel_regions(); // Evaluates user template
#else
    F.count_parallel_regions(); // Evaluates user template
#endif
    PROTECT(par=F.defaultpar());

    if(_openmp){ // Parallel mode
#ifdef _OPENMP
      if(config.trace.parallel)
        std::cout << n << " regions found.\n";
      if (n==0) n++; // No explicit parallel accumulation
      start_parallel(); /* Start threads */
      vector< adfun* > pfvec(n);
      const char* bad_thread_alloc = NULL;
#pragma omp parallel for num_threads(config.nthreads) if (config.tape.parallel && n>1)
      for(int i=0;i<n;i++){
        TMB_TRY {
          pfvec[i] = NULL;
          pfvec[i] = MakeADGradObject_(data, parameters, report, control, i);
          if (config.optimize.instantly) pfvec[i]->optimize();
        }
        TMB_CATCH {
          /* Record failure; Rf_error must not be raised inside the
             OpenMP region, so defer it until after the loop. */
          if (pfvec[i] != NULL) delete pfvec[i];
          bad_thread_alloc = excpt.what();
        }
      }
      if (bad_thread_alloc) {
        TMB_ERROR_BAD_THREAD_ALLOC;
      }
      parallelADFun<double>* ppf=new parallelADFun<double>(pfvec);
      /* Convert parallel ADFun pointer to R_ExternalPtr */
      PROTECT(res=R_MakeExternalPtr((void*) ppf,Rf_install("parallelADFun"),R_NilValue));
      // R_RegisterCFinalizer(res,finalizeparallelADFun);
#endif
    } else { // Serial mode
      /* Actual work: tape creation */
      TMB_TRY {
        pf = NULL;
        pf = MakeADGradObject_(data, parameters, report, control, -1);
        if(config.optimize.instantly)pf->optimize();
      }
      TMB_CATCH {
        if (pf != NULL) delete pf;
        TMB_ERROR_BAD_ALLOC;
      }
      /* Convert ADFun pointer to R_ExternalPtr */
      PROTECT(res=R_MakeExternalPtr((void*) pf,Rf_install("ADFun"),R_NilValue));
    }

    /* Return ptrList */
    SEXP ans;
    Rf_setAttrib(res,Rf_install("par"),par);
    PROTECT(ans=ptrList(res));
    UNPROTECT(3); /* par, res, ans */
    return ans;
  } // MakeADGradObject
2203 #endif
2204 
2205 #ifdef CPPAD_FRAMEWORK
2206 
  /* R entry point: build the gradient tape(s) (CppAD framework).
     Parallel mode creates one tape per parallel region and combines
     them into a parallelADFun; serial mode creates a single ADFun.
     Returns a ptrList around the external pointer, with the default
     parameter vector attached as attribute "par". */
  SEXP MakeADGradObject(SEXP data, SEXP parameters, SEXP report, SEXP control)
  {
    ADFun<double>* pf = NULL;
    /* Some type checking */
    if(!Rf_isNewList(data))Rf_error("'data' must be a list");
    if(!Rf_isNewList(parameters))Rf_error("'parameters' must be a list");
    if(!Rf_isEnvironment(report))Rf_error("'report' must be an environment");

    /* Get the default parameter vector (tiny overhead) */
    SEXP par,res=NULL;
    objective_function< double > F(data,parameters,report);
#ifdef _OPENMP
    int n=F.count_parallel_regions(); // Evaluates user template
#else
    F.count_parallel_regions(); // Evaluates user template
#endif
    PROTECT(par=F.defaultpar());

    if(_openmp){ // Parallel mode
#ifdef _OPENMP
      if(config.trace.parallel)
        std::cout << n << " regions found.\n";
      if (n==0) n++; // No explicit parallel accumulation
      start_parallel(); /* Start threads */
      vector< ADFun<double>* > pfvec(n);
      const char* bad_thread_alloc = NULL;
#pragma omp parallel for num_threads(config.nthreads) if (config.tape.parallel && n>1)
      for(int i=0;i<n;i++){
        TMB_TRY {
          pfvec[i] = NULL;
          pfvec[i] = MakeADGradObject_(data, parameters, report, control, i);
          if (config.optimize.instantly) pfvec[i]->optimize();
        }
        TMB_CATCH {
          /* Record failure; error is raised after the parallel loop */
          if (pfvec[i] != NULL) delete pfvec[i];
          bad_thread_alloc = excpt.what();
        }
      }
      if (bad_thread_alloc) {
        TMB_ERROR_BAD_THREAD_ALLOC;
      }
      parallelADFun<double>* ppf=new parallelADFun<double>(pfvec);
      /* Convert parallel ADFun pointer to R_ExternalPtr */
      PROTECT(res=R_MakeExternalPtr((void*) ppf,Rf_install("parallelADFun"),R_NilValue));
#endif
    } else { // Serial mode
      /* Actual work: tape creation */
      TMB_TRY {
        pf = NULL;
        pf = MakeADGradObject_(data, parameters, report, control, -1);
        if(config.optimize.instantly)pf->optimize();
      }
      TMB_CATCH {
        if (pf != NULL) delete pf;
        TMB_ERROR_BAD_ALLOC;
      }
      /* Convert ADFun pointer to R_ExternalPtr */
      PROTECT(res=R_MakeExternalPtr((void*) pf,Rf_install("ADFun"),R_NilValue));
    }

    /* Return ptrList */
    SEXP ans;
    Rf_setAttrib(res,Rf_install("par"),par);
    PROTECT(ans=ptrList(res));
    UNPROTECT(3); /* par, res, ans */
    return ans;
  } // MakeADGradObject
2274 #endif
2275 }
2276 
2277 
2284 #ifdef TMBAD_FRAMEWORK
/* Build a sparse Hessian tape (TMBad framework) as the sparse Jacobian
   of a gradient tape.
   control$gf   : optional existing gradient tape to differentiate;
                  when absent one is built here (and freed again).
   control$skip : 1-based parameter indices to exclude from both rows
                  and columns of the Hessian.
   Returns an sphess holding the Hessian tape plus (i,j) index vectors
   of the lower triangle in column-major order. */
sphess_t< TMBad::ADFun< TMBad::ad_aug > > MakeADHessObject2_(SEXP data, SEXP parameters, SEXP report, SEXP control, int parallel_region=-1)
{
  typedef TMBad::ad_aug ad;
  typedef TMBad::ADFun<ad> adfun;
  typedef sphess_t<adfun> sphess;
  SEXP gf = getListElement(control, "gf");
  adfun* pgf;
  bool allocate_new_pgf = ( gf == R_NilValue );
  if ( ! allocate_new_pgf ) {
    if (parallel_region == -1)
      pgf = (adfun*) R_ExternalPtrAddr(gf);
    else
      pgf = ((parallelADFun<double>*) R_ExternalPtrAddr(gf))->vecpf[parallel_region];
  } else {
    SEXP control_adgrad = R_NilValue;
    pgf = MakeADGradObject_(data, parameters, report, control_adgrad, parallel_region);
  }
  /* NOTE(review): this also optimizes a tape passed in via control$gf,
     i.e. mutates the caller-owned object — confirm intended. */
  if (config.optimize.instantly) pgf->optimize();
  int n = pgf->Domain();
  std::vector<bool> keepcol(n, true);
  SEXP skip = getListElement(control, "skip");
  for(int i=0; i<LENGTH(skip); i++) {
    keepcol[ INTEGER(skip)[i] - 1 ] = false; // skip is R-index !
  }
  TMBad::SpJacFun_config spjacfun_cfg;
  spjacfun_cfg.index_remap = false;
  spjacfun_cfg.compress = config.tmbad.sparse_hessian_compress;
  TMBad::Sparse<adfun> h = pgf->SpJacFun(keepcol, keepcol, spjacfun_cfg);
  if (allocate_new_pgf) delete pgf;
  // NB: Lower triangle, column major =
  //     Transpose of upper triangle, row major
  h.subset_inplace( h.row() <= h.col() ); // Upper triangle, row major
  h.transpose_inplace(); // Lower triangle, col major
  if (config.optimize.instantly) // Optimize now or later ?
    h.optimize();
  adfun* phf = new adfun( h );
  // Convert h.i and h.j to vector<int>
  vector<TMBad::Index> h_i(h.i);
  vector<TMBad::Index> h_j(h.j);
  sphess ans(phf, h_i.cast<int>(), h_j.cast<int>());
  return ans;
} // MakeADHessObject2
2327 #endif
2328 
2335 #ifdef CPPAD_FRAMEWORK
/* Build a sparse Hessian tape (CppAD framework) via three tape levels:
   tape 1 objective (AD^3), tape 2 gradient (AD^2), tape 3 the sparse
   Hessian columns (AD). control$skip gives 1-based parameter indices
   excluded from rows and columns. Only the lower triangle (row>=col)
   is kept. Returns an sphess with the tape-3 pointer and (i,j)
   index vectors. */
sphess MakeADHessObject2_(SEXP data, SEXP parameters, SEXP report, SEXP control, int parallel_region=-1)
{
  /* Some type checking */
  if(!Rf_isNewList(data))Rf_error("'data' must be a list");
  if(!Rf_isNewList(parameters))Rf_error("'parameters' must be a list");
  if(!Rf_isEnvironment(report))Rf_error("'report' must be an environment");

  /* Prepare stuff */
  objective_function< AD<AD<AD<double> > > > F(data,parameters,report);
  F.set_parallel_region(parallel_region);
  int n = F.theta.size();
  SEXP skip = getListElement(control, "skip");
  vector<bool> keepcol(n); // Scatter for fast lookup
  for(int i=0; i<n; i++){
    keepcol[i]=true;
  }
  for(int i=0; i<LENGTH(skip); i++){
    keepcol[INTEGER(skip)[i]-1]=false; // skip is R-index !
  }
#define KEEP_COL(col) (keepcol[col])
#define KEEP_ROW(row,col) ( KEEP_COL(row) && (row>=col) )

  /* Tape 1: Function R^n -> R */
  Independent(F.theta);
  vector< AD<AD<AD<double> > > > y(1);
  y[0] = F.evalUserTemplate();
  ADFun<AD<AD<double> > > tape1(F.theta, y);

  /* Tape 2: Gradient R^n -> R^n   (and optimize) */
  vector<AD<AD<double> > > xx(n);
  for(int i=0; i<n; i++) xx[i] = CppAD::Value(F.theta[i]);
  vector<AD<AD<double> > > yy(n);
  Independent(xx);
  yy = tape1.Jacobian(xx);
  ADFun<AD<double > > tape2(xx,yy);
  if (config.optimize.instantly) tape2.optimize();

  /* Tape 3: Hessian  R^n -> R^m   (optimize later) */
  tape2.my_init(keepcol);
  int colisize;
  int m=0; // Count number of non-zeros (m)
  for(int i=0; i<int(tape2.colpattern.size()); i++){
    colisize = tape2.colpattern[i].size();
    if(KEEP_COL(i)){
      for(int j=0; j<colisize; j++){
        m += KEEP_ROW( tape2.colpattern[i][j] , i); // bool counts as 0/1
      }
    }
  }
  // Allocate index vectors of non-zero pairs
  vector<int> rowindex(m);
  vector<int> colindex(m);
  // Prepare reverse sweep for Hessian columns
  vector<AD<double> > u(n);
  vector<AD<double> > v(n);
  for(int i = 0; i < n; i++) v[i] = 0.0;
  vector<AD<double> > xxx(n);
  for(int i=0; i<n; i++) xxx[i]=CppAD::Value(CppAD::Value(F.theta[i]));
  vector<AD<double> > yyy(m);
  CppAD::vector<int>* icol;
  // Do sweeps and fill in non-zero index pairs
  Independent(xxx);
  tape2.Forward(0, xxx);
  int k=0; // write position in (rowindex, colindex, yyy)
  for(int i = 0; i < n; i++){
    if (KEEP_COL(i)) {
      /* One reverse sweep per kept column yields Hessian column i */
      tape2.myReverse(1, v, i /*range comp*/, u /*domain*/);
      icol = &tape2.colpattern[i];
      for(int j=0; j<int(icol->size()); j++){
        if(KEEP_ROW( icol->operator[](j), i )){
          rowindex[k] = icol->operator[](j);
          colindex[k] = i;
          yyy[k] = u[icol->operator[](j)];
          k++;
        }
      }
    }
  }
  ADFun< double >* ptape3 = new ADFun< double >;
  ptape3->Dependent(xxx,yyy);
  sphess ans(ptape3, rowindex, colindex);
  return ans;
} // MakeADHessObject2
2419 #endif
2420 
2421 // kasper: Move to new file e.g. "convert.hpp"
/* Wrap a sparse Hessian structure in an R external pointer with tag
   'tag' (returned as a ptrList). Attributes set on the pointer:
   "par" (NULL here), and "i"/"j" with the non-zero row/col indices. */
template <class ADFunType>
SEXP asSEXP(const sphess_t<ADFunType> &H, const char* tag)
{
  SEXP par;
  par=R_NilValue;
  /* Convert ADFun pointer to R_ExternalPtr */
  SEXP res;
  PROTECT( res = R_MakeExternalPtr((void*) H.pf, Rf_install(tag), R_NilValue) );
  /* Return list */
  SEXP ans;
  /* Implicitly protected temporaries */
  SEXP par_symbol = Rf_install("par");
  SEXP i_symbol = Rf_install("i");
  SEXP j_symbol = Rf_install("j");
  Rf_setAttrib(res, par_symbol, par);
  Rf_setAttrib(res, i_symbol, asSEXP(H.i));
  Rf_setAttrib(res, j_symbol, asSEXP(H.j));
  PROTECT(ans=ptrList(res));
  UNPROTECT(2); /* res, ans */
  return ans;
}
2444 
2445 
2446 extern "C"
2447 {
2448 
2449 #ifdef TMBAD_FRAMEWORK
2450 #ifdef _OPENMP
  /* R entry point (TMBad, OpenMP build): build one sparse-Hessian tape
     per parallel region and combine them into a parallelADFun, which
     is returned wrapped as external pointer "parallelADFun". */
  SEXP MakeADHessObject2(SEXP data, SEXP parameters, SEXP report, SEXP control){
    typedef TMBad::ad_aug ad;
    typedef TMBad::ADFun<ad> adfun;
    typedef sphess_t<adfun> sphess;
    if(config.trace.parallel)
      std::cout << "Count num parallel regions\n";
    objective_function< double > F(data,parameters,report);
    SEXP gf = getListElement(control, "gf");
    int n = get_num_tapes(gf);
    if (n==0) // No tapes? Count!
      n = F.count_parallel_regions(); // Evaluates user template
    if(config.trace.parallel)
      std::cout << n << " regions found.\n";
    if (n==0) n++; // No explicit parallel accumulation
    start_parallel(); /* FIXME: not needed */
    /* parallel test */
    const char* bad_thread_alloc = NULL;
    vector<sphess*> Hvec(n);
#pragma omp parallel for num_threads(config.nthreads) if (config.tape.parallel && n>1)
    for (int i=0; i<n; i++) {
      TMB_TRY {
        Hvec[i] = NULL;
        Hvec[i] = new sphess( MakeADHessObject2_(data, parameters, report, control, i) );
        //optimizeTape( Hvec[i]->pf );
      }
      TMB_CATCH {
        /* Record failure; error is raised after the parallel loop */
        if (Hvec[i] != NULL) {
          delete Hvec[i]->pf;
          delete Hvec[i];
        }
        bad_thread_alloc = excpt.what();
      }
    }
    if (bad_thread_alloc) {
      TMB_ERROR_BAD_THREAD_ALLOC;
    }
    /* NOTE(review): unlike the CppAD variant below, the Hvec[i]
       wrappers are not deleted after constructing parallelADFun —
       confirm whether its constructor takes ownership here. */
    parallelADFun<double>* tmp=new parallelADFun<double>(Hvec);
    return asSEXP(tmp->convert(),"parallelADFun");
  } // MakeADHessObject2
2490 #else
  /* R entry point (TMBad, serial build): build a single sparse-Hessian
     tape and return it wrapped as external pointer "ADFun". */
  SEXP MakeADHessObject2(SEXP data, SEXP parameters, SEXP report, SEXP control){
    typedef TMBad::ad_aug ad;
    typedef TMBad::ADFun<ad> adfun;
    typedef sphess_t<adfun> sphess;
    sphess* pH = NULL;
    SEXP ans;
    TMB_TRY {
      pH = new sphess( MakeADHessObject2_(data, parameters, report, control, -1) );
      //optimizeTape( pH->pf );
      ans = asSEXP(*pH, "ADFun"); // external pointer now owns pH->pf
    }
    TMB_CATCH {
      if (pH != NULL) {
        delete pH->pf;
        delete pH;
      }
      TMB_ERROR_BAD_ALLOC;
    }
    delete pH; // wrapper only; the tape lives on in 'ans'
    return ans;
  } // MakeADHessObject2
2512 #endif
2513 #endif
2514 
2515 #ifdef CPPAD_FRAMEWORK
2516 #ifdef _OPENMP
  /* R entry point (CppAD, OpenMP build): build one sparse-Hessian tape
     per parallel region, combine into a parallelADFun and return as
     external pointer "parallelADFun". */
  SEXP MakeADHessObject2(SEXP data, SEXP parameters, SEXP report, SEXP control){
    if(config.trace.parallel)
      std::cout << "Count num parallel regions\n";
    objective_function< double > F(data,parameters,report);
    int n=F.count_parallel_regions();
    if(config.trace.parallel)
      std::cout << n << " regions found.\n";
    if (n==0) n++; // No explicit parallel accumulation

    start_parallel(); /* Start threads */

    /* parallel test */
    const char* bad_thread_alloc = NULL;
    vector<sphess*> Hvec(n);
#pragma omp parallel for num_threads(config.nthreads) if (config.tape.parallel && n>1)
    for (int i=0; i<n; i++) {
      TMB_TRY {
        Hvec[i] = NULL;
        Hvec[i] = new sphess( MakeADHessObject2_(data, parameters, report, control, i) );
        optimizeTape( Hvec[i]->pf );
      }
      TMB_CATCH {
        /* Record failure; error is raised after the parallel loop */
        if (Hvec[i] != NULL) {
          delete Hvec[i]->pf;
          delete Hvec[i];
        }
        bad_thread_alloc = excpt.what();
      }
    }
    if (bad_thread_alloc) {
      TMB_ERROR_BAD_THREAD_ALLOC;
    }
    parallelADFun<double>* tmp=new parallelADFun<double>(Hvec);
    /* The wrappers have served their purpose; the combined object owns
       the underlying tapes. */
    for(int i=0; i<n; i++) {
      delete Hvec[i];
    }
    // Adds finalizer for 'tmp' !!! (so, don't delete tmp...)
    SEXP ans = asSEXP(tmp->convert(),"parallelADFun");
    return ans;
  } // MakeADHessObject2
2557 #else
  /* R entry point (CppAD, serial build): build a single sparse-Hessian
     tape and return it wrapped as external pointer "ADFun". */
  SEXP MakeADHessObject2(SEXP data, SEXP parameters, SEXP report, SEXP control){
    sphess* pH = NULL;
    SEXP ans;
    TMB_TRY {
      pH = new sphess( MakeADHessObject2_(data, parameters, report, control, -1) );
      optimizeTape( pH->pf );
      ans = asSEXP(*pH, "ADFun"); // external pointer now owns pH->pf
    }
    TMB_CATCH {
      if (pH != NULL) {
        delete pH->pf;
        delete pH;
      }
      TMB_ERROR_BAD_ALLOC;
    }
    delete pH; // wrapper only; the tape lives on in 'ans'
    return ans;
  } // MakeADHessObject2
2576 #endif
2577 #endif
2578 }
2579 
2580 extern "C"
2581 {
2582 
2583 #ifdef TMBAD_FRAMEWORK
2584  SEXP usingAtomics(){
2585  SEXP ans;
2586  PROTECT(ans = Rf_allocVector(INTSXP,1));
2587  INTEGER(ans)[0] = 1; // TMBAD doesn't benefit from knowing if 'false'
2588  UNPROTECT(1);
2589  return ans;
2590  }
2591 #endif
2592 
2593 #ifdef CPPAD_FRAMEWORK
2594  SEXP usingAtomics(){
2595  SEXP ans;
2596  PROTECT(ans = Rf_allocVector(INTSXP,1));
2597  INTEGER(ans)[0] = atomic::atomicFunctionGenerated;
2598  UNPROTECT(1);
2599  return ans;
2600  }
2601 #endif
2602 
  /* Return the compiled-in AD framework name ("TMBad"/"CppAD"/
     "Unknown") as a string with attributes: "openmp" (logical, OpenMP
     build flag) and, under TMBad, "sizeof(Index)". */
  SEXP getFramework() {
    // ans
    SEXP ans;
#ifdef TMBAD_FRAMEWORK
    ans = Rf_mkString("TMBad");
#elif defined(CPPAD_FRAMEWORK)
    ans = Rf_mkString("CppAD");
#else
    ans = Rf_mkString("Unknown");
#endif
    PROTECT(ans);
    // openmp_sym (Not strictly necessary to PROTECT)
    SEXP openmp_sym = Rf_install("openmp");
    PROTECT(openmp_sym);
    // openmp_res
    SEXP openmp_res;
#ifdef _OPENMP
    openmp_res = Rf_ScalarLogical(1);
#else
    openmp_res = Rf_ScalarLogical(0);
#endif
    PROTECT(openmp_res);
    // Assemble
    Rf_setAttrib(ans, openmp_sym, openmp_res);
    UNPROTECT(2); /* openmp_sym, openmp_res */
    // Add more stuff
#ifdef TMBAD_FRAMEWORK
    SEXP index_size_sym = Rf_install("sizeof(Index)");
    PROTECT(index_size_sym);
    SEXP index_size = Rf_ScalarInteger(sizeof(TMBad::Index));
    PROTECT(index_size);
    Rf_setAttrib(ans, index_size_sym, index_size);
    UNPROTECT(2); /* index_size_sym, index_size */
#endif
    UNPROTECT(1); // ans
    return ans;
  }
2640 }
2641 
2642 extern "C"
2643 {
  /* C-callable (registered for dependent packages): zero-order forward
     sweep y = f(x) on the tape stored in external pointer 'f'.
     Dispatches on the pointer tag ("ADFun" vs "parallelADFun"). */
  void tmb_forward(SEXP f, const Eigen::VectorXd &x, Eigen::VectorXd &y) {
#ifdef CPPAD_FRAMEWORK
    SEXP tag=R_ExternalPtrTag(f);
    if(tag == Rf_install("ADFun")) {
      ADFun<double>* pf;
      pf = (ADFun<double>*) R_ExternalPtrAddr(f);
      y = pf->Forward(0, x);
    } else
    if(tag == Rf_install("parallelADFun")) {
      parallelADFun<double>* pf;
      pf = (parallelADFun<double>*) R_ExternalPtrAddr(f);
      y = pf->Forward(0, x);
    } else
    Rf_error("Unknown function pointer");
#endif
#ifdef TMBAD_FRAMEWORK
    typedef TMBad::ad_aug ad;
    typedef TMBad::ADFun<ad> adfun;
    SEXP tag=R_ExternalPtrTag(f);
    if(tag == Rf_install("ADFun")) {
      adfun* pf = (adfun*) R_ExternalPtrAddr(f);
      y = pf->forward(x);
    } else
    if(tag == Rf_install("parallelADFun")) {
      parallelADFun<double>* pf;
      pf = (parallelADFun<double>*) R_ExternalPtrAddr(f);
      y = pf->forward(x);
    } else
    Rf_error("Unknown function pointer");
#endif
  }
  /* C-callable (registered for dependent packages): first-order reverse
     sweep y = v^T f'(x) on the tape stored in external pointer 'f'.
     Dispatches on the pointer tag ("ADFun" vs "parallelADFun"). */
  void tmb_reverse(SEXP f, const Eigen::VectorXd &v, Eigen::VectorXd &y) {
#ifdef CPPAD_FRAMEWORK
    SEXP tag=R_ExternalPtrTag(f);
    if(tag == Rf_install("ADFun")) {
      ADFun<double>* pf;
      pf = (ADFun<double>*) R_ExternalPtrAddr(f);
      y = pf->Reverse(1, v);
    } else
    if(tag == Rf_install("parallelADFun")) {
      parallelADFun<double>* pf;
      pf = (parallelADFun<double>*) R_ExternalPtrAddr(f);
      y = pf->Reverse(1, v);
    } else
    Rf_error("Unknown function pointer");
#endif
#ifdef TMBAD_FRAMEWORK
    typedef TMBad::ad_aug ad;
    typedef TMBad::ADFun<ad> adfun;
    SEXP tag=R_ExternalPtrTag(f);
    if(tag == Rf_install("ADFun")) {
      adfun* pf = (adfun*) R_ExternalPtrAddr(f);
      y = pf->reverse(v);
    } else
    if(tag == Rf_install("parallelADFun")) {
      parallelADFun<double>* pf;
      pf = (parallelADFun<double>*) R_ExternalPtrAddr(f);
      y = pf->reverse(v);
    } else
    Rf_error("Unknown function pointer");
#endif
  }
2706 }
2707 
2708 #endif /* #ifndef WITH_LIBTMB */
2709 
2710 
2711 
2712 
2713 
2714 #ifdef WITH_LIBTMB
2715 
2716 template class objective_function<double>;
2717 #ifdef CPPAD_FRAMEWORK
2718 template class objective_function<AD<double> >;
2719 template class objective_function<AD<AD<double> > >;
2720 template class objective_function<AD<AD<AD<double> > > >;
2721 #endif
2722 #ifdef TMBAD_FRAMEWORK
2723 template class objective_function<TMBad::ad_aug>;
2724 #endif
2725 
/* WITH_LIBTMB build: the definitions above are compiled into the TMB
   library itself, so here we only declare the C entry points that the
   precompiled library exports. */
extern "C"
{
  SEXP MakeADFunObject(SEXP data, SEXP parameters, SEXP report, SEXP control);
  SEXP InfoADFunObject(SEXP f);
  SEXP tmbad_print(SEXP f, SEXP control);
  SEXP optimizeADFunObject(SEXP f);
  SEXP EvalADFunObject(SEXP f, SEXP theta, SEXP control);
  SEXP MakeDoubleFunObject(SEXP data, SEXP parameters, SEXP report, SEXP control);
  SEXP EvalDoubleFunObject(SEXP f, SEXP theta, SEXP control);
  SEXP getParameterOrder(SEXP data, SEXP parameters, SEXP report, SEXP control);
  SEXP MakeADGradObject(SEXP data, SEXP parameters, SEXP report, SEXP control);
  SEXP MakeADHessObject2(SEXP data, SEXP parameters, SEXP report, SEXP control);
  SEXP usingAtomics();
  SEXP getFramework();
  SEXP getSetGlobalPtr(SEXP ptr);
  SEXP TransformADFunObject(SEXP f, SEXP control);
  void tmb_forward(SEXP f, const Eigen::VectorXd &x, Eigen::VectorXd &y);
  void tmb_reverse(SEXP f, const Eigen::VectorXd &v, Eigen::VectorXd &y);
}
2745 
2746 #endif /* #ifdef WITH_LIBTMB */
2747 
2748 /* Register native routines (see 'Writing R extensions'). Especially
2749  relevant to avoid symbol lookup overhead for those routines that
2750  are called many times e.g. EvalADFunObject. */
2751 extern "C"{
2752  /* Some string utilities */
2753 #define xstringify(s) stringify(s)
2754 #define stringify(s) #s
2755  /* May be used as part of custom calldef tables */
2756 #define TMB_CALLDEFS \
2757  {"MakeADFunObject", (DL_FUNC) &MakeADFunObject, 4}, \
2758  {"FreeADFunObject", (DL_FUNC) &FreeADFunObject, 1}, \
2759  {"InfoADFunObject", (DL_FUNC) &InfoADFunObject, 1}, \
2760  {"tmbad_print", (DL_FUNC) &tmbad_print, 2}, \
2761  {"EvalADFunObject", (DL_FUNC) &EvalADFunObject, 3}, \
2762  {"TransformADFunObject",(DL_FUNC) &TransformADFunObject,2}, \
2763  {"MakeDoubleFunObject", (DL_FUNC) &MakeDoubleFunObject, 4}, \
2764  {"EvalDoubleFunObject", (DL_FUNC) &EvalDoubleFunObject, 3}, \
2765  {"getParameterOrder", (DL_FUNC) &getParameterOrder, 4}, \
2766  {"MakeADGradObject", (DL_FUNC) &MakeADGradObject, 4}, \
2767  {"MakeADHessObject2", (DL_FUNC) &MakeADHessObject2, 4}, \
2768  {"usingAtomics", (DL_FUNC) &usingAtomics, 0}, \
2769  {"getFramework", (DL_FUNC) &getFramework, 0}, \
2770  {"getSetGlobalPtr", (DL_FUNC) &getSetGlobalPtr, 1}, \
2771  {"TMBconfig", (DL_FUNC) &TMBconfig, 2}
2772  /* May be used as part of custom R_init function
2773  C-callable routines (PACKAGE is 'const char*') */
2774 #define TMB_CCALLABLES(PACKAGE) \
2775  R_RegisterCCallable(PACKAGE, "tmb_forward", (DL_FUNC) &tmb_forward); \
2776  R_RegisterCCallable(PACKAGE, "tmb_reverse", (DL_FUNC) &tmb_reverse);
2777  /* Default (optional) calldef table. */
2778 #ifdef TMB_LIB_INIT
2779 #include <R_ext/Rdynload.h>
2780 static R_CallMethodDef CallEntries[] = {
2781  TMB_CALLDEFS
2782  ,
2783  /* User's R_unload_lib function must also be registered (because we
2784  disable dynamic lookup - see below). The unload function is
2785  mainly useful while developing models in order to clean up
2786  external pointers without restarting R. Should not be used by TMB
2787  dependent packages. */
2788 #ifdef LIB_UNLOAD
2789  {xstringify(LIB_UNLOAD), (DL_FUNC) &LIB_UNLOAD, 1},
2790 #endif
2791  /* End of table */
2792  {NULL, NULL, 0}
2793 };
/* Shared-library init function. TMB_LIB_INIT names the R init hook
   (e.g. R_init_mypkg): register the .Call table, disable dynamic
   symbol lookup, and register the C-callables under the package name. */
void TMB_LIB_INIT(DllInfo *dll){
  R_registerRoutines(dll, NULL, CallEntries, NULL, NULL);
  R_useDynamicSymbols(dll, (Rboolean)FALSE);
  // Example: TMB_LIB_INIT = R_init_mypkg
  // Skipping the first 7 characters ("R_init_") of the stringified
  // macro name yields the package name ("mypkg") for TMB_CCALLABLES.
  TMB_CCALLABLES(&(xstringify(TMB_LIB_INIT)[7]));
}
2803 #endif /* #ifdef TMB_LIB_INIT */
2804 #undef xstringify
2805 #undef stringify
2806 }
VT cdf_upper
Logarithm of upper CDF
Definition: tmb_core.hpp:445
std::vector< T > subset(const std::vector< T > &x, const std::vector< bool > &y)
Vector subset by boolean mask.
Vector class used by TMB.
Definition: vector.hpp:17
void reorder_temporaries(global &glob)
Re-order computational graph to make it more compressible.
Definition: TMBad.cpp:567
diff --git a/tmb__enable__header__only_8hpp.html b/tmb__enable__header__only_8hpp.html index 13f6aea96..dfb5f178a 100644 --- a/tmb__enable__header__only_8hpp.html +++ b/tmb__enable__header__only_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/tmb__enable__header__only_8hpp_source.html b/tmb__enable__header__only_8hpp_source.html index c541f4752..81744bf1e 100644 --- a/tmb__enable__header__only_8hpp_source.html +++ b/tmb__enable__header__only_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/tmb__enable__precompile_8hpp.html b/tmb__enable__precompile_8hpp.html index 72ca8ff63..3be976f0b 100644 --- a/tmb__enable__precompile_8hpp.html +++ b/tmb__enable__precompile_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/tmb__enable__precompile_8hpp_source.html b/tmb__enable__precompile_8hpp_source.html index e222641fb..f3fd30b20 100644 --- a/tmb__enable__precompile_8hpp_source.html +++ b/tmb__enable__precompile_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/tmbad__allow__comparison_8hpp_source.html b/tmbad__allow__comparison_8hpp_source.html index d656e3b45..0376b3777 100644 --- a/tmbad__allow__comparison_8hpp_source.html +++ b/tmbad__allow__comparison_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
@@ -73,6 +73,6 @@
tmbad_allow_comparison.hpp
-
1 #ifndef HAVE_TMBAD_ALLOW_COMPARISON_HPP
2 #define HAVE_TMBAD_ALLOW_COMPARISON_HPP
3 // Autogenerated - do not edit by hand !
4 #include "global.hpp"
5 
6 namespace TMBad {
12 bool operator<(const ad_aug &x, const ad_aug &y);
13 bool operator<(const Scalar &x, const ad_aug &y);
14 bool operator<=(const ad_aug &x, const ad_aug &y);
15 bool operator<=(const Scalar &x, const ad_aug &y);
16 bool operator>(const ad_aug &x, const ad_aug &y);
17 bool operator>(const Scalar &x, const ad_aug &y);
18 bool operator>=(const ad_aug &x, const ad_aug &y);
19 bool operator>=(const Scalar &x, const ad_aug &y);
20 bool operator==(const ad_aug &x, const ad_aug &y);
21 bool operator==(const Scalar &x, const ad_aug &y);
22 bool operator!=(const ad_aug &x, const ad_aug &y);
23 bool operator!=(const Scalar &x, const ad_aug &y);
24 
25 } // namespace TMBad
26 #endif // HAVE_TMBAD_ALLOW_COMPARISON_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:153
+
1 #ifndef HAVE_TMBAD_ALLOW_COMPARISON_HPP
2 #define HAVE_TMBAD_ALLOW_COMPARISON_HPP
3 // Autogenerated - do not edit by hand !
4 #include "global.hpp"
5 
6 namespace TMBad {
12 bool operator<(const ad_aug &x, const ad_aug &y);
13 bool operator<(const Scalar &x, const ad_aug &y);
14 bool operator<=(const ad_aug &x, const ad_aug &y);
15 bool operator<=(const Scalar &x, const ad_aug &y);
16 bool operator>(const ad_aug &x, const ad_aug &y);
17 bool operator>(const Scalar &x, const ad_aug &y);
18 bool operator>=(const ad_aug &x, const ad_aug &y);
19 bool operator>=(const Scalar &x, const ad_aug &y);
20 bool operator==(const ad_aug &x, const ad_aug &y);
21 bool operator==(const Scalar &x, const ad_aug &y);
22 bool operator!=(const ad_aug &x, const ad_aug &y);
23 bool operator!=(const Scalar &x, const ad_aug &y);
24 
25 } // namespace TMBad
26 #endif // HAVE_TMBAD_ALLOW_COMPARISON_HPP
Automatic differentiation library designed for TMB.
Definition: TMB.hpp:157
License: GPL v2 diff --git a/tmbad__atomic__macro_8hpp_source.html b/tmbad__atomic__macro_8hpp_source.html index 337ce58b0..d0706aafc 100644 --- a/tmbad__atomic__macro_8hpp_source.html +++ b/tmbad__atomic__macro_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/tmbutils_2vectorize_8hpp.html b/tmbutils_2vectorize_8hpp.html index b895002df..b491b61ad 100644 --- a/tmbutils_2vectorize_8hpp.html +++ b/tmbutils_2vectorize_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/tmbutils_2vectorize_8hpp_source.html b/tmbutils_2vectorize_8hpp_source.html index 1ddbc2fdc..92192015e 100644 --- a/tmbutils_2vectorize_8hpp_source.html +++ b/tmbutils_2vectorize_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/tmbutils_8hpp.html b/tmbutils_8hpp.html index b2dcc3c36..f33437de0 100644 --- a/tmbutils_8hpp.html +++ b/tmbutils_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/tmbutils_8hpp_source.html b/tmbutils_8hpp_source.html index 3d9cd6835..6061f37d4 100644 --- a/tmbutils_8hpp_source.html +++ b/tmbutils_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/tmbutils__extra_8hpp_source.html b/tmbutils__extra_8hpp_source.html index 12e5a1945..1be186313 100644 --- a/tmbutils__extra_8hpp_source.html +++ b/tmbutils__extra_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/toggle__thread__safe__R_8hpp.html b/toggle__thread__safe__R_8hpp.html index 8afb3a805..dff7a101c 100644 --- a/toggle__thread__safe__R_8hpp.html +++ b/toggle__thread__safe__R_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/toggle__thread__safe__R_8hpp_source.html b/toggle__thread__safe__R_8hpp_source.html index 1b5f224c1..0083fdce6 100644 --- a/toggle__thread__safe__R_8hpp_source.html +++ b/toggle__thread__safe__R_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/toms708_8cpp_source.html b/toms708_8cpp_source.html index 14f74fda7..2f2eb7a01 100644 --- a/toms708_8cpp_source.html +++ b/toms708_8cpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/transform2_8cpp-example.html b/transform2_8cpp-example.html index 9ae0f8582..7ddcbf1fa 100644 --- a/transform2_8cpp-example.html +++ b/transform2_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/transform_8cpp-example.html b/transform_8cpp-example.html index a420e73f8..87f0c0ff8 100644 --- a/transform_8cpp-example.html +++ b/transform_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/transform_parallel_8cpp-example.html b/transform_parallel_8cpp-example.html index 43bae95bb..0991d2e74 100644 --- a/transform_parallel_8cpp-example.html +++ b/transform_parallel_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/tweedie_8cpp-example.html b/tweedie_8cpp-example.html index 5919c9282..66177f51d 100644 --- a/tweedie_8cpp-example.html +++ b/tweedie_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/tweedie_8cpp_source.html b/tweedie_8cpp_source.html index 17c57f7ba..aee5747b6 100644 --- a/tweedie_8cpp_source.html +++ b/tweedie_8cpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/tweedie_8hpp_source.html b/tweedie_8hpp_source.html index bf9225714..e1c4f7808 100644 --- a/tweedie_8hpp_source.html +++ b/tweedie_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/validation_2MVRandomWalkValidation_8cpp-example.html b/validation_2MVRandomWalkValidation_8cpp-example.html index 853e69fc7..759fcf55a 100644 --- a/validation_2MVRandomWalkValidation_8cpp-example.html +++ b/validation_2MVRandomWalkValidation_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/validation_2randomwalkvalidation_8cpp-example.html b/validation_2randomwalkvalidation_8cpp-example.html index c28788fc0..da80911b5 100644 --- a/validation_2randomwalkvalidation_8cpp-example.html +++ b/validation_2randomwalkvalidation_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/validation_2rickervalidation_8cpp-example.html b/validation_2rickervalidation_8cpp-example.html index eaa2c2590..ebfdc0264 100644 --- a/validation_2rickervalidation_8cpp-example.html +++ b/validation_2rickervalidation_8cpp-example.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/vector_8hpp.html b/vector_8hpp.html index e0041bc1d..6cc525e53 100644 --- a/vector_8hpp.html +++ b/vector_8hpp.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11
diff --git a/vector_8hpp_source.html b/vector_8hpp_source.html index b3bd2a8c5..9b550a290 100644 --- a/vector_8hpp_source.html +++ b/vector_8hpp_source.html @@ -28,7 +28,7 @@
TMB Documentation -  v1.9.10 +  v1.9.11