exprinterpret_cppad.cpp
1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2 /* */
3 /* This file is part of the program and library */
4 /* SCIP --- Solving Constraint Integer Programs */
5 /* */
6 /* Copyright (C) 2002-2022 Konrad-Zuse-Zentrum */
7 /* fuer Informationstechnik Berlin */
8 /* */
9 /* SCIP is distributed under the terms of the ZIB Academic License. */
10 /* */
11 /* You should have received a copy of the ZIB Academic License */
12 /* along with SCIP; see the file COPYING. If not visit scipopt.org. */
13 /* */
14 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
15 
16 /**@file exprinterpret_cppad.cpp
17  * @brief methods to interpret (evaluate) an expression "fast" using CppAD
18  * @ingroup DEFPLUGINS_EXPRINT
19  * @author Stefan Vigerske
20  */
21 
22 /*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/
23 
24 #include "scip/exprinterpret.h"
25 #include "scip/def.h"
26 #include "scip/intervalarith.h"
27 #include "scip/pub_expr.h"
28 #include "scip/scip_expr.h"
29 #include "scip/expr_pow.h"
30 #include "scip/expr_exp.h"
31 #include "scip/expr_log.h"
32 #include "scip/expr_varidx.h"
33 
34 #include <cmath>
35 #include <cstring>
36 #include <algorithm>
37 #include <vector>
38 using std::vector;
39 
40 /* Turn off lint warning "747: Significant prototype coercion" and "732: Loss of sign".
41  * The first warning is generated for expressions like t[0], where t is a vector, since 0 is an integer constant, but a
42  * size_t is expected (usually long unsigned). The second is generated for expressions like t[n], where n is an
43  * integer. Both code pieces are likely to be correct. It seems to be impossible to inhibit these messages for
44  * vector<*>::operator[] only. */
45 /*lint --e{747,732}*/
46 
47 /* defining EVAL_USE_EXPRHDLR_ALWAYS uses the exprhdlr methods for the evaluation and differentiation of all expressions (except for var and value)
48  * instead of only those that are not recognized in this code
49  * this is incomplete (no hessians) and slow (only forward-mode gradients), but can be useful for testing
50  */
51 // #define EVAL_USE_EXPRHDLR_ALWAYS
52 
53 /* defining NO_CPPAD_USER_ATOMIC disables the use of our own implementation of derivatives of power operators
54  * via CppAD's user-atomic function feature
55  * our customized implementation should give better results (tighter intervals) for the interval data type
56  * but we don't use interval types here anymore and the atomic operator does not look threadsafe (might be better in newer CppAD version)
57  */
58 #define NO_CPPAD_USER_ATOMIC
59 
60 /* fallback to non-thread-safe version if C++ is too old to have std::atomic */
61 #if __cplusplus < 201103L && defined(SCIP_THREADSAFE)
62 #undef SCIP_THREADSAFE
63 #endif
64 
65 /* CppAD needs to know a fixed upper bound on the number of threads at compile time.
66  * It is wise to set it to a power of 2, so that if the tape id overflows, it is likely to start at 0 again, which avoids difficult to debug errors.
67  */
68 #ifndef CPPAD_MAX_NUM_THREADS
69 #ifdef SCIP_THREADSAFE
70 #define CPPAD_MAX_NUM_THREADS 64
71 #else
72 #define CPPAD_MAX_NUM_THREADS 1
73 #endif
74 #endif
75 
76 /* disable -Wshadow warnings for upcoming includes of CppAD if using some old GCC
77  * -Wshadow was too strict with some versions of GCC 4 (https://stackoverflow.com/questions/2958457/gcc-wshadow-is-too-strict)
78  * disable -Wimplicit-fallthrough as I don't want to maintain extra comments in CppAD code to suppress these
79  */
80 #ifdef __GNUC__
81 #if __GNUC__ == 4
82 #pragma GCC diagnostic ignored "-Wshadow"
83 #endif
84 #if __GNUC__ >= 7
85 #pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
86 #endif
87 #endif
88 
89 // MS compiler doesn't have log2() - define an expensive workaround
90 #ifdef _MSC_VER
91 #define log2(x) (log((double)x) / log(2.0))
92 #endif
93 
94 #include <cppad/cppad.hpp>
95 #include <cppad/utility/error_handler.hpp>
96 
97 /* CppAD is not thread-safe by itself, but uses some static data structures.
98  * To run it in a multithreading environment, a special CppAD memory allocator that is aware of the multiple threads has to be used.
99  * This allocator requires knowing the number of threads and a thread number for each thread.
100  * To implement this, we follow the team_pthread example of CppAD, which uses pthread's thread-specific data management.
101  */
102 #ifdef SCIP_THREADSAFE
103 
104 #include <atomic>
105 
106 /** currently registered number of threads */
107 static std::atomic_size_t ncurthreads{0};
108 static thread_local int thread_number{-1};
109 
110 /** CppAD callback function that indicates whether we are running in parallel mode */
111 static
112 bool in_parallel(void)
113 {
114  return ncurthreads > 1;
115 }
116 
117 /** CppAD callback function that returns the number of the current thread
118  *
119  * assigns a new number to the thread if new
120  */
121 static
122 size_t thread_num(void)
123 {
124  size_t threadnum;
125 
126  /* if no thread_number for this thread yet, then assign a new thread number to the current thread
127  */
128  if( thread_number == -1 )
129  {
130  thread_number = static_cast<int>(ncurthreads.fetch_add(1, std::memory_order_relaxed));
131  }
132 
133  threadnum = static_cast<size_t>(thread_number);
134 
135  return threadnum;
136 }
137 
138 /** sets up CppAD's datastructures for running in multithreading mode
139  *
140  * It must be called once before multithreading is started.
141  * For GCC-compatible compilers, this will happen automatically.
142  */
143 extern "C" SCIP_EXPORT char SCIPexprintCppADInitParallel(void);
144 #ifdef __GNUC__
145 __attribute__((constructor))
146 #endif
147 char SCIPexprintCppADInitParallel(void)
148 {
149  CppAD::thread_alloc::parallel_setup(CPPAD_MAX_NUM_THREADS, in_parallel, thread_num);
150  CppAD::parallel_ad<double>();
151 
152  return 0;
153 }
154 
155 #if !defined(__GNUC__)
156 /** a dummy variable that is initialized to the result of SCIPexprintCppADInitParallel()
157  *
158  * The purpose is to make sure that SCIPexprintCppADInitParallel() is called before any multithreading is started.
159  */
160 static char init_parallel_return = SCIPexprintCppADInitParallel();
161 #endif
162 
163 #endif // SCIP_THREADSAFE
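/* Illustrative sketch, not part of the original file: how the thread setup above would typically be
 * exercised when SCIP_THREADSAFE is defined. The worker lambdas and the use of std::thread are
 * assumptions made for illustration only; SCIP itself does not spawn threads here.
 */
#if 0 /* illustration only */
#include <thread>

static void example_parallel_setup(void)
{
   /* make sure CppAD knows our callbacks before any worker thread starts;
    * with GCC-compatible compilers the constructor attribute already did this */
   SCIPexprintCppADInitParallel();

   std::thread t1([]() { (void)thread_num(); });  /* first call registers a thread number */
   std::thread t2([]() { (void)thread_num(); });
   t1.join();
   t2.join();
   /* after two threads have registered, in_parallel() reports true */
}
#endif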
164 
165 using CppAD::AD;
166 
167 class atomic_userexpr;
168 
169 /** expression specific interpreter data */
170 struct SCIP_ExprIntData
171 {
172 public:
173  /** constructor */
174  SCIP_ExprIntData()
175  : val(0.0),
176  need_retape(true),
177  need_retape_always(false),
178  userevalcapability(SCIP_EXPRINTCAPABILITY_ALL),
179  hesrowidxs(NULL),
180  hescolidxs(NULL),
181  hesnnz(0),
182  hesconstant(false)
183  { }
184 
185  /** destructor */
186  ~SCIP_ExprIntData()
187  { }/*lint --e{1540}*/
188 
189  /** gives position of index in varidxs vector */
190  int getVarPos(
191  int varidx /**< variable index to look for */
192  ) const
193  {
194  // varidxs is sorted, so can use binary search functions
195  assert(std::binary_search(varidxs.begin(), varidxs.end(), varidx));
196  return std::lower_bound(varidxs.begin(), varidxs.end(), varidx) - varidxs.begin();
197  }
198 
199  vector< int > varidxs; /**< variable indices used in expression (unique and sorted) */
200  vector< AD<double> > X; /**< vector of dependent variables (same size as varidxs) */
201  vector< AD<double> > Y; /**< result vector (size 1) */
202  CppAD::ADFun<double> f; /**< the function to evaluate as CppAD object */
203 
204  vector<double> x; /**< current values of dependent variables (same size as varidxs) */
205  double val; /**< current function value */
206  bool need_retape; /**< will retaping be required for the next point evaluation? */
207  bool need_retape_always; /**< will retaping be always required? */
208 
209  vector<atomic_userexpr*> userexprs; /**< vector of atomic_userexpr that are created during eval() and need to be kept as long as the tape exists */
210  SCIP_EXPRINTCAPABILITY userevalcapability;/**< (intersection of) capabilities of evaluation routines of user expressions */
211 
212  int* hesrowidxs; /**< row indices of Hessian sparsity: indices are the variables used in expression */
213  int* hescolidxs; /**< column indices of Hessian sparsity: indices are the variables used in expression */
214  vector<SCIP_Real> hesvalues; /**< values of Hessian (a vector<> so we can pass it to CppAD) */
215  int hesnnz; /**< number of nonzeros in Hessian */
216  SCIP_Bool hesconstant; /**< whether Hessian is constant (because expr is at most quadratic) */
217 
218  // Hessian data in CppAD style: indices are 0..n-1 and elements on both lower and upper diagonal of matrix are considered
219  CppAD::local::internal_sparsity<bool>::pattern_type hessparsity_pattern; /**< packed sparsity pattern of Hessian in CppAD-internal form */
220  CppAD::vector<size_t> hessparsity_row; /**< row indices of sparsity pattern of Hessian in CppAD-internal form */
221  CppAD::vector<size_t> hessparsity_col; /**< column indices of sparsity pattern of Hessian in CppAD-internal form */
222  CppAD::sparse_hessian_work heswork; /**< work memory of CppAD for sparse Hessians */
223 };
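/* Illustrative sketch, not part of the original file: how varidxs and getVarPos() map
 * problem-variable indices to dense positions in X and x. The indices used are made up.
 */
#if 0 /* illustration only */
static void example_varpos(void)
{
   SCIP_ExprIntData data;
   data.varidxs = {2, 5, 9};        // unique and sorted, as required above
   assert(data.getVarPos(2) == 0);  // dense position of variable index 2
   assert(data.getVarPos(5) == 1);
   assert(data.getVarPos(9) == 2);
   // data.X and data.x then have size 3; data.x[1] holds the current value of variable index 5
}
#endif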
224 
225 #ifndef NO_CPPAD_USER_ATOMIC
226 
227 /** computes sparsity of jacobian for a univariate function during a forward sweep
228  *
229  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
230  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
231  */
232 static
233 bool univariate_for_sparse_jac(
234  size_t q, /**< number of columns in R */
235  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
236  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
237  )
238 {
239  assert(r.size() == q);
240  assert(s.size() == q);
241 
242  s = r;
243 
244  return true;
245 }
246 
247 /** Computes sparsity of jacobian during a reverse sweep
248  *
249  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
250  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
251  */
252 static
253 bool univariate_rev_sparse_jac(
254  size_t q, /**< number of rows in R */
255  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
256  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
257  )
258 {
259  assert(r.size() == q);
260  assert(s.size() == q);
261 
262  s = r;
263 
264  return true;
265 }
266 
267 /** computes sparsity of hessian during a reverse sweep
268  *
269  * Assume V(x) = (g(f(x)))'' R with f(x) = x^p for a function g:R->R and a matrix R.
270  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
271  */ /*lint -e715*/
272 static
273 bool univariate_rev_sparse_hes(
274  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
275  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
276  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
277  size_t q, /**< number of columns in R, U, and V */
278  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
279  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
280  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
281  )
282 { /*lint --e{439,715}*/ /* @todo take vx into account */
283  assert(r.size() == q);
284  assert(s.size() == 1);
285  assert(t.size() == 1);
286  assert(u.size() == q);
287  assert(v.size() == q);
288 
289  // T(x) = g'(f(x)) * f'(x) = S * f'(x), and f' is not identically 0
290  t[0] = s[0];
291 
292  // V(x) = g''(f(x)) f'(x) f'(x) R + g'(f(x)) f''(x) R
293  // = f'(x) U + S f''(x) R, with f'(x) and f''(x) not identically 0
294  v = u;
295  if( s[0] )
296  for( size_t j = 0; j < q; ++j )
297  if( r[j] )
298  v[j] = true;
299 
300  return true;
301 }
302 
303 
304 /** Automatic differentiation of x -> x^p, p>=2 integer, as CppAD user-atomic function.
305  *
306  * This class implements forward and reverse operations for the function x -> x^p for use within CppAD.
307  * While CppAD would implement integer powers as a recursion of multiplications, we still use pow functions as they allow us to avoid overestimation in interval arithmetics.
308  *
309  * @todo treat the exponent as a (variable) argument to the function, with the assumption that we never differentiate w.r.t. it (this should make the approach threadsafe again)
310  */
311 template<class Type>
312 class atomic_posintpower : public CppAD::atomic_base<Type>
313 {
314 public:
315  atomic_posintpower()
316  : CppAD::atomic_base<Type>("posintpower"),
317  exponent(0)
318  {
319  /* indicate that we want to use bool-based sparsity pattern */
320  this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
321  }
322 
323 private:
324  /** exponent value for next call to forward or reverse */
325  int exponent;
326 
327  /** stores exponent value corresponding to next call to forward or reverse
328  *
329  * how is this supposed to be threadsafe? (we use only one global instantiation of this class)
330  * TODO using this function is deprecated, do as with atomic_userexpr instead
331  */
332  virtual void set_old(size_t id)
333  {
334  exponent = (int) id;
335  }
336 
337  /** forward sweep of positive integer power
338  *
339  * Given the taylor coefficients for x, we have to compute the taylor coefficients for f(x),
340  * that is, given tx = (x, x', x'', ...), we compute the coefficients ty = (y, y', y'', ...)
341  * in the taylor expansion of f(x) = x^p.
342  * Thus, y = x^p
343  * = tx[0]^p,
344  * y' = p * x^(p-1) * x'
345  * = p * tx[0]^(p-1) * tx[1],
346  * y'' = 1/2 * p * (p-1) * x^(p-2) * x'^2 + p * x^(p-1) * x''
347  * = 1/2 * p * (p-1) * tx[0]^(p-2) * tx[1]^2 + p * tx[0]^(p-1) * tx[2]
348  */
349  bool forward(
350  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
351  size_t p, /**< highest order Taylor coefficient that we are evaluating */
352  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
353  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
354  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
355  CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
356  )
357  {
358  assert(exponent > 1);
359  assert(tx.size() >= p+1);
360  assert(ty.size() >= p+1);
361  assert(q <= p);
362 
363  if( vx.size() > 0 )
364  {
365  assert(vx.size() == 1);
366  assert(vy.size() == 1);
367  assert(p == 0);
368 
369  vy[0] = vx[0];
370  }
371 
372  if( q == 0 /* q <= 0 && 0 <= p */ )
373  {
374  ty[0] = CppAD::pow(tx[0], exponent);
375  }
376 
377  if( q <= 1 && 1 <= p )
378  {
379  ty[1] = CppAD::pow(tx[0], exponent-1) * tx[1];
380  ty[1] *= double(exponent);
381  }
382 
383  if( q <= 2 && 2 <= p )
384  {
385  if( exponent > 2 )
386  {
387  // ty[2] = 1/2 * exponent * (exponent-1) * pow(tx[0], exponent-2) * tx[1] * tx[1] + exponent * pow(tx[0], exponent-1) * tx[2];
388  ty[2] = CppAD::pow(tx[0], exponent-2) * tx[1] * tx[1];
389  ty[2] *= (exponent-1) / 2.0;
390  ty[2] += CppAD::pow(tx[0], exponent-1) * tx[2];
391  ty[2] *= exponent;
392  }
393  else
394  {
395  assert(exponent == 2);
396  // ty[2] = 1/2 * exponent * tx[1] * tx[1] + exponent * tx[0] * tx[2];
397  ty[2] = tx[1] * tx[1] + 2.0 * tx[0] * tx[2];
398  }
399  }
400 
401  /* higher order derivatives not implemented */
402  if( p > 2 )
403  return false;
404 
405  return true;
406  }
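 /* Worked example, not part of the original file, for the forward() formulas above with
  * exponent p = 3 and Taylor coefficients tx = (2, 1, 0), i.e. x = 2, x' = 1, x'' = 0:
  *   ty[0] = 2^3                           = 8
  *   ty[1] = 3 * 2^2 * 1                   = 12
  *   ty[2] = 1/2 * 3 * 2 * 2^1 * 1^2 + 0   = 6
  */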
407 
408  /** reverse sweep of positive integer power
409  *
410  * Assume y(x) is a function of the taylor coefficients of f(x) = x^p for x, i.e.,
411  * y(x) = [ x^p, p * x^(p-1) * x', p * (p-1) * x^(p-2) * x'^2 + p * x^(p-1) * x'', ... ].
412  * Then in the reverse sweep we have to compute the elements of \f$\partial h / \partial x^[l], l = 0, ..., k,\f$
413  * where x^[l] is the l'th taylor coefficient (x, x', x'', ...) and h(x) = g(y(x)) for some function g:R^k -> R.
414  * That is, we have to compute
415  *\f$
416  * px[l] = \partial h / \partial x^[l] = (\partial g / \partial y) * (\partial y / \partial x^[l])
417  * = \sum_{i=0}^k (\partial g / \partial y_i) * (\partial y_i / \partial x^[l])
418  * = \sum_{i=0}^k py[i] * (\partial y_i / \partial x^[l])
419  * \f$
420  *
421  * For k = 0, this means
422  *\f$
423  * px[0] = py[0] * (\partial y_0 / \partial x^[0])
424  * = py[0] * (\partial x^p / \partial x)
425  * = py[0] * p * tx[0]^(p-1)
426  *\f$
427  *
428  * For k = 1, this means
429  * \f$
430  * px[0] = py[0] * (\partial y_0 / \partial x^[0]) + py[1] * (\partial y_1 / \partial x^[0])
431  * = py[0] * (\partial x^p / \partial x) + py[1] * (\partial (p * x^(p-1) * x') / \partial x)
432  * = py[0] * p * tx[0]^(p-1) + py[1] * p * (p-1) * tx[0]^(p-2) * tx[1]
433  * px[1] = py[0] * (\partial y_0 / \partial x^[1]) + py[1] * (\partial y_1 / \partial x^[1])
434  * = py[0] * (\partial x^p / \partial x') + py[1] * (\partial (p * x^(p-1) x') / \partial x')
435  * = py[0] * 0 + py[1] * p * tx[0]^(p-1)
436  * \f$
437  */ /*lint -e715*/
438  bool reverse(
439  size_t p, /**< highest order Taylor coefficient that we are evaluating */
440  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
441  const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
442  CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
443  const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
444  )
445  { /*lint --e{715}*/
446  assert(exponent > 1);
447  assert(px.size() >= p+1);
448  assert(py.size() >= p+1);
449  assert(tx.size() >= p+1);
450 
451  switch( p )
452  {
453  case 0:
454  // px[0] = py[0] * exponent * pow(tx[0], exponent-1);
455  px[0] = py[0] * CppAD::pow(tx[0], exponent-1);
456  px[0] *= exponent;
457  break;
458 
459  case 1:
460  // px[0] = py[0] * exponent * pow(tx[0], exponent-1) + py[1] * exponent * (exponent-1) * pow(tx[0], exponent-2) * tx[1];
461  px[0] = py[1] * tx[1] * CppAD::pow(tx[0], exponent-2);
462  px[0] *= exponent-1;
463  px[0] += py[0] * CppAD::pow(tx[0], exponent-1);
464  px[0] *= exponent;
465  // px[1] = py[1] * exponent * pow(tx[0], exponent-1);
466  px[1] = py[1] * CppAD::pow(tx[0], exponent-1);
467  px[1] *= exponent;
468  break;
469 
470  default:
471  return false;
472  }
473 
474  return true;
475  }
476 
477  using CppAD::atomic_base<Type>::for_sparse_jac;
478 
479  /** computes sparsity of jacobian during a forward sweep
480  *
481  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
482  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
483  */
484  bool for_sparse_jac(
485  size_t q, /**< number of columns in R */
486  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
487  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
488  )
489  {
490  return univariate_for_sparse_jac(q, r, s);
491  }
492 
493  using CppAD::atomic_base<Type>::rev_sparse_jac;
494 
495  /** computes sparsity of jacobian during a reverse sweep
496  *
497  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
498  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
499  */
500  bool rev_sparse_jac(
501  size_t q, /**< number of rows in R */
502  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
503  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
504  )
505  {
506  return univariate_rev_sparse_jac(q, r, s);
507  }
508 
509  using CppAD::atomic_base<Type>::rev_sparse_hes;
510 
511  /** computes sparsity of hessian during a reverse sweep
512  *
513  * Assume V(x) = (g(f(x)))'' R with f(x) = x^p for a function g:R->R and a matrix R.
514  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
515  */
516  bool rev_sparse_hes(
517  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
518  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
519  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
520  size_t q, /**< number of columns in R, U, and V */
521  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
522  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
523  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
524  )
525  {
526  return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
527  }
528 };
529 
530 /** power function with natural exponents */
531 template<class Type>
532 static
533 void posintpower(
534  const vector<Type>& in, /**< vector whose first argument is the base */
535  vector<Type>& out, /**< vector where to store result in first argument */
536  size_t exponent /**< exponent */
537  )
538 {
539  static atomic_posintpower<typename Type::value_type> pip;
540  pip(in, out, exponent);
541 }
542 
543 #else
544 
545 /** power function with natural exponents */
546 template<class Type>
547 void posintpower(
548  const vector<Type>& in, /**< vector whose first argument is the base */
549  vector<Type>& out, /**< vector where to store result in first argument */
550  size_t exponent /**< exponent */
551  )
552 {
553  out[0] = pow(in[0], (int)exponent);
554 }
555 
556 #endif
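/* Illustrative sketch, not part of the original file: calling posintpower() on CppAD variables
 * computes x^3 with either of the two definitions above.
 */
#if 0 /* illustration only */
static void example_posintpower(void)
{
   vector< AD<double> > in(1, AD<double>(2.0));
   vector< AD<double> > out(1);
   posintpower(in, out, 3);
   // out[0] now holds 8.0
}
#endif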
557 
558 
559 #ifndef NO_CPPAD_USER_ATOMIC
560 
561 /** sign of a value (-1 or +1)
562  *
563  * 0.0 has sign +1
564  */
565 #define SIGN(x) ((x) >= 0.0 ? 1.0 : -1.0)
566 
567 /** Automatic differentiation of x -> sign(x)abs(x)^p, p>=1, as CppAD user-atomic function.
568  *
569  * This class implements forward and reverse operations for the function x -> sign(x)abs(x)^p for use within CppAD.
570  * While we otherwise would have to use discontinuous sign and abs functions, our own implementation allows to provide
571  * a continuously differentiable function.
572  *
573  * @todo treat the exponent as a (variable) argument to the function, with the assumption that we never differentiate w.r.t. it (this should make the approach threadsafe again)
574  */
575 template<class Type>
576 class atomic_signpower : public CppAD::atomic_base<Type>
577 {
578 public:
579  atomic_signpower()
580  : CppAD::atomic_base<Type>("signpower"),
581  exponent(0.0)
582  {
583  /* indicate that we want to use bool-based sparsity pattern */
584  this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
585  }
586 
587 private:
588  /** exponent for use in next call to forward or reverse */
589  SCIP_Real exponent;
590 
591  /** stores exponent corresponding to next call to forward or reverse
592  *
593  * How is this supposed to be threadsafe? (we use only one global instantiation of this class)
594  * TODO using this function is deprecated, do as with atomic_userexpr instead
595  */
596  virtual void set_old(size_t id)
597  {
598  exponent = SCIPgetExponentExprPow((SCIP_EXPR*)(void*)id);
599  }
600 
601  /** forward sweep of signpower
602  *
603  * Given the taylor coefficients for x, we have to compute the taylor coefficients for f(x),
604  * that is, given tx = (x, x', x'', ...), we compute the coefficients ty = (y, y', y'', ...)
605  * in the taylor expansion of f(x) = sign(x)abs(x)^p.
606  * Thus, y = sign(x)abs(x)^p
607  * = sign(tx[0])abs(tx[0])^p,
608  * y' = p * abs(x)^(p-1) * x'
609  * = p * abs(tx[0])^(p-1) * tx[1],
610  * y'' = 1/2 * p * (p-1) * sign(x) * abs(x)^(p-2) * x'^2 + p * abs(x)^(p-1) * x''
611  * = 1/2 * p * (p-1) * sign(tx[0]) * abs(tx[0])^(p-2) * tx[1]^2 + p * abs(tx[0])^(p-1) * tx[2]
612  */
613  bool forward(
614  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
615  size_t p, /**< highest order Taylor coefficient that we are evaluating */
616  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
617  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
618  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
619  CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
620  )
621  {
622  assert(exponent > 0.0);
623  assert(tx.size() >= p+1);
624  assert(ty.size() >= p+1);
625  assert(q <= p);
626 
627  if( vx.size() > 0 )
628  {
629  assert(vx.size() == 1);
630  assert(vy.size() == 1);
631  assert(p == 0);
632 
633  vy[0] = vx[0];
634  }
635 
636  if( q == 0 /* q <= 0 && 0 <= p */ )
637  {
638  ty[0] = SIGN(tx[0]) * pow(REALABS(tx[0]), exponent);
639  }
640 
641  if( q <= 1 && 1 <= p )
642  {
643  ty[1] = pow(REALABS(tx[0]), exponent - 1.0) * tx[1];
644  ty[1] *= exponent;
645  }
646 
647  if( q <= 2 && 2 <= p )
648  {
649  if( exponent != 2.0 )
650  {
651  ty[2] = SIGN(tx[0]) * pow(REALABS(tx[0]), exponent - 2.0) * tx[1] * tx[1];
652  ty[2] *= (exponent - 1.0) / 2.0;
653  ty[2] += pow(REALABS(tx[0]), exponent - 1.0) * tx[2];
654  ty[2] *= exponent;
655  }
656  else
657  {
658  // y'' = 2 (1/2 * sign(x) * x'^2 + |x|*x'') = sign(tx[0]) * tx[1]^2 + 2 * abs(tx[0]) * tx[2]
659  ty[2] = SIGN(tx[0]) * tx[1] * tx[1];
660  ty[2] += 2.0 * REALABS(tx[0]) * tx[2];
661  }
662  }
663 
664  /* higher order derivatives not implemented */
665  if( p > 2 )
666  return false;
667 
668  return true;
669  }
670 
671  /** reverse sweep of signpower
672  *
673  * Assume y(x) is a function of the taylor coefficients of f(x) = sign(x)|x|^p for x, i.e.,
674  * y(x) = [ f(x), f'(x), f''(x), ... ].
675  * Then in the reverse sweep we have to compute the elements of \f$\partial h / \partial x^[l], l = 0, ..., k,\f$
676  * where x^[l] is the l'th taylor coefficient (x, x', x'', ...) and h(x) = g(y(x)) for some function g:R^k -> R.
677  * That is, we have to compute
678  *\f$
679  * px[l] = \partial h / \partial x^[l] = (\partial g / \partial y) * (\partial y / \partial x^[l])
680  * = \sum_{i=0}^k (\partial g / \partial y_i) * (\partial y_i / \partial x^[l])
681  * = \sum_{i=0}^k py[i] * (\partial y_i / \partial x^[l])
682  *\f$
683  *
684  * For k = 0, this means
685  *\f$
686  * px[0] = py[0] * (\partial y_0 / \partial x^[0])
687  * = py[0] * (\partial f(x) / \partial x)
688  * = py[0] * p * abs(tx[0])^(p-1)
689  * \f$
690  *
691  * For k = 1, this means
692  *\f$
693  * px[0] = py[0] * (\partial y_0 / \partial x^[0]) + py[1] * (\partial y_1 / \partial x^[0])
694  * = py[0] * (\partial f(x) / \partial x) + py[1] * (\partial f'(x) / \partial x)
695  * = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
696  * px[1] = py[0] * (\partial y_0 / \partial x^[1]) + py[1] * (\partial y_1 / \partial x^[1])
697  * = py[0] * (\partial f(x) / \partial x') + py[1] * (\partial f'(x) / \partial x')
698  * = py[0] * 0 + py[1] * p * abs(tx[0])^(p-1)
699  * \f$
700  */
701  bool reverse(
702  size_t p, /**< highest order Taylor coefficient that we are evaluating */
703  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
704  const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
705  CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
706  const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
707  )
708  { /*lint --e{715}*/
709  assert(exponent > 1);
710  assert(px.size() >= p+1);
711  assert(py.size() >= p+1);
712  assert(tx.size() >= p+1);
713 
714  switch( p )
715  {
716  case 0:
717  // px[0] = py[0] * p * pow(abs(tx[0]), p-1);
718  px[0] = py[0] * pow(REALABS(tx[0]), exponent - 1.0);
719  px[0] *= exponent;
720  break;
721 
722  case 1:
723  if( exponent != 2.0 )
724  {
725  // px[0] = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
726  px[0] = py[1] * tx[1] * pow(REALABS(tx[0]), exponent - 2.0) * SIGN(tx[0]);
727  px[0] *= exponent - 1.0;
728  px[0] += py[0] * pow(REALABS(tx[0]), exponent - 1.0);
729  px[0] *= exponent;
730  // px[1] = py[1] * p * abs(tx[0])^(p-1)
731  px[1] = py[1] * pow(REALABS(tx[0]), exponent - 1.0);
732  px[1] *= exponent;
733  }
734  else
735  {
736  // px[0] = py[0] * 2.0 * abs(tx[0]) + py[1] * 2.0 * sign(tx[0]) * tx[1]
737  px[0] = py[1] * tx[1] * SIGN(tx[0]);
738  px[0] += py[0] * REALABS(tx[0]);
739  px[0] *= 2.0;
740  // px[1] = py[1] * 2.0 * abs(tx[0])
741  px[1] = py[1] * REALABS(tx[0]);
742  px[1] *= 2.0;
743  }
744  break;
745 
746  default:
747  return false;
748  }
749 
750  return true;
751  }
752 
753  using CppAD::atomic_base<Type>::for_sparse_jac;
754 
755  /** computes sparsity of jacobian during a forward sweep
756  *
757  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
758  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
759  */
760  bool for_sparse_jac(
761  size_t q, /**< number of columns in R */
762  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
763  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
764  )
765  {
766  return univariate_for_sparse_jac(q, r, s);
767  }
768 
769  using CppAD::atomic_base<Type>::rev_sparse_jac;
770 
771  /** computes sparsity of jacobian during a reverse sweep
772  *
773  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
774  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
775  */
776  bool rev_sparse_jac(
777  size_t q, /**< number of rows in R */
778  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
779  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
780  )
781  {
782  return univariate_rev_sparse_jac(q, r, s);
783  }
784 
785  using CppAD::atomic_base<Type>::rev_sparse_hes;
786 
787  /** computes sparsity of hessian during a reverse sweep
788  *
789  * Assume V(x) = (g(f(x)))'' R with f(x) = sign(x)abs(x)^p for a function g:R->R and a matrix R.
790  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
791  */
792  bool rev_sparse_hes(
793  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
794  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
795  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
796  size_t q, /**< number of columns in S and R */
797  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
798  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
799  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
800  )
801  {
802  return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
803  }
804 
805 };
806 
807 /** template for evaluation for signpower operator */
808 template<class Type>
809 static
810 void evalSignPower(
811  Type& resultant, /**< resultant */
812  const Type& arg, /**< operand */
813  SCIP_EXPR* expr /**< expression that holds the exponent */
814  )
815 {
816  vector<Type> in(1, arg);
817  vector<Type> out(1);
818 
819  static atomic_signpower<typename Type::value_type> sp;
820  sp(in, out, (size_t)(void*)expr);
821 
822  resultant = out[0];
823  return;
824 }
825 
826 #else
827 
828 /** specialization of signpower evaluation for real numbers */
829 template<class Type>
830 static
831 void evalSignPower(
832  CppAD::AD<Type>& resultant, /**< resultant */
833  const CppAD::AD<Type>& arg, /**< operand */
834  SCIP_EXPR* expr /**< expression that holds the exponent */
835  )
836 {
837  AD<Type> adzero(0.);
838  SCIP_Real exponent;
839 
840  exponent = SCIPgetExponentExprPow(expr);
841  assert(exponent >= 1.0);
842 
843  if( EPSISINT(exponent, 0.0) )
844  {
845  resultant = CppAD::CondExpGe(arg, adzero, pow(arg, (int)exponent), -pow(-arg, (int)exponent));
846  }
847  else
848  {
849  /* pow(0,fractional>1) is not differential in the CppAD world (https://github.com/coin-or/CppAD/discussions/93)
850  * this works around this by evaluating pow(eps,exponent) in this case
851  */
852  resultant = CppAD::CondExpEq(arg, adzero, pow(arg+std::numeric_limits<SCIP_Real>::epsilon(), exponent)-pow(std::numeric_limits<SCIP_Real>::epsilon(), exponent),
853  CppAD::CondExpGe(arg, adzero, pow(arg, exponent), -pow(-arg, exponent)));
854  }
855 }
856 #endif
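/* Worked example, not part of the original file, for evalSignPower() above,
 * where signpower(x, p) = sign(x) * |x|^p:
 *   signpower(-3.0, 2.0) = -9.0   (integer exponent branch: -pow(-arg, 2))
 *   signpower(-4.0, 1.5) = -8.0   (fractional exponent branch)
 *   signpower( 0.0, 1.5) =  0.0   (CondExpEq workaround keeps the value 0 while staying differentiable for CppAD)
 */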
857 
858 /** Automatic differentiation of user expression as CppAD user-atomic function.
859  *
860  * This class implements forward and reverse operations for a function given by a user expression for use within CppAD.
861  */
862 class atomic_userexpr : public CppAD::atomic_base<SCIP_Real>
863 {
864 public:
865  atomic_userexpr(
866  SCIP* scip_, /**< SCIP data structure */
867  SCIP_EXPR* expr_ /**< expression to use */
868  )
869  : CppAD::atomic_base<SCIP_Real>(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr_)), CppAD::atomic_base<SCIP_Real>::bool_sparsity_enum),
870  scip(scip_),
871  expr(expr_)
872  { }
873 
874 private:
875  /** SCIP data structure */
876  SCIP* scip;
877 
878  /** expression */
879  SCIP_EXPR* expr;
880 
881  /** forward sweep of userexpr
882  *
883  * We follow http://www.coin-or.org/CppAD/Doc/atomic_forward.xml
884  * Note that p and q are interchanged!
885  *
886  * For a scalar variable t, let
887  * Y(t) = f(X(t))
888  * X(t) = x^0 + x^1 t^1 + ... + x^p t^p
889  * where for x^i the i is an index, while for t^i the i is an exponent.
890  * Thus, x^k = 1/k! X^(k) (0), where X^(k)(.) denotes the k-th derivative.
891  *
892  * Next, let y^k = 1/k! Y^(k)(0) be the k'th taylor coefficient of Y. Thus,
893  * y^0 = Y^(0)(0) = Y(0) = f(X(0)) = f(x^0)
894  * y^1 = Y^(1)(0) = Y'(0) = f'(X(0)) * X'(0) = f'(x^0) * x^1
895  * y^2 = 1/2 Y^(2)(0) = 1/2 Y''(0) = 1/2 X'(0) * f''(X(0)) X'(0) + 1/2 * f'(X(0)) * X''(0) = 1/2 x^1 * f''(x^0) * x^1 + f'(x^0) * x^2
896  *
897  * As x^k = (tx[k], tx[(p+1)+k], tx[2*(p+1)+k], ..., tx[n*(p+1)+k]), we get
898  * ty[0] = y^0 = f(x^0) = f(tx[{1..n}*(p+1)])
899  * ty[1] = y^1 = f'(x^0) * tx[{1..n}*(p+1)+1] = sum(i=1..n, grad[i] * tx[i*(p+1)+1]), where grad = f'(x^0)
900  * ty[2] = 1/2 sum(i,j=1..n, x[i*(p+1)+1] * x[j*(p+1)+q] * hessian[i,j]) + sum(i=1..n, grad[i] * x[i*(p+1)+2])
901  */
902  bool forward(
903  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
904  size_t p, /**< highest order Taylor coefficient that we are evaluating */
905  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
906  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
907  const CppAD::vector<SCIP_Real>& tx, /**< values for taylor coefficients of x */
908  CppAD::vector<SCIP_Real>& ty /**< vector to store taylor coefficients of y */
909  )
910  {
911  assert(scip != NULL);
912  assert(expr != NULL);
913  assert(ty.size() == p+1);
914  assert(q <= p);
915 
916  size_t n = tx.size() / (p+1);
917  assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
918  assert(n >= 1);
919 
920  SCIPdebugMsg(scip, "expr_%s:forward, q=%zd, p=%zd\n", SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), q, p);
921 
922  if( vx.size() > 0 )
923  {
924  assert(vx.size() == n);
925  assert(vy.size() == 1);
926  assert(p == 0);
927 
928  /* y_0 is a variable if at least one of the x_i is a variable */
929  vy[0] = false;
930  for( size_t i = 0; i < n; ++i )
931  if( vx[i] )
932  {
933  vy[0] = true;
934  break;
935  }
936  }
937 
938  if( p == 0 )
939  {
940  /* only eval requested
941  * arguments are in tx[i * (p+1) + 0], i == 0..n-1, that is tx[0]..tx[n-1]
942  */
943  if( SCIPcallExprEval(scip, expr, const_cast<double*>(tx.data()), &ty[0]) != SCIP_OKAY )
944  return false;
945 
946  if( ty[0] == SCIP_INVALID )
947  ty[0] = std::numeric_limits<SCIP_Real>::quiet_NaN();
948 
949  return true;
950  }
951 
952  if( p == 1 )
953  {
954  /* eval and forward-diff requested
955  *
956  * point to eval is in tx[i * (p+1) + 0], i == 0..n-1
957  * direction is in tx[i * (p+1) + 1], i == 0..n-1
958  */
959  SCIP_Real* x = new SCIP_Real[n];
960  SCIP_Real* dir = new SCIP_Real[n];
961  for( size_t i = 0; i < n; ++i )
962  {
963  x[i] = tx[i * (p+1) + 0]; /*lint !e835*/
964  dir[i] = tx[i * (p+1) + 1]; /*lint !e835*/
965  }
966 
967  SCIP_RETCODE rc = SCIPcallExprEvalFwdiff(scip, expr, x, dir, &ty[0], &ty[1]);
968 
969  if( ty[0] == SCIP_INVALID )
970  ty[0] = std::numeric_limits<SCIP_Real>::quiet_NaN();
971  if( ty[1] == SCIP_INVALID )
972  ty[1] = std::numeric_limits<SCIP_Real>::quiet_NaN();
973 
974  delete[] dir;
975  delete[] x;
976 
977  return rc == SCIP_OKAY;
978  }
979 
980  /* higher order derivatives not implemented yet (TODO) */
981  SCIPABORT();
982  return false;
983 
984 #if SCIP_DISABLED_CODE
985  SCIP_Real* gradient = NULL;
986  SCIP_Real* hessian = NULL;
987 
988  if( q <= 2 && 1 <= p )
989  gradient = new SCIP_Real[n];
990  if( q <= 2 && 2 <= p )
991  hessian = new SCIP_Real[n*n];
992 
993  if( exprEvalUser(expr, x, ty[0], gradient, hessian) != SCIP_OKAY )
994  {
995  delete[] x;
996  delete[] gradient;
997  delete[] hessian;
998  return false;
999  }
1000 
1001  if( gradient != NULL )
1002  {
1003  ty[1] = 0.0;
1004  for( size_t i = 0; i < n; ++i )
1005  ty[1] += gradient[i] * tx[i * (p+1) + 1];
1006  }
1007 
1008  if( hessian != NULL )
1009  {
1010  assert(gradient != NULL);
1011 
1012  ty[2] = 0.0;
1013  for( size_t i = 0; i < n; ++i )
1014  {
1015  for( size_t j = 0; j < n; ++j )
1016  ty[2] += 0.5 * hessian[i*n+j] * tx[i * (p+1) + 1] * tx[j * (p+1) + 1];
1017 
1018  ty[2] += gradient[i] * tx[i * (p+1) + 2];
1019  }
1020  }
1021 
1022  delete[] x;
1023  delete[] gradient;
1024  delete[] hessian;
1025 
1026  /* higher order derivatives not implemented */
1027  if( p > 2 )
1028  return false;
1029 
1030  return true;
1031 #endif
1032  }
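 /* Illustrative note, not part of the original file, on the Taylor-coefficient layout used
  * in forward() above: for n = 2 children and p = 1, tx has length n*(p+1) = 4 with
  *   tx[0] = x_0, tx[1] = x_0', tx[2] = x_1, tx[3] = x_1',
  * i.e. the k-th coefficient of child i is tx[i*(p+1)+k]. The p == 1 branch therefore reads
  * the evaluation point from tx[0], tx[2] and the direction from tx[1], tx[3].
  */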
1033 
1034  /** reverse sweep of userexpr
1035  *
1036  * We follow http://www.coin-or.org/CppAD/Doc/atomic_reverse.xml
1037  * Note that there q is our p.
1038  *
1039  * For a scalar variable t, let
1040  * Y(t) = f(X(t))
1041  * X(t) = x^0 + x^1 t^1 + ... + x^p t^p
1042  * where for x^i the i is an index, while for t^i the i is an exponent.
1043  * Thus, x^k = 1/k! X^(k) (0), where X^(k)(.) denotes the k-th derivative.
1044  *
1045  * Next, let y^k = 1/k! Y^(k)(0) be the k'th taylor coefficient of Y. Thus,
1046  * Y(t) = y^0 + y^1 t^1 + y^2 t^2 + ...
1047  * y^0, y^1, ... are the taylor coefficients of f(x).
1048  *
1049  * Further, let F(x^0,..,x^p) be given as F^k(x) = y^k. Thus,
1050  * F^0(x) = y^0 = Y^(0)(0) = f(x^0)
1051  * F^1(x) = y^1 = Y^(1)(0) = f'(x^0) * x^1
1052  * F^2(x) = y^2 = 1/2 Y''(0) = 1/2 x^1 f''(x^0) x^1 + f'(x^0) x^2
1053  *
1054  * Given functions G: R^(p+1) -> R and H: R^(n*(p+1)) -> R, where H(x^0, x^1, .., x^p) = G(F(x^0,..,x^p)),
1055  * we have to return the value of \f$\partial H / \partial x^l, l = 0..p,\f$ in px. Therefore,
1056  * \f$
1057  * px^l = \partial H / \partial x^l
1058  * = sum(k=0..p, (\partial G / \partial y^k) * (\partial y^k / \partial x^l)
1059  * = sum(k=0..p, py[k] * (\partial F^k / \partial x^l)
1060  * \f$
1061  *
1062  * For p = 0, this means
1063  * \f$
1064  * px^0 = py[0] * \partial F^0 / \partial x^0
1065  * = py[0] * \partial f(x^0) / \partial x^0
1066  * = py[0] * f'(x^0)
1067  * \f$
1068  *
1069  * For p = 1, this means (for l = 0):
1070  * \f[
1071  * px^0 = py[0] * \partial F^0 / \partial x^0 + py[1] * \partial F^1 / \partial x^0
1072  * = py[0] * \partial f(x^0) / \partial x^0 + py[1] * \partial (f'(x^0) * x^1) / \partial x^0
1073  * = py[0] * f'(x^0) + py[1] * f''(x^0) * x^1
1074  * \f]
1075  * and (for l=1):
1076  * \f[
1077  * px^1 = py[0] * \partial F^0 / \partial x^1 + py[1] * \partial F^1 / \partial x^1
1078  * = py[0] * \partial f(x^0) / \partial x^1 + py[1] * \partial (f'(x^0) * x^1) / \partial x^1
1079  * = py[0] * 0 + py[1] * f'(x^0)
1080  * \f]
1081  *
1082  * As x^k = (tx[k], tx[(p+1)+k], tx[2*(p+1)+k], ..., tx[n*(p+1)+k]) and
1083  * px^k = (px[k], px[(p+1)+k], px[2*(p+1)+k], ..., px[n*(p+1)+k]), we get
1084  * for p = 0:
1085  * px[i] = (px^0)_i = py[0] * grad[i]
1086  * for p = 1:
1087  * px[i*2+0] = (px^0)_i = py[0] * grad[i] + py[1] * sum(j, hessian[j,i] * tx[j*2+1])
1088  * px[i*2+1] = (px^1)_i = py[1] * grad[i]
1089  */
1090  bool reverse(
1091  size_t p, /**< highest order Taylor coefficient that we are evaluating */
1092  const CppAD::vector<SCIP_Real>& tx, /**< values for taylor coefficients of x */
1093  const CppAD::vector<SCIP_Real>& ty, /**< values for taylor coefficients of y */
1094  CppAD::vector<SCIP_Real>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
1095  const CppAD::vector<SCIP_Real>& py /**< values for partial derivatives of g(x) w.r.t. y */
1096  )
1097  {
1098  assert(expr != NULL);
1099  assert(px.size() == tx.size());
1100  assert(py.size() == p+1);
1101 
1102  // TODO implement this again and then remove check for userexprs in SCIPexprintGrad
1103  SCIPABORT();
1104  return false;
1105 
1106 #ifdef SCIP_DISABLED_CODE
1107  size_t n = tx.size() / (p+1);
1108  assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1109  assert(n >= 1);
1110 
1111  SCIP_Real* x = new SCIP_Real[n];
1112  SCIP_Real funcval;
1113  SCIP_Real* gradient = new SCIP_Real[n];
1114  SCIP_Real* hessian = NULL;
1115 
1116  if( p == 1 )
1117  hessian = new SCIP_Real[n*n];
1118 
1119  for( size_t i = 0; i < n; ++i )
1120  x[i] = tx[i * (p+1) + 0]; /*lint !e835*/
1121 
1122  if( exprEvalUser(expr, x, funcval, gradient, hessian) != SCIP_OKAY )
1123  {
1124  delete[] x;
1125  delete[] gradient;
1126  delete[] hessian;
1127  return false;
1128  }
1129 
1130  switch( p )
1131  {
1132  case 0:
1133  // px[j] = (px^0)_j = py[0] * grad[j]
1134  for( size_t i = 0; i < n; ++i )
1135  px[i] = py[0] * gradient[i];
1136  break;
1137 
1138  case 1:
1139  // px[i*2+0] = (px^0)_i = py[0] * grad[i] + py[1] * sum(j, hessian[j,i] * tx[j*2+1])
1140  // px[i*2+1] = (px^1)_i = py[1] * grad[i]
1141  assert(hessian != NULL);
1142  for( size_t i = 0; i < n; ++i )
1143  {
1144  px[i*2+0] = py[0] * gradient[i]; /*lint !e835*/
1145  for( size_t j = 0; j < n; ++j )
1146  px[i*2+0] += py[1] * hessian[i+n*j] * tx[j*2+1]; /*lint !e835*/
1147 
1148  px[i*2+1] = py[1] * gradient[i];
1149  }
1150  break;
1151 
1152  default:
1153  return false;
1154  }
1155 
1156  return true;
1157 #endif
1158  } /*lint !e715*/
1159 
1160  using CppAD::atomic_base<SCIP_Real>::for_sparse_jac;
1161 
1162  /** computes sparsity of jacobian during a forward sweep
1163  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
1164  * Since we assume f'(x) to be dense, the sparsity of S will be the sparsity of R.
1165  */
1166  bool for_sparse_jac(
1167  size_t q, /**< number of columns in R */
1168  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
1169  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
1170  )
1171  {
1172  assert(expr != NULL);
1173  assert(s.size() == q);
1174 
1175  size_t n = r.size() / q;
1176  assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1177 
1178  // sparsity for S(x) = f'(x) * R
1179  for( size_t j = 0; j < q; j++ )
1180  {
1181  s[j] = false;
1182  for( size_t i = 0; i < n; i++ )
1183  s[j] |= (bool)r[i * q + j]; /*lint !e1786*/
1184  }
1185 
1186  return true;
1187  }
1188 
1189  using CppAD::atomic_base<SCIP_Real>::rev_sparse_jac;
1190 
1191  /** computes sparsity of jacobian during a reverse sweep
1192  * For a q x 1 matrix S, we have to return the sparsity pattern of the q x 1 matrix R(x) = S * f'(x).
1193  * Since we assume f'(x) to be dense, the sparsity of R will be the sparsity of S.
1194  */
1195  bool rev_sparse_jac(
1196  size_t q, /**< number of rows in R */
1197  const CppAD::vector<bool>& rt, /**< sparsity of R, rowwise */
1198  CppAD::vector<bool>& st /**< vector to store sparsity of S, rowwise */
1199  )
1200  {
1201  assert(expr != NULL);
1202  assert(rt.size() == q);
1203 
1204  size_t n = st.size() / q;
1205  assert(n == (size_t)SCIPexprGetNChildren(expr));
1206 
1207  // sparsity for S(x)^T = f'(x)^T * R^T
1208  for( size_t j = 0; j < q; j++ )
1209  for( size_t i = 0; i < n; i++ )
1210  st[i * q + j] = rt[j];
1211 
1212  return true;
1213  }
1214 
1215  using CppAD::atomic_base<SCIP_Real>::rev_sparse_hes;
1216 
1217  /** computes sparsity of hessian during a reverse sweep
1218  * Assume V(x) = (g(f(x)))'' R for a function g:R->R and a matrix R.
1219  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
1220  */
1221  bool rev_sparse_hes(
1222  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
1223  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
1224  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
1225  size_t q, /**< number of columns in S and R */
1226  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
1227  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
1228  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
1229  )
1230  {
1231  assert(expr != NULL);
1232  size_t n = vx.size();
1233  assert((size_t)SCIPexprGetNChildren(expr) == n);
1234  assert(s.size() == 1);
1235  assert(t.size() == n);
1236  assert(r.size() == n * q);
1237  assert(u.size() == q);
1238  assert(v.size() == n * q);
1239 
1240  size_t i, j, k;
1241 
1242  // sparsity for T(x) = S(x) * f'(x)
1243  for( i = 0; i < n; ++i )
1244  t[i] = s[0];
1245 
1246  // V(x) = f'(x)^T * g''(y) * f'(x) * R + g'(y) * f''(x) * R
1247  // U(x) = g''(y) * f'(x) * R
1248  // S(x) = g'(y)
1249 
1250  // back propagate the sparsity for U
1251  for( j = 0; j < q; j++ )
1252  for( i = 0; i < n; i++ )
1253  v[ i * q + j] = u[j];
1254 
1255  // include forward Jacobian sparsity in Hessian sparsity
1256  // sparsity for g'(y) * f''(x) * R (Note f''(x) is assumed to be dense)
1257  if( s[0] )
1258  for( j = 0; j < q; j++ )
1259  for( i = 0; i < n; i++ )
1260  for( k = 0; k < n; ++k )
1261  v[ i * q + j] |= (bool) r[ k * q + j];
1262 
1263  return true;
1264  }
1265 };
1266 
1267 
1268 /** integer power operation for arbitrary integer exponents */
1269 template<class Type>
1270 static
1271 void evalIntPower(
1272  Type& resultant, /**< resultant */
1273  const Type& arg, /**< operand */
1274  const int exponent /**< exponent */
1275  )
1276 {
1277  if( exponent > 1 )
1278  {
1279  vector<Type> in(1, arg);
1280  vector<Type> out(1);
1281 
1282  posintpower(in, out, exponent);
1283 
1284  resultant = out[0];
1285  return;
1286  }
1287 
1288  if( exponent < -1 )
1289  {
1290  vector<Type> in(1, arg);
1291  vector<Type> out(1);
1292 
1293  posintpower(in, out, -exponent);
1294 
1295  resultant = Type(1.0)/out[0];
1296  return;
1297  }
1298 
1299  if( exponent == 1 )
1300  {
1301  resultant = arg;
1302  return;
1303  }
1304 
1305  if( exponent == 0 )
1306  {
1307  resultant = Type(1.0);
1308  return;
1309  }
1310 
1311  assert(exponent == -1);
1312  resultant = Type(1.0)/arg;
1313 }
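/* Worked example, not part of the original file, for evalIntPower() above:
 *   exponent  3: resultant = posintpower(arg, 3)       = arg^3
 *   exponent -3: resultant = 1.0 / posintpower(arg, 3) = arg^(-3)
 *   exponent  1: resultant = arg;   exponent 0: resultant = 1.0;   exponent -1: resultant = 1.0/arg
 */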
1314 
1315 /** CppAD compatible evaluation of an expression for given arguments */
1316 template<class Type>
1317 static
1318 SCIP_RETCODE eval(
1319  SCIP* scip, /**< SCIP data structure */
1320  SCIP_EXPR* expr, /**< expression */
1321  SCIP_EXPRINTDATA* exprintdata, /**< interpreter data for root expression */
1322  const vector<Type>& x, /**< values of variables */
1323  Type& val /**< buffer to store expression value */
1324  )
1325 {
1326  Type* buf = NULL;
1327 
1328  assert(expr != NULL);
1329 
1330  // TODO this should iterate instead of using recursion
1331  // but the iterdata wouldn't work to hold Type at the moment
1332  // they could hold Type*, but then we need to alloc small portions all the time
1333  // or we have a big Type-array outside and point to it in iterdata
1334 
1335  if( SCIPisExprVaridx(scip, expr) )
1336  {
1337  val = x[exprintdata->getVarPos(SCIPgetIndexExprVaridx(expr))];
1338  return SCIP_OKAY;
1339  }
1340  if( SCIPisExprValue(scip, expr) )
1341  {
1342  val = SCIPgetValueExprValue(expr);
1343  return SCIP_OKAY;
1344  }
1345 
1346  SCIP_CALL( SCIPallocBufferArray(scip, &buf, SCIPexprGetNChildren(expr)) );
1347  for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
1348  {
1349  SCIP_CALL( eval(scip, SCIPexprGetChildren(expr)[i], exprintdata, x, buf[i]) );
1350  }
1351 
1352 #ifndef EVAL_USE_EXPRHDLR_ALWAYS
1353  if( SCIPisExprSum(scip, expr) )
1354  {
1355  val = SCIPgetConstantExprSum(expr);
1356  for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
1357  val += SCIPgetCoefsExprSum(expr)[i] * buf[i];
1358  }
1359  else if( SCIPisExprProduct(scip, expr) )
1360  {
1361  val = SCIPgetCoefExprProduct(expr);
1362  for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
1363  val *= buf[i];
1364  }
1365  else if( SCIPisExprPower(scip, expr) )
1366  {
1367  SCIP_Real exponent = SCIPgetExponentExprPow(expr);
1368  if( EPSISINT(exponent, 0.0) )
1369  evalIntPower(val, buf[0], (int)SCIPgetExponentExprPow(expr));
1370  else if( exponent == 0.5 )
1371  val = sqrt(buf[0]);
1372  else if( exponent < 1.0 )
1373  val = CppAD::pow(buf[0], SCIPgetExponentExprPow(expr));
1374  else
1375  {
1376  // workaround bug where CppAD claims pow(x,fractional>0) is nondiff at x=0
1377  // https://github.com/coin-or/CppAD/discussions/93#discussioncomment-327876
1378  AD<double> adzero(0.);
1379  val = CppAD::CondExpEq(buf[0], adzero, pow(buf[0]+std::numeric_limits<SCIP_Real>::epsilon(), exponent)-pow(std::numeric_limits<SCIP_Real>::epsilon(), exponent),
1380  pow(buf[0], exponent));
1381  }
1382  }
1383  else if( SCIPisExprSignpower(scip, expr) )
1384  {
1385  evalSignPower(val, buf[0], expr);
1386  }
1387  else if( SCIPisExprExp(scip, expr) )
1388  {
1389  val = exp(buf[0]);
1390  }
1391  else if( SCIPisExprLog(scip, expr) )
1392  {
1393  val = log(buf[0]);
1394  }
1395  else if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "sin") == 0 )
1396  {
1397  val = sin(buf[0]);
1398  }
1399  else if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "cos") == 0 )
1400  {
1401  val = cos(buf[0]);
1402  }
1403  else if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "erf") == 0 )
1404  {
1405  val = erf(buf[0]);
1406  }
1407  else if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "abs") == 0 )
1408  {
1409  val = abs(buf[0]);
1410  }
1411  else if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "entropy") == 0 )
1412  {
1413  /* -x*log(x) if x > 0, else 0
1414  * https://coin-or.github.io/CppAD/doc/cond_exp.cpp.htm suggest to use 0 for the x=0 case
1415  * but then derivatives at 0 are 0, while they are actually infinite (see expr_entropy.c:bwdiff)
1416  * so we use -sqrt(x) for the x=0 case, as this also has value 0 and first derivative -inf at 0
1417  */
1418  val = CppAD::CondExpGt(buf[0], AD<double>(0.), -buf[0] * log(buf[0]), -sqrt(buf[0]));
1419  }
1420  else
1421 #endif
1422  {
1423  //SCIPerrorMessage("using derivative methods of exprhdlr %s in CppAD is not implemented yet", SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)));
1424  //CppAD::ErrorHandler::Call(true, __LINE__, __FILE__, "evalUser()", "Error: user expressions in CppAD not possible without CppAD user atomic facility");
1425  vector<Type> in(buf, buf + SCIPexprGetNChildren(expr));
1426  vector<Type> out(1);
1427 
1428  exprintdata->userexprs.push_back(new atomic_userexpr(scip, expr));
1429  (*exprintdata->userexprs.back())(in, out);
1430 
1431  val = out[0];
1432  }
1433 
1434  SCIPfreeBufferArray(scip, &buf);
1435 
1436  return SCIP_OKAY;
1437 }
1438 
1439 /** replacement for CppAD's default error handler
1440  *
1441  * In debug mode, CppAD gives an error when an evaluation contains a nan.
1442  * We do not want to stop execution in such a case, since the calling routine should check for nan's and decide what to do.
1443  * Since we cannot ignore this particular error, we ignore all.
1444  * @todo find a way to check whether the error corresponds to a nan and communicate this back
1445  */
1446 static
1447 void cppaderrorcallback(
1448  bool known, /**< is the error from a known source? */
1449  int line, /**< line where error occured */
1450  const char* file, /**< file where error occured */
1451  const char* cond, /**< error condition */
1452  const char* msg /**< error message */
1453  )
1454 {
1455  SCIPdebugMessage("ignore CppAD error from %sknown source %s:%d: msg: %s exp: %s\n", known ? "" : "un", file, line, msg, cond);
1456 }
1457 
1458 /* install our error handler */
1459 static CppAD::ErrorHandler errorhandler(cppaderrorcallback);
1460 
1461 /** gets name and version of expression interpreter */
1462 const char* SCIPexprintGetName(void)
1463 {
1464  return CPPAD_PACKAGE_STRING;
1465 }
1466 
1467 /** gets descriptive text of expression interpreter */
1468 const char* SCIPexprintGetDesc(void)
1469 {
1470  return "Algorithmic Differentiation of C++ algorithms developed by B. Bell (github.com/coin-or/CppAD)";
1471 }
1472 
1473 /** gets capabilities of expression interpreter (using bitflags) */
1474 SCIP_EXPRINTCAPABILITY SCIPexprintGetCapability(
1475  void
1476  )
1477 {
1478  return SCIP_EXPRINTCAPABILITY_ALL;
1479 }
1480 
1481 /** creates an expression interpreter object */
1482 SCIP_RETCODE SCIPexprintCreate(
1483  SCIP* scip, /**< SCIP data structure */
1484  SCIP_EXPRINT** exprint /**< buffer to store pointer to expression interpreter */
1485  )
1486 {
1487  assert(exprint != NULL);
1488 
1489  *exprint = (SCIP_EXPRINT*)1u; /* some code checks that a non-NULL pointer is returned here, even though it may not point anywhere */
1490 
1491  return SCIP_OKAY;
1492 }
1493 
1494 /** frees an expression interpreter object */
1495 SCIP_RETCODE SCIPexprintFree(
1496  SCIP* scip, /**< SCIP data structure */
1497  SCIP_EXPRINT** exprint /**< expression interpreter that should be freed */
1498  )
1499 {
1500  assert(exprint != NULL);
1501 
1502  *exprint = NULL;
1503 
1504  return SCIP_OKAY;
1505 }
1506 
1507 /** compiles an expression and returns interpreter-specific data for expression
1508  *
1509  * can be called again with existing exprintdata if expression has been changed
1510  *
1511  * @attention *exprintdata needs to be initialized to NULL at first call
1512  * @attention the expression is assumed to use varidx expressions instead of var expressions
1513  */
1514 SCIP_RETCODE SCIPexprintCompile(
1515  SCIP* scip, /**< SCIP data structure */
1516  SCIP_EXPRINT* exprint, /**< interpreter data structure */
1517  SCIP_EXPR* rootexpr, /**< expression */
1518  SCIP_EXPRINTDATA** exprintdata /**< buffer to store pointer to compiled data */
1519  )
1520 {
1521  SCIP_EXPRITER* it;
1522  SCIP_EXPR* expr;
1523 
1524  assert(rootexpr != NULL);
1525  assert(exprintdata != NULL);
1526 
1527  if( *exprintdata == NULL )
1528  {
1529  *exprintdata = new SCIP_EXPRINTDATA();
1530  assert(*exprintdata != NULL);
1531  }
1532  else
1533  {
1534  (*exprintdata)->need_retape = true;
1535  (*exprintdata)->need_retape_always = false;
1536  (*exprintdata)->hesconstant = false;
1537  }
1538 
1539  SCIP_CALL( SCIPcreateExpriter(scip, &it) );
1540  SCIP_CALL( SCIPexpriterInit(it, rootexpr, SCIP_EXPRITER_DFS, FALSE) );
1541 
1542  std::set<int> varidxs;
1543  for( expr = SCIPexpriterGetCurrent(it); !SCIPexpriterIsEnd(it); expr = SCIPexpriterGetNext(it) )
1544  {
1545  /* cannot handle var-expressions in exprint so far, should be varidx expressions */
1546  assert(!SCIPisExprVar(scip, expr));
1547 
1548  if( SCIPisExprVaridx(scip, expr) )
1549  {
1550  varidxs.insert(SCIPgetIndexExprVaridx(expr));
1551  continue;
1552  }
1553 
1554  /* check whether expr is of a type we don't recognize */
1555 #ifndef EVAL_USE_EXPRHDLR_ALWAYS
1556  if( !SCIPisExprSum(scip, expr) &&
1557  !SCIPisExprProduct(scip, expr) &&
1558  !SCIPisExprPower(scip, expr) &&
1559  !SCIPisExprSignpower(scip, expr) &&
1560  !SCIPisExprExp(scip, expr) &&
1561  !SCIPisExprLog(scip, expr) &&
1562  strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "abs") != 0 &&
1563  strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "sin") != 0 &&
1564  strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "cos") != 0 &&
1565  strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "entropy") != 0 &&
1566  strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "erf") != 0 &&
1567  !SCIPisExprValue(scip, expr) )
1568 #endif
1569  {
1570  /* an expression for which we have no taping implemented in eval()
1571  * so we will try to use CppAD's atomic operand and the expression handler methods
1572  * however, only atomic_userexpr:forward() is implemented for p=0,1 at the moment, so we cannot do Hessians
1573  * also the exprhdlr needs to implement the fwdiff callback for derivatives
1574  */
1575  if( SCIPexprhdlrHasFwdiff(SCIPexprGetHdlr(expr)) )
1576  (*exprintdata)->userevalcapability &= SCIP_EXPRINTCAPABILITY_FUNCVALUE | SCIP_EXPRINTCAPABILITY_GRADIENT;
1577  else
1578  (*exprintdata)->userevalcapability &= SCIP_EXPRINTCAPABILITY_FUNCVALUE;
1579  }
1580 
1581  //if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "xyz") == 0 )
1582  // (*exprintdata)->need_retape_always = true;
1583  }
1584 
1585  SCIPfreeExpriter(&it);
1586 
1587  (*exprintdata)->varidxs.reserve(varidxs.size());
1588  (*exprintdata)->varidxs.insert((*exprintdata)->varidxs.begin(), varidxs.begin(), varidxs.end());
1589 
1590  size_t n = (*exprintdata)->varidxs.size();
1591  (*exprintdata)->X.resize(n);
1592  (*exprintdata)->x.resize(n);
1593  (*exprintdata)->Y.resize(1);
1594 
1595  // check whether the expression is quadratic (or linear), so we can save time on Hessian evaluations (the Hessian is then constant)
1596  // assumes a simplified expression and accepts only linear terms, squares x^2, and bilinear terms x*y
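  // editor note: e.g., a sum like 3*x0 + 2*x1^2 - x0*x1 is recognized as having a constant Hessian by the loop below,
  // while a child like x0*x1*x2 or exp(x0) causes hesconstant to be reset to false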
1597  // not using SCIPcheckExprQuadratic(), because we don't need the quadratic form
1598  if( SCIPisExprSum(scip, rootexpr) )
1599  {
1600  (*exprintdata)->hesconstant = true;
1601  for( int i = 0; i < SCIPexprGetNChildren(rootexpr); ++i )
1602  {
1603  SCIP_EXPR* child;
1604  child = SCIPexprGetChildren(rootexpr)[i];
1605  /* linear term is ok */
1606  if( SCIPisExprVaridx(scip, child) )
1607  continue;
1608  /* square term is ok */
1609  if( SCIPisExprPower(scip, child) && SCIPgetExponentExprPow(child) == 2.0 && SCIPisExprVaridx(scip, SCIPexprGetChildren(child)[0]) )
1610  continue;
1611  /* bilinear term is ok */
1612  if( SCIPisExprProduct(scip, child) && SCIPexprGetNChildren(child) == 2 && SCIPisExprVaridx(scip, SCIPexprGetChildren(child)[0]) && SCIPisExprVaridx(scip, SCIPexprGetChildren(child)[1]) )
1613  continue;
1614  /* everything else means not quadratic (or not simplified) */
1615  (*exprintdata)->hesconstant = false;
1616  break;
1617  }
1618  SCIPdebugMsg(scip, "Hessian found %sconstant\n", (*exprintdata)->hesconstant ? "" : "not ");
1619  }
1620 
1621  return SCIP_OKAY;
1622 }
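The compile/evaluate/free cycle implied by the functions in this file can be summarized in a short sketch. This is an illustrative example added by the editor, not part of exprinterpret_cppad.cpp; it assumes the headers included at the top of this file, that rootexpr is an expression whose leaves are varidx expressions with indices 0..dim-1, and that point is indexed by those variable indices (the function name evalWithExprint is hypothetical):

static SCIP_RETCODE evalWithExprint(
   SCIP*                 scip,               /* SCIP data structure */
   SCIP_EXPR*            rootexpr,           /* expression over varidx expressions 0..dim-1 */
   SCIP_Real*            point,              /* point of length dim, indexed by varidx */
   SCIP_Real*            value               /* buffer to store expression value */
   )
{
   SCIP_EXPRINT* exprint;
   SCIP_EXPRINTDATA* exprintdata = NULL;     /* must be initialized to NULL before the first compile */

   SCIP_CALL( SCIPexprintCreate(scip, &exprint) );
   SCIP_CALL( SCIPexprintCompile(scip, exprint, rootexpr, &exprintdata) );
   SCIP_CALL( SCIPexprintEval(scip, exprint, rootexpr, exprintdata, point, value) );

   SCIP_CALL( SCIPexprintFreeData(scip, exprint, rootexpr, &exprintdata) );
   SCIP_CALL( SCIPexprintFree(scip, &exprint) );

   return SCIP_OKAY;
}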
1623 
1624 /** frees interpreter data for expression */
1625 SCIP_RETCODE SCIPexprintFreeData(
1626  SCIP* scip, /**< SCIP data structure */
1627  SCIP_EXPRINT* exprint, /**< interpreter data structure */
1628  SCIP_EXPR* expr, /**< expression */
1629  SCIP_EXPRINTDATA** exprintdata /**< pointer to pointer to compiled data to be freed */
1630  )
1631 {
1632  assert( exprintdata != NULL);
1633  assert(*exprintdata != NULL);
1634 
1635  SCIPfreeBlockMemoryArrayNull(scip, &(*exprintdata)->hesrowidxs, (*exprintdata)->hesnnz);
1636  SCIPfreeBlockMemoryArrayNull(scip, &(*exprintdata)->hescolidxs, (*exprintdata)->hesnnz);
1637 
1638  delete *exprintdata;
1639  *exprintdata = NULL;
1640 
1641  return SCIP_OKAY;
1642 }
1643 
1644 /** gives the capability to evaluate an expression by the expression interpreter
1645  *
1646  * For user-given expressions, higher-order derivatives may not be available for the user-expression,
1647  * even if the expression interpreter could handle them. This method makes it possible to recognize that, e.g., the
1648  * Hessian for an expression is not available because it contains a user expression that does not provide
1649  * Hessians.
1650  */
1651 SCIP_EXPRINTCAPABILITY SCIPexprintGetExprCapability(
1652  SCIP* scip, /**< SCIP data structure */
1653  SCIP_EXPRINT* exprint, /**< interpreter data structure */
1654  SCIP_EXPR* expr, /**< expression */
1655  SCIP_EXPRINTDATA* exprintdata /**< interpreter-specific data for expression */
1656  )
1657 {
1658  assert(exprintdata != NULL);
1659 
1660  return exprintdata->userevalcapability;
1661 }/*lint !e715*/
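A caller that needs second derivatives can test the returned bitmask before relying on SCIPexprintHessian(). A minimal editor-added sketch (the helper name hessianAvailable is hypothetical), assuming the handles come from a prior SCIPexprintCompile() call:

static SCIP_Bool hessianAvailable(
   SCIP*                 scip,               /* SCIP data structure */
   SCIP_EXPRINT*         exprint,            /* expression interpreter */
   SCIP_EXPR*            expr,               /* compiled expression */
   SCIP_EXPRINTDATA*     exprintdata         /* interpreter data from SCIPexprintCompile() */
   )
{
   /* the capability is a bitmask; SCIP_EXPRINTCAPABILITY_HESSIAN was removed in SCIPexprintCompile()
    * if an expression without known taping support was encountered */
   return (SCIPexprintGetExprCapability(scip, exprint, expr, exprintdata) & SCIP_EXPRINTCAPABILITY_HESSIAN) != 0;
}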
1662 
1663 /** evaluates an expression */
1664 SCIP_RETCODE SCIPexprintEval(
1665  SCIP* scip, /**< SCIP data structure */
1666  SCIP_EXPRINT* exprint, /**< interpreter data structure */
1667  SCIP_EXPR* expr, /**< expression */
1668  SCIP_EXPRINTDATA* exprintdata, /**< interpreter-specific data for expression */
1669  SCIP_Real* varvals, /**< values of variables */
1670  SCIP_Real* val /**< buffer to store value of expression */
1671  )
1672 {
1673  assert(expr != NULL);
1674  assert(exprintdata != NULL);
1675  assert(varvals != NULL);
1676  assert(val != NULL);
1677 
1678  size_t n = exprintdata->varidxs.size();
1679 
1680  if( n == 0 )
1681  {
1682  SCIP_CALL( SCIPevalExpr(scip, expr, NULL, 0L) );
1683  exprintdata->val = *val = SCIPexprGetEvalValue(expr);
1684  return SCIP_OKAY;
1685  }
1686 
1687  if( exprintdata->need_retape_always || exprintdata->need_retape )
1688  {
1689  /* free old Hessian data, if any */
1690  SCIPfreeBlockMemoryArrayNull(scip, &exprintdata->hesrowidxs, exprintdata->hesnnz);
1691  SCIPfreeBlockMemoryArrayNull(scip, &exprintdata->hescolidxs, exprintdata->hesnnz);
1692  exprintdata->hesvalues.clear();
1693  exprintdata->hesnnz = 0;
1694 
1695  for( size_t i = 0; i < n; ++i )
1696  {
1697  int idx = exprintdata->varidxs[i];
1698  exprintdata->X[i] = varvals[idx];
1699  exprintdata->x[i] = varvals[idx]; /* need this for a following grad or hessian eval with new_x = false */
1700  }
1701 
1702  // delete old atomic_userexprs before we start collecting new ones
1703  for( vector<atomic_userexpr*>::iterator it(exprintdata->userexprs.begin()); it != exprintdata->userexprs.end(); ++it )
1704  delete *it;
1705  exprintdata->userexprs.clear();
1706 
1707  CppAD::Independent(exprintdata->X);
1708 
1709  SCIP_CALL( eval(scip, expr, exprintdata, exprintdata->X, exprintdata->Y[0]) );
1710 
1711  exprintdata->f.Dependent(exprintdata->X, exprintdata->Y);
1712 
1713  exprintdata->val = Value(exprintdata->Y[0]);
1714  SCIPdebugMessage("Eval retaped and computed value %g\n", exprintdata->val);
1715 
1716  // the following is required if the gradient shall be computed by a reverse sweep later
1717  // exprintdata->val = exprintdata->f.Forward(0, exprintdata->x)[0];
1718 
1719  // https://coin-or.github.io/CppAD/doc/optimize.htm
1720  exprintdata->f.optimize();
1721 
1722  exprintdata->need_retape = false;
1723  }
1724  else
1725  {
1726  assert(exprintdata->x.size() >= n);
1727  for( size_t i = 0; i < n; ++i )
1728  exprintdata->x[i] = varvals[exprintdata->varidxs[i]];
1729 
1730  exprintdata->val = exprintdata->f.Forward(0, exprintdata->x)[0];
1731  SCIPdebugMessage("Eval used forward sweep to compute value %g\n", exprintdata->val);
1732  }
1733 
1734  *val = exprintdata->val;
1735 
1736  return SCIP_OKAY;
1737 }
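Note that, as the loops above show, varvals is addressed by the indices of the varidx expressions, not densely by 0..n-1. A short editor-added snippet (meant to live inside a function returning SCIP_RETCODE; the indices 0 and 3 and the values are hypothetical), reusing the handles from the sketch after SCIPexprintCompile():

   /* the expression uses varidx 0 and varidx 3 only, so varvals needs at least 4 entries;
    * entries 1 and 2 are never read by the interpreter */
   SCIP_Real varvals[4] = { 1.0, 0.0, 0.0, 2.0 };
   SCIP_Real val;

   SCIP_CALL( SCIPexprintEval(scip, exprint, rootexpr, exprintdata, varvals, &val) );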
1738 
1739 /** computes value and gradient of an expression */
1740 SCIP_RETCODE SCIPexprintGrad(
1741  SCIP* scip, /**< SCIP data structure */
1742  SCIP_EXPRINT* exprint, /**< interpreter data structure */
1743  SCIP_EXPR* expr, /**< expression */
1744  SCIP_EXPRINTDATA* exprintdata, /**< interpreter-specific data for expression */
1745  SCIP_Real* varvals, /**< values of variables, can be NULL if new_varvals is FALSE */
1746  SCIP_Bool new_varvals, /**< have variable values changed since last call to a point evaluation routine? */
1747  SCIP_Real* val, /**< buffer to store expression value */
1748  SCIP_Real* gradient /**< buffer to store expression gradient */
1749  )
1750 {
1751  assert(expr != NULL);
1752  assert(exprintdata != NULL);
1753  assert(varvals != NULL || new_varvals == FALSE);
1754  assert(val != NULL);
1755  assert(gradient != NULL);
1756 
1757  if( new_varvals )
1758  {
1759  SCIP_CALL( SCIPexprintEval(scip, exprint, expr, exprintdata, varvals, val) );
1760  }
1761  else
1762  *val = exprintdata->val;
1763 
1764  size_t n = exprintdata->varidxs.size();
1765 
1766  if( n == 0 )
1767  return SCIP_OKAY;
1768 
1769  vector<double> jac;
1770  if( exprintdata->userexprs.empty() )
1771  jac = exprintdata->f.Jacobian(exprintdata->x);
1772  else
1773  {
1774  // atomic_userexpr::reverse() is not implemented yet (not even for p=0)
1775  // so force use of forward-eval for gradient (though that seems pretty inefficient)
1776  exprintdata->f.Forward(0, exprintdata->x);
1777  jac.resize(n);
1778  CppAD::JacobianFor(exprintdata->f, exprintdata->x, jac);
1779  }
1780 
1781  for( size_t i = 0; i < n; ++i )
1782  gradient[exprintdata->varidxs[i]] = jac[i];
1783 
1784 #ifdef SCIP_DEBUG
1785  SCIPinfoMessage(scip, NULL, "Grad for ");
1786  SCIP_CALL( SCIPprintExpr(scip, expr, NULL) );
1787  SCIPinfoMessage(scip, NULL, "\nx = ");
1788  for( size_t i = 0; i < n; ++i )
1789  {
1790  SCIPinfoMessage(scip, NULL, "\t %g", exprintdata->x[i]);
1791  }
1792  SCIPinfoMessage(scip, NULL, "\ngrad = ");
1793  for( size_t i = 0; i < n; ++i )
1794  {
1795  SCIPinfoMessage(scip, NULL, "\t %g", jac[i]);
1796  }
1797  SCIPinfoMessage(scip, NULL, "\n");
1798 #endif
1799 
1800  return SCIP_OKAY;
1801 }
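Since the loop above writes only the positions gradient[varidxs[i]], entries of the gradient buffer belonging to variables that do not appear in the expression are left untouched, so callers wanting a dense gradient should clear the buffer first. An editor-added snippet continuing the hypothetical example above:

   SCIP_Real grad[4] = { 0.0, 0.0, 0.0, 0.0 };  /* cleared, since only used variable indices are overwritten */
   SCIP_Real val;

   /* new_varvals = TRUE forces a fresh SCIPexprintEval() on varvals before the gradient is computed */
   SCIP_CALL( SCIPexprintGrad(scip, exprint, rootexpr, exprintdata, varvals, TRUE, &val, grad) );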
1802 
1803 /** gives sparsity pattern of lower-triangular part of Hessian
1804  *
1805  * Since the AD code might need to do a forward sweep, variable values need to be passed in here.
1806  *
1807  * Result will have `(*colidxs)[i] <= (*rowidxs)[i]` for `i=0..*nnz-1`.
1808  */
1809 SCIP_RETCODE SCIPexprintHessianSparsity(
1810  SCIP* scip, /**< SCIP data structure */
1811  SCIP_EXPRINT* exprint, /**< interpreter data structure */
1812  SCIP_EXPR* expr, /**< expression */
1813  SCIP_EXPRINTDATA* exprintdata, /**< interpreter-specific data for expression */
1814  SCIP_Real* varvals, /**< values of variables */
1815  int** rowidxs, /**< buffer to return array with row indices of Hessian elements */
1816  int** colidxs, /**< buffer to return array with column indices of Hessian elements */
1817  int* nnz /**< buffer to return length of arrays */
1818  )
1819 {
1820  assert(expr != NULL);
1821  assert(exprintdata != NULL);
1822  assert(varvals != NULL);
1823  assert(rowidxs != NULL);
1824  assert(colidxs != NULL);
1825  assert(nnz != NULL);
1826 
1827  if( exprintdata->hesrowidxs == NULL )
1828  {
1829  assert(exprintdata->hescolidxs == NULL);
1830  assert(exprintdata->hesvalues.size() == 0);
1831  assert(exprintdata->hesnnz == 0);
1832 
1833  size_t n = exprintdata->varidxs.size();
1834  if( n == 0 )
1835  {
1836  *nnz = 0;
1837  return SCIP_OKAY;
1838  }
1839 
1840  size_t nn = n*n;
1841 
1842  if( exprintdata->need_retape_always )
1843  {
1844  // pretend dense
1845  exprintdata->hesnnz = (n * (n+1))/2;
1846  SCIP_CALL( SCIPallocBlockMemoryArray(scip, &exprintdata->hesrowidxs, exprintdata->hesnnz) );
1847  SCIP_CALL( SCIPallocBlockMemoryArray(scip, &exprintdata->hescolidxs, exprintdata->hesnnz) );
1848 
1849  int k = 0;
1850  for( size_t i = 0; i < n; ++i )
1851  for( size_t j = 0; j <= i; ++j )
1852  {
1853  exprintdata->hesrowidxs[k] = exprintdata->varidxs[i];
1854  exprintdata->hescolidxs[k] = exprintdata->varidxs[j];
1855  k++;
1856  }
1857 
1858 #ifdef SCIP_DEBUG
1859  SCIPinfoMessage(scip, NULL, "HessianSparsity for ");
1860  SCIP_CALL( SCIPprintExpr(scip, expr, NULL) );
1861  SCIPinfoMessage(scip, NULL, ": all elements, due to discontinuities\n");
1862 #endif
1863 
1864  *rowidxs = exprintdata->hesrowidxs;
1865  *colidxs = exprintdata->hescolidxs;
1866  *nnz = exprintdata->hesnnz;
1867 
1868  return SCIP_OKAY;
1869  }
1870 
1871  if( exprintdata->need_retape )
1872  {
1873  SCIP_Real val;
1874  SCIP_CALL( SCIPexprintEval(scip, exprint, expr, exprintdata, varvals, &val) );
1875  } /*lint !e438*/
1876 
1877  // following https://github.com/coin-or/CppAD/blob/20180000.0/cppad_ipopt/src/vec_fun_pattern.cpp
1878 
1879  SCIPdebugMessage("calling ForSparseJac\n");
1880 
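  // editor note: r below encodes the n x n identity matrix in row-major order (r[i*n+j] is true iff i==j);
  // seeding the forward Jacobian sparsity with the identity and then doing a reverse sweep for the single
  // output (RevSparseHesCase with s = (true)) yields the sparsity pattern of the Hessian of f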
1881  vector<bool> r(nn, false);
1882  for( size_t i = 0; i < n; ++i )
1883  r[i*n+i] = true;
1884  (void) exprintdata->f.ForSparseJac(n, r); // need to compute sparsity for Jacobian first
1885 
1886  SCIPdebugMessage("calling RevSparseHes\n");
1887 
1888  // this was originally
1889  // vector<bool> s(1, true);
1890  // hessparsity = exprintdata->f.RevSparseHes(n, s);
1891  // RevSparseHes is just calling RevSparseHesCase
1892  // to avoid copying hessparsity, call RevSparseHesCase directly
1893  vector<bool> hessparsity;
1894  exprintdata->f.RevSparseHesCase(true, false, n, vector<bool>(1, true), hessparsity);
1895 
1896  // count number of hessian elements and setup hessparsity_pattern
1897  exprintdata->hessparsity_pattern.resize(0, 0); // clear old data, if any
1898  exprintdata->hessparsity_pattern.resize(n, n);
1899  size_t hesnnz_full = 0; // number of nonzeros in full matrix, that is, not only lower-diagonal
1900  for( size_t i = 0; i < nn; ++i )
1901  if( hessparsity[i] )
1902  {
1903  size_t row = i / n;
1904  size_t col = i % n;
1905 
1906  ++hesnnz_full;
1907 
1908  if( col > row )
1909  continue;
1910  ++exprintdata->hesnnz;
1911  }
1912 
1913  // hessian sparsity in sparse form
1914  // - hesrowidxs,hescolidxs are nonzero entries in the lower-diagonal of the Hessian and are returned to the caller; indices are variable indices as in expression
1915  // - hessparsity_row,hessparsity_col are nonzero entries in the full Hessian and are used in SCIPexprintHessian(); indices are 0..n-1 (n=dimension of f)
1916  // - hessparsity_pattern is the same information as hessparsity_row,hessparsity_col, but stored in different form
1917  // originally we were calling CppAD::local::sparsity_user2internal(exprintdata->hessparsity_pattern, exprintdata->hessparsity, n, n, false, ""),
1918  // which was again looping over hessparsity, so this is included into one loop here
1919  SCIP_CALL( SCIPallocBlockMemoryArray(scip, &exprintdata->hesrowidxs, exprintdata->hesnnz) );
1920  SCIP_CALL( SCIPallocBlockMemoryArray(scip, &exprintdata->hescolidxs, exprintdata->hesnnz) );
1921 
1922  exprintdata->hessparsity_row.resize(hesnnz_full);
1923  exprintdata->hessparsity_col.resize(hesnnz_full);
1924 
1925  for( size_t i = 0, j = 0, k = 0; i < nn; ++i )
1926  if( hessparsity[i] )
1927  {
1928  size_t row = i / n;
1929  size_t col = i % n;
1930 
1931  if( (size_t)exprintdata->hesnnz <= nn/4 )
1932  {
1933  // we need hessparsity_pattern only if SCIPexprintHessian is doing a sparse hessian eval; nn/4 is the threshold for that
1934  exprintdata->hessparsity_pattern.add_element(row, col);
1935  }
1936 
1937  if( col > row )
1938  {
1939  // add above-diagonal elements into end part of hessparsity_row/col, so we have a 1:1 correspondence
1940  // between hessparsity_row/col and hesrowidxs/hescolidxs
1941  assert(exprintdata->hesnnz + k < hesnnz_full);
1942  exprintdata->hessparsity_row[exprintdata->hesnnz + k] = row;
1943  exprintdata->hessparsity_col[exprintdata->hesnnz + k] = col;
1944  ++k;
1945  continue;
1946  }
1947 
1948  exprintdata->hessparsity_row[j] = row;
1949  exprintdata->hessparsity_col[j] = col;
1950 
1951  assert(j < (size_t)exprintdata->hesnnz);
1952  exprintdata->hesrowidxs[j] = exprintdata->varidxs[row];
1953  exprintdata->hescolidxs[j] = exprintdata->varidxs[col];
1954  ++j;
1955  }
1956 
1957 #ifdef SCIP_DEBUG
1958  SCIPinfoMessage(scip, NULL, "HessianSparsity for ");
1959  SCIP_CALL( SCIPprintExpr(scip, expr, NULL) );
1960  SCIPinfoMessage(scip, NULL, ":");
1961  for( int i = 0; i < exprintdata->hesnnz; ++i )
1962  {
1963  SCIPinfoMessage(scip, NULL, " (%d,%d)", exprintdata->hesrowidxs[i], exprintdata->hescolidxs[i]);
1964  }
1965  SCIPinfoMessage(scip, NULL, "\n");
1966 #endif
1967  }
1968 
1969  *rowidxs = exprintdata->hesrowidxs;
1970  *colidxs = exprintdata->hescolidxs;
1971  *nnz = exprintdata->hesnnz;
1972 
1973  return SCIP_OKAY;
1974 }
1975 
1976 /** computes value and Hessian of an expression
1977  *
1978  * Returned arrays `rowidxs` and `colidxs` and number of elements `nnz` are the same as given by SCIPexprintHessianSparsity().
1979  * Returned array `hessianvals` will contain the corresponding Hessian elements.
1980  */
1981 SCIP_RETCODE SCIPexprintHessian(
1982  SCIP* scip, /**< SCIP data structure */
1983  SCIP_EXPRINT* exprint, /**< interpreter data structure */
1984  SCIP_EXPR* expr, /**< expression */
1985  SCIP_EXPRINTDATA* exprintdata, /**< interpreter-specific data for expression */
1986  SCIP_Real* varvals, /**< values of variables, can be NULL if new_varvals is FALSE */
1987  SCIP_Bool new_varvals, /**< have variable values changed since last call to an evaluation routine? */
1988  SCIP_Real* val, /**< buffer to store function value */
1989  int** rowidxs, /**< buffer to return array with row indices of Hessian elements */
1990  int** colidxs, /**< buffer to return array with column indices of Hessian elements */
1991  SCIP_Real** hessianvals, /**< buffer to return array with Hessian elements */
1992  int* nnz /**< buffer to return length of arrays */
1993  )
1994 {
1995  assert(expr != NULL);
1996  assert(exprintdata != NULL);
1997 
1998  if( exprintdata->hesrowidxs == NULL )
1999  {
2000  /* setup sparsity if not done yet */
2001  int dummy1;
2002  int* dummy2;
2003 
2004  assert(exprintdata->hescolidxs == NULL);
2005  assert(exprintdata->hesvalues.size() == 0);
2006  assert(exprintdata->hesnnz == 0);
2007 
2008  SCIP_CALL( SCIPexprintHessianSparsity(scip, exprint, expr, exprintdata, varvals, &dummy2, &dummy2, &dummy1) );
2009 
2010  new_varvals = FALSE;
2011  }
2012 
2013  if( new_varvals )
2014  {
2015  SCIP_CALL( SCIPexprintEval(scip, exprint, expr, exprintdata, varvals, val) );
2016  }
2017  else
2018  *val = exprintdata->val;
2019 
2020  size_t n = exprintdata->varidxs.size();
2021 
2022  // eval hessian; if constant, then only if not evaluated yet (hesvalues has size 0, but hesnnz > 0)
2023  if( n > 0 && (!exprintdata->hesconstant || exprintdata->hesvalues.size() < (size_t)exprintdata->hesnnz) )
2024  {
2025  size_t nn = n*n;
2026 
2027  if( exprintdata->hesvalues.size() == 0 )
2028  {
2029  // using here hessparsity_row.size() instead of hesnnz as we want to pass hesvalues to CppAD and it prefers to calculate a full Hessian
2030  exprintdata->hesvalues.resize(exprintdata->hessparsity_row.size());
2031  }
2032 
2033  // these use reverse mode
2034  if( (size_t)exprintdata->hesnnz > nn/4 )
2035  {
2036  // use dense Hessian if there are many nonzero elements (approx more than half (recall only lower (or upper)-triangular is considered))
2037  // because it seems faster than the sparse Hessian if there isn't actually much sparsity
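  // editor note: for example, with n = 10 we have nn/4 = 25 while the lower triangle has n*(n+1)/2 = 55 entries,
  // so the dense path is taken once roughly half of the lower-triangular entries are nonzero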
2038  vector<double> hess = exprintdata->f.Hessian(exprintdata->x, 0);
2039  for( int i = 0; i < exprintdata->hesnnz; ++i )
2040  exprintdata->hesvalues[i] = hess[exprintdata->hessparsity_row[i] * n + exprintdata->hessparsity_col[i]];
2041  }
2042  else
2043  {
2044  // originally, this was hess = exprintdata->f.SparseHessian(exprintdata->x, vector<double>(1, 1.0), exprintdata->hessparsity),
2045  // where hess was a dense nxn matrix as in the case above
2046  // to reuse the coloring of the sparsity pattern and use also a sparse matrix for the Hessian values, we now call SparseHessianCompute directly
2047  exprintdata->f.SparseHessianCompute(exprintdata->x, vector<double>(1, 1.0), exprintdata->hessparsity_pattern, exprintdata->hessparsity_row, exprintdata->hessparsity_col, exprintdata->hesvalues, exprintdata->heswork);
2048 
2049 #ifdef NDEBUG
2050  // hessparsity_row, hessparsity_col, hessparsity_pattern are no longer used by SparseHessianCompute after the coloring has been computed in the first call, except for some asserts; since asserts are disabled in this build, we can free some memory here
2051  exprintdata->hessparsity_row.clear();
2052  exprintdata->hessparsity_col.clear();
2053  exprintdata->hessparsity_pattern.resize(0,0);
2054 #endif
2055  }
2056  }
2057 
2058 #ifdef SCIP_DEBUG
2059  SCIPinfoMessage(scip, NULL, "Hessian for ");
2060  SCIP_CALL( SCIPprintExpr(scip, expr, NULL) );
2061  SCIPinfoMessage(scip, NULL, "\nat x = ");
2062  for( size_t i = 0; i < n; ++i )
2063  {
2064  SCIPinfoMessage(scip, NULL, "\t %g", exprintdata->x[i]);
2065  }
2066  SCIPinfoMessage(scip, NULL, "\nis ");
2067  for( int i = 0; i < exprintdata->hesnnz; ++i )
2068  {
2069  SCIPinfoMessage(scip, NULL, " (%d,%d)=%g", exprintdata->hesrowidxs[i], exprintdata->hescolidxs[i], exprintdata->hesvalues[i]);
2070  }
2071  SCIPinfoMessage(scip, NULL, "\n");
2072 #endif
2073 
2074  *rowidxs = exprintdata->hesrowidxs;
2075  *colidxs = exprintdata->hescolidxs;
2076  *hessianvals = exprintdata->hesvalues.data();
2077  *nnz = exprintdata->hesnnz;
2078 
2079  return SCIP_OKAY;
2080 }
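To illustrate how the returned triplets are consumed, the following editor-added sketch (not part of this file; denseHessianFromExprint, dim, and densehes are hypothetical) expands the lower-triangular result into a dense symmetric matrix:

static SCIP_RETCODE denseHessianFromExprint(
   SCIP*                 scip,               /* SCIP data structure */
   SCIP_EXPRINT*         exprint,            /* expression interpreter */
   SCIP_EXPR*            rootexpr,           /* compiled expression over varidx expressions */
   SCIP_EXPRINTDATA*     exprintdata,        /* interpreter data from SCIPexprintCompile() */
   SCIP_Real*            varvals,            /* point, indexed by varidx */
   int                   dim,                /* one more than the largest varidx */
   SCIP_Real*            densehes            /* buffer of length dim*dim, assumed to be zeroed */
   )
{
   SCIP_Real val;
   int* rowidxs;
   int* colidxs;
   SCIP_Real* hesvals;
   int nnz;
   int i;

   SCIP_CALL( SCIPexprintHessian(scip, exprint, rootexpr, exprintdata, varvals, TRUE, &val, &rowidxs, &colidxs, &hesvals, &nnz) );

   for( i = 0; i < nnz; ++i )
   {
      /* only entries with colidxs[i] <= rowidxs[i] are returned; mirror them to fill the full symmetric matrix */
      densehes[rowidxs[i] * dim + colidxs[i]] = hesvals[i];
      densehes[colidxs[i] * dim + rowidxs[i]] = hesvals[i];
   }

   return SCIP_OKAY;
}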