
exprinterpret_cppad.cpp
1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2 /* */
3 /* This file is part of the program and library */
4 /* SCIP --- Solving Constraint Integer Programs */
5 /* */
6 /* Copyright (C) 2002-2021 Konrad-Zuse-Zentrum */
7 /* fuer Informationstechnik Berlin */
8 /* */
9 /* SCIP is distributed under the terms of the ZIB Academic License. */
10 /* */
11 /* You should have received a copy of the ZIB Academic License */
12 /* along with SCIP; see the file COPYING. If not visit scipopt.org. */
13 /* */
14 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
15 
16 /**@file exprinterpret_cppad.cpp
17  * @brief methods to interpret (evaluate) an expression tree "fast" using CppAD
18  * @ingroup EXPRINTS
19  * @author Stefan Vigerske
20  */
21 
22 /*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/
23 
24 #include "scip/def.h"
25 #include "blockmemshell/memory.h"
26 #include "nlpi/pub_expr.h"
27 #include "nlpi/exprinterpret.h"
28 
29 #include <cmath>
30 #include <vector>
31 using std::vector;
32 
33 /* Turn off lint warning "747: Significant prototype coercion" and "732: Loss of sign".
34  * The first warning is generated for expressions like t[0], where t is a vector, since 0 is an integer constant, but a
35  * size_t is expected (usually long unsigned). The second is generated for expressions like t[n], where n is an
36  * integer. Both code pieces are likely to be correct. It seems to be impossible to inhibit these messages for
37  * vector<*>::operator[] only. */
38 /*lint --e{747,732}*/
39 
40 /* Turn off lint info "1702 operator '...' is both an ordinary function 'CppAD::operator...' and a member function 'CppAD::SCIPInterval::operator...'".
41  * However, the functions have different signatures (the CppAD one working on double, the SCIPInterval member
42  * function working on SCIPInterval's).
43  */
44 /*lint --e{1702}*/
45 
46 /* defining NO_CPPAD_USER_ATOMIC disables the use of our own implementation of derivatives of power operators
47  * via CppAD's user-atomic function feature
48  * our customized implementation should give better results (tighter intervals) for the interval data type
49  */
50 /* #define NO_CPPAD_USER_ATOMIC */
51 
52 /** sign of a value (-1 or +1)
53  *
54  * 0.0 has sign +1
55  */
56 #define SIGN(x) ((x) >= 0.0 ? 1.0 : -1.0)
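/* illustrative examples (not in the original listing): SIGN(3.2) and SIGN(0.0) both evaluate to 1.0,
 * while SIGN(-3.2) evaluates to -1.0, matching the convention documented above */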
57 
58 /* in order to use intervals as operands in CppAD,
59  * we need to include the intervalarithext.h very early and require the interval operations to be in the CppAD namespace */
60 #define SCIPInterval_NAMESPACE CppAD
61 #include "nlpi/intervalarithext.h"
62 
63 namespace CppAD
64 {
65  SCIP_Real SCIPInterval::infinity = SCIP_DEFAULT_INFINITY;
66 }
67 using CppAD::SCIPInterval;
68 
69 /* CppAD needs to know a fixed upper bound on the number of threads at compile time.
70  * It is wise to set it to a power of 2, so that if the tape id overflows, it is likely to start at 0 again, which avoids difficult to debug errors.
71  */
72 #ifndef CPPAD_MAX_NUM_THREADS
73 #ifndef NPARASCIP
74 #define CPPAD_MAX_NUM_THREADS 64
75 #else
76 #define CPPAD_MAX_NUM_THREADS 1
77 #endif
78 #endif
79 
80 /* disable -Wshadow warnings for upcoming includes of CppAD if using some old GCC
81  * -Wshadow was too strict with some versions of GCC 4 (https://stackoverflow.com/questions/2958457/gcc-wshadow-is-too-strict)
82  */
83 #ifdef __GNUC__
84 #if __GNUC__ == 4
85 #pragma GCC diagnostic ignored "-Wshadow"
86 #endif
87 #endif
88 
89 #include <cppad/cppad.hpp>
90 #include <cppad/utility/error_handler.hpp>
91 
92 /* CppAD is not thread-safe by itself, but uses some static data structures.
93  * To run it in a multithreading environment, a special CppAD memory allocator that is aware of the multiple threads has to be used.
94  * This allocator requires to know the number of threads and a thread number for each thread.
95  * To implement this, we follow the team_pthread example of CppAD, which uses pthread's thread-specific data management.
96  */
97 #ifndef NPARASCIP
98 
99 #include <atomic>
100 
101 /** currently registered number of threads */
102 static std::atomic_size_t ncurthreads{0};
103 static thread_local int thread_number{-1};
104 
105 /** CppAD callback function that indicates whether we are running in parallel mode */
106 static
107 bool in_parallel(void)
108 {
109  return ncurthreads > 1;
110 }
111 
112 /** CppAD callback function that returns the number of the current thread
113  *
114  * assigns a new number to the thread if new
115  */
116 static
117 size_t thread_num(void)
118 {
119  size_t threadnum;
120 
121  /* if no thread_number for this thread yet, then assign a new thread number to the current thread
122  */
123  if( thread_number == -1 )
124  {
125  thread_number = static_cast<int>(ncurthreads.fetch_add(1, std::memory_order_relaxed));
126  }
127 
128  threadnum = static_cast<size_t>(thread_number);
129 
130  return threadnum;
131 }
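/* note (illustrative): the first thread that calls thread_num() is assigned number 0, the next one 1, and so on;
 * ncurthreads thus counts the threads that have registered themselves so far, and in_parallel() reports
 * parallel mode as soon as a second thread has registered */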
132 
133 /** sets up CppAD's data structures for running in multithreading mode
134  *
135  * It must be called once before multithreading is started.
136  * For GCC-compatible compilers, this will happen automatically.
137  */
138 extern "C" SCIP_EXPORT char SCIPexprintCppADInitParallel(void);
139 #ifdef __GNUC__
140 __attribute__((constructor))
141 #endif
142 char SCIPexprintCppADInitParallel(void)
143 {
144  CppAD::thread_alloc::parallel_setup(CPPAD_MAX_NUM_THREADS, in_parallel, thread_num);
145  CppAD::parallel_ad<double>();
146  CppAD::parallel_ad<SCIPInterval>();
147 
148  return 0;
149 }
150 
151 #if !defined(__GNUC__)
152 /** a dummy variable that is initialized to the result of init_parallel
153  *
154  * The purpose is to make sure that init_parallel() is called before any multithreading is started.
155  */
156 static char init_parallel_return = SCIPexprintCppADInitParallel();
157 #endif
158 
159 #endif // NPARASCIP
160 
161 /** definition of CondExpOp for SCIPInterval (required by CppAD) */ /*lint -e715*/
162 inline
163 SCIPInterval CondExpOp(
164  enum CppAD::CompareOp cop,
165  const SCIPInterval& left,
166  const SCIPInterval& right,
167  const SCIPInterval& trueCase,
168  const SCIPInterval& falseCase)
169 { /*lint --e{715}*/
170  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
171  "SCIPInterval CondExpOp(...)",
172  "Error: cannot use CondExp with an interval type"
173  );
174 
175  return SCIPInterval();
176 }
177 
178 /** another function required by CppAD */ /*lint -e715*/
179 inline
180 bool IdenticalPar(
181  const SCIPInterval& x /**< operand */
182  )
183 { /*lint --e{715}*/
184  return true;
185 }
186 
187 /** returns whether the interval equals [0,0] */
188 inline
189 bool IdenticalZero(
190  const SCIPInterval& x /**< operand */
191  )
192 {
193  return (x == 0.0);
194 }
195 
196 /** returns whether the interval equals [1,1] */
197 inline
198 bool IdenticalOne(
199  const SCIPInterval& x /**< operand */
200  )
201 {
202  return (x == 1.0);
203 }
204 
205 /** yet another function that checks whether two intervals are equal */
206 inline
207 bool IdenticalEqualPar(
208  const SCIPInterval& x, /**< first operand */
209  const SCIPInterval& y /**< second operand */
210  )
211 {
212  return (x == y);
213 }
214 
215 /** greater than zero not defined for intervals */ /*lint -e715*/
216 inline
217 bool GreaterThanZero(
218  const SCIPInterval& x /**< operand */
219  )
220 { /*lint --e{715}*/
221  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
222  "GreaterThanZero(x)",
223  "Error: cannot use GreaterThanZero with interval"
224  );
225 
226  return false;
227 }
228 
229 /** greater than or equal zero not defined for intervals */ /*lint -e715*/
230 inline
231 bool GreaterThanOrZero(
232  const SCIPInterval& x /**< operand */
233  )
234 { /*lint --e{715}*/
235  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__ ,
236  "GreaterThanOrZero(x)",
237  "Error: cannot use GreaterThanOrZero with interval"
238  );
239 
240  return false;
241 }
242 
243 /** less than not defined for intervals */ /*lint -e715*/
244 inline
245 bool LessThanZero(
246  const SCIPInterval& x /**< operand */
247  )
248 { /*lint --e{715}*/
249  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
250  "LessThanZero(x)",
251  "Error: cannot use LessThanZero with interval"
252  );
253 
254  return false;
255 }
256 
257 /** less than or equal not defined for intervals */ /*lint -e715*/
258 inline
259 bool LessThanOrZero(
260  const SCIPInterval& x /**< operand */
261  )
262 { /*lint --e{715}*/
263  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
264  "LessThanOrZero(x)",
265  "Error: cannot use LessThanOrZero with interval"
266  );
267 
268  return false;
269 }
270 
271 /** conversion to integers not defined for intervals */ /*lint -e715*/
272 inline
273 int Integer(
274  const SCIPInterval& x /**< operand */
275  )
276 { /*lint --e{715}*/
277  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
278  "Integer(x)",
279  "Error: cannot use Integer with interval"
280  );
281 
282  return 0;
283 }
284 
285 /** absolute zero multiplication
286  *
287  * @return [0,0] if first argument is [0,0] independent of whether the second argument is an empty interval or not
288  */
289 inline
290 SCIPInterval azmul(
291  const SCIPInterval& x, /**< first operand */
292  const SCIPInterval& y /**< second operand */
293  )
294 {
295  if( x.inf == 0.0 && x.sup == 0.0 )
296  return SCIPInterval(0.0, 0.0);
297  return x * y;
298 }
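/* illustrative example: azmul(SCIPInterval(0.0, 0.0), y) yields [0,0] for any interval y, even an unbounded
 * one, whereas the plain product x * y need not; this is the "absolute zero" semantics CppAD expects */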
299 
300 /** printing of an interval (required by CppAD) */
301 inline
302 std::ostream& operator<<(std::ostream& out, const SCIP_INTERVAL& x)
303 {
304  out << '[' << x.inf << ',' << x.sup << ']';
305  return out;
306 }
307 
308 using CppAD::AD;
309 
310 /** expression interpreter */
311 struct SCIP_ExprInt
312 {
313  BMS_BLKMEM* blkmem; /**< block memory data structure */
314 };
315 
316 /** expression specific interpreter data */
317 struct SCIP_ExprIntData
318 {
319 public:
320  /** constructor */
321  SCIP_ExprIntData()
322  : val(0.0), need_retape(true), int_need_retape(true), need_retape_always(false), userevalcapability(SCIP_EXPRINTCAPABILITY_ALL), blkmem(NULL), root(NULL)
323  { }
324 
325  /** destructor */
326  ~SCIP_ExprIntData()
327  { }/*lint --e{1540}*/
328 
329  vector< AD<double> > X; /**< vector of dependent variables */
330  vector< AD<double> > Y; /**< result vector */
331  CppAD::ADFun<double> f; /**< the function to evaluate as CppAD object */
332 
333  vector<double> x; /**< current values of dependent variables */
334  double val; /**< current function value */
335  bool need_retape; /**< will retaping be required for the next point evaluation? */
336 
337  vector< AD<SCIPInterval> > int_X; /**< interval vector of dependent variables */
338  vector< AD<SCIPInterval> > int_Y; /**< interval result vector */
339  CppAD::ADFun<SCIPInterval> int_f; /**< the function to evaluate on intervals as CppAD object */
340 
341  vector<SCIPInterval> int_x; /**< current interval values of dependent variables */
342  SCIPInterval int_val; /**< current interval function value */
343  bool int_need_retape; /**< will retaping be required for the next interval evaluation? */
344 
345  bool need_retape_always; /**< will retaping be always required? */
346  SCIP_EXPRINTCAPABILITY userevalcapability; /**< (intersection of) capabilities of evaluation routines of user expressions */
347 
348  BMS_BLKMEM* blkmem; /**< block memory used to allocate expression tree */
349  SCIP_EXPR* root; /**< copy of expression tree; @todo we should not need to make a copy */
350 };
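/* design note (summary of the fields above): the data keeps two independent CppAD tapes, one over double
 * (X, Y, f) for point evaluation and derivatives, and one over SCIPInterval (int_X, int_Y, int_f) for
 * interval evaluation; the need_retape flags mark which tape has to be rebuilt before its next use */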
351 
352 #ifndef NO_CPPAD_USER_ATOMIC
353 
354 /** computes sparsity of jacobian for a univariate function during a forward sweep
355  *
356  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
357  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
358  */
359 static
360 bool univariate_for_sparse_jac(
361  size_t q, /**< number of columns in R */
362  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
363  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
364  )
365 {
366  assert(r.size() == q);
367  assert(s.size() == q);
368 
369  s = r;
370 
371  return true;
372 }
373 
374 /** Computes sparsity of jacobian during a reverse sweep
375  *
376  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
377  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
378  */
379 static
380 bool univariate_rev_sparse_jac(
381  size_t q, /**< number of rows in R */
382  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
383  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
384  )
385 {
386  assert(r.size() == q);
387  assert(s.size() == q);
388 
389  s = r;
390 
391  return true;
392 }
393 
394 /** computes sparsity of hessian during a reverse sweep
395  *
396  * Assume V(x) = (g(f(x)))'' R with f(x) = x^p for a function g:R->R and a matrix R.
397  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
398  */ /*lint -e715*/
399 static
400 bool univariate_rev_sparse_hes(
401  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
402  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
403  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
404  size_t q, /**< number of columns in R, U, and V */
405  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
406  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
407  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
408  )
409 { /*lint --e{439,715}*/ /* @todo take vx into account */
410  assert(r.size() == q);
411  assert(s.size() == 1);
412  assert(t.size() == 1);
413  assert(u.size() == q);
414  assert(v.size() == q);
415 
416  // T(x) = g'(f(x)) * f'(x) = S * f'(x), and f' is not identically 0
417  t[0] = s[0];
418 
419  // V(x) = g''(f(x)) f'(x) f'(x) R + g'(f(x)) f''(x) R
420  // = f'(x) U + S f''(x) R, with f'(x) and f''(x) not identically 0
421  v = u;
422  if( s[0] )
423  for( size_t j = 0; j < q; ++j )
424  if( r[j] )
425  v[j] = true;
426 
427  return true;
428 }
429 
430 
431 /** Automatic differentiation of x -> x^p, p>=2 integer, as CppAD user-atomic function.
432  *
433  * This class implements forward and reverse operations for the function x -> x^p for use within CppAD.
434  * While CppAD would implement integer powers as a recursion of multiplications, we still use pow functions as they allow us to avoid overestimation in interval arithmetics.
435  *
436  * @todo treat the exponent as a (variable) argument to the function, with the assumption that we never differentiate w.r.t. it (this should make the approach threadsafe again)
437  */
438 template<class Type>
439 class atomic_posintpower : public CppAD::atomic_base<Type>
440 {
441 public:
442  atomic_posintpower()
443  : CppAD::atomic_base<Type>("posintpower"),
444  exponent(0)
445  {
446  /* indicate that we want to use bool-based sparsity pattern */
447  this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
448  }
449 
450 private:
451  /** exponent value for next call to forward or reverse */
452  int exponent;
453 
454  /** stores exponent value corresponding to next call to forward or reverse
455  *
456  * how is this supposed to be threadsafe? (we use only one global instantiation of this class)
457  * TODO according to the CppAD 2018 docu, using this function is deprecated; what is the modern way to do this?
458  */
459  virtual void set_old(size_t id)
460  {
461  exponent = (int) id;
462  }
463 
464  /** forward sweep of positive integer power
465  *
466  * Given the taylor coefficients for x, we have to compute the taylor coefficients for f(x),
467  * that is, given tx = (x, x', x'', ...), we compute the coefficients ty = (y, y', y'', ...)
468  * in the taylor expansion of f(x) = x^p.
469  * Thus, y = x^p
470  * = tx[0]^p,
471  * y' = p * x^(p-1) * x'
472  * = p * tx[0]^(p-1) * tx[1],
473  * y'' = 1/2 * p * (p-1) * x^(p-2) * x'^2 + p * x^(p-1) * x''
474  * = 1/2 * p * (p-1) * tx[0]^(p-2) * tx[1]^2 + p * tx[0]^(p-1) * tx[2]
475  */
476  bool forward(
477  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
478  size_t p, /**< highest order Taylor coefficient that we are evaluating */
479  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
480  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
481  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
482  CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
483  )
484  {
485  assert(exponent > 1);
486  assert(tx.size() >= p+1);
487  assert(ty.size() >= p+1);
488  assert(q <= p);
489 
490  if( vx.size() > 0 )
491  {
492  assert(vx.size() == 1);
493  assert(vy.size() == 1);
494  assert(p == 0);
495 
496  vy[0] = vx[0];
497  }
498 
499  if( q == 0 /* q <= 0 && 0 <= p */ )
500  {
501  ty[0] = CppAD::pow(tx[0], exponent);
502  }
503 
504  if( q <= 1 && 1 <= p )
505  {
506  ty[1] = CppAD::pow(tx[0], exponent-1) * tx[1];
507  ty[1] *= double(exponent);
508  }
509 
510  if( q <= 2 && 2 <= p )
511  {
512  if( exponent > 2 )
513  {
514  // ty[2] = 1/2 * exponent * (exponent-1) * pow(tx[0], exponent-2) * tx[1] * tx[1] + exponent * pow(tx[0], exponent-1) * tx[2];
515  ty[2] = CppAD::pow(tx[0], exponent-2) * tx[1] * tx[1];
516  ty[2] *= (exponent-1) / 2.0;
517  ty[2] += CppAD::pow(tx[0], exponent-1) * tx[2];
518  ty[2] *= exponent;
519  }
520  else
521  {
522  assert(exponent == 2);
523  // ty[2] = 1/2 * exponent * tx[1] * tx[1] + exponent * tx[0] * tx[2];
524  ty[2] = tx[1] * tx[1] + 2.0 * tx[0] * tx[2];
525  }
526  }
527 
528  /* higher order derivatives not implemented */
529  if( p > 2 )
530  return false;
531 
532  return true;
533  }
534 
535  /** reverse sweep of positive integer power
536  *
537  * Assume y(x) is a function of the taylor coefficients of f(x) = x^p for x, i.e.,
538  * y(x) = [ x^p, p * x^(p-1) * x', p * (p-1) * x^(p-2) * x'^2 + p * x^(p-1) * x'', ... ].
539  * Then in the reverse sweep we have to compute the elements of \f$\partial h / \partial x^[l], l = 0, ..., k,\f$
540  * where x^[l] is the l'th taylor coefficient (x, x', x'', ...) and h(x) = g(y(x)) for some function g:R^k -> R.
541  * That is, we have to compute
542  *\f$
543  * px[l] = \partial h / \partial x^[l] = (\partial g / \partial y) * (\partial y / \partial x^[l])
544  * = \sum_{i=0}^k (\partial g / \partial y_i) * (\partial y_i / \partial x^[l])
545  * = \sum_{i=0}^k py[i] * (\partial y_i / \partial x^[l])
546  * \f$
547  *
548  * For k = 0, this means
549  *\f$
550  * px[0] = py[0] * (\partial y_0 / \partial x^[0])
551  * = py[0] * (\partial x^p / \partial x)
552  * = py[0] * p * tx[0]^(p-1)
553  *\f$
554  *
555  * For k = 1, this means
556  * \f$
557  * px[0] = py[0] * (\partial y_0 / \partial x^[0]) + py[1] * (\partial y_1 / \partial x^[0])
558  * = py[0] * (\partial x^p / \partial x) + py[1] * (\partial (p * x^(p-1) * x') / \partial x)
559  * = py[0] * p * tx[0]^(p-1) + py[1] * p * (p-1) * tx[0]^(p-2) * tx[1]
560  * px[1] = py[0] * (\partial y_0 / \partial x^[1]) + py[1] * (\partial y_1 / \partial x^[1])
561  * = py[0] * (\partial x^p / \partial x') + py[1] * (\partial (p * x^(p-1) x') / \partial x')
562  * = py[0] * 0 + py[1] * p * tx[0]^(p-1)
563  * \f$
564  */ /*lint -e715*/
565  bool reverse(
566  size_t p, /**< highest order Taylor coefficient that we are evaluating */
567  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
568  const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
569  CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
570  const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
571  )
572  { /*lint --e{715}*/
573  assert(exponent > 1);
574  assert(px.size() >= p+1);
575  assert(py.size() >= p+1);
576  assert(tx.size() >= p+1);
577 
578  switch( p )
579  {
580  case 0:
581  // px[0] = py[0] * exponent * pow(tx[0], exponent-1);
582  px[0] = py[0] * CppAD::pow(tx[0], exponent-1);
583  px[0] *= exponent;
584  break;
585 
586  case 1:
587  // px[0] = py[0] * exponent * pow(tx[0], exponent-1) + py[1] * exponent * (exponent-1) * pow(tx[0], exponent-2) * tx[1];
588  px[0] = py[1] * tx[1] * CppAD::pow(tx[0], exponent-2);
589  px[0] *= exponent-1;
590  px[0] += py[0] * CppAD::pow(tx[0], exponent-1);
591  px[0] *= exponent;
592  // px[1] = py[1] * exponent * pow(tx[0], exponent-1);
593  px[1] = py[1] * CppAD::pow(tx[0], exponent-1);
594  px[1] *= exponent;
595  break;
596 
597  default:
598  return false;
599  }
600 
601  return true;
602  }
603 
604  using CppAD::atomic_base<Type>::for_sparse_jac;
605 
606  /** computes sparsity of jacobian during a forward sweep
607  *
608  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
609  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
610  */
611  bool for_sparse_jac(
612  size_t q, /**< number of columns in R */
613  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
614  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
615  )
616  {
617  return univariate_for_sparse_jac(q, r, s);
618  }
619 
620  using CppAD::atomic_base<Type>::rev_sparse_jac;
621 
622  /** computes sparsity of jacobian during a reverse sweep
623  *
624  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
625  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
626  */
627  bool rev_sparse_jac(
628  size_t q, /**< number of rows in R */
629  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
630  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
631  )
632  {
633  return univariate_rev_sparse_jac(q, r, s);
634  }
635 
636  using CppAD::atomic_base<Type>::rev_sparse_hes;
637 
638  /** computes sparsity of hessian during a reverse sweep
639  *
640  * Assume V(x) = (g(f(x)))'' R with f(x) = x^p for a function g:R->R and a matrix R.
641  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
642  */
643  bool rev_sparse_hes(
644  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
645  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
646  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
647  size_t q, /**< number of columns in R, U, and V */
648  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
649  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
650  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
651  )
652  {
653  return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
654  }
655 };
656 
657 /** power function with natural exponents */
658 template<class Type>
659 static
660 void posintpower(
661  const vector<Type>& in, /**< vector which first argument is base */
662  vector<Type>& out, /**< vector where to store result in first argument */
663  size_t exponent /**< exponent */
664  )
665 {
666  static atomic_posintpower<typename Type::value_type> pip;
667  pip(in, out, exponent);
668 }
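/* usage sketch (illustrative): for r = x^5 with x an AD<double>, one would call
 *    vector< AD<double> > in(1, x), out(1);
 *    posintpower(in, out, 5);
 *    AD<double> r = out[0];
 * see evalAbs() and evalIntPower() below for the places where this wrapper is actually used */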
669 
670 #else
671 
672 /** power function with natural exponents */
673 template<class Type>
674 void posintpower(
675  const vector<Type>& in, /**< vector which first argument is base */
676  vector<Type>& out, /**< vector where to store result in first argument */
677  size_t exponent /**< exponent */
678  )
679 {
680  out[0] = pow(in[0], (int)exponent);
681 }
682 
683 #endif
684 
685 
686 #ifndef NO_CPPAD_USER_ATOMIC
687 
688 /** Automatic differentiation of x -> sign(x)abs(x)^p, p>=1, as CppAD user-atomic function.
689  *
690  * This class implements forward and reverse operations for the function x -> sign(x)abs(x)^p for use within CppAD.
691  * While we otherwise would have to use discontinuous sign and abs functions, our own implementation allows to provide
692  * a continuously differentiable function.
693  *
694  * @todo treat the exponent as a (variable) argument to the function, with the assumption that we never differentiate w.r.t. it (this should make the approach threadsafe again)
695  */
696 template<class Type>
697 class atomic_signpower : public CppAD::atomic_base<Type>
698 {
699 public:
700  atomic_signpower()
701  : CppAD::atomic_base<Type>("signpower"),
702  exponent(0.0)
703  {
704  /* indicate that we want to use bool-based sparsity pattern */
705  this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
706  }
707 
708 private:
709  /** exponent for use in next call to forward or reverse */
710  SCIP_Real exponent;
711 
712  /** stores exponent corresponding to next call to forward or reverse
713  *
714  * How is this supposed to be threadsafe? (we use only one global instantiation of this class)
715  * TODO according to the CppAD 2018 docu, using this function is deprecated; what is the modern way to do this?
716  */
717  virtual void set_old(size_t id)
718  {
719  exponent = SCIPexprGetSignPowerExponent((SCIP_EXPR*)(void*)id);
720  }
721 
722  /** forward sweep of signpower
723  *
724  * Given the taylor coefficients for x, we have to compute the taylor coefficients for f(x),
725  * that is, given tx = (x, x', x'', ...), we compute the coefficients ty = (y, y', y'', ...)
726  * in the taylor expansion of f(x) = sign(x)abs(x)^p.
727  * Thus, y = sign(x)abs(x)^p
728  * = sign(tx[0])abs(tx[0])^p,
729  * y' = p * abs(x)^(p-1) * x'
730  * = p * abs(tx[0])^(p-1) * tx[1],
731  * y'' = 1/2 * p * (p-1) * sign(x) * abs(x)^(p-2) * x'^2 + p * abs(x)^(p-1) * x''
732  * = 1/2 * p * (p-1) * sign(tx[0]) * abs(tx[0])^(p-2) * tx[1]^2 + p * abs(tx[0])^(p-1) * tx[2]
733  */
734  bool forward(
735  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
736  size_t p, /**< highest order Taylor coefficient that we are evaluating */
737  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
738  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
739  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
740  CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
741  )
742  {
743  assert(exponent > 0.0);
744  assert(tx.size() >= p+1);
745  assert(ty.size() >= p+1);
746  assert(q <= p);
747 
748  if( vx.size() > 0 )
749  {
750  assert(vx.size() == 1);
751  assert(vy.size() == 1);
752  assert(p == 0);
753 
754  vy[0] = vx[0];
755  }
756 
757  if( q == 0 /* q <= 0 && 0 <= p */ )
758  {
759  ty[0] = SIGN(tx[0]) * pow(REALABS(tx[0]), exponent);
760  }
761 
762  if( q <= 1 && 1 <= p )
763  {
764  ty[1] = pow(REALABS(tx[0]), exponent - 1.0) * tx[1];
765  ty[1] *= exponent;
766  }
767 
768  if( q <= 2 && 2 <= p )
769  {
770  if( exponent != 2.0 )
771  {
772  ty[2] = SIGN(tx[0]) * pow(REALABS(tx[0]), exponent - 2.0) * tx[1] * tx[1];
773  ty[2] *= (exponent - 1.0) / 2.0;
774  ty[2] += pow(REALABS(tx[0]), exponent - 1.0) * tx[2];
775  ty[2] *= exponent;
776  }
777  else
778  {
779  // y'' = 2 (1/2 * sign(x) * x'^2 + |x|*x'') = sign(tx[0]) * tx[1]^2 + 2 * abs(tx[0]) * tx[2]
780  ty[2] = SIGN(tx[0]) * tx[1] * tx[1];
781  ty[2] += 2.0 * REALABS(tx[0]) * tx[2];
782  }
783  }
784 
785  /* higher order derivatives not implemented */
786  if( p > 2 )
787  return false;
788 
789  return true;
790  }
791 
792  /** reverse sweep of signpower
793  *
794  * Assume y(x) is a function of the taylor coefficients of f(x) = sign(x)|x|^p for x, i.e.,
795  * y(x) = [ f(x), f'(x), f''(x), ... ].
796  * Then in the reverse sweep we have to compute the elements of \f$\partial h / \partial x^[l], l = 0, ..., k,\f$
797  * where x^[l] is the l'th taylor coefficient (x, x', x'', ...) and h(x) = g(y(x)) for some function g:R^k -> R.
798  * That is, we have to compute
799  *\f$
800  * px[l] = \partial h / \partial x^[l] = (\partial g / \partial y) * (\partial y / \partial x^[l])
801  * = \sum_{i=0}^k (\partial g / \partial y_i) * (\partial y_i / \partial x^[l])
802  * = \sum_{i=0}^k py[i] * (\partial y_i / \partial x^[l])
803  *\f$
804  *
805  * For k = 0, this means
806  *\f$
807  * px[0] = py[0] * (\partial y_0 / \partial x^[0])
808  * = py[0] * (\partial f(x) / \partial x)
809  * = py[0] * p * abs(tx[0])^(p-1)
810  * \f$
811  *
812  * For k = 1, this means
813  *\f$
814  * px[0] = py[0] * (\partial y_0 / \partial x^[0]) + py[1] * (\partial y_1 / \partial x^[0])
815  * = py[0] * (\partial f(x) / \partial x) + py[1] * (\partial f'(x) / \partial x)
816  * = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
817  * px[1] = py[0] * (\partial y_0 / \partial x^[1]) + py[1] * (\partial y_1 / \partial x^[1])
818  * = py[0] * (\partial f(x) / \partial x') + py[1] * (\partial f'(x) / \partial x')
819  * = py[0] * 0 + py[1] * p * abs(tx[0])^(p-1)
820  * \f$
821  */
822  bool reverse(
823  size_t p, /**< highest order Taylor coefficient that we are evaluating */
824  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
825  const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
826  CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
827  const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
828  )
829  { /*lint --e{715}*/
830  assert(exponent > 1);
831  assert(px.size() >= p+1);
832  assert(py.size() >= p+1);
833  assert(tx.size() >= p+1);
834 
835  switch( p )
836  {
837  case 0:
838  // px[0] = py[0] * p * pow(abs(tx[0]), p-1);
839  px[0] = py[0] * pow(REALABS(tx[0]), exponent - 1.0);
840  px[0] *= exponent;
841  break;
842 
843  case 1:
844  if( exponent != 2.0 )
845  {
846  // px[0] = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
847  px[0] = py[1] * tx[1] * pow(REALABS(tx[0]), exponent - 2.0) * SIGN(tx[0]);
848  px[0] *= exponent - 1.0;
849  px[0] += py[0] * pow(REALABS(tx[0]), exponent - 1.0);
850  px[0] *= exponent;
851  // px[1] = py[1] * p * abs(tx[0])^(p-1)
852  px[1] = py[1] * pow(REALABS(tx[0]), exponent - 1.0);
853  px[1] *= exponent;
854  }
855  else
856  {
857  // px[0] = py[0] * 2.0 * abs(tx[0]) + py[1] * 2.0 * sign(tx[0]) * tx[1]
858  px[0] = py[1] * tx[1] * SIGN(tx[0]);
859  px[0] += py[0] * REALABS(tx[0]);
860  px[0] *= 2.0;
861  // px[1] = py[1] * 2.0 * abs(tx[0])
862  px[1] = py[1] * REALABS(tx[0]);
863  px[1] *= 2.0;
864  }
865  break;
866 
867  default:
868  return false;
869  }
870 
871  return true;
872  }
873 
874  using CppAD::atomic_base<Type>::for_sparse_jac;
875 
876  /** computes sparsity of jacobian during a forward sweep
877  *
878  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
879  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
880  */
881  bool for_sparse_jac(
882  size_t q, /**< number of columns in R */
883  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
884  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
885  )
886  {
887  return univariate_for_sparse_jac(q, r, s);
888  }
889 
890  using CppAD::atomic_base<Type>::rev_sparse_jac;
891 
892  /** computes sparsity of jacobian during a reverse sweep
893  *
894  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
895  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
896  */
897  bool rev_sparse_jac(
898  size_t q, /**< number of rows in R */
899  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
900  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
901  )
902  {
903  return univariate_rev_sparse_jac(q, r, s);
904  }
905 
906  using CppAD::atomic_base<Type>::rev_sparse_hes;
907 
908  /** computes sparsity of hessian during a reverse sweep
909  *
910  * Assume V(x) = (g(f(x)))'' R with f(x) = sign(x)abs(x)^p for a function g:R->R and a matrix R.
911  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
912  */
913  bool rev_sparse_hes(
914  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
915  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
916  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
917  size_t q, /**< number of columns in S and R */
918  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
919  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
920  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
921  )
922  {
923  return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
924  }
925 
926 };
927 
928 /** Specialization of atomic_signpower template for intervals */
929 template<>
930 class atomic_signpower<SCIPInterval> : public CppAD::atomic_base<SCIPInterval>
931 {
932 public:
933  atomic_signpower()
934  : CppAD::atomic_base<SCIPInterval>("signpowerint"),
935  exponent(0.0)
936  {
937  /* indicate that we want to use bool-based sparsity pattern */
938  this->option(CppAD::atomic_base<SCIPInterval>::bool_sparsity_enum);
939  }
940 
941 private:
942  /** exponent for use in next call to forward or reverse */
943  SCIP_Real exponent;
944 
945  /** stores exponent corresponding to next call to forward or reverse
946  *
947  * How is this supposed to be threadsafe? (we use only one global instantiation of this class)
948  * TODO according to the CppAD 2018 docu, using this function is deprecated; what is the modern way to do this?
949  */
950  virtual void set_old(size_t id)
951  {
952  exponent = SCIPexprGetSignPowerExponent((SCIP_EXPR*)(void*)id);
953  }
954 
955  /** specialization of atomic_signpower::forward template for SCIPInterval
956  *
957  * @todo try to compute tighter resultants
958  */
959  bool forward(
960  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
961  size_t p, /**< highest order Taylor coefficient that we are evaluating */
962  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
963  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
964  const CppAD::vector<SCIPInterval>& tx, /**< values for taylor coefficients of x */
965  CppAD::vector<SCIPInterval>& ty /**< vector to store taylor coefficients of y */
966  )
967  {
968  assert(exponent > 0.0);
969  assert(tx.size() >= p+1);
970  assert(ty.size() >= p+1);
971  assert(q <= p);
972 
973  if( vx.size() > 0 )
974  {
975  assert(vx.size() == 1);
976  assert(vy.size() == 1);
977  assert(p == 0);
978 
979  vy[0] = vx[0];
980  }
981 
982  if( q == 0 /* q <= 0 && 0 <= p */ )
983  {
984  ty[0] = CppAD::signpow(tx[0], exponent);
985  }
986 
987  if( q <= 1 && 1 <= p )
988  {
989  ty[1] = CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0) * tx[1];
990  ty[1] *= exponent;
991  }
992 
993  if( q <= 2 && 2 <= p )
994  {
995  if( exponent != 2.0 )
996  {
997  ty[2] = CppAD::signpow(tx[0], exponent - 2.0) * CppAD::square(tx[1]);
998  ty[2] *= (exponent - 1.0) / 2.0;
999  ty[2] += CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0) * tx[2];
1000  ty[2] *= exponent;
1001  }
1002  else
1003  {
1004  // y'' = 2 (1/2 * sign(x) * x'^2 + |x|*x'') = sign(tx[0]) * tx[1]^2 + 2 * abs(tx[0]) * tx[2]
1005  ty[2] = CppAD::sign(tx[0]) * CppAD::square(tx[1]);
1006  ty[2] += 2.0 * CppAD::abs(tx[0]) * tx[2];
1007  }
1008  }
1009 
1010  /* higher order derivatives not implemented */
1011  if( p > 2 )
1012  return false;
1013 
1014  return true;
1015  }
1016 
1017  /** specialization of atomic_signpower::reverse template for SCIPInterval
1018  *
1019  * @todo try to compute tighter resultants
1020  */
1021  bool reverse(
1022  size_t p, /**< highest order Taylor coefficient that we are evaluating */
1023  const CppAD::vector<SCIPInterval>& tx, /**< values for taylor coefficients of x */
1024  const CppAD::vector<SCIPInterval>& ty, /**< values for taylor coefficients of y */
1025  CppAD::vector<SCIPInterval>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
1026  const CppAD::vector<SCIPInterval>& py /**< values for partial derivatives of g(x) w.r.t. y */
1027  )
1028  { /*lint --e{715} */
1029  assert(exponent > 1);
1030  assert(px.size() >= p+1);
1031  assert(py.size() >= p+1);
1032  assert(tx.size() >= p+1);
1033 
1034  switch( p )
1035  {
1036  case 0:
1037  // px[0] = py[0] * p * pow(abs(tx[0]), p-1);
1038  px[0] = py[0] * CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0);
1039  px[0] *= exponent;
1040  break;
1041 
1042  case 1:
1043  if( exponent != 2.0 )
1044  {
1045  // px[0] = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
1046  px[0] = py[1] * tx[1] * CppAD::signpow(tx[0], exponent - 2.0);
1047  px[0] *= exponent - 1.0;
1048  px[0] += py[0] * CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0);
1049  px[0] *= exponent;
1050  // px[1] = py[1] * p * abs(tx[0])^(p-1)
1051  px[1] = py[1] * CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0);
1052  px[1] *= exponent;
1053  }
1054  else
1055  {
1056  // px[0] = py[0] * 2.0 * abs(tx[0]) + py[1] * 2.0 * sign(tx[0]) * tx[1]
1057  px[0] = py[1] * tx[1] * CppAD::sign(tx[0]);
1058  px[0] += py[0] * CppAD::abs(tx[0]);
1059  px[0] *= 2.0;
1060  // px[1] = py[1] * 2.0 * abs(tx[0])
1061  px[1] = py[1] * CppAD::abs(tx[0]);
1062  px[1] *= 2.0;
1063  }
1064  break;
1065 
1066  default:
1067  return false;
1068  }
1069 
1070  return true;
1071  }
1072 
1073  using CppAD::atomic_base<SCIPInterval>::for_sparse_jac;
1074 
1075  /** computes sparsity of jacobian during a forward sweep
1076  *
1077  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
1078  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
1079  */
1080  bool for_sparse_jac(
1081  size_t q, /**< number of columns in R */
1082  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
1083  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
1084  )
1085  {
1086  return univariate_for_sparse_jac(q, r, s);
1087  }
1088 
1089  using CppAD::atomic_base<SCIPInterval>::rev_sparse_jac;
1090 
1091  /** computes sparsity of jacobian during a reverse sweep
1092  *
1093  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
1094  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
1095  */
1096  bool rev_sparse_jac(
1097  size_t q, /**< number of rows in R */
1098  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
1099  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
1100  )
1101  {
1102  return univariate_rev_sparse_jac(q, r, s);
1103  }
1104 
1105  using CppAD::atomic_base<SCIPInterval>::rev_sparse_hes;
1106 
1107  /** computes sparsity of hessian during a reverse sweep
1108  *
1109  * Assume V(x) = (g(f(x)))'' R with f(x) = sign(x)abs(x)^p for a function g:R->R and a matrix R.
1110  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
1111  */
1112  bool rev_sparse_hes(
1113  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
1114  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
1115  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
1116  size_t q, /**< number of columns in S and R */
1117  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
1118  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
1119  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
1120  )
1121  {
1122  return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
1123  }
1124 };
1125 
1126 /** template for evaluation for signpower operator */
1127 template<class Type>
1128 static
1129 void evalSignPower(
1130  Type& resultant, /**< resultant */
1131  const Type& arg, /**< operand */
1132  SCIP_EXPR* expr /**< expression that holds the exponent */
1133  )
1134 {
1135  vector<Type> in(1, arg);
1136  vector<Type> out(1);
1137 
1138  static atomic_signpower<typename Type::value_type> sp;
1139  sp(in, out, (size_t)(void*)expr);
1140 
1141  resultant = out[0];
1142  return;
1143 }
1144 
1145 #else
1146 
1147 /** template for evaluation for signpower operator
1148  *
1149  * Only implemented for real numbers, thus gives error by default.
1150  */
1151 template<class Type>
1152 static
1153 void evalSignPower(
1154  Type& resultant, /**< resultant */
1155  const Type& arg, /**< operand */
1156  SCIP_EXPR* expr /**< expression that holds the exponent */
1157  )
1158 { /*lint --e{715}*/
1159  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1160  "evalSignPower()",
1161  "Error: SignPower not implemented for this value type"
1162  );
1163 }
1164 
1165 /** specialization of signpower evaluation for real numbers */
1166 template<>
1167 void evalSignPower(
1168  CppAD::AD<double>& resultant, /**< resultant */
1169  const CppAD::AD<double>& arg, /**< operand */
1170  SCIP_EXPR* expr /**< expression that holds the exponent */
1171  )
1172 {
1173  SCIP_Real exponent;
1174 
1175  exponent = SCIPexprGetSignPowerExponent(expr);
1176 
1177  if( arg == 0.0 )
1178  resultant = 0.0;
1179  else if( arg > 0.0 )
1180  resultant = pow( arg, exponent);
1181  else
1182  resultant = -pow(-arg, exponent);
1183 }
1184 
1185 #endif
1186 
1187 
1188 #ifndef NO_CPPAD_USER_ATOMIC
1189 
1190 template<class Type>
1191 SCIP_RETCODE exprEvalUser(
1192  SCIP_EXPR* expr,
1193  Type* x,
1194  Type& funcval,
1195  Type* gradient,
1196  Type* hessian
1197  )
1198 {
1199  return SCIPexprEvalUser(expr, x, &funcval, gradient, hessian); /*lint !e429*/
1200 }
1201 
1202 template<>
1203 SCIP_RETCODE exprEvalUser(
1204  SCIP_EXPR* expr,
1205  SCIPInterval* x,
1206  SCIPInterval& funcval,
1207  SCIPInterval* gradient,
1208  SCIPInterval* hessian
1209  )
1210 {
1211  return SCIPexprEvalIntUser(expr, SCIPInterval::infinity, x, &funcval, gradient, hessian);
1212 }
1213 
1214 /** Automatic differentiation of user expression as CppAD user-atomic function.
1215  *
1216  * This class implements forward and reverse operations for a function given by a user expression for use within CppAD.
1217  */
1218 template<class Type>
1219 class atomic_userexpr : public CppAD::atomic_base<Type>
1220 {
1221 public:
1222  atomic_userexpr()
1223  : CppAD::atomic_base<Type>("userexpr"),
1224  expr(NULL)
1225  {
1226  /* indicate that we want to use bool-based sparsity pattern */
1227  this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
1228  }
1229 
1230 private:
1231  /** user expression */
1232  SCIP_EXPR* expr;
1233 
1234  /** stores user expression corresponding to next call to forward or reverse
1235  *
1236  * how is this supposed to be threadsafe? (we use only one global instantiation of this class)
1237  * TODO according to the CppAD 2018 docu, using this function is deprecated; what is the modern way to do this?
1238  */
1239  virtual void set_old(size_t id)
1240  {
1241  expr = (SCIP_EXPR*)(void*)id;
1242  assert(SCIPexprGetOperator(expr) == SCIP_EXPR_USER);
1243  }
1244 
1245  /** forward sweep of userexpr
1246  *
1247  * We follow http://www.coin-or.org/CppAD/Doc/atomic_forward.xml
1248  * Note, that p and q are interchanged!
1249  *
1250  * For a scalar variable t, let
1251  * Y(t) = f(X(t))
1252  * X(t) = x^0 + x^1 t^1 + ... + x^p t^p
1253  * where for x^i the i is an index, while for t^i the i is an exponent.
1254  * Thus, x^k = 1/k! X^(k) (0), where X^(k)(.) denotes the k-th derivative.
1255  *
1256  * Next, let y^k = 1/k! Y^(k)(0) be the k'th taylor coefficient of Y. Thus,
1257  * y^0 = Y^(0)(0) = Y(0) = f(X(0)) = f(x^0)
1258  * y^1 = Y^(1)(0) = Y'(0) = f'(X(0)) * X'(0) = f'(x^0) * x^1
1259  * y^2 = 1/2 Y^(2)(0) = 1/2 Y''(0) = 1/2 X'(0) * f''(X(0)) X'(0) + 1/2 * f'(X(0)) * X''(0) = 1/2 x^1 * f''(x^0) * x^1 + f'(x^0) * x^2
1260  *
1261  * As x^k = (tx[k], tx[(p+1)+k], tx[2*(p+1)+k], ..., tx[n*(p+1)+k]), we get
1262  * ty[0] = y^0 = f(x^0) = f(tx[{1..n}*(p+1)])
1263  * ty[1] = y^1 = f'(x^0) * tx[{1..n}*(p+1)+1] = sum(i=1..n, grad[i] * tx[i*(p+1)+1]), where grad = f'(x^0)
1264  * ty[2] = 1/2 sum(i,j=1..n, x[i*(p+1)+1] * x[j*(p+1)+1] * hessian[i,j]) + sum(i=1..n, grad[i] * x[i*(p+1)+2])
1265  */
1266  bool forward(
1267  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
1268  size_t p, /**< highest order Taylor coefficient that we are evaluating */
1269  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
1270  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
1271  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
1272  CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
1273  )
1274  {
1275  assert(expr != NULL);
1276  assert(ty.size() == p+1);
1277  assert(q <= p);
1278 
1279  size_t n = tx.size() / (p+1);
1280  assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1281  assert(n >= 1);
1282 
1283  if( vx.size() > 0 )
1284  {
1285  assert(vx.size() == n);
1286  assert(vy.size() == 1);
1287  assert(p == 0);
1288 
1289  /* y_0 is a variable if at least one of the x_i is a variable */
1290  vy[0] = false;
1291  for( size_t i = 0; i < n; ++i )
1292  if( vx[i] )
1293  {
1294  vy[0] = true;
1295  break;
1296  }
1297  }
1298 
1299  Type* x = new Type[n];
1300  Type* gradient = NULL;
1301  Type* hessian = NULL;
1302 
1303  if( q <= 2 && 1 <= p )
1304  gradient = new Type[n];
1305  if( q <= 2 && 2 <= p )
1306  hessian = new Type[n*n];
1307 
1308  for( size_t i = 0; i < n; ++i )
1309  x[i] = tx[i * (p+1) + 0]; /*lint !e835*/
1310 
1311  if( exprEvalUser(expr, x, ty[0], gradient, hessian) != SCIP_OKAY )
1312  {
1313  delete[] x;
1314  delete[] gradient;
1315  delete[] hessian;
1316  return false;
1317  }
1318 
1319  if( gradient != NULL )
1320  {
1321  ty[1] = 0.0;
1322  for( size_t i = 0; i < n; ++i )
1323  ty[1] += gradient[i] * tx[i * (p+1) + 1];
1324  }
1325 
1326  if( hessian != NULL )
1327  {
1328  assert(gradient != NULL);
1329 
1330  ty[2] = 0.0;
1331  for( size_t i = 0; i < n; ++i )
1332  {
1333  for( size_t j = 0; j < n; ++j )
1334  ty[2] += 0.5 * hessian[i*n+j] * tx[i * (p+1) + 1] * tx[j * (p+1) + 1];
1335 
1336  ty[2] += gradient[i] * tx[i * (p+1) + 2];
1337  }
1338  }
1339 
1340  delete[] x;
1341  delete[] gradient;
1342  delete[] hessian;
1343 
1344  /* higher order derivatives not implemented */
1345  if( p > 2 )
1346  return false;
1347 
1348  return true;
1349  }
1350 
1351  /** reverse sweep of userexpr
1352  *
1353  * We follow http://www.coin-or.org/CppAD/Doc/atomic_reverse.xml
1354  * Note that the q used there is our p.
1355  *
1356  * For a scalar variable t, let
1357  * Y(t) = f(X(t))
1358  * X(t) = x^0 + x^1 t^1 + ... + x^p t^p
1359  * where for x^i the i is an index, while for t^i the i is an exponent.
1360  * Thus, x^k = 1/k! X^(k) (0), where X^(k)(.) denotes the k-th derivative.
1361  *
1362  * Next, let y^k = 1/k! Y^(k)(0) be the k'th taylor coefficient of Y. Thus,
1363  * Y(t) = y^0 + y^1 t^1 + y^2 t^2 + ...
1364  * y^0, y^1, ... are the taylor coefficients of f(x).
1365  *
1366  * Further, let F(x^0,..,x^p) by given as F^k(x) = y^k. Thus,
1367  * F^0(x) = y^0 = Y^(0)(0) = f(x^0)
1368  * F^1(x) = y^1 = Y^(1)(0) = f'(x^0) * x^1
1369  * F^2(x) = y^2 = 1/2 Y''(0) = 1/2 x^1 f''(x^0) x^1 + f'(x^0) x^2
1370  *
1371  * Given functions G: R^(p+1) -> R and H: R^(n*(p+1)) -> R, where H(x^0, x^1, .., x^p) = G(F(x^0,..,x^p)),
1372  * we have to return the value of \f$\partial H / \partial x^l, l = 0..p,\f$ in px. Therefore,
1373  * \f$
1374  * px^l = \partial H / \partial x^l
1375  * = sum(k=0..p, (\partial G / \partial y^k) * (\partial y^k / \partial x^l)
1376  * = sum(k=0..p, py[k] * (\partial F^k / \partial x^l)
1377  * \f$
1378  *
1379  * For p = 0, this means
1380  * \f$
1381  * px^0 = py[0] * \partial F^0 / \partial x^0
1382  * = py[0] * \partial f(x^0) / \partial x^0
1383  * = py[0] * f'(x^0)
1384  * \f$
1385  *
1386  * For p = 1, this means (for l = 0):
1387  * \f[
1388  * px^0 = py[0] * \partial F^0 / \partial x^0 + py[1] * \partial F^1 / \partial x^0
1389  * = py[0] * \partial f(x^0) / \partial x^0 + py[1] * \partial (f'(x^0) * x^1) / \partial x^0
1390  * = py[0] * f'(x^0) + py[1] * f''(x^0) * x^1
1391  * \f]
1392  * and (for l=1):
1393  * \f[
1394  * px^1 = py[0] * \partial F^0 / \partial x^1 + py[1] * \partial F^1 / \partial x^1
1395  * = py[0] * \partial f(x^0) / \partial x^1 + py[1] * \partial (f'(x^0) * x^1) / \partial x^1
1396  * = py[0] * 0 + py[1] * f'(x^0)
1397  * \f]
1398  *
1399  * As x^k = (tx[k], tx[(p+1)+k], tx[2*(p+1)+k], ..., tx[n*(p+1)+k]) and
1400  * px^k = (px[k], px[(p+1)+k], px[2*(p+1)+k], ..., px[n*(p+1)+k]), we get
1401  * for p = 0:
1402  * px[i] = (px^0)_i = py[0] * grad[i]
1403  * for p = 1:
1404  * px[i*2+0] = (px^0)_i = py[0] * grad[i] + py[1] * sum(j, hessian[j,i] * tx[j*2+1])
1405  * px[i*2+1] = (px^1)_i = py[1] * grad[i]
1406  */
1407  bool reverse(
1408  size_t p, /**< highest order Taylor coefficient that we are evaluating */
1409  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
1410  const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
1411  CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
1412  const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
1413  )
1414  {
1415  assert(expr != NULL);
1416  assert(px.size() == tx.size());
1417  assert(py.size() == p+1);
1418 
1419  size_t n = tx.size() / (p+1);
1420  assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1421  assert(n >= 1);
1422 
1423  Type* x = new Type[n];
1424  Type funcval;
1425  Type* gradient = new Type[n];
1426  Type* hessian = NULL;
1427 
1428  if( p == 1 )
1429  hessian = new Type[n*n];
1430 
1431  for( size_t i = 0; i < n; ++i )
1432  x[i] = tx[i * (p+1) + 0]; /*lint !e835*/
1433 
1434  if( exprEvalUser(expr, x, funcval, gradient, hessian) != SCIP_OKAY )
1435  {
1436  delete[] x;
1437  delete[] gradient;
1438  delete[] hessian;
1439  return false;
1440  }
1441 
1442  switch( p )
1443  {
1444  case 0:
1445  // px[j] = (px^0)_j = py[0] * grad[j]
1446  for( size_t i = 0; i < n; ++i )
1447  px[i] = py[0] * gradient[i];
1448  break;
1449 
1450  case 1:
1451  // px[i*2+0] = (px^0)_i = py[0] * grad[i] + py[1] * sum(j, hessian[j,i] * tx[j*2+1])
1452  // px[i*2+1] = (px^1)_i = py[1] * grad[i]
1453  assert(hessian != NULL);
1454  for( size_t i = 0; i < n; ++i )
1455  {
1456  px[i*2+0] = py[0] * gradient[i]; /*lint !e835*/
1457  for( size_t j = 0; j < n; ++j )
1458  px[i*2+0] += py[1] * hessian[i+n*j] * tx[j*2+1]; /*lint !e835*/
1459 
1460  px[i*2+1] = py[1] * gradient[i];
1461  }
1462  break;
1463 
1464  default:
1465  return false;
1466  }
1467 
1468  return true;
1469  } /*lint !e715*/
1470 
1471  using CppAD::atomic_base<Type>::for_sparse_jac;
1472 
1473  /** computes sparsity of jacobian during a forward sweep
1474  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
1475  * Since we assume f'(x) to be dense, the sparsity of S will be the sparsity of R.
1476  */
1477  bool for_sparse_jac(
1478  size_t q, /**< number of columns in R */
1479  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
1480  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
1481  )
1482  {
1483  assert(expr != NULL);
1484  assert(s.size() == q);
1485 
1486  size_t n = r.size() / q;
1487  assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1488 
1489  // sparsity for S(x) = f'(x) * R
1490  for( size_t j = 0; j < q; j++ )
1491  {
1492  s[j] = false;
1493  for( size_t i = 0; i < n; i++ )
1494  s[j] |= (bool)r[i * q + j]; /*lint !e1786*/
1495  }
1496 
1497  return true;
1498  }
1499 
1500  using CppAD::atomic_base<Type>::rev_sparse_jac;
1501 
1502  /** computes sparsity of jacobian during a reverse sweep
1503  * For a q x 1 matrix S, we have to return the sparsity pattern of the q x 1 matrix R(x) = S * f'(x).
1504  * Since we assume f'(x) to be dense, the sparsity of R will be the sparsity of S.
1505  */
1506  bool rev_sparse_jac(
1507  size_t q, /**< number of rows in R */
1508  const CppAD::vector<bool>& rt, /**< sparsity of R, rowwise */
1509  CppAD::vector<bool>& st /**< vector to store sparsity of S, rowwise */
1510  )
1511  {
1512  assert(expr != NULL);
1513  assert(rt.size() == q);
1514 
1515  size_t n = st.size() / q;
1516  assert(n == (size_t)SCIPexprGetNChildren(expr));
1517 
1518  // sparsity for S(x)^T = f'(x)^T * R^T
1519  for( size_t j = 0; j < q; j++ )
1520  for( size_t i = 0; i < n; i++ )
1521  st[i * q + j] = rt[j];
1522 
1523  return true;
1524  }
1525 
1526  using CppAD::atomic_base<Type>::rev_sparse_hes;
1527 
1528  /** computes sparsity of hessian during a reverse sweep
1529  * Assume V(x) = (g(f(x)))'' R for a function g:R->R and a matrix R.
1530  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
1531  */
1532  bool rev_sparse_hes(
1533  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
1534  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
1535  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
1536  size_t q, /**< number of columns in S and R */
1537  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
1538  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
1539  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
1540  )
1541  {
1542  assert(expr != NULL);
1543  size_t n = vx.size();
1544  assert((size_t)SCIPexprGetNChildren(expr) == n);
1545  assert(s.size() == 1);
1546  assert(t.size() == n);
1547  assert(r.size() == n * q);
1548  assert(u.size() == q);
1549  assert(v.size() == n * q);
1550 
1551  size_t i, j, k;
1552 
1553  // sparsity for T(x) = S(x) * f'(x)
1554  for( i = 0; i < n; ++i )
1555  t[i] = s[0];
1556 
1557  // V(x) = f'(x)^T * g''(y) * f'(x) * R + g'(y) * f''(x) * R
1558  // U(x) = g''(y) * f'(x) * R
1559  // S(x) = g'(y)
1560 
1561  // back propagate the sparsity for U
1562  for( j = 0; j < q; j++ )
1563  for( i = 0; i < n; i++ )
1564  v[ i * q + j] = u[j];
1565 
1566  // include forward Jacobian sparsity in Hessian sparsity
1567  // sparsity for g'(y) * f''(x) * R (Note f''(x) is assumed to be dense)
1568  if( s[0] )
1569  for( j = 0; j < q; j++ )
1570  for( i = 0; i < n; i++ )
1571  for( k = 0; k < n; ++k )
1572  v[ i * q + j] |= (bool) r[ k * q + j];
1573 
1574  return true;
1575  }
1576 
1577 };
1578 
1579 template<class Type>
1580 static
1581 void evalUser(
1582  Type& resultant, /**< resultant */
1583  const Type* args, /**< operands */
1584  SCIP_EXPR* expr /**< expression that holds the user expression */
1585  )
1586 {
1587  assert( args != 0 );
1588  vector<Type> in(args, args + SCIPexprGetNChildren(expr));
1589  vector<Type> out(1);
1590 
1591  static atomic_userexpr<typename Type::value_type> u;
1592  u(in, out, (size_t)(void*)expr);
1593 
1594  resultant = out[0];
1595  return;
1596 }
1597 
1598 #else
1599 
1600 template<class Type>
1601 static
1602 void evalUser(
1603  Type& resultant, /**< resultant */
1604  const Type* args, /**< operands */
1605  SCIP_EXPR* expr /**< expression that holds the user expression */
1606  )
1607 {
1608  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1609  "evalUser()",
1610  "Error: user expressions in CppAD not possible without CppAD user atomic facility"
1611  );
1612 }
1613 
1614 #endif
1615 
1616 /** template for evaluation for minimum operator
1617  *
1618  * Only implemented for real numbers, thus gives error by default.
1619  * @todo implement own userad function
1620  */ /*lint -e715*/
1621 template<class Type>
1622 static
1623 void evalMin(
1624  Type& resultant, /**< resultant */
1625  const Type& arg1, /**< first operand */
1626  const Type& arg2 /**< second operand */
1627  )
1628 { /*lint --e{715,1764}*/
1629  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1630  "evalMin()",
1631  "Error: Min not implemented for this value type"
1632  );
1633 }
1634 
1635 /** specialization of minimum evaluation for real numbers */
1636 template<>
1637 void evalMin(
1638  CppAD::AD<double>& resultant, /**< resultant */
1639  const CppAD::AD<double>& arg1, /**< first operand */
1640  const CppAD::AD<double>& arg2 /**< second operand */
1641  )
1642 {
1643  resultant = MIN(arg1, arg2);
1644 }
1645 
1646 /** template for evaluation for maximum operator
1647  *
1648  * Only implemented for real numbers, thus gives error by default.
1649  * @todo implement own userad function
1650  */ /*lint -e715*/
1651 template<class Type>
1652 static
1653 void evalMax(
1654  Type& resultant, /**< resultant */
1655  const Type& arg1, /**< first operand */
1656  const Type& arg2 /**< second operand */
1657  )
1658 { /*lint --e{715,1764}*/
1659  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1660  "evalMax()",
1661  "Error: Max not implemented for this value type"
1662  );
1663 }
1664 
1665 /** specialization of maximum evaluation for real numbers */
1666 template<>
1667 void evalMax(
1668  CppAD::AD<double>& resultant, /**< resultant */
1669  const CppAD::AD<double>& arg1, /**< first operand */
1670  const CppAD::AD<double>& arg2 /**< second operand */
1671  )
1672 {
1673  resultant = MAX(arg1, arg2);
1674 }
1675 
1676 /** template for evaluation of the square-root operator
1677  *
1678  * Default is to use the standard sqrt function.
1679  */
1680 template<class Type>
1681 static
1682 void evalSqrt(
1683  Type& resultant, /**< resultant */
1684  const Type& arg /**< operand */
1685  )
1686 {
1687  resultant = sqrt(arg);
1688 }
1689 
1690 /** template for evaluation of the absolute value operator */
1691 template<class Type>
1692 static
1693 void evalAbs(
1694  Type& resultant, /**< resultant */
1695  const Type& arg /**< operand */
1696  )
1697 {
1698  resultant = abs(arg);
1699 }
1700 
1701 /** specialization of absolute value evaluation for intervals
1702  *
1703  * Uses sqrt(x^2) for now. @todo implement own userad function
1704  */
1705 template<>
1706 void evalAbs(
1707  CppAD::AD<SCIPInterval>& resultant, /**< resultant */
1708  const CppAD::AD<SCIPInterval>& arg /**< operand */
1709  )
1710 {
1711  vector<CppAD::AD<SCIPInterval> > in(1, arg);
1712  vector<CppAD::AD<SCIPInterval> > out(1);
1713 
1714  posintpower(in, out, 2);
1715 
1716  resultant = sqrt(out[0]);
1717 }
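/* Reading aid: sqrt(x^2) equals |x| exactly in interval arithmetic as long as the square is the tight
 * even power, e.g. [-2,3]^2 = [0,9] and sqrt([0,9]) = [0,3] = abs([-2,3]); a plain x*x would yield
 * [-6,9] and hence a weaker enclosure, which is why posintpower is used here.
 */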
1718 
1719 /** integer power operation for arbitrary integer exponents */
1720 template<class Type>
1721 static
1722 void evalIntPower(
1723  Type& resultant, /**< resultant */
1724  const Type& arg, /**< operand */
1725  const int exponent /**< exponent */
1726  )
1727 {
1728  if( exponent > 1 )
1729  {
1730  vector<Type> in(1, arg);
1731  vector<Type> out(1);
1732 
1733  posintpower(in, out, exponent);
1734 
1735  resultant = out[0];
1736  return;
1737  }
1738 
1739  if( exponent < -1 )
1740  {
1741  vector<Type> in(1, arg);
1742  vector<Type> out(1);
1743 
1744  posintpower(in, out, -exponent);
1745 
1746  resultant = Type(1.0)/out[0];
1747  return;
1748  }
1749 
1750  if( exponent == 1 )
1751  {
1752  resultant = arg;
1753  return;
1754  }
1755 
1756  if( exponent == 0 )
1757  {
1758  resultant = Type(1.0);
1759  return;
1760  }
1761 
1762  assert(exponent == -1);
1763  resultant = Type(1.0)/arg;
1764 }
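/* Reading aid (derived from the cases above): evalIntPower dispatches as
 *    exponent >  1 :  posintpower(x, exponent),        e.g. x^5  -> posintpower(x, 5)
 *    exponent < -1 :  1.0 / posintpower(x, -exponent), e.g. x^-3 -> 1.0 / posintpower(x, 3)
 *    exponent ==  1:  x,    exponent == 0: 1.0,    exponent == -1: 1.0 / x
 */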
1765 
1766 /** CppAD compatible evaluation of an expression for given arguments and parameters */
1767 template<class Type>
1768 static
1769 SCIP_RETCODE eval(
1770  SCIP_EXPR* expr, /**< expression */
1771  const vector<Type>& x, /**< values of variables */
1772  SCIP_Real* param, /**< values of parameters */
1773  Type& val /**< buffer to store expression value */
1774  )
1775 {
1776  Type* buf = 0;
1777 
1778  assert(expr != NULL);
1779 
1780  /* todo use SCIP_MAXCHILD_ESTIMATE as in expression.c */
1781 
1782  if( SCIPexprGetNChildren(expr) )
1783  {
1784  if( BMSallocMemoryArray(&buf, SCIPexprGetNChildren(expr)) == NULL ) /*lint !e666*/
1785  return SCIP_NOMEMORY;
1786 
1787  for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
1788  {
1789  SCIP_CALL( eval(SCIPexprGetChildren(expr)[i], x, param, buf[i]) );
1790  }
1791  }
1792 
1793  switch(SCIPexprGetOperator(expr))
1794  {
1795  case SCIP_EXPR_VARIDX:
1796  assert(SCIPexprGetOpIndex(expr) < (int)x.size());
1797  val = x[SCIPexprGetOpIndex(expr)];
1798  break;
1799 
1800  case SCIP_EXPR_CONST:
1801  val = SCIPexprGetOpReal(expr);
1802  break;
1803 
1804  case SCIP_EXPR_PARAM:
1805  assert(param != NULL);
1806  val = param[SCIPexprGetOpIndex(expr)];
1807  break;
1808 
1809  case SCIP_EXPR_PLUS:
1810  assert( buf != 0 );
1811  val = buf[0] + buf[1];
1812  break;
1813 
1814  case SCIP_EXPR_MINUS:
1815  assert( buf != 0 );
1816  val = buf[0] - buf[1];
1817  break;
1818 
1819  case SCIP_EXPR_MUL:
1820  assert( buf != 0 );
1821  val = buf[0] * buf[1];
1822  break;
1823 
1824  case SCIP_EXPR_DIV:
1825  assert( buf != 0 );
1826  val = buf[0] / buf[1];
1827  break;
1828 
1829  case SCIP_EXPR_SQUARE:
1830  assert( buf != 0 );
1831  evalIntPower(val, buf[0], 2);
1832  break;
1833 
1834  case SCIP_EXPR_SQRT:
1835  assert( buf != 0 );
1836  evalSqrt(val, buf[0]);
1837  break;
1838 
1839  case SCIP_EXPR_REALPOWER:
1840  assert( buf != 0 );
1841  val = CppAD::pow(buf[0], SCIPexprGetRealPowerExponent(expr));
1842  break;
1843 
1844  case SCIP_EXPR_INTPOWER:
1845  assert( buf != 0 );
1846  evalIntPower(val, buf[0], SCIPexprGetIntPowerExponent(expr));
1847  break;
1848 
1849  case SCIP_EXPR_SIGNPOWER:
1850  assert( buf != 0 );
1851  evalSignPower(val, buf[0], expr);
1852  break;
1853 
1854  case SCIP_EXPR_EXP:
1855  assert( buf != 0 );
1856  val = exp(buf[0]);
1857  break;
1858 
1859  case SCIP_EXPR_LOG:
1860  assert( buf != 0 );
1861  val = log(buf[0]);
1862  break;
1863 
1864  case SCIP_EXPR_SIN:
1865  assert( buf != 0 );
1866  val = sin(buf[0]);
1867  break;
1868 
1869  case SCIP_EXPR_COS:
1870  assert( buf != 0 );
1871  val = cos(buf[0]);
1872  break;
1873 
1874  case SCIP_EXPR_TAN:
1875  assert( buf != 0 );
1876  val = tan(buf[0]);
1877  break;
1878 #ifdef SCIP_DISABLED_CODE /* these operators are currently disabled */
1879  case SCIP_EXPR_ERF:
1880  assert( buf != 0 );
1881  val = erf(buf[0]);
1882  break;
1883 
1884  case SCIP_EXPR_ERFI:
1885  return SCIP_ERROR;
1886 #endif
1887  case SCIP_EXPR_MIN:
1888  assert( buf != 0 );
1889  evalMin(val, buf[0], buf[1]);
1890  break;
1891 
1892  case SCIP_EXPR_MAX:
1893  assert( buf != 0 );
1894  evalMax(val, buf[0], buf[1]);
1895  break;
1896 
1897  case SCIP_EXPR_ABS:
1898  assert( buf != 0 );
1899  evalAbs(val, buf[0]);
1900  break;
1901 
1902  case SCIP_EXPR_SIGN:
1903  assert( buf != 0 );
1904  val = sign(buf[0]);
1905  break;
1906 
1907  case SCIP_EXPR_SUM:
1908  assert( buf != 0 );
1909  val = 0.0;
1910  for (int i = 0; i < SCIPexprGetNChildren(expr); ++i)
1911  val += buf[i];
1912  break;
1913 
1914  case SCIP_EXPR_PRODUCT:
1915  assert( buf != 0 );
1916  val = 1.0;
1917  for (int i = 0; i < SCIPexprGetNChildren(expr); ++i)
1918  val *= buf[i];
1919  break;
1920 
1921  case SCIP_EXPR_LINEAR:
1922  {
1923  SCIP_Real* coefs;
1924 
1925  coefs = SCIPexprGetLinearCoefs(expr);
1926  assert(coefs != NULL || SCIPexprGetNChildren(expr) == 0);
1927 
1928  assert( buf != 0 );
1929  val = SCIPexprGetLinearConstant(expr);
1930  for (int i = 0; i < SCIPexprGetNChildren(expr); ++i)
1931  val += coefs[i] * buf[i]; /*lint !e613*/
1932  break;
1933  }
1934 
1935  case SCIP_EXPR_QUADRATIC:
1936  {
1937  SCIP_Real* lincoefs;
1938  SCIP_QUADELEM* quadelems;
1939  int nquadelems;
1940  SCIP_Real sqrcoef;
1941  Type lincoef;
1942  vector<Type> in(1);
1943  vector<Type> out(1);
1944 
1945  assert( buf != 0 );
1946 
1947  lincoefs = SCIPexprGetQuadLinearCoefs(expr);
1948  nquadelems = SCIPexprGetNQuadElements(expr);
1949  quadelems = SCIPexprGetQuadElements(expr);
1950  assert(quadelems != NULL || nquadelems == 0);
1951 
1952  SCIPexprSortQuadElems(expr);
1953 
1954  val = SCIPexprGetQuadConstant(expr);
1955 
1956  /* for each argument, we collect its linear coefficient from lincoefs, its square coefficients, and all factors from bilinear terms;
1957  * then we compute the term sqrcoef*x^2 + lincoef*x and add it to the result */
1958  int i = 0;
1959  for( int argidx = 0; argidx < SCIPexprGetNChildren(expr); ++argidx )
1960  {
1961  if( i == nquadelems || quadelems[i].idx1 > argidx ) /*lint !e613*/
1962  {
1963  /* there is no quadratic term with argidx as first index, so only the linear part needs to be handled */
1964  if( lincoefs != NULL )
1965  val += lincoefs[argidx] * buf[argidx];
1966  continue;
1967  }
1968 
1969  sqrcoef = 0.0;
1970  lincoef = lincoefs != NULL ? lincoefs[argidx] : 0.0;
1971 
1972  assert(i < nquadelems && quadelems[i].idx1 == argidx); /*lint !e613*/
1973  do
1974  {
1975  if( quadelems[i].idx2 == argidx ) /*lint !e613*/
1976  sqrcoef += quadelems[i].coef; /*lint !e613*/
1977  else
1978  lincoef += quadelems[i].coef * buf[quadelems[i].idx2]; /*lint !e613*/
1979  ++i;
1980  } while( i < nquadelems && quadelems[i].idx1 == argidx ); /*lint !e613*/
1981  assert(i == nquadelems || quadelems[i].idx1 > argidx); /*lint !e613*/
1982 
1983  /* this is not as good as what we can get from SCIPintervalQuad, but easy to implement */
1984  if( sqrcoef != 0.0 )
1985  {
1986  in[0] = buf[argidx];
1987  posintpower(in, out, 2);
1988  val += sqrcoef * out[0];
1989  }
1990 
1991  val += lincoef * buf[argidx];
1992  }
1993  assert(i == nquadelems);
1994 
1995  break;
1996  }
1997 
1998  case SCIP_EXPR_POLYNOMIAL:
1999  {
2000  SCIP_EXPRDATA_MONOMIAL** monomials;
2001  Type childval;
2002  Type monomialval;
2003  SCIP_Real exponent;
2004  int nmonomials;
2005  int nfactors;
2006  int* childidxs;
2007  SCIP_Real* exponents;
2008  int i;
2009  int j;
2010 
2011  assert( buf != 0 );
2012 
2013  val = SCIPexprGetPolynomialConstant(expr);
2014 
2015  nmonomials = SCIPexprGetNMonomials(expr);
2016  monomials = SCIPexprGetMonomials(expr);
2017 
2018  for( i = 0; i < nmonomials; ++i )
2019  {
2020  nfactors = SCIPexprGetMonomialNFactors(monomials[i]);
2021  childidxs = SCIPexprGetMonomialChildIndices(monomials[i]);
2022  exponents = SCIPexprGetMonomialExponents(monomials[i]);
2023  monomialval = SCIPexprGetMonomialCoef(monomials[i]);
2024 
2025  for( j = 0; j < nfactors; ++j )
2026  {
2027  assert(childidxs[j] >= 0);
2028  assert(childidxs[j] < SCIPexprGetNChildren(expr));
2029 
2030  childval = buf[childidxs[j]];
2031  exponent = exponents[j];
2032 
2033  /* cover some special exponents separately to avoid calling expensive pow function */
2034  if( exponent == 0.0 )
2035  continue;
2036  if( exponent == 1.0 )
2037  {
2038  monomialval *= childval;
2039  continue;
2040  }
2041  if( (int)exponent == exponent )
2042  {
2043  Type tmp;
2044  evalIntPower(tmp, childval, (int)exponent);
2045  monomialval *= tmp;
2046  continue;
2047  }
2048  if( exponent == 0.5 )
2049  {
2050  Type tmp;
2051  evalSqrt(tmp, childval);
2052  monomialval *= tmp;
2053  continue;
2054  }
2055  monomialval *= pow(childval, exponent);
2056  }
2057 
2058  val += monomialval;
2059  }
2060 
2061  break;
2062  }
2063 
2064  case SCIP_EXPR_USER:
2065  evalUser(val, buf, expr);
2066  break;
2067 
2068  case SCIP_EXPR_LAST:
2069  default:
2070  BMSfreeMemoryArrayNull(&buf);
2071  return SCIP_ERROR;
2072  }
2073 
2074  BMSfreeMemoryArrayNull(&buf);
2075 
2076  return SCIP_OKAY;
2077 }
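/* Reading aid: eval() is the recursive tape-recording routine; a minimal sketch of how it is driven
 * when (re)taping, mirroring SCIPexprintEval/SCIPexprintEvalInt below (root, tree, n given):
 *
 *    vector< CppAD::AD<double> > X(n), Y(1);   // independents and the single dependent
 *    CppAD::ADFun<double> f;
 *    CppAD::Independent(X);                    // start recording operations on X
 *    SCIP_CALL( eval(root, X, SCIPexprtreeGetParamVals(tree), Y[0]) );
 *    f.Dependent(X, Y);                        // stop recording; f can now run forward/reverse sweeps
 */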
2078 
2079 /** analyzes an expression tree to determine whether it requires retaping on every evaluation
2080  *
2081  * This may be the case if the evaluation sequence depends on values of operands (e.g., in case of abs, sign, signpower, ...).
2082  */
2083 static
2084 void analyzeTree(
2085  SCIP_EXPRINTDATA* data,
2086  SCIP_EXPR* expr
2087  )
2088 {
2089  assert(expr != NULL);
2090  assert(SCIPexprGetChildren(expr) != NULL || SCIPexprGetNChildren(expr) == 0);
2091 
2092  for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
2093  analyzeTree(data, SCIPexprGetChildren(expr)[i]);
2094 
2095  switch( SCIPexprGetOperator(expr) )
2096  {
2097  case SCIP_EXPR_MIN:
2098  case SCIP_EXPR_MAX:
2099  case SCIP_EXPR_ABS:
2100 #ifdef NO_CPPAD_USER_ATOMIC
2101  case SCIP_EXPR_SIGNPOWER:
2102 #endif
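 /* the operators above are evaluated via value-dependent comparisons (see evalMin/evalMax/evalAbs),
  * so a tape recorded at one point is not guaranteed to be valid at another point */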
2103  data->need_retape_always = true;
2104  break;
2105 
2106  case SCIP_EXPR_USER:
2107  data->userevalcapability &= SCIPexprGetUserEvalCapability(expr);
2108  break;
2109 
2110  default: ;
2111  } /*lint !e788*/
2112 
2113 }
2114 
2115 /** replacement for CppAD's default error handler
2116  *
2117  * In debug mode, CppAD gives an error when an evaluation contains a NaN.
2118  * We do not want to stop execution in such a case, since the calling routine should check for NaNs and decide what to do.
2119  * Since we cannot ignore this particular error, we ignore all.
2120  * @todo find a way to check whether the error corresponds to a nan and communicate this back
2121  */
2122 static
2123 void cppaderrorcallback(
2124  bool known, /**< is the error from a known source? */
2125  int line, /**< line where error occurred */
2126  const char* file, /**< file where error occurred */
2127  const char* cond, /**< error condition */
2128  const char* msg /**< error message */
2129  )
2130 {
2131  SCIPdebugMessage("ignore CppAD error from %sknown source %s:%d: msg: %s exp: %s\n", known ? "" : "un", file, line, msg, cond);
2132 }
2133 
2134 /* install our error handler */
2135 static CppAD::ErrorHandler errorhandler(cppaderrorcallback);
2136 
2137 /** gets name and version of expression interpreter */
2138 const char* SCIPexprintGetName(void)
2139 {
2140  return CPPAD_PACKAGE_STRING;
2141 }
2142 
2143 /** gets descriptive text of expression interpreter */
2144 const char* SCIPexprintGetDesc(void)
2145 {
2146  return "Algorithmic Differentiation of C++ algorithms developed by B. Bell (www.coin-or.org/CppAD)";
2147 }
2148 
2149 /** gets capabilities of expression interpreter (using bitflags) */
2150 SCIP_EXPRINTCAPABILITY SCIPexprintGetCapability(
2151  void
2152  )
2153 {
2154  return SCIP_EXPRINTCAPABILITY_FUNCVALUE | SCIP_EXPRINTCAPABILITY_INTFUNCVALUE |
2155  SCIP_EXPRINTCAPABILITY_GRADIENT | SCIP_EXPRINTCAPABILITY_INTGRADIENT |
2156  SCIP_EXPRINTCAPABILITY_HESSIAN;
2157 }
2158 
2159 /** creates an expression interpreter object */
2160 SCIP_RETCODE SCIPexprintCreate(
2161  BMS_BLKMEM* blkmem, /**< block memory data structure */
2162  SCIP_EXPRINT** exprint /**< buffer to store pointer to expression interpreter */
2163  )
2164 {
2165  assert(blkmem != NULL);
2166  assert(exprint != NULL);
2167 
2168  if( BMSallocBlockMemory(blkmem, exprint) == NULL )
2169  return SCIP_NOMEMORY;
2170 
2171  (*exprint)->blkmem = blkmem;
2172 
2173  return SCIP_OKAY;
2174 }
2175 
2176 /** frees an expression interpreter object */
2177 SCIP_RETCODE SCIPexprintFree(
2178  SCIP_EXPRINT** exprint /**< expression interpreter that should be freed */
2179  )
2180 {
2181  assert( exprint != NULL);
2182  assert(*exprint != NULL);
2183 
2184  BMSfreeBlockMemory((*exprint)->blkmem, exprint);
2185 
2186  return SCIP_OKAY;
2187 }
2188 
2189 /** compiles an expression tree and stores compiled data in expression tree */
2190 SCIP_RETCODE SCIPexprintCompile(
2191  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2192  SCIP_EXPRTREE* tree /**< expression tree */
2193  )
2194 { /*lint --e{429} */
2195  assert(tree != NULL);
2196 
2197  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2198  if (!data)
2199  {
2200  data = new SCIP_EXPRINTDATA();
2201  assert( data != NULL );
2202  SCIPexprtreeSetInterpreterData(tree, data);
2203  SCIPdebugMessage("set interpreter data in tree %p to %p\n", (void*)tree, (void*)data);
2204  }
2205  else
2206  {
2207  data->need_retape = true;
2208  data->int_need_retape = true;
2209  }
2210 
2211  int n = SCIPexprtreeGetNVars(tree);
2212 
2213  data->X.resize(n);
2214  data->x.resize(n);
2215  data->Y.resize(1);
2216 
2217  data->int_X.resize(n);
2218  data->int_x.resize(n);
2219  data->int_Y.resize(1);
2220 
2221  if( data->root != NULL )
2222  {
2223  SCIPexprFreeDeep(exprint->blkmem, &data->root);
2224  }
2225 
2226  SCIP_EXPR* root = SCIPexprtreeGetRoot(tree);
2227 
2228  SCIP_CALL( SCIPexprCopyDeep(exprint->blkmem, &data->root, root) );
2229 
2230  data->blkmem = exprint->blkmem;
2231 
2232  analyzeTree(data, data->root);
2233 
2234  return SCIP_OKAY;
2235 }
2236 
2237 
2238 /** gives the capability to evaluate an expression by the expression interpreter
2239  *
2240  * For user-given expressions, higher-order derivatives may not be available for the user expression,
2241  * even if the expression interpreter could handle them. This method makes it possible to recognize that, e.g., the
2242  * Hessian of an expression is not available because it contains a user expression that does not provide
2243  * Hessians.
2244  */
2245 SCIP_EXPRINTCAPABILITY SCIPexprintGetExprtreeCapability(
2246  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2247  SCIP_EXPRTREE* tree /**< expression tree */
2248  )
2249 {
2250  assert(tree != NULL);
2251 
2252  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2253  assert(data != NULL);
2254 
2255  return data->userevalcapability;
2256 }/*lint !e715*/
2257 
2258 /** frees interpreter data */
2259 SCIP_RETCODE SCIPexprintFreeData(
2260  SCIP_EXPRINTDATA** interpreterdata /**< interpreter data that should be freed */
2261  )
2262 {
2263  assert( interpreterdata != NULL);
2264  assert(*interpreterdata != NULL);
2265 
2266  if( (*interpreterdata)->root != NULL )
2267  SCIPexprFreeDeep((*interpreterdata)->blkmem, &(*interpreterdata)->root);
2268 
2269  delete *interpreterdata;
2270  *interpreterdata = NULL;
2271 
2272  return SCIP_OKAY;
2273 }
2274 
2275 /** notifies the expression interpreter that a new parameterization is used
2276  *
2277  * This probably causes retaping by AD algorithms.
2278  */
2279 SCIP_RETCODE SCIPexprintNewParametrization(
2280  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2281  SCIP_EXPRTREE* tree /**< expression tree */
2282  )
2283 {
2284  assert(exprint != NULL);
2285  assert(tree != NULL);
2286 
2287  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2288  if( data != NULL )
2289  {
2290  data->need_retape = true;
2291  data->int_need_retape = true;
2292  }
2293 
2294  return SCIP_OKAY;
2295 }
2296 
2297 /** evaluates an expression tree */
2298 SCIP_RETCODE SCIPexprintEval(
2299  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2300  SCIP_EXPRTREE* tree, /**< expression tree */
2301  SCIP_Real* varvals, /**< values of variables */
2302  SCIP_Real* val /**< buffer to store value */
2303  )
2304 {
2305  SCIP_EXPRINTDATA* data;
2306 
2307  assert(exprint != NULL);
2308  assert(tree != NULL);
2309  assert(varvals != NULL);
2310  assert(val != NULL);
2311 
2312  data = SCIPexprtreeGetInterpreterData(tree);
2313  assert(data != NULL);
2314  assert(SCIPexprtreeGetNVars(tree) == (int)data->X.size());
2315  assert(SCIPexprtreeGetRoot(tree) != NULL);
2316 
2317  int n = SCIPexprtreeGetNVars(tree);
2318 
2319  if( n == 0 )
2320  {
2321  SCIP_CALL( SCIPexprtreeEval(tree, NULL, val) );
2322  return SCIP_OKAY;
2323  }
2324 
2325  if( data->need_retape_always || data->need_retape )
2326  {
2327  for( int i = 0; i < n; ++i )
2328  {
2329  data->X[i] = varvals[i];
2330  data->x[i] = varvals[i];
2331  }
2332 
2333  CppAD::Independent(data->X);
2334 
2335  if( data->root != NULL )
2336  SCIP_CALL( eval(data->root, data->X, SCIPexprtreeGetParamVals(tree), data->Y[0]) );
2337  else
2338  data->Y[0] = 0.0;
2339 
2340  data->f.Dependent(data->X, data->Y);
2341 
2342  data->val = Value(data->Y[0]);
2343  SCIPdebugMessage("Eval retaped and computed value %g\n", data->val);
2344 
2345  // the following is required if the gradient shall be computed by a reverse sweep later
2346  // data->val = data->f.Forward(0, data->x)[0];
2347 
2348  data->need_retape = false;
2349  }
2350  else
2351  {
2352  assert((int)data->x.size() >= n);
2353  for( int i = 0; i < n; ++i )
2354  data->x[i] = varvals[i];
2355 
2356  data->val = data->f.Forward(0, data->x)[0]; /*lint !e1793*/
2357  SCIPdebugMessage("Eval used forward sweep to compute value %g\n", data->val);
2358  }
2359 
2360  *val = data->val;
2361 
2362  return SCIP_OKAY;
2363 }
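/* Minimal usage sketch (informal, error handling abbreviated) of the interface implemented in this file,
 * assuming blkmem, tree, and varvals are given:
 *
 *    SCIP_EXPRINT* exprint;
 *    SCIP_Real val;
 *    SCIP_CALL( SCIPexprintCreate(blkmem, &exprint) );
 *    SCIP_CALL( SCIPexprintCompile(exprint, tree) );             // copy and analyze the tree
 *    SCIP_CALL( SCIPexprintEval(exprint, tree, varvals, &val) ); // tapes on first call, then reuses the tape
 *    SCIP_CALL( SCIPexprintFree(&exprint) );
 */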
2364 
2365 /** evaluates an expression tree on intervals */
2366 SCIP_RETCODE SCIPexprintEvalInt(
2367  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2368  SCIP_EXPRTREE* tree, /**< expression tree */
2369  SCIP_Real infinity, /**< value for infinity */
2370  SCIP_INTERVAL* varvals, /**< interval values of variables */
2371  SCIP_INTERVAL* val /**< buffer to store interval value of expression */
2372  )
2373 {
2374  SCIP_EXPRINTDATA* data;
2375 
2376  assert(exprint != NULL);
2377  assert(tree != NULL);
2378  assert(varvals != NULL);
2379  assert(val != NULL);
2380 
2381  data = SCIPexprtreeGetInterpreterData(tree);
2382  assert(data != NULL);
2383  assert(SCIPexprtreeGetNVars(tree) == (int)data->int_X.size());
2384  assert(SCIPexprtreeGetRoot(tree) != NULL);
2385 
2386  int n = SCIPexprtreeGetNVars(tree);
2387 
2388  if( n == 0 )
2389  {
2390  SCIP_CALL( SCIPexprtreeEvalInt(tree, infinity, NULL, val) );
2391  return SCIP_OKAY;
2392  }
2393 
2395 
2396  if( data->int_need_retape || data->need_retape_always )
2397  {
2398  for( int i = 0; i < n; ++i )
2399  {
2400  data->int_X[i] = varvals[i];
2401  data->int_x[i] = varvals[i];
2402  }
2403 
2404  CppAD::Independent(data->int_X);
2405 
2406  if( data->root != NULL )
2407  SCIP_CALL( eval(data->root, data->int_X, SCIPexprtreeGetParamVals(tree), data->int_Y[0]) );
2408  else
2409  data->int_Y[0] = 0.0;
2410 
2411  data->int_f.Dependent(data->int_X, data->int_Y);
2412 
2413  data->int_val = Value(data->int_Y[0]);
2414 
2415  data->int_need_retape = false;
2416  }
2417  else
2418  {
2419  assert((int)data->int_x.size() >= n);
2420  for( int i = 0; i < n; ++i )
2421  data->int_x[i] = varvals[i];
2422 
2423  data->int_val = data->int_f.Forward(0, data->int_x)[0]; /*lint !e1793*/
2424  }
2425 
2426  *val = data->int_val;
2427 
2428  return SCIP_OKAY;
2429 }
2430 
2431 /** computes value and gradient of an expression tree */
2432 SCIP_RETCODE SCIPexprintGrad(
2433  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2434  SCIP_EXPRTREE* tree, /**< expression tree */
2435  SCIP_Real* varvals, /**< values of variables, can be NULL if new_varvals is FALSE */
2436  SCIP_Bool new_varvals, /**< have variable values changed since last call to a point evaluation routine? */
2437  SCIP_Real* val, /**< buffer to store expression value */
2438  SCIP_Real* gradient /**< buffer to store expression gradient, need to have length at least SCIPexprtreeGetNVars(tree) */
2439  )
2440 {
2441  assert(exprint != NULL);
2442  assert(tree != NULL);
2443  assert(varvals != NULL || new_varvals == FALSE);
2444  assert(val != NULL);
2445  assert(gradient != NULL);
2446 
2447  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2448  assert(data != NULL);
2449 
2450  if( new_varvals )
2451  {
2452  SCIP_CALL( SCIPexprintEval(exprint, tree, varvals, val) );
2453  }
2454  else
2455  *val = data->val;
2456 
2457  int n = SCIPexprtreeGetNVars(tree);
2458 
2459  if( n == 0 )
2460  return SCIP_OKAY;
2461 
2462  vector<double> jac(data->f.Jacobian(data->x));
2463 
2464  for( int i = 0; i < n; ++i )
2465  gradient[i] = jac[i];
2466 
2467 /* disable debug output since we have no message handler here
2468 #ifdef SCIP_DEBUG
2469  SCIPdebugMessage("Grad for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2470  SCIPdebugMessage("x ="); for (int i = 0; i < n; ++i) printf("\t %g", data->x[i]); printf("\n");
2471  SCIPdebugMessage("grad ="); for (int i = 0; i < n; ++i) printf("\t %g", gradient[i]); printf("\n");
2472 #endif
2473 */
2474 
2475  return SCIP_OKAY;
2476 }
2477 
2478 /** computes interval value and interval gradient of an expression tree */
2479 SCIP_RETCODE SCIPexprintGradInt(
2480  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2481  SCIP_EXPRTREE* tree, /**< expression tree */
2482  SCIP_Real infinity, /**< value for infinity */
2483  SCIP_INTERVAL* varvals, /**< interval values of variables, can be NULL if new_varvals is FALSE */
2484  SCIP_Bool new_varvals, /**< have variable interval values changed since last call to an interval evaluation routine? */
2485  SCIP_INTERVAL* val, /**< buffer to store expression interval value */
2486  SCIP_INTERVAL* gradient /**< buffer to store expression interval gradient, need to have length at least SCIPexprtreeGetNVars(tree) */
2487  )
2488 {
2489  assert(exprint != NULL);
2490  assert(tree != NULL);
2491  assert(varvals != NULL || new_varvals == FALSE);
2492  assert(val != NULL);
2493  assert(gradient != NULL);
2494 
2495  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2496  assert(data != NULL);
2497 
2498  if (new_varvals)
2499  SCIP_CALL( SCIPexprintEvalInt(exprint, tree, infinity, varvals, val) );
2500  else
2501  *val = data->int_val;
2502 
2503  int n = SCIPexprtreeGetNVars(tree);
2504 
2505  if( n == 0 )
2506  return SCIP_OKAY;
2507 
2508  vector<SCIPInterval> jac(data->int_f.Jacobian(data->int_x));
2509 
2510  for (int i = 0; i < n; ++i)
2511  gradient[i] = jac[i];
2512 
2513 /* disable debug output since we have no message handler here
2514 #ifdef SCIP_DEBUG
2515  SCIPdebugMessage("GradInt for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2516  SCIPdebugMessage("x ="); for (int i = 0; i < n; ++i) printf("\t [%g,%g]", SCIPintervalGetInf(data->int_x[i]), SCIPintervalGetSup(data->int_x[i])); printf("\n");
2517  SCIPdebugMessage("grad ="); for (int i = 0; i < n; ++i) printf("\t [%g,%g]", SCIPintervalGetInf(gradient[i]), SCIPintervalGetSup(gradient[i])); printf("\n");
2518 #endif
2519 */
2520 
2521  return SCIP_OKAY;
2522 }
2523 
2524 /** gives sparsity pattern of hessian
2525  *
2526  * NOTE: this function might be replaced later by something nicer.
2527  * Since the AD code might need to do a forward sweep, you should pass variable values in here.
2528  */
2529 SCIP_RETCODE SCIPexprintHessianSparsityDense(
2530  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2531  SCIP_EXPRTREE* tree, /**< expression tree */
2532  SCIP_Real* varvals, /**< values of variables */
2533  SCIP_Bool* sparsity /**< buffer to store sparsity pattern of Hessian, sparsity[i+n*j] indicates whether entry (i,j) is nonzero in the hessian */
2534  )
2535 {
2536  assert(exprint != NULL);
2537  assert(tree != NULL);
2538  assert(varvals != NULL);
2539  assert(sparsity != NULL);
2540 
2541  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2542  assert(data != NULL);
2543 
2544  int n = SCIPexprtreeGetNVars(tree);
2545  if( n == 0 )
2546  return SCIP_OKAY;
2547 
2548  int nn = n*n;
2549 
2550  if( data->need_retape_always )
2551  {
2552  // @todo can we do something better here, e.g., by looking at the expression tree by ourself?
2553 
2554  for( int i = 0; i < nn; ++i )
2555  sparsity[i] = TRUE;
2556 
2557 /* disable debug output since we have no message handler here
2558 #ifdef SCIP_DEBUG
2559  SCIPdebugMessage("HessianSparsityDense for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2560  SCIPdebugMessage("sparsity = all elements, due to discontinuities\n");
2561 #endif
2562 */
2563 
2564  return SCIP_OKAY;
2565  }
2566 
2567  if( data->need_retape )
2568  {
2569  SCIP_Real val;
2570  SCIP_CALL( SCIPexprintEval(exprint, tree, varvals, &val) );
2571  } /*lint !e438*/
2572 
2573  SCIPdebugMessage("calling ForSparseJac\n");
2574 
2575  vector<bool> r(nn, false);
2576  for (int i = 0; i < n; ++i)
2577  r[i*n+i] = true; /*lint !e647 !e1793*/
2578  (void) data->f.ForSparseJac(n, r); // need to compute sparsity for Jacobian first
2579 
2580  SCIPdebugMessage("calling RevSparseHes\n");
2581 
2582  vector<bool> s(1, true);
2583  vector<bool> sparsehes(data->f.RevSparseHes(n, s));
2584 
2585  for( int i = 0; i < nn; ++i )
2586  sparsity[i] = (SCIP_Bool)sparsehes[i];
2587 
2588 /* disable debug output since we have no message handler here
2589 #ifdef SCIP_DEBUG
2590  SCIPdebugMessage("HessianSparsityDense for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2591  SCIPdebugMessage("sparsity ="); for (int i = 0; i < n; ++i) for (int j = 0; j < n; ++j) if (sparsity[i*n+j]) printf(" (%d,%d)", i, j); printf("\n");
2592 #endif
2593 */
2594 
2595  return SCIP_OKAY;
2596 }
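/* Reading aid: the calls above are the standard CppAD two-step sparsity computation; with R set to the
 * identity seed (r[i*n+i] = true),
 *
 *    data->f.ForSparseJac(n, r);   // forward: sparsity of the Jacobian w.r.t. each variable
 *    data->f.RevSparseHes(n, s);   // reverse: sparsity of s^T * Hessian, here s = (true) for the single output
 *
 * which yields the dense n*n boolean pattern that is copied into the sparsity buffer.
 */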
2597 
2598 /** computes value and dense hessian of an expression tree
2599  *
2600  * The full hessian is computed (lower left and upper right triangle).
2601  */
2602 SCIP_RETCODE SCIPexprintHessianDense(
2603  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2604  SCIP_EXPRTREE* tree, /**< expression tree */
2605  SCIP_Real* varvals, /**< values of variables, can be NULL if new_varvals is FALSE */
2606  SCIP_Bool new_varvals, /**< have variable values changed since last call to an evaluation routine? */
2607  SCIP_Real* val, /**< buffer to store function value */
2608  SCIP_Real* hessian /**< buffer to store hessian values, need to have size at least n*n */
2609  )
2610 {
2611  assert(exprint != NULL);
2612  assert(tree != NULL);
2613  assert(varvals != NULL || new_varvals == FALSE);
2614  assert(val != NULL);
2615  assert(hessian != NULL);
2616 
2617  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2618  assert(data != NULL);
2619 
2620  if( new_varvals )
2621  {
2622  SCIP_CALL( SCIPexprintEval(exprint, tree, varvals, val) );
2623  }
2624  else
2625  *val = data->val;
2626 
2627  int n = SCIPexprtreeGetNVars(tree);
2628 
2629  if( n == 0 )
2630  return SCIP_OKAY;
2631 
2632 #if 1
2633  /* this one uses reverse mode */
2634  vector<double> hess(data->f.Hessian(data->x, 0));
2635 
2636  int nn = n*n;
2637  for (int i = 0; i < nn; ++i)
2638  hessian[i] = hess[i];
2639 
2640 #else
2641  /* this one uses forward mode */
2642  for( int i = 0; i < n; ++i )
2643  for( int j = 0; j < n; ++j )
2644  {
2645  vector<int> ii(1,i);
2646  vector<int> jj(1,j);
2647  hessian[i*n+j] = data->f.ForTwo(data->x, ii, jj)[0];
2648  }
2649 #endif
2650 
2651 /* disable debug output since we have no message handler here
2652 #ifdef SCIP_DEBUG
2653  SCIPdebugMessage("HessianDense for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2654  SCIPdebugMessage("x ="); for (int i = 0; i < n; ++i) printf("\t %g", data->x[i]); printf("\n");
2655  SCIPdebugMessage("hess ="); for (int i = 0; i < n*n; ++i) printf("\t %g", hessian[i]); printf("\n");
2656 #endif
2657 */
2658  return SCIP_OKAY;
2659 }
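/* Reading aid: data->f.Hessian(data->x, 0) returns the full n*n Hessian of dependent variable 0 at data->x,
 * which is copied verbatim into the output buffer; the disabled branch computes the same matrix entry by
 * entry via ForTwo.
 */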