exprinterpret_cppad.cpp
1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2 /* */
3 /* This file is part of the program and library */
4 /* SCIP --- Solving Constraint Integer Programs */
5 /* */
6 /* Copyright (C) 2002-2019 Konrad-Zuse-Zentrum */
7 /* fuer Informationstechnik Berlin */
8 /* */
9 /* SCIP is distributed under the terms of the ZIB Academic License. */
10 /* */
11 /* You should have received a copy of the ZIB Academic License */
12 /* along with SCIP; see the file COPYING. If not visit scip.zib.de. */
13 /* */
14 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
15 
16 /**@file exprinterpret_cppad.cpp
17  * @brief methods to interpret (evaluate) an expression tree "fast" using CppAD
18  * @ingroup EXPRINTS
19  * @author Stefan Vigerske
20  */
21 
22 /*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/
23 
24 #include "scip/def.h"
25 #include "blockmemshell/memory.h"
26 #include "nlpi/pub_expr.h"
27 #include "nlpi/exprinterpret.h"
28 
29 #include <cmath>
30 #include <vector>
31 using std::vector;
32 
33 /* Turn off lint warning "747: Significant prototype coercion" and "732: Loss of sign".
34  * The first warning is generated for expressions like t[0], where t is a vector, since 0 is an integer constant, but a
35  * size_t is expected (usually long unsigned). The second is generated for expressions like t[n], where n is an
36  * integer. Both code pieces are likely to be correct. It seems to be impossible to inhibit these messages for
37  * vector<*>::operator[] only. */
38 /*lint --e{747,732}*/
39 
40 /* Turn off lint info "1702 operator '...' is both an ordinary function 'CppAD::operator...' and a member function 'CppAD::SCIPInterval::operator...'".
41  * However, the functions have different signatures (the CppAD one working on double, the SCIPInterval member
42  * function working on SCIPIntervals).
43  */
44 /*lint --e{1702}*/
45 
46 /* defining NO_CPPAD_USER_ATOMIC disables the use of our own implementation of derivatives of power operators
47  * via CppAD's user-atomic function feature
48  * our customized implementation should give better results (tighter intervals) for the interval data type
49  */
50 /* #define NO_CPPAD_USER_ATOMIC */
51 
52 /** sign of a value (-1 or +1)
53  *
54  * 0.0 has sign +1
55  */
56 #define SIGN(x) ((x) >= 0.0 ? 1.0 : -1.0)
57 
58 /* in order to use intervals as operands in CppAD,
59  * we need to include the intervalarithext.h very early and require the interval operations to be in the CppAD namespace */
60 #define SCIPInterval_NAMESPACE CppAD
61 #include "nlpi/intervalarithext.h"
62 
63 namespace CppAD
64 {
65  SCIP_Real SCIPInterval::infinity = SCIP_DEFAULT_INFINITY;
66 }
67 using CppAD::SCIPInterval;
68 
69 /* CppAD needs to know a fixed upper bound on the number of threads at compile time.
70  * It is wise to set it to a power of 2, so that if the tape id overflows, it is likely to start at 0 again, which avoids difficult to debug errors.
71  */
72 #ifndef CPPAD_MAX_NUM_THREADS
73 #ifndef NPARASCIP
74 #define CPPAD_MAX_NUM_THREADS 64
75 #else
76 #define CPPAD_MAX_NUM_THREADS 1
77 #endif
78 #endif
79 
80 /* disable -Wshadow warnings for upcoming includes of CppAD if using some old GCC
81  * -Wshadow was too strict with some versions of GCC 4 (https://stackoverflow.com/questions/2958457/gcc-wshadow-is-too-strict)
82  */
83 #ifdef __GNUC__
84 #if __GNUC__ == 4
85 #pragma GCC diagnostic ignored "-Wshadow"
86 #endif
87 #endif
88 
89 #include <cppad/cppad.hpp>
90 #include <cppad/utility/error_handler.hpp>
91 
92 /* CppAD is not thread-safe by itself, but uses some static data structures.
93  * To run it in a multithreading environment, a special CppAD memory allocator that is aware of the multiple threads has to be used.
94  * This allocator needs to know the number of threads and a thread number for each thread.
95  * To implement this, we follow the team_pthread example of CppAD, which uses pthread's thread-specific data management.
96  */
97 #ifndef NPARASCIP
98 
99 #include <atomic>
100 
101 /** currently registered number of threads */
102 static std::atomic_size_t ncurthreads{0};
103 static thread_local int thread_number{-1};
104 
105 /** CppAD callback function that indicates whether we are running in parallel mode */
106 static
107 bool in_parallel(void)
108 {
109  return ncurthreads > 1;
110 }
111 
112 /** CppAD callback function that returns the number of the current thread
113  *
114  * assigns a new number to the thread if it does not have one yet
115  */
116 static
117 size_t thread_num(void)
118 {
119  size_t threadnum;
120 
121  /* if no thread_number for this thread yet, then assign a new thread number to the current thread
122  */
123  if( thread_number == -1 )
124  {
125  thread_number = static_cast<int>(ncurthreads.fetch_add(1, std::memory_order_relaxed));
126  }
127 
128  threadnum = static_cast<size_t>(thread_number);
129 
130  return threadnum;
131 }
132 
133 /** sets up CppAD's datastructures for running in multithreading mode
134  *
135  * It must be called once before multithreading is started.
136  */
137 static
138 char init_parallel(void)
139 {
140  CppAD::thread_alloc::parallel_setup(CPPAD_MAX_NUM_THREADS, in_parallel, thread_num);
141  CppAD::parallel_ad<double>();
142  CppAD::parallel_ad<SCIPInterval>();
143 
144  return 0;
145 }
146 
147 /** a dummy variable that is initialized to the result of init_parallel
148  *
149  * The purpose is to make sure that init_parallel() is called before any multithreading is started.
150  */
151 #if !defined(_MSC_VER)
152 __attribute__ ((unused))
153 #endif
154 static char init_parallel_return = init_parallel();
155 
156 #endif // NPARASCIP
157 
158 /** definition of CondExpOp for SCIPInterval (required by CppAD) */
159 inline
160 SCIPInterval CondExpOp(
161  enum CppAD::CompareOp cop,
162  const SCIPInterval& left,
163  const SCIPInterval& right,
164  const SCIPInterval& trueCase,
165  const SCIPInterval& falseCase)
166 { /*lint --e{715}*/
167  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
168  "SCIPInterval CondExpOp(...)",
169  "Error: cannot use CondExp with an interval type"
170  );
171 
172  return SCIPInterval();
173 }
174 
175 /** another function required by CppAD */
176 inline
177 bool IdenticalPar(
178  const SCIPInterval& x /**< operand */
179  )
180 { /*lint --e{715}*/
181  return true;
182 }
183 
184 /** returns whether the interval equals [0,0] */
185 inline
186 bool IdenticalZero(
187  const SCIPInterval& x /**< operand */
188  )
189 {
190  return (x == 0.0);
191 }
192 
193 /** returns whether the interval equals [1,1] */
194 inline
195 bool IdenticalOne(
196  const SCIPInterval& x /**< operand */
197  )
198 {
199  return (x == 1.0);
200 }
201 
202 /** yet another function that checks whether two intervals are equal */
203 inline
204 bool IdenticalEqualPar(
205  const SCIPInterval& x, /**< first operand */
206  const SCIPInterval& y /**< second operand */
207  )
208 {
209  return (x == y);
210 }
211 
212 /** greater than zero not defined for intervals */
213 inline
214 bool GreaterThanZero(
215  const SCIPInterval& x /**< operand */
216  )
217 { /*lint --e{715}*/
218  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
219  "GreaterThanZero(x)",
220  "Error: cannot use GreaterThanZero with interval"
221  );
222 
223  return false;
224 }
225 
226 /** greater than or equal zero not defined for intervals */
227 inline
228 bool GreaterThanOrZero(
229  const SCIPInterval& x /**< operand */
230  )
231 { /*lint --e{715}*/
232  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__ ,
233  "GreaterThanOrZero(x)",
234  "Error: cannot use GreaterThanOrZero with interval"
235  );
236 
237  return false;
238 }
239 
240 /** less than not defined for intervals */
241 inline
242 bool LessThanZero(
243  const SCIPInterval& x /**< operand */
244  )
245 { /*lint --e{715}*/
246  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
247  "LessThanZero(x)",
248  "Error: cannot use LessThanZero with interval"
249  );
250 
251  return false;
252 }
253 
254 /** less than or equal not defined for intervals */
255 inline
256 bool LessThanOrZero(
257  const SCIPInterval& x /**< operand */
258  )
259 { /*lint --e{715}*/
260  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
261  "LessThanOrZero(x)",
262  "Error: cannot use LessThanOrZero with interval"
263  );
264 
265  return false;
266 }
267 
268 /** conversion to integers not defined for intervals */
269 inline
270 int Integer(
271  const SCIPInterval& x /**< operand */
272  )
273 { /*lint --e{715}*/
274  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
275  "Integer(x)",
276  "Error: cannot use Integer with interval"
277  );
278 
279  return 0;
280 }
281 
282 /** absolute zero multiplication
283  *
284  * @return [0,0] if first argument is [0,0] independent of whether the second argument is an empty interval or not
285  */
286 inline
287 SCIPInterval azmul(
288  const SCIPInterval& x, /**< first operand */
289  const SCIPInterval& y /**< second operand */
290  )
291 {
292  if( x.inf == 0.0 && x.sup == 0.0 )
293  return SCIPInterval(0.0, 0.0);
294  return x * y;
295 }
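/* Illustration (not part of the original source): azmul is the "absolute zero" product CppAD uses when
 * the left factor may annihilate the right one. Assuming the SCIPInterval(inf,sup) constructor used above,
 * the intended behavior is, for example,
 *   azmul(SCIPInterval(0.0, 0.0), SCIPInterval(-SCIPInterval::infinity, SCIPInterval::infinity))
 *      == SCIPInterval(0.0, 0.0)    // the zero interval wins, even against an unbounded or empty operand
 *   azmul(SCIPInterval(1.0, 2.0), SCIPInterval(3.0, 4.0))
 *      == SCIPInterval(1.0, 2.0) * SCIPInterval(3.0, 4.0)    // otherwise plain interval multiplication
 */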
296 
297 /** printing of an interval (required by CppAD) */
298 inline
299 std::ostream& operator<<(std::ostream& out, const SCIP_INTERVAL& x)
300 {
301  out << '[' << x.inf << ',' << x.sup << ']';
302  return out;
303 }
304 
305 using CppAD::AD;
306 
307 /** expression interpreter */
308 struct SCIP_ExprInt
309 {
310  BMS_BLKMEM* blkmem; /**< block memory data structure */
311 };
312 
313 /** expression specific interpreter data */
314 struct SCIP_ExprIntData
315 {
316 public:
317  /** constructor */
318  SCIP_ExprIntData()
319  : val(0.0), need_retape(true), int_need_retape(true), need_retape_always(false), userevalcapability(SCIP_EXPRINTCAPABILITY_ALL), blkmem(NULL), root(NULL)
320  { }
321 
322  /** destructor */
323  ~SCIP_ExprIntData()
324  { }/*lint --e{1540}*/
325 
326  vector< AD<double> > X; /**< vector of dependent variables */
327  vector< AD<double> > Y; /**< result vector */
328  CppAD::ADFun<double> f; /**< the function to evaluate as CppAD object */
329 
330  vector<double> x; /**< current values of dependent variables */
331  double val; /**< current function value */
332  bool need_retape; /**< will retaping be required for the next point evaluation? */
333 
334  vector< AD<SCIPInterval> > int_X; /**< interval vector of dependent variables */
335  vector< AD<SCIPInterval> > int_Y; /**< interval result vector */
336  CppAD::ADFun<SCIPInterval> int_f; /**< the function to evaluate on intervals as CppAD object */
337 
338  vector<SCIPInterval> int_x; /**< current interval values of dependent variables */
339  SCIPInterval int_val; /**< current interval function value */
340  bool int_need_retape; /**< will retaping be required for the next interval evaluation? */
341 
342  bool need_retape_always; /**< will retaping be always required? */
343  SCIP_EXPRINTCAPABILITY userevalcapability; /**< (intersection of) capabilities of evaluation routines of user expressions */
344 
345  BMS_BLKMEM* blkmem; /**< block memory used to allocate expression tree */
346  SCIP_EXPR* root; /**< copy of expression tree; @todo we should not need to make a copy */
347 };
348 
349 #ifndef NO_CPPAD_USER_ATOMIC
350 
351 /** computes sparsity of jacobian for a univariate function during a forward sweep
352  *
353  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
354  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
355  */
356 static
357 bool univariate_for_sparse_jac(
358  size_t q, /**< number of columns in R */
359  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
360  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
361  )
362 {
363  assert(r.size() == q);
364  assert(s.size() == q);
365 
366  s = r;
367 
368  return true;
369 }
370 
371 /** Computes sparsity of jacobian during a reverse sweep
372  *
373  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
374  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
375  */
376 static
377 bool univariate_rev_sparse_jac(
378  size_t q, /**< number of rows in R */
379  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
380  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
381  )
382 {
383  assert(r.size() == q);
384  assert(s.size() == q);
385 
386  s = r;
387 
388  return true;
389 }
390 
391 /** computes sparsity of hessian during a reverse sweep
392  *
393  * Assume V(x) = (g(f(x)))'' R with f(x) = x^p for a function g:R->R and a matrix R.
394  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
395  */
396 static
397 bool univariate_rev_sparse_hes(
398  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
399  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
400  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
401  size_t q, /**< number of columns in R, U, and V */
402  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
403  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
404  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
405  )
406 { /*lint --e{439,715}*/ /* @todo take vx into account */
407  assert(r.size() == q);
408  assert(s.size() == 1);
409  assert(t.size() == 1);
410  assert(u.size() == q);
411  assert(v.size() == q);
412 
413  // T(x) = g'(f(x)) * f'(x) = S * f'(x), and f' is not identically 0
414  t[0] = s[0];
415 
416  // V(x) = g''(f(x)) f'(x) f'(x) R + g'(f(x)) f''(x) R
417  // = f'(x) U + S f''(x) R, with f'(x) and f''(x) not identically 0
418  v = u;
419  if( s[0] )
420  for( size_t j = 0; j < q; ++j )
421  if( r[j] )
422  v[j] = true;
423 
424  return true;
425 }
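/* Worked example (illustration only) for univariate_rev_sparse_hes: with q = 3, r = (1,0,1),
 * u = (0,1,0) and s = (1), the code sets t = (1) and starts from v = u = (0,1,0); since s[0] is true,
 * every column with a nonzero in r is switched on as well, giving v = (1,1,1).
 * With s = (0), the g'(f(x)) f''(x) R contribution drops out and v stays (0,1,0). */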
426 
427 
428 /** Automatic differentiation of x -> x^p, p>=2 integer, as CppAD user-atomic function.
429  *
430  * This class implements forward and reverse operations for the function x -> x^p for use within CppAD.
431  * While CppAD would implement integer powers as a recursion of multiplications, we still use pow functions as they allow us to avoid overestimation in interval arithmetics.
432  *
433  * @todo treat the exponent as a (variable) argument to the function, with the assumption that we never differentiate w.r.t. it (this should make the approach threadsafe again)
434  */
435 template<class Type>
436 class atomic_posintpower : public CppAD::atomic_base<Type>
437 {
438 public:
439  atomic_posintpower()
440  : CppAD::atomic_base<Type>("posintpower"),
441  exponent(0)
442  {
443  /* indicate that we want to use bool-based sparsity pattern */
444  this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
445  }
446 
447 private:
448  /** exponent value for next call to forward or reverse */
449  int exponent;
450 
451  /** stores exponent value corresponding to next call to forward or reverse
452  *
453  * how is this supposed to be threadsafe? (we use only one global instantiation of this class)
454  * TODO according to the CppAD 2018 docu, using this function is deprecated; what is the modern way to do this?
455  */
456  virtual void set_old(size_t id)
457  {
458  exponent = (int) id;
459  }
460 
461  /** forward sweep of positive integer power
462  *
463  * Given the taylor coefficients for x, we have to compute the taylor coefficients for f(x),
464  * that is, given tx = (x, x', x'', ...), we compute the coefficients ty = (y, y', y'', ...)
465  * in the taylor expansion of f(x) = x^p.
466  * Thus, y = x^p
467  * = tx[0]^p,
468  * y' = p * x^(p-1) * x'
469  * = p * tx[0]^(p-1) * tx[1],
470  * y'' = 1/2 * p * (p-1) * x^(p-2) * x'^2 + p * x^(p-1) * x''
471  * = 1/2 * p * (p-1) * tx[0]^(p-2) * tx[1]^2 + p * tx[0]^(p-1) * tx[2]
472  */
473  bool forward(
474  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
475  size_t p, /**< highest order Taylor coefficient that we are evaluating */
476  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
477  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
478  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
479  CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
480  )
481  {
482  assert(exponent > 1);
483  assert(tx.size() >= p+1);
484  assert(ty.size() >= p+1);
485  assert(q <= p);
486 
487  if( vx.size() > 0 )
488  {
489  assert(vx.size() == 1);
490  assert(vy.size() == 1);
491  assert(p == 0);
492 
493  vy[0] = vx[0];
494  }
495 
496  if( q == 0 /* q <= 0 && 0 <= p */ )
497  {
498  ty[0] = CppAD::pow(tx[0], exponent);
499  }
500 
501  if( q <= 1 && 1 <= p )
502  {
503  ty[1] = CppAD::pow(tx[0], exponent-1) * tx[1];
504  ty[1] *= double(exponent);
505  }
506 
507  if( q <= 2 && 2 <= p )
508  {
509  if( exponent > 2 )
510  {
511  // ty[2] = 1/2 * exponent * (exponent-1) * pow(tx[0], exponent-2) * tx[1] * tx[1] + exponent * pow(tx[0], exponent-1) * tx[2];
512  ty[2] = CppAD::pow(tx[0], exponent-2) * tx[1] * tx[1];
513  ty[2] *= (exponent-1) / 2.0;
514  ty[2] += CppAD::pow(tx[0], exponent-1) * tx[2];
515  ty[2] *= exponent;
516  }
517  else
518  {
519  assert(exponent == 2);
520  // ty[2] = 1/2 * exponent * tx[1] * tx[1] + exponent * tx[0] * tx[2];
521  ty[2] = tx[1] * tx[1] + 2.0 * tx[0] * tx[2];
522  }
523  }
524 
525  /* higher order derivatives not implemented */
526  if( p > 2 )
527  return false;
528 
529  return true;
530  }
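  /* Worked example (illustration only): for exponent = 3 and Taylor input tx = (2, 1, 0),
   * i.e. x = 2, x' = 1, x'' = 0, the formulas above yield
   *   ty[0] = 2^3 = 8, ty[1] = 3 * 2^2 * 1 = 12, ty[2] = 1/2 * 3 * 2 * 2 * 1^2 + 3 * 2^2 * 0 = 6,
   * which are the Taylor coefficients of (2 + t)^3 = 8 + 12 t + 6 t^2 + t^3 up to order 2. */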
531 
532  /** reverse sweep of positive integer power
533  *
534  * Assume y(x) is a function of the taylor coefficients of f(x) = x^p for x, i.e.,
535  * y(x) = [ x^p, p * x^(p-1) * x', p * (p-1) * x^(p-2) * x'^2 + p * x^(p-1) * x'', ... ].
536  * Then in the reverse sweep we have to compute the elements of \f$\partial h / \partial x^[l], l = 0, ..., k,\f$
537  * where x^[l] is the l'th taylor coefficient (x, x', x'', ...) and h(x) = g(y(x)) for some function g:R^k -> R.
538  * That is, we have to compute
539  *\f$
540  * px[l] = \partial h / \partial x^[l] = (\partial g / \partial y) * (\partial y / \partial x^[l])
541  * = \sum_{i=0}^k (\partial g / \partial y_i) * (\partial y_i / \partial x^[l])
542  * = \sum_{i=0}^k py[i] * (\partial y_i / \partial x^[l])
543  * \f$
544  *
545  * For k = 0, this means
546  *\f$
547  * px[0] = py[0] * (\partial y_0 / \partial x^[0])
548  * = py[0] * (\partial x^p / \partial x)
549  * = py[0] * p * tx[0]^(p-1)
550  *\f$
551  *
552  * For k = 1, this means
553  * \f$
554  * px[0] = py[0] * (\partial y_0 / \partial x^[0]) + py[1] * (\partial y_1 / \partial x^[0])
555  * = py[0] * (\partial x^p / \partial x) + py[1] * (\partial (p * x^(p-1) * x') / \partial x)
556  * = py[0] * p * tx[0]^(p-1) + py[1] * p * (p-1) * tx[0]^(p-2) * tx[1]
557  * px[1] = py[0] * (\partial y_0 / \partial x^[1]) + py[1] * (\partial y_1 / \partial x^[1])
558  * = py[0] * (\partial x^p / \partial x') + py[1] * (\partial (p * x^(p-1) x') / \partial x')
559  * = py[0] * 0 + py[1] * p * tx[0]^(p-1)
560  * \f$
561  */
562  bool reverse(
563  size_t p, /**< highest order Taylor coefficient that we are evaluating */
564  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
565  const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
566  CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
567  const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
568  )
569  { /*lint --e{715}*/
570  assert(exponent > 1);
571  assert(px.size() >= p+1);
572  assert(py.size() >= p+1);
573  assert(tx.size() >= p+1);
574 
575  switch( p )
576  {
577  case 0:
578  // px[0] = py[0] * exponent * pow(tx[0], exponent-1);
579  px[0] = py[0] * CppAD::pow(tx[0], exponent-1);
580  px[0] *= exponent;
581  break;
582 
583  case 1:
584  // px[0] = py[0] * exponent * pow(tx[0], exponent-1) + py[1] * exponent * (exponent-1) * pow(tx[0], exponent-2) * tx[1];
585  px[0] = py[1] * tx[1] * CppAD::pow(tx[0], exponent-2);
586  px[0] *= exponent-1;
587  px[0] += py[0] * CppAD::pow(tx[0], exponent-1);
588  px[0] *= exponent;
589  // px[1] = py[1] * exponent * pow(tx[0], exponent-1);
590  px[1] = py[1] * CppAD::pow(tx[0], exponent-1);
591  px[1] *= exponent;
592  break;
593 
594  default:
595  return false;
596  }
597 
598  return true;
599  }
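  /* Worked example (illustration only): for exponent = 3, p = 0, tx = (2) and py = (1),
   * the case-0 branch above gives px[0] = 1 * 3 * 2^2 = 12, i.e. exactly f'(2) for f(x) = x^3. */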
600 
601  using CppAD::atomic_base<Type>::for_sparse_jac;
602 
603  /** computes sparsity of jacobian during a forward sweep
604  *
605  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
606  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
607  */
608  bool for_sparse_jac(
609  size_t q, /**< number of columns in R */
610  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
611  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
612  )
613  {
614  return univariate_for_sparse_jac(q, r, s);
615  }
616 
617  using CppAD::atomic_base<Type>::rev_sparse_jac;
618 
619  /** computes sparsity of jacobian during a reverse sweep
620  *
621  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
622  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
623  */
624  bool rev_sparse_jac(
625  size_t q, /**< number of rows in R */
626  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
627  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
628  )
629  {
630  return univariate_rev_sparse_jac(q, r, s);
631  }
632 
633  using CppAD::atomic_base<Type>::rev_sparse_hes;
634 
635  /** computes sparsity of hessian during a reverse sweep
636  *
637  * Assume V(x) = (g(f(x)))'' R with f(x) = x^p for a function g:R->R and a matrix R.
638  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
639  */
640  bool rev_sparse_hes(
641  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
642  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
643  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
644  size_t q, /**< number of columns in R, U, and V */
645  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
646  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
647  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
648  )
649  {
650  return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
651  }
652 };
653 
654 /** power function with natural exponents */
655 template<class Type>
656 static
657 void posintpower(
658  const vector<Type>& in, /**< vector whose first element is the base */
659  vector<Type>& out, /**< vector where to store the result in its first element */
660  size_t exponent /**< exponent */
661  )
662 {
663  static atomic_posintpower<typename Type::value_type> pip;
664  pip(in, out, exponent);
665 }
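/* Usage sketch (illustration only, not part of the original source): posintpower works on
 * one-element vectors of the AD type in use, so recording x^5 on the double tape looks like
 *
 *   vector< CppAD::AD<double> > in(1, x);   // x is an AD<double> living on the active tape
 *   vector< CppAD::AD<double> > out(1);
 *   posintpower(in, out, 5);                // records the atomic operation x -> x^5
 *   CppAD::AD<double> y = out[0];
 *
 * evalIntPower() and the interval version of evalAbs() below follow exactly this pattern. */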
666 
667 #else
668 
669 /** power function with natural exponents */
670 template<class Type>
671 void posintpower(
672  const vector<Type>& in, /**< vector whose first element is the base */
673  vector<Type>& out, /**< vector where to store the result in its first element */
674  size_t exponent /**< exponent */
675  )
676 {
677  out[0] = pow(in[0], (int)exponent);
678 }
679 
680 #endif
681 
682 
683 #ifndef NO_CPPAD_USER_ATOMIC
684 
685 /** Automatic differentiation of x -> sign(x)abs(x)^p, p>=1, as CppAD user-atomic function.
686  *
687  * This class implements forward and reverse operations for the function x -> sign(x)abs(x)^p for use within CppAD.
688  * While we otherwise would have to use discontinuous sign and abs functions, our own implementation allows to provide
689  * a continuously differentiable function.
690  *
691  * @todo treat the exponent as a (variable) argument to the function, with the assumption that we never differentiate w.r.t. it (this should make the approach threadsafe again)
692  */
693 template<class Type>
694 class atomic_signpower : public CppAD::atomic_base<Type>
695 {
696 public:
697  atomic_signpower()
698  : CppAD::atomic_base<Type>("signpower"),
699  exponent(0.0)
700  {
701  /* indicate that we want to use bool-based sparsity pattern */
702  this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
703  }
704 
705 private:
706  /** exponent for use in next call to forward or reverse */
707  SCIP_Real exponent;
708 
709  /** stores exponent corresponding to next call to forward or reverse
710  *
711  * How is this supposed to be threadsafe? (we use only one global instantiation of this class)
712  * TODO according to the CppAD 2018 docu, using this function is deprecated; what is the modern way to do this?
713  */
714  virtual void set_old(size_t id)
715  {
716  exponent = SCIPexprGetSignPowerExponent((SCIP_EXPR*)(void*)id);
717  }
718 
719  /** forward sweep of signpower
720  *
721  * Given the taylor coefficients for x, we have to compute the taylor coefficients for f(x),
722  * that is, given tx = (x, x', x'', ...), we compute the coefficients ty = (y, y', y'', ...)
723  * in the taylor expansion of f(x) = sign(x)abs(x)^p.
724  * Thus, y = sign(x)abs(x)^p
725  * = sign(tx[0])abs(tx[0])^p,
726  * y' = p * abs(x)^(p-1) * x'
727  * = p * abs(tx[0])^(p-1) * tx[1],
728  * y'' = 1/2 * p * (p-1) * sign(x) * abs(x)^(p-2) * x'^2 + p * abs(x)^(p-1) * x''
729  * = 1/2 * p * (p-1) * sign(tx[0]) * abs(tx[0])^(p-2) * tx[1]^2 + p * abs(tx[0])^(p-1) * tx[2]
730  */
731  bool forward(
732  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
733  size_t p, /**< highest order Taylor coefficient that we are evaluating */
734  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
735  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
736  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
737  CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
738  )
739  {
740  assert(exponent > 0.0);
741  assert(tx.size() >= p+1);
742  assert(ty.size() >= p+1);
743  assert(q <= p);
744 
745  if( vx.size() > 0 )
746  {
747  assert(vx.size() == 1);
748  assert(vy.size() == 1);
749  assert(p == 0);
750 
751  vy[0] = vx[0];
752  }
753 
754  if( q == 0 /* q <= 0 && 0 <= p */ )
755  {
756  ty[0] = SIGN(tx[0]) * pow(REALABS(tx[0]), exponent);
757  }
758 
759  if( q <= 1 && 1 <= p )
760  {
761  ty[1] = pow(REALABS(tx[0]), exponent - 1.0) * tx[1];
762  ty[1] *= exponent;
763  }
764 
765  if( q <= 2 && 2 <= p )
766  {
767  if( exponent != 2.0 )
768  {
769  ty[2] = SIGN(tx[0]) * pow(REALABS(tx[0]), exponent - 2.0) * tx[1] * tx[1];
770  ty[2] *= (exponent - 1.0) / 2.0;
771  ty[2] += pow(REALABS(tx[0]), exponent - 1.0) * tx[2];
772  ty[2] *= exponent;
773  }
774  else
775  {
776  // y'' = 2 (1/2 * sign(x) * x'^2 + |x|*x'') = sign(tx[0]) * tx[1]^2 + 2 * abs(tx[0]) * tx[2]
777  ty[2] = SIGN(tx[0]) * tx[1] * tx[1];
778  ty[2] += 2.0 * REALABS(tx[0]) * tx[2];
779  }
780  }
781 
782  /* higher order derivatives not implemented */
783  if( p > 2 )
784  return false;
785 
786  return true;
787  }
788 
789  /** reverse sweep of signpower
790  *
791  * Assume y(x) is a function of the taylor coefficients of f(x) = sign(x)|x|^p for x, i.e.,
792  * y(x) = [ f(x), f'(x), f''(x), ... ].
793  * Then in the reverse sweep we have to compute the elements of \f$\partial h / \partial x^[l], l = 0, ..., k,\f$
794  * where x^[l] is the l'th taylor coefficient (x, x', x'', ...) and h(x) = g(y(x)) for some function g:R^k -> R.
795  * That is, we have to compute
796  *\f$
797  * px[l] = \partial h / \partial x^[l] = (\partial g / \partial y) * (\partial y / \partial x^[l])
798  * = \sum_{i=0}^k (\partial g / \partial y_i) * (\partial y_i / \partial x^[l])
799  * = \sum_{i=0}^k py[i] * (\partial y_i / \partial x^[l])
800  *\f$
801  *
802  * For k = 0, this means
803  *\f$
804  * px[0] = py[0] * (\partial y_0 / \partial x^[0])
805  * = py[0] * (\partial f(x) / \partial x)
806  * = py[0] * p * abs(tx[0])^(p-1)
807  * \f$
808  *
809  * For k = 1, this means
810  *\f$
811  * px[0] = py[0] * (\partial y_0 / \partial x^[0]) + py[1] * (\partial y_1 / \partial x^[0])
812  * = py[0] * (\partial f(x) / \partial x) + py[1] * (\partial f'(x) / \partial x)
813  * = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
814  * px[1] = py[0] * (\partial y_0 / \partial x^[1]) + py[1] * (\partial y_1 / \partial x^[1])
815  * = py[0] * (\partial f(x) / \partial x') + py[1] * (\partial f'(x) / \partial x')
816  * = py[0] * 0 + py[1] * p * abs(tx[0])^(p-1)
817  * \f$
818  */
819  bool reverse(
820  size_t p, /**< highest order Taylor coefficient that we are evaluating */
821  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
822  const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
823  CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
824  const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
825  )
826  { /*lint --e{715}*/
827  assert(exponent > 1);
828  assert(px.size() >= p+1);
829  assert(py.size() >= p+1);
830  assert(tx.size() >= p+1);
831 
832  switch( p )
833  {
834  case 0:
835  // px[0] = py[0] * p * pow(abs(tx[0]), p-1);
836  px[0] = py[0] * pow(REALABS(tx[0]), exponent - 1.0);
837  px[0] *= exponent;
838  break;
839 
840  case 1:
841  if( exponent != 2.0 )
842  {
843  // px[0] = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
844  px[0] = py[1] * tx[1] * pow(REALABS(tx[0]), exponent - 2.0) * SIGN(tx[0]);
845  px[0] *= exponent - 1.0;
846  px[0] += py[0] * pow(REALABS(tx[0]), exponent - 1.0);
847  px[0] *= exponent;
848  // px[1] = py[1] * p * abs(tx[0])^(p-1)
849  px[1] = py[1] * pow(REALABS(tx[0]), exponent - 1.0);
850  px[1] *= exponent;
851  }
852  else
853  {
854  // px[0] = py[0] * 2.0 * abs(tx[0]) + py[1] * 2.0 * sign(tx[0]) * tx[1]
855  px[0] = py[1] * tx[1] * SIGN(tx[0]);
856  px[0] += py[0] * REALABS(tx[0]);
857  px[0] *= 2.0;
858  // px[1] = py[1] * 2.0 * abs(tx[0])
859  px[1] = py[1] * REALABS(tx[0]);
860  px[1] *= 2.0;
861  }
862  break;
863 
864  default:
865  return false;
866  }
867 
868  return true;
869  }
870 
871  using CppAD::atomic_base<Type>::for_sparse_jac;
872 
873  /** computes sparsity of jacobian during a forward sweep
874  *
875  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
876  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
877  */
878  bool for_sparse_jac(
879  size_t q, /**< number of columns in R */
880  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
881  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
882  )
883  {
884  return univariate_for_sparse_jac(q, r, s);
885  }
886 
887  using CppAD::atomic_base<Type>::rev_sparse_jac;
888 
889  /** computes sparsity of jacobian during a reverse sweep
890  *
891  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
892  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
893  */
894  bool rev_sparse_jac(
895  size_t q, /**< number of rows in R */
896  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
897  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
898  )
899  {
900  return univariate_rev_sparse_jac(q, r, s);
901  }
902 
903  using CppAD::atomic_base<Type>::rev_sparse_hes;
904 
905  /** computes sparsity of hessian during a reverse sweep
906  *
907  * Assume V(x) = (g(f(x)))'' R with f(x) = sign(x)abs(x)^p for a function g:R->R and a matrix R.
908  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
909  */
910  bool rev_sparse_hes(
911  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
912  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
913  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
914  size_t q, /**< number of columns in S and R */
915  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
916  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
917  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
918  )
919  {
920  return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
921  }
922 
923 };
924 
925 /** Specialization of atomic_signpower template for intervals */
926 template<>
927 class atomic_signpower<SCIPInterval> : public CppAD::atomic_base<SCIPInterval>
928 {
929 public:
930  atomic_signpower()
931  : CppAD::atomic_base<SCIPInterval>("signpowerint"),
932  exponent(0.0)
933  {
934  /* indicate that we want to use bool-based sparsity pattern */
935  this->option(CppAD::atomic_base<SCIPInterval>::bool_sparsity_enum);
936  }
937 
938 private:
939  /** exponent for use in next call to forward or reverse */
940  SCIP_Real exponent;
941 
942  /** stores exponent corresponding to next call to forward or reverse
943  *
944  * How is this supposed to be threadsafe? (we use only one global instantiation of this class)
945  * TODO according to the CppAD 2018 docu, using this function is deprecated; what is the modern way to do this?
946  */
947  virtual void set_old(size_t id)
948  {
949  exponent = SCIPexprGetSignPowerExponent((SCIP_EXPR*)(void*)id);
950  }
951 
952  /** specialization of atomic_signpower::forward template for SCIPinterval
953  *
954  * @todo try to compute tighter resultants
955  */
956  bool forward(
957  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
958  size_t p, /**< highest order Taylor coefficient that we are evaluating */
959  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
960  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
961  const CppAD::vector<SCIPInterval>& tx, /**< values for taylor coefficients of x */
962  CppAD::vector<SCIPInterval>& ty /**< vector to store taylor coefficients of y */
963  )
964  {
965  assert(exponent > 0.0);
966  assert(tx.size() >= p+1);
967  assert(ty.size() >= p+1);
968  assert(q <= p);
969 
970  if( vx.size() > 0 )
971  {
972  assert(vx.size() == 1);
973  assert(vy.size() == 1);
974  assert(p == 0);
975 
976  vy[0] = vx[0];
977  }
978 
979  if( q == 0 /* q <= 0 && 0 <= p */ )
980  {
981  ty[0] = CppAD::signpow(tx[0], exponent);
982  }
983 
984  if( q <= 1 && 1 <= p )
985  {
986  ty[1] = CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0) * tx[1];
987  ty[1] *= exponent;
988  }
989 
990  if( q <= 2 && 2 <= p )
991  {
992  if( exponent != 2.0 )
993  {
994  ty[2] = CppAD::signpow(tx[0], exponent - 2.0) * CppAD::square(tx[1]);
995  ty[2] *= (exponent - 1.0) / 2.0;
996  ty[2] += CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0) * tx[2];
997  ty[2] *= exponent;
998  }
999  else
1000  {
1001  // y'' = 2 (1/2 * sign(x) * x'^2 + |x|*x'') = sign(tx[0]) * tx[1]^2 + 2 * abs(tx[0]) * tx[2]
1002  ty[2] = CppAD::sign(tx[0]) * CppAD::square(tx[1]);
1003  ty[2] += 2.0 * CppAD::abs(tx[0]) * tx[2];
1004  }
1005  }
1006 
1007  /* higher order derivatives not implemented */
1008  if( p > 2 )
1009  return false;
1010 
1011  return true;
1012  }
1013 
1014  /** specialization of atomic_signpower::reverse template for SCIPinterval
1015  *
1016  * @todo try to compute tighter resultants
1017  */
1018  bool reverse(
1019  size_t p, /**< highest order Taylor coefficient that we are evaluating */
1020  const CppAD::vector<SCIPInterval>& tx, /**< values for taylor coefficients of x */
1021  const CppAD::vector<SCIPInterval>& ty, /**< values for taylor coefficients of y */
1022  CppAD::vector<SCIPInterval>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
1023  const CppAD::vector<SCIPInterval>& py /**< values for partial derivatives of g(x) w.r.t. y */
1024  )
1025  { /*lint --e{715} */
1026  assert(exponent > 1);
1027  assert(px.size() >= p+1);
1028  assert(py.size() >= p+1);
1029  assert(tx.size() >= p+1);
1030 
1031  switch( p )
1032  {
1033  case 0:
1034  // px[0] = py[0] * p * pow(abs(tx[0]), p-1);
1035  px[0] = py[0] * CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0);
1036  px[0] *= exponent;
1037  break;
1038 
1039  case 1:
1040  if( exponent != 2.0 )
1041  {
1042  // px[0] = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
1043  px[0] = py[1] * tx[1] * CppAD::signpow(tx[0], exponent - 2.0);
1044  px[0] *= exponent - 1.0;
1045  px[0] += py[0] * CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0);
1046  px[0] *= exponent;
1047  // px[1] = py[1] * p * abs(tx[0])^(p-1)
1048  px[1] = py[1] * CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0);
1049  px[1] *= exponent;
1050  }
1051  else
1052  {
1053  // px[0] = py[0] * 2.0 * abs(tx[0]) + py[1] * 2.0 * sign(tx[0]) * tx[1]
1054  px[0] = py[1] * tx[1] * CppAD::sign(tx[0]);
1055  px[0] += py[0] * CppAD::abs(tx[0]);
1056  px[0] *= 2.0;
1057  // px[1] = py[1] * 2.0 * abs(tx[0])
1058  px[1] = py[1] * CppAD::abs(tx[0]);
1059  px[1] *= 2.0;
1060  }
1061  break;
1062 
1063  default:
1064  return false;
1065  }
1066 
1067  return true;
1068  }
1069 
1070  using CppAD::atomic_base<SCIPInterval>::for_sparse_jac;
1071 
1072  /** computes sparsity of jacobian during a forward sweep
1073  *
1074  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
1075  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
1076  */
1077  bool for_sparse_jac(
1078  size_t q, /**< number of columns in R */
1079  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
1080  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
1081  )
1082  {
1083  return univariate_for_sparse_jac(q, r, s);
1084  }
1085 
1086  using CppAD::atomic_base<SCIPInterval>::rev_sparse_jac;
1087 
1088  /** computes sparsity of jacobian during a reverse sweep
1089  *
1090  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
1091  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
1092  */
1093  bool rev_sparse_jac(
1094  size_t q, /**< number of rows in R */
1095  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
1096  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
1097  )
1098  {
1099  return univariate_rev_sparse_jac(q, r, s);
1100  }
1101 
1102  using CppAD::atomic_base<SCIPInterval>::rev_sparse_hes;
1103 
1104  /** computes sparsity of hessian during a reverse sweep
1105  *
1106  * Assume V(x) = (g(f(x)))'' R with f(x) = sign(x)abs(x)^p for a function g:R->R and a matrix R.
1107  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
1108  */
1109  bool rev_sparse_hes(
1110  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
1111  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
1112  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
1113  size_t q, /**< number of columns in S and R */
1114  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
1115  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
1116  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
1117  )
1118  {
1119  return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
1120  }
1121 };
1122 
1123 /** template for evaluation for signpower operator */
1124 template<class Type>
1125 static
1126 void evalSignPower(
1127  Type& resultant, /**< resultant */
1128  const Type& arg, /**< operand */
1129  SCIP_EXPR* expr /**< expression that holds the exponent */
1130  )
1131 {
1132  vector<Type> in(1, arg);
1133  vector<Type> out(1);
1134 
1136  sp(in, out, (size_t)(void*)expr);
1137 
1138  resultant = out[0];
1139  return;
1140 }
1141 
1142 #else
1143 
1144 /** template for evaluation for signpower operator
1145  *
1146  * Only implemented for real numbers, thus gives error by default.
1147  */
1148 template<class Type>
1149 static
1150 void evalSignPower(
1151  Type& resultant, /**< resultant */
1152  const Type& arg, /**< operand */
1153  SCIP_EXPR* expr /**< expression that holds the exponent */
1154  )
1155 { /*lint --e{715}*/
1156  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1157  "evalSignPower()",
1158  "Error: SignPower not implemented for this value type"
1159  );
1160 }
1161 
1162 /** specialization of signpower evaluation for real numbers */
1163 template<>
1164 void evalSignPower(
1165  CppAD::AD<double>& resultant, /**< resultant */
1166  const CppAD::AD<double>& arg, /**< operand */
1167  SCIP_EXPR* expr /**< expression that holds the exponent */
1168  )
1169 {
1170  SCIP_Real exponent;
1171 
1172  exponent = SCIPexprGetSignPowerExponent(expr);
1173 
1174  if( arg == 0.0 )
1175  resultant = 0.0;
1176  else if( arg > 0.0 )
1177  resultant = pow( arg, exponent);
1178  else
1179  resultant = -pow(-arg, exponent);
1180 }
1181 
1182 #endif
1183 
1184 
1185 #ifndef NO_CPPAD_USER_ATOMIC
1186 
1187 template<class Type>
1188 SCIP_RETCODE exprEvalUser(
1189  SCIP_EXPR* expr,
1190  Type* x,
1191  Type& funcval,
1192  Type* gradient,
1193  Type* hessian
1194  )
1195 {
1196  return SCIPexprEvalUser(expr, x, &funcval, gradient, hessian); /*lint !e429*/
1197 }
1198 
1199 template<>
1200 SCIP_RETCODE exprEvalUser(
1201  SCIP_EXPR* expr,
1202  SCIPInterval* x,
1203  SCIPInterval& funcval,
1204  SCIPInterval* gradient,
1205  SCIPInterval* hessian
1206  )
1207 {
1208  return SCIPexprEvalIntUser(expr, SCIPInterval::infinity, x, &funcval, gradient, hessian);
1209 }
1210 
1211 /** Automatic differentiation of user expression as CppAD user-atomic function.
1212  *
1213  * This class implements forward and reverse operations for a function given by a user expression for use within CppAD.
1214  */
1215 template<class Type>
1216 class atomic_userexpr : public CppAD::atomic_base<Type>
1217 {
1218 public:
1219  atomic_userexpr()
1220  : CppAD::atomic_base<Type>("userexpr"),
1221  expr(NULL)
1222  {
1223  /* indicate that we want to use bool-based sparsity pattern */
1224  this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
1225  }
1226 
1227 private:
1228  /** user expression */
1229  SCIP_EXPR* expr;
1230 
1231  /** stores user expression corresponding to next call to forward or reverse
1232  *
1233  * how is this supposed to be threadsafe? (we use only one global instantiation of this class)
1234  * TODO according to the CppAD 2018 docu, using this function is deprecated; what is the modern way to do this?
1235  */
1236  virtual void set_old(size_t id)
1237  {
1238  expr = (SCIP_EXPR*)(void*)id;
1239  assert(SCIPexprGetOperator(expr) == SCIP_EXPR_USER);
1240  }
1241 
1242  /** forward sweep of userexpr
1243  *
1244  * We follow http://www.coin-or.org/CppAD/Doc/atomic_forward.xml
1245  * Note, that p and q are interchanged!
1246  *
1247  * For a scalar variable t, let
1248  * Y(t) = f(X(t))
1249  * X(t) = x^0 + x^1 t^1 + ... + x^p t^p
1250  * where for x^i the i is an index, while for t^i the i is an exponent.
1251  * Thus, x^k = 1/k! X^(k) (0), where X^(k)(.) denotes the k-th derivative.
1252  *
1253  * Next, let y^k = 1/k! Y^(k)(0) be the k'th taylor coefficient of Y. Thus,
1254  * y^0 = Y^(0)(0) = Y(0) = f(X(0)) = f(x^0)
1255  * y^1 = Y^(1)(0) = Y'(0) = f'(X(0)) * X'(0) = f'(x^0) * x^1
1256  * y^2 = 1/2 Y^(2)(0) = 1/2 Y''(0) = 1/2 X'(0) * f''(X(0)) X'(0) + 1/2 * f'(X(0)) * X''(0) = 1/2 x^1 * f''(x^0) * x^1 + f'(x^0) * x^2
1257  *
1258  * As x^k = (tx[k], tx[(p+1)+k], tx[2*(p+1)+k], ..., tx[n*(p+1)+k], we get
1259  * ty[0] = y^0 = f(x^0) = f(tx[{1..n}*(p+1)])
1260  * ty[1] = y^1 = f'(x^0) * tx[{1..n}*(p+1)+1] = sum(i=1..n, grad[i] * tx[i*(p+1)+1]), where grad = f'(x^0)
1261  * ty[2] = 1/2 sum(i,j=1..n, tx[i*(p+1)+1] * tx[j*(p+1)+1] * hessian[i,j]) + sum(i=1..n, grad[i] * tx[i*(p+1)+2])
1262  */
1263  bool forward(
1264  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
1265  size_t p, /**< highest order Taylor coefficient that we are evaluating */
1266  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
1267  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
1268  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
1269  CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
1270  )
1271  {
1272  assert(expr != NULL);
1273  assert(ty.size() == p+1);
1274  assert(q <= p);
1275 
1276  size_t n = tx.size() / (p+1);
1277  assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1278  assert(n >= 1);
1279 
1280  if( vx.size() > 0 )
1281  {
1282  assert(vx.size() == n);
1283  assert(vy.size() == 1);
1284  assert(p == 0);
1285 
1286  /* y_0 is a variable if at least one of the x_i is a variable */
1287  vy[0] = false;
1288  for( size_t i = 0; i < n; ++i )
1289  if( vx[i] )
1290  {
1291  vy[0] = true;
1292  break;
1293  }
1294  }
1295 
1296  Type* x = new Type[n];
1297  Type* gradient = NULL;
1298  Type* hessian = NULL;
1299 
1300  if( q <= 2 && 1 <= p )
1301  gradient = new Type[n];
1302  if( q <= 2 && 2 <= p )
1303  hessian = new Type[n*n];
1304 
1305  for( size_t i = 0; i < n; ++i )
1306  x[i] = tx[i * (p+1) + 0]; /*lint !e835*/
1307 
1308  if( exprEvalUser(expr, x, ty[0], gradient, hessian) != SCIP_OKAY )
1309  {
1310  delete[] x;
1311  delete[] gradient;
1312  delete[] hessian;
1313  return false;
1314  }
1315 
1316  if( gradient != NULL )
1317  {
1318  ty[1] = 0.0;
1319  for( size_t i = 0; i < n; ++i )
1320  ty[1] += gradient[i] * tx[i * (p+1) + 1];
1321  }
1322 
1323  if( hessian != NULL )
1324  {
1325  assert(gradient != NULL);
1326 
1327  ty[2] = 0.0;
1328  for( size_t i = 0; i < n; ++i )
1329  {
1330  for( size_t j = 0; j < n; ++j )
1331  ty[2] += 0.5 * hessian[i*n+j] * tx[i * (p+1) + 1] * tx[j * (p+1) + 1];
1332 
1333  ty[2] += gradient[i] * tx[i * (p+1) + 2];
1334  }
1335  }
1336 
1337  delete[] x;
1338  delete[] gradient;
1339  delete[] hessian;
1340 
1341  /* higher order derivatives not implemented */
1342  if( p > 2 )
1343  return false;
1344 
1345  return true;
1346  }
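  /* Illustration (not part of the original source) of the Taylor coefficient layout used above:
   * for n = 2 children and p = 2, tx has length n*(p+1) = 6 and is laid out as
   *   tx = ( x_1, x_1', x_1'',  x_2, x_2', x_2'' ),
   * so x[i] = tx[i*(p+1)] collects the order-0 coefficients, tx[i*(p+1)+1] the order-1 ones, etc. */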
1347 
1348  /** reverse sweep of userexpr
1349  *
1350  * We follow http://www.coin-or.org/CppAD/Doc/atomic_reverse.xml
1351  * Note, that there q is our p.
1352  *
1353  * For a scalar variable t, let
1354  * Y(t) = f(X(t))
1355  * X(t) = x^0 + x^1 t^1 + ... + x^p t^p
1356  * where for x^i the i is an index, while for t^i the i is an exponent.
1357  * Thus, x^k = 1/k! X^(k) (0), where X^(k)(.) denotes the k-th derivative.
1358  *
1359  * Next, let y^k = 1/k! Y^(k)(0) be the k'th taylor coefficient of Y. Thus,
1360  * Y(t) = y^0 + y^1 t^1 + y^2 t^2 + ...
1361  * y^0, y^1, ... are the taylor coefficients of f(x).
1362  *
1363  * Further, let F(x^0,..,x^p) by given as F^k(x) = y^k. Thus,
1364  * F^0(x) = y^0 = Y^(0)(0) = f(x^0)
1365  * F^1(x) = y^1 = Y^(1)(0) = f'(x^0) * x^1
1366  * F^2(x) = y^2 = 1/2 Y''(0) = 1/2 x^1 f''(x^0) x^1 + f'(x^0) x^2
1367  *
1368  * Given functions G: R^(p+1) -> R and H: R^(n*(p+1)) -> R, where H(x^0, x^1, .., x^p) = G(F(x^0,..,x^p)),
1369  * we have to return the value of \f$\partial H / \partial x^l, l = 0..p,\f$ in px. Therefore,
1370  * \f$
1371  * px^l = \partial H / \partial x^l
1372  * = sum(k=0..p, (\partial G / \partial y^k) * (\partial y^k / \partial x^l))
1373  * = sum(k=0..p, py[k] * (\partial F^k / \partial x^l))
1374  * \f$
1375  *
1376  * For p = 0, this means
1377  * \f$
1378  * px^0 = py[0] * \partial F^0 / \partial x^0
1379  * = py[0] * \partial f(x^0) / \partial x^0
1380  * = py[0] * f'(x^0)
1381  * \f$
1382  *
1383  * For p = 1, this means (for l = 0):
1384  * \f[
1385  * px^0 = py[0] * \partial F^0 / \partial x^0 + py[1] * \partial F^1 / \partial x^0
1386  * = py[0] * \partial f(x^0) / \partial x^0 + py[1] * \partial (f'(x^0) * x^1) / \partial x^0
1387  * = py[0] * f'(x^0) + py[1] * f''(x^0) * x^1
1388  * \f]
1389  * and (for l=1):
1390  * \f[
1391  * px^1 = py[0] * \partial F^0 / \partial x^1 + py[1] * \partial F^1 / \partial x^1
1392  * = py[0] * \partial f(x^0) / \partial x^1 + py[1] * \partial (f'(x^0) * x^1) / \partial x^1
1393  * = py[0] * 0 + py[1] * f'(x^0)
1394  * \f]
1395  *
1396  * As x^k = (tx[k], tx[(p+1)+k], tx[2*(p+1)+k], ..., tx[n*(p+1)+k] and
1397  * px^k = (px[k], px[(p+1)+k], px[2*(p+1)+k], ..., px[n*(p+1)+k], we get
1398  * for p = 0:
1399  * px[i] = (px^0)_i = py[0] * grad[i]
1400  * for p = 1:
1401  * px[i*2+0] = (px^0)_i = py[0] * grad[i] + py[1] * sum(j, hessian[j,i] * tx[j*2+1])
1402  * px[i*2+1] = (px^1)_i = py[1] * grad[i]
1403  */
1404  bool reverse(
1405  size_t p, /**< highest order Taylor coefficient that we are evaluating */
1406  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
1407  const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
1408  CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
1409  const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
1410  )
1411  {
1412  assert(expr != NULL);
1413  assert(px.size() == tx.size());
1414  assert(py.size() == p+1);
1415 
1416  size_t n = tx.size() / (p+1);
1417  assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1418  assert(n >= 1);
1419 
1420  Type* x = new Type[n];
1421  Type funcval;
1422  Type* gradient = new Type[n];
1423  Type* hessian = NULL;
1424 
1425  if( p == 1 )
1426  hessian = new Type[n*n];
1427 
1428  for( size_t i = 0; i < n; ++i )
1429  x[i] = tx[i * (p+1) + 0]; /*lint !e835*/
1430 
1431  if( exprEvalUser(expr, x, funcval, gradient, hessian) != SCIP_OKAY )
1432  {
1433  delete[] x;
1434  delete[] gradient;
1435  delete[] hessian;
1436  return false;
1437  }
1438 
1439  switch( p )
1440  {
1441  case 0:
1442  // px[j] = (px^0)_j = py[0] * grad[j]
1443  for( size_t i = 0; i < n; ++i )
1444  px[i] = py[0] * gradient[i];
1445  break;
1446 
1447  case 1:
1448  // px[i*2+0] = (px^0)_i = py[0] * grad[i] + py[1] * sum(j, hessian[j,i] * tx[j*2+1])
1449  // px[i*2+1] = (px^1)_i = py[1] * grad[i]
1450  assert(hessian != NULL);
1451  for( size_t i = 0; i < n; ++i )
1452  {
1453  px[i*2+0] = py[0] * gradient[i]; /*lint !e835*/
1454  for( size_t j = 0; j < n; ++j )
1455  px[i*2+0] += py[1] * hessian[i+n*j] * tx[j*2+1]; /*lint !e835*/
1456 
1457  px[i*2+1] = py[1] * gradient[i];
1458  }
1459  break;
1460 
1461  default:
1462  return false;
1463  }
1464 
1465  return true;
1466  } /*lint !e715*/
1467 
1468  using CppAD::atomic_base<Type>::for_sparse_jac;
1469 
1470  /** computes sparsity of jacobian during a forward sweep
1471  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
1472  * Since we assume f'(x) to be dense, the sparsity of S will be the sparsity of R.
1473  */
1474  bool for_sparse_jac(
1475  size_t q, /**< number of columns in R */
1476  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
1477  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
1478  )
1479  {
1480  assert(expr != NULL);
1481  assert(s.size() == q);
1482 
1483  size_t n = r.size() / q;
1484  assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1485 
1486  // sparsity for S(x) = f'(x) * R
1487  for( size_t j = 0; j < q; j++ )
1488  {
1489  s[j] = false;
1490  for( size_t i = 0; i < n; i++ )
1491  s[j] |= (bool)r[i * q + j]; /*lint !e1786*/
1492  }
1493 
1494  return true;
1495  }
1496 
1497  using CppAD::atomic_base<Type>::rev_sparse_jac;
1498 
1499  /** computes sparsity of jacobian during a reverse sweep
1500  * For a q x 1 matrix S, we have to return the sparsity pattern of the q x 1 matrix R(x) = S * f'(x).
1501  * Since we assume f'(x) to be dense, the sparsity of R will be the sparsity of S.
1502  */
1503  bool rev_sparse_jac(
1504  size_t q, /**< number of rows in R */
1505  const CppAD::vector<bool>& rt, /**< sparsity of R, rowwise */
1506  CppAD::vector<bool>& st /**< vector to store sparsity of S, rowwise */
1507  )
1508  {
1509  assert(expr != NULL);
1510  assert(rt.size() == q);
1511 
1512  size_t n = st.size() / q;
1513  assert(n == (size_t)SCIPexprGetNChildren(expr));
1514 
1515  // sparsity for S(x)^T = f'(x)^T * R^T
1516  for( size_t j = 0; j < q; j++ )
1517  for( size_t i = 0; i < n; i++ )
1518  st[i * q + j] = rt[j];
1519 
1520  return true;
1521  }
1522 
1523  using CppAD::atomic_base<Type>::rev_sparse_hes;
1524 
1525  /** computes sparsity of hessian during a reverse sweep
1526  * Assume V(x) = (g(f(x)))'' R for a function g:R->R and a matrix R.
1527  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
1528  */
1529  bool rev_sparse_hes(
1530  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
1531  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
1532  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
1533  size_t q, /**< number of columns in S and R */
1534  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
1535  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
1536  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
1537  )
1538  {
1539  assert(expr != NULL);
1540  size_t n = vx.size();
1541  assert((size_t)SCIPexprGetNChildren(expr) == n);
1542  assert(s.size() == 1);
1543  assert(t.size() == n);
1544  assert(r.size() == n * q);
1545  assert(u.size() == q);
1546  assert(v.size() == n * q);
1547 
1548  size_t i, j, k;
1549 
1550  // sparsity for T(x) = S(x) * f'(x)
1551  for( i = 0; i < n; ++i )
1552  t[i] = s[0];
1553 
1554  // V(x) = f'(x)^T * g''(y) * f'(x) * R + g'(y) * f''(x) * R
1555  // U(x) = g''(y) * f'(x) * R
1556  // S(x) = g'(y)
1557 
1558  // back propagate the sparsity for U
1559  for( j = 0; j < q; j++ )
1560  for( i = 0; i < n; i++ )
1561  v[ i * q + j] = u[j];
1562 
1563  // include forward Jacobian sparsity in Hessian sparsity
1564  // sparsity for g'(y) * f''(x) * R (Note f''(x) is assumed to be dense)
1565  if( s[0] )
1566  for( j = 0; j < q; j++ )
1567  for( i = 0; i < n; i++ )
1568  for( k = 0; k < n; ++k )
1569  v[ i * q + j] |= (bool) r[ k * q + j];
1570 
1571  return true;
1572  }
1573 
1574 };
1575 
1576 template<class Type>
1577 static
1578 void evalUser(
1579  Type& resultant, /**< resultant */
1580  const Type* args, /**< operands */
1581  SCIP_EXPR* expr /**< expression that holds the user expression */
1582  )
1583 {
1584  assert( args != 0 );
1585  vector<Type> in(args, args + SCIPexprGetNChildren(expr));
1586  vector<Type> out(1);
1587 
1589  u(in, out, (size_t)(void*)expr);
1590 
1591  resultant = out[0];
1592  return;
1593 }
1594 
1595 #else
1596 
1597 template<class Type>
1598 static
1599 void evalUser(
1600  Type& resultant, /**< resultant */
1601  const Type* args, /**< operands */
1602  SCIP_EXPR* expr /**< expression that holds the user expression */
1603  )
1604 {
1605  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1606  "evalUser()",
1607  "Error: user expressions in CppAD not possible without CppAD user atomic facility"
1608  );
1609 }
1610 
1611 #endif
1612 
1613 /** template for evaluation for minimum operator
1614  *
1615  * Only implemented for real numbers, thus gives error by default.
1616  * @todo implement own userad function
1617  */
1618 template<class Type>
1619 static
1620 void evalMin(
1621  Type& resultant, /**< resultant */
1622  const Type& arg1, /**< first operand */
1623  const Type& arg2 /**< second operand */
1624  )
1625 { /*lint --e{715,1764}*/
1626  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1627  "evalMin()",
1628  "Error: Min not implemented for this value type"
1629  );
1630 }
1631 
1632 /** specialization of minimum evaluation for real numbers */
1633 template<>
1634 void evalMin(
1635  CppAD::AD<double>& resultant, /**< resultant */
1636  const CppAD::AD<double>& arg1, /**< first operand */
1637  const CppAD::AD<double>& arg2 /**< second operand */
1638  )
1639 {
1640  resultant = MIN(arg1, arg2);
1641 }
1642 
1643 /** template for evaluation for maximum operator
1644  *
1645  * Only implemented for real numbers, thus gives error by default.
1646  * @todo implement own userad function
1647  */
1648 template<class Type>
1649 static
1650 void evalMax(
1651  Type& resultant, /**< resultant */
1652  const Type& arg1, /**< first operand */
1653  const Type& arg2 /**< second operand */
1654  )
1655 { /*lint --e{715,1764}*/
1656  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1657  "evalMax()",
1658  "Error: Max not implemented for this value type"
1659  );
1660 }
1661 
1662 /** specialization of maximum evaluation for real numbers */
1663 template<>
1664 void evalMax(
1665  CppAD::AD<double>& resultant, /**< resultant */
1666  const CppAD::AD<double>& arg1, /**< first operand */
1667  const CppAD::AD<double>& arg2 /**< second operand */
1668  )
1669 {
1670  resultant = MAX(arg1, arg2);
1671 }
1672 
1673 /** template for evaluation for square-root operator
1674  *
1675  * Default is to use the standard sqrt-function.
1676  */
1677 template<class Type>
1678 static
1679 void evalSqrt(
1680  Type& resultant, /**< resultant */
1681  const Type& arg /**< operand */
1682  )
1683 {
1684  resultant = sqrt(arg);
1685 }
1686 
1687 /** template for evaluation for absolute value operator */
1688 template<class Type>
1689 static
1690 void evalAbs(
1691  Type& resultant, /**< resultant */
1692  const Type& arg /**< operand */
1693  )
1694 {
1695  resultant = abs(arg);
1696 }
1697 
1698 /** specialization of absolute value evaluation for intervals
1699  *
1700  * Uses sqrt(x^2) for now; @todo implement own userad function.
1701  */
1702 template<>
1703 void evalAbs(
1704  CppAD::AD<SCIPInterval>& resultant, /**< resultant */
1705  const CppAD::AD<SCIPInterval>& arg /**< operand */
1706  )
1707 {
1708  vector<CppAD::AD<SCIPInterval> > in(1, arg);
1709  vector<CppAD::AD<SCIPInterval> > out(1);
1710 
1711  posintpower(in, out, 2);
1712 
1713  resultant = sqrt(out[0]);
1714 }
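/* Illustrative example (editorial sketch, assuming the usual interval square semantics):
 * for arg = [-2, 1] the construction above yields posintpower(arg, 2) = [0, 4] and thus
 * sqrt([0, 4]) = [0, 2], which coincides with abs([-2, 1]).
 */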
1715 
1716 /** integer power operation for arbitrary integer exponents */
1717 template<class Type>
1718 static
1719 void evalIntPower(
1720  Type& resultant, /**< resultant */
1721  const Type& arg, /**< operand */
1722  const int exponent /**< exponent */
1723  )
1724 {
1725  if( exponent > 1 )
1726  {
1727  vector<Type> in(1, arg);
1728  vector<Type> out(1);
1729 
1730  posintpower(in, out, exponent);
1731 
1732  resultant = out[0];
1733  return;
1734  }
1735 
1736  if( exponent < -1 )
1737  {
1738  vector<Type> in(1, arg);
1739  vector<Type> out(1);
1740 
1741  posintpower(in, out, -exponent);
1742 
1743  resultant = Type(1.0)/out[0];
1744  return;
1745  }
1746 
1747  if( exponent == 1 )
1748  {
1749  resultant = arg;
1750  return;
1751  }
1752 
1753  if( exponent == 0 )
1754  {
1755  resultant = Type(1.0);
1756  return;
1757  }
1758 
1759  assert(exponent == -1);
1760  resultant = Type(1.0)/arg;
1761 }
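/* Editorial sketch (not part of the original source; x and y are hypothetical variables):
 * exponents with |exponent| > 1 are routed through the posintpower user-atomic, and
 * negative exponents add one division, e.g.
 *
 *    Type y;
 *    evalIntPower(y, x, -3);   // equivalent to y = Type(1.0) / posintpower(x, 3)
 *
 * while the exponents 0, 1, and -1 are handled directly without a power operation.
 */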
1762 
1763 /** CppAD compatible evaluation of an expression for given arguments and parameters */
1764 template<class Type>
1765 static
1766 SCIP_RETCODE eval(
1767  SCIP_EXPR* expr, /**< expression */
1768  const vector<Type>& x, /**< values of variables */
1769  SCIP_Real* param, /**< values of parameters */
1770  Type& val /**< buffer to store expression value */
1771  )
1772 {
1773  Type* buf = 0;
1774 
1775  assert(expr != NULL);
1776 
1777  /* todo use SCIP_MAXCHILD_ESTIMATE as in expression.c */
1778 
1779  if( SCIPexprGetNChildren(expr) )
1780  {
1781  if( BMSallocMemoryArray(&buf, SCIPexprGetNChildren(expr)) == NULL ) /*lint !e666*/
1782  return SCIP_NOMEMORY;
1783 
1784  for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
1785  {
1786  SCIP_CALL( eval(SCIPexprGetChildren(expr)[i], x, param, buf[i]) );
1787  }
1788  }
1789 
1790  switch(SCIPexprGetOperator(expr))
1791  {
1792  case SCIP_EXPR_VARIDX:
1793  assert(SCIPexprGetOpIndex(expr) < (int)x.size());
1794  val = x[SCIPexprGetOpIndex(expr)];
1795  break;
1796 
1797  case SCIP_EXPR_CONST:
1798  val = SCIPexprGetOpReal(expr);
1799  break;
1800 
1801  case SCIP_EXPR_PARAM:
1802  assert(param != NULL);
1803  val = param[SCIPexprGetOpIndex(expr)];
1804  break;
1805 
1806  case SCIP_EXPR_PLUS:
1807  assert( buf != 0 );
1808  val = buf[0] + buf[1];
1809  break;
1810 
1811  case SCIP_EXPR_MINUS:
1812  assert( buf != 0 );
1813  val = buf[0] - buf[1];
1814  break;
1815 
1816  case SCIP_EXPR_MUL:
1817  assert( buf != 0 );
1818  val = buf[0] * buf[1];
1819  break;
1820 
1821  case SCIP_EXPR_DIV:
1822  assert( buf != 0 );
1823  val = buf[0] / buf[1];
1824  break;
1825 
1826  case SCIP_EXPR_SQUARE:
1827  assert( buf != 0 );
1828  evalIntPower(val, buf[0], 2);
1829  break;
1830 
1831  case SCIP_EXPR_SQRT:
1832  assert( buf != 0 );
1833  evalSqrt(val, buf[0]);
1834  break;
1835 
1836  case SCIP_EXPR_REALPOWER:
1837  assert( buf != 0 );
1838  val = CppAD::pow(buf[0], SCIPexprGetRealPowerExponent(expr));
1839  break;
1840 
1841  case SCIP_EXPR_INTPOWER:
1842  assert( buf != 0 );
1843  evalIntPower(val, buf[0], SCIPexprGetIntPowerExponent(expr));
1844  break;
1845 
1846  case SCIP_EXPR_SIGNPOWER:
1847  assert( buf != 0 );
1848  evalSignPower(val, buf[0], expr);
1849  break;
1850 
1851  case SCIP_EXPR_EXP:
1852  assert( buf != 0 );
1853  val = exp(buf[0]);
1854  break;
1855 
1856  case SCIP_EXPR_LOG:
1857  assert( buf != 0 );
1858  val = log(buf[0]);
1859  break;
1860 
1861  case SCIP_EXPR_SIN:
1862  assert( buf != 0 );
1863  val = sin(buf[0]);
1864  break;
1865 
1866  case SCIP_EXPR_COS:
1867  assert( buf != 0 );
1868  val = cos(buf[0]);
1869  break;
1870 
1871  case SCIP_EXPR_TAN:
1872  assert( buf != 0 );
1873  val = tan(buf[0]);
1874  break;
1875 #ifdef SCIP_DISABLED_CODE /* these operators are currently disabled */
1876  case SCIP_EXPR_ERF:
1877  assert( buf != 0 );
1878  val = erf(buf[0]);
1879  break;
1880 
1881  case SCIP_EXPR_ERFI:
1882  return SCIP_ERROR;
1883 #endif
1884  case SCIP_EXPR_MIN:
1885  assert( buf != 0 );
1886  evalMin(val, buf[0], buf[1]);
1887  break;
1888 
1889  case SCIP_EXPR_MAX:
1890  assert( buf != 0 );
1891  evalMax(val, buf[0], buf[1]);
1892  break;
1893 
1894  case SCIP_EXPR_ABS:
1895  assert( buf != 0 );
1896  evalAbs(val, buf[0]);
1897  break;
1898 
1899  case SCIP_EXPR_SIGN:
1900  assert( buf != 0 );
1901  val = sign(buf[0]);
1902  break;
1903 
1904  case SCIP_EXPR_SUM:
1905  assert( buf != 0 );
1906  val = 0.0;
1907  for (int i = 0; i < SCIPexprGetNChildren(expr); ++i)
1908  val += buf[i];
1909  break;
1910 
1911  case SCIP_EXPR_PRODUCT:
1912  assert( buf != 0 );
1913  val = 1.0;
1914  for (int i = 0; i < SCIPexprGetNChildren(expr); ++i)
1915  val *= buf[i];
1916  break;
1917 
1918  case SCIP_EXPR_LINEAR:
1919  {
1920  SCIP_Real* coefs;
1921 
1922  coefs = SCIPexprGetLinearCoefs(expr);
1923  assert(coefs != NULL || SCIPexprGetNChildren(expr) == 0);
1924 
1925  assert( buf != 0 );
1926  val = SCIPexprGetLinearConstant(expr);
1927  for (int i = 0; i < SCIPexprGetNChildren(expr); ++i)
1928  val += coefs[i] * buf[i]; /*lint !e613*/
1929  break;
1930  }
1931 
1932  case SCIP_EXPR_QUADRATIC:
1933  {
1934  SCIP_Real* lincoefs;
1935  SCIP_QUADELEM* quadelems;
1936  int nquadelems;
1937  SCIP_Real sqrcoef;
1938  Type lincoef;
1939  vector<Type> in(1);
1940  vector<Type> out(1);
1941 
1942  assert( buf != 0 );
1943 
1944  lincoefs = SCIPexprGetQuadLinearCoefs(expr);
1945  nquadelems = SCIPexprGetNQuadElements(expr);
1946  quadelems = SCIPexprGetQuadElements(expr);
1947  assert(quadelems != NULL || nquadelems == 0);
1948 
1949  SCIPexprSortQuadElems(expr);
1950 
1951  val = SCIPexprGetQuadConstant(expr);
1952 
1953  /* for each argument, we collect its linear coefficient from lincoefs, its square coefficients, and all factors from bilinear terms,
1954  * then we compute the term sqrcoef*x^2 + lincoef*x and add it to the result */
1955  int i = 0;
1956  for( int argidx = 0; argidx < SCIPexprGetNChildren(expr); ++argidx )
1957  {
1958  if( i == nquadelems || quadelems[i].idx1 > argidx ) /*lint !e613*/
1959  {
1960  /* there are no quadratic terms with argidx as their first argument, so this case is easy to handle */
1961  if( lincoefs != NULL )
1962  val += lincoefs[argidx] * buf[argidx];
1963  continue;
1964  }
1965 
1966  sqrcoef = 0.0;
1967  lincoef = lincoefs != NULL ? lincoefs[argidx] : 0.0;
1968 
1969  assert(i < nquadelems && quadelems[i].idx1 == argidx); /*lint !e613*/
1970  do
1971  {
1972  if( quadelems[i].idx2 == argidx ) /*lint !e613*/
1973  sqrcoef += quadelems[i].coef; /*lint !e613*/
1974  else
1975  lincoef += quadelems[i].coef * buf[quadelems[i].idx2]; /*lint !e613*/
1976  ++i;
1977  } while( i < nquadelems && quadelems[i].idx1 == argidx ); /*lint !e613*/
1978  assert(i == nquadelems || quadelems[i].idx1 > argidx); /*lint !e613*/
1979 
1980  /* this is not as good as what we can get from SCIPintervalQuad, but easy to implement */
1981  if( sqrcoef != 0.0 )
1982  {
1983  in[0] = buf[argidx];
1984  posintpower(in, out, 2);
1985  val += sqrcoef * out[0];
1986  }
1987 
1988  val += lincoef * buf[argidx];
1989  }
1990  assert(i == nquadelems);
1991 
1992  break;
1993  }
1994 
1995  case SCIP_EXPR_POLYNOMIAL:
1996  {
1997  SCIP_EXPRDATA_MONOMIAL** monomials;
1998  Type childval;
1999  Type monomialval;
2000  SCIP_Real exponent;
2001  int nmonomials;
2002  int nfactors;
2003  int* childidxs;
2004  SCIP_Real* exponents;
2005  int i;
2006  int j;
2007 
2008  assert( buf != 0 );
2009 
2010  val = SCIPexprGetPolynomialConstant(expr);
2011 
2012  nmonomials = SCIPexprGetNMonomials(expr);
2013  monomials = SCIPexprGetMonomials(expr);
2014 
2015  for( i = 0; i < nmonomials; ++i )
2016  {
2017  nfactors = SCIPexprGetMonomialNFactors(monomials[i]);
2018  childidxs = SCIPexprGetMonomialChildIndices(monomials[i]);
2019  exponents = SCIPexprGetMonomialExponents(monomials[i]);
2020  monomialval = SCIPexprGetMonomialCoef(monomials[i]);
2021 
2022  for( j = 0; j < nfactors; ++j )
2023  {
2024  assert(childidxs[j] >= 0);
2025  assert(childidxs[j] < SCIPexprGetNChildren(expr));
2026 
2027  childval = buf[childidxs[j]];
2028  exponent = exponents[j];
2029 
2030  /* cover some special exponents separately to avoid calling expensive pow function */
2031  if( exponent == 0.0 )
2032  continue;
2033  if( exponent == 1.0 )
2034  {
2035  monomialval *= childval;
2036  continue;
2037  }
2038  if( (int)exponent == exponent )
2039  {
2040  Type tmp;
2041  evalIntPower(tmp, childval, (int)exponent);
2042  monomialval *= tmp;
2043  continue;
2044  }
2045  if( exponent == 0.5 )
2046  {
2047  Type tmp;
2048  evalSqrt(tmp, childval);
2049  monomialval *= tmp;
2050  continue;
2051  }
2052  monomialval *= pow(childval, exponent);
2053  }
2054 
2055  val += monomialval;
2056  }
2057 
2058  break;
2059  }
2060 
2061  case SCIP_EXPR_USER:
2062  evalUser(val, buf, expr);
2063  break;
2064 
2065  case SCIP_EXPR_LAST:
2066  default:
2067  BMSfreeMemoryArrayNull(&buf);
2068  return SCIP_ERROR;
2069  }
2070 
2071  BMSfreeMemoryArrayNull(&buf);
2072 
2073  return SCIP_OKAY;
2074 }
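/* Editorial sketch (hypothetical expression, not from the original source): eval() first
 * evaluates all children into buf[] and then applies the operator of the current node.
 * For a SCIP_EXPR_LINEAR root with constant 0 and coefficients (2, 1) whose children are
 * the variable x0 and the square of x1, evaluating at x = (3, 4) yields buf = {3, 16}
 * and hence val = 2*3 + 1*16 = 22.
 */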
2075 
2076 /** analyzes an expression tree to determine whether it requires retaping on every evaluation
2077  *
2078  * This may be the case if the evaluation sequence depends on values of operands (e.g., in case of abs, sign, signpower, ...).
2079  */
2080 static
2081 void analyzeTree(
2082  SCIP_EXPRINTDATA* data,
2083  SCIP_EXPR* expr
2084  )
2085 {
2086  assert(expr != NULL);
2087  assert(SCIPexprGetChildren(expr) != NULL || SCIPexprGetNChildren(expr) == 0);
2088 
2089  for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
2090  analyzeTree(data, SCIPexprGetChildren(expr)[i]);
2091 
2092  switch( SCIPexprGetOperator(expr) )
2093  {
2094  case SCIP_EXPR_MIN:
2095  case SCIP_EXPR_MAX:
2096  case SCIP_EXPR_ABS:
2097 #ifdef NO_CPPAD_USER_ATOMIC
2098  case SCIP_EXPR_SIGNPOWER:
2099 #endif
2100  data->need_retape_always = true;
2101  break;
2102 
2103  case SCIP_EXPR_USER:
2104  data->userevalcapability &= SCIPexprGetUserEvalCapability(expr);
2105  break;
2106 
2107  default: ;
2108  } /*lint !e788*/
2109 
2110 }
2111 
2112 /** replacement for CppAD's default error handler
2113  *
2114  * In debug mode, CppAD gives an error when an evaluation contains a NaN.
2115  * We do not want to stop execution in such a case, since the calling routine should check for NaNs and decide what to do.
2116  * Since we cannot ignore only this particular error, we ignore all errors.
2117  * @todo find a way to check whether the error corresponds to a nan and communicate this back
2118  */
2119 static
2120 void cppaderrorcallback(
2121  bool known, /**< is the error from a known source? */
2122  int line, /**< line where error occurred */
2123  const char* file, /**< file where error occurred */
2124  const char* cond, /**< error condition */
2125  const char* msg /**< error message */
2126  )
2127 {
2128  SCIPdebugMessage("ignore CppAD error from %sknown source %s:%d: msg: %s exp: %s\n", known ? "" : "un", file, line, msg, cond);
2129 }
2130 
2131 /* install our error handler */
2132 static CppAD::ErrorHandler errorhandler(cppaderrorcallback);
2133 
2134 /** gets name and version of expression interpreter */
2135 const char* SCIPexprintGetName(void)
2136 {
2137  return CPPAD_PACKAGE_STRING;
2138 }
2139 
2140 /** gets descriptive text of expression interpreter */
2141 const char* SCIPexprintGetDesc(void)
2142 {
2143  return "Algorithmic Differentiation of C++ algorithms developed by B. Bell (www.coin-or.org/CppAD)";
2144 }
2145 
2146 /** gets capabilities of expression interpreter (using bitflags) */
2147 SCIP_EXPRINTCAPABILITY SCIPexprintGetCapability(
2148  void
2149  )
2150 {
2151  return SCIP_EXPRINTCAPABILITY_FUNCVALUE | SCIP_EXPRINTCAPABILITY_INTFUNCVALUE |
2152  SCIP_EXPRINTCAPABILITY_GRADIENT | SCIP_EXPRINTCAPABILITY_INTGRADIENT |
2153  SCIP_EXPRINTCAPABILITY_HESSIAN;
2154 }
2155 
2156 /** creates an expression interpreter object */
2157 SCIP_RETCODE SCIPexprintCreate(
2158  BMS_BLKMEM* blkmem, /**< block memory data structure */
2159  SCIP_EXPRINT** exprint /**< buffer to store pointer to expression interpreter */
2160  )
2161 {
2162  assert(blkmem != NULL);
2163  assert(exprint != NULL);
2164 
2165  if( BMSallocBlockMemory(blkmem, exprint) == NULL )
2166  return SCIP_NOMEMORY;
2167 
2168  (*exprint)->blkmem = blkmem;
2169 
2170  return SCIP_OKAY;
2171 }
2172 
2173 /** frees an expression interpreter object */
2174 SCIP_RETCODE SCIPexprintFree(
2175  SCIP_EXPRINT** exprint /**< expression interpreter that should be freed */
2176  )
2177 {
2178  assert( exprint != NULL);
2179  assert(*exprint != NULL);
2180 
2181  BMSfreeBlockMemory((*exprint)->blkmem, exprint);
2182 
2183  return SCIP_OKAY;
2184 }
2185 
2186 /** compiles an expression tree and stores compiled data in expression tree */
2187 SCIP_RETCODE SCIPexprintCompile(
2188  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2189  SCIP_EXPRTREE* tree /**< expression tree */
2190  )
2191 { /*lint --e{429} */
2192  assert(tree != NULL);
2193 
2194  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2195  if (!data)
2196  {
2197  data = new SCIP_EXPRINTDATA();
2198  assert( data != NULL );
2199  SCIPexprtreeSetInterpreterData(tree, data);
2200  SCIPdebugMessage("set interpreter data in tree %p to %p\n", (void*)tree, (void*)data);
2201  }
2202  else
2203  {
2204  data->need_retape = true;
2205  data->int_need_retape = true;
2206  }
2207 
2208  int n = SCIPexprtreeGetNVars(tree);
2209 
2210  data->X.resize(n);
2211  data->x.resize(n);
2212  data->Y.resize(1);
2213 
2214  data->int_X.resize(n);
2215  data->int_x.resize(n);
2216  data->int_Y.resize(1);
2217 
2218  if( data->root != NULL )
2219  {
2220  SCIPexprFreeDeep(exprint->blkmem, &data->root);
2221  }
2222 
2223  SCIP_EXPR* root = SCIPexprtreeGetRoot(tree);
2224 
2225  SCIP_CALL( SCIPexprCopyDeep(exprint->blkmem, &data->root, root) );
2226 
2227  data->blkmem = exprint->blkmem;
2228 
2229  analyzeTree(data, data->root);
2230 
2231  return SCIP_OKAY;
2232 }
2233 
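/* Illustrative usage sketch (editorial; blkmem and tree are assumed to be a valid block
 * memory and an expression tree in two variables, all names hypothetical):
 *
 *    SCIP_EXPRINT* exprint;
 *    SCIP_Real varvals[2] = { 1.0, 2.0 };
 *    SCIP_Real val;
 *
 *    SCIP_CALL( SCIPexprintCreate(blkmem, &exprint) );
 *    SCIP_CALL( SCIPexprintCompile(exprint, tree) );
 *    SCIP_CALL( SCIPexprintEval(exprint, tree, varvals, &val) );
 *    SCIP_CALL( SCIPexprintFree(&exprint) );
 */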
2234 
2235 /** gives the capability to evaluate an expression by the expression interpreter
2236  *
2237  * For user-given expressions, higher-order derivatives may not be available for the user-expression,
2238  * even if the expression interpreter itself could handle them. This method makes it possible to recognize, e.g., that the
2239  * Hessian for an expression is not available because it contains a user expression that does not provide
2240  * Hessians.
2241  */
2242 SCIP_EXPRINTCAPABILITY SCIPexprintGetExprtreeCapability(
2243  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2244  SCIP_EXPRTREE* tree /**< expression tree */
2245  )
2246 {
2247  assert(tree != NULL);
2248 
2249  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2250  assert(data != NULL);
2251 
2252  return data->userevalcapability;
2253 }/*lint !e715*/
2254 
2255 /** frees interpreter data */
2256 SCIP_RETCODE SCIPexprintFreeData(
2257  SCIP_EXPRINTDATA** interpreterdata /**< interpreter data that should be freed */
2258  )
2259 {
2260  assert( interpreterdata != NULL);
2261  assert(*interpreterdata != NULL);
2262 
2263  if( (*interpreterdata)->root != NULL )
2264  SCIPexprFreeDeep((*interpreterdata)->blkmem, &(*interpreterdata)->root);
2265 
2266  delete *interpreterdata;
2267  *interpreterdata = NULL;
2268 
2269  return SCIP_OKAY;
2270 }
2271 
2272 /** notify expression interpreter that a new parameterization is used
2273  *
2274  * This probably causes retaping by AD algorithms.
2275  */
2276 SCIP_RETCODE SCIPexprintNewParametrization(
2277  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2278  SCIP_EXPRTREE* tree /**< expression tree */
2279  )
2280 {
2281  assert(exprint != NULL);
2282  assert(tree != NULL);
2283 
2284  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2285  if( data != NULL )
2286  {
2287  data->need_retape = true;
2288  data->int_need_retape = true;
2289  }
2290 
2291  return SCIP_OKAY;
2292 }
2293 
2294 /** evaluates an expression tree */
2295 SCIP_RETCODE SCIPexprintEval(
2296  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2297  SCIP_EXPRTREE* tree, /**< expression tree */
2298  SCIP_Real* varvals, /**< values of variables */
2299  SCIP_Real* val /**< buffer to store value */
2300  )
2301 {
2302  SCIP_EXPRINTDATA* data;
2303 
2304  assert(exprint != NULL);
2305  assert(tree != NULL);
2306  assert(varvals != NULL);
2307  assert(val != NULL);
2308 
2309  data = SCIPexprtreeGetInterpreterData(tree);
2310  assert(data != NULL);
2311  assert(SCIPexprtreeGetNVars(tree) == (int)data->X.size());
2312  assert(SCIPexprtreeGetRoot(tree) != NULL);
2313 
2314  int n = SCIPexprtreeGetNVars(tree);
2315 
2316  if( n == 0 )
2317  {
2318  SCIP_CALL( SCIPexprtreeEval(tree, NULL, val) );
2319  return SCIP_OKAY;
2320  }
2321 
2322  if( data->need_retape_always || data->need_retape )
2323  {
2324  for( int i = 0; i < n; ++i )
2325  {
2326  data->X[i] = varvals[i];
2327  data->x[i] = varvals[i];
2328  }
2329 
2330  CppAD::Independent(data->X);
2331 
2332  if( data->root != NULL )
2333  SCIP_CALL( eval(data->root, data->X, SCIPexprtreeGetParamVals(tree), data->Y[0]) );
2334  else
2335  data->Y[0] = 0.0;
2336 
2337  data->f.Dependent(data->X, data->Y);
2338 
2339  data->val = Value(data->Y[0]);
2340  SCIPdebugMessage("Eval retaped and computed value %g\n", data->val);
2341 
2342  // the following is required if the gradient shall be computed by a reverse sweep later
2343  // data->val = data->f.Forward(0, data->x)[0];
2344 
2345  data->need_retape = false;
2346  }
2347  else
2348  {
2349  assert((int)data->x.size() >= n);
2350  for( int i = 0; i < n; ++i )
2351  data->x[i] = varvals[i];
2352 
2353  data->val = data->f.Forward(0, data->x)[0]; /*lint !e1793*/
2354  SCIPdebugMessage("Eval used forward sweep to compute value %g\n", data->val);
2355  }
2356 
2357  *val = data->val;
2358 
2359  return SCIP_OKAY;
2360 }
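/* Editorial note (not part of the original source): as long as the recorded tape stays
 * valid, subsequent evaluations only run a zero-order forward sweep
 * (data->f.Forward(0, data->x)), which is much cheaper than retaping; retaping is
 * triggered by SCIPexprintCompile(), SCIPexprintNewParametrization(), or permanently for
 * trees that analyzeTree() flagged as need_retape_always.
 */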
2361 
2362 /** evaluates an expression tree on intervals */
2363 SCIP_RETCODE SCIPexprintEvalInt(
2364  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2365  SCIP_EXPRTREE* tree, /**< expression tree */
2366  SCIP_Real infinity, /**< value for infinity */
2367  SCIP_INTERVAL* varvals, /**< interval values of variables */
2368  SCIP_INTERVAL* val /**< buffer to store interval value of expression */
2369  )
2370 {
2371  SCIP_EXPRINTDATA* data;
2372 
2373  assert(exprint != NULL);
2374  assert(tree != NULL);
2375  assert(varvals != NULL);
2376  assert(val != NULL);
2377 
2378  data = SCIPexprtreeGetInterpreterData(tree);
2379  assert(data != NULL);
2380  assert(SCIPexprtreeGetNVars(tree) == (int)data->int_X.size());
2381  assert(SCIPexprtreeGetRoot(tree) != NULL);
2382 
2383  int n = SCIPexprtreeGetNVars(tree);
2384 
2385  if( n == 0 )
2386  {
2387  SCIP_CALL( SCIPexprtreeEvalInt(tree, infinity, NULL, val) );
2388  return SCIP_OKAY;
2389  }
2390 
2391  SCIPInterval::infinity = infinity;
2392 
2393  if( data->int_need_retape || data->need_retape_always )
2394  {
2395  for( int i = 0; i < n; ++i )
2396  {
2397  data->int_X[i] = varvals[i];
2398  data->int_x[i] = varvals[i];
2399  }
2400 
2401  CppAD::Independent(data->int_X);
2402 
2403  if( data->root != NULL )
2404  SCIP_CALL( eval(data->root, data->int_X, SCIPexprtreeGetParamVals(tree), data->int_Y[0]) );
2405  else
2406  data->int_Y[0] = 0.0;
2407 
2408  data->int_f.Dependent(data->int_X, data->int_Y);
2409 
2410  data->int_val = Value(data->int_Y[0]);
2411 
2412  data->int_need_retape = false;
2413  }
2414  else
2415  {
2416  assert((int)data->int_x.size() >= n);
2417  for( int i = 0; i < n; ++i )
2418  data->int_x[i] = varvals[i];
2419 
2420  data->int_val = data->int_f.Forward(0, data->int_x)[0]; /*lint !e1793*/
2421  }
2422 
2423  *val = data->int_val;
2424 
2425  return SCIP_OKAY;
2426 }
2427 
2428 /** computes value and gradient of an expression tree */
2429 SCIP_RETCODE SCIPexprintGrad(
2430  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2431  SCIP_EXPRTREE* tree, /**< expression tree */
2432  SCIP_Real* varvals, /**< values of variables, can be NULL if new_varvals is FALSE */
2433  SCIP_Bool new_varvals, /**< have variable values changed since last call to a point evaluation routine? */
2434  SCIP_Real* val, /**< buffer to store expression value */
2435  SCIP_Real* gradient /**< buffer to store expression gradient, needs to have length at least SCIPexprtreeGetNVars(tree) */
2436  )
2437 {
2438  assert(exprint != NULL);
2439  assert(tree != NULL);
2440  assert(varvals != NULL || new_varvals == FALSE);
2441  assert(val != NULL);
2442  assert(gradient != NULL);
2443 
2444  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2445  assert(data != NULL);
2446 
2447  if( new_varvals )
2448  {
2449  SCIP_CALL( SCIPexprintEval(exprint, tree, varvals, val) );
2450  }
2451  else
2452  *val = data->val;
2453 
2454  int n = SCIPexprtreeGetNVars(tree);
2455 
2456  if( n == 0 )
2457  return SCIP_OKAY;
2458 
2459  vector<double> jac(data->f.Jacobian(data->x));
2460 
2461  for( int i = 0; i < n; ++i )
2462  gradient[i] = jac[i];
2463 
2464 /* disable debug output since we have no message handler here
2465 #ifdef SCIP_DEBUG
2466  SCIPdebugMessage("Grad for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2467  SCIPdebugMessage("x ="); for (int i = 0; i < n; ++i) printf("\t %g", data->x[i]); printf("\n");
2468  SCIPdebugMessage("grad ="); for (int i = 0; i < n; ++i) printf("\t %g", gradient[i]); printf("\n");
2469 #endif
2470 */
2471 
2472  return SCIP_OKAY;
2473 }
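/* Illustrative usage sketch (editorial; exprint and tree are hypothetical and assumed
 * valid for a tree in two variables): value and gradient in one call, with gradient
 * providing at least SCIPexprtreeGetNVars(tree) entries:
 *
 *    SCIP_Real varvals[2] = { 1.0, 2.0 };
 *    SCIP_Real val;
 *    SCIP_Real gradient[2];
 *    SCIP_CALL( SCIPexprintGrad(exprint, tree, varvals, TRUE, &val, gradient) );
 */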
2474 
2475 /** computes interval value and interval gradient of an expression tree */
2476 SCIP_RETCODE SCIPexprintGradInt(
2477  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2478  SCIP_EXPRTREE* tree, /**< expression tree */
2479  SCIP_Real infinity, /**< value for infinity */
2480  SCIP_INTERVAL* varvals, /**< interval values of variables, can be NULL if new_varvals is FALSE */
2481  SCIP_Bool new_varvals, /**< have variable interval values changed since last call to an interval evaluation routine? */
2482  SCIP_INTERVAL* val, /**< buffer to store expression interval value */
2483  SCIP_INTERVAL* gradient /**< buffer to store expression interval gradient, needs to have length at least SCIPexprtreeGetNVars(tree) */
2484  )
2485 {
2486  assert(exprint != NULL);
2487  assert(tree != NULL);
2488  assert(varvals != NULL || new_varvals == FALSE);
2489  assert(val != NULL);
2490  assert(gradient != NULL);
2491 
2492  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2493  assert(data != NULL);
2494 
2495  if (new_varvals)
2496  SCIP_CALL( SCIPexprintEvalInt(exprint, tree, infinity, varvals, val) );
2497  else
2498  *val = data->int_val;
2499 
2500  int n = SCIPexprtreeGetNVars(tree);
2501 
2502  if( n == 0 )
2503  return SCIP_OKAY;
2504 
2505  vector<SCIPInterval> jac(data->int_f.Jacobian(data->int_x));
2506 
2507  for (int i = 0; i < n; ++i)
2508  gradient[i] = jac[i];
2509 
2510 /* disable debug output since we have no message handler here
2511 #ifdef SCIP_DEBUG
2512  SCIPdebugMessage("GradInt for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2513  SCIPdebugMessage("x ="); for (int i = 0; i < n; ++i) printf("\t [%g,%g]", SCIPintervalGetInf(data->int_x[i]), SCIPintervalGetSup(data->int_x[i])); printf("\n");
2514  SCIPdebugMessage("grad ="); for (int i = 0; i < n; ++i) printf("\t [%g,%g]", SCIPintervalGetInf(gradient[i]), SCIPintervalGetSup(gradient[i])); printf("\n");
2515 #endif
2516 */
2517 
2518  return SCIP_OKAY;
2519 }
2520 
2521 /** gives sparsity pattern of hessian
2522  *
2523  * NOTE: this function might be replaced later by something nicer.
2524  * Since the AD code might need to do a forward sweep, you should pass variable values in here.
2525  */
2526 SCIP_RETCODE SCIPexprintHessianSparsityDense(
2527  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2528  SCIP_EXPRTREE* tree, /**< expression tree */
2529  SCIP_Real* varvals, /**< values of variables */
2530  SCIP_Bool* sparsity /**< buffer to store sparsity pattern of Hessian, sparsity[i+n*j] indicates whether entry (i,j) is nonzero in the hessian */
2531  )
2532 {
2533  assert(exprint != NULL);
2534  assert(tree != NULL);
2535  assert(varvals != NULL);
2536  assert(sparsity != NULL);
2537 
2538  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2539  assert(data != NULL);
2540 
2541  int n = SCIPexprtreeGetNVars(tree);
2542  if( n == 0 )
2543  return SCIP_OKAY;
2544 
2545  int nn = n*n;
2546 
2547  if( data->need_retape_always )
2548  {
2549  // @todo can we do something better here, e.g., by looking at the expression tree by ourself?
2550 
2551  for( int i = 0; i < nn; ++i )
2552  sparsity[i] = TRUE;
2553 
2554 /* disable debug output since we have no message handler here
2555 #ifdef SCIP_DEBUG
2556  SCIPdebugMessage("HessianSparsityDense for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2557  SCIPdebugMessage("sparsity = all elements, due to discontinuities\n");
2558 #endif
2559 */
2560 
2561  return SCIP_OKAY;
2562  }
2563 
2564  if( data->need_retape )
2565  {
2566  SCIP_Real val;
2567  SCIP_CALL( SCIPexprintEval(exprint, tree, varvals, &val) );
2568  }
2569 
2570  SCIPdebugMessage("calling ForSparseJac\n");
2571 
2572  vector<bool> r(nn, false);
2573  for (int i = 0; i < n; ++i)
2574  r[i*n+i] = true; /*lint !e647 !e1793*/
2575  (void) data->f.ForSparseJac(n, r); // need to compute sparsity for Jacobian first
2576 
2577  SCIPdebugMessage("calling RevSparseHes\n");
2578 
2579  vector<bool> s(1, true);
2580  vector<bool> sparsehes(data->f.RevSparseHes(n, s));
2581 
2582  for( int i = 0; i < nn; ++i )
2583  sparsity[i] = sparsehes[i];
2584 
2585 /* disable debug output since we have no message handler here
2586 #ifdef SCIP_DEBUG
2587  SCIPdebugMessage("HessianSparsityDense for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2588  SCIPdebugMessage("sparsity ="); for (int i = 0; i < n; ++i) for (int j = 0; j < n; ++j) if (sparsity[i*n+j]) printf(" (%d,%d)", i, j); printf("\n");
2589 #endif
2590 */
2591 
2592  return SCIP_OKAY;
2593 }
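/* Illustrative usage sketch (editorial; exprint, tree, and varvals are hypothetical and
 * assumed valid for a tree with n = 2 variables): sparsity is indexed as documented
 * above, i.e., sparsity[i + n*j] corresponds to Hessian entry (i,j):
 *
 *    SCIP_Bool sparsity[4];
 *    SCIP_CALL( SCIPexprintHessianSparsityDense(exprint, tree, varvals, sparsity) );
 *    if( sparsity[0 + 2*1] )
 *    {
 *       // the mixed second derivative w.r.t. variables 0 and 1 may be nonzero
 *    }
 */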
2594 
2595 /** computes value and dense hessian of an expression tree
2596  *
2597  * The full hessian is computed (lower left and upper right triangle).
2598  */
2599 SCIP_RETCODE SCIPexprintHessianDense(
2600  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2601  SCIP_EXPRTREE* tree, /**< expression tree */
2602  SCIP_Real* varvals, /**< values of variables, can be NULL if new_varvals is FALSE */
2603  SCIP_Bool new_varvals, /**< have variable values changed since last call to an evaluation routine? */
2604  SCIP_Real* val, /**< buffer to store function value */
2605  SCIP_Real* hessian /**< buffer to store hessian values, needs to have size at least n*n */
2606  )
2607 {
2608  assert(exprint != NULL);
2609  assert(tree != NULL);
2610  assert(varvals != NULL || new_varvals == FALSE);
2611  assert(val != NULL);
2612  assert(hessian != NULL);
2613 
2614  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2615  assert(data != NULL);
2616 
2617  if( new_varvals )
2618  {
2619  SCIP_CALL( SCIPexprintEval(exprint, tree, varvals, val) );
2620  }
2621  else
2622  *val = data->val;
2623 
2624  int n = SCIPexprtreeGetNVars(tree);
2625 
2626  if( n == 0 )
2627  return SCIP_OKAY;
2628 
2629 #if 1
2630  /* this one uses reverse mode */
2631  vector<double> hess(data->f.Hessian(data->x, 0));
2632 
2633  int nn = n*n;
2634  for (int i = 0; i < nn; ++i)
2635  hessian[i] = hess[i];
2636 
2637 #else
2638  /* this one uses forward mode */
2639  for( int i = 0; i < n; ++i )
2640  for( int j = 0; j < n; ++j )
2641  {
2642  vector<int> ii(1,i);
2643  vector<int> jj(1,j);
2644  hessian[i*n+j] = data->f.ForTwo(data->x, ii, jj)[0];
2645  }
2646 #endif
2647 
2648 /* disable debug output since we have no message handler here
2649 #ifdef SCIP_DEBUG
2650  SCIPdebugMessage("HessianDense for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2651  SCIPdebugMessage("x ="); for (int i = 0; i < n; ++i) printf("\t %g", data->x[i]); printf("\n");
2652  SCIPdebugMessage("hess ="); for (int i = 0; i < n*n; ++i) printf("\t %g", hessian[i]); printf("\n");
2653 #endif
2654 */
2655  return SCIP_OKAY;
2656 }