exprinterpret_cppad.cpp
1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2 /* */
3 /* This file is part of the program and library */
4 /* SCIP --- Solving Constraint Integer Programs */
5 /* */
6 /* Copyright (C) 2002-2017 Konrad-Zuse-Zentrum */
7 /* fuer Informationstechnik Berlin */
8 /* */
9 /* SCIP is distributed under the terms of the ZIB Academic License. */
10 /* */
11 /* You should have received a copy of the ZIB Academic License */
12 /* along with SCIP; see the file COPYING. If not email to scip@zib.de. */
13 /* */
14 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
15 
16 /**@file exprinterpret_cppad.cpp
17  * @brief methods to interpret (evaluate) an expression tree "fast" using CppAD
18  * @ingroup EXPRINTS
19  * @author Stefan Vigerske
20  */
21 
22 /*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/
23 
24 #include "scip/def.h"
25 #include "blockmemshell/memory.h"
26 #include "nlpi/pub_expr.h"
27 #include "nlpi/exprinterpret.h"
28 
29 #include <cmath>
30 #include <vector>
31 using std::vector;
32 
33 /* Turn off lint warning "747: Significant prototype coercion" and "732: Loss of sign".
34  * The first warning is generated for expressions like t[0], where t is a vector, since 0 is an integer constant, but a
35  * size_t is expected (usually long unsigned). The second is generated for expressions like t[n], where n is an
36  * integer. Both code pieces are likely to be correct. It seems to be impossible to inhibit these messages for
37  * vector<*>::operator[] only. */
38 /*lint --e{747,732}*/
39 
40 /* Turn off lint info "1702 operator '...' is both an ordinary function 'CppAD::operator...' and a member function 'CppAD::SCIPInterval::operator...'".
41  * However, the functions have different signatures (the CppAD one working on double, the SCIPInterval member
42  * function working on SCIPIntervals).
43  */
44 /*lint --e{1702}*/
45 
46 /* defining NO_CPPAD_USER_ATOMIC disables the use of our own implementation of derivatives of power operators
47  * via CppAD's user-atomic function feature
48  * our customized implementation should give better results (tighter intervals) for the interval data type
49  */
50 /* #define NO_CPPAD_USER_ATOMIC */
51 
52 /** sign of a value (-1 or +1)
53  *
54  * 0.0 has sign +1
55  */
56 #define SIGN(x) ((x) >= 0.0 ? 1.0 : -1.0)
57 
58 /* in order to use intervals as operands in CppAD,
59  * we need to include the intervalarithext.h very early and require the interval operations to be in the CppAD namespace */
60 #define SCIPInterval_NAMESPACE CppAD
61 #include "nlpi/intervalarithext.h"
62 
64 using CppAD::SCIPInterval;
65 
66 /* CppAD needs to know a fixed upper bound on the number of threads at compile time.
67  * It is wise to set it to a power of 2, so that if the tape id overflows, it is likely to start at 0 again, which avoids difficult-to-debug errors.
68  */
69 #ifndef CPPAD_MAX_NUM_THREADS
70 #ifndef NPARASCIP
71 #define CPPAD_MAX_NUM_THREADS 64
72 #else
73 #define CPPAD_MAX_NUM_THREADS 1
74 #endif
75 #endif
76 
77 #include <cppad/cppad.hpp>
78 #include <cppad/utility/error_handler.hpp>
79 
80 /* CppAD is not thread-safe by itself, but uses some static data structures.
81  * To run it in a multithreading environment, a special CppAD memory allocator that is aware of the multiple threads has to be used.
82  * This allocator needs to know the number of threads and a thread number for each thread.
83  * To implement this, we follow the team_pthread example of CppAD, which uses pthread's thread-specific data management.
84  */
85 #ifndef NPARASCIP
86 #include <pthread.h>
87 
88 /** mutex for locking in pthread case */
89 static pthread_mutex_t cppadmutex = PTHREAD_MUTEX_INITIALIZER;
90 
91 /** key for accessing thread specific information */
92 static pthread_key_t thread_specific_key;
93 
94 /** currently registered number of threads */
95 static size_t ncurthreads = 0;
96 
97 /** CppAD callback function that indicates whether we are running in parallel mode */
98 static
99 bool in_parallel(void)
100 {
101  return ncurthreads > 1;
102 }
103 
104 /** CppAD callback function that returns the number of the current thread
105  *
106  * assigns a new number to the thread if new
107  */
108 static
109 size_t thread_num(void)
110 {
111  size_t threadnum;
112  void* specific;
113 
114  specific = pthread_getspecific(thread_specific_key);
115 
116  /* if no data for this thread yet, then assign a new thread number to the current thread
117  * we store the thread number incremented by one, to distinguish the absence of data (=0) from existing data
118  */
119  if( specific == NULL )
120  {
121  pthread_mutex_lock(&cppadmutex);
122 
123  SCIPdebugMessage("Assigning thread number %lu to thread %p.\n", (long unsigned int)ncurthreads, (void*)pthread_self());
124 
125  pthread_setspecific(thread_specific_key, (void*)(ncurthreads + 1));
126 
127  threadnum = ncurthreads;
128 
129  ++ncurthreads;
130 
131  pthread_mutex_unlock(&cppadmutex);
132 
133  assert(pthread_getspecific(thread_specific_key) != NULL);
134  assert((size_t)pthread_getspecific(thread_specific_key) == threadnum + 1);
135  }
136  else
137  {
138  threadnum = (size_t)(specific) - 1;
139  }
140 
141  assert(threadnum < ncurthreads);
142 
143  return threadnum;
144 }
145 
146 /** sets up CppAD's data structures for running in multithreading mode
147  *
148  * It must be called once before multithreading is started.
149  */
150 static
151 char init_parallel(void)
152 {
153  pthread_key_create(&thread_specific_key, NULL);
154 
155  CppAD::thread_alloc::parallel_setup(CPPAD_MAX_NUM_THREADS, in_parallel, thread_num);
156  CppAD::parallel_ad<double>();
157  CppAD::parallel_ad<SCIPInterval>();
158 
159  return 0;
160 }
161 
162 /** a dummy variable that is initialized to the result of init_parallel
163  *
164  * The purpose is to make sure that init_parallel() is called before any multithreading is started.
165  */
166 #if !defined(_MSC_VER)
167 __attribute__ ((unused))
168 #endif
169 static char init_parallel_return = init_parallel();
170 
171 #endif // NPARASCIP
172 
173 /** definition of CondExpOp for SCIPInterval (required by CppAD) */
174 inline
175 SCIPInterval CondExpOp(
176  enum CppAD::CompareOp cop,
177  const SCIPInterval& left,
178  const SCIPInterval& right,
179  const SCIPInterval& trueCase,
180  const SCIPInterval& falseCase)
181 { /*lint --e{715}*/
182  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
183  "SCIPInterval CondExpOp(...)",
184  "Error: cannot use CondExp with an interval type"
185  );
186 
187  return SCIPInterval();
188 }
189 
190 /** another function required by CppAD */
191 inline
192 bool IdenticalPar(
193  const SCIPInterval& x /**< operand */
194  )
195 { /*lint --e{715}*/
196  return true;
197 }
198 
199 /** returns whether the interval equals [0,0] */
200 inline
201 bool IdenticalZero(
202  const SCIPInterval& x /**< operand */
203  )
204 {
205  return (x == 0.0);
206 }
207 
208 /** returns whether the interval equals [1,1] */
209 inline
210 bool IdenticalOne(
211  const SCIPInterval& x /**< operand */
212  )
213 {
214  return (x == 1.0);
215 }
216 
217 /** yet another function that checks whether two intervals are equal */
218 inline
219 bool IdenticalEqualPar(
220  const SCIPInterval& x, /**< first operand */
221  const SCIPInterval& y /**< second operand */
222  )
223 {
224  return (x == y);
225 }
226 
227 /** greater than zero not defined for intervals */
228 inline
229 bool GreaterThanZero(
230  const SCIPInterval& x /**< operand */
231  )
232 { /*lint --e{715}*/
233  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
234  "GreaterThanZero(x)",
235  "Error: cannot use GreaterThanZero with interval"
236  );
237 
238  return false;
239 }
240 
241 /** greater than or equal zero not defined for intervals */
242 inline
243 bool GreaterThanOrZero(
244  const SCIPInterval& x /**< operand */
245  )
246 { /*lint --e{715}*/
247  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__ ,
248  "GreaterThanOrZero(x)",
249  "Error: cannot use GreaterThanOrZero with interval"
250  );
251 
252  return false;
253 }
254 
255 /** less than not defined for intervals */
256 inline
257 bool LessThanZero(
258  const SCIPInterval& x /**< operand */
259  )
260 { /*lint --e{715}*/
261  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
262  "LessThanZero(x)",
263  "Error: cannot use LessThanZero with interval"
264  );
265 
266  return false;
267 }
268 
269 /** less than or equal not defined for intervals */
270 inline
271 bool LessThanOrZero(
272  const SCIPInterval& x /**< operand */
273  )
274 { /*lint --e{715}*/
275  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
276  "LessThanOrZero(x)",
277  "Error: cannot use LessThanOrZero with interval"
278  );
279 
280  return false;
281 }
282 
283 /** conversion to integers not defined for intervals */
284 inline
285 int Integer(
286  const SCIPInterval& x /**< operand */
287  )
288 { /*lint --e{715}*/
289  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
290  "Integer(x)",
291  "Error: cannot use Integer with interval"
292  );
293 
294  return 0;
295 }
296 
297 /** absolute zero multiplication
298  *
299  * @return [0,0] if first argument is [0,0] independent of whether the second argument is an empty interval or not
300  */
301 inline
302 SCIPInterval azmul(
303  const SCIPInterval& x, /**< first operand */
304  const SCIPInterval& y /**< second operand */
305  )
306 {
307  if( x.inf == 0.0 && x.sup == 0.0 )
308  return SCIPInterval(0.0, 0.0);
309  return x * y;
310 }
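/* For example, azmul(SCIPInterval(0.0, 0.0), y) returns [0,0] regardless of y (even if y is
 * unbounded or empty), while any other x falls through to the ordinary interval product x * y.
 */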
311 
312 /** printing of an interval (required by CppAD) */
313 inline
314 std::ostream& operator<<(std::ostream& out, const SCIP_INTERVAL& x)
315 {
316  out << '[' << x.inf << ',' << x.sup << ']';
317  return out;
318 }
319 
320 using CppAD::AD;
321 
322 /** expression interpreter */
323 struct SCIP_ExprInt
324 {
325  BMS_BLKMEM* blkmem; /**< block memory data structure */
326 };
327 
328 /** expression specific interpreter data */
329 struct SCIP_ExprIntData
330 {
331 public:
332  /** constructor */
333  SCIP_ExprIntData()
334  : val(0.0), need_retape(true), int_need_retape(true), need_retape_always(false), userevalcapability(SCIP_EXPRINTCAPABILITY_ALL), blkmem(NULL), root(NULL)
335  { }
336 
337  /** destructor */
338  ~SCIP_ExprIntData()
339  { }/*lint --e{1540}*/
340 
341  vector< AD<double> > X; /**< vector of dependent variables */
342  vector< AD<double> > Y; /**< result vector */
343  CppAD::ADFun<double> f; /**< the function to evaluate as CppAD object */
344 
345  vector<double> x; /**< current values of dependent variables */
346  double val; /**< current function value */
347  bool need_retape; /**< will retaping be required for the next point evaluation? */
348 
349  vector< AD<SCIPInterval> > int_X; /**< interval vector of dependent variables */
350  vector< AD<SCIPInterval> > int_Y; /**< interval result vector */
351  CppAD::ADFun<SCIPInterval> int_f; /**< the function to evaluate on intervals as CppAD object */
352 
353  vector<SCIPInterval> int_x; /**< current interval values of dependent variables */
354  SCIPInterval int_val; /**< current interval function value */
355  bool int_need_retape; /**< will retaping be required for the next interval evaluation? */
356 
357  bool need_retape_always; /**< will retaping be always required? */
358  SCIP_EXPRINTCAPABILITY userevalcapability; /**< (intersection of) capabilities of evaluation routines of user expressions */
359 
360  BMS_BLKMEM* blkmem; /**< block memory used to allocate expression tree */
361  SCIP_EXPR* root; /**< copy of expression tree; @todo we should not need to make a copy */
362 };
363 
364 #ifndef NO_CPPAD_USER_ATOMIC
365 
366 /** computes sparsity of jacobian for a univariate function during a forward sweep
367  *
368  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
369  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
370  */
371 static
372 bool univariate_for_sparse_jac(
373  size_t q, /**< number of columns in R */
374  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
375  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
376  )
377 {
378  assert(r.size() == q);
379  assert(s.size() == q);
380 
381  s = r;
382 
383  return true;
384 }
385 
386 /** Computes sparsity of jacobian during a reverse sweep
387  *
388  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
389  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
390  */
391 static
392 bool univariate_rev_sparse_jac(
393  size_t q, /**< number of rows in R */
394  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
395  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
396  )
397 {
398  assert(r.size() == q);
399  assert(s.size() == q);
400 
401  s = r;
402 
403  return true;
404 }
405 
406 /** computes sparsity of hessian during a reverse sweep
407  *
408  * Assume V(x) = (g(f(x)))'' R with f(x) = x^p for a function g:R->R and a matrix R.
409  * We have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
410  */
411 static
412 bool univariate_rev_sparse_hes(
413  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
414  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
415  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
416  size_t q, /**< number of columns in R, U, and V */
417  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
418  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
419  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
420  )
421 { /*lint --e{439,715}*/ /* @todo take vx into account */
422  assert(r.size() == q);
423  assert(s.size() == 1);
424  assert(t.size() == 1);
425  assert(u.size() == q);
426  assert(v.size() == q);
427 
428  // T(x) = g'(f(x)) * f'(x) = S * f'(x), and f' is not identically 0
429  t[0] = s[0];
430 
431  // V(x) = g''(f(x)) f'(x) f'(x) R + g'(f(x)) f''(x) R
432  // = f'(x) U + S f''(x) R, with f'(x) and f''(x) not identically 0
433  v = u;
434  if( s[0] )
435  for( size_t j = 0; j < q; ++j )
436  if( r[j] )
437  v[j] = true;
438 
439  return true;
440 }
441 
442 
443 /** Automatic differentiation of x -> x^p, p>=2 integer, as CppAD user-atomic function.
444  *
445  * This class implements forward and reverse operations for the function x -> x^p for use within CppAD.
446  * While CppAD would implement integer powers as a recursion of multiplications, we still use pow functions as they allow us to avoid overestimation in interval arithmetic.
447  *
448  * @todo treat the exponent as a (variable) argument to the function, with the assumption that we never differentiate w.r.t. it (this should make the approach threadsafe again)
449  */
450 template<class Type>
451 class atomic_posintpower : public CppAD::atomic_base<Type>
452 {
453 public:
454  atomic_posintpower()
455  : CppAD::atomic_base<Type>("posintpower"),
456  exponent(0)
457  {
458  /* indicate that we want to use bool-based sparsity pattern */
459  this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
460  }
461 
462 private:
463  /** exponent value for next call to forward or reverse */
464  int exponent;
465 
466  /** stores exponent value corresponding to next call to forward or reverse
467  *
468  * how is this supposed to be threadsafe? (we use only one global instantiation of this class)
469  */
470  virtual void set_id(size_t id)
471  {
472  exponent = (int) id;
473  }
474 
475  /** forward sweep of positive integer power
476  *
477  * Given the taylor coefficients for x, we have to compute the taylor coefficients for f(x),
478  * that is, given tx = (x, x', x'', ...), we compute the coefficients ty = (y, y', y'', ...)
479  * in the taylor expansion of f(x) = x^p.
480  * Thus, y = x^p
481  * = tx[0]^p,
482  * y' = p * x^(p-1) * x'
483  * = p * tx[0]^(p-1) * tx[1],
484  * y'' = 1/2 * p * (p-1) * x^(p-2) * x'^2 + p * x^(p-1) * x''
485  * = 1/2 * p * (p-1) * tx[0]^(p-2) * tx[1]^2 + p * tx[0]^(p-1) * tx[2]
486  */
487  bool forward(
488  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
489  size_t p, /**< highest order Taylor coefficient that we are evaluating */
490  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
491  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
492  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
493  CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
494  )
495  {
496  assert(exponent > 1);
497  assert(tx.size() >= p+1);
498  assert(ty.size() >= p+1);
499  assert(q <= p);
500 
501  if( vx.size() > 0 )
502  {
503  assert(vx.size() == 1);
504  assert(vy.size() == 1);
505  assert(p == 0);
506 
507  vy[0] = vx[0];
508  }
509 
510  if( q == 0 /* q <= 0 && 0 <= p */ )
511  {
512  ty[0] = CppAD::pow(tx[0], exponent);
513  }
514 
515  if( q <= 1 && 1 <= p )
516  {
517  ty[1] = CppAD::pow(tx[0], exponent-1) * tx[1];
518  ty[1] *= double(exponent);
519  }
520 
521  if( q <= 2 && 2 <= p )
522  {
523  if( exponent > 2 )
524  {
525  // ty[2] = 1/2 * exponent * (exponent-1) * pow(tx[0], exponent-2) * tx[1] * tx[1] + exponent * pow(tx[0], exponent-1) * tx[2];
526  ty[2] = CppAD::pow(tx[0], exponent-2) * tx[1] * tx[1];
527  ty[2] *= (exponent-1) / 2.0;
528  ty[2] += CppAD::pow(tx[0], exponent-1) * tx[2];
529  ty[2] *= exponent;
530  }
531  else
532  {
533  assert(exponent == 2);
534  // ty[2] = 1/2 * exponent * tx[1] * tx[1] + exponent * tx[0] * tx[2];
535  ty[2] = tx[1] * tx[1] + 2.0 * tx[0] * tx[2];
536  }
537  }
538 
539  /* higher order derivatives not implemented */
540  if( p > 2 )
541  return false;
542 
543  return true;
544  }
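 /* Worked example for the formulas above (with illustrative values): for exponent = 2 and
  * Taylor coefficients tx = (3, 1, 0), the sweep gives
  *   ty[0] = 3^2 = 9,
  *   ty[1] = 2 * 3 * 1 = 6,
  *   ty[2] = 1*1 + 2*3*0 = 1,
  * i.e. exactly tx[1]^2 + 2*tx[0]*tx[2] from the exponent == 2 branch.
  */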
545 
546  /** reverse sweep of positive integer power
547  *
548  * Assume y(x) is a function of the taylor coefficients of f(x) = x^p for x, i.e.,
549  * y(x) = [ x^p, p * x^(p-1) * x', p * (p-1) * x^(p-2) * x'^2 + p * x^(p-1) * x'', ... ].
550  * Then in the reverse sweep we have to compute the elements of \f$\partial h / \partial x^[l], l = 0, ..., k,\f$
551  * where x^[l] is the l'th taylor coefficient (x, x', x'', ...) and h(x) = g(y(x)) for some function g:R^k -> R.
552  * That is, we have to compute
553  *\f$
554  * px[l] = \partial h / \partial x^[l] = (\partial g / \partial y) * (\partial y / \partial x^[l])
555  * = \sum_{i=0}^k (\partial g / \partial y_i) * (\partial y_i / \partial x^[l])
556  * = \sum_{i=0}^k py[i] * (\partial y_i / \partial x^[l])
557  * \f$
558  *
559  * For k = 0, this means
560  *\f$
561  * px[0] = py[0] * (\partial y_0 / \partial x^[0])
562  * = py[0] * (\partial x^p / \partial x)
563  * = py[0] * p * tx[0]^(p-1)
564  *\f$
565  *
566  * For k = 1, this means
567  * \f$
568  * px[0] = py[0] * (\partial y_0 / \partial x^[0]) + py[1] * (\partial y_1 / \partial x^[0])
569  * = py[0] * (\partial x^p / \partial x) + py[1] * (\partial (p * x^(p-1) * x') / \partial x)
570  * = py[0] * p * tx[0]^(p-1) + py[1] * p * (p-1) * tx[0]^(p-2) * tx[1]
571  * px[1] = py[0] * (\partial y_0 / \partial x^[1]) + py[1] * (\partial y_1 / \partial x^[1])
572  * = py[0] * (\partial x^p / \partial x') + py[1] * (\partial (p * x^(p-1) x') / \partial x')
573  * = py[0] * 0 + py[1] * p * tx[0]^(p-1)
574  * \f$
575  */
576  bool reverse(
577  size_t p, /**< highest order Taylor coefficient that we are evaluating */
578  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
579  const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
580  CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
581  const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
582  )
583  { /*lint --e{715}*/
584  assert(exponent > 1);
585  assert(px.size() >= p+1);
586  assert(py.size() >= p+1);
587  assert(tx.size() >= p+1);
588 
589  switch( p )
590  {
591  case 0:
592  // px[0] = py[0] * exponent * pow(tx[0], exponent-1);
593  px[0] = py[0] * CppAD::pow(tx[0], exponent-1);
594  px[0] *= exponent;
595  break;
596 
597  case 1:
598  // px[0] = py[0] * exponent * pow(tx[0], exponent-1) + py[1] * exponent * (exponent-1) * pow(tx[0], exponent-2) * tx[1];
599  px[0] = py[1] * tx[1] * CppAD::pow(tx[0], exponent-2);
600  px[0] *= exponent-1;
601  px[0] += py[0] * CppAD::pow(tx[0], exponent-1);
602  px[0] *= exponent;
603  // px[1] = py[1] * exponent * pow(tx[0], exponent-1);
604  px[1] = py[1] * CppAD::pow(tx[0], exponent-1);
605  px[1] *= exponent;
606  break;
607 
608  default:
609  return false;
610  }
611 
612  return true;
613  }
614 
615  using CppAD::atomic_base<Type>::for_sparse_jac;
616 
617  /** computes sparsity of jacobian during a forward sweep
618  *
619  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
620  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
621  */
622  bool for_sparse_jac(
623  size_t q, /**< number of columns in R */
624  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
625  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
626  )
627  {
628  return univariate_for_sparse_jac(q, r, s);
629  }
630 
631  using CppAD::atomic_base<Type>::rev_sparse_jac;
632 
633  /** computes sparsity of jacobian during a reverse sweep
634  *
635  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
636  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
637  */
638  bool rev_sparse_jac(
639  size_t q, /**< number of rows in R */
640  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
641  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
642  )
643  {
644  return univariate_rev_sparse_jac(q, r, s);
645  }
646 
647  using CppAD::atomic_base<Type>::rev_sparse_hes;
648 
649  /** computes sparsity of hessian during a reverse sweep
650  *
651  * Assume V(x) = (g(f(x)))'' R with f(x) = x^p for a function g:R->R and a matrix R.
652  * We have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
653  */
654  bool rev_sparse_hes(
655  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
656  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
657  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
658  size_t q, /**< number of columns in R, U, and V */
659  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
660  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
661  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
662  )
663  {
664  return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
665  }
666 };
667 
668 /** power function with natural exponents */
669 template<class Type>
670 static
671 void posintpower(
672  const vector<Type>& in, /**< vector whose first argument is the base */
673  vector<Type>& out, /**< vector where to store the result in its first argument */
674  size_t exponent /**< exponent */
675  )
676 {
677  static atomic_posintpower<typename Type::value_type> pip;
678  pip(in, out, exponent);
679 }
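/* Illustrative use of the wrapper above (a minimal sketch; x stands for some AD<double>
 * that is already recorded on the CppAD tape):
 *
 *   vector< AD<double> > in(1, x);
 *   vector< AD<double> > out(1);
 *   posintpower(in, out, 3);   // records x^3 via the posintpower atomic operation
 *   // out[0] now carries the value and derivative information of x^3
 */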
680 
681 #else
682 
683 /** power function with natural exponents */
684 template<class Type>
685 void posintpower(
686  const vector<Type>& in, /**< vector whose first argument is the base */
687  vector<Type>& out, /**< vector where to store the result in its first argument */
688  size_t exponent /**< exponent */
689  )
690 {
691  out[0] = pow(in[0], (int)exponent);
692 }
693 
694 #endif
695 
696 
697 #ifndef NO_CPPAD_USER_ATOMIC
698 
699 /** Automatic differentiation of x -> sign(x)abs(x)^p, p>=1, as CppAD user-atomic function.
700  *
701  * This class implements forward and reverse operations for the function x -> sign(x)abs(x)^p for use within CppAD.
702  * While we otherwise would have to use discontinuous sign and abs functions, our own implementation allows us to provide
703  * a continuously differentiable function.
704  *
705  * @todo treat the exponent as a (variable) argument to the function, with the assumption that we never differentiate w.r.t. it (this should make the approach threadsafe again)
706  */
707 template<class Type>
708 class atomic_signpower : public CppAD::atomic_base<Type>
709 {
710 public:
711  atomic_signpower()
712  : CppAD::atomic_base<Type>("signpower"),
713  exponent(0.0)
714  {
715  /* indicate that we want to use bool-based sparsity pattern */
716  this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
717  }
718 
719 private:
720  /** exponent for use in next call to forward or reverse */
721  SCIP_Real exponent;
722 
723  /** stores exponent corresponding to next call to forward or reverse
724  *
725  * How is this supposed to be threadsafe? (we use only one global instantiation of this class)
726  */
727  virtual void set_id(size_t id)
728  {
729  exponent = SCIPexprGetSignPowerExponent((SCIP_EXPR*)(void*)id);
730  }
731 
732  /** forward sweep of signpower
733  *
734  * Given the taylor coefficients for x, we have to compute the taylor coefficients for f(x),
735  * that is, given tx = (x, x', x'', ...), we compute the coefficients ty = (y, y', y'', ...)
736  * in the taylor expansion of f(x) = sign(x)abs(x)^p.
737  * Thus, y = sign(x)abs(x)^p
738  * = sign(tx[0])abs(tx[0])^p,
739  * y' = p * abs(x)^(p-1) * x'
740  * = p * abs(tx[0])^(p-1) * tx[1],
741  * y'' = 1/2 * p * (p-1) * sign(x) * abs(x)^(p-2) * x'^2 + p * abs(x)^(p-1) * x''
742  * = 1/2 * p * (p-1) * sign(tx[0]) * abs(tx[0])^(p-2) * tx[1]^2 + p * abs(tx[0])^(p-1) * tx[2]
743  */
744  bool forward(
745  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
746  size_t p, /**< highest order Taylor coefficient that we are evaluating */
747  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
748  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
749  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
750  CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
751  )
752  {
753  assert(exponent > 0.0);
754  assert(tx.size() >= p+1);
755  assert(ty.size() >= p+1);
756  assert(q <= p);
757 
758  if( vx.size() > 0 )
759  {
760  assert(vx.size() == 1);
761  assert(vy.size() == 1);
762  assert(p == 0);
763 
764  vy[0] = vx[0];
765  }
766 
767  if( q == 0 /* q <= 0 && 0 <= p */ )
768  {
769  ty[0] = SIGN(tx[0]) * pow(REALABS(tx[0]), exponent);
770  }
771 
772  if( q <= 1 && 1 <= p )
773  {
774  ty[1] = pow(REALABS(tx[0]), exponent - 1.0) * tx[1];
775  ty[1] *= exponent;
776  }
777 
778  if( q <= 2 && 2 <= p )
779  {
780  if( exponent != 2.0 )
781  {
782  ty[2] = SIGN(tx[0]) * pow(REALABS(tx[0]), exponent - 2.0) * tx[1] * tx[1];
783  ty[2] *= (exponent - 1.0) / 2.0;
784  ty[2] += pow(REALABS(tx[0]), exponent - 1.0) * tx[2];
785  ty[2] *= exponent;
786  }
787  else
788  {
789  // y'' = 2 (1/2 * sign(x) * x'^2 + |x|*x'') = sign(tx[0]) * tx[1]^2 + 2 * abs(tx[0]) * tx[2]
790  ty[2] = SIGN(tx[0]) * tx[1] * tx[1];
791  ty[2] += 2.0 * REALABS(tx[0]) * tx[2];
792  }
793  }
794 
795  /* higher order derivatives not implemented */
796  if( p > 2 )
797  return false;
798 
799  return true;
800  }
801 
802  /** reverse sweep of signpower
803  *
804  * Assume y(x) is a function of the taylor coefficients of f(x) = sign(x)|x|^p for x, i.e.,
805  * y(x) = [ f(x), f'(x), f''(x), ... ].
806  * Then in the reverse sweep we have to compute the elements of \f$\partial h / \partial x^[l], l = 0, ..., k,\f$
807  * where x^[l] is the l'th taylor coefficient (x, x', x'', ...) and h(x) = g(y(x)) for some function g:R^k -> R.
808  * That is, we have to compute
809  *\f$
810  * px[l] = \partial h / \partial x^[l] = (\partial g / \partial y) * (\partial y / \partial x^[l])
811  * = \sum_{i=0}^k (\partial g / \partial y_i) * (\partial y_i / \partial x^[l])
812  * = \sum_{i=0}^k py[i] * (\partial y_i / \partial x^[l])
813  *\f$
814  *
815  * For k = 0, this means
816  *\f$
817  * px[0] = py[0] * (\partial y_0 / \partial x^[0])
818  * = py[0] * (\partial f(x) / \partial x)
819  * = py[0] * p * abs(tx[0])^(p-1)
820  * \f$
821  *
822  * For k = 1, this means
823  *\f$
824  * px[0] = py[0] * (\partial y_0 / \partial x^[0]) + py[1] * (\partial y_1 / \partial x^[0])
825  * = py[0] * (\partial f(x) / \partial x) + py[1] * (\partial f'(x) / \partial x)
826  * = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
827  * px[1] = py[0] * (\partial y_0 / \partial x^[1]) + py[1] * (\partial y_1 / \partial x^[1])
828  * = py[0] * (\partial f(x) / \partial x') + py[1] * (\partial f'(x) / \partial x')
829  * = py[0] * 0 + py[1] * p * abs(tx[0])^(p-1)
830  * \f$
831  */
832  bool reverse(
833  size_t p, /**< highest order Taylor coefficient that we are evaluating */
834  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
835  const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
836  CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
837  const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
838  )
839  { /*lint --e{715}*/
840  assert(exponent > 1);
841  assert(px.size() >= p+1);
842  assert(py.size() >= p+1);
843  assert(tx.size() >= p+1);
844 
845  switch( p )
846  {
847  case 0:
848  // px[0] = py[0] * p * pow(abs(tx[0]), p-1);
849  px[0] = py[0] * pow(REALABS(tx[0]), exponent - 1.0);
850  px[0] *= exponent;
851  break;
852 
853  case 1:
854  if( exponent != 2.0 )
855  {
856  // px[0] = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
857  px[0] = py[1] * tx[1] * pow(REALABS(tx[0]), exponent - 2.0) * SIGN(tx[0]);
858  px[0] *= exponent - 1.0;
859  px[0] += py[0] * pow(REALABS(tx[0]), exponent - 1.0);
860  px[0] *= exponent;
861  // px[1] = py[1] * p * abs(tx[0])^(p-1)
862  px[1] = py[1] * pow(REALABS(tx[0]), exponent - 1.0);
863  px[1] *= exponent;
864  }
865  else
866  {
867  // px[0] = py[0] * 2.0 * abs(tx[0]) + py[1] * 2.0 * sign(tx[0]) * tx[1]
868  px[0] = py[1] * tx[1] * SIGN(tx[0]);
869  px[0] += py[0] * REALABS(tx[0]);
870  px[0] *= 2.0;
871  // px[1] = py[1] * 2.0 * abs(tx[0])
872  px[1] = py[1] * REALABS(tx[0]);
873  px[1] *= 2.0;
874  }
875  break;
876 
877  default:
878  return false;
879  }
880 
881  return true;
882  }
883 
884  using CppAD::atomic_base<Type>::for_sparse_jac;
885 
886  /** computes sparsity of jacobian during a forward sweep
887  *
888  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
889  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
890  */
891  bool for_sparse_jac(
892  size_t q, /**< number of columns in R */
893  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
894  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
895  )
896  {
897  return univariate_for_sparse_jac(q, r, s);
898  }
899 
900  using CppAD::atomic_base<Type>::rev_sparse_jac;
901 
902  /** computes sparsity of jacobian during a reverse sweep
903  *
904  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
905  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
906  */
907  bool rev_sparse_jac(
908  size_t q, /**< number of rows in R */
909  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
910  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
911  )
912  {
913  return univariate_rev_sparse_jac(q, r, s);
914  }
915 
916  using CppAD::atomic_base<Type>::rev_sparse_hes;
917 
918  /** computes sparsity of hessian during a reverse sweep
919  *
920  * Assume V(x) = (g(f(x)))'' R with f(x) = sign(x)abs(x)^p for a function g:R->R and a matrix R.
921  * We have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
922  */
923  bool rev_sparse_hes(
924  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
925  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
926  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
927  size_t q, /**< number of columns in S and R */
928  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
929  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
930  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
931  )
932  {
933  return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
934  }
935 
936 };
937 
938 /** Specialization of atomic_signpower template for intervals */
939 template<>
940 class atomic_signpower<SCIPInterval> : public CppAD::atomic_base<SCIPInterval>
941 {
942 public:
943  atomic_signpower()
944  : CppAD::atomic_base<SCIPInterval>("signpowerint"),
945  exponent(0.0)
946  {
947  /* indicate that we want to use bool-based sparsity pattern */
948  this->option(CppAD::atomic_base<SCIPInterval>::bool_sparsity_enum);
949  }
950 
951 private:
952  /** exponent for use in next call to forward or reverse */
953  SCIP_Real exponent;
954 
955  /** stores exponent corresponding to next call to forward or reverse
956  *
957  * How is this supposed to be threadsafe? (we use only one global instantiation of this class)
958  */
959  virtual void set_id(size_t id)
960  {
961  exponent = SCIPexprGetSignPowerExponent((SCIP_EXPR*)(void*)id);
962  }
963 
964  /** specialization of atomic_signpower::forward template for SCIPinterval
965  *
966  * @todo try to compute tighter resultants
967  */
968  bool forward(
969  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
970  size_t p, /**< highest order Taylor coefficient that we are evaluating */
971  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
972  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
973  const CppAD::vector<SCIPInterval>& tx, /**< values for taylor coefficients of x */
974  CppAD::vector<SCIPInterval>& ty /**< vector to store taylor coefficients of y */
975  )
976  {
977  assert(exponent > 0.0);
978  assert(tx.size() >= p+1);
979  assert(ty.size() >= p+1);
980  assert(q <= p);
981 
982  if( vx.size() > 0 )
983  {
984  assert(vx.size() == 1);
985  assert(vy.size() == 1);
986  assert(p == 0);
987 
988  vy[0] = vx[0];
989  }
990 
991  if( q == 0 /* q <= 0 && 0 <= p */ )
992  {
993  ty[0] = CppAD::signpow(tx[0], exponent);
994  }
995 
996  if( q <= 1 && 1 <= p )
997  {
998  ty[1] = CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0) * tx[1];
999  ty[1] *= exponent;
1000  }
1001 
1002  if( q <= 2 && 2 <= p )
1003  {
1004  if( exponent != 2.0 )
1005  {
1006  ty[2] = CppAD::signpow(tx[0], exponent - 2.0) * CppAD::square(tx[1]);
1007  ty[2] *= (exponent - 1.0) / 2.0;
1008  ty[2] += CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0) * tx[2];
1009  ty[2] *= exponent;
1010  }
1011  else
1012  {
1013  // y'' = 2 (1/2 * sign(x) * x'^2 + |x|*x'') = sign(tx[0]) * tx[1]^2 + 2 * abs(tx[0]) * tx[2]
1014  ty[2] = CppAD::sign(tx[0]) * CppAD::square(tx[1]);
1015  ty[2] += 2.0 * CppAD::abs(tx[0]) * tx[2];
1016  }
1017  }
1018 
1019  /* higher order derivatives not implemented */
1020  if( p > 2 )
1021  return false;
1022 
1023  return true;
1024  }
1025 
1026  /** specialization of atomic_signpower::reverse template for SCIPinterval
1027  *
1028  * @todo try to compute tighter resultants
1029  */
1030  bool reverse(
1031  size_t p, /**< highest order Taylor coefficient that we are evaluating */
1032  const CppAD::vector<SCIPInterval>& tx, /**< values for taylor coefficients of x */
1033  const CppAD::vector<SCIPInterval>& ty, /**< values for taylor coefficients of y */
1034  CppAD::vector<SCIPInterval>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
1035  const CppAD::vector<SCIPInterval>& py /**< values for partial derivatives of g(x) w.r.t. y */
1036  )
1037  { /*lint --e{715} */
1038  assert(exponent > 1);
1039  assert(px.size() >= p+1);
1040  assert(py.size() >= p+1);
1041  assert(tx.size() >= p+1);
1042 
1043  switch( p )
1044  {
1045  case 0:
1046  // px[0] = py[0] * p * pow(abs(tx[0]), p-1);
1047  px[0] = py[0] * CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0);
1048  px[0] *= exponent;
1049  break;
1050 
1051  case 1:
1052  if( exponent != 2.0 )
1053  {
1054  // px[0] = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
1055  px[0] = py[1] * tx[1] * CppAD::signpow(tx[0], exponent - 2.0);
1056  px[0] *= exponent - 1.0;
1057  px[0] += py[0] * CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0);
1058  px[0] *= exponent;
1059  // px[1] = py[1] * p * abs(tx[0])^(p-1)
1060  px[1] = py[1] * CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0);
1061  px[1] *= exponent;
1062  }
1063  else
1064  {
1065  // px[0] = py[0] * 2.0 * abs(tx[0]) + py[1] * 2.0 * sign(tx[0]) * tx[1]
1066  px[0] = py[1] * tx[1] * CppAD::sign(tx[0]);
1067  px[0] += py[0] * CppAD::abs(tx[0]);
1068  px[0] *= 2.0;
1069  // px[1] = py[1] * 2.0 * abs(tx[0])
1070  px[1] = py[1] * CppAD::abs(tx[0]);
1071  px[1] *= 2.0;
1072  }
1073  break;
1074 
1075  default:
1076  return false;
1077  }
1078 
1079  return true;
1080  }
1081 
1082  using CppAD::atomic_base<SCIPInterval>::for_sparse_jac;
1083 
1084  /** computes sparsity of jacobian during a forward sweep
1085  *
1086  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
1087  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
1088  */
1089  bool for_sparse_jac(
1090  size_t q, /**< number of columns in R */
1091  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
1092  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
1093  )
1094  {
1095  return univariate_for_sparse_jac(q, r, s);
1096  }
1097 
1098  using CppAD::atomic_base<SCIPInterval>::rev_sparse_jac;
1099 
1100  /** computes sparsity of jacobian during a reverse sweep
1101  *
1102  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
1103  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
1104  */
1105  bool rev_sparse_jac(
1106  size_t q, /**< number of rows in R */
1107  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
1108  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
1109  )
1110  {
1111  return univariate_rev_sparse_jac(q, r, s);
1112  }
1113 
1114  using CppAD::atomic_base<SCIPInterval>::rev_sparse_hes;
1115 
1116  /** computes sparsity of hessian during a reverse sweep
1117  *
1118  * Assume V(x) = (g(f(x)))'' R with f(x) = sign(x)abs(x)^p for a function g:R->R and a matrix R.
1119  * We have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
1120  */
1121  bool rev_sparse_hes(
1122  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
1123  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
1124  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
1125  size_t q, /**< number of columns in S and R */
1126  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
1127  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
1128  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
1129  )
1130  {
1131  return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
1132  }
1133 };
1134 
1135 /** template for evaluation for signpower operator */
1136 template<class Type>
1137 static
1138 void evalSignPower(
1139  Type& resultant, /**< resultant */
1140  const Type& arg, /**< operand */
1141  SCIP_EXPR* expr /**< expression that holds the exponent */
1142  )
1143 {
1144  vector<Type> in(1, arg);
1145  vector<Type> out(1);
1146 
1147  static atomic_signpower<typename Type::value_type> sp;
1148  sp(in, out, (size_t)(void*)expr);
1149 
1150  resultant = out[0];
1151  return;
1152 }
1153 
1154 #else
1155 
1156 /** template for evaluation for signpower operator
1157  *
1158  * Only implemented for real numbers, thus gives error by default.
1159  */
1160 template<class Type>
1161 static
1162 void evalSignPower(
1163  Type& resultant, /**< resultant */
1164  const Type& arg, /**< operand */
1165  SCIP_EXPR* expr /**< expression that holds the exponent */
1166  )
1167 { /*lint --e{715}*/
1168  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1169  "evalSignPower()",
1170  "Error: SignPower not implemented for this value type"
1171  );
1172 }
1173 
1174 /** specialization of signpower evaluation for real numbers */
1175 template<>
1176 void evalSignPower(
1177  CppAD::AD<double>& resultant, /**< resultant */
1178  const CppAD::AD<double>& arg, /**< operand */
1179  SCIP_EXPR* expr /**< expression that holds the exponent */
1180  )
1181 {
1182  SCIP_Real exponent;
1183 
1184  exponent = SCIPexprGetSignPowerExponent(expr);
1185 
1186  if( arg == 0.0 )
1187  resultant = 0.0;
1188  else if( arg > 0.0 )
1189  resultant = pow( arg, exponent);
1190  else
1191  resultant = -pow(-arg, exponent);
1192 }
1193 
1194 #endif
1195 
1196 
1197 #ifndef NO_CPPAD_USER_ATOMIC
1198 
1199 template<class Type>
1200 SCIP_RETCODE exprEvalUser(
1201  SCIP_EXPR* expr,
1202  Type* x,
1203  Type& funcval,
1204  Type* gradient,
1205  Type* hessian
1206  )
1207 {
1208  return SCIPexprEvalUser(expr, x, &funcval, gradient, hessian); /*lint !e429*/
1209 }
1210 
1211 template<>
1212 SCIP_RETCODE exprEvalUser(
1213  SCIP_EXPR* expr,
1214  SCIPInterval* x,
1215  SCIPInterval& funcval,
1216  SCIPInterval* gradient,
1217  SCIPInterval* hessian
1218  )
1219 {
1220  return SCIPexprEvalIntUser(expr, SCIPInterval::infinity, x, &funcval, gradient, hessian);
1221 }
1222 
1223 /** Automatic differentiation of user expression as CppAD user-atomic function.
1224  *
1225  * This class implements forward and reverse operations for a function given by a user expression for use within CppAD.
1226  */
1227 template<class Type>
1228 class atomic_userexpr : public CppAD::atomic_base<Type>
1229 {
1230 public:
1231  atomic_userexpr()
1232  : CppAD::atomic_base<Type>("userexpr"),
1233  expr(NULL)
1234  {
1235  /* indicate that we want to use bool-based sparsity pattern */
1236  this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
1237  }
1238 
1239 private:
1240  /** user expression */
1241  SCIP_EXPR* expr;
1242 
1243  /** stores user expression corresponding to next call to forward or reverse
1244  *
1245  * how is this supposed to be threadsafe? (we use only one global instantiation of this class)
1246  */
1247  virtual void set_id(size_t id)
1248  {
1249  expr = (SCIP_EXPR*)(void*)id;
1250  assert(SCIPexprGetOperator(expr) == SCIP_EXPR_USER);
1251  }
1252 
1253  /** forward sweep of userexpr
1254  *
1255  * We follow http://www.coin-or.org/CppAD/Doc/atomic_forward.xml
1256  * Note that p and q are interchanged!
1257  *
1258  * For a scalar variable t, let
1259  * Y(t) = f(X(t))
1260  * X(t) = x^0 + x^1 t^1 + ... + x^p t^p
1261  * where for x^i the i is an index, while for t^i the i is an exponent.
1262  * Thus, x^k = 1/k! X^(k) (0), where X^(k)(.) denotes the k-th derivative.
1263  *
1264  * Next, let y^k = 1/k! Y^(k)(0) be the k'th taylor coefficient of Y. Thus,
1265  * y^0 = Y^(0)(0) = Y(0) = f(X(0)) = f(x^0)
1266  * y^1 = Y^(1)(0) = Y'(0) = f'(X(0)) * X'(0) = f'(x^0) * x^1
1267  * y^2 = 1/2 Y^(2)(0) = 1/2 Y''(0) = 1/2 X'(0) * f''(X(0)) X'(0) + 1/2 * f'(X(0)) * X''(0) = 1/2 x^1 * f''(x^0) * x^1 + f'(x^0) * x^2
1268  *
1269  * As x^k = (tx[k], tx[(p+1)+k], tx[2*(p+1)+k], ..., tx[n*(p+1)+k]), we get
1270  * ty[0] = y^0 = f(x^0) = f(tx[{1..n}*(p+1)])
1271  * ty[1] = y^1 = f'(x^0) * tx[{1..n}*(p+1)+1] = sum(i=1..n, grad[i] * tx[i*(p+1)+1]), where grad = f'(x^0)
1272  * ty[2] = 1/2 sum(i,j=1..n, x[i*(p+1)+1] * x[j*(p+1)+1] * hessian[i,j]) + sum(i=1..n, grad[i] * x[i*(p+1)+2])
1273  */
1274  bool forward(
1275  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
1276  size_t p, /**< highest order Taylor coefficient that we are evaluating */
1277  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
1278  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
1279  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
1280  CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
1281  )
1282  {
1283  assert(expr != NULL);
1284  assert(ty.size() == p+1);
1285  assert(q <= p);
1286 
1287  size_t n = tx.size() / (p+1);
1288  assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1289  assert(n >= 1);
1290 
1291  if( vx.size() > 0 )
1292  {
1293  assert(vx.size() == n);
1294  assert(vy.size() == 1);
1295  assert(p == 0);
1296 
1297  /* y_0 is a variable if at least one of the x_i is a variable */
1298  vy[0] = false;
1299  for( size_t i = 0; i < n; ++i )
1300  if( vx[i] )
1301  {
1302  vy[0] = true;
1303  break;
1304  }
1305  }
1306 
1307  Type* x = new Type[n];
1308  Type* gradient = NULL;
1309  Type* hessian = NULL;
1310 
1311  if( q <= 2 && 1 <= p )
1312  gradient = new Type[n];
1313  if( q <= 2 && 2 <= p )
1314  hessian = new Type[n*n];
1315 
1316  for( size_t i = 0; i < n; ++i )
1317  x[i] = tx[i * (p+1) + 0]; /*lint !e835*/
1318 
1319  if( exprEvalUser(expr, x, ty[0], gradient, hessian) != SCIP_OKAY )
1320  {
1321  delete[] x;
1322  delete[] gradient;
1323  delete[] hessian;
1324  return false;
1325  }
1326 
1327  if( gradient != NULL )
1328  {
1329  ty[1] = 0.0;
1330  for( size_t i = 0; i < n; ++i )
1331  ty[1] += gradient[i] * tx[i * (p+1) + 1];
1332  }
1333 
1334  if( hessian != NULL )
1335  {
1336  assert(gradient != NULL);
1337 
1338  ty[2] = 0.0;
1339  for( size_t i = 0; i < n; ++i )
1340  {
1341  for( size_t j = 0; j < n; ++j )
1342  ty[2] += 0.5 * hessian[i*n+j] * tx[i * (p+1) + 1] * tx[j * (p+1) + 1];
1343 
1344  ty[2] += gradient[i] * tx[i * (p+1) + 2];
1345  }
1346  }
1347 
1348  delete[] x;
1349  delete[] gradient;
1350  delete[] hessian;
1351 
1352  /* higher order derivatives not implemented */
1353  if( p > 2 )
1354  return false;
1355 
1356  return true;
1357  }
1358 
1359  /** reverse sweep of userexpr
1360  *
1361  * We follow http://www.coin-or.org/CppAD/Doc/atomic_reverse.xml
1362  * Note that there q is our p.
1363  *
1364  * For a scalar variable t, let
1365  * Y(t) = f(X(t))
1366  * X(t) = x^0 + x^1 t^1 + ... + x^p t^p
1367  * where for x^i the i is an index, while for t^i the i is an exponent.
1368  * Thus, x^k = 1/k! X^(k) (0), where X^(k)(.) denotes the k-th derivative.
1369  *
1370  * Next, let y^k = 1/k! Y^(k)(0) be the k'th taylor coefficient of Y. Thus,
1371  * Y(t) = y^0 + y^1 t^1 + y^2 t^2 + ...
1372  * y^0, y^1, ... are the taylor coefficients of f(x).
1373  *
1374  * Further, let F(x^0,..,x^p) by given as F^k(x) = y^k. Thus,
1375  * F^0(x) = y^0 = Y^(0)(0) = f(x^0)
1376  * F^1(x) = y^1 = Y^(1)(0) = f'(x^0) * x^1
1377  * F^2(x) = y^2 = 1/2 Y''(0) = 1/2 x^1 f''(x^0) x^1 + f'(x^0) x^2
1378  *
1379  * Given functions G: R^(p+1) -> R and H: R^(n*(p+1)) -> R, where H(x^0, x^1, .., x^p) = G(F(x^0,..,x^p)),
1380  * we have to return the value of \f$\partial H / \partial x^l, l = 0..p,\f$ in px. Therefore,
1381  * \f$
1382  * px^l = \partial H / \partial x^l
1383  * = sum(k=0..p, (\partial G / \partial y^k) * (\partial y^k / \partial x^l)
1384  * = sum(k=0..p, py[k] * (\partial F^k / \partial x^l)
1385  * \f$
1386  *
1387  * For p = 0, this means
1388  * \f$
1389  * px^0 = py[0] * \partial F^0 / \partial x^0
1390  * = py[0] * \partial f(x^0) / \partial x^0
1391  * = py[0] * f'(x^0)
1392  * \f$
1393  *
1394  * For p = 1, this means (for l = 0):
1395  * \f[
1396  * px^0 = py[0] * \partial F^0 / \partial x^0 + py[1] * \partial F^1 / \partial x^0
1397  * = py[0] * \partial f(x^0) / \partial x^0 + py[1] * \partial (f'(x^0) * x^1) / \partial x^0
1398  * = py[0] * f'(x^0) + py[1] * f''(x^0) * x^1
1399  * \f]
1400  * and (for l=1):
1401  * \f[
1402  * px^1 = py[0] * \partial F^0 / \partial x^1 + py[1] * \partial F^1 / \partial x^1
1403  * = py[0] * \partial f(x^0) / \partial x^1 + py[1] * \partial (f'(x^0) * x^1) / \partial x^0
1404  * = py[0] * 0 + py[1] * f'(x^0)
1405  * \f]
1406  *
1407  * As x^k = (tx[k], tx[(p+1)+k], tx[2*(p+1)+k], ..., tx[n*(p+1)+k]) and
1408  * px^k = (px[k], px[(p+1)+k], px[2*(p+1)+k], ..., px[n*(p+1)+k]), we get
1409  * for p = 0:
1410  * px[i] = (px^0)_i = py[0] * grad[i]
1411  * for p = 1:
1412  * px[i*2+0] = (px^0)_i = py[0] * grad[i] + py[1] * sum(j, hessian[j,i] * tx[j*2+1])
1413  * px[i*2+1] = (px^1)_i = py[1] * grad[i]
1414  */
1415  bool reverse(
1416  size_t p, /**< highest order Taylor coefficient that we are evaluating */
1417  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
1418  const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
1419  CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
1420  const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
1421  )
1422  {
1423  assert(expr != NULL);
1424  assert(px.size() == tx.size());
1425  assert(py.size() == p+1);
1426 
1427  size_t n = tx.size() / (p+1);
1428  assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1429  assert(n >= 1);
1430 
1431  Type* x = new Type[n];
1432  Type funcval;
1433  Type* gradient = new Type[n];
1434  Type* hessian = NULL;
1435 
1436  if( p == 1 )
1437  hessian = new Type[n*n];
1438 
1439  for( size_t i = 0; i < n; ++i )
1440  x[i] = tx[i * (p+1) + 0]; /*lint !e835*/
1441 
1442  if( exprEvalUser(expr, x, funcval, gradient, hessian) != SCIP_OKAY )
1443  {
1444  delete[] x;
1445  delete[] gradient;
1446  delete[] hessian;
1447  return false;
1448  }
1449 
1450  switch( p )
1451  {
1452  case 0:
1453  // px[j] = (px^0)_j = py[0] * grad[j]
1454  for( size_t i = 0; i < n; ++i )
1455  px[i] = py[0] * gradient[i];
1456  break;
1457 
1458  case 1:
1459  // px[i*2+0] = (px^0)_i = py[0] * grad[i] + py[1] * sum(j, hessian[j,i] * tx[j*2+1])
1460  // px[i*2+1] = (px^1)_i = py[1] * grad[i]
1461  assert(hessian != NULL);
1462  for( size_t i = 0; i < n; ++i )
1463  {
1464  px[i*2+0] = py[0] * gradient[i]; /*lint !e835*/
1465  for( size_t j = 0; j < n; ++j )
1466  px[i*2+0] += py[1] * hessian[i+n*j] * tx[j*2+1]; /*lint !e835*/
1467 
1468  px[i*2+1] = py[1] * gradient[i];
1469  }
1470  break;
1471 
1472  default:
1473  return false;
1474  }
1475 
1476  return true;
1477  } /*lint !e715*/
1478 
1479  using CppAD::atomic_base<Type>::for_sparse_jac;
1480 
1481  /** computes sparsity of jacobian during a forward sweep
1482  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
1483  * Since we assume f'(x) to be dense, the sparsity of S will be the sparsity of R.
1484  */
1485  bool for_sparse_jac(
1486  size_t q, /**< number of columns in R */
1487  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
1488  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
1489  )
1490  {
1491  assert(expr != NULL);
1492  assert(s.size() == q);
1493 
1494  size_t n = r.size() / q;
1495  assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1496 
1497  // sparsity for S(x) = f'(x) * R
1498  for( size_t j = 0; j < q; j++ )
1499  {
1500  s[j] = false;
1501  for( size_t i = 0; i < n; i++ )
1502  s[j] |= (bool)r[i * q + j]; /*lint !e1786*/
1503  }
1504 
1505  return true;
1506  }
1507 
1508  using CppAD::atomic_base<Type>::rev_sparse_jac;
1509 
1510  /** computes sparsity of jacobian during a reverse sweep
1511  * For a q x 1 matrix S, we have to return the sparsity pattern of the q x 1 matrix R(x) = S * f'(x).
1512  * Since we assume f'(x) to be dense, the sparsity of R will be the sparsity of S.
1513  */
1514  bool rev_sparse_jac(
1515  size_t q, /**< number of rows in R */
1516  const CppAD::vector<bool>& rt, /**< sparsity of R, rowwise */
1517  CppAD::vector<bool>& st /**< vector to store sparsity of S, rowwise */
1518  )
1519  {
1520  assert(expr != NULL);
1521  assert(rt.size() == q);
1522 
1523  size_t n = st.size() / q;
1524  assert(n == (size_t)SCIPexprGetNChildren(expr));
1525 
1526  // sparsity for S(x)^T = f'(x)^T * R^T
1527  for( size_t j = 0; j < q; j++ )
1528  for( size_t i = 0; i < n; i++ )
1529  st[i * q + j] = rt[j];
1530 
1531  return true;
1532  }
1533 
1534  using CppAD::atomic_base<Type>::rev_sparse_hes;
1535 
1536  /** computes sparsity of hessian during a reverse sweep
1537  * Assume V(x) = (g(f(x)))'' R for a function g:R->R and a matrix R.
1538  * We have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
1539  */
1540  bool rev_sparse_hes(
1541  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
1542  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
1543  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
1544  size_t q, /**< number of columns in S and R */
1545  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
1546  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
1547  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
1548  )
1549  {
1550  assert(expr != NULL);
1551  size_t n = vx.size();
1552  assert((size_t)SCIPexprGetNChildren(expr) == n);
1553  assert(s.size() == 1);
1554  assert(t.size() == n);
1555  assert(r.size() == n * q);
1556  assert(u.size() == q);
1557  assert(v.size() == n * q);
1558 
1559  size_t i, j, k;
1560 
1561  // sparsity for T(x) = S(x) * f'(x)
1562  for( i = 0; i < n; ++i )
1563  t[i] = s[0];
1564 
1565  // V(x) = f'(x)^T * g''(y) * f'(x) * R + g'(y) * f''(x) * R
1566  // U(x) = g''(y) * f'(x) * R
1567  // S(x) = g'(y)
1568 
1569  // back propagate the sparsity for U
1570  for( j = 0; j < q; j++ )
1571  for( i = 0; i < n; i++ )
1572  v[ i * q + j] = u[j];
1573 
1574  // include forward Jacobian sparsity in Hessian sparsity
1575  // sparsity for g'(y) * f''(x) * R (Note f''(x) is assumed to be dense)
1576  if( s[0] )
1577  for( j = 0; j < q; j++ )
1578  for( i = 0; i < n; i++ )
1579  for( k = 0; k < n; ++k )
1580  v[ i * q + j] |= (bool) r[ k * q + j];
1581 
1582  return true;
1583  }
1584 
1585 };
1586 
1587 template<class Type>
1588 static
1589 void evalUser(
1590  Type& resultant, /**< resultant */
1591  const Type* args, /**< operands */
1592  SCIP_EXPR* expr /**< expression that holds the user expression */
1593  )
1594 {
1595  assert( args != 0 );
1596  vector<Type> in(args, args + SCIPexprGetNChildren(expr));
1597  vector<Type> out(1);
1598 
1599  static atomic_userexpr<typename Type::value_type> u;
1600  u(in, out, (size_t)(void*)expr);
1601 
1602  resultant = out[0];
1603  return;
1604 }
1605 
1606 #else
1607 
1608 template<class Type>
1609 static
1610 void evalUser(
1611  Type& resultant, /**< resultant */
1612  const Type* args, /**< operands */
1613  SCIP_EXPR* expr /**< expression that holds the user expression */
1614  )
1615 {
1616  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1617  "evalUser()",
1618  "Error: user expressions in CppAD not possible without CppAD user atomic facility"
1619  );
1620 }
1621 
1622 #endif
1623 
1624 /** template for evaluation for minimum operator
1625  *
1626  * Only implemented for real numbers, thus gives error by default.
1627  * @todo implement own userad function
1628  */
1629 template<class Type>
1630 static
1631 void evalMin(
1632  Type& resultant, /**< resultant */
1633  const Type& arg1, /**< first operand */
1634  const Type& arg2 /**< second operand */
1635  )
1636 { /*lint --e{715,1764}*/
1637  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1638  "evalMin()",
1639  "Error: Min not implemented for this value type"
1640  );
1641 }
1642 
1643 /** specialization of minimum evaluation for real numbers */
1644 template<>
1645 void evalMin(
1646  CppAD::AD<double>& resultant, /**< resultant */
1647  const CppAD::AD<double>& arg1, /**< first operand */
1648  const CppAD::AD<double>& arg2 /**< second operand */
1649  )
1650 {
1651  resultant = MIN(arg1, arg2);
1652 }
1653 
1654 /** template for evaluation for maximum operator
1655  *
1656  * Only implemented for real numbers, thus gives error by default.
1657  * @todo implement own userad function
1658  */
1659 template<class Type>
1660 static
1661 void evalMax(
1662  Type& resultant, /**< resultant */
1663  const Type& arg1, /**< first operand */
1664  const Type& arg2 /**< second operand */
1665  )
1666 { /*lint --e{715,1764}*/
1667  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1668  "evalMax()",
1669  "Error: Max not implemented for this value type"
1670  );
1671 }
1672 
1673 /** specialization of maximum evaluation for real numbers */
1674 template<>
1675 void evalMax(
1676  CppAD::AD<double>& resultant, /**< resultant */
1677  const CppAD::AD<double>& arg1, /**< first operand */
1678  const CppAD::AD<double>& arg2 /**< second operand */
1679  )
1680 {
1681  resultant = MAX(arg1, arg2);
1682 }
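/* A minimal sketch of why Min/Max force retaping: with CppAD::AD<double>, the MIN/MAX
 * macros compare the values that are current while the tape is recorded, so only one
 * branch ends up on the tape.  For example (hypothetical values):
 *
 *    CppAD::AD<double> a(1.0), b(2.0), m;
 *    evalMin(m, a, b);   // records "m = a", since a < b at taping time
 *
 * Reusing that tape at a point where b < a would give a wrong result, which is why
 * analyzeTree() below marks trees containing MIN, MAX, or ABS with need_retape_always.
 */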
1683 
1684 /** template for evaluation for square-root operator
1685  *
1686  * Default is to use the standard sqrt-function.
1687  */
1688 template<class Type>
1689 static
1690 void evalSqrt(
1691  Type& resultant, /**< resultant */
1692  const Type& arg /**< operand */
1693  )
1694 {
1695  resultant = sqrt(arg);
1696 }
1697 
1698 /** template for evaluation for absolute value operator */
1699 template<class Type>
1700 static
1701 void evalAbs(
1702  Type& resultant, /**< resultant */
1703  const Type& arg /**< operand */
1704  )
1705 {
1706  resultant = abs(arg);
1707 }
1708 
1709 /** specialization of absolute value evaluation for intervals
1710  *
1711  * Use sqrt(x^2) for now. @todo implement own userad function.
1712  */
1713 template<>
1714 void evalAbs(
1715  CppAD::AD<SCIPInterval>& resultant, /**< resultant */
1716  const CppAD::AD<SCIPInterval>& arg /**< operand */
1717  )
1718 {
1719  vector<CppAD::AD<SCIPInterval> > in(1, arg);
1720  vector<CppAD::AD<SCIPInterval> > out(1);
1721 
1722  posintpower(in, out, 2);
1723 
1724  resultant = sqrt(out[0]);
1725 }
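/* A tiny worked instance of the sqrt(x^2) trick above: for x = [-2, 3], posintpower()
 * computes x^2 = [0, 9] (a naive interval product [-2,3]*[-2,3] would give [-6, 9]),
 * and sqrt([0, 9]) = [0, 3], which is exactly abs([-2, 3]).  Routing the square through
 * the user-atomic posintpower() also provides the derivative information CppAD needs
 * on the tape.
 */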
1726 
1727 /** integer power operation for arbitrary integer exponents */
1728 template<class Type>
1729 static
1730 void evalIntPower(
1731  Type& resultant, /**< resultant */
1732  const Type& arg, /**< operand */
1733  const int exponent /**< exponent */
1734  )
1735 {
1736  if( exponent > 1 )
1737  {
1738  vector<Type> in(1, arg);
1739  vector<Type> out(1);
1740 
1741  posintpower(in, out, exponent);
1742 
1743  resultant = out[0];
1744  return;
1745  }
1746 
1747  if( exponent < -1 )
1748  {
1749  vector<Type> in(1, arg);
1750  vector<Type> out(1);
1751 
1752  posintpower(in, out, -exponent);
1753 
1754  resultant = Type(1.0)/out[0];
1755  return;
1756  }
1757 
1758  if( exponent == 1 )
1759  {
1760  resultant = arg;
1761  return;
1762  }
1763 
1764  if( exponent == 0 )
1765  {
1766  resultant = Type(1.0);
1767  return;
1768  }
1769 
1770  assert(exponent == -1);
1771  resultant = Type(1.0)/arg;
1772 }
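/* A short sketch of how evalIntPower() dispatches on the exponent; "x" stands for an
 * already taped operand of some AD type T, and in = {x}:
 *
 *    T r;
 *    evalIntPower(r, x,  3);   // posintpower(in, out, 3);  r = out[0]          = x^3
 *    evalIntPower(r, x, -2);   // posintpower(in, out, 2);  r = T(1.0)/out[0]   = 1/x^2
 *    evalIntPower(r, x,  1);   // r = x
 *    evalIntPower(r, x,  0);   // r = T(1.0)
 *    evalIntPower(r, x, -1);   // r = T(1.0)/x
 *
 * All cases with |exponent| > 1 go through posintpower(), so the customized
 * (interval-tight) derivative implementation is used whenever it is available.
 */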
1773 
1774 /** CppAD compatible evaluation of an expression for given arguments and parameters */
1775 template<class Type>
1776 static
1777 SCIP_RETCODE eval(
1778  SCIP_EXPR* expr, /**< expression */
1779  const vector<Type>& x, /**< values of variables */
1780  SCIP_Real* param, /**< values of parameters */
1781  Type& val /**< buffer to store expression value */
1782  )
1783 {
1784  Type* buf = 0;
1785 
1786  assert(expr != NULL);
1787 
1788  /* todo use SCIP_MAXCHILD_ESTIMATE as in expression.c */
1789 
1790  if( SCIPexprGetNChildren(expr) )
1791  {
1792  if( BMSallocMemoryArray(&buf, SCIPexprGetNChildren(expr)) == NULL ) /*lint !e666*/
1793  return SCIP_NOMEMORY;
1794 
1795  for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
1796  {
1797  SCIP_CALL( eval(SCIPexprGetChildren(expr)[i], x, param, buf[i]) );
1798  }
1799  }
1800 
1801  switch(SCIPexprGetOperator(expr))
1802  {
1803  case SCIP_EXPR_VARIDX:
1804  assert(SCIPexprGetOpIndex(expr) < (int)x.size());
1805  val = x[SCIPexprGetOpIndex(expr)];
1806  break;
1807 
1808  case SCIP_EXPR_CONST:
1809  val = SCIPexprGetOpReal(expr);
1810  break;
1811 
1812  case SCIP_EXPR_PARAM:
1813  assert(param != NULL);
1814  val = param[SCIPexprGetOpIndex(expr)];
1815  break;
1816 
1817  case SCIP_EXPR_PLUS:
1818  assert( buf != 0 );
1819  val = buf[0] + buf[1];
1820  break;
1821 
1822  case SCIP_EXPR_MINUS:
1823  assert( buf != 0 );
1824  val = buf[0] - buf[1];
1825  break;
1826 
1827  case SCIP_EXPR_MUL:
1828  assert( buf != 0 );
1829  val = buf[0] * buf[1];
1830  break;
1831 
1832  case SCIP_EXPR_DIV:
1833  assert( buf != 0 );
1834  val = buf[0] / buf[1];
1835  break;
1836 
1837  case SCIP_EXPR_SQUARE:
1838  assert( buf != 0 );
1839  evalIntPower(val, buf[0], 2);
1840  break;
1841 
1842  case SCIP_EXPR_SQRT:
1843  assert( buf != 0 );
1844  evalSqrt(val, buf[0]);
1845  break;
1846 
1847  case SCIP_EXPR_REALPOWER:
1848  assert( buf != 0 );
1849  val = CppAD::pow(buf[0], SCIPexprGetRealPowerExponent(expr));
1850  break;
1851 
1852  case SCIP_EXPR_INTPOWER:
1853  assert( buf != 0 );
1854  evalIntPower(val, buf[0], SCIPexprGetIntPowerExponent(expr));
1855  break;
1856 
1857  case SCIP_EXPR_SIGNPOWER:
1858  assert( buf != 0 );
1859  evalSignPower(val, buf[0], expr);
1860  break;
1861 
1862  case SCIP_EXPR_EXP:
1863  assert( buf != 0 );
1864  val = exp(buf[0]);
1865  break;
1866 
1867  case SCIP_EXPR_LOG:
1868  assert( buf != 0 );
1869  val = log(buf[0]);
1870  break;
1871 
1872  case SCIP_EXPR_SIN:
1873  assert( buf != 0 );
1874  val = sin(buf[0]);
1875  break;
1876 
1877  case SCIP_EXPR_COS:
1878  assert( buf != 0 );
1879  val = cos(buf[0]);
1880  break;
1881 
1882  case SCIP_EXPR_TAN:
1883  assert( buf != 0 );
1884  val = tan(buf[0]);
1885  break;
1886 #ifdef SCIP_DISABLED_CODE /* these operators are currently disabled */
1887  case SCIP_EXPR_ERF:
1888  assert( buf != 0 );
1889  val = erf(buf[0]);
1890  break;
1891 
1892  case SCIP_EXPR_ERFI:
1893  return SCIP_ERROR;
1894 #endif
1895  case SCIP_EXPR_MIN:
1896  assert( buf != 0 );
1897  evalMin(val, buf[0], buf[1]);
1898  break;
1899 
1900  case SCIP_EXPR_MAX:
1901  assert( buf != 0 );
1902  evalMax(val, buf[0], buf[1]);
1903  break;
1904 
1905  case SCIP_EXPR_ABS:
1906  assert( buf != 0 );
1907  evalAbs(val, buf[0]);
1908  break;
1909 
1910  case SCIP_EXPR_SIGN:
1911  assert( buf != 0 );
1912  val = sign(buf[0]);
1913  break;
1914 
1915  case SCIP_EXPR_SUM:
1916  assert( buf != 0 );
1917  val = 0.0;
1918  for (int i = 0; i < SCIPexprGetNChildren(expr); ++i)
1919  val += buf[i];
1920  break;
1921 
1922  case SCIP_EXPR_PRODUCT:
1923  assert( buf != 0 );
1924  val = 1.0;
1925  for (int i = 0; i < SCIPexprGetNChildren(expr); ++i)
1926  val *= buf[i];
1927  break;
1928 
1929  case SCIP_EXPR_LINEAR:
1930  {
1931  SCIP_Real* coefs;
1932 
1933  coefs = SCIPexprGetLinearCoefs(expr);
1934  assert(coefs != NULL || SCIPexprGetNChildren(expr) == 0);
1935 
1936  assert( buf != 0 );
1937  val = SCIPexprGetLinearConstant(expr);
1938  for (int i = 0; i < SCIPexprGetNChildren(expr); ++i)
1939  val += coefs[i] * buf[i]; /*lint !e613*/
1940  break;
1941  }
1942 
1943  case SCIP_EXPR_QUADRATIC:
1944  {
1945  SCIP_Real* lincoefs;
1946  SCIP_QUADELEM* quadelems;
1947  int nquadelems;
1948  SCIP_Real sqrcoef;
1949  Type lincoef;
1950  vector<Type> in(1);
1951  vector<Type> out(1);
1952 
1953  assert( buf != 0 );
1954 
1955  lincoefs = SCIPexprGetQuadLinearCoefs(expr);
1956  nquadelems = SCIPexprGetNQuadElements(expr);
1957  quadelems = SCIPexprGetQuadElements(expr);
1958  assert(quadelems != NULL || nquadelems == 0);
1959 
1960  SCIPexprSortQuadElems(expr);
1961 
1962  val = SCIPexprGetQuadConstant(expr);
1963 
1964  /* for each argument, we collect its linear coefficient from lincoefs, its square coefficient, and all factors from bilinear terms;
1965  * then we compute the term sqrcoef*x^2 + lincoef*x and add it to the result */
1966  int i = 0;
1967  for( int argidx = 0; argidx < SCIPexprGetNChildren(expr); ++argidx )
1968  {
1969  if( i == nquadelems || quadelems[i].idx1 > argidx ) /*lint !e613*/
1970  {
1971  /* there are no quadratic terms with argidx as their first argument, so this one is easy to handle */
1972  if( lincoefs != NULL )
1973  val += lincoefs[argidx] * buf[argidx];
1974  continue;
1975  }
1976 
1977  sqrcoef = 0.0;
1978  lincoef = lincoefs != NULL ? lincoefs[argidx] : 0.0;
1979 
1980  assert(i < nquadelems && quadelems[i].idx1 == argidx); /*lint !e613*/
1981  do
1982  {
1983  if( quadelems[i].idx2 == argidx ) /*lint !e613*/
1984  sqrcoef += quadelems[i].coef; /*lint !e613*/
1985  else
1986  lincoef += quadelems[i].coef * buf[quadelems[i].idx2]; /*lint !e613*/
1987  ++i;
1988  } while( i < nquadelems && quadelems[i].idx1 == argidx ); /*lint !e613*/
1989  assert(i == nquadelems || quadelems[i].idx1 > argidx); /*lint !e613*/
1990 
1991  /* this is not as good as what we can get from SCIPintervalQuad, but easy to implement */
1992  if( sqrcoef != 0.0 )
1993  {
1994  in[0] = buf[argidx];
1995  posintpower(in, out, 2);
1996  val += sqrcoef * out[0];
1997  }
1998 
1999  val += lincoef * buf[argidx];
2000  }
2001  assert(i == nquadelems);
2002 
2003  break;
2004  }
2005 
2006  case SCIP_EXPR_POLYNOMIAL:
2007  {
2008  SCIP_EXPRDATA_MONOMIAL** monomials;
2009  Type childval;
2010  Type monomialval;
2011  SCIP_Real exponent;
2012  int nmonomials;
2013  int nfactors;
2014  int* childidxs;
2015  SCIP_Real* exponents;
2016  int i;
2017  int j;
2018 
2019  assert( buf != 0 );
2020 
2021  val = SCIPexprGetPolynomialConstant(expr);
2022 
2023  nmonomials = SCIPexprGetNMonomials(expr);
2024  monomials = SCIPexprGetMonomials(expr);
2025 
2026  for( i = 0; i < nmonomials; ++i )
2027  {
2028  nfactors = SCIPexprGetMonomialNFactors(monomials[i]);
2029  childidxs = SCIPexprGetMonomialChildIndices(monomials[i]);
2030  exponents = SCIPexprGetMonomialExponents(monomials[i]);
2031  monomialval = SCIPexprGetMonomialCoef(monomials[i]);
2032 
2033  for( j = 0; j < nfactors; ++j )
2034  {
2035  assert(childidxs[j] >= 0);
2036  assert(childidxs[j] < SCIPexprGetNChildren(expr));
2037 
2038  childval = buf[childidxs[j]];
2039  exponent = exponents[j];
2040 
2041  /* cover some special exponents separately to avoid calling the expensive pow function */
2042  if( exponent == 0.0 )
2043  continue;
2044  if( exponent == 1.0 )
2045  {
2046  monomialval *= childval;
2047  continue;
2048  }
2049  if( (int)exponent == exponent )
2050  {
2051  Type tmp;
2052  evalIntPower(tmp, childval, (int)exponent);
2053  monomialval *= tmp;
2054  continue;
2055  }
2056  if( exponent == 0.5 )
2057  {
2058  Type tmp;
2059  evalSqrt(tmp, childval);
2060  monomialval *= tmp;
2061  continue;
2062  }
2063  monomialval *= pow(childval, exponent);
2064  }
2065 
2066  val += monomialval;
2067  }
2068 
2069  break;
2070  }
2071 
2072  case SCIP_EXPR_USER:
2073  evalUser(val, buf, expr);
2074  break;
2075 
2076  case SCIP_EXPR_LAST:
2077  default:
2078  BMSfreeMemoryArrayNull(&buf);
2079  return SCIP_ERROR;
2080  }
2081 
2082  BMSfreeMemoryArrayNull(&buf);
2083 
2084  return SCIP_OKAY;
2085 }
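/* A minimal sketch of how eval() is used to record a CppAD tape; this mirrors the
 * retaping branch of SCIPexprintEval() further below (X, Y, f, root, tree, n are names
 * local to the sketch):
 *
 *    vector< CppAD::AD<double> > X(n);   // independent variables
 *    vector< CppAD::AD<double> > Y(1);   // single dependent value
 *    CppAD::ADFun<double> f;
 *
 *    CppAD::Independent(X);                                            // start recording
 *    SCIP_CALL( eval(root, X, SCIPexprtreeGetParamVals(tree), Y[0]) );
 *    f.Dependent(X, Y);                                                // stop recording
 *
 *    vector<double> x(n);                // point of evaluation
 *    double val = f.Forward(0, x)[0];    // zero-order forward sweep reevaluates the tape
 */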
2086 
2087 /** analyzes an expression tree to determine whether it requires retaping on every evaluation
2088  *
2089  * This may be the case if the evaluation sequence depends on values of operands (e.g., in case of abs, sign, signpower, ...).
2090  */
2091 static
2092 void analyzeTree(
2093  SCIP_EXPRINTDATA* data,
2094  SCIP_EXPR* expr
2095  )
2096 {
2097  assert(expr != NULL);
2098  assert(SCIPexprGetChildren(expr) != NULL || SCIPexprGetNChildren(expr) == 0);
2099 
2100  for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
2101  analyzeTree(data, SCIPexprGetChildren(expr)[i]);
2102 
2103  switch( SCIPexprGetOperator(expr) )
2104  {
2105  case SCIP_EXPR_MIN:
2106  case SCIP_EXPR_MAX:
2107  case SCIP_EXPR_ABS:
2108 #ifdef NO_CPPAD_USER_ATOMIC
2109  case SCIP_EXPR_SIGNPOWER:
2110 #endif
2111  data->need_retape_always = true;
2112  break;
2113 
2114  case SCIP_EXPR_USER:
2115  data->userevalcapability &= SCIPexprGetUserEvalCapability(expr);
2116  break;
2117 
2118  default: ;
2119  } /*lint !e788*/
2120 
2121 }
2122 
2123 /** replacement for CppAD's default error handler
2124  *
2125  * In debug mode, CppAD gives an error when an evaluation contains a nan.
2126  * We do not want to stop execution in such a case, since the calling routine should check for nan's and decide what to do.
2127  * Since we cannot ignore only this particular error, we ignore all of them.
2128  * @todo find a way to check whether the error corresponds to a nan and communicate this back
2129  */
2130 static
2131 void cppaderrorcallback(
2132  bool known, /**< is the error from a known source? */
2133  int line, /**< line where the error occurred */
2134  const char* file, /**< file where the error occurred */
2135  const char* cond, /**< error condition */
2136  const char* msg /**< error message */
2137  )
2138 {
2139  SCIPdebugMessage("ignore CppAD error from %sknown source %s:%d: msg: %s exp: %s\n", known ? "" : "un", file, line, msg, cond);
2140 }
2141 
2142 /* install our error handler */
2143 static CppAD::ErrorHandler errorhandler(cppaderrorcallback);
2144 
2145 /** gets name and version of expression interpreter */
2146 const char* SCIPexprintGetName(void)
2147 {
2148  return CPPAD_PACKAGE_STRING;
2149 }
2150 
2151 /** gets descriptive text of expression interpreter */
2152 const char* SCIPexprintGetDesc(void)
2153 {
2154  return "Algorithmic Differentiation of C++ algorithms developed by B. Bell (www.coin-or.org/CppAD)";
2155 }
2156 
2157 /** gets capabilities of expression interpreter (using bitflags) */
2158 SCIP_EXPRINTCAPABILITY SCIPexprintGetCapability(
2159  void
2160  )
2161 {
2162  return SCIP_EXPRINTCAPABILITY_FUNCVALUE | SCIP_EXPRINTCAPABILITY_INTFUNCVALUE |
2163  SCIP_EXPRINTCAPABILITY_GRADIENT | SCIP_EXPRINTCAPABILITY_INTGRADIENT |
2164  SCIP_EXPRINTCAPABILITY_HESSIAN;
2165 }
2166 
2167 /** creates an expression interpreter object */
2168 SCIP_RETCODE SCIPexprintCreate(
2169  BMS_BLKMEM* blkmem, /**< block memory data structure */
2170  SCIP_EXPRINT** exprint /**< buffer to store pointer to expression interpreter */
2171  )
2172 {
2173  assert(blkmem != NULL);
2174  assert(exprint != NULL);
2175 
2176  if( BMSallocMemory(exprint) == NULL )
2177  return SCIP_NOMEMORY;
2178 
2179  (*exprint)->blkmem = blkmem;
2180 
2181  return SCIP_OKAY;
2182 }
2183 
2184 /** frees an expression interpreter object */
2185 SCIP_RETCODE SCIPexprintFree(
2186  SCIP_EXPRINT** exprint /**< expression interpreter that should be freed */
2187  )
2188 {
2189  assert( exprint != NULL);
2190  assert(*exprint != NULL);
2191 
2192  BMSfreeMemory(exprint);
2193 
2194  return SCIP_OKAY;
2195 }
2196 
2197 /** compiles an expression tree and stores compiled data in expression tree */
2198 SCIP_RETCODE SCIPexprintCompile(
2199  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2200  SCIP_EXPRTREE* tree /**< expression tree */
2201  )
2202 { /*lint --e{429} */
2203  assert(tree != NULL);
2204 
2205  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2206  if (!data)
2207  {
2208  data = new SCIP_EXPRINTDATA();
2209  assert( data != NULL );
2210  SCIPexprtreeSetInterpreterData(tree, data);
2211  SCIPdebugMessage("set interpreter data in tree %p to %p\n", (void*)tree, (void*)data);
2212  }
2213  else
2214  {
2215  data->need_retape = true;
2216  data->int_need_retape = true;
2217  }
2218 
2219  int n = SCIPexprtreeGetNVars(tree);
2220 
2221  data->X.resize(n);
2222  data->x.resize(n);
2223  data->Y.resize(1);
2224 
2225  data->int_X.resize(n);
2226  data->int_x.resize(n);
2227  data->int_Y.resize(1);
2228 
2229  if( data->root != NULL )
2230  {
2231  SCIPexprFreeDeep(exprint->blkmem, &data->root);
2232  }
2233 
2234  SCIP_EXPR* root = SCIPexprtreeGetRoot(tree);
2235 
2236  SCIP_CALL( SCIPexprCopyDeep(exprint->blkmem, &data->root, root) );
2237 
2238  data->blkmem = exprint->blkmem;
2239 
2240  analyzeTree(data, data->root);
2241 
2242  return SCIP_OKAY;
2243 }
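/* A brief usage sketch of the interpreter interface (error handling, the construction
 * of tree/varvals, and the sizing of grad are omitted; all names are illustrative):
 *
 *    SCIP_EXPRINT* exprint;
 *    SCIP_Real val;
 *    SCIP_Real* grad;   // assumed to have length SCIPexprtreeGetNVars(tree)
 *
 *    SCIP_CALL( SCIPexprintCreate(blkmem, &exprint) );
 *    SCIP_CALL( SCIPexprintCompile(exprint, tree) );
 *    SCIP_CALL( SCIPexprintEval(exprint, tree, varvals, &val) );
 *    SCIP_CALL( SCIPexprintGrad(exprint, tree, varvals, FALSE, &val, grad) );  // reuse point from Eval
 *    SCIP_CALL( SCIPexprintFree(&exprint) );
 */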
2244 
2245 
2246 /** gives the capability to evaluate an expression by the expression interpreter
2247  *
2248  * In cases of user-given expressions, higher order derivatives may not be available for the user-expression,
2249  * even if the expression interpreter could handle these. This method allows one to recognize that, e.g., the
2250  * Hessian for an expression is not available because it contains a user expression that does not provide
2251  * Hessians.
2252  */
2253 SCIP_EXPRINTCAPABILITY SCIPexprintGetExprtreeCapability(
2254  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2255  SCIP_EXPRTREE* tree /**< expression tree */
2256  )
2257 {
2258  assert(tree != NULL);
2259 
2259 
2260  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2261  assert(data != NULL);
2262 
2263  return data->userevalcapability;
2264 }/*lint !e715*/
2265 
2266 /** frees interpreter data */
2267 SCIP_RETCODE SCIPexprintFreeData(
2268  SCIP_EXPRINTDATA** interpreterdata /**< interpreter data that should be freed */
2269  )
2270 {
2271  assert( interpreterdata != NULL);
2272  assert(*interpreterdata != NULL);
2273 
2274  if( (*interpreterdata)->root != NULL )
2275  SCIPexprFreeDeep((*interpreterdata)->blkmem, &(*interpreterdata)->root);
2276 
2277  delete *interpreterdata;
2278  *interpreterdata = NULL;
2279 
2280  return SCIP_OKAY;
2281 }
2282 
2283 /** notify expression interpreter that a new parameterization is used
2284  *
2285  * This probably causes retaping by AD algorithms.
2286  */
2287 SCIP_RETCODE SCIPexprintNewParametrization(
2288  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2289  SCIP_EXPRTREE* tree /**< expression tree */
2290  )
2291 {
2292  assert(exprint != NULL);
2293  assert(tree != NULL);
2294 
2295  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2296  if( data != NULL )
2297  {
2298  data->need_retape = true;
2299  data->int_need_retape = true;
2300  }
2301 
2302  return SCIP_OKAY;
2303 }
2304 
2305 /** evaluates an expression tree */
2306 SCIP_RETCODE SCIPexprintEval(
2307  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2308  SCIP_EXPRTREE* tree, /**< expression tree */
2309  SCIP_Real* varvals, /**< values of variables */
2310  SCIP_Real* val /**< buffer to store value */
2311  )
2312 {
2313  SCIP_EXPRINTDATA* data;
2314 
2315  assert(exprint != NULL);
2316  assert(tree != NULL);
2317  assert(varvals != NULL);
2318  assert(val != NULL);
2319 
2320  data = SCIPexprtreeGetInterpreterData(tree);
2321  assert(data != NULL);
2322  assert(SCIPexprtreeGetNVars(tree) == (int)data->X.size());
2323  assert(SCIPexprtreeGetRoot(tree) != NULL);
2324 
2325  int n = SCIPexprtreeGetNVars(tree);
2326 
2327  if( n == 0 )
2328  {
2329  SCIP_CALL( SCIPexprtreeEval(tree, NULL, val) );
2330  return SCIP_OKAY;
2331  }
2332 
2333  if( data->need_retape_always || data->need_retape )
2334  {
2335  for( int i = 0; i < n; ++i )
2336  {
2337  data->X[i] = varvals[i];
2338  data->x[i] = varvals[i];
2339  }
2340 
2341  CppAD::Independent(data->X);
2342 
2343  if( data->root != NULL )
2344  SCIP_CALL( eval(data->root, data->X, SCIPexprtreeGetParamVals(tree), data->Y[0]) );
2345  else
2346  data->Y[0] = 0.0;
2347 
2348  data->f.Dependent(data->X, data->Y);
2349 
2350  data->val = Value(data->Y[0]);
2351  SCIPdebugMessage("Eval retaped and computed value %g\n", data->val);
2352 
2353  // the following is required if the gradient shall be computed by a reverse sweep later
2354  // data->val = data->f.Forward(0, data->x)[0];
2355 
2356  data->need_retape = false;
2357  }
2358  else
2359  {
2360  assert((int)data->x.size() >= n);
2361  for( int i = 0; i < n; ++i )
2362  data->x[i] = varvals[i];
2363 
2364  data->val = data->f.Forward(0, data->x)[0]; /*lint !e1793*/
2365  SCIPdebugMessage("Eval used forward sweep to compute value %g\n", data->val);
2366  }
2367 
2368  *val = data->val;
2369 
2370  return SCIP_OKAY;
2371 }
2372 
2373 /** evaluates an expression tree on intervals */
2374 SCIP_RETCODE SCIPexprintEvalInt(
2375  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2376  SCIP_EXPRTREE* tree, /**< expression tree */
2377  SCIP_Real infinity, /**< value for infinity */
2378  SCIP_INTERVAL* varvals, /**< interval values of variables */
2379  SCIP_INTERVAL* val /**< buffer to store interval value of expression */
2380  )
2381 {
2382  SCIP_EXPRINTDATA* data;
2383 
2384  assert(exprint != NULL);
2385  assert(tree != NULL);
2386  assert(varvals != NULL);
2387  assert(val != NULL);
2388 
2389  data = SCIPexprtreeGetInterpreterData(tree);
2390  assert(data != NULL);
2391  assert(SCIPexprtreeGetNVars(tree) == (int)data->int_X.size());
2392  assert(SCIPexprtreeGetRoot(tree) != NULL);
2393 
2394  int n = SCIPexprtreeGetNVars(tree);
2395 
2396  if( n == 0 )
2397  {
2398  SCIP_CALL( SCIPexprtreeEvalInt(tree, infinity, NULL, val) );
2399  return SCIP_OKAY;
2400  }
2401 
2403 
2404  if( data->int_need_retape || data->need_retape_always )
2405  {
2406  for( int i = 0; i < n; ++i )
2407  {
2408  data->int_X[i] = varvals[i];
2409  data->int_x[i] = varvals[i];
2410  }
2411 
2412  CppAD::Independent(data->int_X);
2413 
2414  if( data->root != NULL )
2415  SCIP_CALL( eval(data->root, data->int_X, SCIPexprtreeGetParamVals(tree), data->int_Y[0]) );
2416  else
2417  data->int_Y[0] = 0.0;
2418 
2419  data->int_f.Dependent(data->int_X, data->int_Y);
2420 
2421  data->int_val = Value(data->int_Y[0]);
2422 
2423  data->int_need_retape = false;
2424  }
2425  else
2426  {
2427  assert((int)data->int_x.size() >= n);
2428  for( int i = 0; i < n; ++i )
2429  data->int_x[i] = varvals[i];
2430 
2431  data->int_val = data->int_f.Forward(0, data->int_x)[0]; /*lint !e1793*/
2432  }
2433 
2434  *val = data->int_val;
2435 
2436  return SCIP_OKAY;
2437 }
2438 
2439 /** computes value and gradient of an expression tree */
2440 SCIP_RETCODE SCIPexprintGrad(
2441  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2442  SCIP_EXPRTREE* tree, /**< expression tree */
2443  SCIP_Real* varvals, /**< values of variables, can be NULL if new_varvals is FALSE */
2444  SCIP_Bool new_varvals, /**< have variable values changed since last call to a point evaluation routine? */
2445  SCIP_Real* val, /**< buffer to store expression value */
2446  SCIP_Real* gradient /**< buffer to store expression gradient, needs to have length at least SCIPexprtreeGetNVars(tree) */
2447  )
2448 {
2449  assert(exprint != NULL);
2450  assert(tree != NULL);
2451  assert(varvals != NULL || new_varvals == FALSE);
2452  assert(val != NULL);
2453  assert(gradient != NULL);
2454 
2455  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2456  assert(data != NULL);
2457 
2458  if( new_varvals )
2459  {
2460  SCIP_CALL( SCIPexprintEval(exprint, tree, varvals, val) );
2461  }
2462  else
2463  *val = data->val;
2464 
2465  int n = SCIPexprtreeGetNVars(tree);
2466 
2467  if( n == 0 )
2468  return SCIP_OKAY;
2469 
2470  vector<double> jac(data->f.Jacobian(data->x));
2471 
2472  for( int i = 0; i < n; ++i )
2473  gradient[i] = jac[i];
2474 
2475 /* disable debug output since we have no message handler here
2476 #ifdef SCIP_DEBUG
2477  SCIPdebugMessage("Grad for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2478  SCIPdebugMessage("x ="); for (int i = 0; i < n; ++i) printf("\t %g", data->x[i]); printf("\n");
2479  SCIPdebugMessage("grad ="); for (int i = 0; i < n; ++i) printf("\t %g", gradient[i]); printf("\n");
2480 #endif
2481 */
2482 
2483  return SCIP_OKAY;
2484 }
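/* Note on the gradient computation above: data->f has exactly one dependent value, so
 * CppAD's Jacobian() returns a vector of length n that is precisely the gradient,
 * jac[i] = d expr / d x_i, evaluated at the point stored in data->x by the preceding
 * zero-order forward sweep.  With one dependent and many independents, CppAD typically
 * realizes this with a single reverse sweep.
 */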
2485 
2486 /** computes interval value and interval gradient of an expression tree */
2487 SCIP_RETCODE SCIPexprintGradInt(
2488  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2489  SCIP_EXPRTREE* tree, /**< expression tree */
2490  SCIP_Real infinity, /**< value for infinity */
2491  SCIP_INTERVAL* varvals, /**< interval values of variables, can be NULL if new_varvals is FALSE */
2492  SCIP_Bool new_varvals, /**< have variable interval values changed since last call to an interval evaluation routine? */
2493  SCIP_INTERVAL* val, /**< buffer to store expression interval value */
2494  SCIP_INTERVAL* gradient /**< buffer to store expression interval gradient, needs to have length at least SCIPexprtreeGetNVars(tree) */
2495  )
2496 {
2497  assert(exprint != NULL);
2498  assert(tree != NULL);
2499  assert(varvals != NULL || new_varvals == false);
2500  assert(val != NULL);
2501  assert(gradient != NULL);
2502 
2503  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2504  assert(data != NULL);
2505 
2506  if (new_varvals)
2507  SCIP_CALL( SCIPexprintEvalInt(exprint, tree, infinity, varvals, val) );
2508  else
2509  *val = data->int_val;
2510 
2511  int n = SCIPexprtreeGetNVars(tree);
2512 
2513  if( n == 0 )
2514  return SCIP_OKAY;
2515 
2516  vector<SCIPInterval> jac(data->int_f.Jacobian(data->int_x));
2517 
2518  for (int i = 0; i < n; ++i)
2519  gradient[i] = jac[i];
2520 
2521 /* disable debug output since we have no message handler here
2522 #ifdef SCIP_DEBUG
2523  SCIPdebugMessage("GradInt for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2524  SCIPdebugMessage("x ="); for (int i = 0; i < n; ++i) printf("\t [%g,%g]", SCIPintervalGetInf(data->int_x[i]), SCIPintervalGetSup(data->int_x[i])); printf("\n");
2525  SCIPdebugMessage("grad ="); for (int i = 0; i < n; ++i) printf("\t [%g,%g]", SCIPintervalGetInf(gradient[i]), SCIPintervalGetSup(gradient[i])); printf("\n");
2526 #endif
2527 */
2528 
2529  return SCIP_OKAY;
2530 }
2531 
2532 /** gives sparsity pattern of hessian
2533  *
2534  * NOTE: this function might be replaced later by something nicer.
2535  * Since the AD code might need to do a forward sweep, you should pass variable values in here.
2536  */
2537 SCIP_RETCODE SCIPexprintHessianSparsityDense(
2538  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2539  SCIP_EXPRTREE* tree, /**< expression tree */
2540  SCIP_Real* varvals, /**< values of variables */
2541  SCIP_Bool* sparsity /**< buffer to store sparsity pattern of Hessian, sparsity[i+n*j] indicates whether entry (i,j) is nonzero in the hessian */
2542  )
2543 {
2544  assert(exprint != NULL);
2545  assert(tree != NULL);
2546  assert(varvals != NULL);
2547  assert(sparsity != NULL);
2548 
2549  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2550  assert(data != NULL);
2551 
2552  int n = SCIPexprtreeGetNVars(tree);
2553  if( n == 0 )
2554  return SCIP_OKAY;
2555 
2556  int nn = n*n;
2557 
2558  if( data->need_retape_always )
2559  {
2560  // @todo can we do something better here, e.g., by looking at the expression tree by ourself?
2561 
2562  for( int i = 0; i < nn; ++i )
2563  sparsity[i] = TRUE;
2564 
2565 /* disable debug output since we have no message handler here
2566 #ifdef SCIP_DEBUG
2567  SCIPdebugMessage("HessianSparsityDense for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2568  SCIPdebugMessage("sparsity = all elements, due to discontinuities\n");
2569 #endif
2570 */
2571 
2572  return SCIP_OKAY;
2573  }
2574 
2575  if( data->need_retape )
2576  {
2577  SCIP_Real val;
2578  SCIP_CALL( SCIPexprintEval(exprint, tree, varvals, &val) );
2579  }
2580 
2581  SCIPdebugMessage("calling ForSparseJac\n");
2582 
2583  vector<bool> r(nn, false);
2584  for (int i = 0; i < n; ++i)
2585  r[i*n+i] = true; /*lint !e647 !e1793*/
2586  (void) data->f.ForSparseJac(n, r); // need to compute sparsity for Jacobian first
2587 
2588  SCIPdebugMessage("calling RevSparseHes\n");
2589 
2590  vector<bool> s(1, true);
2591  vector<bool> sparsehes(data->f.RevSparseHes(n, s));
2592 
2593  for( int i = 0; i < nn; ++i )
2594  sparsity[i] = sparsehes[i];
2595 
2596 /* disable debug output since we have no message handler here
2597 #ifdef SCIP_DEBUG
2598  SCIPdebugMessage("HessianSparsityDense for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2599  SCIPdebugMessage("sparsity ="); for (int i = 0; i < n; ++i) for (int j = 0; j < n; ++j) if (sparsity[i*n+j]) printf(" (%d,%d)", i, j); printf("\n");
2600 #endif
2601 */
2602 
2603  return SCIP_OKAY;
2604 }
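/* Summary of the sparsity computation above (a sketch using the same names f and n):
 * ForSparseJac(n, r) with r = identity propagates Jacobian sparsity forward through the
 * tape, and RevSparseHes(n, s) with s = {true} then propagates second-order sparsity
 * backward, returning an n*n vector in which entry i*n+j is true whenever
 * d2F/(dx_i dx_j) may be nonzero.  The result is an over-approximation: entries flagged
 * true can still evaluate to zero, but entries flagged false are structurally zero.
 */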
2605 
2606 /** computes value and dense hessian of an expression tree
2607  *
2608  * The full hessian is computed (lower left and upper right triangle).
2609  */
2610 SCIP_RETCODE SCIPexprintHessianDense(
2611  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2612  SCIP_EXPRTREE* tree, /**< expression tree */
2613  SCIP_Real* varvals, /**< values of variables, can be NULL if new_varvals is FALSE */
2614  SCIP_Bool new_varvals, /**< have variable values changed since last call to an evaluation routine? */
2615  SCIP_Real* val, /**< buffer to store function value */
2616  SCIP_Real* hessian /**< buffer to store hessian values, needs to have size at least n*n */
2617  )
2618 {
2619  assert(exprint != NULL);
2620  assert(tree != NULL);
2621  assert(varvals != NULL || new_varvals == FALSE);
2622  assert(val != NULL);
2623  assert(hessian != NULL);
2624 
2625  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2626  assert(data != NULL);
2627 
2628  if( new_varvals )
2629  {
2630  SCIP_CALL( SCIPexprintEval(exprint, tree, varvals, val) );
2631  }
2632  else
2633  *val = data->val;
2634 
2635  int n = SCIPexprtreeGetNVars(tree);
2636 
2637  if( n == 0 )
2638  return SCIP_OKAY;
2639 
2640 #if 1
2641  /* this one uses reverse mode */
2642  vector<double> hess(data->f.Hessian(data->x, 0));
2643 
2644  int nn = n*n;
2645  for (int i = 0; i < nn; ++i)
2646  hessian[i] = hess[i];
2647 
2648 #else
2649  /* this one uses forward mode */
2650  for( int i = 0; i < n; ++i )
2651  for( int j = 0; j < n; ++j )
2652  {
2653  vector<int> ii(1,i);
2654  vector<int> jj(1,j);
2655  hessian[i*n+j] = data->f.ForTwo(data->x, ii, jj)[0];
2656  }
2657 #endif
2658 
2659 /* disable debug output since we have no message handler here
2660 #ifdef SCIP_DEBUG
2661  SCIPdebugMessage("HessianDense for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2662  SCIPdebugMessage("x ="); for (int i = 0; i < n; ++i) printf("\t %g", data->x[i]); printf("\n");
2663  SCIPdebugMessage("hess ="); for (int i = 0; i < n*n; ++i) printf("\t %g", hessian[i]); printf("\n");
2664 #endif
2665 */
2666  return SCIP_OKAY;
2667 }
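/* Note on the dense Hessian above: f.Hessian(x, 0) returns the Hessian of dependent
 * variable 0 (the only one) as a row-major vector of length n*n, using one first-order
 * forward and one second-order reverse sweep per independent direction.  The disabled
 * ForTwo() variant computes each entry separately and therefore needs on the order of
 * n*n sweeps, which is usually slower for dense Hessians.
 */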