exprinterpret_cppad.cpp
1/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2/* */
3/* This file is part of the program and library */
4/* SCIP --- Solving Constraint Integer Programs */
5/* */
6/* Copyright (c) 2002-2025 Zuse Institute Berlin (ZIB) */
7/* */
8/* Licensed under the Apache License, Version 2.0 (the "License"); */
9/* you may not use this file except in compliance with the License. */
10/* You may obtain a copy of the License at */
11/* */
12/* http://www.apache.org/licenses/LICENSE-2.0 */
13/* */
14/* Unless required by applicable law or agreed to in writing, software */
15/* distributed under the License is distributed on an "AS IS" BASIS, */
16/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
17/* See the License for the specific language governing permissions and */
18/* limitations under the License. */
19/* */
20/* You should have received a copy of the Apache-2.0 license */
21/* along with SCIP; see the file LICENSE. If not visit scipopt.org. */
22/* */
23/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
24
25/**@file exprinterpret_cppad.cpp
26 * @brief methods to interpret (evaluate) an expression "fast" using CppAD
27 * @ingroup DEFPLUGINS_EXPRINT
28 * @author Stefan Vigerske
29 */
30
31/*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/
32
33#include "scip/exprinterpret.h"
34#include "scip/def.h"
35#include "scip/intervalarith.h"
36#include "scip/pub_expr.h"
37#include "scip/scip_expr.h"
38#include "scip/expr_pow.h"
39#include "scip/expr_exp.h"
40#include "scip/expr_log.h"
41#include "scip/expr_varidx.h"
42
43#include <cmath>
44#include <cstring>
45#include <algorithm>
46#include <vector>
47using std::vector;
48
49/* Turn off lint warning "747: Significant prototype coercion" and "732: Loss of sign".
50 * The first warning is generated for expressions like t[0], where t is a vector, since 0 is an integer constant, but a
51 * size_t is expected (usually long unsigned). The second is generated for expressions like t[n], where n is an
52 * integer. Both code pieces are likely to be correct. It seems to be impossible to inhibit these messages for
53 * vector<*>::operator[] only. */
54/*lint --e{747,732}*/
55
56/* defining EVAL_USE_EXPRHDLR_ALWAYS uses the exprhdlr methods for the evaluation and differentiation of all expressions (except for var and value)
57 * instead of only those that are not recognized in this code
58 * this is incomplete (no hessians) and slow (only forward-mode gradients), but can be useful for testing
59 */
60// #define EVAL_USE_EXPRHDLR_ALWAYS
61
62/* defining NO_CPPAD_USER_ATOMIC disables the use of our own implementation of derivatives of power operators
63 * via CppAD's user-atomic function feature
64 * our customized implementation should give better results (tighter intervals) for the interval data type
65 * but we don't use interval types here anymore and the atomic operator does not look threadsafe (might be better in newer CppAD version)
66 */
67#define NO_CPPAD_USER_ATOMIC
68
69/* fallback to non-thread-safe version if C++ is too old to have std::atomic */
70#if __cplusplus < 201103L && defined(SCIP_THREADSAFE)
71#undef SCIP_THREADSAFE
72#endif
73
74/* CppAD needs to know a fixed upper bound on the number of threads at compile time.
75 * It is wise to set it to a power of 2, so that if the tape id overflows, it is likely to start at 0 again, which avoids difficult to debug errors.
76 */
77#ifndef CPPAD_MAX_NUM_THREADS
78#ifdef SCIP_THREADSAFE
79#define CPPAD_MAX_NUM_THREADS 64
80#else
81#define CPPAD_MAX_NUM_THREADS 1
82#endif
83#endif
84
85/* disable -Wshadow warnings for upcoming includes of CppAD if using some old GCC
86 * -Wshadow was too strict with some versions of GCC 4 (https://stackoverflow.com/questions/2958457/gcc-wshadow-is-too-strict)
87 * disable -Wimplicit-fallthrough as I don't want to maintain extra comments in CppAD code to suppress these
88 */
89#ifdef __GNUC__
90#if __GNUC__ == 4
91#pragma GCC diagnostic ignored "-Wshadow"
92#endif
93#if __GNUC__ >= 7
94#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
95#endif
96#endif
97
98// MS compiler doesn't have log2() - define an expensive workaround
99#ifdef _MSC_VER
100#define log2(x) (log((double)x) / log(2.0))
101#endif
102
103#include <cppad/cppad.hpp>
104#include <cppad/utility/error_handler.hpp>
105
106/* CppAD is not thread-safe by itself, since it uses some static data structures.
107 * To run it in a multithreading environment, a special CppAD memory allocator that is aware of the multiple threads has to be used.
108 * This allocator needs to know the number of threads and a thread number for each thread.
109 * To implement this, we follow the team_pthread example of CppAD, which uses pthread's thread-specific data management.
110 */
111#ifdef SCIP_THREADSAFE
112
113#include <atomic>
114
115/** currently registered number of threads */
116static std::atomic_size_t ncurthreads{0};
117static thread_local int thread_number{-1};
118
119/** CppAD callback function that indicates whether we are running in parallel mode */
120static
121bool in_parallel(void)
122{
123 return ncurthreads > 1;
124}
125
126/** CppAD callback function that returns the number of the current thread
127 *
128 * assigns a new number to the thread if new
129 */
130static
131size_t thread_num(void)
132{
133 size_t threadnum;
134
135 /* if no thread_number for this thread yet, then assign a new thread number to the current thread
136 */
137 if( thread_number == -1 )
138 {
139 thread_number = static_cast<int>(ncurthreads.fetch_add(1, std::memory_order_relaxed));
140 }
141
142 threadnum = static_cast<size_t>(thread_number);
143
144 return threadnum;
145}
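#ifdef SCIP_EXPRINT_EXAMPLES  /* hypothetical guard, never defined; illustrative sketch only, not part of SCIP */
/* minimal sketch of the thread-number bookkeeping above: every thread that touches CppAD
 * ends up in thread_num(), which hands out ids 0, 1, 2, ... in the order in which threads
 * first call it; once more than one thread has registered, in_parallel() reports true */
#include <thread>
static void example_thread_ids(void)
{
   std::thread t1([]() { size_t id = thread_num(); (void)id; });
   std::thread t2([]() { size_t id = thread_num(); (void)id; });
   t1.join();
   t2.join();
   assert(in_parallel());  /* at least two thread numbers have been assigned by now */
}
#endif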
146
147/** sets up CppAD's datastructures for running in multithreading mode
148 *
149 * It must be called once before multithreading is started.
150 * For GCC-compatible compilers, this will happen automatically.
151 */
152extern "C" SCIP_EXPORT char SCIPexprintCppADInitParallel(void);
153#ifdef __GNUC__
154__attribute__((constructor))
155#endif
156char SCIPexprintCppADInitParallel(void)
157{
158 CppAD::thread_alloc::parallel_setup(CPPAD_MAX_NUM_THREADS, in_parallel, thread_num);
159 CppAD::parallel_ad<double>();
160
161 return 0;
162}
163
164#if !defined(__GNUC__)
165/** a dummy variable that is initialized to the result of SCIPexprintCppADInitParallel()
166 *
167 * The purpose is to make sure that SCIPexprintCppADInitParallel() is called before any multithreading is started.
168 */
169static char init_parallel_return = SCIPexprintCppADInitParallel();
170#endif
171
172#endif // SCIP_THREADSAFE
173
174using CppAD::AD;
175
176class atomic_userexpr;
177
178/** expression specific interpreter data */
179struct SCIP_ExprIntData
180{
181public:
182 /** constructor */
183 SCIP_ExprIntData()
184 : val(0.0),
185 need_retape(true),
186 need_retape_always(false),
187 userevalcapability(SCIP_EXPRINTCAPABILITY_ALL),
188 hesrowidxs(NULL),
189 hescolidxs(NULL),
190 hesnnz(0),
191 hesconstant(false)
192 { }
193
194 /** destructor */
195 ~SCIP_ExprIntData()
196 { }/*lint --e{1540}*/
197
198 /** gives position of index in varidxs vector */
199 int getVarPos(
200 int varidx /**< variable index to look for */
201 ) const
202 {
203 // varidxs is sorted, so can use binary search functions
204 assert(std::binary_search(varidxs.begin(), varidxs.end(), varidx));
205 return std::lower_bound(varidxs.begin(), varidxs.end(), varidx) - varidxs.begin();
206 }
207
208 vector< int > varidxs; /**< variable indices used in expression (unique and sorted) */
209 vector< AD<double> > X; /**< vector of dependent variables (same size as varidxs) */
210 vector< AD<double> > Y; /**< result vector (size 1) */
211 CppAD::ADFun<double> f; /**< the function to evaluate as CppAD object */
212
213 vector<double> x; /**< current values of dependent variables (same size as varidxs) */
214 double val; /**< current function value */
215 bool need_retape; /**< will retaping be required for the next point evaluation? */
216 bool need_retape_always; /**< will retaping be always required? */
217
218 vector<atomic_userexpr*> userexprs; /**< vector of atomic_userexpr that are created during eval() and need to be kept as long as the tape exists */
219 SCIP_EXPRINTCAPABILITY userevalcapability;/**< (intersection of) capabilities of evaluation routines of user expressions */
220
221 int* hesrowidxs; /**< row indices of Hessian sparsity: indices are the variables used in expression */
222 int* hescolidxs; /**< column indices of Hessian sparsity: indices are the variables used in expression */
223 vector<SCIP_Real> hesvalues; /**< values of Hessian (a vector<> so we can pass it to CppAD) */
224 int hesnnz; /**< number of nonzeros in Hessian */
225 SCIP_Bool hesconstant; /**< whether Hessian is constant (because expr is at most quadratic) */
226
227 // Hessian data in CppAD style: indices are 0..n-1 and elements in both the lower and upper triangular parts of the matrix are considered
228 CppAD::local::internal_sparsity<bool>::pattern_type hessparsity_pattern; /**< packed sparsity pattern of Hessian in CppAD-internal form */
229 CppAD::vector<size_t> hessparsity_row; /**< row indices of sparsity pattern of Hessian in CppAD-internal form */
230 CppAD::vector<size_t> hessparsity_col; /**< column indices of sparsity pattern of Hessian in CppAD-internal form */
231 CppAD::sparse_hessian_work heswork; /**< work memory of CppAD for sparse Hessians */
232};
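#ifdef SCIP_EXPRINT_EXAMPLES  /* hypothetical guard, never defined; illustrative sketch only, not part of SCIP */
/* minimal usage sketch of getVarPos(), assuming C++11 list-initialization: the interpreter stores the
 * unique, sorted variable indices of the expression in varidxs, and getVarPos() maps a variable index
 * back to its position in that vector (and hence into X and x) */
static void example_getvarpos(void)
{
   SCIP_ExprIntData data;
   data.varidxs = { 2, 5, 9 };     /* unique and sorted, as required by getVarPos() */
   int pos = data.getVarPos(5);    /* == 1, found via std::lower_bound on the sorted vector */
   assert(pos == 1);
   (void)pos;
}
#endif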
233
234#ifndef NO_CPPAD_USER_ATOMIC
235
236/** computes sparsity of jacobian for a univariate function during a forward sweep
237 *
238 * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
239 * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
240 */
241static
242bool univariate_for_sparse_jac(
243 size_t q, /**< number of columns in R */
244 const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
245 CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
246 )
247{
248 assert(r.size() == q);
249 assert(s.size() == q);
250
251 s = r;
252
253 return true;
254}
255
256/** Computes sparsity of jacobian during a reverse sweep
257 *
258 * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
259 * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
260 */
261static
262bool univariate_rev_sparse_jac(
263 size_t q, /**< number of rows in R */
264 const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
265 CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
266 )
267{
268 assert(r.size() == q);
269 assert(s.size() == q);
270
271 s = r;
272
273 return true;
274}
275
276/** computes sparsity of hessian during a reverse sweep
277 *
278 * Assume V(x) = (g(f(x)))'' R with f(x) = x^p for a function g:R->R and a matrix R.
279 * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
280 */ /*lint -e715*/
281static
282bool univariate_rev_sparse_hes(
283 const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
284 const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
285 CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
286 size_t q, /**< number of columns in R, U, and V */
287 const CppAD::vector<bool>& r, /**< sparsity pattern of R */
288 const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
289 CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
290 )
291{ /*lint --e{439,715}*/ /* @todo take vx into account */
292 assert(r.size() == q);
293 assert(s.size() == 1);
294 assert(t.size() == 1);
295 assert(u.size() == q);
296 assert(v.size() == q);
297
298 // T(x) = g'(f(x)) * f'(x) = S * f'(x), and f' is not identically 0
299 t[0] = s[0];
300
301 // V(x) = g''(f(x)) f'(x) f'(x) R + g'(f(x)) f''(x) R
302 // = f'(x) U + S f''(x) R, with f'(x) and f''(x) not identically 0
303 v = u;
304 if( s[0] )
305 for( size_t j = 0; j < q; ++j )
306 if( r[j] )
307 v[j] = true;
308
309 return true;
310}
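/* worked example for the sparsity propagation above (illustration, not part of the original code):
 * with q = 3, s = {1}, r = {1,0,1}, u = {0,1,0}, the routine returns t = {1} and
 * v = u OR r = {1,1,1}: since g'(y) is nonzero, the g'(f(x)) f''(x) R term makes every
 * nonzero column of R appear in V in addition to the columns already present in U */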
311
312
313/** Automatic differentiation of x -> x^p, p>=2 integer, as CppAD user-atomic function.
314 *
315 * This class implements forward and reverse operations for the function x -> x^p for use within CppAD.
316 * While CppAD would implement integer powers as a recursion of multiplications, we still use pow functions as they allow us to avoid overestimation in interval arithmetics.
317 *
318 * @todo treat the exponent as a (variable) argument to the function, with the assumption that we never differentiate w.r.t. it (this should make the approach threadsafe again)
319 */
320template<class Type>
321class atomic_posintpower : public CppAD::atomic_base<Type>
322{
323public:
324 atomic_posintpower()
325 : CppAD::atomic_base<Type>("posintpower"),
326 exponent(0)
327 {
328 /* indicate that we want to use bool-based sparsity pattern */
329 this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
330 }
331
332private:
333 /** exponent value for next call to forward or reverse */
334 int exponent;
335
336 /** stores exponent value corresponding to next call to forward or reverse
337 *
338 * how is this supposed to be threadsafe? (we use only one global instantiation of this class)
339 * TODO using this function is deprecated, do as with atomic_userexpr instead
340 */
341 virtual void set_old(size_t id)
342 {
343 exponent = (int) id;
344 }
345
346 /** forward sweep of positive integer power
347 *
348 * Given the taylor coefficients for x, we have to compute the taylor coefficients for f(x),
349 * that is, given tx = (x, x', x'', ...), we compute the coefficients ty = (y, y', y'', ...)
350 * in the taylor expansion of f(x) = x^p.
351 * Thus, y = x^p
352 * = tx[0]^p,
353 * y' = p * x^(p-1) * x'
354 * = p * tx[0]^(p-1) * tx[1],
355 * y'' = 1/2 * p * (p-1) * x^(p-2) * x'^2 + p * x^(p-1) * x''
356 * = 1/2 * p * (p-1) * tx[0]^(p-2) * tx[1]^2 + p * tx[0]^(p-1) * tx[2]
357 */
358 bool forward(
359 size_t q, /**< lowest order Taylor coefficient that we are evaluating */
360 size_t p, /**< highest order Taylor coefficient that we are evaluating */
361 const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
362 CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
363 const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
364 CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
365 )
366 {
367 assert(exponent > 1);
368 assert(tx.size() >= p+1);
369 assert(ty.size() >= p+1);
370 assert(q <= p);
371
372 if( vx.size() > 0 )
373 {
374 assert(vx.size() == 1);
375 assert(vy.size() == 1);
376 assert(p == 0);
377
378 vy[0] = vx[0];
379 }
380
381 if( q == 0 /* q <= 0 && 0 <= p */ )
382 {
383 ty[0] = CppAD::pow(tx[0], exponent);
384 }
385
386 if( q <= 1 && 1 <= p )
387 {
388 ty[1] = CppAD::pow(tx[0], exponent-1) * tx[1];
389 ty[1] *= double(exponent);
390 }
391
392 if( q <= 2 && 2 <= p )
393 {
394 if( exponent > 2 )
395 {
396 // ty[2] = 1/2 * exponent * (exponent-1) * pow(tx[0], exponent-2) * tx[1] * tx[1] + exponent * pow(tx[0], exponent-1) * tx[2];
397 ty[2] = CppAD::pow(tx[0], exponent-2) * tx[1] * tx[1];
398 ty[2] *= (exponent-1) / 2.0;
399 ty[2] += CppAD::pow(tx[0], exponent-1) * tx[2];
400 ty[2] *= exponent;
401 }
402 else
403 {
404 assert(exponent == 2);
405 // ty[2] = 1/2 * exponent * tx[1] * tx[1] + exponent * tx[0] * tx[2];
406 ty[2] = tx[1] * tx[1] + 2.0 * tx[0] * tx[2];
407 }
408 }
409
410 /* higher order derivatives not implemented */
411 if( p > 2 )
412 return false;
413
414 return true;
415 }
416
417 /** reverse sweep of positive integer power
418 *
419 * Assume y(x) is a function of the taylor coefficients of f(x) = x^p for x, i.e.,
420 * y(x) = [ x^p, p * x^(p-1) * x', p * (p-1) * x^(p-2) * x'^2 + p * x^(p-1) * x'', ... ].
421 * Then in the reverse sweep we have to compute the elements of \f$\partial h / \partial x^[l], l = 0, ..., k,\f$
422 * where x^[l] is the l'th taylor coefficient (x, x', x'', ...) and h(x) = g(y(x)) for some function g:R^k -> R.
423 * That is, we have to compute
424 *\f$
425 * px[l] = \partial h / \partial x^[l] = (\partial g / \partial y) * (\partial y / \partial x^[l])
426 * = \sum_{i=0}^k (\partial g / \partial y_i) * (\partial y_i / \partial x^[l])
427 * = \sum_{i=0}^k py[i] * (\partial y_i / \partial x^[l])
428 * \f$
429 *
430 * For k = 0, this means
431 *\f$
432 * px[0] = py[0] * (\partial y_0 / \partial x^[0])
433 * = py[0] * (\partial x^p / \partial x)
434 * = py[0] * p * tx[0]^(p-1)
435 *\f$
436 *
437 * For k = 1, this means
438 * \f$
439 * px[0] = py[0] * (\partial y_0 / \partial x^[0]) + py[1] * (\partial y_1 / \partial x^[0])
440 * = py[0] * (\partial x^p / \partial x) + py[1] * (\partial (p * x^(p-1) * x') / \partial x)
441 * = py[0] * p * tx[0]^(p-1) + py[1] * p * (p-1) * tx[0]^(p-2) * tx[1]
442 * px[1] = py[0] * (\partial y_0 / \partial x^[1]) + py[1] * (\partial y_1 / \partial x^[1])
443 * = py[0] * (\partial x^p / \partial x') + py[1] * (\partial (p * x^(p-1) x') / \partial x')
444 * = py[0] * 0 + py[1] * p * tx[0]^(p-1)
445 * \f$
446 */ /*lint -e715*/
447 bool reverse(
448 size_t p, /**< highest order Taylor coefficient that we are evaluating */
449 const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
450 const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
451 CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
452 const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
453 )
454 { /*lint --e{715}*/
455 assert(exponent > 1);
456 assert(px.size() >= p+1);
457 assert(py.size() >= p+1);
458 assert(tx.size() >= p+1);
459
460 switch( p )
461 {
462 case 0:
463 // px[0] = py[0] * exponent * pow(tx[0], exponent-1);
464 px[0] = py[0] * CppAD::pow(tx[0], exponent-1);
465 px[0] *= exponent;
466 break;
467
468 case 1:
469 // px[0] = py[0] * exponent * pow(tx[0], exponent-1) + py[1] * exponent * (exponent-1) * pow(tx[0], exponent-2) * tx[1];
470 px[0] = py[1] * tx[1] * CppAD::pow(tx[0], exponent-2);
471 px[0] *= exponent-1;
472 px[0] += py[0] * CppAD::pow(tx[0], exponent-1);
473 px[0] *= exponent;
474 // px[1] = py[1] * exponent * pow(tx[0], exponent-1);
475 px[1] = py[1] * CppAD::pow(tx[0], exponent-1);
476 px[1] *= exponent;
477 break;
478
479 default:
480 return false;
481 }
482
483 return true;
484 }
485
486 using CppAD::atomic_base<Type>::for_sparse_jac;
487
488 /** computes sparsity of jacobian during a forward sweep
489 *
490 * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
491 * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
492 */
493 bool for_sparse_jac(
494 size_t q, /**< number of columns in R */
495 const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
496 CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
497 )
498 {
499 return univariate_for_sparse_jac(q, r, s);
500 }
501
502 using CppAD::atomic_base<Type>::rev_sparse_jac;
503
504 /** computes sparsity of jacobian during a reverse sweep
505 *
506 * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
507 * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
508 */
509 bool rev_sparse_jac(
510 size_t q, /**< number of rows in R */
511 const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
512 CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
513 )
514 {
515 return univariate_rev_sparse_jac(q, r, s);
516 }
517
518 using CppAD::atomic_base<Type>::rev_sparse_hes;
519
520 /** computes sparsity of hessian during a reverse sweep
521 *
522 * Assume V(x) = (g(f(x)))'' R with f(x) = x^p for a function g:R->R and a matrix R.
523 * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
524 */
525 bool rev_sparse_hes(
526 const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
527 const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
528 CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
529 size_t q, /**< number of columns in R, U, and V */
530 const CppAD::vector<bool>& r, /**< sparsity pattern of R */
531 const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
532 CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
533 )
534 {
535 return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
536 }
537};
538
539/** power function with natural exponents */
540template<class Type>
541static
542void posintpower(
543 const vector<Type>& in, /**< vector whose first element is the base */
544 vector<Type>& out, /**< vector where to store the result in the first element */
545 size_t exponent /**< exponent */
546 )
547{
548 static atomic_posintpower<typename Type::value_type> pip;
549 pip(in, out, exponent);
550}
551
552#else
553
554/** power function with natural exponents */
555template<class Type>
556void posintpower(
557 const vector<Type>& in, /**< vector whose first element is the base */
558 vector<Type>& out, /**< vector where to store the result in the first element */
559 size_t exponent /**< exponent */
560 )
561{
562 out[0] = pow(in[0], (int)exponent);
563}
564
565#endif
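#ifdef SCIP_EXPRINT_EXAMPLES  /* hypothetical guard, never defined; plain-double sketch for illustration only */
/* worked example for the forward() Taylor formulas documented above for f(x) = x^p:
 * with p = 3 and x(t) = 2 + t, i.e. tx = {2, 1, 0}, we have (2+t)^3 = 8 + 12 t + 6 t^2 + t^3,
 * so the Taylor coefficients computed by forward() should be ty = {8, 12, 6} */
static void example_posintpower_taylor(void)
{
   const int p = 3;
   const double tx[3] = { 2.0, 1.0, 0.0 };
   double ty[3];
   ty[0] = std::pow(tx[0], p);                                 /* y   = x^p                                =  8 */
   ty[1] = p * std::pow(tx[0], p-1) * tx[1];                   /* y'  = p x^(p-1) x'                       = 12 */
   ty[2] = 0.5 * p * (p-1) * std::pow(tx[0], p-2) * tx[1] * tx[1]
         + p * std::pow(tx[0], p-1) * tx[2];                   /* second Taylor coefficient (y''/2)        =  6 */
   assert(std::fabs(ty[0] - 8.0) < 1e-12 && std::fabs(ty[1] - 12.0) < 1e-12 && std::fabs(ty[2] - 6.0) < 1e-12);
   (void)ty;
}
#endif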
566
567
568#ifndef NO_CPPAD_USER_ATOMIC
569
570/** sign of a value (-1 or +1)
571 *
572 * 0.0 has sign +1
573 */
574#define SIGN(x) ((x) >= 0.0 ? 1.0 : -1.0)
575
576/** Automatic differentiation of x -> sign(x)abs(x)^p, p>=1, as CppAD user-atomic function.
577 *
578 * This class implements forward and reverse operations for the function x -> sign(x)abs(x)^p for use within CppAD.
579 * While we otherwise would have to use discontinuous sign and abs functions, our own implementation allows to provide
580 * a continuously differentiable function.
581 *
582 * @todo treat the exponent as a (variable) argument to the function, with the assumption that we never differentiate w.r.t. it (this should make the approach threadsafe again)
583 */
584template<class Type>
585class atomic_signpower : public CppAD::atomic_base<Type>
586{
587public:
588 atomic_signpower()
589 : CppAD::atomic_base<Type>("signpower"),
590 exponent(0.0)
591 {
592 /* indicate that we want to use bool-based sparsity pattern */
593 this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
594 }
595
596private:
597 /** exponent for use in next call to forward or reverse */
598 SCIP_Real exponent;
599
600 /** stores exponent corresponding to next call to forward or reverse
601 *
602 * How is this supposed to be threadsafe? (we use only one global instantiation of this class)
603 * TODO using this function is deprecated, do as with atomic_userexpr instead
604 */
605 virtual void set_old(size_t id)
606 {
607 exponent = SCIPgetExponentExprPow((SCIP_EXPR*)(void*)id);
608 }
609
610 /** forward sweep of signpower
611 *
612 * Given the taylor coefficients for x, we have to compute the taylor coefficients for f(x),
613 * that is, given tx = (x, x', x'', ...), we compute the coefficients ty = (y, y', y'', ...)
614 * in the taylor expansion of f(x) = sign(x)abs(x)^p.
615 * Thus, y = sign(x)abs(x)^p
616 * = sign(tx[0])abs(tx[0])^p,
617 * y' = p * abs(x)^(p-1) * x'
618 * = p * abs(tx[0])^(p-1) * tx[1],
619 * y'' = 1/2 * p * (p-1) * sign(x) * abs(x)^(p-2) * x'^2 + p * abs(x)^(p-1) * x''
620 * = 1/2 * p * (p-1) * sign(tx[0]) * abs(tx[0])^(p-2) * tx[1]^2 + p * abs(tx[0])^(p-1) * tx[2]
621 */
622 bool forward(
623 size_t q, /**< lowest order Taylor coefficient that we are evaluating */
624 size_t p, /**< highest order Taylor coefficient that we are evaluating */
625 const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
626 CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
627 const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
628 CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
629 )
630 {
631 assert(exponent > 0.0);
632 assert(tx.size() >= p+1);
633 assert(ty.size() >= p+1);
634 assert(q <= p);
635
636 if( vx.size() > 0 )
637 {
638 assert(vx.size() == 1);
639 assert(vy.size() == 1);
640 assert(p == 0);
641
642 vy[0] = vx[0];
643 }
644
645 if( q == 0 /* q <= 0 && 0 <= p */ )
646 {
647 ty[0] = SIGN(tx[0]) * pow(REALABS(tx[0]), exponent);
648 }
649
650 if( q <= 1 && 1 <= p )
651 {
652 ty[1] = pow(REALABS(tx[0]), exponent - 1.0) * tx[1];
653 ty[1] *= exponent;
654 }
655
656 if( q <= 2 && 2 <= p )
657 {
658 if( exponent != 2.0 )
659 {
660 ty[2] = SIGN(tx[0]) * pow(REALABS(tx[0]), exponent - 2.0) * tx[1] * tx[1];
661 ty[2] *= (exponent - 1.0) / 2.0;
662 ty[2] += pow(REALABS(tx[0]), exponent - 1.0) * tx[2];
663 ty[2] *= exponent;
664 }
665 else
666 {
667 // y'' = 2 (1/2 * sign(x) * x'^2 + |x|*x'') = sign(tx[0]) * tx[1]^2 + 2 * abs(tx[0]) * tx[2]
668 ty[2] = SIGN(tx[0]) * tx[1] * tx[1];
669 ty[2] += 2.0 * REALABS(tx[0]) * tx[2];
670 }
671 }
672
673 /* higher order derivatives not implemented */
674 if( p > 2 )
675 return false;
676
677 return true;
678 }
679
680 /** reverse sweep of signpower
681 *
682 * Assume y(x) is a function of the taylor coefficients of f(x) = sign(x)|x|^p for x, i.e.,
683 * y(x) = [ f(x), f'(x), f''(x), ... ].
684 * Then in the reverse sweep we have to compute the elements of \f$\partial h / \partial x^[l], l = 0, ..., k,\f$
685 * where x^[l] is the l'th taylor coefficient (x, x', x'', ...) and h(x) = g(y(x)) for some function g:R^k -> R.
686 * That is, we have to compute
687 *\f$
688 * px[l] = \partial h / \partial x^[l] = (\partial g / \partial y) * (\partial y / \partial x^[l])
689 * = \sum_{i=0}^k (\partial g / \partial y_i) * (\partial y_i / \partial x^[l])
690 * = \sum_{i=0}^k py[i] * (\partial y_i / \partial x^[l])
691 *\f$
692 *
693 * For k = 0, this means
694 *\f$
695 * px[0] = py[0] * (\partial y_0 / \partial x^[0])
696 * = py[0] * (\partial f(x) / \partial x)
697 * = py[0] * p * abs(tx[0])^(p-1)
698 * \f$
699 *
700 * For k = 1, this means
701 *\f$
702 * px[0] = py[0] * (\partial y_0 / \partial x^[0]) + py[1] * (\partial y_1 / \partial x^[0])
703 * = py[0] * (\partial f(x) / \partial x) + py[1] * (\partial f'(x) / \partial x)
704 * = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
705 * px[1] = py[0] * (\partial y_0 / \partial x^[1]) + py[1] * (\partial y_1 / \partial x^[1])
706 * = py[0] * (\partial f(x) / \partial x') + py[1] * (\partial f'(x) / \partial x')
707 * = py[0] * 0 + py[1] * p * abs(tx[0])^(p-1)
708 * \f$
709 */
710 bool reverse(
711 size_t p, /**< highest order Taylor coefficient that we are evaluating */
712 const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
713 const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
714 CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
715 const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
716 )
717 { /*lint --e{715}*/
718 assert(exponent > 1);
719 assert(px.size() >= p+1);
720 assert(py.size() >= p+1);
721 assert(tx.size() >= p+1);
722
723 switch( p )
724 {
725 case 0:
726 // px[0] = py[0] * p * pow(abs(tx[0]), p-1);
727 px[0] = py[0] * pow(REALABS(tx[0]), exponent - 1.0);
728 px[0] *= exponent;
729 break;
730
731 case 1:
732 if( exponent != 2.0 )
733 {
734 // px[0] = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
735 px[0] = py[1] * tx[1] * pow(REALABS(tx[0]), exponent - 2.0) * SIGN(tx[0]);
736 px[0] *= exponent - 1.0;
737 px[0] += py[0] * pow(REALABS(tx[0]), exponent - 1.0);
738 px[0] *= exponent;
739 // px[1] = py[1] * p * abs(tx[0])^(p-1)
740 px[1] = py[1] * pow(REALABS(tx[0]), exponent - 1.0);
741 px[1] *= exponent;
742 }
743 else
744 {
745 // px[0] = py[0] * 2.0 * abs(tx[0]) + py[1] * 2.0 * sign(tx[0]) * tx[1]
746 px[0] = py[1] * tx[1] * SIGN(tx[0]);
747 px[0] += py[0] * REALABS(tx[0]);
748 px[0] *= 2.0;
749 // px[1] = py[1] * 2.0 * abs(tx[0])
750 px[1] = py[1] * REALABS(tx[0]);
751 px[1] *= 2.0;
752 }
753 break;
754
755 default:
756 return false;
757 }
758
759 return true;
760 }
761
762 using CppAD::atomic_base<Type>::for_sparse_jac;
763
764 /** computes sparsity of jacobian during a forward sweep
765 *
766 * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
767 * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
768 */
769 bool for_sparse_jac(
770 size_t q, /**< number of columns in R */
771 const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
772 CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
773 )
774 {
775 return univariate_for_sparse_jac(q, r, s);
776 }
777
778 using CppAD::atomic_base<Type>::rev_sparse_jac;
779
780 /** computes sparsity of jacobian during a reverse sweep
781 *
782 * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
783 * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
784 */
785 bool rev_sparse_jac(
786 size_t q, /**< number of rows in R */
787 const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
788 CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
789 )
790 {
791 return univariate_rev_sparse_jac(q, r, s);
792 }
793
794 using CppAD::atomic_base<Type>::rev_sparse_hes;
795
796 /** computes sparsity of hessian during a reverse sweep
797 *
798 * Assume V(x) = (g(f(x)))'' R with f(x) = sign(x)abs(x)^p for a function g:R->R and a matrix R.
799 * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
800 */
801 bool rev_sparse_hes(
802 const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
803 const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
804 CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
805 size_t q, /**< number of columns in S and R */
806 const CppAD::vector<bool>& r, /**< sparsity pattern of R */
807 const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
808 CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
809 )
810 {
811 return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
812 }
813};
814
815/** template for evaluation for signpower operator */
816template<class Type>
817static
818void evalSignPower(
819 Type& resultant, /**< resultant */
820 const Type& arg, /**< operand */
821 SCIP_EXPR* expr /**< expression that holds the exponent */
822 )
823{
824 vector<Type> in(1, arg);
825 vector<Type> out(1);
826
827 static atomic_signpower<typename Type::value_type> sp;
828 sp(in, out, (size_t)(void*)expr);
829
830 resultant = out[0];
831 return;
832}
833
834#else
835
836/** specialization of signpower evaluation for real numbers */
837template<class Type>
838static
839void evalSignPower(
840 CppAD::AD<Type>& resultant, /**< resultant */
841 const CppAD::AD<Type>& arg, /**< operand */
842 SCIP_EXPR* expr /**< expression that holds the exponent */
843 )
844{
845 AD<Type> adzero(0.);
846 SCIP_Real exponent;
847
848 exponent = SCIPgetExponentExprPow(expr);
849 assert(exponent >= 1.0);
850
851 if( EPSISINT(exponent, 0.0) )
852 {
853 resultant = CppAD::CondExpGe(arg, adzero, pow(arg, (int)exponent), -pow(-arg, (int)exponent));
854 }
855 else
856 {
857 /* pow(0,fractional>1) is not differentiable in the CppAD world (https://github.com/coin-or/CppAD/discussions/93)
858 * we work around this by evaluating pow(eps,exponent) in this case
859 */
860 resultant = CppAD::CondExpEq(arg, adzero, pow(arg+std::numeric_limits<SCIP_Real>::epsilon(), exponent)-pow(std::numeric_limits<SCIP_Real>::epsilon(), exponent),
861 CppAD::CondExpGe(arg, adzero, pow(arg, exponent), -pow(-arg, exponent)));
862 }
863}
864#endif
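#ifdef SCIP_EXPRINT_EXAMPLES  /* hypothetical guard, never defined; plain-double sketch for illustration only */
/* numeric sketch of the signpower evaluation above: for exponent p = 1.5, sign(x)|x|^p maps -4 to -8
 * and 4 to 8; at x = 0 the CondExpEq workaround evaluates pow(0+eps,p) - pow(eps,p) = 0, i.e. the
 * correct function value, while giving CppAD a branch whose derivative at 0 is finite */
static void example_signpower_values(void)
{
   double p = 1.5;
   double eps = std::numeric_limits<double>::epsilon();
   double atminusfour = -std::pow(std::fabs(-4.0), p);         /* sign(-4)*|-4|^1.5 = -8 */
   double atzero = std::pow(0.0 + eps, p) - std::pow(eps, p);  /* the x == 0 branch evaluates to 0 */
   assert(std::fabs(atminusfour - (-8.0)) < 1e-12);
   assert(atzero == 0.0);
   (void)atminusfour;
   (void)atzero;
}
#endif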
865
866/** Automatic differentiation of user expression as CppAD user-atomic function.
867 *
868 * This class implements forward and reverse operations for a function given by a user expression for use within CppAD.
869 */
870class atomic_userexpr : public CppAD::atomic_base<SCIP_Real>
871{
872public:
873 atomic_userexpr(
874 SCIP* scip_, /**< SCIP data structure */
875 SCIP_EXPR* expr_ /**< expression to use */
876 )
877 : CppAD::atomic_base<SCIP_Real>(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr_)), CppAD::atomic_base<SCIP_Real>::bool_sparsity_enum),
878 scip(scip_),
879 expr(expr_)
880 { }
881
882private:
883 /** SCIP data structure */
884 SCIP* scip;
885
886 /** expression */
887 SCIP_EXPR* expr;
888
889 /** forward sweep of userexpr
890 *
891 * We follow http://www.coin-or.org/CppAD/Doc/atomic_forward.xml
892 * Note that p and q are interchanged!
893 *
894 * For a scalar variable t, let
895 * Y(t) = f(X(t))
896 * X(t) = x^0 + x^1 t^1 + ... + x^p t^p
897 * where for x^i the i is an index, while for t^i the i is an exponent.
898 * Thus, x^k = 1/k! X^(k) (0), where X^(k)(.) denotes the k-th derivative.
899 *
900 * Next, let y^k = 1/k! Y^(k)(0) be the k'th taylor coefficient of Y. Thus,
901 * y^0 = Y^(0)(0) = Y(0) = f(X(0)) = f(x^0)
902 * y^1 = Y^(1)(0) = Y'(0) = f'(X(0)) * X'(0) = f'(x^0) * x^1
903 * y^2 = 1/2 Y^(2)(0) = 1/2 Y''(0) = 1/2 X'(0) * f''(X(0)) X'(0) + 1/2 * f'(X(0)) * X''(0) = 1/2 x^1 * f''(x^0) * x^1 + f'(x^0) * x^2
904 *
905 * As x^k = (tx[k], tx[(p+1)+k], tx[2*(p+1)+k], ..., tx[n*(p+1)+k], we get
906 * ty[0] = y^0 = f(x^0) = f(tx[{1..n}*(p+1)])
907 * ty[1] = y^1 = f'(x^0) * tx[{1..n}*(p+1)+1] = sum(i=1..n, grad[i] * tx[i*(p+1)+1]), where grad = f'(x^0)
908 * ty[2] = 1/2 sum(i,j=1..n, tx[i*(p+1)+1] * tx[j*(p+1)+1] * hessian[i,j]) + sum(i=1..n, grad[i] * tx[i*(p+1)+2])
909 */
910 /* cppcheck-suppress unusedPrivateFunction */
911 bool forward(
912 size_t q, /**< lowest order Taylor coefficient that we are evaluating */
913 size_t p, /**< highest order Taylor coefficient that we are evaluating */
914 const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
915 CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
916 const CppAD::vector<SCIP_Real>& tx, /**< values for taylor coefficients of x */
917 CppAD::vector<SCIP_Real>& ty /**< vector to store taylor coefficients of y */
918 )
919 {
920 assert(scip != NULL);
921 assert(expr != NULL);
922 assert(ty.size() == p+1);
923 assert(q <= p);
924
925 size_t n = tx.size() / (p+1);
926 assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
927 assert(n >= 1);
928
929 SCIPdebugMsg(scip, "expr_%s:forward, q=%zd, p=%zd\n", SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), q, p);
930
931 if( vx.size() > 0 )
932 {
933 assert(vx.size() == n);
934 assert(vy.size() == 1);
935 assert(p == 0);
936
937 /* y_0 is a variable if at least one of the x_i is a variable */
938 vy[0] = false;
939 for( size_t i = 0; i < n; ++i )
940 if( vx[i] )
941 {
942 vy[0] = true;
943 break;
944 }
945 }
946
947 if( p == 0 )
948 {
949 /* only eval requested
950 * arguments are in tx[i * (p+1) + 0], i == 0..n-1, that is tx[0]..tx[n-1]
951 */
952 if( SCIPcallExprEval(scip, expr, const_cast<double*>(tx.data()), &ty[0]) != SCIP_OKAY )
953 return false;
954
955 if( ty[0] == SCIP_INVALID )
956 ty[0] = std::numeric_limits<double>::signaling_NaN();
957
958 return true;
959 }
960
961 if( p == 1 )
962 {
963 /* eval and forward-diff requested
964 *
965 * point to eval is in tx[i * (p+1) + 0], i == 0..n-1
966 * direction is in tx[i * (p+1) + 1], i == 0..n-1
967 */
968 SCIP_Real* x = new SCIP_Real[n];
969 SCIP_Real* dir = new SCIP_Real[n];
970 for( size_t i = 0; i < n; ++i )
971 {
972 x[i] = tx[i * (p+1) + 0]; /*lint !e835*/
973 dir[i] = tx[i * (p+1) + 1]; /*lint !e835*/
974 }
975
976 SCIP_RETCODE rc = SCIPcallExprEvalFwdiff(scip, expr, x, dir, &ty[0], &ty[1]);
977
978 if( ty[0] == SCIP_INVALID )
979 ty[0] = std::numeric_limits<double>::signaling_NaN();
980 if( ty[1] == SCIP_INVALID )
981 ty[1] = std::numeric_limits<double>::signaling_NaN();
982
983 delete[] dir;
984 delete[] x;
985
986 return rc == SCIP_OKAY;
987 }
988
989 /* higher order derivatives not implemented yet (TODO) */
990 SCIPABORT();
991 return false;
992
993#if SCIP_DISABLED_CODE
994 /* cppcheck-suppress unreachableCode */
995 SCIP_Real* gradient = NULL;
996 SCIP_Real* hessian = NULL;
997
998 if( q <= 2 && 1 <= p )
999 gradient = new SCIP_Real[n];
1000 if( q <= 2 && 2 <= p )
1001 hessian = new SCIP_Real[n*n];
1002
1003 if( exprEvalUser(expr, x, ty[0], gradient, hessian) != SCIP_OKAY )
1004 {
1005 delete[] x;
1006 delete[] gradient;
1007 delete[] hessian;
1008 return false;
1009 }
1010
1011 if( gradient != NULL )
1012 {
1013 ty[1] = 0.0;
1014 for( size_t i = 0; i < n; ++i )
1015 ty[1] += gradient[i] * tx[i * (p+1) + 1];
1016 }
1017
1018 if( hessian != NULL )
1019 {
1020 assert(gradient != NULL);
1021
1022 ty[2] = 0.0;
1023 for( size_t i = 0; i < n; ++i )
1024 {
1025 for( size_t j = 0; j < n; ++j )
1026 ty[2] += 0.5 * hessian[i*n+j] * tx[i * (p+1) + 1] * tx[j * (p+1) + 1];
1027
1028 ty[2] += gradient[i] * tx[i * (p+1) + 2];
1029 }
1030 }
1031
1032 delete[] x;
1033 delete[] gradient;
1034 delete[] hessian;
1035
1036 /* higher order derivatives not implemented */
1037 if( p > 2 )
1038 return false;
1039
1040 return true;
1041#endif
1042 }
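   /* illustration (not part of the original code): for n = 2 children and p = 1, the flattened
    * Taylor-coefficient array tx above has length n*(p+1) = 4 and is laid out child-major,
    *    tx = { x_0^0, x_0^1, x_1^0, x_1^1 },
    * so the evaluation point is read from tx[i*(p+1)] and the direction from tx[i*(p+1)+1],
    * exactly as done in the p == 1 branch of forward() */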
1043
1044 /** reverse sweep of userexpr
1045 *
1046 * We follow http://www.coin-or.org/CppAD/Doc/atomic_reverse.xml
1047 * Note that the q there is our p.
1048 *
1049 * For a scalar variable t, let
1050 * Y(t) = f(X(t))
1051 * X(t) = x^0 + x^1 t^1 + ... + x^p t^p
1052 * where for x^i the i is an index, while for t^i the i is an exponent.
1053 * Thus, x^k = 1/k! X^(k) (0), where X^(k)(.) denotes the k-th derivative.
1054 *
1055 * Next, let y^k = 1/k! Y^(k)(0) be the k'th taylor coefficient of Y. Thus,
1056 * Y(t) = y^0 + y^1 t^1 + y^2 t^2 + ...
1057 * y^0, y^1, ... are the taylor coefficients of f(x).
1058 *
1059 * Further, let F(x^0,..,x^p) by given as F^k(x) = y^k. Thus,
1060 * F^0(x) = y^0 = Y^(0)(0) = f(x^0)
1061 * F^1(x) = y^1 = Y^(1)(0) = f'(x^0) * x^1
1062 * F^2(x) = y^2 = 1/2 Y''(0) = 1/2 x^1 f''(x^0) x^1 + f'(x^0) x^2
1063 *
1064 * Given functions G: R^(p+1) -> R and H: R^(n*(p+1)) -> R, where H(x^0, x^1, .., x^p) = G(F(x^0,..,x^p)),
1065 * we have to return the value of \f$\partial H / \partial x^l, l = 0..p,\f$ in px. Therefore,
1066 * \f$
1067 * px^l = \partial H / \partial x^l
1068 * = sum(k=0..p, (\partial G / \partial y^k) * (\partial y^k / \partial x^l)
1069 * = sum(k=0..p, py[k] * (\partial F^k / \partial x^l)
1070 * \f$
1071 *
1072 * For p = 0, this means
1073 * \f$
1074 * px^0 = py[0] * \partial F^0 / \partial x^0
1075 * = py[0] * \partial f(x^0) / \partial x^0
1076 * = py[0] * f'(x^0)
1077 * \f$
1078 *
1079 * For p = 1, this means (for l = 0):
1080 * \f[
1081 * px^0 = py[0] * \partial F^0 / \partial x^0 + py[1] * \partial F^1 / \partial x^0
1082 * = py[0] * \partial f(x^0) / \partial x^0 + py[1] * \partial (f'(x^0) * x^1) / \partial x^0
1083 * = py[0] * f'(x^0) + py[1] * f''(x^0) * x^1
1084 * \f]
1085 * and (for l=1):
1086 * \f[
1087 * px^1 = py[0] * \partial F^0 / \partial x^1 + py[1] * \partial F^1 / \partial x^1
1088 * = py[0] * \partial f(x^0) / \partial x^1 + py[1] * \partial (f'(x^0) * x^1) / \partial x^1
1089 * = py[0] * 0 + py[1] * f'(x^0)
1090 * \f]
1091 *
1092 * As x^k = (tx[k], tx[(p+1)+k], tx[2*(p+1)+k], ..., tx[n*(p+1)+k] and
1093 * px^k = (px[k], px[(p+1)+k], px[2*(p+1)+k], ..., px[n*(p+1)+k], we get
1094 * for p = 0:
1095 * px[i] = (px^0)_i = py[0] * grad[i]
1096 * for p = 1:
1097 * px[i*2+0] = (px^0)_i = py[0] * grad[i] + py[1] * sum(j, hessian[j,i] * tx[j*2+1])
1098 * px[i*2+1] = (px^1)_i = py[1] * grad[i]
1099 */
1100 /* cppcheck-suppress unusedPrivateFunction */
1101 bool reverse(
1102 size_t p, /**< highest order Taylor coefficient that we are evaluating */
1103 const CppAD::vector<SCIP_Real>& tx, /**< values for taylor coefficients of x */
1104 const CppAD::vector<SCIP_Real>& ty, /**< values for taylor coefficients of y */
1105 CppAD::vector<SCIP_Real>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */ /* cppcheck-suppress constParameterReference */
1106 const CppAD::vector<SCIP_Real>& py /**< values for partial derivatives of g(x) w.r.t. y */
1107 )
1108 {
1109 assert(expr != NULL);
1110 assert(px.size() == tx.size());
1111 assert(py.size() == p+1);
1112
1113 // TODO implement this again and then remove check for userexprs in SCIPexprintGrad
1114 SCIPABORT();
1115 return false;
1116
1117#ifdef SCIP_DISABLED_CODE
1118 /* cppcheck-suppress unreachableCode */
1119 size_t n = tx.size() / (p+1);
1120 assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1121 assert(n >= 1);
1122
1123 SCIP_Real* x = new SCIP_Real[n];
1124 SCIP_Real funcval;
1125 SCIP_Real* gradient = new SCIP_Real[n];
1126 SCIP_Real* hessian = NULL;
1127
1128 if( p == 1 )
1129 hessian = new SCIP_Real[n*n];
1130
1131 for( size_t i = 0; i < n; ++i )
1132 x[i] = tx[i * (p+1) + 0]; /*lint !e835*/
1133
1134 if( exprEvalUser(expr, x, funcval, gradient, hessian) != SCIP_OKAY )
1135 {
1136 delete[] x;
1137 delete[] gradient;
1138 delete[] hessian;
1139 return false;
1140 }
1141
1142 switch( p )
1143 {
1144 case 0:
1145 // px[j] = (px^0)_j = py[0] * grad[j]
1146 for( size_t i = 0; i < n; ++i )
1147 px[i] = py[0] * gradient[i];
1148 break;
1149
1150 case 1:
1151 // px[i*2+0] = (px^0)_i = py[0] * grad[i] + py[1] * sum(j, hessian[j,i] * tx[j*2+1])
1152 // px[i*2+1] = (px^1)_i = py[1] * grad[i]
1153 assert(hessian != NULL);
1154 for( size_t i = 0; i < n; ++i )
1155 {
1156 px[i*2+0] = py[0] * gradient[i]; /*lint !e835*/
1157 for( size_t j = 0; j < n; ++j )
1158 px[i*2+0] += py[1] * hessian[i+n*j] * tx[j*2+1]; /*lint !e835*/
1159
1160 px[i*2+1] = py[1] * gradient[i];
1161 }
1162 break;
1163
1164 default:
1165 return false;
1166 }
1167
1168 return true;
1169#endif
1170 } /*lint !e715*/
1171
1172 using CppAD::atomic_base<SCIP_Real>::for_sparse_jac;
1173
1174 /** computes sparsity of jacobian during a forward sweep
1175 * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
1176 * Since we assume f'(x) to be dense, the sparsity of S will be the sparsity of R.
1177 */
1178 /* cppcheck-suppress unusedPrivateFunction */
1179 bool for_sparse_jac(
1180 size_t q, /**< number of columns in R */
1181 const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
1182 CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
1183 )
1184 {
1185 assert(expr != NULL);
1186 assert(s.size() == q);
1187
1188 size_t n = r.size() / q;
1189 assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1190
1191 // sparsity for S(x) = f'(x) * R
1192 for( size_t j = 0; j < q; j++ )
1193 {
1194 s[j] = false;
1195 for( size_t i = 0; i < n; i++ )
1196 s[j] |= (bool)r[i * q + j]; /*lint !e1786*/
1197 }
1198
1199 return true;
1200 }
1201
1202 using CppAD::atomic_base<SCIP_Real>::rev_sparse_jac;
1203
1204 /** computes sparsity of jacobian during a reverse sweep
1205 * For a q x 1 matrix S, we have to return the sparsity pattern of the q x 1 matrix R(x) = S * f'(x).
1206 * Since we assume f'(x) to be dense, the sparsity of R will be the sparsity of S.
1207 */
1208 /* cppcheck-suppress unusedPrivateFunction */
1209 bool rev_sparse_jac(
1210 size_t q, /**< number of rows in R */
1211 const CppAD::vector<bool>& rt, /**< sparsity of R, rowwise */
1212 CppAD::vector<bool>& st /**< vector to store sparsity of S, rowwise */
1213 )
1214 {
1215 assert(expr != NULL);
1216 assert(rt.size() == q);
1217
1218 size_t n = st.size() / q;
1219 assert(n == (size_t)SCIPexprGetNChildren(expr));
1220
1221 // sparsity for S(x)^T = f'(x)^T * R^T
1222 for( size_t j = 0; j < q; j++ )
1223 for( size_t i = 0; i < n; i++ )
1224 st[i * q + j] = rt[j];
1225
1226 return true;
1227 }
1228
1229 using CppAD::atomic_base<SCIP_Real>::rev_sparse_hes;
1230
1231 /** computes sparsity of hessian during a reverse sweep
1232 * Assume V(x) = (g(f(x)))'' R for a function g:R->R and a matrix R.
1233 * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
1234 */
1235 /* cppcheck-suppress unusedPrivateFunction */
1236 bool rev_sparse_hes(
1237 const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
1238 const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
1239 CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
1240 size_t q, /**< number of columns in S and R */
1241 const CppAD::vector<bool>& r, /**< sparsity pattern of R */
1242 const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
1243 CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
1244 )
1245 {
1246 assert(expr != NULL);
1247 size_t n = vx.size();
1248 assert((size_t)SCIPexprGetNChildren(expr) == n);
1249 assert(s.size() == 1);
1250 assert(t.size() == n);
1251 assert(r.size() == n * q);
1252 assert(u.size() == q);
1253 assert(v.size() == n * q);
1254
1255 size_t i, j, k;
1256
1257 // sparsity for T(x) = S(x) * f'(x)
1258 for( i = 0; i < n; ++i )
1259 t[i] = s[0];
1260
1261 // V(x) = f'(x)^T * g''(y) * f'(x) * R + g'(y) * f''(x) * R
1262 // U(x) = g''(y) * f'(x) * R
1263 // S(x) = g'(y)
1264
1265 // back propagate the sparsity for U
1266 for( j = 0; j < q; j++ )
1267 for( i = 0; i < n; i++ )
1268 v[ i * q + j] = u[j];
1269
1270 // include forward Jacobian sparsity in Hessian sparsity
1271 // sparsity for g'(y) * f''(x) * R (Note f''(x) is assumed to be dense)
1272 if( s[0] )
1273 for( j = 0; j < q; j++ )
1274 for( i = 0; i < n; i++ )
1275 for( k = 0; k < n; ++k )
1276 v[ i * q + j] |= (bool) r[ k * q + j];
1277
1278 return true;
1279 }
1280};
1281
1282
1283/** integer power operation for arbitrary integer exponents */
1284template<class Type>
1285static
1286void evalIntPower(
1287 Type& resultant, /**< resultant */
1288 const Type& arg, /**< operand */
1289 const int exponent /**< exponent */
1290 )
1291{
1292 if( exponent > 1 )
1293 {
1294 vector<Type> in(1, arg);
1295 vector<Type> out(1);
1296
1297 posintpower(in, out, exponent);
1298
1299 resultant = out[0];
1300 return;
1301 }
1302
1303 if( exponent < -1 )
1304 {
1305 vector<Type> in(1, arg);
1306 vector<Type> out(1);
1307
1308 posintpower(in, out, -exponent);
1309
1310 resultant = Type(1.0)/out[0];
1311 return;
1312 }
1313
1314 if( exponent == 1 )
1315 {
1316 resultant = arg;
1317 return;
1318 }
1319
1320 if( exponent == 0 )
1321 {
1322 resultant = Type(1.0);
1323 return;
1324 }
1325
1326 assert(exponent == -1);
1327 resultant = Type(1.0)/arg;
1328}
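#ifdef SCIP_EXPRINT_EXAMPLES  /* hypothetical guard, never defined; illustrative sketch only, not part of SCIP */
/* usage sketch of evalIntPower(): positive exponents go through posintpower(), negative exponents
 * through the reciprocal of the positive power, e.g. x^(-3) = 1/x^3 */
static void example_evalintpower(void)
{
   AD<double> x(2.0);
   AD<double> res;
   evalIntPower(res, x, 3);    /* res == 8 */
   evalIntPower(res, x, -3);   /* res == 1/8 */
   evalIntPower(res, x, 0);    /* res == 1 */
   (void)res;
}
#endif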
1329
1330/** CppAD compatible evaluation of an expression for given arguments */
1331template<class Type>
1332static
1333SCIP_RETCODE eval(
1334 SCIP* scip, /**< SCIP data structure */
1335 SCIP_EXPR* expr, /**< expression */
1336 SCIP_EXPRINTDATA* exprintdata, /**< interpreter data for root expression */
1337 const vector<Type>& x, /**< values of variables */
1338 Type& val /**< buffer to store expression value */
1339 )
1340{
1341 Type* buf = NULL;
1342
1343 assert(expr != NULL);
1344
1345 // TODO this should iterate instead of using recursion
1346 // but the iterdata wouldn't work to hold Type at the moment
1347 // they could hold Type*, but then we need to alloc small portions all the time
1348 // or we have a big Type-array outside and point to it in iterdata
1349
1350 if( SCIPisExprVaridx(scip, expr) )
1351 {
1352 val = x[exprintdata->getVarPos(SCIPgetIndexExprVaridx(expr))];
1353 return SCIP_OKAY;
1354 }
1355 if( SCIPisExprValue(scip, expr) )
1356 {
1357 val = SCIPgetValueExprValue(expr);
1358 return SCIP_OKAY;
1359 }
1360
1361 buf = new Type[SCIPexprGetNChildren(expr)];
1362 for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
1363 {
1364 SCIP_CALL( eval(scip, SCIPexprGetChildren(expr)[i], exprintdata, x, buf[i]) );
1365 }
1366
1367#ifndef EVAL_USE_EXPRHDLR_ALWAYS
1368 if( SCIPisExprSum(scip, expr) )
1369 {
1370 val = SCIPgetConstantExprSum(expr);
1371 for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
1372 val += SCIPgetCoefsExprSum(expr)[i] * buf[i];
1373 }
1374 else if( SCIPisExprProduct(scip, expr) )
1375 {
1376 val = SCIPgetCoefExprProduct(expr);
1377 for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
1378 val *= buf[i];
1379 }
1380 else if( SCIPisExprPower(scip, expr) )
1381 {
1382 SCIP_Real exponent = SCIPgetExponentExprPow(expr);
1383 if( EPSISINT(exponent, 0.0) )
1384 evalIntPower(val, buf[0], (int)SCIPgetExponentExprPow(expr));
1385 else if( exponent == 0.5 )
1386 val = sqrt(buf[0]);
1387 else if( exponent < 1.0 )
1388 val = CppAD::pow(buf[0], SCIPgetExponentExprPow(expr));
1389 else
1390 {
1391 // workaround bug where CppAD claims pow(x,fractional>0) is nondiff at x=0
1392 // https://github.com/coin-or/CppAD/discussions/93#discussioncomment-327876
1393 AD<double> adzero(0.);
1394 val = CppAD::CondExpEq(buf[0], adzero, pow(buf[0]+std::numeric_limits<SCIP_Real>::epsilon(), exponent)-pow(std::numeric_limits<SCIP_Real>::epsilon(), exponent),
1395 pow(buf[0], exponent));
1396 }
1397 }
1398 else if( SCIPisExprSignpower(scip, expr) )
1399 {
1400 evalSignPower(val, buf[0], expr);
1401 }
1402 else if( SCIPisExprExp(scip, expr) )
1403 {
1404 val = exp(buf[0]);
1405 }
1406 else if( SCIPisExprLog(scip, expr) )
1407 {
1408 val = log(buf[0]);
1409 }
1410 else if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "sin") == 0 )
1411 {
1412 val = sin(buf[0]);
1413 }
1414 else if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "cos") == 0 )
1415 {
1416 val = cos(buf[0]);
1417 }
1418 else if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "erf") == 0 )
1419 {
1420 val = erf(buf[0]);
1421 }
1422 else if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "abs") == 0 )
1423 {
1424 val = abs(buf[0]);
1425 }
1426 else if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "entropy") == 0 )
1427 {
1428 /* -x*log(x) if x > 0, else 0
1429 * https://coin-or.github.io/CppAD/doc/cond_exp.cpp.htm suggests using 0 for the x=0 case
1430 * but then derivatives at 0 are 0, while they are actually infinite (see expr_entropy.c:bwdiff)
1431 * so we use -sqrt(x) for the x=0 case, as this also has value 0 and first derivative -inf at 0
1432 */
1433 val = CppAD::CondExpGt(buf[0], AD<double>(0.), -buf[0] * log(buf[0]), -sqrt(buf[0]));
1434 }
1435 else
1436#endif
1437 {
1438 //SCIPerrorMessage("using derivative methods of exprhdlr %s in CppAD is not implemented yet", SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)));
1439 //CppAD::ErrorHandler::Call(true, __LINE__, __FILE__, "evalUser()", "Error: user expressions in CppAD not possible without CppAD user atomic facility");
1440 vector<Type> in(buf, buf + SCIPexprGetNChildren(expr));
1441 vector<Type> out(1);
1442
1443 exprintdata->userexprs.push_back(new atomic_userexpr(scip, expr));
1444 (*exprintdata->userexprs.back())(in, out);
1445
1446 val = out[0];
1447 }
1448
1449 delete[] buf;
1450
1451 return SCIP_OKAY;
1452}
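#ifdef SCIP_EXPRINT_EXAMPLES  /* hypothetical guard, never defined; illustrative sketch only, not part of SCIP */
/* sketch of how eval() is combined with CppAD taping, a simplified version of what the evaluation
 * routines further below do; assumes exprintdata was prepared by SCIPexprintCompile() and that the
 * expression uses at least one variable */
static SCIP_RETCODE example_tape_and_eval(
   SCIP*                 scip,               /**< SCIP data structure */
   SCIP_EXPR*            expr,               /**< expression using varidx expressions */
   SCIP_EXPRINTDATA*     exprintdata         /**< interpreter data prepared for expr */
   )
{
   size_t n = exprintdata->varidxs.size();

   CppAD::Independent(exprintdata->X);                                         /* start recording operations on X */
   SCIP_CALL( eval(scip, expr, exprintdata, exprintdata->X, exprintdata->Y[0]) );
   exprintdata->f.Dependent(exprintdata->X, exprintdata->Y);                    /* stop recording; store tape in f */

   vector<double> x(n, 1.0);                                                    /* evaluate at the all-ones point */
   vector<double> y = exprintdata->f.Forward(0, x);                             /* y[0] is the expression value */
   vector<double> gradient = exprintdata->f.Jacobian(x);                        /* dense gradient (the 1 x n Jacobian) */
   (void)y;
   (void)gradient;

   return SCIP_OKAY;
}
#endif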
1453
1454/** replacement for CppAD's default error handler
1455 *
1456 * In debug mode, CppAD gives an error when an evaluation contains a nan.
1457 * We do not want to stop execution in such a case, since the calling routine should check for nan's and decide what to do.
1458 * Since we cannot ignore only this particular error, we ignore all of them.
1459 * @todo find a way to check whether the error corresponds to a nan and communicate this back
1460 */
1461static
1462void cppaderrorcallback(
1463 bool known, /**< is the error from a known source? */
1464 int line, /**< line where error occurred */
1465 const char* file, /**< file where error occurred */
1466 const char* cond, /**< error condition */
1467 const char* msg /**< error message */
1468 )
1469{
1470 SCIPdebugMessage("ignore CppAD error from %sknown source %s:%d: msg: %s exp: %s\n", known ? "" : "un", file, line, msg, cond);
1471}
1472
1473/* install our error handler */
1474static CppAD::ErrorHandler errorhandler(cppaderrorcallback);
1475
1476/** gets name and version of expression interpreter */
1477const char* SCIPexprintGetName(void)
1478{
1479 return CPPAD_PACKAGE_STRING;
1480}
1481
1482/** gets descriptive text of expression interpreter */
1483const char* SCIPexprintGetDesc(void)
1484{
1485 return "Algorithmic Differentiation of C++ algorithms developed by B. Bell (github.com/coin-or/CppAD)";
1486}
1487
1488/** gets capabilities of expression interpreter (using bitflags) */
1489SCIP_EXPRINTCAPABILITY SCIPexprintGetCapability(
1490 void
1491 )
1492{
1493 return SCIP_EXPRINTCAPABILITY_FUNCVALUE | SCIP_EXPRINTCAPABILITY_GRADIENT | SCIP_EXPRINTCAPABILITY_HESSIAN;
1494}
1495
1496/** creates an expression interpreter object */
1497SCIP_RETCODE SCIPexprintCreate(
1498 SCIP* scip, /**< SCIP data structure */
1499 SCIP_EXPRINT** exprint /**< buffer to store pointer to expression interpreter */
1500 )
1501{
1502 assert(exprint != NULL);
1503
1504 *exprint = (SCIP_EXPRINT*)(size_t)1u; /* some code checks that a non-NULL pointer is returned here, even though it may not point anywhere */
1505
1506 return SCIP_OKAY;
1507}
1508
1509/** frees an expression interpreter object */
1510SCIP_RETCODE SCIPexprintFree(
1511 SCIP* scip, /**< SCIP data structure */
1512 SCIP_EXPRINT** exprint /**< expression interpreter that should be freed */
1513 )
1514{
1515 assert(exprint != NULL);
1516
1517 *exprint = NULL;
1518
1519 return SCIP_OKAY;
1520}
1521
1522/** compiles an expression and returns interpreter-specific data for expression
1523 *
1524 * can be called again with existing exprintdata if expression has been changed
1525 *
1526 * @attention *exprintdata needs to be initialized to NULL at first call
1527 * @attention the expression is assumed to use varidx expressions instead of var expressions
1528 */
1529SCIP_RETCODE SCIPexprintCompile(
1530 SCIP* scip, /**< SCIP data structure */
1531 SCIP_EXPRINT* exprint, /**< interpreter data structure */
1532 SCIP_EXPR* expr, /**< expression */
1533 SCIP_EXPRINTDATA** exprintdata /**< buffer to store pointer to compiled data */
1534 )
1535{
1536 SCIP_EXPRITER* it;
1537 SCIP_EXPR* expr2;
1538
1539 assert(expr != NULL);
1540 assert(exprintdata != NULL);
1541
1542 if( *exprintdata == NULL )
1543 {
1544 *exprintdata = new SCIP_EXPRINTDATA();
1545 assert(*exprintdata != NULL);
1546 }
1547 else
1548 {
1549 (*exprintdata)->need_retape = true;
1550 (*exprintdata)->need_retape_always = false;
1551 (*exprintdata)->hesconstant = false;
1552 }
1553
1554 SCIP_CALL( SCIPcreateExpriter(scip, &it) );
1555 SCIP_CALL( SCIPexpriterInit(it, expr, SCIP_EXPRITER_DFS, FALSE) );
1556
1557 std::set<int> varidxs;
1558 for( expr2 = SCIPexpriterGetCurrent(it); !SCIPexpriterIsEnd(it); expr2 = SCIPexpriterGetNext(it) )
1559 {
1560 /* cannot handle var-expressions in exprint so far, should be varidx expressions */
1561 assert(!SCIPisExprVar(scip, expr2));
1562
1563 if( SCIPisExprVaridx(scip, expr2) )
1564 {
1565 varidxs.insert(SCIPgetIndexExprVaridx(expr2));
1566 continue;
1567 }
1568
1569 /* check whether expr2 is of a type we don't recognize */
1570#ifndef EVAL_USE_EXPRHDLR_ALWAYS
1571 if( !SCIPisExprSum(scip, expr2) &&
1572 !SCIPisExprProduct(scip, expr2) &&
1573 !SCIPisExprPower(scip, expr2) &&
1574 !SCIPisExprSignpower(scip, expr2) &&
1575 !SCIPisExprExp(scip, expr2) &&
1576 !SCIPisExprLog(scip, expr2) &&
1577 strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr2)), "abs") != 0 &&
1578 strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr2)), "sin") != 0 &&
1579 strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr2)), "cos") != 0 &&
1580 strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr2)), "entropy") != 0 &&
1581 strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr2)), "erf") != 0 &&
1582 !SCIPisExprValue(scip, expr2) )
1583#endif
1584 {
1585 /* an expression for which we have no taping implemented in eval()
1586 * so we will try to use CppAD's atomic operand and the expression handler methods
1587 * however, only atomic_userexpr:forward() is implemented for p=0,1 at the moment, so we cannot do Hessians
1588 * also the exprhdlr needs to implement the fwdiff callback for derivatives
1589 */
1590 if( SCIPexprhdlrHasFwdiff(SCIPexprGetHdlr(expr2)) )
1591 (*exprintdata)->userevalcapability &= SCIP_EXPRINTCAPABILITY_FUNCVALUE | SCIP_EXPRINTCAPABILITY_GRADIENT;
1592 else
1593 (*exprintdata)->userevalcapability &= SCIP_EXPRINTCAPABILITY_FUNCVALUE;
1594 }
1595
1596 //if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr2)), "xyz") == 0 )
1597 // (*exprintdata)->need_retape_always = true;
1598 }
1599
1600 SCIPfreeExpriter(&it);
1601
1602 (*exprintdata)->varidxs.reserve(varidxs.size());
1603 (*exprintdata)->varidxs.insert((*exprintdata)->varidxs.begin(), varidxs.begin(), varidxs.end());
1604
1605 size_t n = (*exprintdata)->varidxs.size();
1606 (*exprintdata)->X.resize(n);
1607 (*exprintdata)->x.resize(n);
1608 (*exprintdata)->Y.resize(1);
1609
1610 // check whether the expression is quadratic (or linear), so we can save time on Hessian computations
1611 // assumes the expression is simplified and skips over the linear, x^2, and x*y cases
1612 // not using SCIPcheckExprQuadratic(), because we don't need the quadratic form
1613 if( SCIPisExprSum(scip, expr) )
1614 {
1615 (*exprintdata)->hesconstant = true;
1616 for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
1617 {
1618 SCIP_EXPR* child;
1619 child = SCIPexprGetChildren(expr)[i];
1620 /* linear term is ok */
1621 if( SCIPisExprVaridx(scip, child) )
1622 continue;
1623 /* square term is ok */
1624 if( SCIPisExprPower(scip, child) && SCIPgetExponentExprPow(child) == 2.0 && SCIPisExprVaridx(scip, SCIPexprGetChildren(child)[0]) )
1625 continue;
1626 /* bilinear term is ok */
1627 if( SCIPisExprProduct(scip, child) && SCIPexprGetNChildren(child) == 2 && SCIPisExprVaridx(scip, SCIPexprGetChildren(child)[0]) && SCIPisExprVaridx(scip, SCIPexprGetChildren(child)[1]) )
1628 continue;
1629 /* everything else means not quadratic (or not simplified) */
1630 (*exprintdata)->hesconstant = false;
1631 break;
1632 }
1633 SCIPdebugMsg(scip, "Hessian found %sconstant\n", (*exprintdata)->hesconstant ? "" : "not ");
1634 }
1635
1636 return SCIP_OKAY;
1637}
1638
1639/** frees interpreter data for expression */
1640SCIP_RETCODE SCIPexprintFreeData(
1641 SCIP* scip, /**< SCIP data structure */
1642 SCIP_EXPRINT* exprint, /**< interpreter data structure */
1643 SCIP_EXPR* expr, /**< expression */
1644 SCIP_EXPRINTDATA** exprintdata /**< pointer to pointer to compiled data to be freed */
1645 )
1646{
1647 assert(exprintdata != NULL);
1648 assert(*exprintdata != NULL);
1649
1650 SCIPfreeBlockMemoryArrayNull(scip, &(*exprintdata)->hesrowidxs, (*exprintdata)->hesnnz);
1651 SCIPfreeBlockMemoryArrayNull(scip, &(*exprintdata)->hescolidxs, (*exprintdata)->hesnnz);
1652
1653 for( vector<atomic_userexpr*>::iterator it((*exprintdata)->userexprs.begin()); it != (*exprintdata)->userexprs.end(); ++it )
1654 delete *it;
1655 (*exprintdata)->userexprs.clear();
1656
1657 delete *exprintdata;
1658 *exprintdata = NULL;
1659
1660 return SCIP_OKAY;
1661}
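
For orientation, here is a minimal usage sketch of the interpreter interface implemented in this file; it is not part of the file itself. It assumes a SCIP instance scip and an expression expr built from varidx expressions with indices 0 and 1 already exist, and that the code runs inside a function returning SCIP_RETCODE so that SCIP_CALL() can propagate errors.

   /* usage sketch (not part of this file): compile, evaluate, and free interpreter data;
    * assumes scip and an expression expr over varidx expressions 0 and 1 already exist,
    * and that this code runs inside a function returning SCIP_RETCODE */
   SCIP_EXPRINT* exprint;
   SCIP_EXPRINTDATA* exprintdata = NULL;
   SCIP_Real x[2] = { 1.0, 2.0 };
   SCIP_Real val;

   SCIP_CALL( SCIPexprintCreate(scip, &exprint) );
   SCIP_CALL( SCIPexprintCompile(scip, exprint, expr, &exprintdata) );
   SCIP_CALL( SCIPexprintEval(scip, exprint, expr, exprintdata, x, &val) );
   SCIP_CALL( SCIPexprintFreeData(scip, exprint, expr, &exprintdata) );
   SCIP_CALL( SCIPexprintFree(scip, &exprint) );
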
1662
1663/** gives the capability to evaluate an expression by the expression interpreter
1664 *
1665 * For user-given expressions, higher-order derivatives may not be available even if the expression
1666 * interpreter itself could handle them. This method allows one to recognize, e.g., that the Hessian
1667 * of an expression is not available because it contains a user expression that does not provide
1668 * Hessians.
1669 */
1670SCIP_EXPRINTCAPABILITY SCIPexprintGetExprCapability(
1671 SCIP* scip, /**< SCIP data structure */
1672 SCIP_EXPRINT* exprint, /**< interpreter data structure */
1673 SCIP_EXPR* expr, /**< expression */
1674 SCIP_EXPRINTDATA* exprintdata /**< interpreter-specific data for expression */
1675 )
1676{
1677 assert(exprintdata != NULL);
1678
1679 return exprintdata->userevalcapability;
1680}/*lint !e715*/
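
Callers are expected to inspect the returned capability bits before requesting derivatives. A small sketch (not part of this file), reusing the hypothetical exprint, expr, and exprintdata from the sketch above:

   /* sketch (not part of this file): only request a Hessian if the compiled expression supports it */
   SCIP_EXPRINTCAPABILITY cap = SCIPexprintGetExprCapability(scip, exprint, expr, exprintdata);
   if( cap & SCIP_EXPRINTCAPABILITY_HESSIAN )
   {
      /* SCIPexprintHessianSparsity() / SCIPexprintHessian() may be called */
   }
   else if( cap & SCIP_EXPRINTCAPABILITY_GRADIENT )
   {
      /* fall back to SCIPexprintGrad() */
   }
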
1681
1682/** evaluates an expression */
1683SCIP_RETCODE SCIPexprintEval(
1684 SCIP* scip, /**< SCIP data structure */
1685 SCIP_EXPRINT* exprint, /**< interpreter data structure */
1686 SCIP_EXPR* expr, /**< expression */
1687 SCIP_EXPRINTDATA* exprintdata, /**< interpreter-specific data for expression */
1688 SCIP_Real* varvals, /**< values of variables */
1689 SCIP_Real* val /**< buffer to store value of expression */
1690 )
1691{
1692 assert(expr != NULL);
1693 assert(exprintdata != NULL);
1694 assert(varvals != NULL);
1695 assert(val != NULL);
1696
1697 size_t n = exprintdata->varidxs.size();
1698
1699 if( n == 0 )
1700 {
1701 SCIP_CALL( SCIPevalExpr(scip, expr, NULL, 0L) );
1702 exprintdata->val = *val = SCIPexprGetEvalValue(expr);
1703 return SCIP_OKAY;
1704 }
1705
1706 if( exprintdata->need_retape_always || exprintdata->need_retape )
1707 {
1708 /* free old Hessian data, if any */
1709 SCIPfreeBlockMemoryArrayNull(scip, &exprintdata->hesrowidxs, exprintdata->hesnnz);
1710 SCIPfreeBlockMemoryArrayNull(scip, &exprintdata->hescolidxs, exprintdata->hesnnz);
1711 exprintdata->hesvalues.clear();
1712 exprintdata->hesnnz = 0;
1713
1714 for( size_t i = 0; i < n; ++i )
1715 {
1716 int idx = exprintdata->varidxs[i];
1717 exprintdata->X[i] = varvals[idx];
1718 exprintdata->x[i] = varvals[idx]; /* need this for a following grad or hessian eval with new_x = false */
1719 }
1720
1721 // delete old atomic_userexprs before we start collecting new ones
1722 for( vector<atomic_userexpr*>::iterator it(exprintdata->userexprs.begin()); it != exprintdata->userexprs.end(); ++it )
1723 delete *it;
1724 exprintdata->userexprs.clear();
1725
1726 CppAD::Independent(exprintdata->X);
1727
1728 SCIP_CALL( eval(scip, expr, exprintdata, exprintdata->X, exprintdata->Y[0]) );
1729
1730 exprintdata->f.Dependent(exprintdata->X, exprintdata->Y);
1731
1732 exprintdata->val = Value(exprintdata->Y[0]);
1733 SCIPdebugMessage("Eval retaped and computed value %g\n", exprintdata->val);
1734
1735 // the following is required if the gradient shall be computed by a reverse sweep later
1736 // exprintdata->val = exprintdata->f.Forward(0, exprintdata->x)[0];
1737
1738 // https://coin-or.github.io/CppAD/doc/optimize.htm
1739 exprintdata->f.optimize();
1740
1741 exprintdata->need_retape = false;
1742 }
1743 else
1744 {
1745 assert(exprintdata->x.size() >= n);
1746 for( size_t i = 0; i < n; ++i )
1747 exprintdata->x[i] = varvals[exprintdata->varidxs[i]];
1748
1749 exprintdata->val = exprintdata->f.Forward(0, exprintdata->x)[0];
1750 SCIPdebugMessage("Eval used forward sweep to compute value %g\n", exprintdata->val);
1751 }
1752
1753 *val = exprintdata->val;
1754
1755 return SCIP_OKAY;
1756}
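
SCIPexprintEval() above tapes the expression once via CppAD::Independent()/Dependent()/optimize() and afterwards only replays the tape with a zero-order forward sweep. The following standalone CppAD sketch, which is not SCIP code and uses a hypothetical toy function, shows the same pattern:

   // standalone CppAD sketch (not SCIP code, hypothetical toy function): tape
   // f(x0,x1) = x0*exp(x1) once, then reevaluate at a new point with a zero-order
   // forward sweep, mirroring the retape/forward split above
   #include <cppad/cppad.hpp>
   #include <vector>

   double tape_and_reevaluate()
   {
      std::vector< CppAD::AD<double> > X(2), Y(1);
      X[0] = 1.0;
      X[1] = 2.0;

      CppAD::Independent(X);               // start taping (as in the retape branch above)
      Y[0] = X[0] * CppAD::exp(X[1]);
      CppAD::ADFun<double> f(X, Y);        // stop taping; plays the role of f.Dependent(X, Y)
      f.optimize();                        // optional tape optimization

      std::vector<double> x(2);
      x[0] = 3.0;
      x[1] = 0.5;
      return f.Forward(0, x)[0];           // reevaluate without retaping: 3.0*exp(0.5)
   }
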
1757
1758/** computes value and gradient of an expression */
1759SCIP_RETCODE SCIPexprintGrad(
1760 SCIP* scip, /**< SCIP data structure */
1761 SCIP_EXPRINT* exprint, /**< interpreter data structure */
1762 SCIP_EXPR* expr, /**< expression */
1763 SCIP_EXPRINTDATA* exprintdata, /**< interpreter-specific data for expression */
1764 SCIP_Real* varvals, /**< values of variables, can be NULL if new_varvals is FALSE */
1765 SCIP_Bool new_varvals, /**< have variable values changed since last call to a point evaluation routine? */
1766 SCIP_Real* val, /**< buffer to store expression value */
1767 SCIP_Real* gradient /**< buffer to store expression gradient */
1768 )
1769{
1770 assert(expr != NULL);
1771 assert(exprintdata != NULL);
1772 assert(varvals != NULL || new_varvals == FALSE);
1773 assert(val != NULL);
1774 assert(gradient != NULL);
1775
1776 if( new_varvals )
1777 {
1778 SCIP_CALL( SCIPexprintEval(scip, exprint, expr, exprintdata, varvals, val) );
1779 }
1780 else
1781 *val = exprintdata->val;
1782
1783 size_t n = exprintdata->varidxs.size();
1784
1785 if( n == 0 )
1786 return SCIP_OKAY;
1787
1788 vector<double> jac;
1789 if( exprintdata->userexprs.empty() )
1790 jac = exprintdata->f.Jacobian(exprintdata->x);
1791 else
1792 {
1793 // atomic_userexpr::reverse() is not implemented yet (not even for p=0),
1794 // so force use of forward-mode evaluation for the gradient (though that is rather inefficient)
1795 exprintdata->f.Forward(0, exprintdata->x);
1796 jac.resize(n);
1797 CppAD::JacobianFor(exprintdata->f, exprintdata->x, jac);
1798 }
1799
1800 for( size_t i = 0; i < n; ++i )
1801 gradient[exprintdata->varidxs[i]] = jac[i];
1802
1803#ifdef SCIP_DEBUG
1804 SCIPinfoMessage(scip, NULL, "Grad for ");
1805 SCIP_CALL( SCIPprintExpr(scip, expr, NULL) );
1806 SCIPinfoMessage(scip, NULL, "\nx = ");
1807 for( size_t i = 0; i < n; ++i )
1808 {
1809 SCIPinfoMessage(scip, NULL, "\t %g", exprintdata->x[i]);
1810 }
1811 SCIPinfoMessage(scip, NULL, "\ngrad = ");
1812 for( size_t i = 0; i < n; ++i )
1813 {
1814 SCIPinfoMessage(scip, NULL, "\t %g", jac[i]);
1815 }
1816 SCIPinfoMessage(scip, NULL, "\n");
1817#endif
1818
1819 return SCIP_OKAY;
1820}
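
When no atomic user expressions are present, the gradient above comes from f.Jacobian(), i.e., a reverse sweep over the recorded tape. A standalone CppAD sketch (not SCIP code, toy function only) of that call:

   // standalone CppAD sketch (not SCIP code, hypothetical toy function): gradient of
   // f(x0,x1) = x0*exp(x1) via f.Jacobian(), the path taken above when no atomic
   // user expressions are present
   #include <cppad/cppad.hpp>
   #include <vector>

   std::vector<double> gradient_at(double x0, double x1)
   {
      std::vector< CppAD::AD<double> > X(2), Y(1);
      X[0] = x0;
      X[1] = x1;

      CppAD::Independent(X);
      Y[0] = X[0] * CppAD::exp(X[1]);
      CppAD::ADFun<double> f(X, Y);

      std::vector<double> x(2);
      x[0] = x0;
      x[1] = x1;
      return f.Jacobian(x);                // size 2: { exp(x1), x0*exp(x1) }
   }
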
1821
1822/** gives sparsity pattern of lower-triangular part of Hessian
1823 *
1824 * Since the AD code might need to do a forward sweep, variable values need to be passed in here.
1825 *
1826 * Result will have `(*colidxs)[i] <= (*rowidxs)[i]` for `i = 0, ..., *nnz-1`.
1827 */
1828SCIP_RETCODE SCIPexprintHessianSparsity(
1829 SCIP* scip, /**< SCIP data structure */
1830 SCIP_EXPRINT* exprint, /**< interpreter data structure */
1831 SCIP_EXPR* expr, /**< expression */
1832 SCIP_EXPRINTDATA* exprintdata, /**< interpreter-specific data for expression */
1833 SCIP_Real* varvals, /**< values of variables */
1834 int** rowidxs, /**< buffer to return array with row indices of Hessian elements */
1835 int** colidxs, /**< buffer to return array with column indices of Hessian elements */
1836 int* nnz /**< buffer to return length of arrays */
1837 )
1838{
1839 assert(expr != NULL);
1840 assert(exprintdata != NULL);
1841 assert(varvals != NULL);
1842 assert(rowidxs != NULL);
1843 assert(colidxs != NULL);
1844 assert(nnz != NULL);
1845
1846 if( exprintdata->hesrowidxs == NULL )
1847 {
1848 assert(exprintdata->hescolidxs == NULL);
1849 assert(exprintdata->hesvalues.size() == 0);
1850 assert(exprintdata->hesnnz == 0);
1851
1852 size_t n = exprintdata->varidxs.size();
1853 if( n == 0 )
1854 {
1855 *nnz = 0;
1856 return SCIP_OKAY;
1857 }
1858
1859 size_t nn = n*n;
1860
1861 if( exprintdata->need_retape_always )
1862 {
1863 // pretend dense
1864 exprintdata->hesnnz = (n * (n+1))/2;
1865 SCIP_CALL( SCIPallocBlockMemoryArray(scip, &exprintdata->hesrowidxs, exprintdata->hesnnz) );
1866 SCIP_CALL( SCIPallocBlockMemoryArray(scip, &exprintdata->hescolidxs, exprintdata->hesnnz) );
1867
1868 int k = 0;
1869 for( size_t i = 0; i < n; ++i )
1870 for( size_t j = 0; j <= i; ++j )
1871 {
1872 exprintdata->hesrowidxs[k] = exprintdata->varidxs[i];
1873 exprintdata->hescolidxs[k] = exprintdata->varidxs[j];
1874 k++;
1875 }
1876
1877#ifdef SCIP_DEBUG
1878 SCIPinfoMessage(scip, NULL, "HessianSparsity for ");
1879 SCIP_CALL( SCIPprintExpr(scip, expr, NULL) );
1880 SCIPinfoMessage(scip, NULL, ": all elements, due to discontinuities\n");
1881#endif
1882
1883 *rowidxs = exprintdata->hesrowidxs;
1884 *colidxs = exprintdata->hescolidxs;
1885 *nnz = exprintdata->hesnnz;
1886
1887 return SCIP_OKAY;
1888 }
1889
1890 if( exprintdata->need_retape )
1891 {
1892 SCIP_Real val;
1893 SCIP_CALL( SCIPexprintEval(scip, exprint, expr, exprintdata, varvals, &val) );
1894 } /*lint !e438*/
1895
1896 // following https://github.com/coin-or/CppAD/blob/20180000.0/cppad_ipopt/src/vec_fun_pattern.cpp
1897
1898 SCIPdebugMessage("calling ForSparseJac\n");
1899
1900 vector<bool> r(nn, false);
1901 for( size_t i = 0; i < n; ++i )
1902 r[i*n+i] = true;
1903 (void) exprintdata->f.ForSparseJac(n, r); // need to compute sparsity for Jacobian first
1904
1905 SCIPdebugMessage("calling RevSparseHes\n");
1906
1907 // this was originally
1908 // vector<bool> s(1, true);
1909 // hessparsity = exprintdata->f.RevSparseHes(n, s);
1910 // RevSparseHes is just calling RevSparseHesCase
1911 // to avoid copying hessparsity, call RevSparseHesCase directly
1912 vector<bool> hessparsity;
1913 exprintdata->f.RevSparseHesCase(true, false, n, vector<bool>(1, true), hessparsity);
1914
1915 // count number of hessian elements and setup hessparsity_pattern
1916 exprintdata->hessparsity_pattern.resize(0, 0); // clear old data, if any
1917 exprintdata->hessparsity_pattern.resize(n, n);
1918 size_t hesnnz_full = 0; // number of nonzeros in full matrix, that is, not only lower-diagonal
1919 for( size_t i = 0; i < nn; ++i )
1920 if( hessparsity[i] )
1921 {
1922 size_t row = i / n;
1923 size_t col = i % n;
1924
1925 ++hesnnz_full;
1926
1927 if( col > row )
1928 continue;
1929 ++exprintdata->hesnnz;
1930 }
1931
1932 // hessian sparsity in sparse form
1933 // - hesrowidxs,hescolidxs are nonzero entries in the lower-diagonal of the Hessian and are returned to the caller; indices are variable indices as in expression
1934 // - hessparsity_row,hessparsity_col are nonzero entries in the full Hessian and are used in SCIPexprintHessian(); indices are 0..n-1 (n=dimension of f)
1935 // - hessparsity_pattern is the same information as hessparsity_row,hessparsity_col, but stored in different form
1936 // originally we were calling CppAD::local::sparsity_user2internal(exprintdata->hessparsity_pattern, exprintdata->hessparsity, n, n, false, ""),
1937 // which was again looping over hessparsity, so this is included into one loop here
1938 SCIP_CALL( SCIPallocBlockMemoryArray(scip, &exprintdata->hesrowidxs, exprintdata->hesnnz) );
1939 SCIP_CALL( SCIPallocBlockMemoryArray(scip, &exprintdata->hescolidxs, exprintdata->hesnnz) );
1940
1941 exprintdata->hessparsity_row.resize(hesnnz_full);
1942 exprintdata->hessparsity_col.resize(hesnnz_full);
1943
1944 for( size_t i = 0, j = 0, k = 0; i < nn; ++i )
1945 if( hessparsity[i] )
1946 {
1947 size_t row = i / n;
1948 size_t col = i % n;
1949
1950 if( (size_t)exprintdata->hesnnz <= nn/4 )
1951 {
1952 // we need hessparsity_pattern only if SCIPexprintHessian is doing a sparse hessian eval; nn/4 is the threshold for that
1953 exprintdata->hessparsity_pattern.add_element(row, col);
1954 }
1955
1956 if( col > row )
1957 {
1958 // add above-diagonal elements into end part of hessparsity_row/col, so we have a 1:1 correspondence
1959 // between hessparsity_row/col and hesrowidxs/hescolidxs
1960 assert(exprintdata->hesnnz + k < hesnnz_full);
1961 exprintdata->hessparsity_row[exprintdata->hesnnz + k] = row;
1962 exprintdata->hessparsity_col[exprintdata->hesnnz + k] = col;
1963 ++k;
1964 continue;
1965 }
1966
1967 exprintdata->hessparsity_row[j] = row;
1968 exprintdata->hessparsity_col[j] = col;
1969
1970 assert(j < (size_t)exprintdata->hesnnz);
1971 exprintdata->hesrowidxs[j] = exprintdata->varidxs[row];
1972 exprintdata->hescolidxs[j] = exprintdata->varidxs[col];
1973 ++j;
1974 }
1975
1976#ifdef SCIP_DEBUG
1977 SCIPinfoMessage(scip, NULL, "HessianSparsity for ");
1978 SCIP_CALL( SCIPprintExpr(scip, expr, NULL) );
1979 SCIPinfoMessage(scip, NULL, ":");
1980 for( int i = 0; i < exprintdata->hesnnz; ++i )
1981 {
1982 SCIPinfoMessage(scip, NULL, " (%d,%d)", exprintdata->hesrowidxs[i], exprintdata->hescolidxs[i]);
1983 }
1984 SCIPinfoMessage(scip, NULL, "\n");
1985#endif
1986 }
1987
1988 *rowidxs = exprintdata->hesrowidxs;
1989 *colidxs = exprintdata->hescolidxs;
1990 *nnz = exprintdata->hesnnz;
1991
1992 return SCIP_OKAY;
1993}
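
The sparsity computation above follows the usual CppAD recipe: seed ForSparseJac() with the identity pattern of the independent variables, then query RevSparseHes() for the Hessian pattern of the single dependent variable. A standalone CppAD sketch (not SCIP code, toy function only):

   // standalone CppAD sketch (not SCIP code, hypothetical toy function): Hessian
   // sparsity of f(x0,x1,x2) = x0*x1 + x2^3 via ForSparseJac + RevSparseHes,
   // the same pair of calls used above
   #include <cppad/cppad.hpp>
   #include <vector>

   std::vector<bool> hessian_sparsity()
   {
      size_t n = 3;
      std::vector< CppAD::AD<double> > X(n), Y(1);
      X[0] = 1.0;
      X[1] = 1.0;
      X[2] = 1.0;

      CppAD::Independent(X);
      Y[0] = X[0] * X[1] + X[2] * X[2] * X[2];
      CppAD::ADFun<double> f(X, Y);

      // identity sparsity pattern for the independent variables
      std::vector<bool> r(n * n, false);
      for( size_t i = 0; i < n; ++i )
         r[i * n + i] = true;
      (void) f.ForSparseJac(n, r);

      // n*n Hessian pattern of the single dependent variable;
      // true exactly at (0,1), (1,0), and (2,2)
      std::vector<bool> s(1, true);
      return f.RevSparseHes(n, s);
   }
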
1994
1995/** computes value and Hessian of an expression
1996 *
1997 * Returned arrays `rowidxs` and `colidxs` and number of elements `nnz` are the same as given by SCIPexprintHessianSparsity().
1998 * Returned array `hessianvals` will contain the corresponding Hessian elements.
1999 */
2000SCIP_RETCODE SCIPexprintHessian(
2001 SCIP* scip, /**< SCIP data structure */
2002 SCIP_EXPRINT* exprint, /**< interpreter data structure */
2003 SCIP_EXPR* expr, /**< expression */
2004 SCIP_EXPRINTDATA* exprintdata, /**< interpreter-specific data for expression */
2005 SCIP_Real* varvals, /**< values of variables, can be NULL if new_varvals is FALSE */
2006 SCIP_Bool new_varvals, /**< have variable values changed since last call to an evaluation routine? */
2007 SCIP_Real* val, /**< buffer to store function value */
2008 int** rowidxs, /**< buffer to return array with row indices of Hessian elements */
2009 int** colidxs, /**< buffer to return array with column indices of Hessian elements */
2010 SCIP_Real** hessianvals, /**< buffer to return array with Hessian elements */
2011 int* nnz /**< buffer to return length of arrays */
2012 )
2013{
2014 assert(expr != NULL);
2015 assert(exprintdata != NULL);
2016
2017 if( exprintdata->hesrowidxs == NULL )
2018 {
2019 /* setup sparsity if not done yet */
2020 int dummy1;
2021 int* dummy2;
2022
2023 assert(exprintdata->hescolidxs == NULL);
2024 assert(exprintdata->hesvalues.size() == 0);
2025 assert(exprintdata->hesnnz == 0);
2026
2027 SCIP_CALL( SCIPexprintHessianSparsity(scip, exprint, expr, exprintdata, varvals, &dummy2, &dummy2, &dummy1) );
2028
2029 new_varvals = FALSE;
2030 }
2031
2032 if( new_varvals )
2033 {
2034 SCIP_CALL( SCIPexprintEval(scip, exprint, expr, exprintdata, varvals, val) );
2035 }
2036 else
2037 *val = exprintdata->val;
2038
2039 size_t n = exprintdata->varidxs.size();
2040
2041 // eval hessian; if constant, then only if not evaluated yet (hesvalues has size 0, but hesnnz > 0)
2042 if( n > 0 && (!exprintdata->hesconstant || exprintdata->hesvalues.size() < (size_t)exprintdata->hesnnz) )
2043 {
2044 size_t nn = n*n;
2045
2046 if( exprintdata->hesvalues.size() == 0 )
2047 {
2048 // using here hessparsity_row.size() instead of hesnnz as we want to pass hesvalues to CppAD and it prefers to calculate a full Hessian
2049 exprintdata->hesvalues.resize(exprintdata->hessparsity_row.size());
2050 }
2051
2052 // these use reverse mode
2053 if( (size_t)exprintdata->hesnnz > nn/4 )
2054 {
2055 // use a dense Hessian if there are many nonzero elements (approx. more than half, recalling that hesnnz counts only the lower-triangular part),
2056 // because the dense evaluation seems faster than the sparse one when there is not much sparsity to exploit
2057 vector<double> hess = exprintdata->f.Hessian(exprintdata->x, 0);
2058 for( int i = 0; i < exprintdata->hesnnz; ++i )
2059 exprintdata->hesvalues[i] = hess[exprintdata->hessparsity_row[i] * n + exprintdata->hessparsity_col[i]];
2060 }
2061 else
2062 {
2063 // originally, this was hess = exprintdata->f.SparseHessian(exprintdata->x, vector<double>(1, 1.0), exprintdata->hessparsity),
2064 // where hess was a dense nxn matrix as in the case above
2065 // to reuse the coloring of the sparsity pattern and use also a sparse matrix for the Hessian values, we now call SparseHessianCompute directly
2066 exprintdata->f.SparseHessianCompute(exprintdata->x, vector<double>(1, 1.0), exprintdata->hessparsity_pattern, exprintdata->hessparsity_row, exprintdata->hessparsity_col, exprintdata->hesvalues, exprintdata->heswork);
2067
2068#ifdef NDEBUG
2069 // hessparsity_row, hessparsity_col, hessparsity_pattern are no longer used by SparseHessianCompute after coloring has been computed in first call, except for some asserts, so we free some mem
2070 exprintdata->hessparsity_row.clear();
2071 exprintdata->hessparsity_col.clear();
2072 exprintdata->hessparsity_pattern.resize(0,0);
2073#endif
2074 }
2075 }
2076
2077#ifdef SCIP_DEBUG
2078 SCIPinfoMessage(scip, NULL, "Hessian for ");
2079 SCIP_CALL( SCIPprintExpr(scip, expr, NULL) );
2080 SCIPinfoMessage(scip, NULL, "\nat x = ");
2081 for( size_t i = 0; i < n; ++i )
2082 {
2083 SCIPinfoMessage(scip, NULL, "\t %g", exprintdata->x[i]);
2084 }
2085 SCIPinfoMessage(scip, NULL, "\nis ");
2086 for( int i = 0; i < exprintdata->hesnnz; ++i )
2087 {
2088 SCIPinfoMessage(scip, NULL, " (%d,%d)=%g", exprintdata->hesrowidxs[i], exprintdata->hescolidxs[i], exprintdata->hesvalues[i]);
2089 }
2090 SCIPinfoMessage(scip, NULL, "\n");
2091#endif
2092
2093 *rowidxs = exprintdata->hesrowidxs;
2094 *colidxs = exprintdata->hescolidxs;
2095 *hessianvals = exprintdata->hesvalues.data();
2096 *nnz = exprintdata->hesnnz;
2097
2098 return SCIP_OKAY;
2099}
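
For dense patterns, the code above falls back to f.Hessian(x, 0), which returns the full n*n Hessian of dependent variable 0 in row-major order, while the sparse branch reuses the coloring via SparseHessianCompute(). A standalone CppAD sketch (not SCIP code, toy function only) of the dense call:

   // standalone CppAD sketch (not SCIP code, hypothetical toy function): dense Hessian
   // of f(x0,x1) = x0*x0*x1 via f.Hessian(x, 0), the dense branch taken above when
   // more than a quarter of the n*n entries are nonzero
   #include <cppad/cppad.hpp>
   #include <vector>

   std::vector<double> dense_hessian(double x0, double x1)
   {
      std::vector< CppAD::AD<double> > X(2), Y(1);
      X[0] = x0;
      X[1] = x1;

      CppAD::Independent(X);
      Y[0] = X[0] * X[0] * X[1];
      CppAD::ADFun<double> f(X, Y);

      std::vector<double> x(2);
      x[0] = x0;
      x[1] = x1;
      // row-major 2x2 Hessian of dependent variable 0: { 2*x1, 2*x0, 2*x0, 0 }
      return f.Hessian(x, 0);
   }
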