add option and code to approximate objective gradient
- new option gradient_approximation
- requested in #573, shouldn't really be used
svigerske committed Jun 23, 2022
1 parent b56b627 commit 2ba311b
Showing 4 changed files with 103 additions and 26 deletions.
2 changes: 2 additions & 0 deletions ChangeLog.md
@@ -11,6 +11,8 @@ More detailed information about incremental changes can be found in the

- Fixed that ComputeSensitivityMatrix() of sIpopt assumed that there are
no more than two parameters [#578, by Andrea Vescovini].
- For completeness, added option `gradient_approximation` to enable approximation
  of the objective gradient by finite differences. Do not use. [#573]

### 3.14.6 (2022-05-02)

42 changes: 26 additions & 16 deletions doc/options.dox
@@ -319,6 +319,32 @@ Possible values: yes, no
When the Hessian is approximated, it is assumed that the first num_linear_variables variables are linear. The Hessian is then not approximated in this space. If the get_number_of_nonlinear_variables method in the TNLP is implemented, this option is ignored. The valid range for this integer option is 0 ≤ num_linear_variables and its default value is 0.
</blockquote>

\anchor OPT_jacobian_approximation
<strong>jacobian_approximation</strong> (<em>advanced</em>): Specifies technique to compute constraint Jacobian
<blockquote>
The default value for this string option is "exact".

Possible values:
- exact: user-provided derivatives
- finite-difference-values: user-provided structure, values by finite differences
</blockquote>
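Like any string option, this can also be selected through an ipopt.opt options file in the working directory; a minimal sketch of the relevant line:

jacobian_approximation finite-difference-values
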

\anchor OPT_gradient_approximation
<strong>gradient_approximation</strong> (<em>advanced</em>): Specifies technique to compute objective gradient
<blockquote>
The default value for this string option is "exact".

Possible values:
- exact: user-provided gradient
- finite-difference-values: values by finite differences
</blockquote>
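For illustration, a minimal sketch of selecting this option from the C++ interface (standard IpoptApplication setup assumed; error handling omitted):

#include "IpIpoptApplication.hpp"

int main()
{
   using namespace Ipopt;
   SmartPtr<IpoptApplication> app = IpoptApplicationFactory();
   // approximate the objective gradient by finite differences (discouraged; see #573)
   app->Options()->SetStringValue("gradient_approximation", "finite-difference-values");
   app->Options()->SetNumericValue("findiff_perturbation", 1e-7); // the default, shown explicitly
   app->Initialize();
   // ... build a TNLP and call app->OptimizeTNLP(...) as usual
   return 0;
}

The same effect is had with the line gradient_approximation finite-difference-values in ipopt.opt.
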

\anchor OPT_findiff_perturbation
<strong>findiff_perturbation</strong> (<em>advanced</em>): Size of the finite difference perturbation for derivative approximation.
<blockquote>
This determines the relative perturbation of the variable entries. The valid range for this real option is 0 < findiff_perturbation and its default value is 10<sup>-07</sup>.
</blockquote>
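The relative scaling this option controls matches what Eval_grad_f computes per variable; as a one-line sketch (helper name is hypothetical, not Ipopt code):

#include <algorithm>
#include <cmath>

// hypothetical helper: step h_i = findiff_perturbation * max(1, |x_i|),
// i.e., relative in the variable's magnitude but never smaller in absolute terms
double findiff_step(double findiff_perturbation, double xi)
{
   return findiff_perturbation * std::max(1.0, std::fabs(xi));
}
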

\anchor OPT_kappa_d
<strong>kappa_d</strong> (<em>advanced</em>): Weight for linear damping term (to handle one-sided bounds).
<blockquote>
@@ -1483,22 +1509,6 @@ Possible values:
Possible values: yes, no
</blockquote>

\anchor OPT_jacobian_approximation
<strong>jacobian_approximation</strong> (<em>advanced</em>): Specifies technique to compute constraint Jacobian
<blockquote>
The default value for this string option is "exact".

Possible values:
- exact: user-provided derivatives
- finite-difference-values: user-provided structure, values by finite differences
</blockquote>

\anchor OPT_findiff_perturbation
<strong>findiff_perturbation</strong> (<em>advanced</em>): Size of the finite difference perturbation for derivative approximation.
<blockquote>
This determines the relative perturbation of the variable entries. The valid range for this real option is 0 < findiff_perturbation and its default value is 10<sup>-07</sup>.
</blockquote>

\anchor OPT_point_perturbation_radius
<strong>point_perturbation_radius</strong>: Maximal perturbation of an evaluation point.
<blockquote>
76 changes: 66 additions & 10 deletions src/Interfaces/IpTNLPAdapter.cpp
@@ -158,6 +158,14 @@ void TNLPAdapter::RegisterOptions(
"finite-difference-values", "user-provided structure, values by finite differences",
"",
true);
roptions->AddStringOption2(
"gradient_approximation",
"Specifies technique to compute objective Gradient",
"exact",
"exact", "user-provided gradient",
"finite-difference-values", "values by finite differences",
"",
true);
roptions->AddLowerBoundedNumberOption(
"findiff_perturbation",
"Size of the finite difference perturbation for derivative approximation.",
@@ -247,6 +255,8 @@ bool TNLPAdapter::ProcessOptions(

options.GetEnumValue("jacobian_approximation", enum_int, prefix);
jacobian_approximation_ = JacobianApproxEnum(enum_int);
options.GetEnumValue("gradient_approximation", enum_int, prefix);
gradient_approximation_ = GradientApproxEnum(enum_int);
options.GetNumericValue("findiff_perturbation", findiff_perturbation_, prefix);

options.GetNumericValue("point_perturbation_radius", point_perturbation_radius_, prefix);
@@ -1465,7 +1475,7 @@ bool TNLPAdapter::GetBoundsInformation(
}

// In case we are doing finite differences, keep a copy of the bounds
if( jacobian_approximation_ != JAC_EXACT || gradient_approximation_ != OBJGRAD_EXACT )
{
delete[] findiff_x_l_;
delete[] findiff_x_u_;
@@ -1670,23 +1680,69 @@ bool TNLPAdapter::Eval_grad_f(
   DenseVector* dg_f = static_cast<DenseVector*>(&g_f);
   DBG_ASSERT(dynamic_cast<DenseVector*>(&g_f));
   Number* values = dg_f->Values();

   if( gradient_approximation_ == OBJGRAD_EXACT )
   {
      if( IsValid(P_x_full_x_) )
      {
         Number* full_grad_f = new Number[n_full_x_];
         if( tnlp_->eval_grad_f(n_full_x_, full_x_, new_x, full_grad_f) )
         {
            const Index* x_pos = P_x_full_x_->ExpandedPosIndices();
            for( Index i = 0; i < g_f.Dim(); i++ )
            {
               values[i] = full_grad_f[x_pos[i]];
            }
            retvalue = true;
         }
         delete[] full_grad_f;
      }
      else
      {
         retvalue = tnlp_->eval_grad_f(n_full_x_, full_x_, new_x, values);
      }
   }
   else
   {
      // make sure we have the value of the objective at the point
      Number f;
      retvalue = tnlp_->eval_f(n_full_x_, full_x_, new_x, f);
      if( retvalue )
      {
         Number* full_x_pert = new Number[n_full_x_];
         IpBlasCopy(n_full_x_, full_x_, 1, full_x_pert, 1);
         const Index* x_pos = NULL;
         if( IsValid(P_x_full_x_) )
            x_pos = P_x_full_x_->ExpandedPosIndices();

         // compute a forward finite-difference approximation of the gradient
         for( Index i = 0; i < g_f.Dim(); i++ )
         {
            Index ivar = x_pos != NULL ? x_pos[i] : i;
            if( findiff_x_l_[ivar] < findiff_x_u_[ivar] )
            {
               const Number xorig = full_x_pert[ivar];
               Number this_perturbation = findiff_perturbation_ * Max(Number(1.), std::abs(full_x_[ivar]));
               full_x_pert[ivar] += this_perturbation;
               if( full_x_pert[ivar] > findiff_x_u_[ivar] )
               {
                  // if at upper bound, then change direction towards lower bound
                  this_perturbation = -this_perturbation;
                  full_x_pert[ivar] = xorig + this_perturbation;
               }
               Number f_pert;
               retvalue = tnlp_->eval_f(n_full_x_, full_x_pert, true, f_pert);
               if( !retvalue )
                  break;

               values[i] = (f_pert - f) / this_perturbation;

               full_x_pert[ivar] = xorig;
            }
         }

         delete[] full_x_pert;
      }
   }

   return retvalue;
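For readers who want to see the scheme in isolation: below is a self-contained sketch of the same forward-difference rule, with the relative perturbation and the direction flip at an active upper bound. All names are illustrative; this is not Ipopt code.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Sketch of the forward-difference gradient used above: for each free variable,
// perturb by h_i = eps * max(1, |x_i|), flip the direction if the step would
// leave the box, and divide the objective change by the (signed) step.
template <typename F>
void approx_gradient(F&& f, std::vector<double>& x,
                     const std::vector<double>& xl, const std::vector<double>& xu,
                     double eps, std::vector<double>& grad)
{
   const double f0 = f(x); // objective at the base point
   for( std::size_t i = 0; i < x.size(); i++ )
   {
      if( !(xl[i] < xu[i]) )
         continue; // fixed variable: no perturbation possible
      const double xorig = x[i];
      double h = eps * std::max(1.0, std::fabs(xorig));
      x[i] = xorig + h;
      if( x[i] > xu[i] )
      {
         h = -h; // at the upper bound: step toward the lower bound instead
         x[i] = xorig + h;
      }
      grad[i] = (f(x) - f0) / h;
      x[i] = xorig; // restore before perturbing the next variable
   }
}

int main()
{
   auto f = [](const std::vector<double>& x) { return x[0] * x[0] + 3.0 * x[1]; };
   std::vector<double> x = { 2.0, 1.0 }, xl = { -5.0, -5.0 }, xu = { 5.0, 5.0 }, g(2);
   approx_gradient(f, x, xl, xu, 1e-7, g);
   std::printf("approx gradient: (%g, %g), exact: (4, 3)\n", g[0], g[1]);
}

A forward difference has O(h) truncation error, so findiff_perturbation trades truncation against floating-point cancellation; each gradient evaluation also costs one extra objective evaluation per variable, which is part of why the commit message discourages the option.
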
9 changes: 9 additions & 0 deletions src/Interfaces/IpTNLPAdapter.hpp
@@ -261,6 +261,13 @@ class IPOPTLIB_EXPORT TNLPAdapter : public NLP
JAC_FINDIFF_VALUES
};

/** Enum for specifying technique for computing objective gradient */
enum GradientApproxEnum
{
OBJGRAD_EXACT = 0,
OBJGRAD_FINDIFF_VALUES
};

/** Method for performing the derivative test */
bool CheckDerivatives(
DerivativeTestEnum deriv_test,
@@ -489,6 +496,8 @@ class IPOPTLIB_EXPORT TNLPAdapter : public NLP
Index num_linear_variables_;
/** Flag indicating how Jacobian is computed. */
JacobianApproxEnum jacobian_approximation_;
/** Flag indicating how objective gradient is computed. */
GradientApproxEnum gradient_approximation_;
/** Size of the perturbation for the derivative approximation */
Number findiff_perturbation_;
/** Maximal perturbation of the initial point */
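One practical consequence of the new code path (an observation, not part of the commit): when gradient_approximation is set to finite-difference-values, TNLPAdapter::Eval_grad_f above no longer consults the user's eval_grad_f, so a TNLP with no analytic gradient can leave it as a stub. A hypothetical fragment:

#include "IpTNLP.hpp"

using namespace Ipopt;

// hypothetical TNLP fragment; only the two methods relevant here are shown
class GradFreeNLP : public TNLP
{
public:
   bool eval_f(Index n, const Number* x, bool new_x, Number& obj_value) override
   {
      obj_value = x[0] * x[0] + 3.0 * x[1]; // some objective without coded derivatives
      return true;
   }

   bool eval_grad_f(Index n, const Number* x, bool new_x, Number* grad_f) override
   {
      // not consulted when gradient_approximation=finite-difference-values
      return false;
   }

   // ... the remaining pure virtual TNLP methods as usual
};
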
