diff --git a/CMakeLists.txt b/CMakeLists.txt index ae89eac7..a1c7cc33 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -42,6 +42,7 @@ option (NLOPT_PYTHON "build python bindings" ON) option (NLOPT_OCTAVE "build octave bindings" ON) option (NLOPT_MATLAB "build matlab bindings" OFF) option (NLOPT_GUILE "build guile bindings" ON) +option (NLOPT_JAVA "build java bindings" ON) option (NLOPT_SWIG "use SWIG to build bindings" ON) option (NLOPT_LUKSAN "enable LGPL Luksan solvers" ON) option (NLOPT_TESTS "build unit tests" OFF) @@ -143,7 +144,7 @@ if (WITH_THREADLOCAL AND NOT DEFINED THREADLOCAL) endif () -if (NLOPT_CXX OR NLOPT_PYTHON OR NLOPT_GUILE OR NLOPT_OCTAVE) +if (NLOPT_CXX OR NLOPT_PYTHON OR NLOPT_GUILE OR NLOPT_OCTAVE OR NLOPT_JAVA) check_cxx_symbol_exists (__cplusplus ciso646 SYSTEM_HAS_CXX) if (SYSTEM_HAS_CXX) set (CMAKE_CXX_STANDARD 11) # set the standard to C++11 but do not require it @@ -340,6 +341,25 @@ if (NLOPT_GUILE) find_package (Guile) endif () +if (NLOPT_JAVA) + # we do not really need any component, only the main JNI target, but if the + # list of components is left empty, FindJNI defaults to "JVM AWT", and we + # specifically do not want to check for the AWT library that is not available + # on headless installations + find_package (JNI COMPONENTS JVM) + # FindJNI.cmake in CMake versions prior to 3.24 does not export any targets + if(JNI_FOUND AND NOT TARGET JNI::JNI) + add_library(JNI::JNI IMPORTED INTERFACE) + set_property(TARGET JNI::JNI PROPERTY INTERFACE_INCLUDE_DIRECTORIES + ${JAVA_INCLUDE_PATH}) + if(NOT JNI_INCLUDE_PATH2_OPTIONAL AND JAVA_INCLUDE_PATH2) + set_property(TARGET JNI::JNI APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES + ${JAVA_INCLUDE_PATH2}) + endif() + endif() + find_package (Java 1.5) +endif () + if (NLOPT_SWIG) find_package (SWIG 3) if (SWIG_FOUND) diff --git a/doc/docs/NLopt_Java_Reference.md b/doc/docs/NLopt_Java_Reference.md new file mode 100644 index 00000000..3c80b305 --- /dev/null +++ b/doc/docs/NLopt_Java_Reference.md @@ -0,0 +1,423 @@

---
# NLopt Java Reference
---

NLopt includes an interface callable from the [Java programming language](https://en.wikipedia.org/wiki/Java_(programming_language)).

The main purpose of this section is to document the syntax and unique features of the Java API; for more detail on the underlying features, please refer to the C documentation in the [NLopt Reference](NLopt_Reference.md).

[TOC]

Using the NLopt Java API
------------------------

To use NLopt in Java, your Java program should usually include the line:

```java
import nlopt.*;
```

which imports the complete `nlopt` package; alternatively, you can import each class you intend to use individually (which some Java IDEs will maintain automatically for you). Java also allows using the package with explicit namespacing, e.g., `nlopt.Opt opt = new nlopt.Opt(nlopt.Algorithm.LD_MMA, 2);`, but this is typically not recommended.

In addition, your Java program *must* ensure that the JNI library `nloptjni` is loaded before using the `nlopt` package, because Java will otherwise be unable to find the native methods of the NLopt Java binding and will throw a runtime error. This can be done with the line:

```java
System.loadLibrary("nloptjni");
```

Simple programs will typically call this at the beginning of `main`. In more complex applications, the most suitable spot is a constructor or a static initializer in the application class interfacing to NLopt.
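For example, a minimal sketch of the static-initializer approach (the class name `MyOptimizerWrapper` is illustrative, not part of the binding):

```java
public class MyOptimizerWrapper {
  static {
    // Runs once, when this class is first loaded, i.e., before any
    // native method of the nlopt package can be called from here.
    System.loadLibrary("nloptjni");
  }

  // ... methods using nlopt.Opt go here ...
}
```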
The application may also already have a wrapper around `System.loadLibrary` that sets library search paths and/or extracts libraries from JARs. In that case, that wrapper can also be used for `nloptjni`.

The `nlopt.Opt` class
---------------------

The NLopt API revolves around an object of type `nlopt.Opt`, analogous to (and internally wrapping) `nlopt::opt` in C++. Via methods of this object, all of the parameters of the optimization are specified (dimensions, algorithm, stopping criteria, constraints, objective function, etcetera), and then one finally calls the `Opt.optimize` method in order to perform the optimization. The object should normally be created via the constructor:

```java
Opt opt = new Opt(algorithm, n);
```

given an `algorithm` (see [NLopt Algorithms](NLopt_Algorithms.md) for possible values) and the dimensionality of the problem (`n`, the number of optimization parameters). (Writing just `Opt` assumes that `nlopt.Opt` has been imported with `import nlopt.*;` or `import nlopt.Opt;` in the import section of the class. Otherwise, you have to write `nlopt.Opt` explicitly.) The code snippets below assume that `opt` is an instance of `nlopt.Opt`, constructed as above.

Whereas the C algorithms are specified by `nlopt_algorithm` constants of the form `NLOPT_LD_MMA`, `NLOPT_LN_COBYLA`, etcetera, the Java `Algorithm` values are of the form `nlopt.Algorithm.LD_MMA`, `nlopt.Algorithm.LN_COBYLA`, etcetera (with the `NLOPT_` prefix replaced by the `nlopt.Algorithm.` enum prefix). (Again, the `nlopt.` package prefix can be omitted if `nlopt.Algorithm` has been imported. It is also possible to import the individual enum entries using `import static`, e.g., `import static nlopt.Algorithm.*;` or `import static nlopt.Algorithm.LD_MMA;`, allowing you to write, e.g., just `LD_MMA` in the code below.)

There is also a copy constructor `nlopt.Opt(Opt)` to make a copy of a given object (equivalent to `nlopt_copy` in the C API).

If there is an error in the constructor (or copy constructor), a runtime exception such as `IllegalArgumentException` or `NullPointerException`, or an `OutOfMemoryError`, is thrown.

The algorithm and dimension parameters of the object are immutable (cannot be changed without constructing a new object), but you can query them for a given object by the methods:

```java
opt.getAlgorithm()
opt.getDimension()
```

You can get a string description of the algorithm via:

```java
opt.getAlgorithmName()
```

Objective function
------------------

The objective function is specified by calling one of the methods:

```java
opt.setMinObjective(f)
opt.setMaxObjective(f)
```

depending on whether one wishes to minimize or maximize the objective function `f`, respectively. The function `f` must implement the interface `nlopt.Opt.Func`, which specifies a method `double apply(double[] x, double[] gradient)`. This can be done in 3 ways:

1. explicitly, by declaring a named or anonymous class implementing the interface. (In Java versions prior to 1.8, this was the only way.)
2. as an explicit lambda, of the form
   ```java
   (x, grad) -> {
     if (grad != null) {
       ...set grad to gradient, in-place...
     }
     return ...value of f(x)...;
   }
   ```
3. as a static method reference of the form `MyClass::f` where `MyClass` contains a method of the form:
   ```java
   private static double f(double[] x, double[] grad) {
     if (grad != null) {
       ...set grad to gradient, in-place...
     }
     return ...value of f(x)...;
   }
   ```
   Note that, if the reference `MyClass::f` is within the same class `MyClass`, the method `f` can and should be `private`. Otherwise, it needs a higher visibility, e.g., `public`. Also note that `MyClass::f` is just a shortcut for the lambda `(x, grad) -> MyClass.f(x, grad)`, which is itself a shortcut for `(x, grad) -> { return MyClass.f(x, grad); }`.

The return value should be the value of the function at the point `x`, where `x` is a `double[]` array of length `n` holding the optimization parameters (`n` being the dimension passed to the constructor).

In addition, if the argument `grad` is not null, i.e. `grad != null`, then `grad` is a `double[]` array of length `n` which should (upon return) be set to the gradient of the function with respect to the optimization parameters at `x`. That is, `grad[i]` should upon return contain the partial derivative $\partial f / \partial x_i$, for $0 \leq i < n$, if `grad` is non-null. Not all of the optimization algorithms (below) use the gradient information: for algorithms listed as "derivative-free," the `grad` argument will always be null and need never be computed. (For algorithms that do use gradient information, however, `grad` may still be null for some calls.)

Note that `grad` must be modified *in-place* by your function `f`. Generally, this means using indexing operations `grad[...] = ...;` to overwrite the contents of `grad`, as described below.

### Assigning results in-place

Your objective and constraint functions must overwrite the contents of the `grad` (gradient) argument in-place (although of course you can allocate whatever additional storage you might need, in addition to overwriting `grad`). However, typical Java assignment operations do *not* do this. For example:

```java
grad = Arrays.stream(x).map(t -> 2*t).toArray();
```

might seem to set `grad` to the gradient of the function `sum(x*x)`, but it will *not work* with NLopt because this expression actually allocates a *new* array to store `2*x` and re-assigns `grad` to point to it, rather than overwriting the old contents of `grad`. Instead, you should either explicitly copy the local array to `grad` using `System.arraycopy`:

```java
double[] mygrad = Arrays.stream(x).map(t -> 2*t).toArray();
System.arraycopy(mygrad, 0, grad, 0, grad.length);
```

or set `grad` in place to begin with, e.g.:

```java
Arrays.setAll(grad, i -> 2*x[i]);
```

or simply:

```java
int n = x.length;
for (int i = 0; i < n; i++) {
  grad[i] = 2*x[i];
}
```

Bound constraints
-----------------

The [bound constraints](NLopt_Reference.md#bound-constraints) can be specified by calling the methods:

```java
opt.setLowerBounds(lb);
opt.setUpperBounds(ub);
```

where `lb` and `ub` are `DoubleVector` instances of length *n* (the same as the dimension passed to the `nlopt.Opt` constructor). For convenience, these methods are overloaded with variants that take a single number as their argument, in order to set the lower/upper bounds for all optimization parameters to a single constant.

To retrieve the values of the lower/upper bounds, you can call one of:

```java
opt.getLowerBounds();
opt.getUpperBounds();
```

both of which return `DoubleVector` instances.

To specify an unbounded dimension, you can use `Double.POSITIVE_INFINITY` or `Double.NEGATIVE_INFINITY` in Java to specify $+\infty$ or $-\infty$, respectively.
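For example, a short sketch (assuming a 2-dimensional `opt`; the bound values are illustrative) that leaves the first parameter unbounded below, requires the second to be nonnegative, and gives both the same upper bound:

```java
opt.setLowerBounds(new DoubleVector(Double.NEGATIVE_INFINITY, 0.0));
opt.setUpperBounds(10.0); // single-number overload: same bound for all parameters
```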
Nonlinear constraints
---------------------

Just as for [nonlinear constraints in C](NLopt_Reference.md#nonlinear-constraints), you can specify nonlinear inequality and equality constraints by the methods:

```java
opt.addInequalityConstraint(fc, tol);
opt.addEqualityConstraint(h, tol);
```

where the arguments `fc` and `h` have the same form as the objective function above. The (optional) `tol` arguments specify a tolerance in judging feasibility for the purposes of stopping the optimization, as in C (defaulting to zero if they are omitted).

To remove all of the inequality and/or equality constraints from a given problem, you can call the following methods:

```java
opt.removeInequalityConstraints();
opt.removeEqualityConstraints();
```


### Vector-valued constraints

Just as for [nonlinear constraints in C](NLopt_Reference.md#vector-valued-constraints), you can specify vector-valued nonlinear inequality and equality constraints by the methods:

```java
opt.addInequalityMconstraint(c, tol)
opt.addEqualityMconstraint(c, tol)
```

Here, `tol` is a `DoubleVector` of the tolerances in each constraint dimension; the dimensionality *m* of the constraint is determined by `tol.size()`. The constraint function `c` must implement the interface `nlopt.Opt.MFunc`, which specifies a method `double[] apply(double[] x, double[] gradient);`. It can be implemented in the same 3 ways as the `nlopt.Opt.Func` interface, but now the lambda must be of the form:

```java
(x, grad) -> {
  if (grad != null) {
    ...set grad to gradient, in-place...
  }
  double[] result = new double[m];
  result[0] = ...value of c_0(x)...;
  result[1] = ...value of c_1(x)...;
  return result;
}
```

It should return a `double[]` array whose length equals the dimensionality *m* of the constraint (the same as the length of `tol` above), containing the constraint results at the point `x` (a `double[]` array whose length *n* is the same as the dimension passed to the constructor).

In addition, if the argument `grad` is not null, i.e. `grad != null`, then `grad` is a `double[]` array of length `m*n` which should (upon return) be set in-place ([see above](#assigning-results-in-place)) to the Jacobian (i.e., the matrix of gradient rows, in row-major order) of the constraints with respect to the optimization parameters at `x`. That is, `grad[i*n+j]` should upon return contain the partial derivative $\partial c_i / \partial x_j$, for $0 \leq i < m$ and $0 \leq j < n$, if `grad` is non-null. Not all of the optimization algorithms (below) use the gradient information: for algorithms listed as "derivative-free," the `grad` argument will always be null and need never be computed. (For algorithms that do use gradient information, however, `grad` may still be null for some calls.)

An inequality constraint corresponds to $c_i \le 0$ for $0 \le i < m$, and an equality constraint corresponds to $c_i = 0$, in both cases with tolerance `tol[i]` for purposes of termination criteria.

(You can add multiple vector-valued constraints and/or scalar constraints in the same problem.)

Stopping criteria
-----------------

As explained in the [C API Reference](NLopt_Reference.md#stopping-criteria) and the [Introduction](NLopt_Introduction.md#termination-conditions), you have multiple options for different stopping criteria that you can specify. (Unspecified stopping criteria are disabled; i.e., they have innocuous defaults.)
For each stopping criterion, there are (at least) two methods: a `set` method to specify the stopping criterion, and a `get` method to retrieve the current value for that criterion. The meanings of each criterion are exactly the same as in the C API.

```java
opt.setStopval(stopval);
opt.getStopval();
```

Stop when an objective value of at least `stopval` is found.

```java
opt.setFtolRel(tol);
opt.getFtolRel();
```

Set relative tolerance on function value.

```java
opt.setFtolAbs(tol);
opt.getFtolAbs();
```

Set absolute tolerance on function value.

```java
opt.setXtolRel(tol);
opt.getXtolRel();
```

Set relative tolerance on optimization parameters.

```java
opt.setXtolAbs(tol);
opt.getXtolAbs();
```

Set absolute tolerances on optimization parameters. The `tol` input must be a `DoubleVector` of length `n` (the dimension specified in the `nlopt.Opt` constructor); alternatively, you can pass a single number in order to set the same tolerance for all optimization parameters. `getXtolAbs()` returns the tolerances as a `DoubleVector`.

```java
opt.setXWeights(w);
opt.getXWeights();
```

Set the weights used when computing the L₁ norm for the `xtolRel` stopping criterion above.

```java
opt.setMaxeval(maxeval);
opt.getMaxeval();
```

Stop when the number of function evaluations exceeds `maxeval`. (0 or negative for no limit.)

```java
opt.setMaxtime(maxtime);
opt.getMaxtime();
```

Stop when the optimization time (in seconds) exceeds `maxtime`. (0 or negative for no limit.)

```java
opt.getNumevals();
```

Request the number of function evaluations performed so far.

### Forced termination

In certain cases, the caller may wish to *force* the optimization to halt, for some reason unknown to NLopt: for example, if the user presses Ctrl-C, or if there is an error of some sort in the objective function. You can do this by raising *any* exception inside your objective/constraint functions: the optimization will be halted gracefully, and the same exception will be raised to the caller. See [Exceptions](#exceptions), below. The Java equivalent of `nlopt_forced_stop` from the [C API](NLopt_Reference.md#forced-termination) is to throw an `nlopt.ForcedStopException`.

Algorithm-specific parameters
-----------------------------

Certain NLopt optimization algorithms allow you to specify additional parameters by calling

```java
opt.setParam("name", val);
opt.hasParam("name");
opt.getParam("name", defaultval);
opt.numParams();
opt.nthParam(n);
```

where the string `"name"` is the name of an algorithm-specific parameter and `val` is the value you are setting the parameter to. These methods are equivalent to the [C API](NLopt_Reference.md#algorithm-specific-parameters) functions of the corresponding names.

Performing the optimization
---------------------------

Once all of the desired optimization parameters have been specified in a given object `opt`, you can perform the optimization by calling:

```java
DoubleVector xopt = opt.optimize(x);
```

On input, `x` is a `DoubleVector` of length `n` (the dimension of the problem from the `nlopt.Opt` constructor) giving an initial guess for the optimization parameters. The return value `xopt` is a `DoubleVector` containing the optimized values of the optimization parameters.
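For example, a minimal sketch (assuming a 2-dimensional `opt` configured as in the previous sections; the starting point is arbitrary):

```java
DoubleVector x = new DoubleVector(1.0, 1.0); // initial guess
DoubleVector xopt = opt.optimize(x);
System.out.println("optimum at " + xopt);
```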
You can call the following methods to retrieve the optimized objective function value and the return code (including negative/failure return values), both from the last `optimize` call:

```java
double opt_val = opt.lastOptimumValue();
Result result = opt.lastOptimizeResult();
```

The return code (see below) is positive on success, indicating the reason for termination. On failure (negative return codes), `optimize()` throws an exception (see [Exceptions](#exceptions), below).

### Return values

The possible return values are the same as the [return values in the C API](NLopt_Reference.md#return-values), except that the `NLOPT_` prefix is replaced with the `nlopt.Result` enum. That is, `NLOPT_SUCCESS` becomes `Result.SUCCESS`, etcetera.

Exceptions
----------

The [Error codes (negative return values)](NLopt_Reference.md#error-codes-negative-return-values) in the C API are replaced in the Java API by thrown exceptions. The following exceptions are thrown by the various routines:

```
RuntimeException
```

Generic failure, equivalent to `NLOPT_FAILURE`. Note that, since other, more specific exceptions will typically be subclasses of `RuntimeException`, this should be caught *last*.

```
IllegalArgumentException
```

Invalid arguments (e.g. lower bounds are bigger than upper bounds, an unknown algorithm was specified, etcetera), equivalent to `NLOPT_INVALID_ARGS`.

```
OutOfMemoryError
```

Ran out of memory (a memory allocation failed), equivalent to `NLOPT_OUT_OF_MEMORY`.

```
nlopt.RoundoffLimitedException
```

(A subclass of `RuntimeException`.) Halted because roundoff errors limited progress, equivalent to `NLOPT_ROUNDOFF_LIMITED`.

```
nlopt.ForcedStopException
```

(A subclass of `RuntimeException`.) Halted because of a [forced termination](#forced-termination): the user called `opt.forceStop()` from the user’s objective function or threw an `nlopt.ForcedStopException`. Equivalent to `NLOPT_FORCED_STOP`.

If your objective/constraint functions throw *any* (runtime) exception during the execution of `opt.optimize`, it will be caught by NLopt, the optimization will be halted gracefully, and `opt.optimize` will re-throw the *same* exception to its caller. (Note that the Java compiler will not allow you to throw a checked exception from your callbacks, only a runtime exception.)

Local/subsidiary optimization algorithm
---------------------------------------

Some of the algorithms, especially MLSL and AUGLAG, use a different optimization algorithm as a subroutine, typically for local optimization. You can change the local search algorithm and its tolerances by calling:

```java
opt.setLocalOptimizer(localOpt);
```

Here, `localOpt` is another `nlopt.Opt` object whose parameters are used to determine the local search algorithm, its stopping criteria, and other algorithm parameters. (However, the objective function, bounds, and nonlinear-constraint parameters of `localOpt` are ignored.) The dimension `n` of `localOpt` must match that of `opt`.

This method makes a copy of the `localOpt` object, so you can freely change your original `localOpt` afterwards without affecting `opt`.

Initial step size
-----------------

Just as in the C API, you can [get and set the initial step sizes](NLopt_Reference.md#initial-step-size) for derivative-free optimization algorithms.
The Java equivalents of the C functions are the following methods: + +```java +opt.setInitialStep(dx); +DoubleVector dx = opt.getInitialStep(x); +``` + +Here, `dx` is a `DoubleVector` of the (nonzero) initial steps for each dimension, or a single number if you wish to use the same initial steps for all dimensions. `opt.getInitialStep(x)` returns the initial step that will be used for a starting guess of `x` in `opt.optimize(x)`. + +Stochastic population +--------------------- + +Just as in the C API, you can [get and set the initial population](NLopt_Reference.md#stochastic-population) for stochastic optimization algorithms, by the methods: + +```java +opt.setPopulation(pop); +opt.getPopulation(); +``` + +(A `pop` of zero implies that the heuristic default will be used.) + +Pseudorandom numbers +-------------------- + +For stochastic optimization algorithms, we use pseudorandom numbers generated by the [Mersenne Twister](https://en.wikipedia.org/wiki/Mersenne_twister) algorithm, based on code from Makoto Matsumoto. By default, the [seed](https://en.wikipedia.org/wiki/Random_seed) for the random numbers is generated from the system time, so that you will get a different sequence of pseudorandom numbers each time you run your program. If you want to use a "deterministic" sequence of pseudorandom numbers, i.e. the same sequence from run to run, you can set the seed by calling: + +```java +NLopt.srand(seed); +``` + +where `seed` is an integer. To reset the seed based on the system time, you can call: + +```java +NLopt.srandTime(); +``` + +(Normally, you don't need to call this as it is called automatically. However, it might be useful if you want to "re-randomize" the pseudorandom numbers after calling `NLopt.srand` to set a deterministic seed.) + +Vector storage for limited-memory quasi-Newton algorithms +--------------------------------------------------------- + +Just as in the C API, you can get and set the [number *M* of stored vectors](NLopt_Reference.md#vector-storage-for-limited-memory-quasi-newton-algorithms) for limited-memory quasi-Newton algorithms, via the methods: + +```java +opt.setVectorStorage(M); +opt.getVectorStorage(); +``` + +(The default is *M*=0, in which case NLopt uses a heuristic nonzero value.) + +Version number +-------------- + +To determine the version number of NLopt at runtime, you can call: + +```java +int major = NLopt.versionMajor(); +int minor = NLopt.versionMinor(); +int bugfix = NLopt.versionBugfix(); +``` + +For example, NLopt version 3.1.4 would return `major=3`, `minor=1`, and `bugfix=4`. + + diff --git a/doc/docs/NLopt_Tutorial.md b/doc/docs/NLopt_Tutorial.md index 6e490ad3..fddc6946 100644 --- a/doc/docs/NLopt_Tutorial.md +++ b/doc/docs/NLopt_Tutorial.md @@ -544,6 +544,58 @@ On error conditions, the NLopt functions throw [exceptions](http://www.gnu.org/s The heavy use of side-effects here is a bit unnatural in Scheme, but is used in order to closely map to the C++ interface. (Notice that `nlopt::` C++ functions map to `nlopt-` Guile functions, and `nlopt::opt::` methods map to `nlopt-opt-` functions that take the `opt` object as the first argument.) Of course, you are free to wrap your own Scheme-like functional interface around this if you wish. 
Example in Java
---------------

In Java (1.8 or later), the equivalent of the example above would be:

```java
import nlopt.*;

public class t_java {
    private static double myfunc(double[] x, double[] grad) {
        if (grad != null) {
            grad[0] = 0.0;
            grad[1] = 0.5 / Math.sqrt(x[1]);
        }
        return Math.sqrt(x[1]);
    }

    private static double myconstraint(double[] x, double[] grad, double a,
                                       double b) {
        if (grad != null) {
            grad[0] = 3 * a * (a*x[0] + b) * (a*x[0] + b);
            grad[1] = -1.0;
        }
        return ((a*x[0] + b) * (a*x[0] + b) * (a*x[0] + b) - x[1]);
    }

    public static void main(String[] args) {
        System.loadLibrary("nloptjni");
        Opt opt = new Opt(Algorithm.LD_MMA, 2);
        opt.setLowerBounds(new DoubleVector(Double.NEGATIVE_INFINITY, 0.));
        opt.setMinObjective(t_java::myfunc);
        opt.addInequalityConstraint((x, grad) -> myconstraint(x, grad, 2, 0), 1e-8);
        opt.addInequalityConstraint((x, grad) -> myconstraint(x, grad, -1, 1),
                                    1e-8);
        opt.setXtolRel(1e-4);
        DoubleVector x = opt.optimize(new DoubleVector(1.234, 5.678));
        double minf = opt.lastOptimumValue();
        System.out.println("optimum at " + x);
        System.out.println("minimum value: " + minf);
        System.out.println("result code: " + opt.lastOptimizeResult());
    }
}
```

Note that the objective/constraint functions take two arguments, `x` and `grad`, and return a number. `x` is a `double[]` array whose length is the dimension of the problem; `grad` is either `null`, if it is not needed, or a `double[]` array of the same length that must be modified *in-place* to contain the gradient of the function.

Also note that the above example uses lambdas, both in the explicit `(x, grad) ->` notation and via the `::` method-reference operator, so it will compile as is only with Java 1.8 or later. The Java binding supports Java 1.5 or later, but if you wish to support versions 1.5 to 1.7, you cannot use lambdas. Instead, for Java prior to 1.8, you would have to explicitly declare anonymous classes implementing the interfaces, leading to less readable code.

On error conditions, the NLopt functions throw Java runtime exceptions (unchecked exceptions) that can be caught by your Java code if you wish.

Note that the class and method names are renamed to camel case as usual in Java: upper camel case for classes, lower camel case for methods. The exception classes additionally have `Exception` appended to their names. The `nlopt` C++ namespace maps to the `nlopt` Java package, global `nlopt::` C++ functions map to static methods of the `nlopt.NLopt` class, methods of classes (e.g., `nlopt::opt`) map to the methods of the corresponding Java class (e.g., `nlopt.Opt`), and `std::vector<double>` maps to `nlopt.DoubleVector`.

Example in Fortran
------------------

diff --git a/doc/docs/index.md b/doc/docs/index.md index 4e95f45f..176a4b31 100644 --- a/doc/docs/index.md +++ b/doc/docs/index.md @@ -10,7 +10,7 @@ NLopt **NLopt** is a free/open-source library for **nonlinear optimization**, providing a common interface for a number of different free optimization routines available online as well as original implementations of various other algorithms.
Its features include: -- Callable from [C](NLopt_Reference.md), [C++](NLopt_C-plus-plus_Reference.md), [Fortran](NLopt_Fortran_Reference.md), [Matlab or GNU Octave](NLopt_Matlab_Reference.md), [Python](NLopt_Python_Reference.md), [GNU Guile](NLopt_Guile_Reference.md), [Julia](https://github.com/stevengj/NLopt.jl), [GNU R](NLopt_R_Reference.md), [Lua](https://github.com/rochus-keller/LuaNLopt), [OCaml](https://bitbucket.org/mkur/nlopt-ocaml), [Rust](https://github.com/adwhit/rust-nlopt) and [Crystal](https://github.com/konovod/nlopt.cr). +- Callable from [C](NLopt_Reference.md), [C++](NLopt_C-plus-plus_Reference.md), [Fortran](NLopt_Fortran_Reference.md), [Matlab or GNU Octave](NLopt_Matlab_Reference.md), [Python](NLopt_Python_Reference.md), [GNU Guile](NLopt_Guile_Reference.md), [Java](NLopt_Java_Reference.md), [Julia](https://github.com/stevengj/NLopt.jl), [GNU R](NLopt_R_Reference.md), [Lua](https://github.com/rochus-keller/LuaNLopt), [OCaml](https://bitbucket.org/mkur/nlopt-ocaml), [Rust](https://github.com/adwhit/rust-nlopt) and [Crystal](https://github.com/konovod/nlopt.cr). - A common interface for [many different algorithms](NLopt_Algorithms.md)—try a different algorithm just by changing one parameter. - Support for large-scale optimization (some algorithms scalable to millions of parameters and thousands of constraints). - Both global and local optimization algorithms. diff --git a/src/swig/CMakeLists.txt b/src/swig/CMakeLists.txt index b6f094cd..c376186b 100644 --- a/src/swig/CMakeLists.txt +++ b/src/swig/CMakeLists.txt @@ -2,6 +2,8 @@ if (POLICY CMP0078) cmake_policy(SET CMP0078 NEW) endif () +# clean up old generated source files before running SWIG, useful for Java +set(UseSWIG_MODULE_VERSION 2) include (UseSWIG) # allows one set C++ flags for swig wrappers @@ -24,7 +26,9 @@ if (Python_NumPy_FOUND) set (SWIG_MODULE_nlopt_python_EXTRA_DEPS nlopt-python.i numpy.i generate-cpp) # swig_add_module is deprecated - swig_add_library (nlopt_python LANGUAGE python SOURCES nlopt.i) + swig_add_library (nlopt_python LANGUAGE python SOURCES nlopt.i + OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/python + OUTFILE_DIR ${CMAKE_CURRENT_BINARY_DIR}) target_link_libraries (nlopt_python ${nlopt_lib}) target_link_libraries (nlopt_python Python::NumPy) @@ -33,7 +37,7 @@ if (Python_NumPy_FOUND) set_target_properties (nlopt_python PROPERTIES OUTPUT_NAME nlopt) set_target_properties (nlopt_python PROPERTIES COMPILE_FLAGS "${SWIG_COMPILE_FLAGS}") - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/nlopt.py DESTINATION ${INSTALL_PYTHON_DIR}) + install (FILES ${CMAKE_CURRENT_BINARY_DIR}/python/nlopt.py DESTINATION ${INSTALL_PYTHON_DIR}) install (TARGETS nlopt_python DESTINATION ${INSTALL_PYTHON_DIR}) configure_file (METADATA.in METADATA @ONLY) @@ -44,11 +48,15 @@ endif () if (GUILE_FOUND) - set_source_files_properties (nlopt.i PROPERTIES SWIG_FLAGS "-scmstub") set (SWIG_MODULE_nlopt_guile_EXTRA_DEPS nlopt-guile.i generate-cpp) + set (CMAKE_SWIG_FLAGS -scmstub) # swig_add_module is deprecated - swig_add_library (nlopt_guile LANGUAGE guile SOURCES nlopt.i) + swig_add_library (nlopt_guile LANGUAGE guile SOURCES nlopt.i + OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/guile + OUTFILE_DIR ${CMAKE_CURRENT_BINARY_DIR}) + + set (CMAKE_SWIG_FLAGS) target_include_directories (nlopt_guile PRIVATE ${GUILE_INCLUDE_DIRS}) @@ -58,9 +66,54 @@ if (GUILE_FOUND) file (RELATIVE_PATH _REL_GUILE_SITE_PATH ${GUILE_ROOT_DIR} ${GUILE_SITE_DIR}) set (GUILE_SITE_PATH ${_REL_GUILE_SITE_PATH}) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/nlopt.scm 
DESTINATION ${GUILE_SITE_PATH}) + install (FILES ${CMAKE_CURRENT_BINARY_DIR}/guile/nlopt.scm DESTINATION ${GUILE_SITE_PATH}) file (RELATIVE_PATH _REL_GUILE_EXTENSION_PATH ${GUILE_ROOT_DIR} ${GUILE_EXTENSION_DIR}) set (GUILE_EXTENSION_PATH ${_REL_GUILE_EXTENSION_PATH}) install (TARGETS nlopt_guile LIBRARY DESTINATION ${GUILE_EXTENSION_PATH}) endif () + + +if (JNI_FOUND AND Java_FOUND AND SWIG_FOUND) + + include (UseJava) + + set (SWIG_MODULE_nlopt_java_EXTRA_DEPS nlopt-java.i generate-cpp) + set (CMAKE_SWIG_FLAGS -package nlopt) + + # swig_add_module is deprecated + # OUTPUT_DIR is ${CMAKE_CURRENT_BINARY_DIR}/java/ + the -package above (with + # any '.' replaced by '/'). It must also match the GLOB in glob_java.cmake. + swig_add_library (nlopt_java LANGUAGE java SOURCES nlopt.i + OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/java/nlopt + OUTFILE_DIR ${CMAKE_CURRENT_BINARY_DIR}) + + set (CMAKE_SWIG_FLAGS) + + swig_link_libraries (nlopt_java ${nlopt_lib}) + target_link_libraries (nlopt_java JNI::JNI) + + set_target_properties (nlopt_java PROPERTIES OUTPUT_NAME nloptjni) + if (CMAKE_CXX_COMPILER_ID MATCHES "Clang|GNU") + set_target_properties (nlopt_java PROPERTIES + COMPILE_OPTIONS "-fno-strict-aliasing") + endif () + + install (TARGETS nlopt_java LIBRARY DESTINATION ${NLOPT_INSTALL_LIBDIR}) + + # unfortunately, SWIG will not tell us which .java files it generated, so we + # have to find out ourselves - this is the only portable way to do so + # (The nlopt*.i dependencies are there to force updating the list of sources + # on any changes to the SWIG interface code, they are not direct inputs.) + add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/java_sources.txt + COMMAND ${CMAKE_COMMAND} + -DBINARY_DIR=${CMAKE_CURRENT_BINARY_DIR} + -P ${CMAKE_CURRENT_SOURCE_DIR}/glob_java.cmake + DEPENDS "${swig_generated_file_fullname}" + nlopt.i nlopt-exceptions.i nlopt-java.i + nlopt_java_swig_compilation glob_java.cmake) + + add_jar (nlopt_jar SOURCES @${CMAKE_CURRENT_BINARY_DIR}/java_sources.txt + OUTPUT_NAME nlopt) + install_jar (nlopt_jar ${CMAKE_INSTALL_DATADIR}/java) +endif () diff --git a/src/swig/glob_java.cmake b/src/swig/glob_java.cmake new file mode 100644 index 00000000..5fcd976a --- /dev/null +++ b/src/swig/glob_java.cmake @@ -0,0 +1,13 @@ +# This file(GLOB ...) must run at build (make) time, after the SWIG run. So it +# cannot be invoked directly from CMakeLists.txt, but must be invoked through +# cmake -P at the correct spot of the build, using add_custom_command. +file(GLOB JAVA_SOURCES ${BINARY_DIR}/java/nlopt/*.java) +list(JOIN JAVA_SOURCES "\n" JAVA_SOURCES_LINES) +file(WRITE ${BINARY_DIR}/java_sources.txt ${JAVA_SOURCES_LINES}) + +# SWIG hardcodes non-vararg initial elements for std::vector wrappers, +# probably to support Java versions older than 1.5. We do not really care +# about supporting a Java that old, so fix the generated code. +file(READ ${BINARY_DIR}/java/nlopt/DoubleVector.java FILE_CONTENTS) +string(REPLACE "double[] initialElements" "double... 
initialElements" FILE_CONTENTS "${FILE_CONTENTS}") +file(WRITE ${BINARY_DIR}/java/nlopt/DoubleVector.java "${FILE_CONTENTS}") diff --git a/src/swig/nlopt-java.i b/src/swig/nlopt-java.i new file mode 100644 index 00000000..5494c7fd --- /dev/null +++ b/src/swig/nlopt-java.i @@ -0,0 +1,229 @@ +// -*- C++ -*- +// kate: hl c++ + +// use Java naming conventions +%rename("%(camelcase)s", match="enum") ""; +%rename("%(camelcase)s", match="class") ""; +%rename("%(lowercamelcase)s", %$isfunction) ""; + +// use proper Java enums +%include "enums.swg" +// use Java code for the constants in the enums instead of calling a C function +%javaconst(1); + +// pointer-based API not supported, use version_{major,minor,bugfix} instead +%ignore version; +// pointer-based API not supported, use the other overload instead +%ignore optimize(std::vector &, double &); +// unsupported function APIs, use the ones with nlopt_munge instead +%ignore set_min_objective(func, void *); +%ignore set_min_objective(vfunc, void *); +%ignore set_min_objective(functor_type); +%ignore set_max_objective(func, void *); +%ignore set_max_objective(vfunc, void *); +%ignore set_max_objective(functor_type); +%ignore add_inequality_constraint(func, void *); +%ignore add_inequality_constraint(func, void *, double); +%ignore add_inequality_constraint(vfunc, void *); +%ignore add_inequality_constraint(vfunc, void *, double); +%ignore add_inequality_mconstraint(mfunc, void *, const std::vector &); +%ignore add_equality_constraint(func, void *); +%ignore add_equality_constraint(func, void *, double); +%ignore add_equality_constraint(vfunc, void *); +%ignore add_equality_constraint(vfunc, void *, double); +%ignore add_equality_mconstraint(mfunc, void *, const std::vector &); + +// Munge function types +%extend nlopt::opt { + %proxycode { + public static interface Func { + public double apply(double[] x, double[] gradient); + } + + public static interface MFunc { + public double[] apply(double[] x, double[] gradient); + } + } +} + +%{ +struct jfunc { + JNIEnv *jenv; + jobject func; + jmethodID method; +}; + +static void *free_jfunc(void *p) { + ((jfunc *) p)->jenv->DeleteGlobalRef(((jfunc *) p)->func); + delete (jfunc *) p; + return (void *) 0; +} + +static void *dup_jfunc(void *p) { + jfunc *q = new jfunc; + q->jenv = ((jfunc *) p)->jenv; + q->func = q->jenv->NewGlobalRef(((jfunc *) p)->func); + q->method = ((jfunc *) p)->method; + return (void *) q; +} + +static double func_java(unsigned n, const double *x, double *grad, void *f) +{ + JNIEnv *jenv = ((jfunc *) f)->jenv; + jobject func = ((jfunc *) f)->func; + jmethodID method = ((jfunc *) f)->method; + + jdoubleArray jx = jenv->NewDoubleArray(n); + if (!jx || jenv->ExceptionCheck()) { + throw nlopt::forced_stop(); + } + jenv->SetDoubleArrayRegion(jx, 0, n, x); + jdoubleArray jgrad = (jdoubleArray) 0; + if (grad) { + jgrad = jenv->NewDoubleArray(n); + if (!jgrad || jenv->ExceptionCheck()) { + jenv->DeleteLocalRef(jx); + throw nlopt::forced_stop(); + } + jenv->SetDoubleArrayRegion(jgrad, 0, n, grad); + } + + jdouble res = jenv->CallDoubleMethod(func, method, jx, jgrad); + jenv->DeleteLocalRef(jx); + + if (jenv->ExceptionCheck()) { + if (jgrad) { + jenv->DeleteLocalRef(jgrad); + } + throw nlopt::forced_stop(); + } + + if (grad) { + jenv->GetDoubleArrayRegion(jgrad, 0, n, grad); + jenv->DeleteLocalRef(jgrad); + } + + return res; +} + +static void mfunc_java(unsigned m, double *result, + unsigned n, const double *x, double *grad, void *f) +{ + JNIEnv *jenv = ((jfunc *) f)->jenv; + jobject func = 
((jfunc *) f)->func; + jmethodID method = ((jfunc *) f)->method; + + jdoubleArray jx = jenv->NewDoubleArray(n); + if (!jx || jenv->ExceptionCheck()) { + throw nlopt::forced_stop(); + } + jenv->SetDoubleArrayRegion(jx, 0, n, x); + jdoubleArray jgrad = (jdoubleArray) 0; + if (grad) { + jgrad = jenv->NewDoubleArray(m * n); + if (!jgrad || jenv->ExceptionCheck()) { + jenv->DeleteLocalRef(jx); + throw nlopt::forced_stop(); + } + jenv->SetDoubleArrayRegion(jgrad, 0, m * n, grad); + } + + jdoubleArray res = (jdoubleArray) jenv->CallObjectMethod(func, method, jx, jgrad); + jenv->DeleteLocalRef(jx); + + if (!res || jenv->ExceptionCheck()) { + if (jgrad) { + jenv->DeleteLocalRef(jgrad); + } + if (res) { + jenv->DeleteLocalRef(res); + } + throw nlopt::forced_stop(); + } + + jenv->GetDoubleArrayRegion(res, 0, m, result); + jenv->DeleteLocalRef(res); + + if (grad) { + jenv->GetDoubleArrayRegion(jgrad, 0, m * n, grad); + jenv->DeleteLocalRef(jgrad); + } +} +%} + +%typemap(jni)(nlopt::func f, void *f_data, nlopt_munge md, nlopt_munge mc) "jobject" +%typemap(jtype)(nlopt::func f, void *f_data, nlopt_munge md, nlopt_munge mc) "java.lang.Object" +%typemap(jstype)(nlopt::func f, void *f_data, nlopt_munge md, nlopt_munge mc) "Func" +%typemap(in)(nlopt::func f, void *f_data, nlopt_munge md, nlopt_munge mc) { + $1 = func_java; + jfunc jf = {jenv, $input, jenv->GetMethodID(jenv->FindClass("nlopt/Opt$Func"), "apply", "([D[D)D")}; + $2 = dup_jfunc((void *) &jf); + $3 = free_jfunc; + $4 = dup_jfunc; +} +%typemap(javain)(nlopt::func f, void *f_data, nlopt_munge md, nlopt_munge mc) "$javainput" + +%typemap(jni)(nlopt::mfunc mf, void *f_data, nlopt_munge md, nlopt_munge mc) "jobject" +%typemap(jtype)(nlopt::mfunc mf, void *f_data, nlopt_munge md, nlopt_munge mc) "java.lang.Object" +%typemap(jstype)(nlopt::mfunc mf, void *f_data, nlopt_munge md, nlopt_munge mc) "MFunc" +%typemap(in)(nlopt::mfunc mf, void *f_data, nlopt_munge md, nlopt_munge mc) { + $1 = mfunc_java; + jfunc jf = {jenv, $input, jenv->GetMethodID(jenv->FindClass("nlopt/Opt$MFunc"), "apply", "([D[D)[D")}; + $2 = dup_jfunc((void *) &jf); + $3 = free_jfunc; + $4 = dup_jfunc; +} +%typemap(javain)(nlopt::mfunc mf, void *f_data, nlopt_munge md, nlopt_munge mc) "$javainput" + +// Make exception classes Java-compliant +%rename(ForcedStopException) nlopt::forced_stop; +%typemap(javabase) nlopt::forced_stop "java.lang.RuntimeException" +%typemap(javabody) nlopt::forced_stop "" +%typemap(javadestruct) nlopt::forced_stop "" +%typemap(javafinalize) nlopt::forced_stop "" +%ignore nlopt::forced_stop::forced_stop; +%extend nlopt::forced_stop { + %proxycode { + public ForcedStopException(String message) { + super(message); + } + } +} +%rename(RoundoffLimitedException) nlopt::roundoff_limited; +%typemap(javabase) nlopt::roundoff_limited "java.lang.RuntimeException" +%typemap(javabody) nlopt::roundoff_limited "" +%typemap(javadestruct) nlopt::roundoff_limited "" +%typemap(javafinalize) nlopt::roundoff_limited "" +%ignore nlopt::roundoff_limited::roundoff_limited; +%extend nlopt::roundoff_limited { + %proxycode { + public RoundoffLimitedException(String message) { + super(message); + } + } +} + +// Map exceptions +%typemap(throws) std::bad_alloc %{ + SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, $1.what()); + return $null; +%} + +%typemap(throws) nlopt::forced_stop %{ + if (!jenv->ExceptionCheck()) { + jclass excep = jenv->FindClass("nlopt/ForcedStopException"); + if (excep) + jenv->ThrowNew(excep, $1.what()); + } + return $null; +%} + +%typemap(throws) 
nlopt::roundoff_limited %{ + if (!jenv->ExceptionCheck()) { + jclass excep = jenv->FindClass("nlopt/RoundoffLimitedException"); + if (excep) + jenv->ThrowNew(excep, $1.what()); + } + return $null; +%} + diff --git a/src/swig/nlopt.i b/src/swig/nlopt.i index 175b5b8f..ae5c68a9 100644 --- a/src/swig/nlopt.i +++ b/src/swig/nlopt.i @@ -1,4 +1,5 @@ // -*- C++ -*- +// kate: hl c++ %define DOCSTRING "NLopt is a multi-language library for nonlinear optimization (local or @@ -7,7 +8,11 @@ constraints). Complete documentation, including a Python tutorial, can be found at the NLopt web page: http://ab-initio.mit.edu/nlopt" %enddef +#ifdef SWIGJAVA +%module(docstring=DOCSTRING) NLopt +#else %module(docstring=DOCSTRING) nlopt +#endif %{ #include "nlopt.hpp" %} @@ -18,15 +23,24 @@ can be found at the NLopt web page: http://ab-initio.mit.edu/nlopt" %include "std_except.i" %include "std_vector.i" namespace std { +#ifdef SWIGJAVA + %template(DoubleVector) vector<double>; +#else %template(nlopt_doublevector) vector<double>; +#endif }; %ignore nlopt::opt::myfunc_data; %ignore nlopt::opt::operator=; // dont use the in-place version of get_initial_step +%ignore nlopt_get_initial_step; %ignore nlopt::opt::get_initial_step; +#ifdef SWIGJAVA +%rename(getInitialStep) nlopt::opt::get_initial_step_; +#else %rename(get_initial_step) nlopt::opt::get_initial_step_; +#endif // prepend "nlopt_" in Guile to substitute for namespace #if defined(SWIGGUILE) @@ -53,4 +67,8 @@ namespace std { %include "nlopt-python.i" #endif +#ifdef SWIGJAVA +%include "nlopt-java.i" +#endif + %include "nlopt.hpp" diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 649b24f5..03df03ff 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -64,8 +64,8 @@ endforeach () if (Python_NumPy_FOUND AND (SWIG_FOUND OR (EXISTS ${PROJECT_SOURCE_DIR}/src/swig/nlopt-python.cpp))) set (PYINSTALLCHECK_ENVIRONMENT "LD_LIBRARY_PATH=${PROJECT_BINARY_DIR}/src/swig" - "PYTHONPATH=${PROJECT_BINARY_DIR}/src/swig" - ) + "PYTHONPATH=$" + ) foreach (algo_index 23 24 30 39) add_test (NAME test_python${algo_index} COMMAND ${Python_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/t_python.py ${algo_index}) @@ -76,6 +76,25 @@ if (Python_NumPy_FOUND AND (SWIG_FOUND OR (EXISTS ${PROJECT_SOURCE_DIR}/src/swig set_tests_properties (test_memoize PROPERTIES ENVIRONMENT "${PYINSTALLCHECK_ENVIRONMENT}") endif () +# The test uses lambdas and the :: operator, which were introduced in Java 1.8. +# The binding itself should also compile with more ancient Java versions that +# have already reached their end of life, but it is not worth uglifying the test +# code for them, because people will then cargo-cult the legacy boilerplate.
+if (JNI_FOUND AND Java_FOUND AND SWIG_FOUND AND NOT Java_VERSION VERSION_LESS 1.8) + include (UseJava) + add_jar (t_java SOURCES t_java.java INCLUDE_JARS nlopt_jar ENTRY_POINT t_java) + get_property (t_java_jar TARGET t_java PROPERTY JAR_FILE) + get_property (nlopt_jar_jar TARGET nlopt_jar PROPERTY JAR_FILE) + set (nlopt_java_dir $) + foreach (algo_index 23 24 30 39) + add_test (NAME test_java${algo_index} + COMMAND ${Java_JAVA_EXECUTABLE} + -cp "$" + -Djava.library.path=${nlopt_java_dir} t_java + ${algo_index}) + endforeach() +endif () + if (OCTAVE_FOUND) add_test (NAME test_octave COMMAND ${OCTAVE_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/t_octave.m ${PROJECT_SOURCE_DIR}/src/octave ${PROJECT_BINARY_DIR}/src/octave) endif () @@ -86,7 +105,7 @@ endif () if (GUILE_FOUND AND (SWIG_FOUND OR (EXISTS ${PROJECT_SOURCE_DIR}/src/swig/nlopt-guile.cpp))) set (GUILECHECK_ENVIRONMENT "LD_LIBRARY_PATH=${PROJECT_BINARY_DIR}/src/swig" - "GUILE_LOAD_PATH=${PROJECT_BINARY_DIR}/src/swig" + "GUILE_LOAD_PATH=$" "GUILE_AUTO_COMPILE=0") add_test (NAME test_guile COMMAND ${GUILE_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/t_guile.scm diff --git a/test/t_java.java b/test/t_java.java new file mode 100644 index 00000000..b84c0027 --- /dev/null +++ b/test/t_java.java @@ -0,0 +1,43 @@ +import nlopt.*; + +public class t_java { + private static double myfunc(double[] x, double[] grad) { + if (grad != null) { + grad[0] = 0.0; + grad[1] = 0.5 / Math.sqrt(x[1]); + } + return Math.sqrt(x[1]); + } + + private static double myconstraint(double[] x, double[] grad, double a, + double b) { + if (grad != null) { + grad[0] = 3 * a * (a*x[0] + b) * (a*x[0] + b); + grad[1] = -1.0; + } + return ((a*x[0] + b) * (a*x[0] + b) * (a*x[0] + b) - x[1]); + } + + public static void main(String[] args) { + System.loadLibrary("nloptjni"); + Algorithm algo = args.length < 1 ? Algorithm.LD_MMA + : Algorithm.swigToEnum(Integer.parseInt(args[0])); + Opt opt = new Opt(algo, 2); + System.out.println("algo: " + opt.getAlgorithmName()); + opt.setLowerBounds(new DoubleVector(Double.NEGATIVE_INFINITY, 1e-6)); + opt.setMinObjective(t_java::myfunc); + opt.addInequalityConstraint((x, grad) -> myconstraint(x, grad, 2, 0), 1e-8); + opt.addInequalityConstraint((x, grad) -> myconstraint(x, grad, -1, 1), + 1e-8); + opt.setXtolRel(1e-4); + DoubleVector x0 = new DoubleVector(1.234, 5.678); + DoubleVector x = opt.optimize(x0); + double minf = opt.lastOptimumValue(); + System.out.println("optimum at " + x); + System.out.println("minimum value: " + minf); + System.out.println("result code: " + opt.lastOptimizeResult()); + System.out.println("nevals: " + opt.getNumevals()); + System.out.println("initial step: " + opt.getInitialStep(x0)); + assert Math.abs(minf - 0.544331) < 1e-3: "wrong optimum"; + } +}