diff --git a/Project.toml b/Project.toml index 9e040d7d24e..f4f7ba1ede1 100644 --- a/Project.toml +++ b/Project.toml @@ -5,6 +5,7 @@ version = "1.13.0" [deps] LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" +MacroTools = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" MutableArithmetics = "d8a4904e-b15c-11e9-3269-09a3773c0cb0" OrderedCollections = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" @@ -20,8 +21,9 @@ JuMPDimensionalDataExt = "DimensionalData" [compat] DimensionalData = "0.24" -MathOptInterface = "1.18" -MutableArithmetics = "1" +MacroTools = "0.5" +MathOptInterface = "1.19" +MutableArithmetics = "1.1" OrderedCollections = "1" SnoopPrecompile = "1" julia = "1.6" diff --git a/docs/Project.toml b/docs/Project.toml index 9f864d9f940..99d8107c20b 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -20,11 +20,13 @@ Markdown = "d6f4376e-aef5-505a-96c1-9c027394607a" MarkdownAST = "d0879d2d-cac2-40c8-9cee-1863dc0c7391" MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" MultiObjectiveAlgorithms = "0327d340-17cd-11ea-3e99-2fd5d98cecda" +PATHSolver = "f5f7c340-0bb3-5c69-969a-41884d311d1b" Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" SCS = "c946c3f1-0d1f-5ce8-9dea-7daa1f7e2d13" SQLite = "0aa819cd-b072-5ff4-a722-6bc24af294d9" +SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b" StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd" TOML = "fa267f1f-6049-4f14-aa54-33bafae1ed76" Tables = "bd369af6-aec1-5ad0-b16a-f7cc5008161c" @@ -48,6 +50,7 @@ JSONSchema = "1" Literate = "2.8" MathOptInterface = "=1.19.0" MultiObjectiveAlgorithms = "=1.2.0" +PATHSolver = "=1.6.0" Plots = "1" SCS = "=1.3.0" SQLite = "1" diff --git a/docs/make.jl b/docs/make.jl index 03c07a55410..fc89f566ef3 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -308,6 +308,7 @@ const _PAGES = [ "tutorials/nonlinear/user_defined_hessians.md", "tutorials/nonlinear/nested_problems.md", "tutorials/nonlinear/querying_hessians.md", + "tutorials/nonlinear/complementarity.md", ], "Conic programs" => [ "tutorials/conic/introduction.md", @@ -345,6 +346,7 @@ const _PAGES = [ "manual/nlp.md", "manual/callbacks.md", "manual/complex.md", + "manual/nonlinear.md", ], jump_api_reference, "Background Information" => diff --git a/docs/src/manual/expressions.md b/docs/src/manual/expressions.md index 3afff752ee2..59944f0c24a 100644 --- a/docs/src/manual/expressions.md +++ b/docs/src/manual/expressions.md @@ -195,7 +195,7 @@ julia> @variable(model, y) y julia> ex = @expression(model, x^2 + 2 * x * y + y^2 + x + y - 1) -x² + 2 y*x + y² + x + y - 1 +x² + 2 x*y + y² + x + y - 1 ``` ### Operator overloading @@ -326,10 +326,8 @@ julia> coefficient(ex, x) ## Nonlinear expressions -Nonlinear expressions can be constructed only using the [`@NLexpression`](@ref) -macro and can be used only in [`@NLobjective`](@ref), [`@NLconstraint`](@ref), -and other [`@NLexpression`](@ref)s. For more details, see the [Nonlinear -Modeling](@ref) section. +Nonlinear expressions in JuMP are represented by a [`NonlinearExpr`](@ref) +object. See [Nonlinear expressions in detail](@ref) for more details. ## Initializing arrays diff --git a/docs/src/manual/nlp.md b/docs/src/manual/nlp.md index 56bccdc92f7..28029baac0d 100644 --- a/docs/src/manual/nlp.md +++ b/docs/src/manual/nlp.md @@ -8,6 +8,13 @@ DocTestFilters = [r"≤|<=", r"≥|>=", r" == | = ", r" ∈ | in ", r"MathOptInt # Nonlinear Modeling +!!! 
warning + This page describes the legacy nonlinear interface to JuMP. It has a number + of quirks and limitations that prompted the development of a new nonlinear + interface. The new interface is documented at [Nonlinear Modeling](@ref new_nonlinear_interface). + This legacy interface will remain for all future `v1.X` releases of JuMP. + The two nonlinear interfaces cannot be combined. + JuMP has support for general smooth nonlinear (convex and nonconvex) optimization problems. JuMP is able to provide exact, sparse second-order derivatives to solvers. This information can improve solver accuracy and @@ -172,22 +179,7 @@ julia> value.(p) 3.0 ``` -Nonlinear parameters can be used *within nonlinear macros* only: - -```jldoctest nonlinear_parameters -julia> @objective(model, Max, p[1] * x) -ERROR: MethodError: no method matching *(::NonlinearParameter, ::VariableRef) -[...] - -julia> @NLobjective(model, Max, p[1] * x) - -julia> @expression(model, my_expr, p[1] * x^2) -ERROR: MethodError: no method matching *(::NonlinearParameter, ::QuadExpr) -[...] - -julia> @NLexpression(model, my_nl_expr, p[1] * x^2) -subexpression[1]: parameter[1] * x ^ 2.0 -``` +Nonlinear parameters must be used *within nonlinear macros* only. ### When to use a parameter @@ -220,27 +212,6 @@ nothing #hide The syntax accepted in nonlinear macros is more restricted than the syntax for linear and quadratic macros. We note some important points below. -### No operator overloading - -There is no operator overloading provided to build up nonlinear expressions. -For example, if `x` is a JuMP variable, the code `3x` will return an -`AffExpr` object that can be used inside of future expressions and linear -constraints. However, the code `sin(x)` is an error. All nonlinear -expressions must be inside of macros. - -```jldoctest -julia> model = Model(); - -julia> @variable(model, x); - -julia> expr = sin(x) + 1 -ERROR: sin is not defined for type AbstractVariableRef. Are you trying to build a nonlinear problem? Make sure you use @NLconstraint/@NLobjective. -[...] - -julia> expr = @NLexpression(model, sin(x) + 1) -subexpression[1]: sin(x) + 1.0 -``` - ### Scalar operations only Except for the splatting syntax discussed below, all expressions @@ -308,7 +279,7 @@ the function is not available in the scope of the nonlinear expression. !!! warning User-defined functions must return a scalar output. For a work-around, see - [User-defined functions with vector outputs](@ref). + [User-defined operators with vector outputs](@ref). ### Automatic differentiation diff --git a/docs/src/manual/nonlinear.md b/docs/src/manual/nonlinear.md new file mode 100644 index 00000000000..3c3f665108a --- /dev/null +++ b/docs/src/manual/nonlinear.md @@ -0,0 +1,549 @@ +```@meta +CurrentModule = JuMP +DocTestSetup = quote + using JuMP +end +DocTestFilters = [r"≤|<=", r"≥|>=", r" == | = ", r" ∈ | in ", r"MathOptInterface|MOI"] +``` + +# [Nonlinear Modeling](@id new_nonlinear_interface) + +!!! warning + This page describes a new nonlinear interface to JuMP. It replaces the + legacy `@NL` interface, which is documented at [Nonlinear Modeling](@ref). + The API described below is stable, and it will not break with future 1.X + releases of JuMP. However, solver support may be limited, and there may be + gaps in functionality compared with the legacy interface. To report a bug, + or request a missing feature, please [open an issue](https://github.com/jump-dev/JuMP.jl/issues/new/choose). + +JuMP has support for nonlinear (convex and nonconvex) optimization problems. 
+JuMP is able to automatically provide exact, sparse second-order derivatives to +solvers. This information can improve solver accuracy and performance. + +## Set a nonlinear objective + +Use [`@objective`](@ref) to set a nonlinear objective. + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x[1:2]); + +julia> @objective(model, Min, exp(x[1]) - sqrt(x[2])) +exp(x[1]) - sqrt(x[2]) +``` + +To modify a nonlinear objective, call [`@objective`](@ref) again. + +## Add a nonlinear constraint + +Use [`@constraint`](@ref) to add a nonlinear constraint. + +```jldoctest nonlinear_constraint +julia> model = Model(); + +julia> @variable(model, x[1:2]); + +julia> @constraint(model, exp(x[1]) <= 1) +exp(x[1]) - 1.0 ≤ 0 + +julia> @constraint(model, con[i = 1:2], 2^x[i] >= i) +2-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarNonlinearFunction, MathOptInterface.GreaterThan{Float64}}, ScalarShape}}: + con[1] : (2.0 ^ x[1]) - 1.0 ≥ 0 + con[2] : (2.0 ^ x[2]) - 2.0 ≥ 0 +``` + +Delete a nonlinear constraint using [`delete`](@ref): +```jldoctest nonlinear_constraint +julia> delete(model, con[1]) +``` + +## Create a nonlinear expression + +Use [`@expression`](@ref) to create nonlinear expression objects: + +```jldoctest nl_expression +julia> model = Model(); + +julia> @variable(model, x[1:2]); + +julia> expr = @expression(model, exp(x[1]) + sqrt(x[2])) +exp(x[1]) + sqrt(x[2]) + +julia> my_anon_expr = @expression(model, [i = 1:2], sin(x[i])) +2-element Vector{NonlinearExpr}: + sin(x[1]) + sin(x[2]) + +julia> @expression(model, my_expr[i = 1:2], sin(x[i])) +2-element Vector{NonlinearExpr}: + sin(x[1]) + sin(x[2]) +``` + +A [`NonlinearExpr`](@ref) can be used in [`@objective`](@ref), +[`@constraint`](@ref), and even nested in other [`@expression`](@ref)s. + +```jldoctest nl_expression +julia> @objective(model, Min, expr^2 + 1) +((exp(x[1]) + sqrt(x[2])) ^ 2.0) + 1.0 + +julia> @constraint(model, [i = 1:2], my_expr[i] <= i) +2-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarNonlinearFunction, MathOptInterface.LessThan{Float64}}, ScalarShape}}: + sin(x[1]) - 1.0 ≤ 0 + sin(x[2]) - 2.0 ≤ 0 + +julia> @expression(model, nested[i = 1:2], sin(my_expr[i])) +2-element Vector{NonlinearExpr}: + sin(sin(x[1])) + sin(sin(x[2])) +``` + +Use [`value`](@ref) to query the value of a nonlinear expression: + +```jldoctest nl_expression +julia> set_start_value(x[1], 1.0) + +julia> value(start_value, nested[1]) +0.7456241416655579 + +julia> sin(sin(1.0)) +0.7456241416655579 +``` + +## Nonlinear expressions in detail + +Nonlinear expressions in JuMP are represented by a [`NonlinearExpr`](@ref) +object. + +### Constructors + +Nonlinear expressions can be created using the [`NonlinearExpr`](@ref) +constructors: + +```jldoctest nonlinear_expressions +julia> model = Model(); + +julia> @variable(model, x); + +julia> expr = NonlinearExpr(:sin, Any[x]) +sin(x) +``` + +or via operator overloading: + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> expr = sin(x) +sin(x) +``` + +### Supported arguments + +Nonlinear expressions can contain a mix of numbers, [`AffExpr`](@ref), +[`QuadExpr`](@ref), and other [`NonlinearExpr`](@ref): + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> aff = x + 1; + +julia> quad = x^2 + x; + +julia> expr = cos(x) * sin(quad) + aff +(cos(x) * sin(x² + x)) + (x + 1) +``` + +### Supported operators + +The list of supported operators may vary between solvers. 
Given an optimizer,
+query the list of supported operators using
+[`MOI.ListOfSupportedNonlinearOperators`](@ref):
+```jldoctest; filter=[r":.+", r"[0-9]+\-element"]
+julia> import Ipopt
+
+julia> import MathOptInterface as MOI
+
+julia> MOI.get(Ipopt.Optimizer(), MOI.ListOfSupportedNonlinearOperators())
+85-element Vector{Symbol}:
+ :+
+ :-
+ :abs
+ :sqrt
+ :cbrt
+ :abs2
+ :inv
+ :log
+ :log10
+ :log2
+ ⋮
+ :min
+ :max
+ :&&
+ :||
+ :<=
+ :(==)
+ :>=
+ :<
+ :>
+```
+
+In some univariate cases, the operator is defined in [`SpecialFunctions.jl`](https://github.com/JuliaMath/SpecialFunctions.jl).
+To use these functions, you must explicitly import `SpecialFunctions.jl`:
+```jldoctest
+julia> import Ipopt
+
+julia> op = MOI.get(Ipopt.Optimizer(), MOI.ListOfSupportedNonlinearOperators());
+
+julia> :erfcx in op
+true
+
+julia> :dawson in op
+true
+
+julia> import SpecialFunctions
+
+julia> model = Model();
+
+julia> @variable(model, x)
+x
+
+julia> @expression(model, SpecialFunctions.erfcx(x))
+erfcx(x)
+
+julia> @expression(model, SpecialFunctions.dawson(x))
+dawson(x)
+```
+
+### Limitations
+
+Some nonlinear expressions cannot be created via operator overloading. For
+example, to minimize the likelihood of bugs in user code, we have not overloaded
+comparisons such as `<` and `>=` between JuMP objects:
+
+```jldoctest
+julia> model = Model();
+
+julia> @variable(model, x);
+
+julia> x < 1
+ERROR: Cannot evaluate `<` between a variable and a number.
+[...]
+```
+
+Instead, wrap the expression in the [`@expression`](@ref) macro:
+```jldoctest
+julia> model = Model();
+
+julia> @variable(model, x);
+
+julia> expr = @expression(model, x < 1)
+x < 1
+```
+
+For technical reasons, other operators that are not overloaded include `||`,
+`&&`, and `ifelse`.
+
+```jldoctest
+julia> model = Model();
+
+julia> @variable(model, x);
+
+julia> expr = @expression(model, ifelse(x < -1 || x >= 1, x^2, 0.0))
+ifelse((x < -1) || (x >= 1), x², 0.0)
+```
+
+As an alternative, use the `JuMP.op_` functions, which fall back to the
+various comparison and logical operators:
+```jldoctest
+julia> model = Model();
+
+julia> @variable(model, x);
+
+julia> expr = op_ifelse(
+           op_or(op_strictly_less_than(x, -1), op_greater_than_or_equal_to(x, 1)),
+           x^2,
+           0.0,
+       )
+ifelse((x < -1) || (x >= 1), x², 0.0)
+```
+
+The available functions are:
+
+| JuMP function                          | Julia function |
+| :------------------------------------ | :------------- |
+| [`op_ifelse`](@ref)                    | `ifelse`       |
+| [`op_and`](@ref)                       | `&&`           |
+| [`op_or`](@ref)                        | `\|\|`         |
+| [`op_greater_than_or_equal_to`](@ref)  | `>=`           |
+| [`op_less_than_or_equal_to`](@ref)     | `<=`           |
+| [`op_equal_to`](@ref)                  | `==`           |
+| [`op_strictly_greater_than`](@ref)     | `>`            |
+| [`op_strictly_less_than`](@ref)        | `<`            |
+
+### Fields
+
+Each [`NonlinearExpr`](@ref) has two fields.
+
+The `.head` field is a `Symbol` that represents the operator being called:
+
+```jldoctest nonlinear_expressions
+julia> expr.head
+:sin
+```
+
+The `.args` field is a `Vector{Any}` containing the arguments to the operator:
+
+```jldoctest nonlinear_expressions
+julia> expr.args
+1-element Vector{Any}:
+ x
+```
+
+## [User-defined operators](@id jump_user_defined_operators)
+
+In addition to a standard list of univariate and multivariate operators
+recognized by the `MOI.Nonlinear` submodule, JuMP supports *user-defined*
+Julia operators.
+
+!!! warning
+    User-defined operators must return a scalar output. For a work-around, see
+    [User-defined operators with vector outputs](@ref).
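+
+As a quick illustration of this requirement (a minimal sketch; the names `f_1`
+and `f_2` are hypothetical), split a vector-valued function into scalar
+components, each of which may then be added with [`@operator`](@ref):
+
+```julia
+f(x) = [x^2, x^3]   # Not suitable: returns a vector.
+f_1(x) = f(x)[1]    # Suitable: returns a scalar.
+f_2(x) = f(x)[2]    # Suitable: returns a scalar.
+```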
+ +### Add an operator + +Add a user-defined operator using the [`@operator`](@ref) macro: + +```@repl +using JuMP +square(x) = x^2 +f(x, y) = (x - 1)^2 + (y - 2)^2 +model = Model(); +@operator(model, op_square, 1, square) +@operator(model, op_f, 2, f) +@variable(model, x[1:2]); +@objective(model, Min, op_f(x[1], op_square(x[2]))) +``` + +The arguments to [`@operator`](@ref) are: + + 1. The model to which the operator is added. + 2. A Julia symbol object which serves as the name of the user-defined operator + in JuMP expressions. This name must not be the same as that of the function. + 3. The number of scalar input arguments that the function takes. + 4. A Julia method which computes the function. + +!!! warning + User-defined operators cannot be deleted. + +You can obtain a reference to the operator using the `model[:key]` syntax: + +```@repl +using JuMP +square(x) = x^2 +model = Model(); +@operator(model, op_square, 1, square) +op_square_2 = model[:op_square] +``` + +### Add an operator without macros + +The [`@operator`](@ref) macro is syntactic sugar for [`add_nonlinear_operator`](@ref). +Thus, the non-macro version of the preceding example is: + +```@repl +using JuMP +square(x) = x^2 +f(x, y) = (x - 1)^2 + (y - 2)^2 +model = Model(); +op_square = add_nonlinear_operator(model, 1, square; name = :op_square) +model[:op_square] = op_square +op_f = add_nonlinear_operator(model, 2, f; name = :op_f) +model[:op_f] = op_f +@variable(model, x[1:2]); +@objective(model, Min, op_f(x[1], op_square(x[2]))) +``` + +### Operators with the same name as an existing function + +A common error encountered is the following: +```jldoctest nonlinear_invalid_redefinition +julia> using JuMP + +julia> model = Model(); + +julia> f(x) = x^2 +f (generic function with 1 method) + +julia> @operator(model, f, 1, f) +ERROR: Unable to add the nonlinear operator `:f` with the same name as +an existing function. +[...] +``` +This error occurs because `@operator(model, f, 1, f)` is equivalent to: +```julia +julia> f = add_nonlinear_operator(model, 1, f; name = :f) +``` +but `f` already exists as a Julia function. + +If you evaluate the function without adding it as an operator, JuMP will trace +the function using operator overloading: +```jldoctest nonlinear_invalid_redefinition +julia> @variable(model, x); + +julia> f(x) +x² +``` + +To force JuMP to treat `f` as a user-defined operator and not trace it, add +the operator using [`add_nonlinear_operator`](@ref) and define a new method +which manually creates a [`NonlinearExpr`](@ref): +```jldoctest nonlinear_invalid_redefinition +julia> _ = add_nonlinear_operator(model, 1, f; name = :f) +NonlinearOperator(:f, f) + +julia> f(x::AbstractJuMPScalar) = NonlinearExpr(:f, Any[x]) +f (generic function with 2 methods) + +julia> @expression(model, log(f(x))) +log(f(x)) +``` + +### Gradients and Hessians + +By default, JuMP will use automatic differentiation to compute the gradient and +Hessian of user-defined operators. If your function is not amenable to +automatic differentiation, or you can compute analytic derivatives, you may pass +additional arguments to [`@operator`](@ref) to compute the first- and +second-derivatives. + +#### Univariate functions + +For univariate functions, a gradient function `∇f` returns a number that +represents the first-order derivative. 
You may, in addition, pass a third +function which returns a number representing the second-order derivative: +```@repl +using JuMP +f(x) = x^2 +∇f(x) = 2x +∇²f(x) = 2 +model = Model(); +@operator(model, op_f, 1, f, ∇f, ∇²f) # Providing ∇²f is optional +@variable(model, x) +@objective(model, Min, op_f(x)) +``` + +#### Multivariate functions + +For multivariate functions, the gradient function `∇f` must take an +`AbstractVector` as the first argument that is filled in-place. The Hessian +function, `∇²f`, must take an `AbstractMatrix` as the first argument, the +lower-triangular of which is filled in-place: +```@repl +using JuMP +f(x...) = (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2 +function ∇f(g::AbstractVector{T}, x::T...) where {T} + g[1] = 400 * x[1]^3 - 400 * x[1] * x[2] + 2 * x[1] - 2 + g[2] = 200 * (x[2] - x[1]^2) + return +end +function ∇²f(H::AbstractMatrix{T}, x::T...) where {T} + H[1, 1] = 1200 * x[1]^2 - 400 * x[2] + 2 + # H[1, 2] = -400 * x[1] <-- Not needed. Fill the lower-triangular only. + H[2, 1] = -400 * x[1] + H[2, 2] = 200.0 + return +end +model = Model(); +@operator(model, rosenbrock, 2, f, ∇f, ∇²f) # Providing ∇²f is optional +@variable(model, x[1:2]) +@objective(model, Min, rosenbrock(x[1], x[2])) +``` + +You may assume the Hessian matrix `H` is initialized with zeros, and because `H` +is symmetric, you need only to fill in the non-zero lower-triangular +terms. The matrix type passed in as `H` depends on the automatic differentiation +system, so make sure the first argument to the Hessian function supports an +`AbstractMatrix` (it may be something other than `Matrix{Float64}`). Moreover, +you may assume only that `H` supports `size(H)` and `setindex!`. Finally, the +matrix is treated as dense, so the performance will be poor on functions with +high-dimensional input. + +### User-defined operators with vector inputs + +User-defined operators which take vectors as input arguments (for example, +`f(x::Vector)`) are *not* supported. Instead, use Julia's splatting syntax to +create a function with scalar arguments. For example, instead of: +```julia +f(x::Vector) = sum(x[i]^i for i in 1:length(x)) +``` +define: +```julia +f(x...) = sum(x[i]^i for i in 1:length(x)) +``` + +Another approach is to define the splatted function as an anonymous function: +```@repl +using JuMP +model = Model(); +@variable(model, x[1:5]) +f(x::Vector) = sum(x[i]^i for i in 1:length(x)) +@operator(model, op_f, 5, (x...) -> f(collect(x))) +@objective(model, Min, op_f(x...)) +``` + +### Automatic differentiation + +JuMP does not support black-box optimization, so all user-defined operators must +provide derivatives in some form. Fortunately, JuMP supports automatic +differentiation of user-defined operators. + +!!! info + Automatic differentiation is *not* finite differencing. JuMP's automatically + computed derivatives are not subject to approximation error. + +JuMP uses [ForwardDiff.jl](https://github.com/JuliaDiff/ForwardDiff.jl) to +perform automatic differentiation of user-defined operators; see the ForwardDiff.jl +[documentation](https://www.juliadiff.org/ForwardDiff.jl/v0.10.2/user/limitations.html) +for a description of how to write a function suitable for automatic +differentiation. + +#### Common mistakes when writing a user-defined operator + +!!! warning + Get an error like `No method matching Float64(::ForwardDiff.Dual)`? Read + this section, and see the guidelines at [ForwardDiff.jl](https://www.juliadiff.org/ForwardDiff.jl/release-0.10/user/limitations.html). 
+ +The most common error is that your user-defined operator is not generic with +respect to the number type, that is, don't assume that the input to the function +is `Float64`. +```julia +f(x::Float64) = 2 * x # This will not work. +f(x::Real) = 2 * x # This is good. +f(x) = 2 * x # This is also good. +``` + +Another reason you may encounter this error is if you create arrays inside +your function which are `Float64`. +```julia +function bad_f(x...) + y = zeros(length(x)) # This constructs an array of `Float64`! + for i in 1:length(x) + y[i] = x[i]^i + end + return sum(y) +end + +function good_f(x::T...) where {T<:Real} + y = zeros(T, length(x)) # Construct an array of type `T` instead! + for i in 1:length(x) + y[i] = x[i]^i + end + return sum(y) +end +``` diff --git a/docs/src/manual/objective.md b/docs/src/manual/objective.md index 140f7157ae1..8c4e5d3e2be 100644 --- a/docs/src/manual/objective.md +++ b/docs/src/manual/objective.md @@ -64,6 +64,19 @@ julia> @objective(model, Max, x * y + x + y) x*y + x + y ``` +## Set a nonlinear objective + +Use the [`@objective`](@ref) macro to set a nonlinear objective function: + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x <= 1); + +julia> @objective(model, Max, log(x)) +log(x) +``` + ## Query the objective function Use [`objective_function`](@ref) to return the current objective function. diff --git a/docs/src/should_i_use.md b/docs/src/should_i_use.md index 67081ac77e1..19d0f6397d0 100644 --- a/docs/src/should_i_use.md +++ b/docs/src/should_i_use.md @@ -77,9 +77,9 @@ consider using other packages such as: ### Black-box, derivative free, or unconstrained optimization JuMP does support nonlinear programs with constraints and objectives containing -user-defined functions. However, the functions must be automatically +user-defined operators. However, the functions must be automatically differentiable, or need to provide explicit derivatives. (See -[User-defined Functions](@ref) for more information.) +[User-defined operators](@ref jump_user_defined_operators) for more information.) 
If your function is a black-box that is non-differentiable (for example, it is the output of a simulation written in C++), JuMP is not the right tool for the diff --git a/docs/src/tutorials/applications/power_systems.jl b/docs/src/tutorials/applications/power_systems.jl index 983075c8c0b..b5e9a5d289a 100644 --- a/docs/src/tutorials/applications/power_systems.jl +++ b/docs/src/tutorials/applications/power_systems.jl @@ -513,17 +513,17 @@ function solve_nonlinear_economic_dispatch( if silent set_silent(model) end - register(model, :tcf, 1, thermal_cost_function; autodiff = true) + @operator(model, op_tcf, 1, thermal_cost_function) N = length(generators) @variable(model, generators[i].min <= g[i = 1:N] <= generators[i].max) @variable(model, 0 <= w <= scenario.wind) - @NLobjective( + @objective( model, Min, - sum(generators[i].variable_cost * tcf(g[i]) for i in 1:N) + + sum(generators[i].variable_cost * op_tcf(g[i]) for i in 1:N) + wind.variable_cost * w, ) - @NLconstraint(model, sum(g[i] for i in 1:N) + sqrt(w) == scenario.demand) + @constraint(model, sum(g[i] for i in 1:N) + sqrt(w) == scenario.demand) optimize!(model) return ( g = value.(g), diff --git a/docs/src/tutorials/nonlinear/complementarity.jl b/docs/src/tutorials/nonlinear/complementarity.jl new file mode 100644 index 00000000000..a301aab933f --- /dev/null +++ b/docs/src/tutorials/nonlinear/complementarity.jl @@ -0,0 +1,208 @@ +# Copyright 2017, Iain Dunning, Joey Huchette, Miles Lubin, and contributors #src +# This Source Code Form is subject to the terms of the Mozilla Public License #src +# v.2.0. If a copy of the MPL was not distributed with this file, You can #src +# obtain one at https://mozilla.org/MPL/2.0/. #src + +# # Mixed complementarity problems + +# This tutorial is a collection of mixed complementarity programs. + +# This tutorial uses the following packages: + +using JuMP +import PATHSolver +import Test #src + +# ## Background + +# A mixed complementarity problem has the form: +# ```math +# \begin{align} +# F_i(x) \perp x_i & i = 1 \ldots n \\ +# l_i \le x_i \le u_i & i = 1 \ldots n. +# \end{align} +# ``` +# where the ``\perp`` constraint enforces the following relations: +# +# - If ``l_i < x_i < u_i``, then ``F_i(x) = 0`` +# - If ``l_i = x_i``, then ``F_i(x) \ge 0`` +# - If ``x_i = u_i``, then ``F_i(x) \le 0`` + +# You may have seen a complementarity problem written as +# ``0 \le F(x) \perp x \ge 0``. This is a special case of a mixed +# complementarity problem in which ``l_i = 0`` and ``u_i = \infty``. + +# Importantly, a mixed complementarity problem does not have an objective, and +# no other constraint types are present. + +# ## Linear complementarity + +# Form a mixed complementarity problem using the perp symbol `⟂` (type +# `\perp` in the REPL). + +M = [0 0 -1 -1; 0 0 1 -2; 1 -1 2 -2; 1 2 -2 4] +q = [2, 2, -2, -6] +model = Model(PATHSolver.Optimizer) +set_silent(model) +@variable(model, 0 <= x[1:4] <= 10, start = 0) +@constraint(model, M * x + q ⟂ x) +optimize!(model) +Test.@test value.(x) ≈ [2.8, 0.0, 0.8, 1.2] #src +value.(x) + +# ## Other ways of writing linear complementarity problems + +# You do not need to use a single vector of variables, and the complementarity +# constraints can be given in any order. In addition, you can use the perp +# symbol, the `complements(F, x)` syntax, or the [`MOI.Complements`](@ref) set. 
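+
+# (A clarifying note on dimensions: `MOI.Complements(n)` takes the concatenated
+# vector `[F; x]`, so `n` is the total length of that vector and must be even.
+# In the constraint below, `[-y - z + 2, w] in MOI.Complements(2)` pairs the
+# scalar expression `-y - z + 2` with the scalar variable `w`.)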
+
+model = Model(PATHSolver.Optimizer)
+set_silent(model)
+@variable(model, 0 <= w <= 10, start = 0)
+@variable(model, 0 <= x <= 10, start = 0)
+@variable(model, 0 <= y <= 10, start = 0)
+@variable(model, 0 <= z <= 10, start = 0)
+@constraint(model, complements(y - 2z + 2, x))
+@constraint(model, [-y - z + 2, w] in MOI.Complements(2))
+@constraint(model, w + 2x - 2y + 4z - 6 ⟂ z)
+@constraint(model, w - x + 2y - 2z - 2 ⟂ y)
+optimize!(model)
+Test.@test value.([w, x, y, z]) ≈ [2.8, 0.0, 0.8, 1.2] #src
+value.([w, x, y, z])
+
+# ## Transportation
+
+# This example is a reformulation of the transportation problem from Chapter
+# 3.3 of Dantzig, G.B. (1963). _Linear Programming and Extensions_. Princeton
+# University Press, Princeton, New Jersey. It is based on the GAMS model
+# [`gamslib_transmcp`](https://www.gams.com/latest/gamslib_ml/libhtml/gamslib_transmcp.html).
+
+capacity = Dict("seattle" => 350, "san-diego" => 600)
+demand = Dict("new-york" => 325, "chicago" => 300, "topeka" => 275)
+cost = Dict(
+    ("seattle" => "new-york") => 90 * 2.5 / 1_000,
+    ("seattle" => "chicago") => 90 * 1.7 / 1_000,
+    ("seattle" => "topeka") => 90 * 1.8 / 1_000,
+    ("san-diego" => "new-york") => 90 * 2.5 / 1_000,
+    ("san-diego" => "chicago") => 90 * 1.8 / 1_000,
+    ("san-diego" => "topeka") => 90 * 1.4 / 1_000,
+)
+plants, markets = keys(capacity), keys(demand)
+model = Model(PATHSolver.Optimizer)
+set_silent(model)
+@variable(model, w[i in plants] >= 0)
+@variable(model, p[j in markets] >= 0)
+@variable(model, x[i in plants, j in markets] >= 0)
+@constraints(
+    model,
+    begin
+        [i in plants, j in markets], w[i] + cost[i=>j] - p[j] ⟂ x[i, j]
+        [i in plants], capacity[i] - sum(x[i, :]) ⟂ w[i]
+        [j in markets], sum(x[:, j]) - demand[j] ⟂ p[j]
+    end
+)
+optimize!(model)
+Test.@test isapprox(value(p["new-york"]), 0.225; atol = 1e-3) #src
+value.(p)
+
+# ## Expected utility of insurance
+
+# This example is taken from a lecture of the course AAE706, given by Thomas F.
+# Rutherford at the University of Wisconsin, Madison. It models the expected
+# coverage of insurance `K` that a rational actor would obtain to insure a risk
+# that occurs with probability `pi` and results in a loss of `L`.
+
+pi = 0.01 # Probability of a bad outcome
+L = 0.5   # Loss with a bad outcome
+γ = 0.02  # Premium for coverage
+σ = 0.5   # Elasticity
+ρ = -1    # Risk exponent
+U(C) = C^ρ / ρ
+MU(C) = C^(ρ - 1)
+model = Model(PATHSolver.Optimizer)
+set_silent(model)
+@variable(model, EU, start = 1)  # Expected utility
+@variable(model, EV, start = 1)  # Equivalent variation in income
+@variable(model, C_G, start = 1) # Consumption on a good day
+@variable(model, C_B, start = 1) # Consumption on a bad day
+@variable(model, K, start = 1)   # Coverage
+@constraints(
+    model,
+    begin
+        (1 - pi) * U(C_G) + pi * U(C_B) - EU ⟂ EU
+        100 * (((1 - pi) * C_G^ρ + pi * C_B^ρ)^(1 / ρ) - 1) - EV ⟂ EV
+        1 - γ * K - C_G ⟂ C_G
+        1 - L + (1 - γ) * K - C_B ⟂ C_B
+        γ * ((1 - pi) * MU(C_G) + pi * MU(C_B)) - pi * MU(C_B) ⟂ K
+    end
+)
+optimize!(model)
+Test.@test isapprox(value(C_G), 0.996; atol = 1e-3) #src
+value(K)
+
+# ## Electricity consumption
+
+# This example is a mixed complementarity formulation of example 3.3.1 from
+# D’Aertrycke, G., Ehrenmann, A., Ralph, D., & Smeers, Y. (2017). [Risk trading
+# in capacity equilibrium models](https://doi.org/10.17863/CAM.17552).
+
+# This example models a risk neutral competitive equilibrium between a producer
+# and a consumer of electricity.
+
+# In our example, we assume a producer is looking to invest in a new power
+# plant with capacity ``x`` [MW]. This plant has an annualized capital cost of
+# ``I`` [€/MW] and an operating cost of ``C`` [€/MWh]. There are 8760 hours in a
+# year.
+#
+# After making the capital investment, there are five possible consumption
+# scenarios, ``\omega``, which occur with probability ``\theta_\omega``. In each
+# scenario, the producer makes ``Y_ω`` MW of electricity.
+#
+# There is one consumer in the model, who has a quadratic utility function,
+# ``U(Q_ω) = A_ω Q_ω - \frac{B_ω Q_ω^2}{2}``.
+#
+# We now build and solve the mixed complementarity problem with a few brief
+# comments. The economic justification for the model would require a larger
+# tutorial than the space available here. Consult the [original text](https://doi.org/10.17863/CAM.17552)
+# for details.
+
+I = 90_000 # Annualized capital cost
+C = 60     # Operation cost per MWh
+τ = 8_760  # Hours per year
+θ = [0.2, 0.2, 0.2, 0.2, 0.2] # Scenario probabilities
+A = [300, 350, 400, 450, 500] # Utility function coefficients
+B = 1 # Utility function coefficients
+model = Model(PATHSolver.Optimizer)
+set_silent(model)
+@variable(model, x >= 0, start = 1)          # Installed capacity
+@variable(model, Q[ω = 1:5] >= 0, start = 1) # Consumption
+@variable(model, Y[ω = 1:5] >= 0, start = 1) # Production
+@variable(model, P[ω = 1:5], start = 1)      # Electricity price
+@variable(model, μ[ω = 1:5] >= 0, start = 1) # Capital scarcity margin
+## Unit investment cost equals annualized scarcity profit or investment is 0
+@constraint(model, I - τ * θ' * μ ⟂ x)
+## Difference between price and scarcity margin is equal to operation cost
+@constraint(model, [ω = 1:5], C - (P[ω] - μ[ω]) ⟂ Y[ω])
+## Price is equal to consumer's marginal utility
+@constraint(model, [ω = 1:5], P[ω] - (A[ω] - B * Q[ω]) ⟂ Q[ω])
+## Production is equal to consumption
+@constraint(model, [ω = 1:5], Y[ω] - Q[ω] ⟂ P[ω])
+## Production does not exceed capacity
+@constraint(model, [ω = 1:5], x - Y[ω] ⟂ μ[ω])
+optimize!(model)
+solution_summary(model)
+
+# An equilibrium solution is to build 389 MW:
+
+Test.@test isapprox(value(x), 389; atol = 1) #src
+value(x)
+
+# The production in each scenario is:
+
+Test.@test isapprox(value.(Q), [240, 290, 340, 389, 389]; atol = 1) #src
+value.(Q)
+
+# The price in each scenario is:
+
+Test.@test isapprox(value.(P), [60, 60, 60, 61, 111]; atol = 1) #src
+value.(P)
diff --git a/docs/src/tutorials/nonlinear/nested_problems.jl b/docs/src/tutorials/nonlinear/nested_problems.jl
index bf70493196a..54865d05c13 100644
--- a/docs/src/tutorials/nonlinear/nested_problems.jl
+++ b/docs/src/tutorials/nonlinear/nested_problems.jl
@@ -24,11 +24,12 @@
 # where an *upper* problem uses the results from the optimization of a *lower*
 # subproblem.
 #
-# To model the problem, we define a user-defined function to handle the decomposition
-# of the lower problem inside the upper one. Finally, we show how to improve
-# the performance by using a cache that avoids resolving the lower problem.
+# To model the problem, we define a user-defined operator to handle the
+# decomposition of the lower problem inside the upper one. Finally, we show how
+# to improve the performance by using a cache that avoids resolving the lower
+# problem.
 #
-# For a simpler example of writing a user-defined function,
+# For a simpler example of writing a user-defined operator,
 # see the [User-defined Hessians](@ref) tutorial.
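+
+# Schematically, the structure is (a generic sketch; the concrete problem is
+# stated later in this tutorial):
+# ```math
+# \min_{x} \; f(x) + V(x) \quad \text{where} \quad V(x) = \max_{y} g(x, y),
+# ```
+# and the value function ``V`` is exposed to the upper-level problem as a
+# user-defined operator.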
# This tutorial uses the following packages: @@ -74,7 +75,7 @@ function solve_lower_level(x...) model = Model(Ipopt.Optimizer) set_silent(model) @variable(model, y[1:2]) - @NLobjective( + @objective( model, Max, x[1]^2 * y[1] + x[2]^2 * y[2] - x[1] * y[1]^4 - 2 * x[2] * y[2]^4, @@ -104,7 +105,7 @@ end # \end{array} # ``` -# This looks like a nonlinear optimization problem with a user-defined function +# This looks like a nonlinear optimization problem with a user-defined operator # ``V``! However, because ``V`` solves an optimization problem internally, we # can't use automatic differentiation to compute the first and second # derivatives. Instead, we can use JuMP's ability to pass callback functions @@ -141,8 +142,8 @@ end model = Model(Ipopt.Optimizer) @variable(model, x[1:2] >= 0) -register(model, :V, 2, V, ∇V, ∇²V) -@NLobjective(model, Min, x[1]^2 + x[2]^2 + V(x[1], x[2])) +@operator(model, op_V, 2, V, ∇V, ∇²V) +@objective(model, Min, x[1]^2 + x[2]^2 + op_V(x[1], x[2])) optimize!(model) solution_summary(model) @@ -213,15 +214,15 @@ end model = Model(Ipopt.Optimizer) @variable(model, x[1:2] >= 0) cache = Cache(Float64[], NaN, Float64[]) -register( +@operator( model, - :V, + op_cached_f, 2, (x...) -> cached_f(cache, x...), (g, x...) -> cached_∇f(cache, g, x...), (H, x...) -> cached_∇²f(cache, H, x...), ) -@NLobjective(model, Min, x[1]^2 + x[2]^2 + V(x[1], x[2])) +@objective(model, Min, x[1]^2 + x[2]^2 + op_cached_f(x[1], x[2])) optimize!(model) solution_summary(model) diff --git a/docs/src/tutorials/nonlinear/querying_hessians.jl b/docs/src/tutorials/nonlinear/querying_hessians.jl index 807430d8195..d4a44ee72cc 100644 --- a/docs/src/tutorials/nonlinear/querying_hessians.jl +++ b/docs/src/tutorials/nonlinear/querying_hessians.jl @@ -68,8 +68,8 @@ model = Model(Ipopt.Optimizer) set_silent(model) @variable(model, x[i = 1:2], start = -i) @constraint(model, g_1, x[1]^2 <= 1) -@NLconstraint(model, g_2, (x[1] + x[2])^2 <= 2) -@NLobjective(model, Min, (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2) +@constraint(model, g_2, (x[1] + x[2])^2 <= 2) +@objective(model, Min, (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2) optimize!(model) # ## The analytic solution @@ -102,29 +102,54 @@ analytic_hessian([1, 1], 0, [0, 1]) analytic_hessian([1, 1], 1, [0, 0]) -#- +# ## Create a nonlinear model + +# JuMP delegates automatic differentiation to the `MOI.Nonlinear` submodule. +# Therefore, to compute the Hessian of the Lagrangian, we need to create a +# [`MOI.Nonlinear.Model`](@ref) object: -# ## Initializing the NLPEvaluator +rows = Any[] +nlp = MOI.Nonlinear.Model() +for (F, S) in list_of_constraint_types(model) + if F <: VariableRef + continue # Skip variable bounds + end + for ci in all_constraints(model, F, S) + push!(rows, ci) + object = constraint_object(ci) + MOI.Nonlinear.add_constraint(nlp, object.func, object.set) + end +end +MOI.Nonlinear.set_objective(nlp, objective_function(model)) +nlp -# JuMP stores all information relating to the nonlinear portions of the model in -# a [`NLPEvaluator`](@ref) struct: +# It is important that we save the constraint indices in a vector `rows`, so +# that we know the order of the constraints in the nonlinear model. -d = NLPEvaluator(model) +# Next, we need to convert our model into an [`MOI.Nonlinear.Evaluator`](@ref), +# specifying an automatic differentiation backend. In this case, we use +# [`MOI.Nonlinear.SparseReverseMode`](@ref): -# Before computing anything with the NLPEvaluator, we need to initialize it. 
+evaluator = MOI.Nonlinear.Evaluator( + nlp, + MOI.Nonlinear.SparseReverseMode(), + index.(all_variables(model)), +) + +# Before computing anything with the evaluator, we need to initialize it. # Use [`MOI.features_available`](@ref) to see what we can query: -MOI.features_available(d) +MOI.features_available(evaluator) -# Consult the MOI documentation for specifics. But to obtain the Hessian matrix, +# Consult the MOI documentation for specifics, but to obtain the Hessian matrix, # we need to initialize `:Hess`: -MOI.initialize(d, [:Hess]) +MOI.initialize(evaluator, [:Hess]) # MOI represents the Hessian as a sparse matrix. Get the sparsity pattern as # follows: -hessian_sparsity = MOI.hessian_lagrangian_structure(d) +hessian_sparsity = MOI.hessian_lagrangian_structure(evaluator) # The sparsity pattern has a few properties of interest: # * Each element `(i, j)` indicates a structural non-zero in row `i` and column @@ -146,8 +171,7 @@ H = SparseArrays.sparse(I, J, V, n, n) # Of course, knowing where the zeros are isn't very interesting. We really want # to compute the value of the Hessian matrix at a point. -num_g = num_nonlinear_constraints(model) -MOI.eval_hessian_lagrangian(d, V, ones(n), 1.0, ones(num_g)) +MOI.eval_hessian_lagrangian(evaluator, V, ones(n), 1.0, ones(length(rows))) H = SparseArrays.sparse(I, J, V, n, n) # In practice, we often want to compute the value of the hessian at the optimal @@ -164,13 +188,14 @@ x = all_variables(model) x_optimal = value.(x) # Next, we need the optimal dual solution associated with the nonlinear -# constraints: +# constraints (this is where it is important to record the order of the +# constraints as we added them to `nlp`): -y_optimal = dual.(all_nonlinear_constraints(model)) +y_optimal = dual.(rows) # Now we can compute the Hessian at the optimal primal-dual point: -MOI.eval_hessian_lagrangian(d, V, x_optimal, 1.0, y_optimal) +MOI.eval_hessian_lagrangian(evaluator, V, x_optimal, 1.0, y_optimal) H = SparseArrays.sparse(I, J, V, n, n) # However, this Hessian isn't quite right because it isn't symmetric. We can fix @@ -192,95 +217,29 @@ end fill_off_diagonal(H) -# Moreover, this Hessian only accounts for the objective and constraints entered -# using [`@NLobjective`](@ref) and [`@NLconstraint`](@ref). If we want to take -# quadratic objectives and constraints written using [`@objective`](@ref) or -# [`@constraint`](@ref) into account, we'll need to handle them separately. - -# !!! tip -# If you don't want to do this, you can replace calls to [`@objective`](@ref) -# and [`@constraint`](@ref) with [`@NLobjective`](@ref) and -# [`@NLconstraint`](@ref). - -# ## Hessians from QuadExpr functions - -# To compute the hessian from a quadratic expression, let's see how JuMP -# represents a quadratic constraint: - -f = constraint_object(g_1).func - -# `f` is a quadratic expression of the form: -# ``` -# f(x) = Σqᵢⱼ * xᵢ * xⱼ + Σaᵢ xᵢ + c -# ``` -# So `∇²f(x)` is the matrix formed by `[qᵢⱼ]ᵢⱼ` if `i != j` and `2[qᵢⱼ]ᵢⱼ` if `i = j`. - -variables_to_column = Dict(x[i] => i for i in 1:n) - -function add_to_hessian(H, f::QuadExpr, μ) - for (vars, coef) in f.terms - i = variables_to_column[vars.a] - j = variables_to_column[vars.b] - H[i, j] += μ * coef - end - return -end - -# If the function `f` is not a `QuadExpr`, do nothing because it is an `AffExpr` -# or a `VariableRef`. In both cases, the second derivative is zero. 
- -add_to_hessian(H, f::Any, μ) = nothing - -# Then we iterate over all constraints in the model and add their Hessian -# components: - -for (F, S) in list_of_constraint_types(model) - for cref in all_constraints(model, F, S) - f = constraint_object(cref).func - add_to_hessian(H, f, dual(cref)) - end -end - -H - -# Finally, we need to take into account the objective function: - -add_to_hessian(H, objective_function(model), 1.0) - -fill_off_diagonal(H) - # Putting everything together: -function compute_optimal_hessian(model) - d = NLPEvaluator(model) - MOI.initialize(d, [:Hess]) - hessian_sparsity = MOI.hessian_lagrangian_structure(d) - I = [i for (i, _) in hessian_sparsity] - J = [j for (_, j) in hessian_sparsity] - V = zeros(length(hessian_sparsity)) - x = all_variables(model) - x_optimal = value.(x) - y_optimal = dual.(all_nonlinear_constraints(model)) - MOI.eval_hessian_lagrangian(d, V, x_optimal, 1.0, y_optimal) - n = num_variables(model) - H = SparseArrays.sparse(I, J, V, n, n) - vmap = Dict(x[i] => i for i in 1:n) - add_to_hessian(H, f::Any, μ) = nothing - function add_to_hessian(H, f::QuadExpr, μ) - for (vars, coef) in f.terms - if vars.a != vars.b - H[vmap[vars.a], vmap[vars.b]] += μ * coef - else - H[vmap[vars.a], vmap[vars.b]] += 2 * μ * coef - end - end - end +function compute_optimal_hessian(model::Model) + rows = Any[] + nlp = MOI.Nonlinear.Model() for (F, S) in list_of_constraint_types(model) - for cref in all_constraints(model, F, S) - add_to_hessian(H, constraint_object(cref).func, dual(cref)) + for ci in all_constraints(model, F, S) + push!(rows, ci) + object = constraint_object(ci) + MOI.Nonlinear.add_constraint(nlp, object.func, object.set) end end - add_to_hessian(H, objective_function(model), 1.0) + MOI.Nonlinear.set_objective(nlp, objective_function(model)) + x = all_variables(model) + backend = MOI.Nonlinear.SparseReverseMode() + evaluator = MOI.Nonlinear.Evaluator(nlp, backend, index.(x)) + MOI.initialize(evaluator, [:Hess]) + hessian_sparsity = MOI.hessian_lagrangian_structure(evaluator) + I = [i for (i, _) in hessian_sparsity] + J = [j for (_, j) in hessian_sparsity] + V = zeros(length(hessian_sparsity)) + MOI.eval_hessian_lagrangian(evaluator, V, value.(x), 1.0, dual.(rows)) + H = SparseArrays.sparse(I, J, V, length(x), length(x)) return Matrix(fill_off_diagonal(H)) end diff --git a/docs/src/tutorials/nonlinear/rocket_control.jl b/docs/src/tutorials/nonlinear/rocket_control.jl index 4e3d35e8000..2cf3317aaf4 100644 --- a/docs/src/tutorials/nonlinear/rocket_control.jl +++ b/docs/src/tutorials/nonlinear/rocket_control.jl @@ -120,7 +120,7 @@ fix(m[n], m_f; force = true) # ## Forces -@NLexpressions( +@expressions( rocket, begin ## Drag(h,v) = Dc v^2 exp( -hc * (h - h0) / h0 ) @@ -137,17 +137,17 @@ fix(m[n], m_f; force = true) for j in 2:n ## h' = v ## Rectangular integration - ## @NLconstraint(rocket, h[j] == h[j - 1] + Δt * v[j - 1]) + ## @constraint(rocket, h[j] == h[j - 1] + Δt * v[j - 1]) ## Trapezoidal integration - @NLconstraint(rocket, h[j] == h[j-1] + 0.5 * Δt * (v[j] + v[j-1])) + @constraint(rocket, h[j] == h[j-1] + 0.5 * Δt * (v[j] + v[j-1])) ## v' = (T-D(h,v))/m - g(h) ## Rectangular integration - ## @NLconstraint( + ## @constraint( ## rocket, ## v[j] == v[j - 1] + Δt *((T[j - 1] - drag[j - 1]) / m[j - 1] - grav[j - 1]) ## ) ## Trapezoidal integration - @NLconstraint( + @constraint( rocket, v[j] == v[j-1] + @@ -160,9 +160,9 @@ for j in 2:n ) ## m' = -T/c ## Rectangular integration - ## @NLconstraint(rocket, m[j] == m[j - 1] - Δt * T[j - 1] / c) + ## 
@constraint(rocket, m[j] == m[j - 1] - Δt * T[j - 1] / c) ## Trapezoidal integration - @NLconstraint(rocket, m[j] == m[j-1] - 0.5 * Δt * (T[j] + T[j-1]) / c) + @constraint(rocket, m[j] == m[j-1] - 0.5 * Δt * (T[j] + T[j-1]) / c) end # Solve for the control and state diff --git a/docs/src/tutorials/nonlinear/simple_examples.jl b/docs/src/tutorials/nonlinear/simple_examples.jl index b358b30a72e..3a8ae666cba 100644 --- a/docs/src/tutorials/nonlinear/simple_examples.jl +++ b/docs/src/tutorials/nonlinear/simple_examples.jl @@ -23,7 +23,7 @@ function example_rosenbrock() set_silent(model) @variable(model, x) @variable(model, y) - @NLobjective(model, Min, (1 - x)^2 + 100 * (y - x^2)^2) + @objective(model, Min, (1 - x)^2 + 100 * (y - x^2)^2) optimize!(model) Test.@test termination_status(model) == LOCALLY_SOLVED Test.@test primal_status(model) == FEASIBLE_POINT @@ -63,7 +63,7 @@ function example_clnlbeam() -0.05 <= x[1:(N+1)] <= 0.05 u[1:(N+1)] end) - @NLobjective( + @objective( model, Min, sum( @@ -71,7 +71,7 @@ function example_clnlbeam() 0.5 * alpha * h * (cos(t[i+1]) + cos(t[i])) for i in 1:N ), ) - @NLconstraint( + @constraint( model, [i = 1:N], x[i+1] - x[i] - 0.5 * h * (sin(t[i+1]) + sin(t[i])) == 0, @@ -109,7 +109,7 @@ function example_mle() set_silent(model) @variable(model, μ, start = 0.0) @variable(model, σ >= 0.0, start = 1.0) - @NLobjective( + @objective( model, Max, n / 2 * log(1 / (2 * π * σ^2)) - @@ -124,7 +124,7 @@ function example_mle() Test.@test value(μ) ≈ Statistics.mean(data) atol = 1e-3 Test.@test value(σ)^2 ≈ Statistics.var(data) atol = 1e-2 ## You can even do constrained MLE! - @NLconstraint(model, μ == σ^2) + @constraint(model, μ == σ^2) optimize!(model) Test.@test value(μ) ≈ value(σ)^2 println() diff --git a/docs/src/tutorials/nonlinear/space_shuttle_reentry_trajectory.jl b/docs/src/tutorials/nonlinear/space_shuttle_reentry_trajectory.jl index 64f0413e597..638c7e1d8f0 100644 --- a/docs/src/tutorials/nonlinear/space_shuttle_reentry_trajectory.jl +++ b/docs/src/tutorials/nonlinear/space_shuttle_reentry_trajectory.jl @@ -237,38 +237,34 @@ initial_guess = mapreduce(transpose, vcat, interp_linear.(1:n)) set_start_value.(all_variables(model), vec(initial_guess)) ## Functions to restore `h` and `v` to their true scale -@NLexpression(model, h[j = 1:n], scaled_h[j] * 1e5) -@NLexpression(model, v[j = 1:n], scaled_v[j] * 1e4) +@expression(model, h[j = 1:n], scaled_h[j] * 1e5) +@expression(model, v[j = 1:n], scaled_v[j] * 1e4) ## Helper functions -@NLexpression(model, c_L[j = 1:n], a₀ + a₁ * rad2deg(α[j])) -@NLexpression( - model, - c_D[j = 1:n], - b₀ + b₁ * rad2deg(α[j]) + b₂ * rad2deg(α[j])^2 -) -@NLexpression(model, ρ[j = 1:n], ρ₀ * exp(-h[j] / hᵣ)) -@NLexpression(model, D[j = 1:n], 0.5 * c_D[j] * S * ρ[j] * v[j]^2) -@NLexpression(model, L[j = 1:n], 0.5 * c_L[j] * S * ρ[j] * v[j]^2) -@NLexpression(model, r[j = 1:n], Rₑ + h[j]) -@NLexpression(model, g[j = 1:n], μ / r[j]^2) +@expression(model, c_L[j = 1:n], a₀ + a₁ * rad2deg(α[j])) +@expression(model, c_D[j = 1:n], b₀ + b₁ * rad2deg(α[j]) + b₂ * rad2deg(α[j])^2) +@expression(model, ρ[j = 1:n], ρ₀ * exp(-h[j] / hᵣ)) +@expression(model, D[j = 1:n], 0.5 * c_D[j] * S * ρ[j] * v[j]^2) +@expression(model, L[j = 1:n], 0.5 * c_L[j] * S * ρ[j] * v[j]^2) +@expression(model, r[j = 1:n], Rₑ + h[j]) +@expression(model, g[j = 1:n], μ / r[j]^2) ## Motion of the vehicle as a differential-algebraic system of equations (DAEs) -@NLexpression(model, δh[j = 1:n], v[j] * sin(γ[j])) -@NLexpression( +@expression(model, δh[j = 1:n], v[j] * sin(γ[j])) 
+@expression( model, δϕ[j = 1:n], (v[j] / r[j]) * cos(γ[j]) * sin(ψ[j]) / cos(θ[j]) ) -@NLexpression(model, δθ[j = 1:n], (v[j] / r[j]) * cos(γ[j]) * cos(ψ[j])) -@NLexpression(model, δv[j = 1:n], -(D[j] / m) - g[j] * sin(γ[j])) -@NLexpression( +@expression(model, δθ[j = 1:n], (v[j] / r[j]) * cos(γ[j]) * cos(ψ[j])) +@expression(model, δv[j = 1:n], -(D[j] / m) - g[j] * sin(γ[j])) +@expression( model, δγ[j = 1:n], (L[j] / (m * v[j])) * cos(β[j]) + cos(γ[j]) * ((v[j] / r[j]) - (g[j] / v[j])) ) -@NLexpression( +@expression( model, δψ[j = 1:n], (1 / (m * v[j] * cos(γ[j]))) * L[j] * sin(β[j]) + @@ -281,20 +277,20 @@ for j in 2:n if integration_rule == "rectangular" ## Rectangular integration - @NLconstraint(model, h[j] == h[i] + Δt[i] * δh[i]) - @NLconstraint(model, ϕ[j] == ϕ[i] + Δt[i] * δϕ[i]) - @NLconstraint(model, θ[j] == θ[i] + Δt[i] * δθ[i]) - @NLconstraint(model, v[j] == v[i] + Δt[i] * δv[i]) - @NLconstraint(model, γ[j] == γ[i] + Δt[i] * δγ[i]) - @NLconstraint(model, ψ[j] == ψ[i] + Δt[i] * δψ[i]) + @constraint(model, h[j] == h[i] + Δt[i] * δh[i]) + @constraint(model, ϕ[j] == ϕ[i] + Δt[i] * δϕ[i]) + @constraint(model, θ[j] == θ[i] + Δt[i] * δθ[i]) + @constraint(model, v[j] == v[i] + Δt[i] * δv[i]) + @constraint(model, γ[j] == γ[i] + Δt[i] * δγ[i]) + @constraint(model, ψ[j] == ψ[i] + Δt[i] * δψ[i]) elseif integration_rule == "trapezoidal" ## Trapezoidal integration - @NLconstraint(model, h[j] == h[i] + 0.5 * Δt[i] * (δh[j] + δh[i])) - @NLconstraint(model, ϕ[j] == ϕ[i] + 0.5 * Δt[i] * (δϕ[j] + δϕ[i])) - @NLconstraint(model, θ[j] == θ[i] + 0.5 * Δt[i] * (δθ[j] + δθ[i])) - @NLconstraint(model, v[j] == v[i] + 0.5 * Δt[i] * (δv[j] + δv[i])) - @NLconstraint(model, γ[j] == γ[i] + 0.5 * Δt[i] * (δγ[j] + δγ[i])) - @NLconstraint(model, ψ[j] == ψ[i] + 0.5 * Δt[i] * (δψ[j] + δψ[i])) + @constraint(model, h[j] == h[i] + 0.5 * Δt[i] * (δh[j] + δh[i])) + @constraint(model, ϕ[j] == ϕ[i] + 0.5 * Δt[i] * (δϕ[j] + δϕ[i])) + @constraint(model, θ[j] == θ[i] + 0.5 * Δt[i] * (δθ[j] + δθ[i])) + @constraint(model, v[j] == v[i] + 0.5 * Δt[i] * (δv[j] + δv[i])) + @constraint(model, γ[j] == γ[i] + 0.5 * Δt[i] * (δγ[j] + δγ[i])) + @constraint(model, ψ[j] == ψ[i] + 0.5 * Δt[i] * (δψ[j] + δψ[i])) else @error "Unexpected integration rule '$(integration_rule)'" end diff --git a/docs/src/tutorials/nonlinear/tips_and_tricks.jl b/docs/src/tutorials/nonlinear/tips_and_tricks.jl index f932dcdad8b..26f96d0d97f 100644 --- a/docs/src/tutorials/nonlinear/tips_and_tricks.jl +++ b/docs/src/tutorials/nonlinear/tips_and_tricks.jl @@ -12,9 +12,9 @@ using JuMP import Ipopt import Test -# ## User-defined functions with vector outputs +# ## User-defined operators with vector outputs -# A common situation is to have a user-defined function like the following that +# A common situation is to have a user-defined operator like the following that # returns multiple outputs (we define `function_calls` to keep track of how # many times we call this method): @@ -31,7 +31,7 @@ end # term might be used in a constraint, and often they share work that is # expensive to evaluate. -# This is a problem for JuMP, because it requires user-defined functions to +# This is a problem for JuMP, because it requires user-defined operators to # return a single number. One option is to define two separate functions, the # first returning the first argument, and the second returning the second # argument. 
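+
+# (A note for emphasis: with this approach, each of the two wrapper functions
+# calls `foo`, so the expensive shared work is evaluated twice at every point.
+# The memoization technique later in this tutorial avoids the duplicate work.)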
@@ -46,10 +46,10 @@ foo_2(x, y) = foo(x, y)[2] model = Model(Ipopt.Optimizer) set_silent(model) @variable(model, x[1:2] >= 0, start = 0.1) -register(model, :foo_1, 2, foo_1; autodiff = true) -register(model, :foo_2, 2, foo_2; autodiff = true) -@NLobjective(model, Max, foo_1(x[1], x[2])) -@NLconstraint(model, foo_2(x[1], x[2]) <= 2) +@operator(model, op_foo_1, 2, foo_1) +@operator(model, op_foo_2, 2, foo_2) +@objective(model, Max, op_foo_1(x[1], x[2])) +@constraint(model, op_foo_2(x[1], x[2]) <= 2) function_calls = 0 optimize!(model) Test.@test objective_value(model) ≈ √3 atol = 1e-4 @@ -114,10 +114,10 @@ println("function_calls = ", function_calls) model = Model(Ipopt.Optimizer) set_silent(model) @variable(model, x[1:2] >= 0, start = 0.1) -register(model, :foo_1, 2, memoized_foo[1]; autodiff = true) -register(model, :foo_2, 2, memoized_foo[2]; autodiff = true) -@NLobjective(model, Max, foo_1(x[1], x[2])) -@NLconstraint(model, foo_2(x[1], x[2]) <= 2) +@operator(model, op_foo_1, 2, memoized_foo[1]) +@operator(model, op_foo_2, 2, memoized_foo[2]) +@objective(model, Max, op_foo_1(x[1], x[2])) +@constraint(model, op_foo_2(x[1], x[2]) <= 2) function_calls = 0 optimize!(model) Test.@test objective_value(model) ≈ √3 atol = 1e-4 diff --git a/docs/src/tutorials/nonlinear/user_defined_hessians.jl b/docs/src/tutorials/nonlinear/user_defined_hessians.jl index 1672aae56a3..b7f75e42d20 100644 --- a/docs/src/tutorials/nonlinear/user_defined_hessians.jl +++ b/docs/src/tutorials/nonlinear/user_defined_hessians.jl @@ -20,9 +20,9 @@ # # User-defined Hessians -# In this tutorial, we explain how to write a user-defined function (see -# [User-defined Functions](@ref)) with a Hessian matrix explicitly provided by -# the user. +# In this tutorial, we explain how to write a user-defined operator (see +# [User-defined operators](@ref jump_user_defined_operators)) with a Hessian +# matrix explicitly provided by the user. # # For a more advanced example, see [Nested optimization problems](@ref). @@ -65,11 +65,11 @@ end # you may assume only that `H` supports `size(H)` and `setindex!`. 
 # Now that we have the function, its gradient, and its Hessian, we can construct
-# a JuMP model, register the function, and use it in a `@NL` macro:
+# a JuMP model, add the operator, and use it in a macro:
 
 model = Model(Ipopt.Optimizer)
 @variable(model, x[1:2])
-register(model, :rosenbrock, 2, rosenbrock, ∇rosenbrock, ∇²rosenbrock)
-@NLobjective(model, Min, rosenbrock(x[1], x[2]))
+@operator(model, op_rosenbrock, 2, rosenbrock, ∇rosenbrock, ∇²rosenbrock)
+@objective(model, Min, op_rosenbrock(x[1], x[2]))
 optimize!(model)
 solution_summary(model; verbose = true)
diff --git a/docs/styles/Vocab/JuMP-Vocab/accept.txt b/docs/styles/Vocab/JuMP-Vocab/accept.txt
index 772fe8a1995..e9d34e56170 100644
--- a/docs/styles/Vocab/JuMP-Vocab/accept.txt
+++ b/docs/styles/Vocab/JuMP-Vocab/accept.txt
@@ -159,6 +159,7 @@ Coey
 Dantzig
 Das
 Dvorkin
+Ehrenmann
 Ehrgott
 Erlangen
 Farkas
@@ -210,6 +211,7 @@ Schur
 Schwarz
 Shabbir
 Shuvomoy
+Smeers
 Soodeh
 Steuer
 Stigler
diff --git a/src/JuMP.jl b/src/JuMP.jl
index e9328ad52c1..5efab520cc4 100644
--- a/src/JuMP.jl
+++ b/src/JuMP.jl
@@ -19,6 +19,7 @@ module JuMP
 
 import Base.Meta: isexpr, quot
 import LinearAlgebra
+import MacroTools
 import MathOptInterface as MOI
 import MutableArithmetics
 import OrderedCollections
@@ -1079,6 +1080,7 @@ include("objective.jl")
 include("aff_expr.jl")
 include("quad_expr.jl")
 include("nlp.jl")
+include("nlp_expr.jl")
 include("macros.jl")
 include("optimizer_interface.jl")
 
diff --git a/src/aff_expr.jl b/src/aff_expr.jl
index d80f909e8ab..75634bee377 100644
--- a/src/aff_expr.jl
+++ b/src/aff_expr.jl
@@ -124,6 +124,13 @@ end
 
 variable_ref_type(::Type{GenericAffExpr{C,V}}) where {C,V} = V
 
+function owner_model(x::GenericAffExpr)
+    if !isempty(x.terms)
+        return owner_model(first(keys(x.terms)))
+    end
+    return nothing
+end
+
 """
     GenericAffExpr(constant::V, kv::AbstractArray{Pair{K,V}}) where {K,V}
 
diff --git a/src/complement.jl b/src/complement.jl
index e3de1ce2175..74f47d37fdc 100644
--- a/src/complement.jl
+++ b/src/complement.jl
@@ -70,6 +70,6 @@ function parse_constraint_call(
         F,
         x,
     )
-    f, parse_code = _MA.rewrite(F)
+    f, parse_code = _rewrite_expression(F)
     return parse_code, :(_build_complements_constraint($errorf, $f, $(esc(x))))
 end
diff --git a/src/macros.jl b/src/macros.jl
index 9d6545afac9..da97d5e5cda 100644
--- a/src/macros.jl
+++ b/src/macros.jl
@@ -474,6 +474,245 @@ function parse_constraint_head(
     return is_vectorized, parse_code, build_call
 end
 
+"""
+    op_ifelse(a, x, y)
+
+A function that falls back to `ifelse(a, x, y)`, but when called with a JuMP
+variable or expression in the first argument, returns a
+[`GenericNonlinearExpr`](@ref).
+
+## Example
+
+```jldoctest
+julia> model = Model();
+
+julia> @variable(model, x);
+
+julia> op_ifelse(true, 1.0, 2.0)
+1.0
+
+julia> op_ifelse(x, 1.0, 2.0)
+ifelse(x, 1.0, 2.0)
+
+julia> op_ifelse(true, x, 2.0)
+x
+```
+"""
+op_ifelse(a, x, y) = ifelse(a, x, y)
+
+# We can't make this a generic `NonlinearOperator` because we only want to
+# intercept `ifelse` if the first argument is an `AbstractJuMPScalar` (if it's a
+# `Bool`, we want to return the correct branch).
+op_ifelse(a::AbstractJuMPScalar, x, y) = NonlinearExpr(:ifelse, Any[a, x, y])
+
+"""
+    op_and(x, y)
+
+A function that falls back to `x & y`, but when called with JuMP variables or
+expressions, returns a [`GenericNonlinearExpr`](@ref).
+ +## Example + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> op_and(true, false) +false + +julia> op_and(true, x) +true && x +``` +""" +const op_and = NonlinearOperator(:&&, &) +# Note that the function is `&` instead of `&&` because `&&` is special lowering +# syntax and is not a regular Julia function, but the MOI operator is `:&&`. + +""" + op_or(x, y) + +A function that falls back to `x | y`, but when called with JuMP variables or +expressions, returns a [`GenericNonlinearExpr`](@ref). + +## Example + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> op_or(true, false) +true + +julia> op_or(true, x) +true || x +``` +""" +const op_or = NonlinearOperator(:||, |) +# Note that the function is `|` instead of `||` because `||` is special lowering +# syntax and is not a regular Julia function, but the MOI operator is `:||`. + +""" + op_strictly_less_than(x, y) + +A function that falls back to `x < y`, but when called with JuMP variables or +expressions, returns a [`GenericNonlinearExpr`](@ref). + +## Example + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> op_strictly_less_than(1, 2) +true + +julia> op_strictly_less_than(x, 2) +x < 2 +``` +""" +const op_strictly_less_than = NonlinearOperator(:<, <) + +""" + op_strictly_greater_than(x, y) + +A function that falls back to `x > y`, but when called with JuMP variables or +expressions, returns a [`GenericNonlinearExpr`](@ref). + +## Example + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> op_strictly_greater_than(1, 2) +false + +julia> op_strictly_greater_than(x, 2) +x > 2 +``` +""" +const op_strictly_greater_than = NonlinearOperator(:>, >) + +""" + op_less_than_or_equal_to(x, y) + +A function that falls back to `x <= y`, but when called with JuMP variables or +expressions, returns a [`GenericNonlinearExpr`](@ref). + +## Example + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> op_less_than_or_equal_to(2, 2) +true + +julia> op_less_than_or_equal_to(x, 2) +x <= 2 +``` +""" +const op_less_than_or_equal_to = NonlinearOperator(:<=, <=) + +""" + op_greater_than_or_equal_to(x, y) + +A function that falls back to `x >= y`, but when called with JuMP variables or +expressions, returns a [`GenericNonlinearExpr`](@ref). + +## Example + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> op_greater_than_or_equal_to(2, 2) +true + +julia> op_greater_than_or_equal_to(x, 2) +x >= 2 +``` +""" +const op_greater_than_or_equal_to = NonlinearOperator(:>=, >=) + +""" + op_equal_to(x, y) + +A function that falls back to `x == y`, but when called with JuMP variables or +expressions, returns a [`GenericNonlinearExpr`](@ref). + +## Example + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> op_equal_to(2, 2) +true + +julia> op_equal_to(x, 2) +x == 2 +``` +""" +const op_equal_to = NonlinearOperator(:(==), ==) + +function _rewrite_to_jump_logic(x) + if Meta.isexpr(x, :call) + op = if x.args[1] == :ifelse + return Expr(:call, op_ifelse, x.args[2:end]...) + elseif x.args[1] == :< + return Expr(:call, op_strictly_less_than, x.args[2:end]...) + elseif x.args[1] == :> + return Expr(:call, op_strictly_greater_than, x.args[2:end]...) + elseif x.args[1] == :<= + return Expr(:call, op_less_than_or_equal_to, x.args[2:end]...) + elseif x.args[1] == :>= + return Expr(:call, op_greater_than_or_equal_to, x.args[2:end]...) 
+        elseif x.args[1] == :(==)
+            return Expr(:call, op_equal_to, x.args[2:end]...)
+        end
+    elseif Meta.isexpr(x, :||)
+        return Expr(:call, op_or, x.args...)
+    elseif Meta.isexpr(x, :&&)
+        return Expr(:call, op_and, x.args...)
+    elseif Meta.isexpr(x, :comparison)
+        lhs = Expr(:call, x.args[2], x.args[1], x.args[3])
+        rhs = Expr(:call, x.args[4], x.args[3], x.args[5])
+        return Expr(
+            :call,
+            op_and,
+            _rewrite_to_jump_logic(lhs),
+            _rewrite_to_jump_logic(rhs),
+        )
+    end
+    return x
+end
+
+"""
+    _rewrite_expression(expr)
+
+A helper function so that we can change how we rewrite expressions in a single
+place and have it cascade to all locations in the JuMP macros that rewrite
+expressions.
+"""
+function _rewrite_expression(expr)
+    new_expr = MacroTools.postwalk(_rewrite_to_jump_logic, expr)
+    new_aff, parse_aff = _MA.rewrite(new_expr; move_factors_into_sums = false)
+    ret = gensym()
+    code = quote
+        $parse_aff
+        $ret = $flatten!($new_aff)
+    end
+    return ret, code
+end
+
function parse_constraint_head(
    _error::Function,
    ::Val{:comparison},
@@ -501,9 +740,9 @@ function parse_constraint_head(
            "`$ub >= ... >= $lb`.",
        )
    end
-    new_aff, parse_aff = _MA.rewrite(aff)
-    new_lb, parse_lb = _MA.rewrite(lb)
-    new_ub, parse_ub = _MA.rewrite(ub)
+    new_aff, parse_aff = _rewrite_expression(aff)
+    new_lb, parse_lb = _rewrite_expression(lb)
+    new_ub, parse_ub = _rewrite_expression(ub)
    parse_code = quote
        $parse_aff
        $parse_lb
@@ -584,7 +823,7 @@ function parse_constraint_call(
    func,
    set,
)
-    f, parse_code = _MA.rewrite(func)
+    f, parse_code = _rewrite_expression(func)
    build_call = if vectorized
        :(build_constraint.($_error, _desparsify($f), Ref($(esc(set)))))
    else
@@ -618,7 +857,7 @@ function parse_constraint_call(
    rhs,
)
    func = vectorized ? :($lhs .- $rhs) : :($lhs - $rhs)
-    f, parse_code = _MA.rewrite(func)
+    f, parse_code = _rewrite_expression(func)
    set = operator_to_set(_error, operator)
    # `_functionize` deals with the pathological case where the `lhs` is a
    # `VariableRef` and the `rhs` is a summation with no terms.
@@ -1590,7 +1829,7 @@ macro objective(model, args...)
    end
    sense, x = args
    sense_expr = _moi_sense(_error, sense)
-    newaff, parsecode = _MA.rewrite(x)
+    newaff, parsecode = _rewrite_expression(x)
    code = quote
        $parsecode
        # Don't leak a `_MA.Zero` if the objective expression is an empty
@@ -1679,11 +1918,12 @@ macro expression(args...)
            "different name for the index.",
        )
    end
-    code = _MA.rewrite_and_return(x)
+    expr_var, build_code = _rewrite_expression(x)
    code = quote
+        $build_code
        # Don't leak a `_MA.Zero` if the expression is an empty summation, or
        # other structure that returns `_MA.Zero()`.
-        _replace_zero($m, $code)
+        _replace_zero($m, $expr_var)
    end
    code =
        Containers.container_code(idxvars, indices, code, requested_container)
diff --git a/src/mutable_arithmetics.jl b/src/mutable_arithmetics.jl
index 3a51dd85ce1..d7b77506520 100644
--- a/src/mutable_arithmetics.jl
+++ b/src/mutable_arithmetics.jl
@@ -286,6 +286,11 @@ end
function _MA.add_mul(lhs::AbstractJuMPScalar, x::_Scalar, y::_Scalar)
    T = _MA.promote_operation(_MA.add_mul, typeof(lhs), typeof(x), typeof(y))
    expr = _MA.operate(convert, T, lhs)
+    # We can't use `operate!!` here because in the IsNotMutable case (e.g.,
+    # NonlinearExpr), it will fall back to this method and cause a StackOverflow.
+    if _MA.mutability(T) == _MA.IsNotMutable()
+        return expr + _MA.operate(*, x, y)
+    end
    return _MA.operate!(_MA.add_mul, expr, x, y)
end

@@ -303,12 +308,22 @@ function _MA.add_mul(
        typeof.(args)...,
    )
    expr = _MA.operate(convert, T, lhs)
+    # We can't use `operate!!` here because in the IsNotMutable case (e.g.,
+    # NonlinearExpr), it will fall back to this method and cause a StackOverflow.
+    if _MA.mutability(T) == _MA.IsNotMutable()
+        return expr + _MA.operate(*, x, y, args...)
+    end
    return _MA.operate!(_MA.add_mul, expr, x, y, args...)
end

function _MA.sub_mul(lhs::AbstractJuMPScalar, x::_Scalar, y::_Scalar)
    T = _MA.promote_operation(_MA.sub_mul, typeof(lhs), typeof(x), typeof(y))
    expr = _MA.operate(convert, T, lhs)
+    # We can't use `operate!!` here because in the IsNotMutable case (e.g.,
+    # NonlinearExpr), it will fall back to this method and cause a StackOverflow.
+    if _MA.mutability(T) == _MA.IsNotMutable()
+        return expr - _MA.operate(*, x, y)
+    end
    return _MA.operate!(_MA.sub_mul, expr, x, y)
end

@@ -326,5 +341,10 @@ function _MA.sub_mul(
        typeof.(args)...,
    )
    expr = _MA.operate(convert, T, lhs)
+    # We can't use `operate!!` here because in the IsNotMutable case (e.g.,
+    # NonlinearExpr), it will fall back to this method and cause a StackOverflow.
+    if _MA.mutability(T) == _MA.IsNotMutable()
+        return expr - _MA.operate(*, x, y, args...)
+    end
    return _MA.operate!(_MA.sub_mul, expr, x, y, args...)
end
diff --git a/src/nlp_expr.jl b/src/nlp_expr.jl
new file mode 100644
index 00000000000..6fe42fcb5ee
--- /dev/null
+++ b/src/nlp_expr.jl
@@ -0,0 +1,1113 @@
+# Copyright 2017, Iain Dunning, Joey Huchette, Miles Lubin, and contributors
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+"""
+    GenericNonlinearExpr{V}(head::Symbol, args::Vector{Any})
+    GenericNonlinearExpr{V}(head::Symbol, args::Any...)
+
+The scalar-valued nonlinear function `head(args...)`, represented as a symbolic
+expression tree, with the call operator `head` and ordered arguments in `args`.
+
+`V` is the type of [`AbstractVariableRef`](@ref) present in the expression, and
+is used to help dispatch JuMP extensions.
+
+## `head`
+
+The `head::Symbol` must be an operator supported by the model.
+
+The default list of supported univariate operators is given by:
+
+ * [`MOI.Nonlinear.DEFAULT_UNIVARIATE_OPERATORS`](@ref)
+
+and the default list of supported multivariate operators is given by:
+
+ * [`MOI.Nonlinear.DEFAULT_MULTIVARIATE_OPERATORS`](@ref)
+
+Additional operators can be added using [`@operator`](@ref).
+
+See the full list of operators supported by a [`MOI.ModelLike`](@ref) by
+querying the [`MOI.ListOfSupportedNonlinearOperators`](@ref) attribute.
+
+## `args`
+
+The vector `args` contains the arguments to the nonlinear function. If the
+operator is univariate, it must contain one element. Otherwise, it may contain
+multiple elements.
+
+Given a subtype of [`AbstractVariableRef`](@ref), `V`, for `GenericNonlinearExpr{V}`,
+each element must be one of the following:
+
+ * A constant value of type `<:Real`
+ * A `V`
+ * A [`GenericAffExpr{T,V}`](@ref)
+ * A [`GenericQuadExpr{T,V}`](@ref)
+ * A [`GenericNonlinearExpr{V}`](@ref)
+
+where `T<:Real` and `T == value_type(V)`.
+
+## Unsupported operators
+
+If the optimizer does not support `head`, an [`MOI.UnsupportedNonlinearOperator`](@ref)
+error will be thrown.
+ +There is no guarantee about when this error will be thrown; it may be thrown +when the function is first added to the model, or it may be thrown when +[`optimize!`](@ref) is called. + +## Example + +To represent the function ``f(x) = sin(x)^2``, do: + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x) +x + +julia> f = sin(x)^2 +sin(x) ^ 2.0 + +julia> f = GenericNonlinearExpr{VariableRef}( + :^, + GenericNonlinearExpr{VariableRef}(:sin, x), + 2.0, + ) +sin(x) ^ 2.0 +``` +""" +struct GenericNonlinearExpr{V<:AbstractVariableRef} <: AbstractJuMPScalar + head::Symbol + args::Vector{Any} + + function GenericNonlinearExpr(head::Symbol, args::Vector{Any}) + index = findfirst(Base.Fix2(isa, AbstractJuMPScalar), args) + if index === nothing + error( + "Unable to create a nonlinear expression because it did not " * + "contain any JuMP scalars. head = $head, args = $args.", + ) + end + return new{variable_ref_type(args[index])}(head, args) + end + + function GenericNonlinearExpr{V}( + head::Symbol, + args::Vector{Any}, + ) where {V<:AbstractVariableRef} + return new{V}(head, args) + end +end + +""" + NonlinearExpr + +Alias for `GenericNonlinearExpr{VariableRef}`, the specific +[`GenericNonlinearExpr`](@ref) used by JuMP. +""" +const NonlinearExpr = GenericNonlinearExpr{VariableRef} + +variable_ref_type(::GenericNonlinearExpr{V}) where {V} = V + +# We include this method so that we can refactor the internal representation of +# GenericNonlinearExpr without having to rewrite the method overloads. +function GenericNonlinearExpr{V}( + head::Symbol, + args..., +) where {V<:AbstractVariableRef} + return GenericNonlinearExpr{V}(head, Any[args...]) +end + +const _PREFIX_OPERATORS = + (:+, :-, :*, :/, :^, :||, :&&, :>, :<, :(<=), :(>=), :(==)) + +_needs_parentheses(::Union{Number,AbstractVariableRef}) = false +_needs_parentheses(::Any) = true +function _needs_parentheses(x::GenericNonlinearExpr) + return x.head in _PREFIX_OPERATORS && length(x.args) > 1 +end + +function function_string(::MIME"text/plain", x::GenericNonlinearExpr) + io, stack = IOBuffer(), Any[x] + while !isempty(stack) + arg = pop!(stack) + if arg isa GenericNonlinearExpr + if arg.head in _PREFIX_OPERATORS && length(arg.args) > 1 + if _needs_parentheses(arg.args[1]) + print(io, "(") + end + if _needs_parentheses(arg.args[end]) + push!(stack, ")") + end + for i in length(arg.args):-1:2 + push!(stack, arg.args[i]) + if _needs_parentheses(arg.args[i]) + push!(stack, "(") + end + push!(stack, " $(arg.head) ") + if _needs_parentheses(arg.args[i-1]) + push!(stack, ")") + end + end + push!(stack, arg.args[1]) + else + print(io, arg.head, "(") + push!(stack, ")") + for i in length(arg.args):-1:2 + push!(stack, arg.args[i]) + push!(stack, ", ") + end + if length(arg.args) >= 1 + push!(stack, arg.args[1]) + end + end + else + print(io, arg) + end + end + seekstart(io) + return read(io, String) +end + +function function_string(::MIME"text/latex", x::GenericNonlinearExpr) + io, stack = IOBuffer(), Any[x] + while !isempty(stack) + arg = pop!(stack) + if arg isa GenericNonlinearExpr + if arg.head in _PREFIX_OPERATORS && length(arg.args) > 1 + print(io, "{") + push!(stack, "}") + if _needs_parentheses(arg.args[1]) + print(io, "\\left(") + end + if _needs_parentheses(arg.args[end]) + push!(stack, "\\right)") + end + for i in length(arg.args):-1:2 + push!(stack, arg.args[i]) + if _needs_parentheses(arg.args[i]) + push!(stack, "\\left(") + end + push!(stack, "} $(arg.head) {") + if _needs_parentheses(arg.args[i-1]) + push!(stack, 
"\\right)") + end + end + push!(stack, arg.args[1]) + else + print(io, "\\textsf{", arg.head, "}\\left({") + push!(stack, "}\\right)") + for i in length(arg.args):-1:2 + push!(stack, arg.args[i]) + push!(stack, "}, {") + end + if length(arg.args) >= 1 + push!(stack, arg.args[1]) + end + end + else + print(io, arg) + end + end + seekstart(io) + return read(io, String) +end + +_isequal(x, y) = x == y +_isequal(x::T, y::T) where {T<:AbstractJuMPScalar} = isequal_canonical(x, y) + +function isequal_canonical(x::GenericNonlinearExpr, y::GenericNonlinearExpr) + return x.head == y.head && + length(x.args) == length(y.args) && + all(i -> _isequal(x.args[i], y.args[i]), 1:length(x.args)) +end + +function MOI.Nonlinear.parse_expression( + data::MOI.Nonlinear.Model, + expr::MOI.Nonlinear.Expression, + x::GenericNonlinearExpr, + parent::Int, +) + stack = Tuple{Int,Any}[(parent, x)] + while !isempty(stack) + parent_node, arg = pop!(stack) + if arg isa GenericNonlinearExpr + _parse_without_recursion_inner(stack, data, expr, arg, parent_node) + else + # We can use recursion here, because GenericNonlinearExpr only occur + # in other GenericNonlinearExpr. + MOI.Nonlinear.parse_expression(data, expr, arg, parent_node) + end + end + return +end + +function _get_node_type(data, x::GenericNonlinearExpr) + id = get(data.operators.univariate_operator_to_id, x.head, nothing) + if length(x.args) == 1 && id !== nothing + return id, MOI.Nonlinear.NODE_CALL_UNIVARIATE + end + id = get(data.operators.multivariate_operator_to_id, x.head, nothing) + if id !== nothing + return id, MOI.Nonlinear.NODE_CALL_MULTIVARIATE + end + id = get(data.operators.comparison_operator_to_id, x.head, nothing) + if id !== nothing + return id, MOI.Nonlinear.NODE_COMPARISON + end + id = get(data.operators.logic_operator_to_id, x.head, nothing) + if id !== nothing + return id, MOI.Nonlinear.NODE_LOGIC + end + return throw(MOI.UnsupportedNonlinearOperator(x.head)) +end + +function _parse_without_recursion_inner( + stack, + data, + expr, + x::GenericNonlinearExpr, + parent, +) + id, node_type = _get_node_type(data, x) + push!(expr.nodes, MOI.Nonlinear.Node(node_type, id, parent)) + parent = length(expr.nodes) + # Args need to be pushed onto the stack in reverse + for i in length(x.args):-1:1 + push!(stack, (parent, x.args[i])) + end + return +end + +# Method definitions + +function Base.zero(::Type{GenericNonlinearExpr{V}}) where {V} + return GenericNonlinearExpr{V}(:+, 0.0) +end + +function Base.one(::Type{GenericNonlinearExpr{V}}) where {V} + return GenericNonlinearExpr{V}(:+, 1.0) +end + +# Univariate operators + +_is_real(::Any) = false +_is_real(::Real) = true +_is_real(::AbstractVariableRef) = true +_is_real(::GenericAffExpr{<:Real}) = true +_is_real(::GenericQuadExpr{<:Real}) = true +_is_real(::GenericNonlinearExpr) = true +_is_real(::NonlinearExpression) = true +_is_real(::NonlinearParameter) = true + +function _throw_if_not_real(x) + if !_is_real(x) + error( + "Cannot build `GenericNonlinearExpr` because a term is " * + "complex-valued: `($x)::$(typeof(x))`", + ) + end + return +end + +for f in MOI.Nonlinear.DEFAULT_UNIVARIATE_OPERATORS + op = Meta.quot(f) + if f == :+ + continue # We don't need this. 
+ elseif f == :- + @eval function Base.:-(x::GenericNonlinearExpr{V}) where {V} + return GenericNonlinearExpr{V}(:-, x) + end + elseif isdefined(Base, f) + @eval function Base.$(f)(x::AbstractJuMPScalar) + _throw_if_not_real(x) + return GenericNonlinearExpr{variable_ref_type(x)}($op, x) + end + elseif isdefined(MOI.Nonlinear, :SpecialFunctions) + # The operator is defined in some other package. + SF = MOI.Nonlinear.SpecialFunctions + if isdefined(SF, f) + @eval function $(SF).$(f)(x::AbstractJuMPScalar) + _throw_if_not_real(x) + return GenericNonlinearExpr{variable_ref_type(x)}($op, x) + end + end + end +end + +# Multivariate operators + +# The multivariate operators in MOI are +, -, *, ^, /, ifelse, atan +# +# However, ifelse is a builtin, so we can't add methods to it. + +# We need only very generic fallbacks for these, because all other cases are +# caught with more specific methods. +for f in (:+, :-, :*, :^, :/, :atan) + op = Meta.quot(f) + @eval begin + function Base.$(f)(x::AbstractJuMPScalar, y::_Constant) + _throw_if_not_real(x) + _throw_if_not_real(y) + rhs = convert(Float64, _constant_to_number(y)) + return GenericNonlinearExpr{variable_ref_type(x)}($op, x, rhs) + end + function Base.$(f)(x::_Constant, y::AbstractJuMPScalar) + _throw_if_not_real(x) + _throw_if_not_real(y) + lhs = convert(Float64, _constant_to_number(x)) + return GenericNonlinearExpr{variable_ref_type(y)}($op, lhs, y) + end + function Base.$(f)(x::AbstractJuMPScalar, y::AbstractJuMPScalar) + _throw_if_not_real(x) + _throw_if_not_real(y) + return GenericNonlinearExpr{variable_ref_type(x)}($op, x, y) + end + end +end + +function _MA.operate!!( + ::typeof(_MA.add_mul), + x::GenericNonlinearExpr, + y::AbstractJuMPScalar, +) + _throw_if_not_real(x) + if x.head == :+ + push!(x.args, y) + return x + end + return +(x, y) +end + +""" + flatten!(expr::GenericNonlinearExpr) + +Flatten a nonlinear expression in-place by lifting nested `+` and `*` nodes into +a single n-ary operation. + +## Motivation + +Nonlinear expressions created using operator overloading can be deeply nested +and unbalanced. For example, `prod(x for i in 1:4)` creates +`*(x, *(x, *(x, x)))` instead of the more preferable `*(x, x, x, x)`. + +## Example + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x) +x + +julia> y = prod(x for i in 1:4) +((x²) * x) * x + +julia> flatten!(y) +(x²) * x * x + +julia> flatten!(sin(prod(x for i in 1:4))) +sin((x²) * x * x) +``` +""" +function flatten!(expr::GenericNonlinearExpr{V}) where {V} + if !any(Base.Fix1(_needs_flatten, expr), expr.args) + return expr + end + stack = Tuple{GenericNonlinearExpr{V},Int,GenericNonlinearExpr{V}}[] + for i in length(expr.args):-1:1 + if _needs_flatten(expr, expr.args[i]) + push!(stack, (expr, i, expr.args[i])) + end + end + while !isempty(stack) + parent, i, arg = pop!(stack) + if parent.head in (:+, :*) && arg.head == parent.head + n = length(parent.args) + resize!(parent.args, n + length(arg.args) - 1) + for j in length(arg.args):-1:1 + parent_index = j == 1 ? 
i : n + j - 1
+                if _needs_flatten(parent, arg.args[j])
+                    push!(stack, (parent, parent_index, arg.args[j]))
+                else
+                    parent.args[parent_index] = arg.args[j]
+                end
+            end
+        else
+            parent.args[i] = arg
+            for j in length(arg.args):-1:1
+                if _needs_flatten(arg, arg.args[j])
+                    push!(stack, (arg, j, arg.args[j]))
+                end
+            end
+        end
+    end
+    return expr
+end
+
+flatten!(expr) = expr
+
+_is_expr(::Any, ::Any) = false
+_is_expr(x::GenericNonlinearExpr, op::Symbol) = x.head == op
+
+_needs_flatten(::GenericNonlinearExpr, ::Any) = false
+
+function _needs_flatten(parent::GenericNonlinearExpr, arg::GenericNonlinearExpr)
+    if _is_expr(parent, :+)
+        return _is_expr(arg, :+)
+    elseif _is_expr(parent, :*)
+        return _is_expr(arg, :*)
+    else
+        # Heuristic: we decide to flatten if `parent` is not a + or * operator,
+        # but if one level down there are + or * nodes. This lets us flatten
+        # sin(+(x, +(y, z))) => sin(+(x, y, z)), but not a more complicated
+        # expression like log(sin(+(x, +(y, z)))).
+        #
+        # If you have a benchmark that requires modifying this code, consider
+        # instead adding `flatten!(::Any; force::Bool)` that would allow the
+        # user to override this decision and flatten the entire tree.
+        return any(Base.Fix2(_is_expr, :+), arg.args) ||
+               any(Base.Fix2(_is_expr, :*), arg.args)
+    end
+end
+
+# JuMP interop
+
+function owner_model(expr::GenericNonlinearExpr)
+    for arg in expr.args
+        if !(arg isa AbstractJuMPScalar)
+            continue
+        end
+        model = owner_model(arg)
+        if model !== nothing
+            return model
+        end
+    end
+    return nothing
+end
+
+function check_belongs_to_model(
+    expr::GenericNonlinearExpr,
+    model::AbstractModel,
+)
+    for arg in expr.args
+        if arg isa AbstractJuMPScalar
+            check_belongs_to_model(arg, model)
+        end
+    end
+    return
+end
+
+moi_function(x::Number) = x
+
+function moi_function(f::GenericNonlinearExpr{V}) where {V}
+    ret = MOI.ScalarNonlinearFunction(f.head, similar(f.args))
+    stack = Tuple{MOI.ScalarNonlinearFunction,Int,GenericNonlinearExpr{V}}[]
+    for i in length(f.args):-1:1
+        if f.args[i] isa GenericNonlinearExpr{V}
+            push!(stack, (ret, i, f.args[i]))
+        else
+            ret.args[i] = moi_function(f.args[i])
+        end
+    end
+    while !isempty(stack)
+        parent, i, arg = pop!(stack)
+        child = MOI.ScalarNonlinearFunction(arg.head, similar(arg.args))
+        parent.args[i] = child
+        for j in length(arg.args):-1:1
+            if arg.args[j] isa GenericNonlinearExpr{V}
+                push!(stack, (child, j, arg.args[j]))
+            else
+                child.args[j] = moi_function(arg.args[j])
+            end
+        end
+    end
+    return ret
+end
+
+function jump_function(model::GenericModel, f::MOI.ScalarNonlinearFunction)
+    V = variable_ref_type(typeof(model))
+    ret = GenericNonlinearExpr{V}(f.head, Any[])
+    stack = Tuple{GenericNonlinearExpr,Any}[]
+    for arg in reverse(f.args)
+        push!(stack, (ret, arg))
+    end
+    while !isempty(stack)
+        parent, arg = pop!(stack)
+        if arg isa MOI.ScalarNonlinearFunction
+            new_ret = GenericNonlinearExpr{V}(arg.head, Any[])
+            push!(parent.args, new_ret)
+            for child in reverse(arg.args)
+                push!(stack, (new_ret, child))
+            end
+        elseif arg isa Number
+            push!(parent.args, arg)
+        else
+            push!(parent.args, jump_function(model, arg))
+        end
+    end
+    return ret
+end
+
+function jump_function_type(
+    model::GenericModel,
+    ::Type{<:MOI.ScalarNonlinearFunction},
+)
+    return GenericNonlinearExpr{variable_ref_type(typeof(model))}
+end
+
+moi_function_type(::Type{<:GenericNonlinearExpr}) = MOI.ScalarNonlinearFunction
+
+function constraint_object(c::NonlinearConstraintRef)
+    nlp = nonlinear_model(c.model)
+    data = nlp.constraints[index(c)]
+    return
ScalarConstraint(jump_function(c.model, data.expression), data.set) +end + +function jump_function(model::GenericModel, expr::MOI.Nonlinear.Expression) + V = variable_ref_type(typeof(model)) + nlp = nonlinear_model(model) + parsed = Vector{Any}(undef, length(expr.nodes)) + adj = MOI.Nonlinear.adjacency_matrix(expr.nodes) + rowvals = SparseArrays.rowvals(adj) + for i in length(expr.nodes):-1:1 + node = expr.nodes[i] + parsed[i] = if node.type == MOI.Nonlinear.NODE_CALL_UNIVARIATE + GenericNonlinearExpr{V}( + nlp.operators.univariate_operators[node.index], + parsed[rowvals[SparseArrays.nzrange(adj, i)[1]]], + ) + elseif node.type == MOI.Nonlinear.NODE_CALL_MULTIVARIATE + GenericNonlinearExpr{V}( + nlp.operators.multivariate_operators[node.index], + Any[parsed[rowvals[j]] for j in SparseArrays.nzrange(adj, i)], + ) + elseif node.type == MOI.Nonlinear.NODE_MOI_VARIABLE + V(model, MOI.VariableIndex(node.index)) + elseif node.type == MOI.Nonlinear.NODE_PARAMETER + NonlinearParameter(model, node.index) + elseif node.type == MOI.Nonlinear.NODE_SUBEXPRESSION + NonlinearExpression(model, node.index) + elseif node.type == MOI.Nonlinear.NODE_VALUE + expr.values[node.index] + else + # node.type == MOI.Nonlinear.NODE_COMPARISON + # node.type == MOI.Nonlinear.NODE_LOGIC + error("Unsupported node") + end + end + return parsed[1] +end + +function value(f::Function, expr::GenericNonlinearExpr) + return _evaluate_expr(MOI.Nonlinear.OperatorRegistry(), f, expr) +end + +function value(a::GenericNonlinearExpr; result::Int = 1) + return value(a) do x + return value(x; result = result) + end +end + +function _evaluate_expr( + ::MOI.Nonlinear.OperatorRegistry, + f::Function, + expr::AbstractJuMPScalar, +) + return value(f, expr) +end + +function _evaluate_expr( + ::MOI.Nonlinear.OperatorRegistry, + ::Function, + expr::Real, +) + return convert(Float64, expr) +end + +function _evaluate_user_defined_function( + registry, + f, + expr::GenericNonlinearExpr, +) + model = owner_model(expr) + op, nargs = expr.head, length(expr.args) + udf = MOI.get(model, MOI.UserDefinedFunction(op, nargs)) + if udf === nothing + return error( + "Unable to evaluate nonlinear operator $op because it was " * + "not added as an operator.", + ) + end + args = [_evaluate_expr(registry, f, arg) for arg in expr.args] + return first(udf)(args...) 
+end + +function _evaluate_expr( + registry::MOI.Nonlinear.OperatorRegistry, + f::Function, + expr::GenericNonlinearExpr, +) + op = expr.head + # TODO(odow): uses private function + if !MOI.Nonlinear._is_registered(registry, op, length(expr.args)) + return _evaluate_user_defined_function(registry, f, expr) + end + if length(expr.args) == 1 && haskey(registry.univariate_operator_to_id, op) + arg = _evaluate_expr(registry, f, expr.args[1]) + return MOI.Nonlinear.eval_univariate_function(registry, op, arg) + elseif haskey(registry.multivariate_operator_to_id, op) + args = [_evaluate_expr(registry, f, arg) for arg in expr.args] + return MOI.Nonlinear.eval_multivariate_function(registry, op, args) + elseif haskey(registry.logic_operator_to_id, op) + @assert length(expr.args) == 2 + x = _evaluate_expr(registry, f, expr.args[1]) + y = _evaluate_expr(registry, f, expr.args[2]) + return MOI.Nonlinear.eval_logic_function(registry, op, x, y) + else + @assert haskey(registry.comparison_operator_to_id, op) + @assert length(expr.args) == 2 + x = _evaluate_expr(registry, f, expr.args[1]) + y = _evaluate_expr(registry, f, expr.args[2]) + return MOI.Nonlinear.eval_comparison_function(registry, op, x, y) + end +end + +# MutableArithmetics.jl + +# These converts are used in the {add,sub}mul definition for AbstractJuMPScalar. + +Base.convert(::Type{<:GenericNonlinearExpr}, x::AbstractVariableRef) = x + +function Base.convert( + ::Type{<:GenericNonlinearExpr}, + x::GenericAffExpr{C,V}, +) where {C,V} + args = Any[] + for (variable, coef) in x.terms + if isone(coef) + push!(args, variable) + elseif !iszero(coef) + push!(args, GenericNonlinearExpr{V}(:*, coef, variable)) + end + end + if !iszero(x.constant) || isempty(args) + push!(args, x.constant) + end + if length(args) == 1 + return args[1] + end + return GenericNonlinearExpr{V}(:+, args) +end + +function Base.convert( + ::Type{<:GenericNonlinearExpr}, + x::GenericQuadExpr{C,V}, +) where {C,V} + args = Any[] + for (variable, coef) in x.aff.terms + if isone(coef) + push!(args, variable) + elseif !iszero(coef) + push!(args, GenericNonlinearExpr{V}(:*, coef, variable)) + end + end + for (pair, coef) in x.terms + if isone(coef) + push!(args, GenericNonlinearExpr{V}(:*, pair.a, pair.b)) + elseif !iszero(coef) + push!(args, GenericNonlinearExpr{V}(:*, coef, pair.a, pair.b)) + end + end + if !iszero(x.aff.constant) || isempty(args) + push!(args, x.aff.constant) + end + if length(args) == 1 + return args[1] + end + return GenericNonlinearExpr{V}(:+, args) +end + +function _MA.promote_operation( + ::Union{typeof(+),typeof(-),typeof(*)}, + ::Type{GenericNonlinearExpr{V}}, + ::Type{<:AbstractJuMPScalar}, +) where {V<:AbstractVariableRef} + return GenericNonlinearExpr{V} +end + +function _MA.promote_operation( + ::Union{typeof(+),typeof(-),typeof(*)}, + ::Type{<:AbstractJuMPScalar}, + ::Type{GenericNonlinearExpr{V}}, +) where {V<:AbstractVariableRef} + return GenericNonlinearExpr{V} +end + +function _MA.promote_operation( + ::Union{typeof(+),typeof(-),typeof(*)}, + ::Type{GenericNonlinearExpr{V}}, + ::Type{GenericNonlinearExpr{V}}, +) where {V<:AbstractVariableRef} + return GenericNonlinearExpr{V} +end + +function _MA.promote_operation( + ::Union{typeof(+),typeof(-),typeof(*)}, + ::Type{GenericNonlinearExpr{U}}, + ::Type{GenericNonlinearExpr{V}}, +) where {U<:AbstractVariableRef,V<:AbstractVariableRef} + return error( + "Unable to promote two different types of nonlinear expression", + ) +end + +""" + NonlinearOperator(head::Symbol, func::Function) + +A callable struct 
(functor) representing a function named `head`. + +When called with [`AbstractJuMPScalar`](@ref)s, the struct returns a +[`GenericNonlinearExpr`](@ref). + +When called with non-JuMP types, the struct returns the evaluation of +`func(args...)`. + +Unless `head` is special-cased by the optimizer, the operator must have already +been added to the model using [`add_nonlinear_operator`](@ref) or +[`@operator`](@ref). + +## Example + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x) +x + +julia> f(x::Float64) = x^2 +f (generic function with 1 method) + +julia> ∇f(x::Float64) = 2 * x +∇f (generic function with 1 method) + +julia> ∇²f(x::Float64) = 2.0 +∇²f (generic function with 1 method) + +julia> @operator(model, op_f, 1, f, ∇f, ∇²f) +NonlinearOperator(:op_f, f) + +julia> bar = NonlinearOperator(:op_f, f) +NonlinearOperator(:op_f, f) + +julia> @objective(model, Min, bar(x)) +op_f(x) + +julia> bar(2.0) +4.0 +``` +""" +struct NonlinearOperator{F} + head::Symbol + func::F +end + +# Make it so that we don't print the complicated type parameter +function Base.show(io::IO, f::NonlinearOperator) + return print(io, "NonlinearOperator(:$(f.head), $(f.func))") +end + +# Fast overload for unary calls + +(f::NonlinearOperator)(x) = f.func(x) + +(f::NonlinearOperator)(x::AbstractJuMPScalar) = NonlinearExpr(f.head, Any[x]) + +# Fast overload for binary calls + +(f::NonlinearOperator)(x, y) = f.func(x, y) + +function (f::NonlinearOperator)(x::AbstractJuMPScalar, y) + return GenericNonlinearExpr(f.head, Any[x, y]) +end + +function (f::NonlinearOperator)(x, y::AbstractJuMPScalar) + return GenericNonlinearExpr(f.head, Any[x, y]) +end + +function (f::NonlinearOperator)(x::AbstractJuMPScalar, y::AbstractJuMPScalar) + return GenericNonlinearExpr(f.head, Any[x, y]) +end + +# Fallback for more arguments +function (f::NonlinearOperator)(x, y, z...) + args = (x, y, z...) + if any(Base.Fix2(isa, AbstractJuMPScalar), args) + return GenericNonlinearExpr(f.head, Any[a for a in args]) + end + return f.func(args...) +end + +""" + add_nonlinear_operator( + model::Model, + dim::Int, + f::Function, + [∇f::Function,] + [∇²f::Function]; + [name::Symbol = Symbol(f),] + ) + +Add a new nonlinear operator with `dim` input arguments to `model` and +associate it with the name `name`. + +The function `f` evaluates the operator and must return a scalar. + +The optional function `∇f` evaluates the first derivative, and the optional +function `∇²f` evaluates the second derivative. + +`∇²f` may be provided only if `∇f` is also provided. + +## Univariate syntax + +If `dim == 1`, then the method signatures of each function must be: + + * `f(::T)::T where {T<:Real}` + * `∇f(::T)::T where {T<:Real}` + * `∇²f(::T)::T where {T<:Real}` + +## Multivariate syntax + +If `dim > 1`, then the method signatures of each function must be: + + * `f(x::T...)::T where {T<:Real}` + * `∇f(g::AbstractVector{T}, x::T...)::Nothing where {T<:Real}` + * `∇²f(H::AbstractMatrix{T}, x::T...)::Nothing where {T<:Real}` + +Where the gradient vector `g` and Hessian matrix `H` are filled in-place. For +the Hessian, you must fill in the non-zero lower-triangular entries only. +Setting an off-diagonal upper-triangular element may error. 
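+
+As an illustrative sketch (the two-argument operator and the names `f2`, `∇f2`,
+and `∇²f2` here are hypothetical, not part of the API), the in-place
+conventions look like:
+
+```julia
+f2(x...) = x[1]^2 + x[2]^2
+function ∇f2(g::AbstractVector, x...)
+    g[1] = 2 * x[1]
+    g[2] = 2 * x[2]
+    return
+end
+function ∇²f2(H::AbstractMatrix, x...)
+    # Fill the non-zero lower-triangular entries only; the off-diagonal
+    # entries of this Hessian are zero, so they are left unset.
+    H[1, 1] = 2.0
+    H[2, 2] = 2.0
+    return
+end
+```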
+
+## Example
+
+```jldoctest
+julia> model = Model();
+
+julia> @variable(model, x)
+x
+
+julia> f(x::Float64) = x^2
+f (generic function with 1 method)
+
+julia> ∇f(x::Float64) = 2 * x
+∇f (generic function with 1 method)
+
+julia> ∇²f(x::Float64) = 2.0
+∇²f (generic function with 1 method)
+
+julia> op_f = add_nonlinear_operator(model, 1, f, ∇f, ∇²f)
+NonlinearOperator(:f, f)
+
+julia> @objective(model, Min, op_f(x))
+f(x)
+
+julia> op_f(2.0)
+4.0
+```
+"""
+function add_nonlinear_operator(
+    model::GenericModel,
+    dim::Int,
+    f::Function,
+    args::Vararg{Function,N};
+    name::Symbol = Symbol(f),
+) where {N}
+    nargs = 1 + N
+    if !(1 <= nargs <= 3)
+        error(
+            "Unable to add operator $name: invalid number of functions " *
+            "provided. Got $nargs, but expected 1 (if function only), 2 (if " *
+            "function and gradient), or 3 (if function, gradient, and " *
+            "hessian provided)",
+        )
+    end
+    # TODO(odow): we could add other checks here, but we won't for now because
+    # down-stream solvers in MOI can add their own checks, and any solver using
+    # MOI.Nonlinear will automatically check for autodiff and common mistakes
+    # and throw a nice informative error.
+    MOI.set(model, MOI.UserDefinedFunction(name, dim), tuple(f, args...))
+    return NonlinearOperator(name, f)
+end
+
+function _catch_redefinition_constant_error(op::Symbol, f::Function)
+    if op == Symbol(f)
+        error("""
+        Unable to add the nonlinear operator `:$op` with the same name as
+        an existing function.
+
+        For example, this code will error:
+        ```julia
+        model = Model()
+        f(x) = x^2
+        @operator(model, f, 1, f)
+        ```
+        because it is equivalent to:
+        ```julia
+        model = Model()
+        f(x) = x^2
+        f = add_nonlinear_operator(model, 1, f; name = :f)
+        ```
+
+        To fix, use a unique name, like `op_$op`:
+        ```julia
+        model = Model()
+        f(x) = x^2
+        @operator(model, op_f, 1, f)
+        @expression(model, op_f(x))
+        ```
+        """)
+    end
+    return
+end
+
+"""
+    @operator(model, operator, dim, f[, ∇f[, ∇²f]])
+
+Add the nonlinear operator `operator` in `model` with `dim` arguments, and
+create a new [`NonlinearOperator`](@ref) object called `operator` in the current
+scope.
+
+The function `f` evaluates the operator and must return a scalar.
+
+The optional function `∇f` evaluates the first derivative, and the optional
+function `∇²f` evaluates the second derivative.
+
+`∇²f` may be provided only if `∇f` is also provided.
+
+## Univariate syntax
+
+If `dim == 1`, then the method signatures of each function must be:
+
+ * `f(::T)::T where {T<:Real}`
+ * `∇f(::T)::T where {T<:Real}`
+ * `∇²f(::T)::T where {T<:Real}`
+
+## Multivariate syntax
+
+If `dim > 1`, then the method signatures of each function must be:
+
+ * `f(x::T...)::T where {T<:Real}`
+ * `∇f(g::AbstractVector{T}, x::T...)::Nothing where {T<:Real}`
+ * `∇²f(H::AbstractMatrix{T}, x::T...)::Nothing where {T<:Real}`
+
+Where the gradient vector `g` and Hessian matrix `H` are filled in-place. For
+the Hessian, you must fill in the non-zero lower-triangular entries only.
+Setting an off-diagonal upper-triangular element may error.
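+
+For the multivariate case, one possible sketch (the names `g` and `op_g` here
+are hypothetical) passes a function accepting splatted arguments; when `∇f` and
+`∇²f` are omitted, derivatives are obtained by automatic differentiation:
+
+```julia
+g(x...) = x[1]^2 + x[2]^2
+@operator(model, op_g, 2, g)
+```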
+ +## Example + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x) +x + +julia> f(x::Float64) = x^2 +f (generic function with 1 method) + +julia> ∇f(x::Float64) = 2 * x +∇f (generic function with 1 method) + +julia> ∇²f(x::Float64) = 2.0 +∇²f (generic function with 1 method) + +julia> @operator(model, op_f, 1, f, ∇f, ∇²f) +NonlinearOperator(:op_f, f) + +julia> @objective(model, Min, op_f(x)) +op_f(x) + +julia> op_f(2.0) +4.0 + +julia> model[:op_f] +NonlinearOperator(:op_f, f) + +julia> model[:op_f](x) +op_f(x) +``` + +## Non-macro version + +This macro is provided as helpful syntax that matches the style of the rest of +the JuMP macros. However, you may also add operators outside the macro +using [`add_nonlinear_operator`](@ref). For example: + +```jldoctest +julia> model = Model(); + +julia> f(x) = x^2 +f (generic function with 1 method) + +julia> @operator(model, op_f, 1, f) +NonlinearOperator(:op_f, f) +``` +is equivalent to +```jldoctest +julia> model = Model(); + +julia> f(x) = x^2 +f (generic function with 1 method) + +julia> op_f = model[:op_f] = add_nonlinear_operator(model, 1, f; name = :op_f) +NonlinearOperator(:op_f, f) +``` +""" +macro operator(model, op, dim, f, args...) + return _macro_assign_and_return( + quote + _catch_redefinition_constant_error($(Meta.quot(op)), $(esc(f))) + add_nonlinear_operator( + $(esc(model)), + $(esc(dim)), + $(esc(f)), + $(esc.(args)...); + name = $(Meta.quot(op)), + ) + end, + gensym(), + op; + model_for_registering = esc(model), + ) +end + +function jump_function_type( + ::GenericModel{T}, + ::Type{MOI.VectorNonlinearFunction}, +) where {T} + return Vector{GenericNonlinearExpr{GenericVariableRef{T}}} +end + +function jump_function( + model::GenericModel{T}, + f::MOI.VectorNonlinearFunction, +) where {T} + return GenericNonlinearExpr{GenericVariableRef{T}}[ + jump_function(model, fi) for fi in MOI.Utilities.eachscalar(f) + ] +end + +# We use `AbstractJuMPScalar` as a catch-all fallback for any mix of JuMP +# scalars that have not been dispatched by some other method. + +function moi_function_type(::Type{<:Vector{<:AbstractJuMPScalar}}) + return MOI.VectorNonlinearFunction +end + +function moi_function(f::Vector{<:AbstractJuMPScalar}) + return MOI.VectorNonlinearFunction(f) +end + +function MOI.VectorNonlinearFunction(f::Vector{<:AbstractJuMPScalar}) + return MOI.VectorNonlinearFunction(map(moi_function, f)) +end diff --git a/src/operators.jl b/src/operators.jl index 1d27e708e17..311afb73714 100644 --- a/src/operators.jl +++ b/src/operators.jl @@ -172,9 +172,6 @@ function Base.:*( end return result end -function Base.:/(lhs::AbstractVariableRef, rhs::GenericAffExpr) - return error("Cannot divide a variable by an affine expression") -end # AbstractVariableRef--GenericQuadExpr function Base.:+(v::AbstractVariableRef, q::GenericQuadExpr) return GenericQuadExpr(v + q.aff, copy(q.terms)) @@ -207,11 +204,7 @@ function Base.:^(lhs::AbstractVariableRef, rhs::Integer) elseif rhs == 0 return one(GenericQuadExpr{T,variable_ref_type(lhs)}) else - error( - "Only exponents of 0, 1, or 2 are currently supported. Are you " * - "trying to build a nonlinear problem? Make sure you use " * - "@NLconstraint/@NLobjective.", - ) + return GenericNonlinearExpr(:^, Any[lhs, rhs]) end end @@ -223,19 +216,10 @@ function Base.:^(lhs::GenericAffExpr{T}, rhs::Integer) where {T} elseif rhs == 0 return one(GenericQuadExpr{T,variable_ref_type(lhs)}) else - error( - "Only exponents of 0, 1, or 2 are currently supported. 
Are you " * - "trying to build a nonlinear problem? Make sure you use " * - "@NLconstraint/@NLobjective.", - ) + return GenericNonlinearExpr(:^, Any[lhs, rhs]) end end -function Base.:^(lhs::Union{AbstractVariableRef,GenericAffExpr}, rhs::_Constant) - return error( - "Only exponents of 0, 1, or 2 are currently supported. Are you trying to build a nonlinear problem? Make sure you use @NLconstraint/@NLobjective.", - ) -end # GenericAffExpr--AbstractVariableRef function Base.:+( lhs::GenericAffExpr{C,V}, @@ -265,9 +249,6 @@ function Base.:*( end return result end -function Base.:/(lhs::GenericAffExpr, rhs::AbstractVariableRef) - return error("Cannot divide affine expression by a variable") -end # AffExpr--AffExpr _copy_convert_coef(::Type{C}, aff::GenericAffExpr{C}) where {C} = copy(aff) @@ -374,12 +355,6 @@ end function Base.:-(q::GenericQuadExpr, v::AbstractVariableRef) return GenericQuadExpr(q.aff - v, copy(q.terms)) end -function Base.:*(q::GenericQuadExpr, v::AbstractVariableRef) - return error("Cannot multiply a quadratic expression by a variable") -end -function Base.:/(q::GenericQuadExpr, v::AbstractVariableRef) - return error("Cannot divide a quadratic expression by a variable") -end # GenericQuadExpr--GenericAffExpr function Base.:+(q::GenericQuadExpr, a::GenericAffExpr) return GenericQuadExpr(q.aff + a, copy(q.terms)) @@ -387,12 +362,6 @@ end function Base.:-(q::GenericQuadExpr, a::GenericAffExpr) return GenericQuadExpr(q.aff - a, copy(q.terms)) end -function Base.:*(q::GenericQuadExpr, a::GenericAffExpr) - return error("Cannot multiply a quadratic expression by an aff. expression") -end -function Base.:/(q::GenericQuadExpr, a::GenericAffExpr) - return error("Cannot divide a quadratic expression by an aff. expression") -end # GenericQuadExpr--GenericQuadExpr function Base.:+(q1::GenericQuadExpr{S}, q2::GenericQuadExpr{T}) where {S,T} result = _copy_convert_coef(_MA.promote_operation(+, S, T), q1) @@ -483,59 +452,3 @@ function LinearAlgebra.issymmetric(x::Matrix{T}) where {T<:_JuMPTypes} end return true end - -############################################################################### -# nonlinear function fallbacks for JuMP built-in types -############################################################################### - -const _OP_HINT = - "Are you trying to build a nonlinear problem? Make sure you use " * - "@NLconstraint/@NLobjective. If you are using an `@NL` macro and you " * - "encountered this error message, it is because you are attempting to use " * - "another unsupported function which calls this method internally." - -for f in MOI.Nonlinear.DEFAULT_UNIVARIATE_OPERATORS - if f in (:+, :-, :abs) || !isdefined(Base, f) - continue - end - for T in (:AbstractVariableRef, :GenericAffExpr, :GenericQuadExpr) - if f == :abs2 && (T == :AbstractVariableRef || T == :GenericAffExpr) - continue - end - error_string = "$f is not defined for type $T. $_OP_HINT" - @eval Base.$(f)(::$T) = error($error_string) - end -end - -function Base.:*( - ::T, - ::S, -) where { - T<:GenericQuadExpr, - S<:Union{AbstractVariableRef,GenericAffExpr,GenericQuadExpr}, -} - return error("*(::$T,::$S) is not defined. $_OP_HINT") -end -function Base.:*(lhs::GenericQuadExpr, rhs::GenericQuadExpr) - return error( - "*(::GenericQuadExpr,::GenericQuadExpr) is not defined. $_OP_HINT", - ) -end -function Base.:*( - ::S, - ::T, -) where { - T<:GenericQuadExpr, - S<:Union{AbstractVariableRef,GenericAffExpr,GenericQuadExpr}, -} - return error("*(::$S,::$T) is not defined. 
$_OP_HINT") -end -function Base.:/( - ::S, - ::T, -) where { - S<:Union{_Constant,AbstractVariableRef,GenericAffExpr,GenericQuadExpr}, - T<:Union{AbstractVariableRef,GenericAffExpr,GenericQuadExpr}, -} - return error("/(::$S,::$T) is not defined. $_OP_HINT") -end diff --git a/src/optimizer_interface.jl b/src/optimizer_interface.jl index 2a4db592806..89b3f40c2ea 100644 --- a/src/optimizer_interface.jl +++ b/src/optimizer_interface.jl @@ -415,6 +415,14 @@ function optimize!( # The nlp_model is not kept in sync, so re-set it here. # TODO: Consider how to handle incremental solves. if nonlinear_model(model) !== nothing + if _uses_new_nonlinear_interface(model) + error( + "Cannot optimize a model which contains the features from " * + "both the legacy (macros beginning with `@NL`) and new " * + "(`NonlinearExpr`) nonlinear interfaces. You must use one or " * + "the other.", + ) + end evaluator = MOI.Nonlinear.Evaluator( nonlinear_model(model), _differentiation_backend, @@ -454,6 +462,18 @@ function optimize!( return end +function _uses_new_nonlinear_interface(model) + if objective_function_type(model) <: GenericNonlinearExpr + return true + end + for (F, S) in list_of_constraint_types(model) + if F <: GenericNonlinearExpr + return true + end + end + return false +end + """ compute_conflict!(model::GenericModel) diff --git a/src/quad_expr.jl b/src/quad_expr.jl index 4361de2d944..9d6fbf0ac49 100644 --- a/src/quad_expr.jl +++ b/src/quad_expr.jl @@ -53,6 +53,17 @@ end variable_ref_type(::Type{GenericQuadExpr{C,V}}) where {C,V} = V +function owner_model(x::GenericQuadExpr) + model = owner_model(x.aff) + if model !== nothing + return model + elseif !isempty(x.terms) + pair = first(keys(x.terms)) + return owner_model(pair.a) + end + return nothing +end + """ GenericQuadExpr( aff::GenericAffExpr{V,K}, diff --git a/src/variables.jl b/src/variables.jl index 56a3d7bce8c..f6cd0c0be2f 100644 --- a/src/variables.jl +++ b/src/variables.jl @@ -2008,51 +2008,67 @@ end ### Error messages for common incorrect usages ### +function _logic_error_exception(sym::Symbol) + return ErrorException( + """ +Cannot evaluate `$(sym)` between a variable and a number. + +There are three common mistakes that lead to this. + + 1. You tried to write a constraint that depends on the value of a variable, for + example: + ```julia + model = Model() + @variable(model, x[1:2]) + if x[1] $(sym) 1 + @constraint(model, x[2] == 0) + end + ``` + You cannot write a model like this. You must formulate your problem as a + single optimization problem. Unfortunately, the way to do this is + problem-specific and depends on your choice of solver. You may be able to + use indicator constraints, or some other mixed-integer linear + reformulation. If stuck, post your problem on the community forum: + https://jump.dev/forum + + 2. You wrote a function that expected the value of a variable, but passed the + variable instead. For example: + ```julia + foo(x) = x $(sym) 1 ? 0 : 1 - x + model = Model() + @variable(model, x) + @expression(model, foo(x)) + ``` + To fix, create a nonlinear model with a user-defined operator: + ```julia + foo(x) = x $(sym) 1 ? 0 : 1 - x + model = Model() + @operator(model, op_f, 1, foo) + @variable(model, x) + @expression(model, op_f(x)) + ``` + + 3. 
You tried to create a logical nonlinear expression outside a macro, for
+     example:
+     ```julia
+     model = Model()
+     @variable(model, x)
+     expr = x $sym 1
+     ```
+     To fix, wrap the expression in the [`@expression`](@ref) macro:
+     ```julia
+     model = Model()
+     @variable(model, x)
+     expr = @expression(model, x $sym 1)
+     ```
+    """,
+    )
+end
+
for sym in (:(<=), :(>=), :(<), :(>))
-    msg = """Cannot evaluate `$(sym)` between a variable and a number.
-
-    There are two common mistakes that lead to this.
-
-    * You tried to write a constraint that depends on the value of a variable
-
-      For example:
-      ```julia
-      model = Model()
-      @variable(model, x[1:2])
-      if x[1] $(sym) 1
-          @constraint(model, x[2] == 0)
-      end
-      ```
-
-      You cannot write a model like this. You must formulate your problem as a
-      single optimization problem. Unfortunately, the way to do this is
-      problem-specific and depends on your choice of solver. You may be able to
-      use indicator constraints, or some other mixed-integer linear
-      reformulation. If stuck, post your problem on the community forum:
-      https://jump.dev/forum
-
-    * You wrote a function that expected the value of a variable, but it was
-      passed the variable instead
-
-      For example:
-      ```julia
-      foo(x) = x $(sym) 1 ? 0 : 1 - x
-      model = Model()
-      @variable(model, x)
-      @objective(model, foo(x))
-      ```
-
-      To fix this, create a nonlinear model with a user-defined function:
-      ```julia
-      foo(x) = x $(sym) 1 ? 0 : 1 - x
-      model = Model()
-      register(model, :foo, 1, foo; autodiff = true)
-      @variable(model, x)
-      @NLobjective(model, foo(x))
-      ```
-    """
+    err = _logic_error_exception(sym)
    @eval begin
-        Base.$(sym)(::GenericVariableRef, ::Number) = error($(msg))
-        Base.$(sym)(::Number, ::GenericVariableRef) = error($(msg))
+        Base.$(sym)(::GenericVariableRef, ::Number) = throw($err)
+        Base.$(sym)(::Number, ::GenericVariableRef) = throw($err)
    end
end
diff --git a/test/perf/NonlinearBenchmark.jl b/test/perf/NonlinearBenchmark.jl
new file mode 100644
index 00000000000..2ce3e66c49a
--- /dev/null
+++ b/test/perf/NonlinearBenchmark.jl
@@ -0,0 +1,543 @@
+module NonlinearBenchmark
+
+using JuMP
+import BenchmarkTools
+import DataFrames
+import Ipopt
+import Random
+
+function benchmark_group()
+    lookup = Dict("perf_nl_" => "@NL", "perf_nlexpr_" => "NonlinearExpr")
+    suite = BenchmarkTools.BenchmarkGroup()
+    for v in values(lookup)
+        suite[v] = BenchmarkTools.BenchmarkGroup()
+    end
+    for name in names(@__MODULE__; all = true)
+        f = getfield(@__MODULE__, name)
+        for (k, v) in lookup
+            if startswith("$name", k)
+                fname = replace("$name", k => "")
+                suite[v][fname] = BenchmarkTools.@benchmarkable $f()
+                break
+            end
+        end
+    end
+    return suite
+end
+
+function runbenchmarks()
+    suite = benchmark_group()
+    results = BenchmarkTools.run(suite)
+    df_time = build_table(x -> minimum(x).time / 1e9, results)
+    df_memory = build_table(x -> minimum(x).memory / 1024^2, results)
+    @info "minimum(time) [s]"
+    display(df_time)
+    @info "minimum(memory) [MiB]"
+    display(df_memory)
+    return results
+end
+
+function build_table(f, results)
+    tables = map(sort!(collect(keys(results["NonlinearExpr"])))) do b
+        old = f(results["@NL"][b])
+        new = f(results["NonlinearExpr"][b])
+        return (benchmark = b, NL = old, NonlinearExpr = new, ratio = new / old)
+    end
+    return DataFrames.DataFrame(tables)
+end
+
+# sum
+#
+# nlexpr is slower because it builds up the sum via operator overloading,
+# creating a lot of temporary objects. @NL gets to see the full +(args...) so it
+# builds the expression in-place.
+#
+# We could fix this by implementing an n-ary method for +, but that gets
+# difficult with method ambiguities.
+
+function perf_nl_micro_sum()
+    model = Model()
+    @variable(model, x)
+    @NLobjective(model, Min, sum(x^i for i in 1:10_000))
+    return
+end
+
+function perf_nlexpr_micro_sum()
+    model = Model()
+    @variable(model, x)
+    @objective(model, Min, sum(x^i for i in 1:10_000))
+    return
+end
+
+# prod
+#
+# nlexpr is slower because it builds up the product via operator overloading,
+# creating a lot of temporary objects. @NL gets to see the full *(args...) so it
+# builds the expression in-place.
+#
+# We could fix this by implementing an n-ary method for *, but that gets
+# difficult with method ambiguities.
+
+function perf_nl_micro_prod()
+    model = Model()
+    @variable(model, x)
+    @NLobjective(model, Min, prod(x^i for i in 1:10_000))
+    return
+end
+
+function perf_nlexpr_micro_prod()
+    model = Model()
+    @variable(model, x)
+    @objective(model, Min, prod(x^i for i in 1:10_000))
+    return
+end
+
+# many_constraints
+
+function perf_nl_micro_many_constraints()
+    model = Model()
+    @variable(model, x[1:10_000])
+    @NLconstraint(model, [i = 1:10_000], sin(x[i]) <= cos(i))
+    return
+end
+
+function perf_nlexpr_micro_many_constraints()
+    model = Model()
+    @variable(model, x[1:10_000])
+    @constraint(model, [i = 1:10_000], sin(x[i]) <= cos(i))
+    return
+end
+
+# value_expr_many_small
+
+function perf_nl_micro_value_expr_many_small()
+    model = Model()
+    @variable(model, x)
+    @NLexpression(model, expr[i = 1:10_000], x^i)
+    value.(x -> 2.0, expr)
+    return
+end
+
+function perf_nlexpr_micro_value_expr_many_small()
+    model = Model()
+    @variable(model, x)
+    @expression(model, expr[i = 1:10_000], x^i)
+    value.(x -> 2.0, expr)
+    return
+end
+
+# value_expr_few_large
+
+function perf_nl_micro_value_expr_few_large()
+    model = Model()
+    @variable(model, x)
+    @NLexpression(model, expr, sum(x^i for i in 1:10_000))
+    value(x -> 2.0, expr)
+    return
+end
+
+function perf_nlexpr_micro_value_expr_few_large()
+    model = Model()
+    @variable(model, x)
+    @expression(model, expr, sum(x^i for i in 1:10_000))
+    value(x -> 2.0, expr)
+    return
+end
+
+# mle
+
+function perf_nl_model_mle()
+    Random.seed!(1234)
+    n = 1_000
+    data = randn(n)
+    model = Model(Ipopt.Optimizer)
+    set_silent(model)
+    @variable(model, μ, start = 0.0)
+    @variable(model, σ >= 0.0, start = 1.0)
+    @NLobjective(
+        model,
+        Max,
+        n / 2 * log(1 / (2 * π * σ^2)) -
+        sum((data[i] - μ)^2 for i in 1:n) / (2 * σ^2)
+    )
+    optimize!(model)
+    return
+end
+
+function perf_nlexpr_model_mle()
+    Random.seed!(1234)
+    n = 1_000
+    data = randn(n)
+    model = Model(Ipopt.Optimizer)
+    set_silent(model)
+    @variable(model, μ, start = 0.0)
+    @variable(model, σ >= 0.0, start = 1.0)
+    @objective(
+        model,
+        Max,
+        n / 2 * log(1 / (2 * π * σ^2)) -
+        sum((data[i] - μ)^2 for i in 1:n) / (2 * σ^2)
+    )
+    optimize!(model)
+    return
+end
+
+# clnlbeam
+
+function perf_nl_model_clnlbeam()
+    N = 1000
+    h = 1 / N
+    alpha = 350
+    model = Model(Ipopt.Optimizer)
+    set_silent(model)
+    @variables(model, begin
+        -1 <= t[1:(N+1)] <= 1
+        -0.05 <= x[1:(N+1)] <= 0.05
+        u[1:(N+1)]
+    end)
+    @NLobjective(
+        model,
+        Min,
+        sum(
+            0.5 * h * (u[i+1]^2 + u[i]^2) +
+            0.5 * alpha * h * (cos(t[i+1]) + cos(t[i])) for i in 1:N
+        ),
+    )
+    @NLconstraint(
+        model,
+        [i = 1:N],
+        x[i+1] - x[i] - 0.5 * h * (sin(t[i+1]) + sin(t[i])) == 0,
+    )
+    @constraint(
+        model,
+        [i = 1:N],
+        t[i+1] - t[i] - 0.5 * h * u[i+1] - 0.5 * h * u[i] == 0,
+    )
+    optimize!(model)
+    return
+end
+
+function perf_nlexpr_model_clnlbeam()
+    N
= 1000 + h = 1 / N + alpha = 350 + model = Model(Ipopt.Optimizer) + set_silent(model) + @variables(model, begin + -1 <= t[1:(N+1)] <= 1 + -0.05 <= x[1:(N+1)] <= 0.05 + u[1:(N+1)] + end) + @objective( + model, + Min, + sum( + 0.5 * h * (u[i+1]^2 + u[i]^2) + + 0.5 * alpha * h * (cos(t[i+1]) + cos(t[i])) for i in 1:N + ), + ) + @constraint( + model, + [i = 1:N], + x[i+1] - x[i] - 0.5 * h * (sin(t[i+1]) + sin(t[i])) == 0, + ) + @constraint( + model, + [i = 1:N], + t[i+1] - t[i] - 0.5 * h * u[i+1] - 0.5 * h * u[i] == 0, + ) + optimize!(model) + return +end + +# rosenbrock + +function perf_nl_model_rosenbrock() + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, x) + @variable(model, y) + @NLobjective(model, Min, (1 - x)^2 + 100 * (y - x^2)^2) + optimize!(model) + return +end + +function perf_nlexpr_model_rosenbrock() + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, x) + @variable(model, y) + @objective(model, Min, (1 - x)^2 + 100 * (y - x^2)^2) + optimize!(model) + return +end + +# JuMP#2788 + +function perf_nl_model_jump_2788() + N = 400 + Random.seed!(1234) + k = N + n = 12 + p = rand(400:700, k, 1) + c1 = rand(100:200, k, n) + c2 = 0.9 .* c1 + b = rand(150:250, k, 1) + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, 0 <= x[i = 1:n] <= 1) + @variable(model, 0 <= var1 <= 1) + @variable(model, 0 <= var2 <= 1) + @variable(model, 0 <= var3 <= 1) + @objective(model, Max, var1 - var2 + var3) + @NLexpression(model, expr, sum(x[i] * p[i] for i in 1:n)) + @NLexpression(model, expr_c1[j = 1:k], sum(x[i] * c1[j, i] for i in 1:n)) + @NLexpression(model, expr_c2[j = 1:k], sum(x[i] * c2[j, i] for i in 1:n)) + @NLconstraint(model, expr == sum(b[j] / (1 + var1)^j for j in 1:k)) + @NLconstraint(model, expr == sum(expr_c1[j] / (1 + var2)^j for j in 1:k)) + @NLconstraint(model, expr == sum(expr_c2[j] / (1 + var3)^j for j in 1:k)) + @NLconstraint(model, [j = 1:k], expr_c1[j] >= b[j]) + optimize!(model) + return +end + +function perf_nlexpr_model_jump_2788() + N = 400 + Random.seed!(1234) + k = N + n = 12 + p = rand(400:700, k, 1) + c1 = rand(100:200, k, n) + c2 = 0.9 .* c1 + b = rand(150:250, k, 1) + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, 0 <= x[i = 1:n] <= 1) + @variable(model, 0 <= var1 <= 1) + @variable(model, 0 <= var2 <= 1) + @variable(model, 0 <= var3 <= 1) + @objective(model, Max, var1 - var2 + var3) + @expression(model, expr, sum(x[i] * p[i] for i in 1:n)) + @expression(model, expr_c1[j = 1:k], sum(x[i] * c1[j, i] for i in 1:n)) + @expression(model, expr_c2[j = 1:k], sum(x[i] * c2[j, i] for i in 1:n)) + @constraint(model, expr == sum(b[j] / (1 + var1)^j for j in 1:k)) + @constraint(model, expr == sum(expr_c1[j] / (1 + var2)^j for j in 1:k),) + @constraint(model, expr == sum(expr_c2[j] / (1 + var3)^j for j in 1:k),) + @constraint(model, [j = 1:k], expr_c1[j] >= b[j]) + optimize!(model) + return +end + +# nested_problems + +function perf_nl_model_nested_problems() + function solve_lower_level(x...) + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, y[1:2]) + @NLobjective( + model, + Max, + x[1]^2 * y[1] + x[2]^2 * y[2] - x[1] * y[1]^4 - 2 * x[2] * y[2]^4, + ) + @constraint(model, (y[1] - 10)^2 + (y[2] - 10)^2 <= 25) + optimize!(model) + @assert termination_status(model) == LOCALLY_SOLVED + return objective_value(model), value.(y) + end + function V(x...) + f, _ = solve_lower_level(x...) + return f + end + function ∇V(g::AbstractVector, x...) + _, y = solve_lower_level(x...) 
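+        # The optimal `y` is treated as constant when differentiating the
+        # value function with respect to `x` (an envelope-theorem argument).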
+ g[1] = 2 * x[1] * y[1] - y[1]^4 + g[2] = 2 * x[2] * y[2] - 2 * y[2]^4 + return + end + function ∇²V(H::AbstractMatrix, x...) + _, y = solve_lower_level(x...) + H[1, 1] = 2 * y[1] + H[2, 2] = 2 * y[2] + return + end + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, x[1:2] >= 0) + register(model, :f_V, 2, V, ∇V, ∇²V) + @NLobjective(model, Min, x[1]^2 + x[2]^2 + f_V(x[1], x[2])) + optimize!(model) + solution_summary(model) + return +end + +function perf_nlexpr_model_nested_problems() + function solve_lower_level(x...) + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, y[1:2]) + @objective( + model, + Max, + x[1]^2 * y[1] + x[2]^2 * y[2] - x[1] * y[1]^4 - 2 * x[2] * y[2]^4, + ) + @constraint(model, (y[1] - 10)^2 + (y[2] - 10)^2 <= 25) + optimize!(model) + @assert termination_status(model) == LOCALLY_SOLVED + return objective_value(model), value.(y) + end + function V(x...) + f, _ = solve_lower_level(x...) + return f + end + function ∇V(g::AbstractVector, x...) + _, y = solve_lower_level(x...) + g[1] = 2 * x[1] * y[1] - y[1]^4 + g[2] = 2 * x[2] * y[2] - 2 * y[2]^4 + return + end + function ∇²V(H::AbstractMatrix, x...) + _, y = solve_lower_level(x...) + H[1, 1] = 2 * y[1] + H[2, 2] = 2 * y[2] + return + end + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, x[1:2] >= 0) + @operator(model, f_V, 2, V, ∇V, ∇²V) + @objective(model, Min, x[1]^2 + x[2]^2 + f_V(x[1], x[2])) + optimize!(model) + solution_summary(model) + return +end + +# ### + +function perf_nl_model_votroto() + Q = -0.8:0.4:0.8 + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, -2 <= p[1:5] <= 2) + @variable(model, -1 <= w <= 3) + @variable(model, -1 <= q <= 3) + @objective(model, Min, w) + total = Dict( + _q => @NLexpression( + model, + sum( + _p / sqrt(2π) * exp(-(i - _q)^2 / 2) for + (i, _p) in enumerate(p) + ) + ) for _q in Any[Q; q; 0.5] + ) + l1 = Dict( + _q => @NLexpression(model, 1 - total[_q] + 0.5 * total[0.5]) for + _q in Any[Q; q] + ) + @NLconstraint( + model, + [_q in Q], + w * (l1[q] - l1[_q]) + (1 - w) * (total[q] - 1) <= 0 + ) + optimize!(model) + return +end + +function perf_nlexpr_model_votroto() + Q = -0.8:0.4:0.8 + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, -2 <= p[1:5] <= 2) + @variable(model, -1 <= w <= 3) + @variable(model, -1 <= q <= 3) + @objective(model, Min, w) + f(p, q) = (1 / sqrt(2π)) * exp(-((p - q)^2) / 2) + total(p, q) = sum(_p * f(i, q) for (i, _p) in enumerate(p)) + l1(p, q) = 1 - total(p, q) + 0.5 * total(p, 0.5) + l2(p, q) = total(p, q) - 1 + lhs(p, q, _q) = l1(p, q) - l1(p, _q) + @constraint(model, [_q in Q], w * lhs(p, q, _q) + (1 - w) * l2(p, q) <= 0) + optimize!(model) + return +end + +# large_expressions + +function perf_nl_model_large_expressions() + N = 50_000 + model = Model(Ipopt.Optimizer) + set_silent(model) + set_attribute(model, "max_iter", 1) + @variable(model, y[1:N], start = 1) + @variable(model, z[1:N]) + @NLobjective(model, Max, sum(2z[i]^2 + sin(1 / y[i]) for i in 1:N)) + @NLconstraint( + model, + [i = 1:N], + ifelse(z[i] <= y[i]^3, log(y[i] / i), z[i] / cos(y[i])) <= 42, + ) + @NLconstraint(model, sum(z[i]^i + log(y[i]) for i in 1:N) == 0) + optimize!(model) + return +end + +function perf_nlexpr_model_large_expressions() + N = 50_000 + model = Model(Ipopt.Optimizer) + set_silent(model) + set_attribute(model, "max_iter", 1) + @variable(model, y[1:N], start = 1) + @variable(model, z[1:N]) + @objective(model, Max, sum(2z[i]^2 + sin(1 / y[i]) for i in 1:N)) + @constraint( 
+ model, + [i = 1:N], + ifelse(z[i] <= y[i]^3, log(y[i] / i), z[i] / cos(y[i])) <= 42, + ) + @constraint(model, sum(z[i]^i + log(y[i]) for i in 1:N) == 0) + optimize!(model) + return +end + +# large_expressions_2 + +function perf_nl_model_large_expressions_2() + N = 100 + model = Model(Ipopt.Optimizer) + set_silent(model) + set_attribute(model, "max_iter", 1) + @variable(model, y[1:N], start = 1) + @variable(model, z[1:N]) + @NLobjective(model, Max, sum(2z[i]^2 + sin(1 / y[i]) for i in 1:N)) + @NLconstraint( + model, + prod( + ifelse(z[i] <= y[i]^3, log(y[i] / i), z[i] / cos(y[i])) for i in 1:N + ) <= 42 + ) + @NLconstraint(model, sum(z[i]^i + log(y[i]) for i in 1:N) == 0) + optimize!(model) + return +end + +function perf_nlexpr_model_large_expressions_2() + N = 100 + model = Model(Ipopt.Optimizer) + set_silent(model) + set_attribute(model, "max_iter", 1) + @variable(model, y[1:N], start = 1) + @variable(model, z[1:N]) + @objective(model, Max, sum(2z[i]^2 + sin(1 / y[i]) for i in 1:N)) + @constraint( + model, + prod( + ifelse(z[i] <= y[i]^3, log(y[i] / i), z[i] / cos(y[i])) for i in 1:N + ) <= 42 + ) + @constraint(model, sum(z[i]^i + log(y[i]) for i in 1:N) == 0) + optimize!(model) + return +end + +end # module diff --git a/test/test_expr.jl b/test/test_expr.jl index f17b977b75b..1aad5b4fb0c 100644 --- a/test/test_expr.jl +++ b/test/test_expr.jl @@ -445,4 +445,10 @@ function test_expression_ambiguities() return end +function test_quadexpr_owner_model() + quad = GenericQuadExpr{Int,Int}() + @test owner_model(quad) === nothing + return +end + end # TestExpr diff --git a/test/test_macros.jl b/test/test_macros.jl index ab2b38fa46e..993fe62a701 100644 --- a/test/test_macros.jl +++ b/test/test_macros.jl @@ -616,13 +616,6 @@ function test_Nested_tuple_destructuring() return end -function test_Error_on_unexpected_comparison() - m = Model() - @variable(m, x) - @test_macro_throws ErrorException @expression(m, x <= 1) - return -end - function test_Lookup_in_model_scope_variable() model = Model() @variable(model, x) diff --git a/test/test_nlp.jl b/test/test_nlp.jl index bf20db8903b..d4f34cfe56a 100644 --- a/test/test_nlp.jl +++ b/test/test_nlp.jl @@ -1601,4 +1601,95 @@ function test_parse_expression_quadexpr_multivariate_sum() return end +function test_parse_expression_nonlinearexpr_call() + model = Model() + @variable(model, x) + @variable(model, y) + f = GenericNonlinearExpr(:ifelse, Any[x, 0, y]) + @NLexpression(model, ref, f) + nlp = nonlinear_model(model) + expr = :(ifelse($x, 0, $y)) + @test MOI.Nonlinear.parse_expression(nlp, expr) == nlp[index(ref)] + return +end + +function test_parse_expression_nonlinearexpr_or() + model = Model() + @variable(model, x) + @variable(model, y) + f = GenericNonlinearExpr(:||, Any[x, y]) + @NLexpression(model, ref, f) + nlp = nonlinear_model(model) + expr = :($x || $y) + @test MOI.Nonlinear.parse_expression(nlp, expr) == nlp[index(ref)] + return +end + +function test_parse_expression_nonlinearexpr_and() + model = Model() + @variable(model, x) + @variable(model, y) + f = GenericNonlinearExpr(:&&, Any[x, y]) + @NLexpression(model, ref, f) + nlp = nonlinear_model(model) + expr = :($x && $y) + @test MOI.Nonlinear.parse_expression(nlp, expr) == nlp[index(ref)] + return +end + +function test_parse_expression_nonlinearexpr_unsupported() + model = Model() + @variable(model, x) + @variable(model, y) + f = GenericNonlinearExpr(:foo, Any[x, y]) + @test_throws( + MOI.UnsupportedNonlinearOperator, + @NLexpression(model, ref, f), + ) + return +end + +function 
test_parse_expression_nonlinearexpr_nested_comparison() + model = Model() + @variable(model, x) + @variable(model, y) + f = GenericNonlinearExpr(:||, Any[x, y]) + g = GenericNonlinearExpr(:&&, Any[f, x]) + @NLexpression(model, ref, g) + nlp = nonlinear_model(model) + expr = :(($x || $y) && $x) + @test MOI.Nonlinear.parse_expression(nlp, expr) == nlp[index(ref)] + return +end + +function test_parse_boolean_comparison_fallbacks() + model = Model() + @variable(model, x) + @test @expression(model, ifelse(true && true, x, 0.0)) === x + @test @expression(model, ifelse(true || false, x, 0.0)) === x + @test @expression(model, ifelse(1 < 2, x, 0.0)) === x + @test @expression(model, ifelse(1 <= 2, x, 0.0)) === x + @test @expression(model, ifelse(2 > 1, x, 0.0)) === x + @test @expression(model, ifelse(2 >= 1, x, 0.0)) === x + @test @expression(model, ifelse(2 == 2, x, 0.0)) === x + @test @expression(model, ifelse(true && false, x, 0.0)) === 0.0 + @test @expression(model, ifelse(false || false, x, 0.0)) === 0.0 + @test @expression(model, ifelse(2 < 1, x, 0.0)) === 0.0 + @test @expression(model, ifelse(2 <= 1, x, 0.0)) === 0.0 + @test @expression(model, ifelse(1 > 2, x, 0.0)) === 0.0 + @test @expression(model, ifelse(1 >= 2, x, 0.0)) === 0.0 + @test @expression(model, ifelse(1 == 2, x, 0.0)) === 0.0 + return +end + +function test_get_node_type_comparison() + model = Model() + @variable(model, x) + expr = @expression(model, ifelse(x >= 0, x, 0.0)) + @NLexpression(model, ref, ifelse(x >= 0, x, 0.0)) + nlp = nonlinear_model(model) + @test MOI.Nonlinear.parse_expression(nlp, expr) == nlp[index(ref)] + return +end + end diff --git a/test/test_nlp_expr.jl b/test/test_nlp_expr.jl new file mode 100644 index 00000000000..53b6704073e --- /dev/null +++ b/test/test_nlp_expr.jl @@ -0,0 +1,831 @@ +# Copyright 2017, Iain Dunning, Joey Huchette, Miles Lubin, and contributors +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. 
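+
+# This file tests the new `GenericNonlinearExpr` interface. As a minimal,
+# illustrative sketch of the behavior under test (the model and variable
+# names below are placeholders, not part of the tested API):
+#
+#     model = Model()
+#     @variable(model, x)
+#     expr = 2.0 * sin(x)^2 + cos(x) / x  # operator overloading now builds
+#                                         # a GenericNonlinearExpr
+#     @objective(model, Min, expr)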
+ +module TestNLPExpr + +using JuMP +using Test + +function test_extension_univariate_operators( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() + @variable(model, x) + for f in MOI.Nonlinear.DEFAULT_UNIVARIATE_OPERATORS + if f in (:+, :-, :abs2) + op = getfield(Base, f) + @test op(sin(x)) isa GenericNonlinearExpr{VariableRefType} + elseif isdefined(Base, f) + op = getfield(Base, f) + @test op(x) isa GenericNonlinearExpr{VariableRefType} + elseif isdefined(MOI.Nonlinear.SpecialFunctions, f) + op = getfield(MOI.Nonlinear.SpecialFunctions, f) + @test op(x) isa GenericNonlinearExpr{VariableRefType} + end + end + return +end + +function test_extension_binary_operators( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() + @variable(model, x) + num, aff, quad, nlp = 1.0, 1.0 + x, x^2, sin(x) + for op in (+, -, *, /), a in (num, x, aff, quad, nlp) + @test op(a, nlp) isa GenericNonlinearExpr{VariableRefType} + @test op(nlp, a) isa GenericNonlinearExpr{VariableRefType} + end + for op in (*, /), a in (x, aff) + @test op(a, quad) isa GenericNonlinearExpr{VariableRefType} + @test op(quad, a) isa GenericNonlinearExpr{VariableRefType} + end + for a in (num, x, aff, quad), b in (x, aff, quad) + @test /(a, b) isa GenericNonlinearExpr{VariableRefType} + end + return +end + +function test_extension_objective( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() + @variable(model, x) + @objective(model, Min, 2.0 * sin(x)^2 + cos(x) / x) + @test objective_function(model) isa GenericNonlinearExpr{VariableRefType} + return +end + +function test_extension_expression( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() + @variable(model, x) + @variable(model, y[1:3]) + @test string(@expression(model, *(y...))) == "(y[1]*y[2]) * y[3]" + @test string(@expression(model, sin(x))) == "sin(x)" + @test string(@expression(model, ifelse(x >= 0, x, 0))) == + "ifelse(x >= 0, x, 0)" + @test string(@expression(model, 2^x)) == "2.0 ^ x" + @test string(@expression(model, x^x)) == "x ^ x" + @test string(@expression(model, sin(x)^2)) == "sin(x) ^ 2.0" + @test string(@expression(model, sin(x)^2.0)) == "sin(x) ^ 2.0" + @test string(@expression(model, 2 * sin(x)^2.0)) == "2.0 * (sin(x) ^ 2.0)" + @test string(@expression(model, 1 + sin(x))) == "1.0 + sin(x)" + @test string(@expression(model, 1 + 2 * sin(x))) == "1.0 + (2.0 * sin(x))" + @test string(@expression(model, 2.0 * sin(x)^2 + cos(x) / x)) == + "(2.0 * (sin(x) ^ 2.0)) + (cos(x) / x)" + @test string(@expression(model, 2.0 * sin(x)^2 - cos(x) / x)) == + "(2.0 * (sin(x) ^ 2.0)) - (cos(x) / x)" + return +end + +function test_extension_flatten_nary( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() + @variable(model, x) + expr_plus = GenericNonlinearExpr{VariableRefType}(:+, Any[x]) + expr_mult = GenericNonlinearExpr{VariableRefType}(:*, Any[x]) + expr_sin = GenericNonlinearExpr{VariableRefType}(:sin, Any[x]) + to_string(x) = string(flatten!(x)) + @test to_string(+(expr_plus, 1)) == "x + 1.0" + @test to_string(+(1, expr_plus)) == "1.0 + x" + @test to_string(+(expr_plus, x)) == "x + x" + @test to_string(+(expr_sin, x)) == "sin(x) + x" + @test to_string(+(x, expr_plus)) == "x + x" + @test to_string(+(x, expr_sin)) == "x + sin(x)" + @test to_string(+(expr_plus, expr_plus)) == "x + x" + @test to_string(+(expr_plus, expr_sin)) == "x + sin(x)" + @test to_string(+(expr_sin, expr_plus)) == "sin(x) + x" + @test to_string(+(expr_sin, expr_sin)) == 
"sin(x) + sin(x)" + @test to_string(*(expr_mult, 2)) == "x * 2.0" + @test to_string(*(2, expr_mult)) == "2.0 * x" + @test to_string(*(expr_mult, x)) == "x * x" + @test to_string(*(expr_sin, x)) == "sin(x) * x" + @test to_string(*(x, expr_mult)) == "x * x" + @test to_string(*(x, expr_sin)) == "x * sin(x)" + @test to_string(*(expr_mult, expr_mult)) == "x * x" + @test to_string(*(expr_mult, expr_sin)) == "x * sin(x)" + @test to_string(*(expr_sin, expr_mult)) == "sin(x) * x" + @test to_string(*(expr_sin, expr_sin)) == "sin(x) * sin(x)" + @test to_string(sin(+(expr_plus, 1))) == "sin(x + 1.0)" + @test to_string(sin(*(expr_mult, expr_mult))) == "sin(x * x)" + return +end + +function test_extension_zero_one( + ModelType = Model, + VariableRefType = VariableRef, +) + @test string(zero(GenericNonlinearExpr{VariableRefType})) == "+(0.0)" + @test string(one(GenericNonlinearExpr{VariableRefType})) == "+(1.0)" + return +end + +function test_extension_latex(ModelType = Model, VariableRefType = VariableRef) + model = ModelType() + @variable(model, x) + @test function_string(MIME("text/latex"), sin(x)) == + raw"\textsf{sin}\left({x}\right)" + @test function_string(MIME("text/latex"), sin(x)^x) == + raw"{\textsf{sin}\left({x}\right)} ^ {x}" + @test function_string(MIME("text/latex"), sin(x)^(x + 1)) == + raw"{\textsf{sin}\left({x}\right)} ^ {\left(x + 1\right)}" + @test function_string(MIME("text/latex"), (x + 1)^x) == + raw"{\left(x + 1\right)} ^ {x}" + @test function_string(MIME("text/latex"), (x + 1)^(x + 1)) == + raw"{\left(x + 1\right)} ^ {\left(x + 1\right)}" + @test function_string(MIME("text/latex"), (x + 1)^sin(x)) == + raw"{\left(x + 1\right)} ^ {\textsf{sin}\left({x}\right)}" + + @expression(model, g, ifelse(x > 0, sin(x), x + cos(x)^2)) + @test function_string(MIME("text/latex"), g) == + raw"\textsf{ifelse}\left({{x} > {0}}, {\textsf{sin}\left({x}\right)}, {{x} + {\left({\textsf{cos}\left({x}\right)} ^ {2.0}\right)}}\right)" + return +end + +function test_extension_expression_addmul( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() + @variable(model, x) + @test string(@expression(model, x + 3 * sin(x))) == "x + (3.0 * sin(x))" + @test string(@expression(model, 2 * x + 3 * sin(x))) == + "(2 x) + (3.0 * sin(x))" + @test string(@expression(model, x^2 + 3 * sin(x))) == + "($(x^2)) + (3.0 * sin(x))" + @test string(@expression(model, sin(x) + 3 * sin(x))) == + "sin(x) + (3.0 * sin(x))" + @test string(@expression(model, sin(x) + 3 * x)) == "sin(x) + (3 x)" + @test string(@expression(model, sin(x) + 3 * x * x)) == + "sin(x) + (3 $(x^2))" + return +end + +function test_extension_expression_submul( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() + @variable(model, x) + @test string(@expression(model, x - 3 * sin(x))) == "x - (3.0 * sin(x))" + @test string(@expression(model, 2 * x - 3 * sin(x))) == + "(2 x) - (3.0 * sin(x))" + @test string(@expression(model, x^2 - 3 * sin(x))) == + "($(x^2)) - (3.0 * sin(x))" + @test string(@expression(model, sin(x) - 3 * sin(x))) == + "sin(x) - (3.0 * sin(x))" + @test string(@expression(model, sin(x) - 3 * x)) == "sin(x) - (3 x)" + @test string(@expression(model, sin(x) - 3 * x * x)) == + "sin(x) - (3 $(x^2))" + return +end + +function test_extension_aff_expr_convert( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() + @variable(model, x) + _to_string(x) = string(convert(GenericNonlinearExpr{VariableRefType}, x)) + @test _to_string(AffExpr(0.0)) == "0.0" + @test 
_to_string(AffExpr(1.0)) == "1.0" + @test _to_string(x + 1) == "x + 1.0" + @test _to_string(2x + 1) == "(2.0 * x) + 1.0" + @test _to_string(2x) == "2.0 * x" + return +end + +function test_extension_quad_expr_convert( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() + @variable(model, x) + _to_string(x) = string(convert(GenericNonlinearExpr{VariableRefType}, x)) + @test _to_string(QuadExpr(AffExpr(0.0))) == "0.0" + @test _to_string(QuadExpr(AffExpr(1.0))) == "1.0" + @test _to_string(x^2 + 1) == "(x * x) + 1.0" + @test _to_string(2x^2 + 1) == "(2.0 * x * x) + 1.0" + @test _to_string(2x^2) == "2.0 * x * x" + @test _to_string(x^2 + x + 1) == "x + (x * x) + 1.0" + @test _to_string(2x^2 + x + 1) == "x + (2.0 * x * x) + 1.0" + @test _to_string(2x^2 + x) == "x + (2.0 * x * x)" + @test _to_string(x^2 + 2x + 1) == "(2.0 * x) + (x * x) + 1.0" + @test _to_string(2x^2 + 2x + 1) == "(2.0 * x) + (2.0 * x * x) + 1.0" + @test _to_string(2x^2 + 2x) == "(2.0 * x) + (2.0 * x * x)" + return +end + +function test_extension_constraint_name( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() + @variable(model, x) + @constraint(model, c, sin(x) <= 1) + @test name(c) == "c" + set_name(c, "d") + @test name(c) == "d" + @test startswith(string(c), "d : ") + return +end + +function test_extension_constraint_lessthan( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() + @variable(model, x) + @constraint(model, c, 2.0 * sin(x)^2 + cos(x) / x <= 1) + obj = constraint_object(c) + @test isequal_canonical(obj.func, 2.0 * sin(x)^2 + cos(x) / x - 1) + @test obj.set == MOI.LessThan(zero(value_type(ModelType))) + return +end + +function test_extension_constraint_greaterthan( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() + @variable(model, x) + @constraint(model, c, 2.0 * sin(x)^2 + cos(x) / x >= 1) + obj = constraint_object(c) + @test isequal_canonical(obj.func, 2.0 * sin(x)^2 + cos(x) / x - 1) + @test obj.set == MOI.GreaterThan(zero(value_type(ModelType))) + return +end + +function test_extension_constraint_equalto( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() + @variable(model, x) + @constraint(model, c, 2.0 * sin(x)^2 + cos(x) / x == 1) + obj = constraint_object(c) + @test isequal_canonical(obj.func, 2.0 * sin(x)^2 + cos(x) / x - 1) + @test obj.set == MOI.EqualTo(zero(value_type(ModelType))) + return +end + +function test_extension_constraint_interval( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() + @variable(model, x) + @constraint(model, c, 0 <= 2.0 * sin(x)^2 + cos(x) / x <= 1) + obj = constraint_object(c) + @test isequal_canonical(obj.func, 2.0 * sin(x)^2 + cos(x) / x) + T = value_type(ModelType) + @test obj.set == MOI.Interval(zero(T), one(T)) + return +end + +function test_user_defined_function_overload() + model = Model() + @variable(model, x) + f(x::Real) = x^2 + f(x::AbstractJuMPScalar) = NonlinearExpr(:f, x) + register(model, :f, 1, f; autodiff = true) + @test string(@expression(model, f(x))) == "f(x)" + @test string(f(x) + f(x)) == "f(x) + f(x)" + @test string(1 / (f(x) + f(x))) == "1.0 / (f(x) + f(x))" + return +end + +function test_extension_nonlinear_matrix_algebra( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() + @variable(model, X[1:3, 1:3], Symmetric) + @objective(model, Max, sum(X^4 .- X^3)) + @test objective_function(model) isa GenericNonlinearExpr{VariableRefType} + return +end + +""" +This 
test checks that we can work with expressions of arbitrary depth. Don't use +recursion! +""" +function test_extension_recursion_stackoverflow( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() + @variable(model, x) + expr = sin(x) + for _ in 1:20_000 + expr = sin(expr) + end + @test @objective(model, Min, expr) isa GenericNonlinearExpr{VariableRefType} + @test string(expr) isa String + return +end + +function test_nlparameter_interaction() + model = Model() + @variable(model, x) + @NLparameter(model, p == 1) + e = x + p + @test e isa GenericNonlinearExpr + @test string(e) == "x + ($p)" + return +end + +function test_nlexpression_interaction() + model = Model() + @variable(model, x) + @NLexpression(model, expr, sin(x)) + e = x + expr + @test e isa GenericNonlinearExpr + @test string(e) == "x + ($expr)" + return +end + +function test_nlobjective_with_nlexpr() + model = Model() + @variable(model, x) + y = sin(x) + @NLobjective(model, Min, y^2) + nlp = nonlinear_model(model) + @test isequal_canonical(jump_function(model, nlp.objective), sin(x)^2) + return +end + +function test_nlconstraint_with_nlexpr() + model = Model() + @variable(model, x) + y = sin(x) + @NLconstraint(model, c, y^2 <= 1) + nlp = nonlinear_model(model) + @test isequal_canonical( + jump_function(model, nlp.constraints[index(c)].expression), + sin(x)^2 - 1, + ) + return +end + +function test_jump_function_nonlinearexpr() + model = Model() + @variable(model, x) + @NLparameter(model, p == 1) + @NLexpression(model, expr1, sin(p + x)) + @NLexpression(model, expr2, sin(expr1)) + nlp = nonlinear_model(model) + @test string(jump_function(model, nlp[index(expr1)])) == "sin(($p) + $x)" + @test string(jump_function(model, nlp[index(expr2)])) == "sin($expr1)" + return +end + +function test_constraint_object() + model = Model() + @variable(model, x) + y = sin(x) + @NLconstraint(model, c, y^2 <= 1) + con = constraint_object(c) + @test isequal_canonical(con.func, sin(x)^2 - 1.0) + @test con.set == MOI.LessThan(0.0) + return +end + +function test_extension_expr_mle( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() + data = [1.0, 2.0, 4.0, 8.0] + n = length(data) + @variable(model, x) + @variable(model, y) + obj = @expression( + model, + n / 2 * log(1 / (2 * y^2)) - + sum((data[i] - x)^2 for i in 1:n) / (2 * y^2) + ) + @test string(obj) == + "(2.0 * log(1.0 / (2 $(y^2)))) - ((4 $(x^2) - 30 x + 85) / (2 $(y^2)))" + return +end + +function test_extension_nl_macro( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() + @variable(model, x) + @test isequal_canonical( + @expression(model, ifelse(x, 1, 2)), + GenericNonlinearExpr(:ifelse, Any[x, 1, 2]), + ) + @test isequal_canonical( + @expression(model, x || 1), + GenericNonlinearExpr(:||, Any[x, 1]), + ) + @test isequal_canonical( + @expression(model, x && 1), + GenericNonlinearExpr(:&&, Any[x, 1]), + ) + @test isequal_canonical( + @expression(model, x < 0), + GenericNonlinearExpr(:<, Any[x, 0]), + ) + @test isequal_canonical( + @expression(model, x > 0), + GenericNonlinearExpr(:>, Any[x, 0]), + ) + @test isequal_canonical( + @expression(model, x <= 0), + GenericNonlinearExpr(:<=, Any[x, 0]), + ) + @test isequal_canonical( + @expression(model, x >= 0), + GenericNonlinearExpr(:>=, Any[x, 0]), + ) + @test isequal_canonical( + @expression(model, x == 0), + GenericNonlinearExpr(:(==), Any[x, 0]), + ) + @test isequal_canonical( + @expression(model, 0 < x <= 1), + GenericNonlinearExpr( + :&&, + Any[@expression(model, 0 
< x), @expression(model, x <= 1)], + ), + ) + @test isequal_canonical( + @expression(model, ifelse(x > 0, x^2, sin(x))), + GenericNonlinearExpr( + :ifelse, + Any[@expression(model, x > 0), x^2, sin(x)], + ), + ) + return +end + +function test_register_univariate() + model = Model() + @variable(model, x) + @operator(model, f, 1, x -> x^2) + @test f isa NonlinearOperator + @test sprint(show, f) == "NonlinearOperator(:f, $(f.func))" + @test isequal_canonical(@expression(model, f(x)), f(x)) + @test isequal_canonical(f(x), GenericNonlinearExpr(:f, Any[x])) + attrs = MOI.get(model, MOI.ListOfModelAttributesSet()) + @test MOI.UserDefinedFunction(:f, 1) in attrs + return +end + +function test_register_eval_non_jump() + model = Model() + @variable(model, x) + @operator(model, f, 1, x -> x^2) + @test f(2.0) == 4.0 + @operator(model, g, 2, (x, y) -> x^2 - sin(y)) + @test g(2.0, 3.0) == 4.0 - sin(3.0) + return +end + +function test_register_univariate_gradient() + model = Model() + @variable(model, x) + @operator(model, f, 1, x -> x^2, x -> 2 * x) + @test isequal_canonical(@expression(model, f(x)), f(x)) + @test isequal_canonical(f(x), GenericNonlinearExpr(:f, Any[x])) + attrs = MOI.get(model, MOI.ListOfModelAttributesSet()) + @test MOI.UserDefinedFunction(:f, 1) in attrs + return +end + +function test_register_univariate_gradient_hessian() + model = Model() + @variable(model, x) + @operator(model, f, 1, x -> x^2, x -> 2 * x, x -> 2.0) + @test isequal_canonical(@expression(model, f(x)), f(x)) + @test isequal_canonical(f(x), GenericNonlinearExpr(:f, Any[x])) + attrs = MOI.get(model, MOI.ListOfModelAttributesSet()) + @test MOI.UserDefinedFunction(:f, 1) in attrs + return +end + +function test_register_multivariate() + model = Model() + @variable(model, x[1:2]) + f = (x...) -> sum(x .^ 2) + @operator(model, foo, 2, f) + @test isequal_canonical(@expression(model, foo(x...)), foo(x...)) + @test isequal_canonical(foo(x...), GenericNonlinearExpr(:foo, Any[x...])) + attrs = MOI.get(model, MOI.ListOfModelAttributesSet()) + @test MOI.UserDefinedFunction(:foo, 2) in attrs + return +end + +function test_register_multivariate_gradient() + model = Model() + @variable(model, x[1:2]) + f = (x...) -> sum(x .^ 2) + ∇f = (g, x...) -> (g .= 2 .* x) + @operator(model, foo, 2, f, ∇f) + @test isequal_canonical(@expression(model, foo(x...)), foo(x...)) + @test isequal_canonical(foo(x...), GenericNonlinearExpr(:foo, Any[x...])) + attrs = MOI.get(model, MOI.ListOfModelAttributesSet()) + @test MOI.UserDefinedFunction(:foo, 2) in attrs + return +end + +function test_register_multivariate_gradient_hessian() + model = Model() + @variable(model, x[1:2]) + f = (x...) -> sum(x .^ 2) + ∇f = (g, x...) -> (g .= 2 .* x) + ∇²f = (H, x...) -> begin + for i in 1:2 + H[i, i] = 2.0 + end + end + @operator(model, foo, 2, f, ∇f, ∇²f) + @test isequal_canonical(@expression(model, foo(x...)), foo(x...)) + @test isequal_canonical(foo(x...), GenericNonlinearExpr(:foo, Any[x...])) + attrs = MOI.get(model, MOI.ListOfModelAttributesSet()) + @test MOI.UserDefinedFunction(:foo, 2) in attrs + return +end + +function test_register_multivariate_many_args() + model = Model() + @variable(model, x[1:10]) + f = (x...) -> sum(x .^ 2) + @operator(model, foo, 10, f) + @test isequal_canonical(foo(x...), GenericNonlinearExpr(:foo, Any[x...])) + @test foo((1:10)...) == 385 + return +end + +function test_register_errors() + model = Model() + f = x -> x^2 + @test_throws( + ErrorException( + "Unable to add operator foo: invalid number of " * + "functions provided. 
Got 4, but expected 1 (if function only), " *
+ "2 (if function and gradient), or 3 (if function, gradient, and " *
+ "hessian provided)",
+ ),
+ @operator(model, foo, 2, f, f, f, f),
+ )
+ return
+end
+
+function test_expression_no_variable()
+ head, args = :sin, Any[1]
+ @test_throws(
+ ErrorException(
+ "Unable to create a nonlinear expression because it did not " *
+ "contain any JuMP scalars. head = $head, args = $args.",
+ ),
+ GenericNonlinearExpr(head, args),
+ )
+ return
+end
+
+function test_value_expression()
+ model = Model()
+ @variable(model, x)
+ f = x -> 1.1
+ @test value(f, sin(x)) ≈ sin(1.1)
+ @test value(f, sin(x) + cos(x)) ≈ sin(1.1) + cos(1.1)
+ @test value(f, x^1.3 / x) ≈ 1.1^1.3 / 1.1
+ @test value(f, @expression(model, ifelse(x > 1, 1, 2))) ≈ 1
+ @test value(f, @expression(model, ifelse(x < 1, 1, 2))) ≈ 2
+ @test value(f, @expression(model, ifelse(x < 1 || x > 2, 1, 2))) ≈ 2
+ @test value(f, @expression(model, ifelse(x < 1 && x > 2, 1, 2))) ≈ 2
+ @test value(f, sin(x + 1)) ≈ sin(1.1 + 1)
+ @test value(f, sin(x^2 + x + 1)) ≈ sin(1.1^2 + 1.1 + 1)
+ foo(x) = (x - 1)^2
+ bar(x, y) = sqrt(x - y)
+ @operator(model, my_foo, 1, foo)
+ @operator(model, my_bar, 2, bar)
+ @test value(f, my_foo(x)) ≈ (1.1 - 1)^2
+ @test value(f, my_foo(x + 1)) ≈ (1.1 + 1 - 1)^2
+ @test value(f, my_foo(x^2 + 1)) ≈ (1.1^2 + 1 - 1)^2
+ @test value(f, my_foo(x^2 + x + 1)) ≈ (1.1^2 + 1.1 + 1 - 1)^2
+ y = QuadExpr(x + 1)
+ @test value(f, my_foo(y)) ≈ (value(f, y) - 1)^2
+ @test value(f, my_bar(2.2, x)) ≈ sqrt(2.2 - 1.1)
+ bad_udf = NonlinearOperator(:bad_udf, f)
+ @test_throws(
+ ErrorException(
+ "Unable to evaluate nonlinear operator bad_udf because it was " *
+ "not added as an operator.",
+ ),
+ value(f, bad_udf(x)),
+ )
+ return
+end
+
+function test_value_result()
+ model = Model() do
+ return MOI.Utilities.MockOptimizer(MOI.Utilities.Model{Float64}())
+ end
+ @variable(model, x)
+ optimize!(model)
+ mock = unsafe_backend(model)
+ MOI.set(mock, MOI.TerminationStatus(), MOI.OPTIMAL)
+ MOI.set(mock, MOI.ResultCount(), 2)
+ MOI.set(mock, MOI.PrimalStatus(1), MOI.FEASIBLE_POINT)
+ MOI.set(mock, MOI.PrimalStatus(2), MOI.FEASIBLE_POINT)
+ MOI.set(mock, MOI.VariablePrimal(1), optimizer_index(x), 1.1)
+ MOI.set(mock, MOI.VariablePrimal(2), optimizer_index(x), 2.2)
+ f = sin(x)
+ @test value(f; result = 1) ≈ sin(1.1)
+ @test value(f; result = 2) ≈ sin(2.2)
+ return
+end
+
+function test_nonlinear_expr_owner_model()
+ model = Model()
+ @variable(model, x)
+ f = GenericNonlinearExpr(:sin, Any[x])
+ # This shouldn't happen in regular code, but test it to check that we get
+ # behavior similar to AffExpr and QuadExpr.
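+ # (`owner_model` recovers the model from the expression's arguments, so
+ # once `args` is emptied below there is nothing left to report.)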
+ empty!(f.args) + @test owner_model(f) === nothing + return +end + +function test_operate_shortcut_ma_operate!!_add_mul() + model = Model() + @variable(model, x) + @test isequal_canonical( + @expression(model, sum(sin(x) for i in 1:3)), + NonlinearExpr(:+, Any[sin(x), sin(x), sin(x)]), + ) + @test isequal_canonical(JuMP._MA.add_mul(sin(x), 2, x), sin(x) + 2x) + @test isequal_canonical(JuMP._MA.add_mul(sin(x), 2, x, 2), sin(x) + 4x) + @test isequal_canonical(JuMP._MA.sub_mul(sin(x), 2, x), sin(x) - 2x) + @test isequal_canonical(JuMP._MA.sub_mul(sin(x), 2, x, 2), sin(x) - 4x) + return +end + +function test_show_nonlinear_model() + model = Model() + @variable(model, x >= -1) + @objective(model, Min, exp(x)) + @constraint(model, sin(x) <= 0) + str = sprint(show, model) + @test occursin("NonlinearExpr", str) + return +end + +function test_error_both_nl_interfaces_constraint() + model = Model() + @variable(model, x) + @constraint(model, log(x) <= 1) + @NLconstraint(model, log(x) <= 1) + @test_throws( + ErrorException( + "Cannot optimize a model which contains the features from " * + "both the legacy (macros beginning with `@NL`) and new " * + "(`NonlinearExpr`) nonlinear interfaces. You must use one or " * + "the other.", + ), + optimize!(model), + ) + return +end + +function test_error_both_nl_interfaces_objective() + model = Model() + @variable(model, x) + @objective(model, Max, log(x)) + @NLconstraint(model, log(x) <= 1) + @test_throws( + ErrorException( + "Cannot optimize a model which contains the features from " * + "both the legacy (macros beginning with `@NL`) and new " * + "(`NonlinearExpr`) nonlinear interfaces. You must use one or " * + "the other.", + ), + optimize!(model), + ) + return +end + +function test_VectorNonlinearFunction_moi_function() + model = Model() + @variable(model, x) + F = [sin(x)] + @test moi_function_type(typeof(F)) == MOI.VectorNonlinearFunction + @test isapprox( + moi_function(F), + MOI.VectorNonlinearFunction([ + MOI.ScalarNonlinearFunction(:sin, Any[index(x)]), + ]), + ) + @test MOI.VectorNonlinearFunction(F) ≈ moi_function(F) + @test jump_function_type(model, MOI.VectorNonlinearFunction) == + Vector{NonlinearExpr} + @test isequal_canonical(jump_function(model, moi_function(F)), F) + return +end + +function test_VectorNonlinearFunction_moi_function_AbstractJuMPScalar() + model = Model() + @variable(model, x) + F = [sin(x), x] + @test F isa Vector{AbstractJuMPScalar} + @test moi_function_type(typeof(F)) == MOI.VectorNonlinearFunction + @test isapprox( + moi_function(F), + MOI.VectorNonlinearFunction([ + MOI.ScalarNonlinearFunction(:sin, Any[index(x)]), + MOI.ScalarNonlinearFunction(:+, Any[index(x)]), + ]), + ) + @test MOI.VectorNonlinearFunction(F) ≈ moi_function(F) + @test jump_function_type(model, MOI.VectorNonlinearFunction) == + Vector{NonlinearExpr} + @test isequal_canonical( + jump_function(model, moi_function(F)), + [sin(x), NonlinearExpr(:+, x)], + ) + return +end + +function test_VectorNonlinearFunction_objective() + model = Model() + @variable(model, x) + F = [sin(x), sqrt(x)] + @objective(model, Min, F) + @test objective_function_type(model) == Vector{NonlinearExpr} + @test isequal_canonical(objective_function(model), F) + return +end + +function test_operator_overload_complex_error() + model = Model() + @variable(model, x) + f = (1 + 2im) * x + @test_throws( + ErrorException( + "Cannot build `GenericNonlinearExpr` because a term is complex-" * + "valued: `($f)::$(typeof(f))`", + ), + sin(f), + ) + @test_throws( + ErrorException( + "Cannot build 
`GenericNonlinearExpr` because a term is complex-" *
+ "valued: `($(1 + 2im))::$(typeof(1 + 2im))`",
+ ),
+ +(sin(x), 1 + 2im),
+ )
+ @test_throws(
+ ErrorException(
+ "Cannot build `GenericNonlinearExpr` because a term is complex-" *
+ "valued: `($(1 + 2im))::$(typeof(1 + 2im))`",
+ ),
+ +(1 + 2im, sin(x)),
+ )
+ @test_throws(
+ ErrorException(
+ "Cannot build `GenericNonlinearExpr` because a term is complex-" *
+ "valued: `($f)::$(typeof(f))`",
+ ),
+ +(f, sin(x)),
+ )
+ @test_throws(
+ ErrorException(
+ "Cannot build `GenericNonlinearExpr` because a term is complex-" *
+ "valued: `($f)::$(typeof(f))`",
+ ),
+ +(sin(x), f),
+ )
+ return
+end
+
+function test_redefinition_of_function()
+ model = Model()
+ f(x) = x^2
+ err = try
+ JuMP._catch_redefinition_constant_error(:f, f)
+ catch err
+ err
+ end
+ @test_throws(err, @operator(model, f, 1, f))
+ return
+end
+
+end # module
diff --git a/test/test_operator.jl b/test/test_operator.jl
index 075d8a5d8e8..3903c3818e0 100644
--- a/test/test_operator.jl
+++ b/test/test_operator.jl
@@ -106,10 +106,11 @@ function test_extension_broadcast_division_error(
 copy(B.rowval),
 vec(x),
 )
- @test_throws ErrorException A ./ x
- @test_throws ErrorException B ./ x
- @test_throws ErrorException A ./ y
- @test_throws ErrorException B ./ y
+ NonlinearExprType = GenericNonlinearExpr{VariableRefType}
+ @test A ./ x isa Matrix{NonlinearExprType}
+ @test B ./ x isa SparseArrays.SparseMatrixCSC{NonlinearExprType,Int}
+ @test A ./ y isa SparseArrays.SparseMatrixCSC{NonlinearExprType,Int}
+ @test B ./ y isa SparseArrays.SparseMatrixCSC{NonlinearExprType,Int}
 # TODO: Refactor to avoid calling the internal JuMP function
 # `_densify_with_jump_eltype`.
 #z = _densify_with_jump_eltype((2 .* y) ./ 3)
@@ -335,17 +336,17 @@ function test_extension_basic_operators_number(
 @test_expression_with_string 4.13 + w "w + 4.13"
 @test_expression_with_string 3.16 - w "-w + 3.16"
 @test_expression_with_string 5.23 * w "5.23 w"
- @test_throws ErrorException 2.94 / w
+ @test_expression_with_string 2.94 / w "2.94 / w"
 # 1-3 Number--AffExpr
 @test_expression_with_string 1.5 + aff "7.1 x + 4"
 @test_expression_with_string 1.5 - aff "-7.1 x - 1"
 @test_expression_with_string 2 * aff "14.2 x + 5"
- @test_throws ErrorException 2 / aff
+ @test_expression_with_string 2 / aff "2.0 / (7.1 x + 2.5)"
 # 1-4 Number--QuadExpr
 @test_expression_with_string 1.5 + q "2.5 y*z + 7.1 x + 4"
 @test_expression_with_string 1.5 - q "-2.5 y*z - 7.1 x - 1"
 @test_expression_with_string 2 * q "5 y*z + 14.2 x + 5"
- @test_throws ErrorException 2 / q
+ @test_expression_with_string 2 / q "2.0 / (2.5 y*z + 7.1 x + 2.5)"
 return
 end
@@ -373,30 +374,30 @@ function test_extension_basic_operators_variable(
 @test_expression_with_string w / T(2) "0.5 w"
 @test w == w
 @test_expression_with_string x * y - 1 "x*y - 1"
- @test_expression_with_string x^2 "x²"
- @test_expression_with_string x^1 "x"
- @test_expression_with_string x^0 "1"
- @test_throws ErrorException x^3
- @test_throws ErrorException x^(T(15) / T(10))
+ @test_expression_with_string(x^2, "x²", inferrable = false)
+ @test_expression_with_string(x^1, "x", inferrable = false)
+ @test_expression_with_string(x^0, "1", inferrable = false)
+ @test_expression_with_string(x^3, "x ^ 3", inferrable = false)
+ @test_expression_with_string x^(T(15) / T(10)) "x ^ 1.5"
 # 2-2 Variable--Variable
 @test_expression_with_string w + x "w + x"
 @test_expression_with_string w - x "w - x"
 @test_expression_with_string w * x "w*x"
 @test_expression_with_string x - x "0"
- @test_throws ErrorException w / x
+
@test_expression_with_string w / x "w / x" @test_expression_with_string y * z - x "y*z - x" # 2-3 Variable--AffExpr @test_expression_with_string z + aff "z + 7.1 x + 2.5" @test_expression_with_string z - aff "z - 7.1 x - 2.5" @test_expression_with_string z * aff "7.1 z*x + 2.5 z" - @test_throws ErrorException z / aff + @test_expression_with_string z / aff "z / (7.1 x + 2.5)" @test_throws MethodError z ≤ aff @test_expression_with_string β * x - aff "0 x - 2.5" # 2-4 Variable--QuadExpr @test_expression_with_string w + q "2.5 y*z + w + 7.1 x + 2.5" @test_expression_with_string w - q "-2.5 y*z + w - 7.1 x - 2.5" - @test_throws ErrorException w * q - @test_throws ErrorException w / q + @test_expression_with_string w * q "w * (2.5 y*z + 7.1 x + 2.5)" + @test_expression_with_string w / q "w / (2.5 y*z + 7.1 x + 2.5)" @test transpose(x) === x @test conj(x) === x return @@ -427,34 +428,50 @@ function test_extension_basic_operators_affexpr( @test aff == aff @test_throws MethodError aff ≥ 1 @test_expression_with_string aff - 1 "7.1 x + 1.5" - @test_expression_with_string aff^2 "50.41 x² + 35.5 x + 6.25" - @test_expression_with_string (7.1 * x + 2.5)^2 "50.41 x² + 35.5 x + 6.25" - @test_expression_with_string aff^1 "7.1 x + 2.5" - @test_expression_with_string (7.1 * x + 2.5)^1 "7.1 x + 2.5" - @test_expression_with_string aff^0 "1" - @test_expression_with_string (7.1 * x + 2.5)^0 "1" - @test_throws ErrorException aff^3 - @test_throws ErrorException (7.1 * x + 2.5)^3 - @test_throws ErrorException aff^1.5 - @test_throws ErrorException (7.1 * x + 2.5)^1.5 + @test_expression_with_string( + aff^2, + "50.41 x² + 35.5 x + 6.25", + inferrable = false + ) + @test_expression_with_string( + (7.1 * x + 2.5)^2, + "50.41 x² + 35.5 x + 6.25", + inferrable = false + ) + @test_expression_with_string(aff^1, "7.1 x + 2.5", inferrable = false) + @test_expression_with_string( + (7.1 * x + 2.5)^1, + "7.1 x + 2.5", + inferrable = false + ) + @test_expression_with_string(aff^0, "1", inferrable = false) + @test_expression_with_string((7.1 * x + 2.5)^0, "1", inferrable = false) + @test_expression_with_string(aff^3, "(7.1 x + 2.5) ^ 3", inferrable = false) + @test_expression_with_string( + (7.1 * x + 2.5)^3, + "(7.1 x + 2.5) ^ 3", + inferrable = false + ) + @test_expression_with_string aff^1.5 "(7.1 x + 2.5) ^ 1.5" + @test_expression_with_string (7.1 * x + 2.5)^1.5 "(7.1 x + 2.5) ^ 1.5" # 3-2 AffExpr--Variable @test_expression_with_string aff + z "7.1 x + z + 2.5" @test_expression_with_string aff - z "7.1 x - z + 2.5" @test_expression_with_string aff * z "7.1 x*z + 2.5 z" - @test_throws ErrorException aff / z + @test_expression_with_string aff / z "(7.1 x + 2.5) / z" @test_expression_with_string aff - 7.1 * x "0 x + 2.5" # 3-3 AffExpr--AffExpr @test_expression_with_string aff + aff2 "7.1 x + 1.2 y + 3.7" @test_expression_with_string aff - aff2 "7.1 x - 1.2 y + 1.3" @test_expression_with_string aff * aff2 "8.52 x*y + 3 y + 8.52 x + 3" @test string((x + x) * (x + 3)) == string((x + 3) * (x + x)) # Issue #288 - @test_throws ErrorException aff / aff2 + @test_expression_with_string aff / aff2 "(7.1 x + 2.5) / (1.2 y + 1.2)" @test_expression_with_string aff - aff "0 x" # 4-4 AffExpr--QuadExpr @test_expression_with_string aff2 + q "2.5 y*z + 1.2 y + 7.1 x + 3.7" @test_expression_with_string aff2 - q "-2.5 y*z + 1.2 y - 7.1 x - 1.3" - @test_throws ErrorException aff2 * q - @test_throws ErrorException aff2 / q + @test_expression_with_string aff2 * q "(1.2 y + 1.2) * (2.5 y*z + 7.1 x + 2.5)" + @test_expression_with_string aff2 / q "(1.2 y 
+ 1.2) / (2.5 y*z + 7.1 x + 2.5)" @test transpose(aff) === aff @test conj(aff) === aff return @@ -486,18 +503,18 @@ function test_extension_basic_operators_quadexpr( # 4-2 QuadExpr--Variable @test_expression_with_string q + w "2.5 y*z + 7.1 x + w + 2.5" @test_expression_with_string q - w "2.5 y*z + 7.1 x - w + 2.5" - @test_throws ErrorException q * w - @test_throws ErrorException q / w + @test_expression_with_string q * w "(2.5 y*z + 7.1 x + 2.5) * w" + @test_expression_with_string q / w "(2.5 y*z + 7.1 x + 2.5) / w" # 4-3 QuadExpr--AffExpr @test_expression_with_string q + aff2 "2.5 y*z + 7.1 x + 1.2 y + 3.7" @test_expression_with_string q - aff2 "2.5 y*z + 7.1 x - 1.2 y + 1.3" - @test_throws ErrorException q * aff2 - @test_throws ErrorException q / aff2 + @test_expression_with_string q * aff2 "(2.5 y*z + 7.1 x + 2.5) * (1.2 y + 1.2)" + @test_expression_with_string q / aff2 "(2.5 y*z + 7.1 x + 2.5) / (1.2 y + 1.2)" # 4-4 QuadExpr--QuadExpr @test_expression_with_string q + q2 "2.5 y*z + 8 x*z + 7.1 x + 1.2 y + 3.7" @test_expression_with_string q - q2 "2.5 y*z - 8 x*z + 7.1 x - 1.2 y + 1.3" - @test_throws ErrorException q * q2 - @test_throws ErrorException q / q2 + @test_expression_with_string q * q2 "(2.5 y*z + 7.1 x + 2.5) * (8 x*z + 1.2 y + 1.2)" + @test_expression_with_string q / q2 "(2.5 y*z + 7.1 x + 2.5) / (8 x*z + 1.2 y + 1.2)" @test transpose(q) === q @test conj(q) === q return @@ -604,7 +621,7 @@ function test_complex_pow() @test y^0 == (1.0 + 0im) @test y^1 == 0 * y * y + y @test y^2 == y * y - @test_throws ErrorException y^3 + @test isequal_canonical(y^3, GenericNonlinearExpr(:^, Any[y, 3])) return end diff --git a/test/test_print.jl b/test/test_print.jl index 790db8a1c40..649c268fc03 100644 --- a/test/test_print.jl +++ b/test/test_print.jl @@ -180,7 +180,7 @@ function test_printing_expressions() "x_{1}\\times y_{2,2} + x_{2}\\times y_{2,2} + z$ijulia_sq + 3 x_{1} + 3 x_{2} - 1", ) - ex = @expression(mod, -z * x[1] - x[1] * z + x[1] * x[2] + 0 * z^2) + ex = @expression(mod, -z * x[1] - z * x[1] + x[1] * x[2] + 0 * z^2) io_test(MIME("text/plain"), ex, "-2 z*x[1] + x[1]*x[2]") io_test(MIME("text/latex"), ex, "-2 z\\times x_{1} + x_{1}\\times x_{2}") diff --git a/test/test_variable.jl b/test/test_variable.jl index 6cd56ea84c1..dd6916104ad 100644 --- a/test/test_variable.jl +++ b/test/test_variable.jl @@ -1118,23 +1118,14 @@ end function test_error_messages() model = Model() @variable(model, x) - err = try - x >= 1 - catch err - err - end - function f(s) - return ErrorException( - replace(replace(err.msg, ">= 1" => "$(s) 1"), "`>=`" => "`$(s)`"), - ) - end - @test_throws err 1 >= x - @test_throws f("<=") x <= 1 - @test_throws f("<=") 1 <= x - @test_throws f(">") x > 1 - @test_throws f(">") 1 > x - @test_throws f("<") x < 1 - @test_throws f("<") 1 < x + @test_throws JuMP._logic_error_exception(:>=) x >= 1 + @test_throws JuMP._logic_error_exception(:>=) 1 >= x + @test_throws JuMP._logic_error_exception(:<=) x <= 1 + @test_throws JuMP._logic_error_exception(:<=) 1 <= x + @test_throws JuMP._logic_error_exception(:>) x > 1 + @test_throws JuMP._logic_error_exception(:>) 1 > x + @test_throws JuMP._logic_error_exception(:<) x < 1 + @test_throws JuMP._logic_error_exception(:<) 1 < x return end diff --git a/test/utilities.jl b/test/utilities.jl index b4a59992515..2db40a48acc 100644 --- a/test/utilities.jl +++ b/test/utilities.jl @@ -17,14 +17,17 @@ macro test_expression(expr) end) end -macro test_expression_with_string(expr, str) - return esc( - quote - realized_expr = @inferred $expr - @test 
string(realized_expr) == $str - @test isequal_canonical(@expression(model, $expr), realized_expr) - end, - ) +macro test_expression_with_string(expr, str, inferrable = true) + code = quote + realized_expr = if $inferrable + @inferred $expr + else + $expr + end + @test string(realized_expr) == $str + @test isequal_canonical(@expression(model, $expr), realized_expr) + end + return esc(code) end function _strip_line_from_error(err::ErrorException)