From 938069ac267274353c4dda971c68c44c386c9652 Mon Sep 17 00:00:00 2001 From: odow Date: Tue, 28 Feb 2023 09:56:50 +1300 Subject: [PATCH 1/8] Add quantum state discrimination tutorial --- docs/make.jl | 1 + .../tutorials/conic/quantum_discrimination.jl | 60 +++++++++++++++++++ 2 files changed, 61 insertions(+) create mode 100644 docs/src/tutorials/conic/quantum_discrimination.jl diff --git a/docs/make.jl b/docs/make.jl index 4b54b81ba14..c00bb446372 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -167,6 +167,7 @@ const _PAGES = [ "tutorials/conic/experiment_design.md", "tutorials/conic/min_ellipse.md", "tutorials/conic/ellipse_approx.md", + "tutorials/conic/quantum_discrimination.md", ], "Algorithms" => [ "tutorials/algorithms/benders_decomposition.md", diff --git a/docs/src/tutorials/conic/quantum_discrimination.jl b/docs/src/tutorials/conic/quantum_discrimination.jl new file mode 100644 index 00000000000..4332e1812c8 --- /dev/null +++ b/docs/src/tutorials/conic/quantum_discrimination.jl @@ -0,0 +1,60 @@ +# Copyright 2017, Iain Dunning, Joey Huchette, Miles Lubin, and contributors #src +# This Source Code Form is subject to the terms of the Mozilla Public License #src +# v.2.0. If a copy of the MPL was not distributed with this file, You can #src +# obtain one at https://mozilla.org/MPL/2.0/. #src + +# # Quantum state discrimination + +# This tutorial solves the problem of [quantum state discrimination](https://en.wikipedia.org/wiki/Quantum_state_discrimination).# +# The purpose is to demonstrate how you can solve problems involving +# complex-valued decision variables and the [`HermitianPSDCone`(@ref). See +# [Complex number support](@ref) for more details. + +# ## Required packages + +# This tutorial makes use of the following packages: + +using JuMP +import LinearAlgebra +import SCS + +# ## Data + +function random_state(d) + x = randn(ComplexF64, (d, d)) + y = x * x' + return LinearAlgebra.Hermitian(round.(y / LinearAlgebra.tr(y); digits = 3)) +end + +N, d = 2, 2 + +states = [random_state(d) for i in 1:N] + +# ## JuMP formulation + +model = Model(SCS.Optimizer) +set_silent(model) +E = [@variable(model, [1:d, 1:d] in HermitianPSDCone()) for i in 1:N] +@constraint(model, sum(E) .== LinearAlgebra.I) +@objective(model, Max, real(LinearAlgebra.dot(states, E)) / N) +optimize!(model) +objective_value(model) +solution = [value.(e) for e in E] + +# ## Alternative formulation + +# The formulation above includes `n` Hermitian matrices, and a set of linear +# equality constraints. We can simplify the problem by replacing `E[n]` with +# ``I - \sum E_i``, where ``I`` is the identity matrix. 
This results in: + +model = Model(SCS.Optimizer) +set_silent(model) +E = [@variable(model, [1:d, 1:d] in HermitianPSDCone()) for i in 1:N-1] +E_n = LinearAlgebra.Hermitian(LinearAlgebra.I - sum(E)) +@constraint(model, E_n in HermitianPSDCone()) +push!(E, E_n) +@objective(model, Max, real(LinearAlgebra.dot(states, E)) / n) +optimize!(model) +objective_value(model) + + From 1a33f15546102b410594834b0887946df8fc4b71 Mon Sep 17 00:00:00 2001 From: odow Date: Tue, 28 Feb 2023 10:10:09 +1300 Subject: [PATCH 2/8] Update --- docs/src/tutorials/conic/quantum_discrimination.jl | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docs/src/tutorials/conic/quantum_discrimination.jl b/docs/src/tutorials/conic/quantum_discrimination.jl index 4332e1812c8..cb5bce16e39 100644 --- a/docs/src/tutorials/conic/quantum_discrimination.jl +++ b/docs/src/tutorials/conic/quantum_discrimination.jl @@ -53,8 +53,6 @@ E = [@variable(model, [1:d, 1:d] in HermitianPSDCone()) for i in 1:N-1] E_n = LinearAlgebra.Hermitian(LinearAlgebra.I - sum(E)) @constraint(model, E_n in HermitianPSDCone()) push!(E, E_n) -@objective(model, Max, real(LinearAlgebra.dot(states, E)) / n) +@objective(model, Max, real(LinearAlgebra.dot(states, E)) / N) optimize!(model) objective_value(model) - - From 37a94135380d68dad51316514d6f2850ef13654d Mon Sep 17 00:00:00 2001 From: odow Date: Tue, 28 Feb 2023 11:48:57 +1300 Subject: [PATCH 3/8] Update explanation --- .../tutorials/conic/quantum_discrimination.jl | 88 ++++++++++++++++--- 1 file changed, 78 insertions(+), 10 deletions(-) diff --git a/docs/src/tutorials/conic/quantum_discrimination.jl b/docs/src/tutorials/conic/quantum_discrimination.jl index cb5bce16e39..466cb0a1901 100644 --- a/docs/src/tutorials/conic/quantum_discrimination.jl +++ b/docs/src/tutorials/conic/quantum_discrimination.jl @@ -5,8 +5,9 @@ # # Quantum state discrimination -# This tutorial solves the problem of [quantum state discrimination](https://en.wikipedia.org/wiki/Quantum_state_discrimination).# -# The purpose is to demonstrate how you can solve problems involving +# This tutorial solves the problem of [quantum state discrimination](https://en.wikipedia.org/wiki/Quantum_state_discrimination). + +# The purpose of this tutorial to demonstrate how to solve problems involving # complex-valued decision variables and the [`HermitianPSDCone`(@ref). See # [Complex number support](@ref) for more details. @@ -18,27 +19,88 @@ using JuMP import LinearAlgebra import SCS +# ## formulation + +# A `d`-dimensional quantum state, ``\rho``, can be defined by a complex-valued +# Hermitian matrix with a trace of `1`. Assume we have `N` `d`-dimensional +# quantum states, ``\{\rho_i}_{i=1}^n``, each of which is equally likely. + +# The goal of the Quantum state discrimination problem is to choose a set of +# positive-operator-valued-measures (POVMs), ``E_i`` such that if we observe +# ``E_i`` then the most probable state that we are in is ``\rho_i``. + +# Each POVM ``E_i`` is a complex-valued Hermitian matrix, and there is a +# requirement that ``\sum\limits E_i = \mathbf{I}``. + +# To choose the set of POVMs, we want to maximize the probability that we guess +# the quantum state corrrectly. This can be formulated as the following +# optimization problem: + +# ```math +# \begin{aligned} +# \max\limits_{E} \;\; & \\mathbb{E}_i[ tr(\rho_i \times E_i)] \\ +# \text{s.t.} \;\; & \sum\limits_i E_i = \mathbf{I} \\ +# & E_i \succeq 0 \forall i. +# ``` + # ## Data +# To setup our problem, we need `N` `d-`dimensional quantum states. 
To keep the +# problem simple, we use `N = 2` and `d = 2`. + +N, d = 2, 2 + +# We then generated `N` random `d`-dimensional quantum states: + function random_state(d) x = randn(ComplexF64, (d, d)) y = x * x' - return LinearAlgebra.Hermitian(round.(y / LinearAlgebra.tr(y); digits = 3)) + return LinearAlgebra.Hermitian(y / LinearAlgebra.tr(y)) end -N, d = 2, 2 - -states = [random_state(d) for i in 1:N] +ρ = [random_state(d) for i in 1:N] # ## JuMP formulation +# To model the problem in JuMP, we need a solver that supports positive +# semidefinite matrices: + model = Model(SCS.Optimizer) set_silent(model) + +# Then, we construct our set of `E` variables: + E = [@variable(model, [1:d, 1:d] in HermitianPSDCone()) for i in 1:N] + +# Here we have created a vector of matrices. This is different to other modeling +# languages such as YALMIP, which allow you to create a multi-dimensional array +# in which 2-dimensional slices of the array are Hermitian matrices. + +# We also need to enforce the constraint that +# ``\sum\limits_i E_i = \mathbf{I}``: + @constraint(model, sum(E) .== LinearAlgebra.I) -@objective(model, Max, real(LinearAlgebra.dot(states, E)) / N) + +# This constraint is a complex-valued equality constraint. In the solver, it +# will be decomposed onto two types of equality constraints: one to enforce +# equality of the real components, and one to enforce equality of the imaginary +# components. + +# Our objective is to maximize the expected probability of guessing correctly: + +@objective( + model, + Max, + sum(real(LinearAlgebra.tr(ρ[i] * E[i])) for i in 1:N) / N, +) + +# Now we optimize: + optimize!(model) -objective_value(model) +solution_summary(model) + +# The POVMs are: + solution = [value.(e) for e in E] # ## Alternative formulation @@ -53,6 +115,12 @@ E = [@variable(model, [1:d, 1:d] in HermitianPSDCone()) for i in 1:N-1] E_n = LinearAlgebra.Hermitian(LinearAlgebra.I - sum(E)) @constraint(model, E_n in HermitianPSDCone()) push!(E, E_n) -@objective(model, Max, real(LinearAlgebra.dot(states, E)) / N) + +# The objective can also be simplified, by observing that it is equivalent to: + +@objective(model, Max, real(LinearAlgebra.dot(ρ, E)) / N) + +# Then we can check that we get the same solution: + optimize!(model) -objective_value(model) +solution_summary(model) From 361acd10ec0e421ba98946e7009c004a8327c5ea Mon Sep 17 00:00:00 2001 From: odow Date: Tue, 28 Feb 2023 12:34:18 +1300 Subject: [PATCH 4/8] Fix typos --- docs/src/tutorials/conic/quantum_discrimination.jl | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/docs/src/tutorials/conic/quantum_discrimination.jl b/docs/src/tutorials/conic/quantum_discrimination.jl index 466cb0a1901..fe5b4c73e58 100644 --- a/docs/src/tutorials/conic/quantum_discrimination.jl +++ b/docs/src/tutorials/conic/quantum_discrimination.jl @@ -19,18 +19,18 @@ using JuMP import LinearAlgebra import SCS -# ## formulation +# ## Formulation # A `d`-dimensional quantum state, ``\rho``, can be defined by a complex-valued # Hermitian matrix with a trace of `1`. Assume we have `N` `d`-dimensional -# quantum states, ``\{\rho_i}_{i=1}^n``, each of which is equally likely. +# quantum states, ``\{\rho_i\}_{i=1}^n``, each of which is equally likely. # The goal of the Quantum state discrimination problem is to choose a set of # positive-operator-valued-measures (POVMs), ``E_i`` such that if we observe # ``E_i`` then the most probable state that we are in is ``\rho_i``. 
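# For concreteness, any complex Hermitian positive semidefinite matrix with
# unit trace is a valid `d = 2` state; one illustrative example (the name
# `ρ_example` is used only for this aside) is:

ρ_example = [0.7 0.1im; -0.1im 0.3]  # Hermitian, trace one, eigenvalues ≈ 0.72 and 0.28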
# Each POVM ``E_i`` is a complex-valued Hermitian matrix, and there is a -# requirement that ``\sum\limits E_i = \mathbf{I}``. +# requirement that ``\sum\limits_{i=1}^N E_i = \mathbf{I}``. # To choose the set of POVMs, we want to maximize the probability that we guess # the quantum state corrrectly. This can be formulated as the following @@ -38,9 +38,10 @@ import SCS # ```math # \begin{aligned} -# \max\limits_{E} \;\; & \\mathbb{E}_i[ tr(\rho_i \times E_i)] \\ -# \text{s.t.} \;\; & \sum\limits_i E_i = \mathbf{I} \\ -# & E_i \succeq 0 \forall i. +# \max\limits_{E} \;\; & \mathbb{E}_i[ tr(\rho_i \times E_i)] \\ +# \text{s.t.} \;\; & \sum\limits_{i=1}^N E_i = \mathbf{I} \\ +# & E_i \succeq 0 \forall i = 1,\ldots,N. +# \end{aligned} # ``` # ## Data From 3a4ea016d7460667a1441bb99f0de493ed7535a6 Mon Sep 17 00:00:00 2001 From: odow Date: Tue, 28 Feb 2023 13:49:44 +1300 Subject: [PATCH 5/8] Updates --- docs/src/tutorials/conic/quantum_discrimination.jl | 14 +++++++------- docs/styles/Vocab/JuMP-Vocab/accept.txt | 1 + 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/docs/src/tutorials/conic/quantum_discrimination.jl b/docs/src/tutorials/conic/quantum_discrimination.jl index fe5b4c73e58..822d05f3d1f 100644 --- a/docs/src/tutorials/conic/quantum_discrimination.jl +++ b/docs/src/tutorials/conic/quantum_discrimination.jl @@ -23,7 +23,7 @@ import SCS # A `d`-dimensional quantum state, ``\rho``, can be defined by a complex-valued # Hermitian matrix with a trace of `1`. Assume we have `N` `d`-dimensional -# quantum states, ``\{\rho_i\}_{i=1}^n``, each of which is equally likely. +# quantum states, ``\{\rho_i\}_{i=1}^N``, each of which is equally likely. # The goal of the Quantum state discrimination problem is to choose a set of # positive-operator-valued-measures (POVMs), ``E_i`` such that if we observe @@ -106,16 +106,16 @@ solution = [value.(e) for e in E] # ## Alternative formulation -# The formulation above includes `n` Hermitian matrices, and a set of linear -# equality constraints. We can simplify the problem by replacing `E[n]` with -# ``I - \sum E_i``, where ``I`` is the identity matrix. This results in: +# The formulation above includes `N` Hermitian matrices and a set of linear +# equality constraints. We can simplify the problem by replacing ``E_N`` with +# ``E_N = I - \sum\limits_{i=1}^{N-1} E_i``. 
This results in: model = Model(SCS.Optimizer) set_silent(model) E = [@variable(model, [1:d, 1:d] in HermitianPSDCone()) for i in 1:N-1] -E_n = LinearAlgebra.Hermitian(LinearAlgebra.I - sum(E)) -@constraint(model, E_n in HermitianPSDCone()) -push!(E, E_n) +E_N = LinearAlgebra.Hermitian(LinearAlgebra.I - sum(E)) +@constraint(model, E_N in HermitianPSDCone()) +push!(E, E_N) # The objective can also be simplified, by observing that it is equivalent to: diff --git a/docs/styles/Vocab/JuMP-Vocab/accept.txt b/docs/styles/Vocab/JuMP-Vocab/accept.txt index a2c8c434f35..f43dd5709c5 100644 --- a/docs/styles/Vocab/JuMP-Vocab/accept.txt +++ b/docs/styles/Vocab/JuMP-Vocab/accept.txt @@ -78,6 +78,7 @@ nl|NL overfitting parameteriz(ing|ation) perp +POVMs [Pp]recompil(ation|(e(?d))) [Pp]resolve Relatedly From 961840eb19e03b7d41db101cc1b38fb644c598e7 Mon Sep 17 00:00:00 2001 From: Oscar Dowson Date: Tue, 28 Feb 2023 14:19:34 +1300 Subject: [PATCH 6/8] Apply suggestions from code review Co-authored-by: James Foster <38274066+jd-foster@users.noreply.github.com> --- docs/src/tutorials/conic/quantum_discrimination.jl | 14 +++++++------- docs/styles/Vocab/JuMP-Vocab/accept.txt | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/src/tutorials/conic/quantum_discrimination.jl b/docs/src/tutorials/conic/quantum_discrimination.jl index 822d05f3d1f..d9c8ec459d6 100644 --- a/docs/src/tutorials/conic/quantum_discrimination.jl +++ b/docs/src/tutorials/conic/quantum_discrimination.jl @@ -25,22 +25,22 @@ import SCS # Hermitian matrix with a trace of `1`. Assume we have `N` `d`-dimensional # quantum states, ``\{\rho_i\}_{i=1}^N``, each of which is equally likely. -# The goal of the Quantum state discrimination problem is to choose a set of -# positive-operator-valued-measures (POVMs), ``E_i`` such that if we observe +# The goal of the quantum state discrimination problem is to choose a +# positive operator-valued measure ([POVM](https://en.wikipedia.org/wiki/POVM)), ``\{ E_i \}_{i=1}^N``, such that if we observe # ``E_i`` then the most probable state that we are in is ``\rho_i``. -# Each POVM ``E_i`` is a complex-valued Hermitian matrix, and there is a +# Each POVM element, ``E_i``, is a complex-valued Hermitian matrix, and there is a # requirement that ``\sum\limits_{i=1}^N E_i = \mathbf{I}``. -# To choose the set of POVMs, we want to maximize the probability that we guess +# To choose a POVM, we want to maximize the probability that we guess # the quantum state corrrectly. This can be formulated as the following # optimization problem: # ```math # \begin{aligned} -# \max\limits_{E} \;\; & \mathbb{E}_i[ tr(\rho_i \times E_i)] \\ +# \max\limits_{E} \;\; & \mathbb{E}_i[ \operatorname{tr}(\rho_i E_i)] \\ # \text{s.t.} \;\; & \sum\limits_{i=1}^N E_i = \mathbf{I} \\ -# & E_i \succeq 0 \forall i = 1,\ldots,N. +# & E_i \succeq 0 \; \forall i = 1,\ldots,N. 
# \end{aligned} # ``` @@ -100,7 +100,7 @@ E = [@variable(model, [1:d, 1:d] in HermitianPSDCone()) for i in 1:N] optimize!(model) solution_summary(model) -# The POVMs are: +# The optimal POVM is: solution = [value.(e) for e in E] diff --git a/docs/styles/Vocab/JuMP-Vocab/accept.txt b/docs/styles/Vocab/JuMP-Vocab/accept.txt index f43dd5709c5..4e2c3421c44 100644 --- a/docs/styles/Vocab/JuMP-Vocab/accept.txt +++ b/docs/styles/Vocab/JuMP-Vocab/accept.txt @@ -78,7 +78,7 @@ nl|NL overfitting parameteriz(ing|ation) perp -POVMs +POVM [Pp]recompil(ation|(e(?d))) [Pp]resolve Relatedly From 396a517f6298f7ff8a05048182375d24066e8a1d Mon Sep 17 00:00:00 2001 From: Oscar Dowson Date: Tue, 28 Feb 2023 14:58:41 +1300 Subject: [PATCH 7/8] Update docs/src/tutorials/conic/quantum_discrimination.jl --- docs/src/tutorials/conic/quantum_discrimination.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/tutorials/conic/quantum_discrimination.jl b/docs/src/tutorials/conic/quantum_discrimination.jl index d9c8ec459d6..4a55aab2a9d 100644 --- a/docs/src/tutorials/conic/quantum_discrimination.jl +++ b/docs/src/tutorials/conic/quantum_discrimination.jl @@ -8,7 +8,7 @@ # This tutorial solves the problem of [quantum state discrimination](https://en.wikipedia.org/wiki/Quantum_state_discrimination). # The purpose of this tutorial to demonstrate how to solve problems involving -# complex-valued decision variables and the [`HermitianPSDCone`(@ref). See +# complex-valued decision variables and the [`HermitianPSDCone`](@ref). See # [Complex number support](@ref) for more details. # ## Required packages From b2dd2107926d32523a26feb899729764fd6a7fa3 Mon Sep 17 00:00:00 2001 From: odow Date: Wed, 1 Mar 2023 08:25:30 +1300 Subject: [PATCH 8/8] Updates --- .../tutorials/conic/quantum_discrimination.jl | 22 +++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/docs/src/tutorials/conic/quantum_discrimination.jl b/docs/src/tutorials/conic/quantum_discrimination.jl index 4a55aab2a9d..11eb6a9c9e1 100644 --- a/docs/src/tutorials/conic/quantum_discrimination.jl +++ b/docs/src/tutorials/conic/quantum_discrimination.jl @@ -38,7 +38,7 @@ import SCS # ```math # \begin{aligned} -# \max\limits_{E} \;\; & \mathbb{E}_i[ \operatorname{tr}(\rho_i E_i)] \\ +# \max\limits_{E} \;\; & \frac{1}{N} \sum\limits_{i=1}^N \operatorname{tr}(\rho_i E_i) \\ # \text{s.t.} \;\; & \sum\limits_{i=1}^N E_i = \mathbf{I} \\ # & E_i \succeq 0 \; \forall i = 1,\ldots,N. # \end{aligned} @@ -78,9 +78,9 @@ E = [@variable(model, [1:d, 1:d] in HermitianPSDCone()) for i in 1:N] # in which 2-dimensional slices of the array are Hermitian matrices. # We also need to enforce the constraint that -# ``\sum\limits_i E_i = \mathbf{I}``: +# ``\sum\limits_{i=1}^N E_i = \mathbf{I}``: -@constraint(model, sum(E) .== LinearAlgebra.I) +@constraint(model, sum(E[i] for i in 1:N) .== LinearAlgebra.I) # This constraint is a complex-valued equality constraint. In the solver, it # will be decomposed onto two types of equality constraints: one to enforce @@ -100,7 +100,17 @@ E = [@variable(model, [1:d, 1:d] in HermitianPSDCone()) for i in 1:N] optimize!(model) solution_summary(model) -# The optimal POVM is: +# The probability of guessing correctly is: + +objective_value(model) + +# When `N = 2`, there is a known analytical solution of: + +0.5 + 0.25 * sum(LinearAlgebra.svdvals(ρ[1] - ρ[2])) + +# proving that we found the optimal solution. 
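# As a sketch of how this comparison could be automated (the tolerance below is
# an assumption chosen to match SCS's default accuracy), the two values should
# agree:

isapprox(
    objective_value(model),
    0.5 + 0.25 * sum(LinearAlgebra.svdvals(ρ[1] - ρ[2]));
    atol = 1e-3,
)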
+ +# Finally, the optimal POVM is: solution = [value.(e) for e in E] @@ -125,3 +135,7 @@ push!(E, E_N) optimize!(model) solution_summary(model) + +#- + +objective_value(model)
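# As a final sanity check that could follow either formulation (a sketch using
# only names defined above; `solution` simply rebinds the name used earlier),
# the recovered POVM should sum to the identity, so the deviation below should
# be close to zero:

solution = [value.(e) for e in E]
maximum(abs.(sum(solution) - Matrix(LinearAlgebra.I, d, d)))

#-

# and every element should be numerically positive semidefinite, so the
# smallest eigenvalue should not be meaningfully negative:

minimum(LinearAlgebra.eigmin(LinearAlgebra.Hermitian(Matrix(s))) for s in solution)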