Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix parameter counting in DefaultQubitLegacy adjoint method #4820

Merged
Merged 6 commits on Nov 11, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions doc/releases/changelog-dev.md
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,10 @@

<h3>Bug fixes 🐛</h3>

* Fixed a bug where adjoint-method differentiation would fail if the circuit
  contains a parametrized operation with `grad_method=None`.
[(#4820)](https://github.com/PennyLaneAI/pennylane/pull/4820)

* `MottonenStatePreparation` now raises an error if decomposing a broadcasted state vector.
[(#4767)](https://github.com/PennyLaneAI/pennylane/pull/4767)

Expand Down
2 changes: 1 addition & 1 deletion pennylane/_qubit_device.py
Original file line number Diff line number Diff line change
Expand Up @@ -1669,7 +1669,7 @@ def adjoint_jacobian(
adj_op = qml.adjoint(op)
ket = self._apply_operation(ket, adj_op)

if op.grad_method is not None:
if op.num_params == 1:
if param_number in trainable_params:
d_op_matrix = operation_derivative(op)
ket_temp = self._apply_unitary(ket, d_op_matrix, op.wires)
Expand Down
6 changes: 3 additions & 3 deletions pennylane/devices/qubit/adjoint_jacobian.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ def adjoint_jacobian(tape: QuantumTape, state=None):
adj_op = qml.adjoint(op)
ket = apply_operation(adj_op, ket)

if op.grad_method is not None:
if op.num_params == 1:
if param_number in tape.trainable_params:
d_op_matrix = operation_derivative(op)
ket_temp = apply_operation(qml.QubitUnitary(d_op_matrix, wires=op.wires), ket)
Expand Down Expand Up @@ -157,7 +157,7 @@ def adjoint_jvp(tape: QuantumTape, tangents: Tuple[Number], state=None):
adj_op = qml.adjoint(op)
ket = apply_operation(adj_op, ket)

if op.grad_method is not None:
if op.num_params == 1:
if param_number in tape.trainable_params:
# don't do anything if the tangent is 0
if not np.allclose(tangents[trainable_param_number], 0):
Expand Down Expand Up @@ -232,7 +232,7 @@ def adjoint_vjp(tape: QuantumTape, cotangents: Tuple[Number], state=None):
adj_op = qml.adjoint(op)
ket = apply_operation(adj_op, ket)

if op.grad_method is not None:
if op.num_params == 1:
if param_number in tape.trainable_params:
d_op_matrix = operation_derivative(op)
ket_temp = apply_operation(qml.QubitUnitary(d_op_matrix, wires=op.wires), ket)
Expand Down
53 changes: 53 additions & 0 deletions tests/devices/qubit/test_adjoint_jacobian.py
Original file line number Diff line number Diff line change
Expand Up @@ -294,6 +294,23 @@ def test_gradient_of_tape_with_tensor(self, tol):
]
assert np.allclose(res, expected, atol=tol, rtol=0)

def test_with_nontrainable_parametrized(self):
    """Test that a parametrized `QubitUnitary` is accounted for correctly
    when it is not trainable."""
    angle = np.array(0.6)

    # Trainable rotation followed by a parametrized but non-trainable unitary;
    # only the RY angle (parameter index 0) is marked trainable.
    tape = QuantumScript(
        [qml.RY(angle, wires=0), qml.QubitUnitary(np.eye(2), wires=0)],
        [qml.expval(qml.PauliZ(0))],
    )
    tape.trainable_params = [0]

    # d<Z>/d(angle) for RY is -sin(angle); the identity unitary must not shift it.
    result = adjoint_jacobian(tape)
    assert np.allclose(result, [-np.sin(angle)])


class TestAdjointJVP:
"""Test for adjoint_jvp"""
Expand Down Expand Up @@ -388,6 +405,24 @@ def test_custom_wire_labels(self, tangents, wires, tol):
expected = jac @ np.array(tangents)
assert np.allclose(actual, expected, atol=tol)

def test_with_nontrainable_parametrized(self):
    """Test that a parametrized `QubitUnitary` is accounted for correctly
    when it is not trainable."""
    angle = np.array(0.6)
    tangents = (0.45,)

    # Trainable rotation followed by a parametrized but non-trainable unitary;
    # only the RY angle (parameter index 0) is marked trainable.
    tape = QuantumScript(
        [qml.RY(angle, wires=0), qml.QubitUnitary(np.eye(2), wires=0)],
        [qml.expval(qml.PauliZ(0))],
    )
    tape.trainable_params = [0]

    # JVP is the Jacobian (-sin(angle)) contracted with the single tangent.
    result = adjoint_jvp(tape, tangents)
    assert np.allclose(result, [-np.sin(angle) * tangents[0]])


class TestAdjointVJP:
"""Test for adjoint_vjp"""
Expand Down Expand Up @@ -484,3 +519,21 @@ def test_custom_wire_labels(self, cotangents, wires, tol):
jac = np.array([[-np.sin(x), 0], [0, -np.cos(y)], [np.cos(x), 0]])
expected = np.array(cotangents) @ jac
assert np.allclose(actual, expected, atol=tol)

def test_with_nontrainable_parametrized(self):
    """Test that a parametrized `QubitUnitary` is accounted for correctly
    when it is not trainable."""
    angle = np.array(0.6)
    cotangents = (0.45,)

    # Trainable rotation followed by a parametrized but non-trainable unitary;
    # only the RY angle (parameter index 0) is marked trainable.
    tape = QuantumScript(
        [qml.RY(angle, wires=0), qml.QubitUnitary(np.eye(2), wires=0)],
        [qml.expval(qml.PauliZ(0))],
    )
    tape.trainable_params = [0]

    # VJP is the single cotangent contracted with the Jacobian (-sin(angle)).
    result = adjoint_vjp(tape, cotangents)
    assert np.allclose(result, [-np.sin(angle) * cotangents[0]])
18 changes: 18 additions & 0 deletions tests/gradients/core/test_adjoint_diff.py
Original file line number Diff line number Diff line change
Expand Up @@ -419,3 +419,21 @@ def test_multi_return(self, dev):
assert all(isinstance(g, np.ndarray) for g in expected)

assert np.allclose(grad_D[i], expected)

def test_with_nontrainable_parametrized(self):
    """Test that a parametrized `QubitUnitary` is accounted for correctly
    when it is not trainable."""
    dev = qml.device("default.qubit.legacy", wires=1)
    angle = np.array(0.6)

    def circuit(x):
        # Trainable rotation plus a parametrized, explicitly non-trainable unitary.
        qml.RY(x, wires=0)
        qml.QubitUnitary(np.eye(2, requires_grad=False), wires=0)
        return qml.expval(qml.PauliZ(0))

    # The adjoint-method gradient must agree with parameter-shift on the
    # same circuit even though the unitary's parameter is not trainable.
    jac_adjoint = qml.jacobian(qml.QNode(circuit, dev, diff_method="adjoint"))(angle)
    jac_psr = qml.jacobian(qml.QNode(circuit, dev, diff_method="parameter-shift"))(angle)
    assert np.allclose(jac_adjoint, jac_psr)