[Feature] Debug logging and NVTX annotations #144

Merged 8 commits on May 28, 2024
17 changes: 17 additions & 0 deletions docs/index.md
@@ -378,3 +378,20 @@ def fig_to_html(fig: Figure) -> str: # markdown-exec: hide
return buffer.getvalue() # markdown-exec: hide
print(fig_to_html(plt.gcf())) # markdown-exec: hide
```

## CUDA profiling and debugging

To debug your quantum programs on `CUDA` devices, `pyqtorch` offers a `DEBUG` mode, which can be activated by
setting the `PYQ_LOG_LEVEL` environment variable.

```bash
export PYQ_LOG_LEVEL=DEBUG
```

Before running your script, make sure to install the following packages:

```bash
pip install nvidia-pyindex
pip install nvidia-dlprof[pytorch]
```
For more information, check [the dlprof docs](https://docs.nvidia.com/deeplearning/frameworks/dlprof-user-guide/index.html).
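
As a minimal end-to-end sketch, the environment variable can also be set in-process, as long as it happens before `pyqtorch` is imported. The two-qubit circuit, the gate constructors and the parameter name `theta` below are illustrative assumptions about the pyqtorch API, not part of this change:

```python
import os

# PYQ_LOG_LEVEL is read once at import time, so set it before pyqtorch is imported.
os.environ["PYQ_LOG_LEVEL"] = "DEBUG"

import torch
import pyqtorch as pyq

# Illustrative two-qubit circuit (assumed API): RX on qubit 0, CNOT(0, 1).
circuit = pyq.QuantumCircuit(2, [pyq.RX(0, "theta"), pyq.CNOT(0, 1)])
state = circuit.init_state()
values = {"theta": torch.rand(1, requires_grad=True)}

out = circuit(state, values)  # wrapped in the "QuantumCircuit.forward" NVTX range
loss = out.abs().sum()
loss.backward()               # wrapped in the "QuantumCircuit.backward" NVTX range
```

Running this on a CUDA-enabled build under DLProf (or any NVTX-aware profiler) shows the forward and backward passes as named ranges alongside the debug log output.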
16 changes: 8 additions & 8 deletions pyproject.toml
@@ -16,7 +16,7 @@ authors = [
]
requires-python = ">=3.8,<3.13"
license = {text = "Apache 2.0"}
version = "1.1.2"
version = "1.1.3"
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
@@ -63,20 +63,20 @@ build = "mkdocs build --clean --strict"
serve = "mkdocs serve --dev-addr localhost:8000"

[tool.ruff]
select = ["E", "F", "I", "Q"]
extend-ignore = ["F841"]
lint.select = ["E", "F", "I", "Q"]
lint.extend-ignore = ["F841"]
line-length = 100

[tool.ruff.isort]
[tool.ruff.lint.isort]
required-imports = ["from __future__ import annotations"]

[tool.ruff.per-file-ignores]
"__init__.py" = ["F401"]
[tool.ruff.lint.per-file-ignores]
"__init__.py" = ["F401", "E402"]

[tool.ruff.mccabe]
[tool.ruff.lint.mccabe]
max-complexity = 15

[tool.ruff.flake8-quotes]
[tool.ruff.lint.flake8-quotes]
docstring-quotes = "double"

[tool.black]
37 changes: 37 additions & 0 deletions pyqtorch/__init__.py
@@ -9,6 +9,43 @@
# # limitations under the License.
from __future__ import annotations

import logging
import os
import sys

logging_levels = {
"DEBUG": logging.DEBUG,
"INFO": logging.INFO,
"WARNING": logging.WARNING,
"ERROR": logging.ERROR,
"CRITICAL": logging.CRITICAL,
}

LOG_LEVEL: str = os.environ.get("PYQ_LOG_LEVEL", "").upper()


# if LOG_LEVEL:
LOG_LEVEL: int = logging_levels.get(LOG_LEVEL, logging.INFO) # type: ignore[arg-type, no-redef]
# If logger not setup, add handler to stderr
# else use setup presumably from Qadence
handle = None
if __name__ not in logging.Logger.manager.loggerDict.keys():
handle = logging.StreamHandler(sys.stderr)
handle.set_name("console")

logger = logging.getLogger(__name__)
if handle:
logger.addHandler(handle)
[
h.setLevel(LOG_LEVEL) # type: ignore[func-returns-value]
for h in logger.handlers
if h.get_name() == "console"
]
logger.setLevel(LOG_LEVEL)

logger.info(f"PyQTorch logger successfully setup with log level {LOG_LEVEL}")


from .adjoint import expectation
from .analog import Add, Hamiltonian, HamiltonianEvolution, Scale
from .apply import apply_operator
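The stderr handler is only attached when no `pyqtorch` logger exists yet, so an application (such as Qadence, per the comment above) can take over the logging setup entirely by configuring the logger before the first import. A hedged sketch of that path; the file name and handler choice are arbitrary:

```python
import logging

# Configure the "pyqtorch" logger *before* the first import; getLogger() registers it
# in logging.Logger.manager.loggerDict, so the import-time setup above skips adding
# its own "console" handler and reuses this configuration instead.
app_handler = logging.FileHandler("pyqtorch_debug.log")  # arbitrary destination
logging.getLogger("pyqtorch").addHandler(app_handler)

import pyqtorch as pyq  # log records now go to the application-defined handler
```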
33 changes: 33 additions & 0 deletions pyqtorch/analog.py
@@ -1,6 +1,8 @@
from __future__ import annotations

import logging
from functools import reduce
from logging import getLogger
from operator import add
from typing import Tuple

@@ -18,6 +20,29 @@
BATCH_DIM = 2


logger = getLogger(__name__)


def forward_hook(*args, **kwargs) -> None: # type: ignore[no-untyped-def]
logger.debug("Forward complete")
torch.cuda.nvtx.range_pop()


def pre_forward_hook(*args, **kwargs) -> None: # type: ignore[no-untyped-def]
logger.debug("Executing forward")
torch.cuda.nvtx.range_push("HamiltonianEvolution.forward")


def backward_hook(*args, **kwargs) -> None: # type: ignore[no-untyped-def]
logger.debug("Backward complete")
torch.cuda.nvtx.range_pop()


def pre_backward_hook(*args, **kwargs) -> None: # type: ignore[no-untyped-def]
logger.debug("Executed backward")
torch.cuda.nvtx.range_push("Hamiltonian Evolution.backward")


class Scale(Sequence):
"""Generic container for multiplying a 'Primitive' or 'Sequence' instance by a parameter."""

@@ -90,6 +115,14 @@ def _matrixexp_operator(hamiltonian: Operator, time_evolution: torch.Tensor) ->

self._evolve_diag_operator = _diag_operator
self._evolve_matrixexp_operator = _matrixexp_operator
logger.debug("Hamiltonian Evolution initialized")
if logger.isEnabledFor(logging.DEBUG):
# When Debugging let's add logging and NVTX markers
# WARNING: incurs performance penalty
self.register_forward_hook(forward_hook, always_call=True)
self.register_full_backward_hook(backward_hook)
self.register_forward_pre_hook(pre_forward_hook)
self.register_full_backward_pre_hook(pre_backward_hook)

def forward(
self,
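The four hooks pair a `range_push` in each pre-hook with a `range_pop` in the matching post-hook, so every forward and backward pass of the module shows up as a named NVTX range. The self-contained sketch below shows the same pattern on a plain `torch.nn.Linear`; nothing here is pyqtorch-specific, and it assumes a CUDA-enabled PyTorch build:

```python
import logging

import torch
from torch import nn

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("nvtx_demo")


def pre_forward_hook(module, args):  # range opens just before forward runs
    logger.debug("Executing forward")
    torch.cuda.nvtx.range_push(f"{type(module).__name__}.forward")


def forward_hook(module, args, output):  # range closes once forward returns
    logger.debug("Forward complete")
    torch.cuda.nvtx.range_pop()


model = nn.Linear(4, 4)
if logger.isEnabledFor(logging.DEBUG):
    # Same guard as in this PR: hooks (and their overhead) only exist in DEBUG mode.
    model.register_forward_pre_hook(pre_forward_hook)
    model.register_forward_hook(forward_hook, always_call=True)

out = model(torch.randn(8, 4))  # now wrapped in a "Linear.forward" NVTX range
```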
35 changes: 35 additions & 0 deletions pyqtorch/circuit.py
@@ -1,10 +1,12 @@
from __future__ import annotations

import logging
from functools import reduce
from logging import getLogger
from operator import add
from typing import Any, Generator, Iterator, NoReturn

import torch
from torch import Tensor, complex128, einsum, rand
from torch import device as torch_device
from torch import dtype as torch_dtype
@@ -18,6 +20,26 @@
logger = getLogger(__name__)


def forward_hook(*args, **kwargs) -> None: # type: ignore[no-untyped-def]
logger.debug("Forward complete")
torch.cuda.nvtx.range_pop()


def pre_forward_hook(*args, **kwargs) -> None: # type: ignore[no-untyped-def]
logger.debug("Executing forward")
torch.cuda.nvtx.range_push("QuantumCircuit.forward")


def backward_hook(*args, **kwargs) -> None: # type: ignore[no-untyped-def]
logger.debug("Backward complete")
torch.cuda.nvtx.range_pop()


def pre_backward_hook(*args, **kwargs) -> None: # type: ignore[no-untyped-def]
logger.debug("Executed backward")
torch.cuda.nvtx.range_push("QuantumCircuit.backward")


class Sequence(Module):
"""A generic container for pyqtorch operations"""

@@ -31,6 +53,14 @@ def __init__(self, operations: list[Module]):
self._device = next(iter(set((op.device for op in self.operations))))
except StopIteration:
pass
logger.debug("QuantumCircuit initialized")
if logger.isEnabledFor(logging.DEBUG):
# When Debugging let's add logging and NVTX markers
# WARNING: incurs performance penalty
self.register_forward_hook(forward_hook, always_call=True)
self.register_full_backward_hook(backward_hook)
self.register_forward_pre_hook(pre_forward_hook)
self.register_full_backward_pre_hook(pre_backward_hook)

def __iter__(self) -> Iterator:
return iter(self.operations)
@@ -43,6 +73,11 @@ def forward(self, state: State, values: dict[str, Tensor] | ParameterDict = {})
state = op(state, values)
return state

def run(self, state: State = None, values: dict[str, Tensor] = {}) -> State:
if state is None:
state = self.init_state()
return self.forward(state, values)

@property
def device(self) -> torch_device:
return self._device
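The `run` method added above falls back to the circuit's own initial state when none is supplied. A short usage sketch; the gates and the parameter name `phi` are again illustrative assumptions rather than part of this change:

```python
import torch
import pyqtorch as pyq

circuit = pyq.QuantumCircuit(2, [pyq.RY(0, "phi"), pyq.CNOT(0, 1)])
values = {"phi": torch.tensor([0.5])}

# Both calls are equivalent: run() defaults to circuit.init_state() when state is None.
out_default = circuit.run(values=values)
out_explicit = circuit.run(circuit.init_state(), values)
```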
28 changes: 28 additions & 0 deletions pyqtorch/primitive.py
@@ -1,5 +1,7 @@
from __future__ import annotations

import logging
from logging import getLogger
from typing import Any

import torch
@@ -9,6 +11,24 @@
from pyqtorch.matrices import OPERATIONS_DICT, _controlled, _dagger
from pyqtorch.utils import product_state

logger = getLogger(__name__)


def forward_hook(*args, **kwargs) -> None: # type: ignore[no-untyped-def]
torch.cuda.nvtx.range_pop()


def pre_forward_hook(*args, **kwargs) -> None: # type: ignore[no-untyped-def]
torch.cuda.nvtx.range_push("Primitive.forward")


def backward_hook(*args, **kwargs) -> None: # type: ignore[no-untyped-def]
torch.cuda.nvtx.range_pop()


def pre_backward_hook(*args, **kwargs) -> None: # type: ignore[no-untyped-def]
torch.cuda.nvtx.range_push("Primitive.backward")


class Primitive(torch.nn.Module):
def __init__(self, pauli: Tensor, target: int) -> None:
@@ -19,6 +39,14 @@ def __init__(self, pauli: Tensor, target: int) -> None:
self._device = self.pauli.device
self._dtype = self.pauli.dtype

if logger.isEnabledFor(logging.DEBUG):
# When Debugging let's add logging and NVTX markers
# WARNING: incurs performance penalty
self.register_forward_hook(forward_hook, always_call=True)
self.register_full_backward_hook(backward_hook)
self.register_forward_pre_hook(pre_forward_hook)
self.register_full_backward_pre_hook(pre_backward_hook)

def __hash__(self) -> int:
return hash(self.qubit_support)

14 changes: 13 additions & 1 deletion pyqtorch/utils.py
@@ -1,6 +1,8 @@
from __future__ import annotations

import logging
from enum import Enum
from logging import getLogger
from typing import Sequence

import torch
@@ -15,12 +17,22 @@
RTOL = 0.0
GRADCHECK_ATOL = 1e-06

logger = getLogger(__name__)


def inner_prod(bra: Tensor, ket: Tensor) -> Tensor:
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Inner prod calculation")
torch.cuda.nvtx.range_push("inner_prod")

n_qubits = len(bra.size()) - 1
bra = bra.reshape((2**n_qubits, bra.size(-1)))
ket = ket.reshape((2**n_qubits, ket.size(-1)))
return torch.einsum("ib,ib->b", bra.conj(), ket)
res = torch.einsum("ib,ib->b", bra.conj(), ket)
if logger.isEnabledFor(logging.DEBUG):
torch.cuda.nvtx.range_pop()
logger.debug("Inner prod complete")
return res


def overlap(bra: Tensor, ket: Tensor) -> Tensor:
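With `PYQ_LOG_LEVEL=DEBUG`, every `inner_prod` call is now wrapped in its own NVTX range. A hedged sketch of capturing it on a CUDA device, combined with PyTorch's `torch.autograd.profiler.emit_nvtx()` context manager; the tensor shapes simply follow the `[2] * n_qubits + [batch]` state convention that `inner_prod` reshapes internally:

```python
import torch
from pyqtorch.utils import inner_prod

n_qubits, batch = 2, 4
bra = torch.randn([2] * n_qubits + [batch], dtype=torch.cdouble)
ket = torch.randn([2] * n_qubits + [batch], dtype=torch.cdouble)

# emit_nvtx() additionally marks every autograd op with an NVTX range; run the script
# under an NVTX-aware profiler (DLProf, Nsight Systems) to see both sets of ranges.
with torch.autograd.profiler.emit_nvtx():
    overlaps = inner_prod(bra, ket)  # wrapped in the "inner_prod" range added here
```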