From c324a1fa8aa0b06500312c3121726f0daa5de6b6 Mon Sep 17 00:00:00 2001
From: jpmoutinho
Date: Wed, 11 Oct 2023 09:49:27 +0200
Subject: [PATCH 01/34] Refresh FMs

---
 qadence/constructors/__init__.py     |   4 +
 qadence/constructors/feature_maps.py | 189 +++++++++++++++++++++------
 qadence/parameters.py                |   2 +-
 3 files changed, 157 insertions(+), 38 deletions(-)

diff --git a/qadence/constructors/__init__.py b/qadence/constructors/__init__.py
index 13b76d852..77322a9c5 100644
--- a/qadence/constructors/__init__.py
+++ b/qadence/constructors/__init__.py
@@ -6,6 +6,8 @@
     fourier_feature_map,
     tower_feature_map,
     exp_fourier_feature_map,
+    ReuploadScaling,
+    BasisSet,
 )

 from .ansatze import hea, build_qnn
@@ -29,6 +31,8 @@
     "fourier_feature_map",
     "tower_feature_map",
     "exp_fourier_feature_map",
+    "ReuploadScaling",
+    "BasisSet",
     "hea",
     "build_qnn",
     "hamiltonian_factory",

diff --git a/qadence/constructors/feature_maps.py b/qadence/constructors/feature_maps.py
index b35cee315..cefb3d669 100644
--- a/qadence/constructors/feature_maps.py
+++ b/qadence/constructors/feature_maps.py
@@ -1,35 +1,65 @@
 from __future__ import annotations

-from typing import Type, Union
+import inspect
+from enum import Enum
+from typing import Callable, Type, Union

 import numpy as np
 import sympy

 from qadence.blocks import AbstractBlock, KronBlock, chain, kron, tag
-from qadence.operations import RX, RY, RZ, H
+from qadence.operations import PHASE, RX, RY, RZ, H
 from qadence.parameters import FeatureParameter, Parameter
+from qadence.types import TParameter

-Rotation = Union[RX, RY, RZ]
+Rotation = Union[RX, RY, RZ, PHASE]
+
+
+class BasisSet(str, Enum):
+    FOURIER = "Fourier"
+    CHEBYSHEV = "Chebyshev"
+
+
+class ReuploadScaling(str, Enum):
+    CONSTANT = "Constant"
+    TOWER = "Tower"
+    EXP_UP = "Exponential_up"
+    EXP_DOWN = "Exponential_down"


 def feature_map(
     n_qubits: int,
     support: tuple[int, ...] = None,
-    param: str = "phi",
+    param: Parameter | sympy.Basic | str = "phi",
     op: Type[Rotation] = RX,
-    fm_type: str = "fourier",
+    fm_type: BasisSet | Type[sympy.Function] | str = BasisSet.FOURIER,
+    reupload_scaling: Callable | str | ReuploadScaling = ReuploadScaling.CONSTANT,
+    feature_range: tuple[float, float] = None,
+    target_range: tuple[float, float] = None,
+    multiplier: Parameter | TParameter = None,
 ) -> KronBlock:
     """Construct a feature map of a given type.

     Arguments:
         n_qubits: Number of qubits the feature map covers. Results in `support=range(n_qubits)`.
-        support: Overrides `n_qubits`. Puts one rotation gate on every qubit in `support`.
-        param: Parameter of the feature map.
-        op: Rotation operation of the feature map.
-        fm_type: Determines the additional expression the final feature parameter (the addtional
-            term in front of `param`). `"fourier": param` (nothing is done to `param`)
-            `"chebyshev": 2*acos(param)`, `"tower": (i+1)*2*acos(param)` (where `i` is the qubit
-            index).
+        support: Puts one feature-encoding rotation gate on every qubit in `support`. n_qubits in
+            this case specifies the total overall qubits of the circuit, which may be wider than the
+            support itself, but not narrower.
+        param: Parameter of the feature map; you can pass a string or sympy expression or Parameter;
+            it will be set as non-trainable (FeatureParameter) regardless
+        op: Rotation operation of the feature map; choose from RX, RY, RZ, PHASE
+        fm_type: Determines the basis set that this encoding relates to; choose
+            from `BasisSet.FOURIER` for Fourier encoding, or `BasisSet.CHEBYSHEV`
+            for Chebyshev polynomials of the first kind.
+        reupload_scaling: the feature_map re-uploads the data on len(support) qubits;
+            however, in each re-upload one may choose a different scaling factor in front,
+            to enrich the spectrum.
+        feature_range: what is the range of data that the input data is assumed to come from?
+        target_range: what is the range of data the data encoder assumes as the natural range? for
+            example, in Chebyshev polynomials, the natural range is (-1,1), while for Fourier it may
+            be chosen as (0,2pi)
+        multiplier: overall multiplier; this is useful for reuploading the feature map serially with
+            different scalings there; can be a number or parameter/expression

     Example:
     ```python exec="on" source="material-block" result="json"
@@ -45,21 +75,100 @@ def feature_map(
     print(f"{fm = }")
     ```
     """
-    fparam = FeatureParameter(param)
+    if isinstance(param, Parameter):
+        fparam = param
+        if fparam.is_trainable:
+            fparam.is_trainable = False
+    else:
+        fparam = FeatureParameter(param)
+
     if support is None:
         support = tuple(range(n_qubits))

     assert len(support) <= n_qubits, "Wrong qubit support supplied"

-    if fm_type == "fourier":
-        fm = kron(*[op(qubit, fparam) for qubit in support])
-    elif fm_type == "chebyshev":
-        fm = kron(*[op(qubit, 2 * sympy.acos(fparam)) for qubit in support])
-    elif fm_type == "tower":
-        fm = kron(*[op(qubit, (i + 1) * 2 * sympy.acos(fparam)) for i, qubit in enumerate(support)])
+    def set_range(fm_type: BasisSet | Type[sympy.Function] | str) -> tuple[float, float]:
+        if fm_type == BasisSet.FOURIER:
+            out_range = (0.0, 2 * sympy.pi)
+        elif fm_type == BasisSet.CHEBYSHEV:
+            out_range = (-1.0, 1.0)
+        else:
+            out_range = (0.0, 1.0)
+        return out_range
+
+    fr = set_range(fm_type) if feature_range is None else feature_range
+    tr = set_range(fm_type) if target_range is None else target_range
+
+    # this scales data to the range (-1, 1), only introducing sympy scalings if necessary
+    if np.isclose(float(np.mean(fr)), 0.0) and np.isclose(float(np.mean(tr)), 0.0):
+        scaled_and_shifted_fparam = (
+            fparam if np.isclose(float(fr[1]), float(tr[1])) else fparam * tr[1] / fr[1]
+        )
+    elif np.isclose(float(fr[1] - fr[0]), float(tr[1] - tr[0])):
+        scaled_and_shifted_fparam = fparam - (fr[0] - tr[0])
+    else:
+        scaled_and_shifted_fparam = (tr[1] - tr[0]) * (fparam - fr[0]) / (fr[1] - fr[0]) + tr[0]
+
+    if fm_type == BasisSet.FOURIER:
+        transformed_feature = scaled_and_shifted_fparam
+        basis_tag = "Fourier"
+    elif fm_type == BasisSet.CHEBYSHEV:
+        transformed_feature = sympy.acos(scaled_and_shifted_fparam)
+        basis_tag = "Chebyshev"
+    elif inspect.isclass(fm_type) and issubclass(fm_type, sympy.Function):
+        transformed_feature = fm_type(scaled_and_shifted_fparam)
+        basis_tag = str(fm_type)
+    else:
+        raise NotImplementedError(
+            f"{fm_type} not implemented. Choose a basis set from {[bs for bs in BasisSet]}, /"
+            "or your own sympy.Function to wrap the given feature parameter with"
+        )
+
+    if callable(reupload_scaling):
+        rs = reupload_scaling
+        rs_tag = "Custom"
+    elif reupload_scaling == ReuploadScaling.CONSTANT:
+
+        def rs(i: int) -> float:
+            return float(1)
+
+        rs_tag = "Constant"
+    elif reupload_scaling == ReuploadScaling.TOWER:
+
+        def rs(i: int) -> float:
+            return float(
+                i + 1
+            )  # in qadence, qubit indices start at zero, but tower should start at 1
+
+        rs_tag = "Tower"
+    elif reupload_scaling == ReuploadScaling.EXP_UP:
+
+        def rs(i: int) -> float:
+            return float(2**i)
+
+        rs_tag = "Exponential"
+    elif reupload_scaling == ReuploadScaling.EXP_DOWN:
+
+        def rs(i: int) -> float:
+            return float(2 ** (len(support) - 1 - i))
+
+        rs_tag = "Exponential"
     else:
-        raise NotImplementedError(f"Feature map {fm_type} not implemented")
-    fm.tag = "FM"
+        raise NotImplementedError(
+            f"Re-upload scaling {reupload_scaling} not Implemented; choose one from /"
+            f"{[rs for rs in ReuploadScaling]}, or your own python function with a /"
+            "single int arg as input and int or float output!"
+        )
+
+    mult = 1.0 if multiplier is None else multiplier
+
+    # assembling all ingredients
+    op_list = []
+    for i, qubit in enumerate(support):
+        op_list.append(op(qubit, mult * rs(i) * transformed_feature))  # type: ignore[operator]
+    fm = kron(*op_list)
+
+    fm.tag = rs_tag + " " + basis_tag + " FM"
     return fm

@@ -72,7 +181,7 @@ def fourier_feature_map(
         n_qubits: number of qubits across which the FM is created
         param: The base name for the feature `Parameter`
     """
-    fm = feature_map(n_qubits, support=support, param=param, op=op, fm_type="fourier")
+    fm = feature_map(n_qubits, support=support, param=param, op=op, fm_type=BasisSet.FOURIER)
     return tag(fm, tag="FourierFM")

@@ -86,7 +195,7 @@ def chebyshev_feature_map(
         support (Iterable[int]): The qubit support
         param: The base name for the feature `Parameter`
     """
-    fm = feature_map(n_qubits, support=support, param=param, op=op, fm_type="chebyshev")
+    fm = feature_map(n_qubits, support=support, param=param, op=op, fm_type=BasisSet.CHEBYSHEV)
     return tag(fm, tag="ChebyshevFM")

@@ -99,7 +208,14 @@ def tower_feature_map(
         n_qubits: number of qubits across which the FM is created
         param: The base name for the feature `Parameter`
     """
-    fm = feature_map(n_qubits, support=support, param=param, op=op, fm_type="tower")
+    fm = feature_map(
+        n_qubits,
+        support=support,
+        param=param,
+        op=op,
+        fm_type=BasisSet.CHEBYSHEV,
+        reupload_scaling=ReuploadScaling.TOWER,
+    )
     return tag(fm, tag="TowerFM")

@@ -122,18 +238,17 @@ def exp_fourier_feature_map(
     if feature_range is None:
         feature_range = (0.0, 2.0**n_qubits)

-    if support is None:
-        support = tuple(range(n_qubits))
-
-    xmax = max(feature_range)
-    xmin = min(feature_range)
-
-    x = Parameter(param, trainable=False)
-
-    # The feature map works on the range of 0 to 2**n
-    x_rescaled = 2 * np.pi * (x - xmin) / (xmax - xmin)
-
+    support = tuple(range(n_qubits)) if support is None else support
     hlayer = kron(H(qubit) for qubit in support)
-    rlayer = kron(RZ(support[i], x_rescaled * (2**i)) for i in range(n_qubits))
-
+    rlayer = feature_map(
+        n_qubits,
+        support=support,
+        param=param,
+        op=RZ,
+        fm_type=BasisSet.FOURIER,
+        reupload_scaling=ReuploadScaling.EXP_UP,
+        feature_range=feature_range,
+        target_range=(0.0, 2 * sympy.pi),
+    )
+    rlayer.tag = None
     return tag(chain(hlayer, rlayer), f"ExpFourierFM({param})")

diff --git a/qadence/parameters.py b/qadence/parameters.py
index 9242de06a..7d31d168a 100644
--- a/qadence/parameters.py
+++ b/qadence/parameters.py
@@ -191,7 +191,7 @@ def torchify(expr: Expr) -> SymPyModule:
         A torchified, differentiable Expression.
     """
     extra_funcs = {sympy.core.numbers.ImaginaryUnit: 1.0j}
-    return SymPyModule(expressions=[expr], extra_funcs=extra_funcs)
+    return SymPyModule(expressions=[sympy.N(expr)], extra_funcs=extra_funcs)

 def sympy_to_numeric(expr: Basic) -> TNumber:

From ecdebc7853b166fa93d9098175082f8d73d7459b Mon Sep 17 00:00:00 2001
From: jpmoutinho
Date: Wed, 11 Oct 2023 15:15:01 +0200
Subject: [PATCH 02/34] refactoring

---
 qadence/constructors/feature_maps.py | 133 ++++++++++++++-------------
 1 file changed, 69 insertions(+), 64 deletions(-)

diff --git a/qadence/constructors/feature_maps.py b/qadence/constructors/feature_maps.py
index cefb3d669..34d16f847 100644
--- a/qadence/constructors/feature_maps.py
+++ b/qadence/constructors/feature_maps.py
@@ -4,7 +4,6 @@
 from enum import Enum
 from typing import Callable, Type, Union

-import numpy as np
 import sympy

 from qadence.blocks import AbstractBlock, KronBlock, chain, kron, tag
@@ -27,13 +26,57 @@ class ReuploadScaling(str, Enum):
     EXP_DOWN = "Exponential_down"


+def _set_range(fm_type: BasisSet | Type[sympy.Function] | str) -> tuple[float, float]:
+    if fm_type == BasisSet.FOURIER:
+        return (0.0, 2 * sympy.pi)
+    elif fm_type == BasisSet.CHEBYSHEV:
+        return (-1.0, 1.0)
+    else:
+        return (0.0, 1.0)
+
+
+def _get_rs_func(
+    reupload_scaling: Callable | ReuploadScaling, support: tuple[int, ...]
+) -> tuple[Callable, str]:
+    if reupload_scaling == ReuploadScaling.CONSTANT:
+
+        def rs_func(i: int) -> float:
+            return float(1)
+
+    elif reupload_scaling == ReuploadScaling.TOWER:
+
+        def rs_func(i: int) -> float:
+            return float(i + 1)
+
+    elif reupload_scaling == ReuploadScaling.EXP_UP:
+
+        def rs_func(i: int) -> float:
+            return float(2**i)
+
+    elif reupload_scaling == ReuploadScaling.EXP_DOWN:
+
+        def rs_func(i: int) -> float:
+            return float(2 ** (len(support) - 1 - i))
+
+    else:
+        raise NotImplementedError(
+            f"Re-upload scaling {reupload_scaling} not Implemented; choose one from /"
+            f"{[rs for rs in ReuploadScaling]}, or your own python function with a /"
+            "single int arg as input and int or float output!"
+        )
+
+    rs_tag = reupload_scaling.value
+
+    return rs_func, rs_tag
+
+
 def feature_map(
     n_qubits: int,
-    support: tuple[int, ...] = None,
+    support: tuple[int, ...] | None = None,
     param: Parameter | sympy.Basic | str = "phi",
     op: Type[Rotation] = RX,
     fm_type: BasisSet | Type[sympy.Function] | str = BasisSet.FOURIER,
-    reupload_scaling: Callable | str | ReuploadScaling = ReuploadScaling.CONSTANT,
+    reupload_scaling: Callable | ReuploadScaling = ReuploadScaling.CONSTANT,
     feature_range: tuple[float, float] = None,
     target_range: tuple[float, float] = None,
     multiplier: Parameter | TParameter = None,
@@ -87,85 +130,47 @@ def feature_map(
     assert len(support) <= n_qubits, "Wrong qubit support supplied"

-    def set_range(fm_type: BasisSet | Type[sympy.Function] | str) -> tuple[float, float]:
-        if fm_type == BasisSet.FOURIER:
-            out_range = (0.0, 2 * sympy.pi)
-        elif fm_type == BasisSet.CHEBYSHEV:
-            out_range = (-1.0, 1.0)
-        else:
-            out_range = (0.0, 1.0)
-        return out_range
-
-    fr = set_range(fm_type) if feature_range is None else feature_range
-    tr = set_range(fm_type) if target_range is None else target_range
-
-    # this scales data to the range (-1, 1), only introducing sympy scalings if necessary
-    if np.isclose(float(np.mean(fr)), 0.0) and np.isclose(float(np.mean(tr)), 0.0):
-        scaled_and_shifted_fparam = (
-            fparam if np.isclose(float(fr[1]), float(tr[1])) else fparam * tr[1] / fr[1]
-        )
-    elif np.isclose(float(fr[1] - fr[0]), float(tr[1] - tr[0])):
-        scaled_and_shifted_fparam = fparam - (fr[0] - tr[0])
-    else:
-        scaled_and_shifted_fparam = (tr[1] - tr[0]) * (fparam - fr[0]) / (fr[1] - fr[0]) + tr[0]
+    # Set feature and target range
+    feature_range = _set_range(fm_type) if feature_range is None else feature_range
+    target_range = _set_range(fm_type) if target_range is None else target_range
+
+    # Rescale the parameter according to feature and target range
+    f_max = max(feature_range)
+    f_min = min(feature_range)
+    t_max = max(target_range)
+    t_min = min(target_range)
+
+    scaled_fparam = (t_max - t_min) * (fparam - f_min) / (f_max - f_min) + t_max

     if fm_type == BasisSet.FOURIER:
-        transformed_feature = scaled_and_shifted_fparam
-        basis_tag = "Fourier"
+        transformed_feature = scaled_fparam
+        basis_tag = fm_type.value
     elif fm_type == BasisSet.CHEBYSHEV:
-        transformed_feature = sympy.acos(scaled_and_shifted_fparam)
-        basis_tag = "Chebyshev"
+        transformed_feature = sympy.acos(scaled_fparam)
+        basis_tag = fm_type.value
     elif inspect.isclass(fm_type) and issubclass(fm_type, sympy.Function):
-        transformed_feature = fm_type(scaled_and_shifted_fparam)
+        transformed_feature = fm_type(scaled_fparam)
         basis_tag = str(fm_type)
     else:
         raise NotImplementedError(
             f"{fm_type} not implemented. Choose a basis set from {[bs for bs in BasisSet]}, /"
-            "or your own sympy.Function to wrap the given feature parameter with"
+            "or your own sympy.Function to wrap the given feature parameter with."
         )

+    # Set reupload scaling function
     if callable(reupload_scaling):
-        rs = reupload_scaling
+        rs_func = reupload_scaling
         rs_tag = "Custom"
-    elif reupload_scaling == ReuploadScaling.CONSTANT:
-
-        def rs(i: int) -> float:
-            return float(1)
-
-        rs_tag = "Constant"
-    elif reupload_scaling == ReuploadScaling.TOWER:
-
-        def rs(i: int) -> float:
-            return float(
-                i + 1
-            )  # in qadence, qubit indices start at zero, but tower should start at 1
-
-        rs_tag = "Tower"
-    elif reupload_scaling == ReuploadScaling.EXP_UP:
-
-        def rs(i: int) -> float:
-            return float(2**i)
-
-        rs_tag = "Exponential"
-    elif reupload_scaling == ReuploadScaling.EXP_DOWN:
-
-        def rs(i: int) -> float:
-            return float(2 ** (len(support) - 1 - i))
-
-        rs_tag = "Exponential"
     else:
-        raise NotImplementedError(
-            f"Re-upload scaling {reupload_scaling} not Implemented; choose one from /"
-            f"{[rs for rs in ReuploadScaling]}, or your own python function with a /"
-            "single int arg as input and int or float output!"
-        )
+        rs_func, rs_tag = _get_rs_func(reupload_scaling, support)

+    # Set multiplier
     mult = 1.0 if multiplier is None else multiplier

-    # assembling all ingredients
+    # Build feature map
     op_list = []
     for i, qubit in enumerate(support):
-        op_list.append(op(qubit, mult * rs(i) * transformed_feature))  # type: ignore[operator]
+        op_list.append(op(qubit, mult * rs_func(i) * transformed_feature))  # type: ignore[operator]
     fm = kron(*op_list)

     fm.tag = rs_tag + " " + basis_tag + " FM"

From d295fd721ced6cb213dd3b76f571949dcea4d4b9 Mon Sep 17 00:00:00 2001
From: jpmoutinho
Date: Wed, 11 Oct 2023 17:23:30 +0200
Subject: [PATCH 03/34] more refactoring

---
 qadence/constructors/__init__.py     |   4 -
 qadence/constructors/feature_maps.py | 112 +++++++++++--------------
 qadence/types.py                     |  17 ++++
 3 files changed, 70 insertions(+), 63 deletions(-)

diff --git a/qadence/constructors/__init__.py b/qadence/constructors/__init__.py
index 77322a9c5..13b76d852 100644
--- a/qadence/constructors/__init__.py
+++ b/qadence/constructors/__init__.py
@@ -6,8 +6,6 @@
     fourier_feature_map,
     tower_feature_map,
     exp_fourier_feature_map,
-    ReuploadScaling,
-    BasisSet,
 )

 from .ansatze import hea, build_qnn
@@ -31,8 +29,6 @@
     "fourier_feature_map",
     "tower_feature_map",
     "exp_fourier_feature_map",
-    "ReuploadScaling",
-    "BasisSet",
     "hea",
     "build_qnn",
     "hamiltonian_factory",

diff --git a/qadence/constructors/feature_maps.py b/qadence/constructors/feature_maps.py
index 34d16f847..88d40fb2a 100644
--- a/qadence/constructors/feature_maps.py
+++ b/qadence/constructors/feature_maps.py
@@ -1,7 +1,7 @@
 from __future__ import annotations

 import inspect
-from enum import Enum
+import math
 from typing import Callable, Type, Union

 import sympy
@@ -9,21 +9,10 @@
 from qadence.blocks import AbstractBlock, KronBlock, chain, kron, tag
 from qadence.operations import PHASE, RX, RY, RZ, H
 from qadence.parameters import FeatureParameter, Parameter
-from qadence.types import TParameter
+from qadence.types import BasisSet, ReuploadScaling, TParameter

-Rotation = Union[RX, RY, RZ, PHASE]
-
-
-class BasisSet(str, Enum):
-    FOURIER = "Fourier"
-    CHEBYSHEV = "Chebyshev"
-
-
-class ReuploadScaling(str, Enum):
-    CONSTANT = "Constant"
-    TOWER = "Tower"
-    EXP_UP = "Exponential_up"
-    EXP_DOWN = "Exponential_down"
+TRotation = Type[Union[RX, RY, RZ, PHASE]]
+ROTATIONS = [RX, RY, RZ, PHASE]


 def _set_range(fm_type: BasisSet | Type[sympy.Function] | str) -> tuple[float, float]:
-def _get_rs_func(
-    reupload_scaling: Callable | ReuploadScaling, support: tuple[int, ...]
-) -> tuple[Callable, str]:
+def _get_rs_func(reupload_scaling: Callable | ReuploadScaling) -> tuple[Callable, str]:
     if reupload_scaling == ReuploadScaling.CONSTANT:

         def rs_func(i: int) -> float:
-            return float(1)
+            return 1

     elif reupload_scaling == ReuploadScaling.TOWER:

         def rs_func(i: int) -> float:
             return float(i + 1)

-    elif reupload_scaling == ReuploadScaling.EXP_UP:
+    elif reupload_scaling == ReuploadScaling.EXP:

         def rs_func(i: int) -> float:
             return float(2**i)

-    elif reupload_scaling == ReuploadScaling.EXP_DOWN:
-
-        def rs_func(i: int) -> float:
-            return float(2 ** (len(support) - 1 - i))
-
     else:
         raise NotImplementedError(
-            f"Re-upload scaling {reupload_scaling} not Implemented; choose one from /"
+            f"Re-upload scaling {reupload_scaling} not implemented; choose one from /"
             f"{[rs for rs in ReuploadScaling]}, or your own python function with a /"
             "single int arg as input and int or float output!"
         )
@@ -74,7 +56,7 @@ def feature_map(
     n_qubits: int,
     support: tuple[int, ...] | None = None,
     param: Parameter | sympy.Basic | str = "phi",
-    op: Type[Rotation] = RX,
+    op: TRotation = RX,
     fm_type: BasisSet | Type[sympy.Function] | str = BasisSet.FOURIER,
     reupload_scaling: Callable | ReuploadScaling = ReuploadScaling.CONSTANT,
     feature_range: tuple[float, float] = None,
@@ -88,36 +70,44 @@ def feature_map(
         support: Puts one feature-encoding rotation gate on every qubit in `support`. n_qubits in
             this case specifies the total overall qubits of the circuit, which may be wider than the
             support itself, but not narrower.
-        param: Parameter of the feature map; you can pass a string or sympy expression or Parameter;
-            it will be set as non-trainable (FeatureParameter) regardless
+        param: Parameter of the feature map; you can pass a string, sympy expression or Parameter;
+            it will be set as non-trainable (FeatureParameter) regardless.
         op: Rotation operation of the feature map; choose from RX, RY, RZ, PHASE
-        fm_type: Determines the basis set that this encoding relates to; choose
-            from `BasisSet.FOURIER` for Fourier encoding, or `BasisSet.CHEBYSHEV`
-            for Chebyshev polynomials of the first kind.
-        reupload_scaling: the feature_map re-uploads the data on len(support) qubits;
-            however, in each re-upload one may choose a different scaling factor in front,
-            to enrich the spectrum.
-        feature_range: what is the range of data that the input data is assumed to come from?
-        target_range: what is the range of data the data encoder assumes as the natural range? for
-            example, in Chebyshev polynomials, the natural range is (-1,1), while for Fourier it may
-            be chosen as (0,2pi)
+        fm_type: Determines the basis set for the encoding; choose from `BasisSet.FOURIER` for
+            Fourier encoding, or `BasisSet.CHEBYSHEV` for Chebyshev polynomials of the first kind.
+        reupload_scaling: how the feature map scales the data that is re-uploaded for each qubit.
+        feature_range: range of data that the input data is assumed to come from.
+        target_range: range of data the data encoder assumes as the natural range. For example,
+            in Chebyshev polynomials it is (-1, 1), while for Fourier it may be chosen as (0, 2pi).
         multiplier: overall multiplier; this is useful for reuploading the feature map serially with
-            different scalings there; can be a number or parameter/expression
+            different scalings there; can be a number or parameter/expression.
     Example:
     ```python exec="on" source="material-block" result="json"
-    from qadence import feature_map
+    from qadence import feature_map, BasisSet, ReuploadScaling

-    fm = feature_map(3, fm_type="fourier")
+    fm = feature_map(3, fm_type=BasisSet.FOURIER)
     print(f"{fm = }")

-    fm = feature_map(3, fm_type="chebyshev")
+    fm = feature_map(3, fm_type=BasisSet.CHEBYSHEV)
     print(f"{fm = }")

-    fm = feature_map(3, fm_type="tower")
+    fm = feature_map(3, fm_type=BasisSet.FOURIER, reupload_scaling = ReuploadScaling.TOWER)
     print(f"{fm = }")
     ```
     """
+
+    # Process input
+    if support is None:
+        support = tuple(range(n_qubits))
+    elif len(support) != n_qubits:
+        raise ValueError("Wrong qubit support supplied")
+
+    if op not in ROTATIONS:
+        raise ValueError(
+            f"Operation {op} not supported. Please choose one from {[rot.__name__ for rot in ROTATIONS]}."
+        )
+
     if isinstance(param, Parameter):
         fparam = param
         if fparam.is_trainable:
@@ -125,23 +115,26 @@ def feature_map(
     else:
         fparam = FeatureParameter(param)

-    if support is None:
-        support = tuple(range(n_qubits))
-
-    assert len(support) <= n_qubits, "Wrong qubit support supplied"
-
     # Set feature and target range
     feature_range = _set_range(fm_type) if feature_range is None else feature_range
     target_range = _set_range(fm_type) if target_range is None else target_range

-    # Rescale the parameter according to feature and target range
+    # Rescale the feature parameter
     f_max = max(feature_range)
     f_min = min(feature_range)
     t_max = max(target_range)
     t_min = min(target_range)

-    scaled_fparam = (t_max - t_min) * (fparam - f_min) / (f_max - f_min) + t_max
+    scaling = (t_max - t_min) / (f_max - f_min)
+    shift = t_min - f_min * scaling
+
+    if math.isclose(scaling, 1.0):
+        # So we don't get 1.0 factor in visualization
+        scaled_fparam = fparam + shift
+    else:
+        scaled_fparam = scaling * fparam + shift

+    # Transform feature parameter
     if fm_type == BasisSet.FOURIER:
         transformed_feature = scaled_fparam
         basis_tag = fm_type.value
@@ -162,23 +155,24 @@ def feature_map(
     # Set reupload scaling function
     if callable(reupload_scaling):
         rs_func = reupload_scaling
         rs_tag = "Custom"
     else:
-        rs_func, rs_tag = _get_rs_func(reupload_scaling, support)
+        rs_func, rs_tag = _get_rs_func(reupload_scaling)

-    # Set multiplier
-    mult = 1.0 if multiplier is None else multiplier
+    # Set overall multiplier
+    multiplier = 1 if multiplier is None else multiplier

     # Build feature map
     op_list = []
     for i, qubit in enumerate(support):
-        op_list.append(op(qubit, mult * rs_func(i) * transformed_feature))  # type: ignore[operator]
+        op_list.append(op(qubit, multiplier * rs_func(i) * transformed_feature))
     fm = kron(*op_list)

     fm.tag = rs_tag + " " + basis_tag + " FM"
+
     return fm


 def fourier_feature_map(
-    n_qubits: int, support: tuple[int, ...] = None, param: str = "phi", op: Type[Rotation] = RX
+    n_qubits: int, support: tuple[int, ...] = None, param: str = "phi", op: TRotation = RX
 ) -> AbstractBlock:
     """Construct a Fourier feature map


 def chebyshev_feature_map(
-    n_qubits: int, support: tuple[int, ...] = None, param: str = "phi", op: Type[Rotation] = RX
+    n_qubits: int, support: tuple[int, ...] = None, param: str = "phi", op: TRotation = RX
 ) -> AbstractBlock:
     """Construct a Chebyshev feature map


 def tower_feature_map(
-    n_qubits: int, support: tuple[int, ...] = None, param: str = "phi", op: Type[Rotation] = RX
+    n_qubits: int, support: tuple[int, ...] = None, param: str = "phi", op: TRotation = RX
 ) -> AbstractBlock:
     """Construct a Chebyshev tower feature map

@@ -251,7 +245,7 @@ def exp_fourier_feature_map(
         param=param,
         op=RZ,
         fm_type=BasisSet.FOURIER,
-        reupload_scaling=ReuploadScaling.EXP_UP,
+        reupload_scaling=ReuploadScaling.EXP,
         feature_range=feature_range,
         target_range=(0.0, 2 * sympy.pi),
     )

diff --git a/qadence/types.py b/qadence/types.py
index 3e9c0657b..d1bbaf37a 100644
--- a/qadence/types.py
+++ b/qadence/types.py
@@ -33,6 +33,8 @@
     "BackendName",
     "StateGeneratorType",
     "LTSOrder",
+    "ReuploadScaling",
+    "BasisSet",
     "TensorType",
     "DiffMode",
     "BackendName",
@@ -122,6 +124,21 @@ class LTSOrder(StrEnum):
     """ST4."""


+class BasisSet(str, Enum):
+    """Basis set for feature maps."""
+
+    FOURIER = "Fourier"
+    """Fourier basis set."""
+    CHEBYSHEV = "Chebyshev"
+    """Chebyshev polynomials of the first kind."""
+
+
+class ReuploadScaling(str, Enum):
+    CONSTANT = "Constant"
+    TOWER = "Tower"
+    EXP = "Exponential"
+
+
 class _DiffMode(StrEnum):
     """Differentiation modes to choose from."""

From d870af2f383dd07749714087ea22ad83c705208e Mon Sep 17 00:00:00 2001
From: jpmoutinho
Date: Wed, 11 Oct 2023 18:01:54 +0200
Subject: [PATCH 04/34] tags

---
 qadence/constructors/feature_maps.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/qadence/constructors/feature_maps.py b/qadence/constructors/feature_maps.py
index 88d40fb2a..89f095bc3 100644
--- a/qadence/constructors/feature_maps.py
+++ b/qadence/constructors/feature_maps.py
@@ -181,7 +181,7 @@ def fourier_feature_map(
         param: The base name for the feature `Parameter`
     """
     fm = feature_map(n_qubits, support=support, param=param, op=op, fm_type=BasisSet.FOURIER)
-    return tag(fm, tag="FourierFM")
+    return fm

@@ -195,7 +195,7 @@ def chebyshev_feature_map(
         param: The base name for the feature `Parameter`
     """
     fm = feature_map(n_qubits, support=support, param=param, op=op, fm_type=BasisSet.CHEBYSHEV)
-    return tag(fm, tag="ChebyshevFM")
+    return fm

@@ -215,7 +215,7 @@ def tower_feature_map(
         fm_type=BasisSet.CHEBYSHEV,
         reupload_scaling=ReuploadScaling.TOWER,
     )
-    return tag(fm, tag="TowerFM")
+    return fm

From 86ab25c11686cf022d7b294901125827b125507c Mon Sep 17 00:00:00 2001
From: jpmoutinho
Date: Thu, 12 Oct 2023 11:18:15 +0200
Subject: [PATCH 05/34] backward compatibility

---
 qadence/constructors/feature_maps.py | 78 +++++++++++++++++-----------
 qadence/types.py                     |  9 +++-
 2 files changed, 54 insertions(+), 33 deletions(-)

diff --git a/qadence/constructors/feature_maps.py b/qadence/constructors/feature_maps.py
index 89f095bc3..0253f11ab 100644
--- a/qadence/constructors/feature_maps.py
+++ b/qadence/constructors/feature_maps.py
@@ -7,10 +7,13 @@
 import sympy

 from qadence.blocks import AbstractBlock, KronBlock, chain, kron, tag
+from qadence.logger import get_logger
 from qadence.operations import PHASE, RX, RY, RZ, H
 from qadence.parameters import FeatureParameter, Parameter
 from qadence.types import BasisSet, ReuploadScaling, TParameter

+logger = get_logger(__name__)
+
 TRotation = Type[Union[RX, RY, RZ, PHASE]]
 ROTATIONS = [RX, RY, RZ, PHASE]

@@ -27,32 +27,23 @@ def _set_range(fm_type: BasisSet | Type[sympy.Function] | str) -> tuple[float, f
-def _get_rs_func(reupload_scaling: Callable | ReuploadScaling) -> tuple[Callable, str]:
-    if reupload_scaling == ReuploadScaling.CONSTANT:
-
-        def rs_func(i: int) -> float:
-            return 1
-
-    elif reupload_scaling == ReuploadScaling.TOWER:
-
-        def rs_func(i: int) -> float:
-            return float(i + 1)
-
-    elif reupload_scaling == ReuploadScaling.EXP:
-
-        def rs_func(i: int) -> float:
-            return float(2**i)
-
-    else:
-        raise NotImplementedError(
-            f"Re-upload scaling {reupload_scaling} not implemented; choose one from /"
-            f"{[rs for rs in ReuploadScaling]}, or your own python function with a /"
-            "single int arg as input and int or float output!"
-        )
-
-    rs_tag = reupload_scaling.value
-
-    return rs_func, rs_tag
+def _rs_constant(i: int) -> int:
+    return 1
+
+
+def _rs_tower(i: int) -> float:
+    return float(i + 1)
+
+
+def _rs_exp(i: int) -> float:
+    return float(2**i)
+
+
+RS_FUNC_DICT = {
+    ReuploadScaling.CONSTANT: _rs_constant,
+    ReuploadScaling.TOWER: _rs_tower,
+    ReuploadScaling.EXP: _rs_exp,
+}


 def feature_map(
@@ -52,7 +52,7 @@ def feature_map(
     param: Parameter | sympy.Basic | str = "phi",
     op: TRotation = RX,
     fm_type: BasisSet | Type[sympy.Function] | str = BasisSet.FOURIER,
-    reupload_scaling: Callable | ReuploadScaling = ReuploadScaling.CONSTANT,
+    reupload_scaling: ReuploadScaling | Callable | str = ReuploadScaling.CONSTANT,
     feature_range: tuple[float, float] = None,
     target_range: tuple[float, float] = None,
     multiplier: Parameter | TParameter = None,
@@ -74,7 +74,7 @@ def feature_map(
         multiplier: overall multiplier; this is useful for reuploading the feature map serially with
-            different scalings there; can be a number or parameter/expression.
+            different scalings; can be a number or parameter/expression.
@@ -102,6 +102,17 @@ def feature_map(
             f"Operation {op} not supported. Please choose one from {[rot.__name__ for rot in ROTATIONS]}."
         )

+    # Backward compatibility
+    if fm_type in ("fourier", "chebyshev", "tower"):
+        logger.warning(
+            "Selecting `fm_type` as 'fourier', 'chebyshev' or 'tower' is deprecated. "
+            "Please use the respective enumerations: 'fm_type = BasisSet.FOURIER', "
+            "'fm_type = BasisSet.CHEBYSHEV' or 'reupload_scaling = ReuploadScaling.TOWER'."
+        )
+    fm_type = BasisSet.FOURIER if fm_type == "fourier" else fm_type
+    fm_type = BasisSet.CHEBYSHEV if fm_type == "chebyshev" else fm_type
+    reupload_scaling = ReuploadScaling.TOWER if fm_type == "tower" else reupload_scaling
+
     if isinstance(param, Parameter):
         fparam = param
         if fparam.is_trainable:
@@ -137,25 +142,36 @@ def feature_map(
     # Transform feature parameter
     if fm_type == BasisSet.FOURIER:
         transformed_feature = scaled_fparam
-        basis_tag = fm_type.value
     elif fm_type == BasisSet.CHEBYSHEV:
         transformed_feature = sympy.acos(scaled_fparam)
-        basis_tag = fm_type.value
     elif inspect.isclass(fm_type) and issubclass(fm_type, sympy.Function):
         transformed_feature = fm_type(scaled_fparam)
-        basis_tag = str(fm_type)
     else:
         raise NotImplementedError(
-            f"{fm_type} not implemented. Choose a basis set from {[bs for bs in BasisSet]}, /"
-            "or your own sympy.Function to wrap the given feature parameter with."
+            f"Feature map type {fm_type} not implemented. Choose an item from the BasisSet enum: "
+            f"{[bs.name for bs in BasisSet]}, or your own sympy.Function to wrap the given "
+            "feature parameter with."
         )

+    basis_tag = fm_type.value if isinstance(fm_type, BasisSet) else str(fm_type)
+
     # Set reupload scaling function
     if callable(reupload_scaling):
         rs_func = reupload_scaling
         rs_tag = "Custom"
     else:
-        rs_func, rs_tag = _get_rs_func(reupload_scaling)
+        try:
+            rs_func = RS_FUNC_DICT[reupload_scaling]  # type: ignore [index]
+            if isinstance(reupload_scaling, ReuploadScaling):
+                rs_tag = reupload_scaling.value
+            else:
+                rs_tag = reupload_scaling
+        except (KeyError, ValueError) as error:
+            raise NotImplementedError(
+                f"Reupload scaling {reupload_scaling} not implemented; choose an item from "
+                f"the ReuploadScaling enum: {[rs.name for rs in ReuploadScaling]}, or your own "
+                "python function with a single int arg as input and int or float output."
+            )

     # Set overall multiplier
     multiplier = 1 if multiplier is None else multiplier
@@ -190,7 +190,7 @@ def fourier_feature_map(
 def fourier_feature_map(
     n_qubits: int, support: tuple[int, ...] = None, param: str = "phi", op: TRotation = RX
 ) -> AbstractBlock:
-    """Construct a Fourier feature map
+    """Construct a Fourier feature map.

     Args:
         n_qubits: number of qubits across which the FM is created
@@ -203,7 +203,7 @@ def chebyshev_feature_map(
     n_qubits: int, support: tuple[int, ...] = None, param: str = "phi", op: TRotation = RX
 ) -> AbstractBlock:
-    """Construct a Chebyshev feature map
+    """Construct a Chebyshev feature map.

     Args:
         n_qubits: number of qubits across which the FM is created
         support (Iterable[int]): The qubit support
         param: The base name for the feature `Parameter`
@@ -217,7 +217,7 @@ def tower_feature_map(
     n_qubits: int, support: tuple[int, ...] = None, param: str = "phi", op: TRotation = RX
 ) -> AbstractBlock:
-    """Construct a Chebyshev tower feature map
+    """Construct a Chebyshev tower feature map.

     Args:
         n_qubits: number of qubits across which the FM is created
         param: The base name for the feature `Parameter`

diff --git a/qadence/types.py b/qadence/types.py
index d1bbaf37a..2848983cb 100644
--- a/qadence/types.py
+++ b/qadence/types.py
@@ -124,7 +124,7 @@ class LTSOrder(StrEnum):
     """ST4."""


-class BasisSet(str, Enum):
+class BasisSet(StrEnum):
     """Basis set for feature maps."""

     FOURIER = "Fourier"
@@ -133,10 +133,15 @@ class BasisSet(StrEnum):
     """Chebyshev polynomials of the first kind."""


-class ReuploadScaling(str, Enum):
+class ReuploadScaling(StrEnum):
+    """Scaling for data reuploads in feature maps."""
+
     CONSTANT = "Constant"
+    """Constant scaling."""
     TOWER = "Tower"
+    """Linearly increasing scaling."""
     EXP = "Exponential"
+    """Exponentially increasing scaling."""


 class _DiffMode(StrEnum):
     """Differentiation modes to choose from."""

From b324d845f626a68d96e292f56e2be70848c6f1a8 Mon Sep 17 00:00:00 2001
From: jpmoutinho
Date: Thu, 12 Oct 2023 11:47:07 +0200
Subject: [PATCH 06/34] change inputs in code

---
 docs/development/draw.md                      |  4 ++--
 docs/qml/index.md                             |  2 +-
 examples/draw.py                              |  2 +-
 qadence/constructors/feature_maps.py          | 10 ++++----
 tests/qadence/test_matrices.py                | 23 ++++++++++++++-----
 .../test_measurements/test_tomography.py      | 16 ++++++-------
 6 files changed, 34 insertions(+), 23 deletions(-)

diff --git a/docs/development/draw.md b/docs/development/draw.md
index 803101bf0..0ec4374fe 100644
--- a/docs/development/draw.md
+++ b/docs/development/draw.md
@@ -56,9 +56,9 @@ print(html_string(block)) # markdown-exec: hide
 ```

 ```python exec="on" source="material-block" html="1"
-from qadence import feature_map, hea, chain
+from qadence import feature_map, hea, chain, ReuploadScaling

-block = chain(feature_map(4, fm_type="tower"), hea(4,2))
+block = chain(feature_map(4, fm_type=ReuploadScaling.TOWER), hea(4,2))
 from qadence.draw import html_string # markdown-exec: hide
 print(html_string(block)) # markdown-exec: hide
 ```

diff --git a/docs/qml/index.md b/docs/qml/index.md
index 6a873df74..d235ef707 100644
--- a/docs/qml/index.md
+++ b/docs/qml/index.md
@@ -35,7 +35,7 @@
 Let's create a quantum neural network model using the feature map just defined, a
-digital-analog variational ansaztz and a simple observable $X(0) \otimes X(1)$. We
+digital-analog variational ansatz and a simple observable $X(0) \otimes X(1)$. We
 use the convenience `QNN` quantum model abstraction.

diff --git a/examples/draw.py b/examples/draw.py
index 3861e10e3..5911c0253 100644
--- a/examples/draw.py
+++ b/examples/draw.py
@@ -40,7 +40,7 @@ class CustomTheme(BaseTheme):
     hamevo = HamEvo(kron(*map(Z, range(constants.n_qubits))), 10)

     b = chain(
-        feature_map(constants.n_qubits, fm_type="tower"),
+        feature_map(constants.n_qubits, fm_type="Tower"),
         hea(constants.n_qubits, 1),
         constants,
         fixed,

diff --git a/qadence/constructors/feature_maps.py b/qadence/constructors/feature_maps.py
index 0253f11ab..75f856004 100644
--- a/qadence/constructors/feature_maps.py
+++ b/qadence/constructors/feature_maps.py
@@ -53,9 +53,9 @@ def feature_map(
     op: TRotation = RX,
     fm_type: BasisSet | Type[sympy.Function] | str = BasisSet.FOURIER,
     reupload_scaling: ReuploadScaling | Callable | str = ReuploadScaling.CONSTANT,
-    feature_range: tuple[float, float] = None,
-    target_range: tuple[float, float] = None,
-    multiplier: Parameter | TParameter = None,
+    feature_range: tuple[float, float] | None = None,
+    target_range: tuple[float, float] | None = None,
+    multiplier: Parameter | TParameter | None = None,
 ) -> KronBlock:
     """Construct a feature map of a given type.
@@ -115,8 +115,8 @@ def feature_map(
     if isinstance(param, Parameter):
         fparam = param
-        if fparam.is_trainable:
-            fparam.is_trainable = False
+        if fparam.trainable:
+            fparam.trainable = False
     else:
         fparam = FeatureParameter(param)

diff --git a/tests/qadence/test_matrices.py b/tests/qadence/test_matrices.py
index 7bf019939..87b35be15 100644
--- a/tests/qadence/test_matrices.py
+++ b/tests/qadence/test_matrices.py
@@ -39,6 +39,7 @@
     MCRX,
     MCRY,
    MCRZ,
+    PHASE,
     RX,
     RY,
     RZ,
@@ -56,7 +57,7 @@
     Z,
 )
 from qadence.states import equivalent_state, random_state, zero_state
-from qadence.types import Interaction
+from qadence.types import BasisSet, Interaction, ReuploadScaling


 def _calc_mat_vec_wavefunction(
@@ -252,13 +253,23 @@ def test_total_magnetization(n_qubits: int) -> None:
 @pytest.mark.parametrize("n_qubits", [1, 2, 4])
-@pytest.mark.parametrize("fm_type", ["tower", "fourier", "chebyshev"])
-@pytest.mark.parametrize("op", [RX, RY, RZ])
-def test_feature_maps(n_qubits: int, fm_type: str, op: AbstractBlock) -> None:
+@pytest.mark.parametrize("fm_type", [BasisSet.FOURIER, BasisSet.CHEBYSHEV])
+@pytest.mark.parametrize(
+    "reupload_scaling", [ReuploadScaling.CONSTANT, ReuploadScaling.TOWER, ReuploadScaling.EXP]
+)
+@pytest.mark.parametrize("op", [RX, RY, RZ, PHASE])
+def test_feature_maps(
+    n_qubits: int,
+    fm_type: BasisSet,
+    reupload_scaling: ReuploadScaling,
+    op: type[RX] | type[RY] | type[RZ] | type[PHASE],
+) -> None:
     x = Parameter("x", trainable=True)
-    block = feature_map(n_qubits, param=x, op=op, fm_type=fm_type)  # type: ignore[arg-type]
+    block = feature_map(
+        n_qubits, param=x, op=op, fm_type=fm_type, reupload_scaling=reupload_scaling
+    )  # type: ignore[arg-type]
     init_state = random_state(n_qubits)
-    wf_pyq = run(n_qubits, block, state=init_state)
+    wf_pyq = run(n_qubits, block, state=init_state, values={"x": torch.Tensor([1.0])})
     wf_mat = _calc_mat_vec_wavefunction(block, n_qubits, init_state)
     assert equivalent_state(wf_pyq, wf_mat, atol=ATOL_32)

diff --git a/tests/qadence/test_measurements/test_tomography.py b/tests/qadence/test_measurements/test_tomography.py
index 171aa1c3a..dbed4bf41 100644
--- a/tests/qadence/test_measurements/test_tomography.py
+++ b/tests/qadence/test_measurements/test_tomography.py
@@ -9,7 +9,7 @@
 from hypothesis import given, settings
 from metrics import HIGH_ACCEPTANCE, LOW_ACCEPTANCE, MIDDLE_ACCEPTANCE  # type: ignore

-from qadence import BackendName, DiffMode
+from qadence import BackendName, BasisSet, DiffMode
 from qadence.backends import backend_factory
 from qadence.blocks import (
     AbstractBlock,
@@ -251,23 +251,23 @@ def test_empirical_average() -> None:
         (QuantumCircuit(1, H(0)), {}, Z(0)),
         (QuantumCircuit(2, kron(H(0), H(1))), {}, kron(X(0), X(1))),
         (
-            QuantumCircuit(4, feature_map(4, fm_type="chebyshev"), hea(4, depth=2)),
+            QuantumCircuit(4, feature_map(4, fm_type=BasisSet.CHEBYSHEV), hea(4, depth=2)),
             {"phi": torch.rand(1)},
             total_magnetization(4),
         ),
         (
-            QuantumCircuit(4, feature_map(4, fm_type="chebyshev"), hea(4, depth=2)),
+            QuantumCircuit(4, feature_map(4, fm_type=BasisSet.CHEBYSHEV), hea(4, depth=2)),
             {"phi": torch.rand(1)},
             zz_hamiltonian(4),
         ),
         # (
-        #     QuantumCircuit(4, feature_map(4, fm_type="chebyshev"), hea(4, depth=2)),
+        #     QuantumCircuit(4, feature_map(4, fm_type=BasisSet.CHEBYSHEV), hea(4, depth=2)),
         #     {"phi": torch.rand(1)},
         #     ising_hamiltonian(4),
        #     HIGH_ACCEPTANCE,
         # ),
         (
-            QuantumCircuit(4, feature_map(4, fm_type="chebyshev"), hea(4, depth=2)),
+            QuantumCircuit(4, feature_map(4, fm_type=BasisSet.CHEBYSHEV), hea(4, depth=2)),
             {"phi": torch.rand(1)},
             add(
                 0.5 * kron(X(0), Y(1), X(2), Y(3)),
@@ -478,7 +478,7 @@ def test_forward_and_backward_passes_with_qnn(observable: AbstractBlock, accepta
     kwargs = {"n_shots": 1000000}

     # fm = fourier_feature_map(n_qubits)
-    fm = feature_map(n_qubits, fm_type="chebyshev")
+    fm = feature_map(n_qubits, fm_type=BasisSet.CHEBYSHEV)
     ansatz = hea(n_qubits, depth=2)
     circuit = QuantumCircuit(n_qubits, fm, ansatz)
     values = {"phi": torch.rand(batch_size, requires_grad=True)}
@@ -530,7 +530,7 @@ def test_partial_derivatives_with_qnn(observable: AbstractBlock, acceptance: flo
     kwargs = {"n_shots": 100000}

     # fm = fourier_feature_map(n_qubits)
-    fm = feature_map(n_qubits, fm_type="chebyshev")
+    fm = feature_map(n_qubits, fm_type=BasisSet.CHEBYSHEV)
     ansatz = hea(n_qubits, depth=2)
     circuit = QuantumCircuit(n_qubits, fm, ansatz)
     values = {"phi": torch.rand(batch_size, requires_grad=True)}
@@ -626,7 +626,7 @@ def test_high_order_derivatives_with_qnn(observable: AbstractBlock, acceptance:
     kwargs = {"n_shots": 100000}

     # fm = fourier_feature_map(n_qubits)
-    fm = feature_map(n_qubits, fm_type="chebyshev")
+    fm = feature_map(n_qubits, fm_type=BasisSet.CHEBYSHEV)
     ansatz = hea(n_qubits, depth=2)
     circuit = QuantumCircuit(n_qubits, fm, ansatz)
     values = {"phi": torch.rand(batch_size, requires_grad=True)}

From 9d233e004f5a5315f8376d815b08d521c5b60ead Mon Sep 17 00:00:00 2001
From: jpmoutinho
Date: Thu, 12 Oct 2023 14:08:23 +0200
Subject: [PATCH 07/34] fix block_to_tensor test

---
 qadence/constructors/feature_maps.py | 8 ++++----
 tests/qadence/test_matrices.py       | 7 ++++---
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/qadence/constructors/feature_maps.py b/qadence/constructors/feature_maps.py
index 75f856004..b9fdd7bc6 100644
--- a/qadence/constructors/feature_maps.py
+++ b/qadence/constructors/feature_maps.py
@@ -114,11 +114,11 @@ def feature_map(
     reupload_scaling = ReuploadScaling.TOWER if fm_type == "tower" else reupload_scaling

     if isinstance(param, Parameter):
-        fparam = param
-        if fparam.trainable:
-            fparam.trainable = False
+        fparam = param
+        if fparam.trainable:
+            fparam.trainable = False
     else:
-        fparam = FeatureParameter(param)
+        fparam = FeatureParameter(param)

diff --git a/tests/qadence/test_matrices.py b/tests/qadence/test_matrices.py
index 87b35be15..53ffd94a3 100644
--- a/tests/qadence/test_matrices.py
+++ b/tests/qadence/test_matrices.py
@@ -264,13 +264,14 @@ def test_feature_maps(
-    x = Parameter("x", trainable=True)
+    x = Parameter("x", trainable=False)
+    values = {"x": torch.rand(1)}
     block = feature_map(
         n_qubits, param=x, op=op, fm_type=fm_type, reupload_scaling=reupload_scaling
     )  # type: ignore[arg-type]
     init_state = random_state(n_qubits)
-    wf_pyq = run(n_qubits, block, state=init_state, values={"x": torch.Tensor([1.0])})
-    wf_mat = _calc_mat_vec_wavefunction(block, n_qubits, init_state)
+    wf_pyq = run(n_qubits, block, state=init_state, values=values)
+    wf_mat = _calc_mat_vec_wavefunction(block, n_qubits, init_state, values=values)
     assert equivalent_state(wf_pyq, wf_mat, atol=ATOL_32)

From c978ef68dff5adeafb1744324fbcb46c3ba0258d Mon Sep 17 00:00:00 2001
From: jpmoutinho
Date: Thu, 12 Oct 2023 14:08:40 +0200
Subject: [PATCH 08/34] linting

---
 qadence/constructors/feature_maps.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/qadence/constructors/feature_maps.py b/qadence/constructors/feature_maps.py
index b9fdd7bc6..75f856004 100644
--- a/qadence/constructors/feature_maps.py
+++ b/qadence/constructors/feature_maps.py
@@ -114,11 +114,11 @@ def feature_map(
     reupload_scaling = ReuploadScaling.TOWER if fm_type == "tower" else reupload_scaling

     if isinstance(param, Parameter):
-        fparam = param
-        if fparam.trainable:
-            fparam.trainable = False
+        fparam = param
+        if fparam.trainable:
+            fparam.trainable = False
     else:
-        fparam = FeatureParameter(param)
+        fparam = FeatureParameter(param)

From a50ff07553b81ad63d72f0cbbebaf1ea694243ca Mon Sep 17 00:00:00 2001
From: jpmoutinho
Date: Thu, 12 Oct 2023 16:34:35 +0200
Subject: [PATCH 09/34] unit tests

---
 qadence/constructors/feature_maps.py    |  39 +++----
 tests/constructors/test_feature_maps.py | 128 ++++++++++++++++++++++++
 2 files changed, 141 insertions(+), 26 deletions(-)
 create mode 100644 tests/constructors/test_feature_maps.py

diff --git a/qadence/constructors/feature_maps.py b/qadence/constructors/feature_maps.py
index 75f856004..8992a4ac3 100644
--- a/qadence/constructors/feature_maps.py
+++ b/qadence/constructors/feature_maps.py
@@ -27,29 +27,17 @@ def _set_range(fm_type: BasisSet | Type[sympy.Function] | str) -> tuple[float, f
-def _rs_constant(i: int) -> int:
-    return 1
-
-
-def _rs_tower(i: int) -> float:
-    return float(i + 1)
-
-
-def _rs_exp(i: int) -> float:
-    return float(2**i)
-
-
 RS_FUNC_DICT = {
-    ReuploadScaling.CONSTANT: _rs_constant,
-    ReuploadScaling.TOWER: _rs_tower,
-    ReuploadScaling.EXP: _rs_exp,
+    ReuploadScaling.CONSTANT: lambda i: 1,
+    ReuploadScaling.TOWER: lambda i: float(i + 1),
+    ReuploadScaling.EXP: lambda i: float(2**i),
 }


 def feature_map(
     n_qubits: int,
     support: tuple[int, ...] | None = None,
-    param: Parameter | sympy.Basic | str = "phi",
+    param: Parameter | str = "phi",
     op: TRotation = RX,
     fm_type: BasisSet | Type[sympy.Function] | str = BasisSet.FOURIER,
     reupload_scaling: ReuploadScaling | Callable | str = ReuploadScaling.CONSTANT,
@@ -109,9 +97,13 @@ def feature_map(
             "'fm_type = BasisSet.CHEBYSHEV' or 'reupload_scaling = ReuploadScaling.TOWER'."
         )
-    fm_type = BasisSet.FOURIER if fm_type == "fourier" else fm_type
-    fm_type = BasisSet.CHEBYSHEV if fm_type == "chebyshev" else fm_type
-    reupload_scaling = ReuploadScaling.TOWER if fm_type == "tower" else reupload_scaling
+    if fm_type == "fourier":
+        fm_type = BasisSet.FOURIER
+    elif fm_type == "chebyshev":
+        fm_type = BasisSet.CHEBYSHEV
+    elif fm_type == "tower":
+        fm_type = BasisSet.FOURIER
+        reupload_scaling = ReuploadScaling.TOWER

     if isinstance(param, Parameter):
         fparam = param
@@ -125,13 +117,8 @@ def feature_map(
     target_range = _set_range(fm_type) if target_range is None else target_range

     # Rescale the feature parameter
-    f_max = max(feature_range)
-    f_min = min(feature_range)
-    t_max = max(target_range)
-    t_min = min(target_range)
-
-    scaling = (t_max - t_min) / (f_max - f_min)
-    shift = t_min - f_min * scaling
+    scaling = (max(target_range) - min(target_range)) / (max(feature_range) - min(feature_range))
+    shift = min(target_range) - min(feature_range) * scaling

     if math.isclose(scaling, 1.0):
         # So we don't get 1.0 factor in visualization

diff --git a/tests/constructors/test_feature_maps.py b/tests/constructors/test_feature_maps.py
new file mode 100644
index 000000000..14f530ba5
--- /dev/null
+++ b/tests/constructors/test_feature_maps.py
@@ -0,0 +1,128 @@
+from __future__ import annotations
+
+from typing import Callable
+
+import pytest
+import sympy
+import torch
+from metrics import ATOL_64
+
+from qadence import (
+    PHASE,
+    RX,
+    BasisSet,
+    FeatureParameter,
+    ReuploadScaling,
+    Z,
+    expectation,
+    feature_map,
+    run,
+)
+
+PARAM_DICT_0 = {
+    "support": None,
+    "param": FeatureParameter("x"),
+    "op": RX,
+    "feature_range": None,
+    "multiplier": None,
+}
+
+PARAM_DICT_1 = {
+    "support": (3, 2, 1, 0),
+    "param": "x",
+    "op": PHASE,
+    "feature_range": (-2.0, -1.0),
+    "target_range": (1.0, 5.0),
+    "multiplier": FeatureParameter("y"),
+}
+
+
+@pytest.mark.parametrize("param_dict", [PARAM_DICT_0, PARAM_DICT_1])
+@pytest.mark.parametrize("fm_type", [BasisSet.FOURIER, BasisSet.CHEBYSHEV, sympy.asin])
+@pytest.mark.parametrize(
+    "reupload_scaling",
+    [ReuploadScaling.CONSTANT, ReuploadScaling.TOWER, ReuploadScaling.EXP, lambda i: 5 * i + 2],
+)
+def test_feature_map_creation_and_run(
+    param_dict: dict,
+    fm_type: BasisSet | type[sympy.Function],
+    reupload_scaling: ReuploadScaling | Callable,
+) -> None:
+    n_qubits = 4
+
+    block = feature_map(
+        n_qubits=n_qubits, fm_type=fm_type, reupload_scaling=reupload_scaling, **param_dict
+    )
+
+    values = {"x": torch.rand(1), "y": torch.rand(1)}
+
+    run(block, values=values)
+
+
+@pytest.mark.parametrize("fm_type", [BasisSet.FOURIER, BasisSet.CHEBYSHEV])
+@pytest.mark.parametrize(
+    "reupload_scaling",
+    [ReuploadScaling.TOWER, ReuploadScaling.CONSTANT, ReuploadScaling.EXP, "exp_down"],
+)
+def test_feature_map_correctness(fm_type: BasisSet, reupload_scaling: ReuploadScaling) -> None:
+    n_qubits = 4
+    support = tuple(range(n_qubits))
+
+    # Preparing exact result
+    if fm_type == BasisSet.CHEBYSHEV:
+        transform = torch.acos
+        xv = torch.linspace(-0.95, 0.95, 100)
+        feature_range = (-1.0, 1.0)
+        target_range = (-1.0, 1.0)
+    elif fm_type == BasisSet.FOURIER:
+
+        def transform(x: torch.Tensor) -> torch.Tensor:
+            return x
+
+        xv = torch.linspace(0.0, 2 * torch.pi, 100)
+        feature_range = (0.0, 2 * torch.pi)
+        target_range = (0.0, 2 * torch.pi)
+
+    if reupload_scaling == ReuploadScaling.CONSTANT:
+
+        def scaling(j: int) -> float:
+            return 1
+
+    elif reupload_scaling == ReuploadScaling.TOWER:
+
+        def scaling(j: int) -> float:
+            return float(j + 1)
+    elif reupload_scaling == ReuploadScaling.EXP:
+
+        def scaling(j: int) -> float:
+            return float(2**j)
+
+    elif reupload_scaling == "exp_down":
+
+        def scaling(j: int) -> float:
+            return float(2 ** (n_qubits - j - 1))
+
+        reupload_scaling = ReuploadScaling.EXP
+        support = tuple(reversed(range(n_qubits)))
+
+    target = torch.cat(
+        [torch.cos(scaling(j) * transform(xv)).unsqueeze(1) for j in range(n_qubits)], 1
+    )
+
+    # Running the block expectation
+    block = feature_map(
+        n_qubits=n_qubits,
+        support=support,
+        param="x",
+        op=RX,
+        fm_type=fm_type,
+        reupload_scaling=reupload_scaling,
+        feature_range=feature_range,
+        target_range=target_range,
+    )
+
+    yv = expectation(block, [Z(j) for j in range(n_qubits)], values={"x": xv})
+
+    # Assert correctness
+    assert torch.allclose(yv, target, atol=ATOL_64)

From 04e48bab274c8833b832ac841664ee5191f8291a Mon Sep 17 00:00:00 2001
From: jpmoutinho
Date: Thu, 12 Oct 2023 17:11:10 +0200
Subject: [PATCH 10/34] unit tests

---
 tests/constructors/test_feature_maps.py | 32 ++++++++++++++++++-------
 1 file changed, 24 insertions(+), 8 deletions(-)

diff --git a/tests/constructors/test_feature_maps.py b/tests/constructors/test_feature_maps.py
index 14f530ba5..9bcb35f4b 100644
--- a/tests/constructors/test_feature_maps.py
+++ b/tests/constructors/test_feature_maps.py
@@ -13,7 +13,9 @@
     BasisSet,
     FeatureParameter,
     ReuploadScaling,
+    X,
     Z,
+    exp_fourier_feature_map,
     expectation,
     feature_map,
     run,
 )
@@ -59,27 +61,26 @@
+@pytest.mark.parametrize("n_qubits", [3, 4, 5])
 @pytest.mark.parametrize("fm_type", [BasisSet.FOURIER, BasisSet.CHEBYSHEV])
 @pytest.mark.parametrize(
     "reupload_scaling",
     [ReuploadScaling.TOWER, ReuploadScaling.CONSTANT, ReuploadScaling.EXP, "exp_down"],
 )
-def test_feature_map_correctness(fm_type: BasisSet, reupload_scaling: ReuploadScaling) -> None:
-    n_qubits = 4
+def test_feature_map_correctness(
+    n_qubits: int, fm_type: BasisSet, reupload_scaling: ReuploadScaling
+) -> None:
     support = tuple(range(n_qubits))

     # Preparing exact result
     if fm_type == BasisSet.CHEBYSHEV:
-        transform = torch.acos
         xv = torch.linspace(-0.95, 0.95, 100)
+        transformed_xv = torch.acos(xv)
         feature_range = (-1.0, 1.0)
         target_range = (-1.0, 1.0)
     elif fm_type == BasisSet.FOURIER:
-
-        def transform(x: torch.Tensor) -> torch.Tensor:
-            return x
-
         xv = torch.linspace(0.0, 2 * torch.pi, 100)
+        transformed_xv = xv
         feature_range = (0.0, 2 * torch.pi)
         target_range = (0.0, 2 * torch.pi)
@@ -107,7 +108,7 @@
     target = torch.cat(
-        [torch.cos(scaling(j) * transform(xv)).unsqueeze(1) for j in range(n_qubits)], 1
+        [torch.cos(scaling(j) * transformed_xv).unsqueeze(1) for j in range(n_qubits)], 1
     )
@@ -126,3 +127,18 @@
     # Assert correctness
     assert torch.allclose(yv, target, atol=ATOL_64)
+
+
+@pytest.mark.parametrize("n_qubits", [3, 4, 5])
+def test_exp_fourier_feature_map_correctness(n_qubits: int) -> None:
+    block = exp_fourier_feature_map(n_qubits, param="x")
+    xv = torch.linspace(0.0, 2**n_qubits - 1, 100)
+    yv = expectation(block, [X(j) for j in range(n_qubits)], values={"x": xv})
+    target = torch.cat(
+        [
+            torch.cos(2 ** (j + 1) * torch.pi * xv / 2**n_qubits).unsqueeze(1)
+            for j in range(n_qubits)
+        ],
+        1,
+    )
+    assert torch.allclose(yv, target)

From 4e3b8fe7b8359e9c2f6fcb9ee45d3c8e490e9706 Mon Sep 17 00:00:00 2001
From: jpmoutinho
Date: Fri, 13 Oct 2023 09:29:42 +0200
Subject: [PATCH 11/34] fix pipeline

---
 docs/development/draw.md | 2 +-
 examples/draw.py         | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/development/draw.md b/docs/development/draw.md
index 0ec4374fe..bf08516ad 100644
--- a/docs/development/draw.md
+++ b/docs/development/draw.md
@@ -58,7 +58,7 @@ print(html_string(block)) # markdown-exec: hide
 ```python exec="on" source="material-block" html="1"
 from qadence import feature_map, hea, chain, ReuploadScaling

-block = chain(feature_map(4, fm_type=ReuploadScaling.TOWER), hea(4,2))
+block = chain(feature_map(4, reupload_scaling=ReuploadScaling.TOWER), hea(4,2))
 from qadence.draw import html_string # markdown-exec: hide
 print(html_string(block)) # markdown-exec: hide
 ```

diff --git a/examples/draw.py b/examples/draw.py
index 5911c0253..8591c0846 100644
--- a/examples/draw.py
+++ b/examples/draw.py
@@ -40,7 +40,7 @@ class CustomTheme(BaseTheme):
     hamevo = HamEvo(kron(*map(Z, range(constants.n_qubits))), 10)

     b = chain(
-        feature_map(constants.n_qubits, fm_type="Tower"),
+        feature_map(constants.n_qubits, reupload_scaling="Tower"),
         hea(constants.n_qubits, 1),
         constants,
         fixed,

From 088f48fe2c0295b130e781bee8685235969d5052 Mon Sep 17 00:00:00 2001
From: jpmoutinho
Date: Fri, 13 Oct 2023 10:49:26 +0200
Subject: [PATCH 12/34] docs

---
 docs/development/draw.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/development/draw.md b/docs/development/draw.md
index bf08516ad..803101bf0 100644
--- a/docs/development/draw.md
+++ b/docs/development/draw.md
@@ -56,9 +56,9 @@ print(html_string(block)) # markdown-exec: hide
 ```

 ```python exec="on" source="material-block" html="1"
-from qadence import feature_map, hea, chain, ReuploadScaling
+from qadence import feature_map, hea, chain

-block = chain(feature_map(4, reupload_scaling=ReuploadScaling.TOWER), hea(4,2))
+block = chain(feature_map(4, fm_type="tower"), hea(4,2))
 from qadence.draw import html_string # markdown-exec: hide
 print(html_string(block)) # markdown-exec: hide
 ```

From 53f383a7b3b9e3b4aeebfeff9aaf80432a6c1a3e Mon Sep 17 00:00:00 2001
From: jpmoutinho
Date: Fri, 13 Oct 2023 11:14:48 +0200
Subject: [PATCH 13/34] docs

---
 docs/development/draw.md                 |   4 ++--
 docs/qml/index.md                        |  15 ++--
 docs/qml/{qml_tools.md => ml_tools.md}   | 114 +------------------------
 docs/qml/qaoa.md                         |   2 +-
 docs/qml/qcl.md                          |   2 +-
 docs/qml/qml_constructors.md             | 111 ++++++++++++++++++++++++
 docs/tutorials/parameters.md             |   2 +-
 docs/tutorials/quantummodels.md          |   2 +-
 mkdocs.yml                               |   9 +-
 9 files changed, 131 insertions(+), 130 deletions(-)
 rename docs/qml/{qml_tools.md => ml_tools.md} (57%)

diff --git a/docs/development/draw.md b/docs/development/draw.md
index 803101bf0..df1f63fc5 100644
--- a/docs/development/draw.md
+++ b/docs/development/draw.md
@@ -56,9 +56,9 @@ print(html_string(block)) # markdown-exec: hide
 ```

 ```python exec="on" source="material-block" html="1"
-from qadence import feature_map, hea, chain
+from qadence import feature_map, hea, chain, ReuploadScaling

-block = chain(feature_map(4, fm_type="tower"), hea(4,2))
+block = chain(feature_map(4, reupload_scaling="Tower"), hea(4,2))
 from qadence.draw import html_string # markdown-exec: hide
 print(html_string(block)) # markdown-exec: hide
 ```

diff --git a/docs/qml/index.md b/docs/qml/index.md
index d235ef707..6e67c903b 100644
--- a/docs/qml/index.md
+++ b/docs/qml/index.md
@@ -1,9 +1,7 @@
 Variational algorithms on noisy devices and quantum machine learning (QML) [^1] in particular are
-the target applications for Qadence. For this purpose, the
-library offers both flexible symbolic expressions for the
-quantum circuit parameters via `sympy` (see [here](../tutorials/parameters.md) for more
-details) and native automatic differentiation via integration with
-[PyTorch](https://pytorch.org/) deep learning framework.
+the target applications for Qadence. For this purpose, the library offers both flexible symbolic expressions for the
+quantum circuit parameters via `sympy` (see [here](../tutorials/parameters.md) for more details) and native automatic
+differentiation via integration with [PyTorch](https://pytorch.org/) deep learning framework.

 Qadence symbolic parameter interface allows to create arbitrary feature maps
 to encode classical data into quantum circuits
@@ -29,7 +27,7 @@
 The [`constructors.feature_map`][qadence.constructors.feature_map] module provides
 convenience functions to build commonly used feature maps where the input parameter
-is encoded in the single-qubit gates rotation angle.
+is encoded in the single-qubit gates rotation angle. This function will be further
+demonstrated in the [QML constructors tutorial](qml_constructors.md).

 Furthermore, Qadence is natively integrated with PyTorch automatic differentiation engine thus
 Qadence quantum models can be used seamlessly in a PyTorch workflow.

 Let's create a quantum neural network model using the feature map just defined, a
-digital-analog variational ansatz and a simple observable $X(0) \otimes X(1)$. We
-use the convenience `QNN` quantum model abstraction.
+digital-analog variational ansatz ([also explained here](qml_constructors.md)) and a
+simple observable $X(0) \otimes X(1)$. We use the convenience `QNN` quantum model abstraction.

 ```python exec="on" source="material-block" result="json" session="qml"
 ansatz = qd.hea(n_qubits, strategy="sDAQC")

diff --git a/docs/qml/qml_tools.md b/docs/qml/ml_tools.md
similarity index 57%
rename from docs/qml/qml_tools.md
rename to docs/qml/ml_tools.md
index 81d164bba..b1e97382f 100644
--- a/docs/qml/qml_tools.md
+++ b/docs/qml/ml_tools.md
@@ -1,115 +1,3 @@
-Qadence offers a wide range of utilities for helping building and researching
-quantum machine learning algorithms, including:
-
-* a set of constructors for circuits commonly used in quantum machine learning
-* a set of tools for optimizing quantum neural networks and loading classical data into a QML algorithm
-
-## Quantum machine learning constructors
-
-Besides the [arbitrary Hamiltonian constructors](../tutorials/hamiltonians.md), Qadence also provides a complete set of
-program constructors useful for digital-analog quantum machine learning programs.
-
-### Feature maps
-
-A few feature maps are directly available for loading classical data into quantum circuits by encoding them
-into gate rotation angles.
-
-```python exec="on" source="material-block" result="json" session="fms"
-from qadence import feature_map
-
-n_qubits = 3
-
-fm = feature_map(n_qubits, fm_type="fourier")
-print(f"Fourier = {fm}") # markdown-exec: hide
-
-fm = feature_map(n_qubits, fm_type="chebyshev")
-print(f"Chebyshev {fm}") # markdown-exec: hide
-
-fm = feature_map(n_qubits, fm_type="tower")
-print(f"Tower {fm}") # markdown-exec: hide
-```
-
-### Hardware-efficient ansatz
-
-Ansatze blocks for quantum machine-learning are typically built following the Hardware-Efficient Ansatz formalism (HEA).
-Both fully digital and digital-analog HEAs can easily be built with the `hea` function. By default,
-the digital version is returned:
-
-```python exec="on" source="material-block" html="1" session="ansatz"
-from qadence import hea
-from qadence.draw import display
-
-n_qubits = 3
-depth = 2
-
-ansatz = hea(n_qubits, depth)
-from qadence.draw import html_string # markdown-exec: hide
-print(html_string(ansatz, size="4,4")) # markdown-exec: hide
-```
-
-As seen above, the rotation layers are automatically parameterized, and the prefix `"theta"` can be changed with the `param_prefix` argument.
-
-Furthermore, both the single-qubit rotations and the two-qubit entangler can be customized with the `operations` and `entangler` argument. The operations can be passed as a list of single-qubit rotations, while the entangler should be either `CNOT`, `CZ`, `CRX`, `CRY`, `CRZ` or `CPHASE`.
-
-```python exec="on" source="material-block" html="1" session="ansatz"
-from qadence import RX, RY, CPHASE
-
-ansatz = hea(
-    n_qubits=n_qubits,
-    depth=depth,
-    param_prefix="phi",
-    operations=[RX, RY, RX],
-    entangler=CPHASE
-)
-from qadence.draw import html_string # markdown-exec: hide
-print(html_string(ansatz, size="4,4")) # markdown-exec: hide
-```
-
-Having a truly *hardware-efficient* ansatz means that the entangling operation can be chosen according to each device's native interactions. Besides digital operations, in Qadence it is also possible to build digital-analog HEAs with the entanglement produced by the natural evolution of a set of interacting qubits, as natively implemented in neutral atom devices. As with other digital-analog functions, this can be controlled with the `strategy` argument which can be chosen from the [`Strategy`](../qadence/types.md) enum type. Currently, only `Strategy.DIGITAL` and `Strategy.SDAQC` are available. By default, calling `strategy = Strategy.SDAQC` will use a global entangling Hamiltonian with Ising-like NN interactions and constant interaction strength:
-
-```python exec="on" source="material-block" html="1" session="ansatz"
-from qadence import Strategy
-
-ansatz = hea(
-    n_qubits,
-    depth=depth,
-    strategy=Strategy.SDAQC
-)
-from qadence.draw import html_string # markdown-exec: hide
-print(html_string(ansatz, size="4,4")) # markdown-exec: hide
-```
-
-Note that, by default, only the time-parameter is automatically parameterized when building a digital-analog HEA. However, as described in the [Hamiltonians tutorial](../tutorials/hamiltonians.md), arbitrary interaction Hamiltonians can be easily built with the `hamiltonian_factory` function, with both customized or fully parameterized interactions, and these can be directly passed as the `entangler` for a customizable digital-analog HEA.
- -```python exec="on" source="material-block" html="1" session="ansatz" -from qadence import hamiltonian_factory, Interaction, N, Register, hea - -# Build a parameterized neutral-atom Hamiltonian following a honeycomb_lattice: -register = Register.honeycomb_lattice(1, 1) - -entangler = hamiltonian_factory( - register, - interaction=Interaction.NN, - detuning=N, - interaction_strength="e", - detuning_strength="n" -) - -# Build a fully parameterized Digital-Analog HEA: -n_qubits = register.n_qubits -depth = 2 - -ansatz = hea( - n_qubits=register.n_qubits, - depth=depth, - operations=[RX, RY, RX], - entangler=entangler, - strategy=Strategy.SDAQC -) -from qadence.draw import html_string # markdown-exec: hide -print(html_string(ansatz, size="4,4")) # markdown-exec: hide -``` - ## Machine Learning Tools ### Dataloaders @@ -321,4 +209,4 @@ for i in range(n_epochs): loss = criterion(out, y) loss.backward() optimizer.step() -``` +``` \ No newline at end of file diff --git a/docs/qml/qaoa.md b/docs/qml/qaoa.md index 5dde67b9d..e6a57f0a6 100644 --- a/docs/qml/qaoa.md +++ b/docs/qml/qaoa.md @@ -140,7 +140,7 @@ for i in range(n_epochs): ``` Qadence offers some convenience functions to implement this training loop with advanced -logging and metrics track features. You can refer to [this](../qml/qml_tools.md) for more details. +logging and metrics track features. You can refer to [this tutorial](../qml/ml_tools.md) for more details. ## Results diff --git a/docs/qml/qcl.md b/docs/qml/qcl.md index 745bdd6f7..b1239ba78 100644 --- a/docs/qml/qcl.md +++ b/docs/qml/qcl.md @@ -114,7 +114,7 @@ assert loss.item() < 1e-3 ``` Qadence offers some convenience functions to implement this training loop with advanced -logging and metrics track features. You can refer to [this](../qml/qml_tools.md) for more details. +logging and metrics track features. You can refer to [this tutorial](../qml/ml_tools.md) for more details. The quantum model is now trained on the training data points. To determine the quality of the results, one can check to see how well it fits the function on the test set. diff --git a/docs/qml/qml_constructors.md b/docs/qml/qml_constructors.md new file mode 100644 index 000000000..e5ca0353e --- /dev/null +++ b/docs/qml/qml_constructors.md @@ -0,0 +1,111 @@ +Qadence offers a wide range of utilities for helping building and researching +quantum machine learning algorithms, including: + +* a set of constructors for circuits commonly used in quantum machine learning +* a set of tools for optimizing quantum neural networks and loading classical data into a QML algorithm + +## Quantum machine learning constructors + +Besides the [arbitrary Hamiltonian constructors](../tutorials/hamiltonians.md), Qadence also provides a complete set of +program constructors useful for digital-analog quantum machine learning programs. + +### Feature maps + +The `feature_map` function provides the necessary tools to easily several types of data-encoding blocks using rotation gates. The +two main types of feature maps use a Fourier basis or a Chebyshev basis. 
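A short note on the difference between the two bases: the Fourier variant uses the (rescaled) feature directly as the rotation angle, while the Chebyshev variant first applies `acos`, so expectation values pick up Chebyshev polynomials $T_n(x) = \cos(n \arccos x)$. A minimal `sympy` sketch of this identity, assuming only the `acos` convention used by the constructor:

```python
# Minimal sketch: acos-encoding turns cosines into Chebyshev polynomials.
import sympy

x = sympy.Symbol("x")
n = 2
# cos(n * acos(x)) expands to the Chebyshev polynomial T_n(x)
assert sympy.expand_trig(sympy.cos(n * sympy.acos(x))) == sympy.chebyshevt(n, x)
```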
+ +```python exec="on" source="material-block" html="1" session="fms" +from qadence import feature_map, BasisSet, chain +from qadence.draw import display + +n_qubits = 3 + +fourier_fm = feature_map(n_qubits, fm_type=BasisSet.FOURIER) + +chebyshev_fm = feature_map(n_qubits, fm_type=BasisSet.CHEBYSHEV) + +block = chain(fourier_fm, chebyshev_fm) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(block, size="6,4")) # markdown-exec: hide +``` + +### Hardware-efficient ansatz + +Ansatze blocks for quantum machine-learning are typically built following the Hardware-Efficient Ansatz formalism (HEA). +Both fully digital and digital-analog HEAs can easily be built with the `hea` function. By default, +the digital version is returned: + +```python exec="on" source="material-block" html="1" session="ansatz" +from qadence import hea +from qadence.draw import display + +n_qubits = 3 +depth = 2 + +ansatz = hea(n_qubits, depth) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(ansatz, size="8,4")) # markdown-exec: hide +``` + +As seen above, the rotation layers are automatically parameterized, and the prefix `"theta"` can be changed with the `param_prefix` argument. + +Furthermore, both the single-qubit rotations and the two-qubit entangler can be customized with the `operations` and `entangler` argument. The operations can be passed as a list of single-qubit rotations, while the entangler should be either `CNOT`, `CZ`, `CRX`, `CRY`, `CRZ` or `CPHASE`. + +```python exec="on" source="material-block" html="1" session="ansatz" +from qadence import RX, RY, CPHASE + +ansatz = hea( + n_qubits=n_qubits, + depth=depth, + param_prefix="phi", + operations=[RX, RY, RX], + entangler=CPHASE +) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(ansatz, size="8,4")) # markdown-exec: hide +``` + +Having a truly *hardware-efficient* ansatz means that the entangling operation can be chosen according to each device's native interactions. Besides digital operations, in Qadence it is also possible to build digital-analog HEAs with the entanglement produced by the natural evolution of a set of interacting qubits, as natively implemented in neutral atom devices. As with other digital-analog functions, this can be controlled with the `strategy` argument which can be chosen from the [`Strategy`](../qadence/types.md) enum type. Currently, only `Strategy.DIGITAL` and `Strategy.SDAQC` are available. By default, calling `strategy = Strategy.SDAQC` will use a global entangling Hamiltonian with Ising-like NN interactions and constant interaction strength, + +```python exec="on" source="material-block" html="1" session="ansatz" +from qadence import Strategy + +ansatz = hea( + n_qubits, + depth=depth, + strategy=Strategy.SDAQC +) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(ansatz, size="8,4")) # markdown-exec: hide +``` + +Note that, by default, only the time-parameter is automatically parameterized when building a digital-analog HEA. However, as described in the [Hamiltonians tutorial](../tutorials/hamiltonians.md), arbitrary interaction Hamiltonians can be easily built with the `hamiltonian_factory` function, with both customized or fully parameterized interactions, and these can be directly passed as the `entangler` for a customizable digital-analog HEA. 
+ +```python exec="on" source="material-block" html="1" session="ansatz" +from qadence import hamiltonian_factory, Interaction, N, Register, hea + +# Build a parameterized neutral-atom Hamiltonian following a honeycomb_lattice: +register = Register.honeycomb_lattice(1, 1) + +entangler = hamiltonian_factory( + register, + interaction=Interaction.NN, + detuning=N, + interaction_strength="e", + detuning_strength="n" +) + +# Build a fully parameterized Digital-Analog HEA: +n_qubits = register.n_qubits +depth = 2 + +ansatz = hea( + n_qubits=register.n_qubits, + depth=depth, + operations=[RX, RY, RX], + entangler=entangler, + strategy=Strategy.SDAQC +) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(ansatz, size="8,4")) # markdown-exec: hide +``` diff --git a/docs/tutorials/parameters.md b/docs/tutorials/parameters.md index 6cd1f7ac1..f9669de98 100644 --- a/docs/tutorials/parameters.md +++ b/docs/tutorials/parameters.md @@ -261,7 +261,7 @@ print(html_string(circuit)) # markdown-exec: hide print(html_string(circuit)) # markdown-exec: hide ``` -The `hea` function will be further explored in the [QML Constructors tutorial](../qml/qml_tools.md). +The `hea` function will be further explored in the [QML Constructors tutorial](../qml/qml_constructors.md). ## Parametric observables diff --git a/docs/tutorials/quantummodels.md b/docs/tutorials/quantummodels.md index 836e7ca82..326de4781 100644 --- a/docs/tutorials/quantummodels.md +++ b/docs/tutorials/quantummodels.md @@ -85,5 +85,5 @@ print(f"{ex = }") # markdown-exec: hide ### Quantum Neural Network (QNN) The `QNN` is a subclass of the `QuantumModel` geared towards quantum machine learning and parameter optimisation. See the -[machine learning tools](../qml/qml_tools.md) section or the [`QNN` API reference][qadence.models.QNN] for more detailed +[quantum machine learning section](../qml/index.md) section or the [`QNN` API reference][qadence.models.QNN] for more detailed information, and the [parametric program tutorial](parameters.md) for parameterization. diff --git a/mkdocs.yml b/mkdocs.yml index cecb37b7a..ee36eafeb 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -28,9 +28,12 @@ nav: - Variational quantum algorithms: - qml/index.md - - Quantum circuit learning: qml/qcl.md - - Solving MaxCut with QAOA: qml/qaoa.md - - Tools for quantum machine learning: qml/qml_tools.md + - Tools for quantum machine learning: + - Constructors: qml/qml_constructors.md + - Training tools: qml/ml_tools.md + - Example applications: + - Quantum circuit learning: qml/qcl.md + - Solving MaxCut with QAOA: qml/qaoa.md - Advanced Tutorials: - Quantum circuits differentiation: advanced_tutorials/differentiability.md From f86b88ce6be10de0d58cc01df93325558a6e3165 Mon Sep 17 00:00:00 2001 From: jpmoutinho Date: Fri, 13 Oct 2023 11:18:18 +0200 Subject: [PATCH 14/34] linting --- docs/qml/index.md | 4 ++-- docs/qml/ml_tools.md | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/qml/index.md b/docs/qml/index.md index 6e67c903b..8932ce7bf 100644 --- a/docs/qml/index.md +++ b/docs/qml/index.md @@ -1,6 +1,6 @@ Variational algorithms on noisy devices and quantum machine learning (QML) [^1] in particular are the target applications for Qadence. 
For this purpose, the library offers both flexible symbolic expressions for the -quantum circuit parameters via `sympy` (see [here](../tutorials/parameters.md) for more details) and native automatic +quantum circuit parameters via `sympy` (see [here](../tutorials/parameters.md) for more details) and native automatic differentiation via integration with [PyTorch](https://pytorch.org/) deep learning framework. Qadence symbolic parameter interface allows to create @@ -34,7 +34,7 @@ Furthermore, Qadence is natively integrated with PyTorch automatic differentiati Qadence quantum models can be used seamlessly in a PyTorch workflow. Let's create a quantum neural network model using the feature map just defined, a -digital-analog variational ansatz ([also explained here](qml_constructors.md)) and a +digital-analog variational ansatz ([also explained here](qml_constructors.md)) and a simple observable $X(0) \otimes X(1)$. We use the convenience `QNN` quantum model abstraction. ```python exec="on" source="material-block" result="json" session="qml" diff --git a/docs/qml/ml_tools.md b/docs/qml/ml_tools.md index b1e97382f..9195ca3f1 100644 --- a/docs/qml/ml_tools.md +++ b/docs/qml/ml_tools.md @@ -209,4 +209,4 @@ for i in range(n_epochs): loss = criterion(out, y) loss.backward() optimizer.step() -``` \ No newline at end of file +``` From c1d5521206da928a5de924528f3ef7b4833c84ce Mon Sep 17 00:00:00 2001 From: jpmoutinho Date: Fri, 13 Oct 2023 15:03:22 +0200 Subject: [PATCH 15/34] comments --- qadence/constructors/feature_maps.py | 98 ++++++++++--------- qadence/types.py | 4 +- tests/constructors/test_feature_maps.py | 16 +-- tests/qadence/test_matrices.py | 6 +- .../test_measurements/test_tomography.py | 16 +-- 5 files changed, 72 insertions(+), 68 deletions(-) diff --git a/qadence/constructors/feature_maps.py b/qadence/constructors/feature_maps.py index 8992a4ac3..6f0758e92 100644 --- a/qadence/constructors/feature_maps.py +++ b/qadence/constructors/feature_maps.py @@ -1,27 +1,27 @@ from __future__ import annotations import inspect -import math -from typing import Callable, Type, Union +from collections.abc import Callable +from math import isclose, pi -import sympy +from sympy import Function, acos from qadence.blocks import AbstractBlock, KronBlock, chain, kron, tag from qadence.logger import get_logger from qadence.operations import PHASE, RX, RY, RZ, H from qadence.parameters import FeatureParameter, Parameter -from qadence.types import BasisSet, ReuploadScaling, TParameter +from qadence.types import BasisFeatureMap, ReuploadScaling, TParameter logger = get_logger(__name__) -TRotation = Type[Union[RX, RY, RZ, PHASE]] ROTATIONS = [RX, RY, RZ, PHASE] +RotationTypes = type[RX | RY | RZ | PHASE] -def _set_range(fm_type: BasisSet | Type[sympy.Function] | str) -> tuple[float, float]: - if fm_type == BasisSet.FOURIER: - return (0.0, 2 * sympy.pi) - elif fm_type == BasisSet.CHEBYSHEV: +def _set_range(fm_type: BasisFeatureMap | type[Function] | str) -> tuple[float, float]: + if fm_type == BasisFeatureMap.FOURIER: + return (0.0, 2 * pi) + elif fm_type == BasisFeatureMap.CHEBYSHEV: return (-1.0, 1.0) else: return (0.0, 1.0) @@ -38,8 +38,8 @@ def feature_map( n_qubits: int, support: tuple[int, ...] 
| None = None, param: Parameter | str = "phi", - op: TRotation = RX, - fm_type: BasisSet | Type[sympy.Function] | str = BasisSet.FOURIER, + op: RotationTypes = RX, + fm_type: BasisFeatureMap | type[Function] | str = BasisFeatureMap.FOURIER, reupload_scaling: ReuploadScaling | Callable | str = ReuploadScaling.CONSTANT, feature_range: tuple[float, float] | None = None, target_range: tuple[float, float] | None = None, @@ -55,8 +55,8 @@ def feature_map( param: Parameter of the feature map; you can pass a string, sympy expression or Parameter; it will be set as non-trainable (FeatureParameter) regardless. op: Rotation operation of the feature map; choose from RX, RY, RZ, PHASE - fm_type: Determines the basis set for the encoding; choose from `BasisSet.FOURIER` for - Fourier encoding, or `BasisSet.CHEBYSHEV` for Chebyshev polynomials of the first kind. + fm_type: Basis set for data encoding; choose from `BasisFeatureMap.FOURIER` for Fourier + encoding, or `BasisFeatureMap.CHEBYSHEV` for Chebyshev polynomials of the first kind. reupload_scaling: how the feature map scales the data that is re-uploaded for each qubit. feature_range: range of data that the input data is assumed to come from. target_range: range of data the data encoder assumes as the natural range. For example, @@ -66,15 +66,15 @@ def feature_map( Example: ```python exec="on" source="material-block" result="json" - from qadence import feature_map, BasisSet, ReuploadScaling + from qadence import feature_map, BasisFeatureMap, ReuploadScaling - fm = feature_map(3, fm_type=BasisSet.FOURIER) + fm = feature_map(3, fm_type=BasisFeatureMap.FOURIER) print(f"{fm = }") - fm = feature_map(3, fm_type=BasisSet.CHEBYSHEV) + fm = feature_map(3, fm_type=BasisFeatureMap.CHEBYSHEV) print(f"{fm = }") - fm = feature_map(3, fm_type=BasisSet.FOURIER, reupload_scaling = ReuploadScaling.TOWER) + fm = feature_map(3, fm_type=BasisFeatureMap.FOURIER, reupload_scaling = ReuploadScaling.TOWER) print(f"{fm = }") ``` """ @@ -87,22 +87,23 @@ def feature_map( if op not in ROTATIONS: raise ValueError( - f"Operation {op} not supported. Please one from {[rot.__name__ for rot in ROTATIONS]}." + f"Operation {op} not supported. " + f"Please provide one from {[rot.__name__ for rot in ROTATIONS]}." ) - # Backward compatibility + # Backwards compatibility if fm_type in ("fourier", "chebyshev", "tower"): logger.warning( "Selecting `fm_type` as 'fourier', 'chebyshev' or 'tower' is deprecated. " - "Please use the respective enumerations: 'fm_type = BasisSet.FOURIER', " - "'fm_type = BasisSet.CHEBYSHEV' or 'reupload_scaling = ReuploadScaling.TOWER'." + "Please use the respective enumerations: 'fm_type = BasisFeatureMap.FOURIER', " + "'fm_type = BasisFeatureMap.CHEBYSHEV' or 'reupload_scaling = ReuploadScaling.TOWER'." 
) if fm_type == "fourier": - fm_type = BasisSet.FOURIER + fm_type = BasisFeatureMap.FOURIER elif fm_type == "chebyshev": - fm_type = BasisSet.CHEBYSHEV + fm_type = BasisFeatureMap.CHEBYSHEV elif fm_type == "tower": - fm_type = BasisSet.FOURIER + fm_type = BasisFeatureMap.FOURIER reupload_scaling = ReuploadScaling.TOWER if isinstance(param, Parameter): @@ -120,45 +121,44 @@ def feature_map( scaling = (max(target_range) - min(target_range)) / (max(feature_range) - min(feature_range)) shift = min(target_range) - min(feature_range) * scaling - if math.isclose(scaling, 1.0): + if isclose(scaling, 1.0): # So we don't get 1.0 factor in visualization scaled_fparam = fparam + shift else: scaled_fparam = scaling * fparam + shift # Transform feature parameter - if fm_type == BasisSet.FOURIER: + if fm_type == BasisFeatureMap.FOURIER: transformed_feature = scaled_fparam - elif fm_type == BasisSet.CHEBYSHEV: - transformed_feature = sympy.acos(scaled_fparam) - elif inspect.isclass(fm_type) and issubclass(fm_type, sympy.Function): + elif fm_type == BasisFeatureMap.CHEBYSHEV: + transformed_feature = acos(scaled_fparam) + elif inspect.isclass(fm_type) and issubclass(fm_type, Function): transformed_feature = fm_type(scaled_fparam) else: raise NotImplementedError( - f"Feature map type {fm_type} not implemented. Choose an item from the BasisSet enum: " - f"{[bs.name for bs in BasisSet]}, or your own sympy.Function to wrap the given " - "feature parameter with." + f"Feature map type {fm_type} not implemented. Choose an item from the BasisFeatureMap " + f"enum: {[bs.name for bs in BasisFeatureMap]}, or your own sympy.Function to wrap " + "the given feature parameter with." ) - basis_tag = fm_type.value if isinstance(fm_type, BasisSet) else str(fm_type) + basis_tag = fm_type.value if isinstance(fm_type, BasisFeatureMap) else str(fm_type) # Set reupload scaling function if callable(reupload_scaling): rs_func = reupload_scaling rs_tag = "Custom" else: - try: - rs_func = RS_FUNC_DICT[reupload_scaling] # type: ignore [index] - if isinstance(reupload_scaling, ReuploadScaling): - rs_tag = reupload_scaling.value - else: - rs_tag = reupload_scaling - except (KeyError, ValueError) as error: + rs_func = RS_FUNC_DICT.get(reupload_scaling, None) # type: ignore [call-overload] + if rs_func is None: raise NotImplementedError( f"Reupload scaling {reupload_scaling} not implemented; choose an item from " f"the ReuploadScaling enum: {[rs.name for rs in ReuploadScaling]}, or your own " "python function with a single int arg as input and int or float output." ) + if isinstance(reupload_scaling, ReuploadScaling): + rs_tag = reupload_scaling.value + else: + rs_tag = reupload_scaling # Set overall multiplier multiplier = 1 if multiplier is None else multiplier @@ -175,7 +175,7 @@ def feature_map( def fourier_feature_map( - n_qubits: int, support: tuple[int, ...] = None, param: str = "phi", op: TRotation = RX + n_qubits: int, support: tuple[int, ...] = None, param: str = "phi", op: RotationTypes = RX ) -> AbstractBlock: """Construct a Fourier feature map. @@ -183,12 +183,12 @@ def fourier_feature_map( n_qubits: number of qubits across which the FM is created param: The base name for the feature `Parameter` """ - fm = feature_map(n_qubits, support=support, param=param, op=op, fm_type=BasisSet.FOURIER) + fm = feature_map(n_qubits, support=support, param=param, op=op, fm_type=BasisFeatureMap.FOURIER) return fm def chebyshev_feature_map( - n_qubits: int, support: tuple[int, ...] 
= None, param: str = "phi", op: TRotation = RX + n_qubits: int, support: tuple[int, ...] = None, param: str = "phi", op: RotationTypes = RX ) -> AbstractBlock: """Construct a Chebyshev feature map. @@ -197,12 +197,14 @@ def chebyshev_feature_map( support (Iterable[int]): The qubit support param: The base name for the feature `Parameter` """ - fm = feature_map(n_qubits, support=support, param=param, op=op, fm_type=BasisSet.CHEBYSHEV) + fm = feature_map( + n_qubits, support=support, param=param, op=op, fm_type=BasisFeatureMap.CHEBYSHEV + ) return fm def tower_feature_map( - n_qubits: int, support: tuple[int, ...] = None, param: str = "phi", op: TRotation = RX + n_qubits: int, support: tuple[int, ...] = None, param: str = "phi", op: RotationTypes = RX ) -> AbstractBlock: """Construct a Chebyshev tower feature map. @@ -215,7 +217,7 @@ def tower_feature_map( support=support, param=param, op=op, - fm_type=BasisSet.CHEBYSHEV, + fm_type=BasisFeatureMap.CHEBYSHEV, reupload_scaling=ReuploadScaling.TOWER, ) return fm @@ -247,10 +249,10 @@ def exp_fourier_feature_map( support=support, param=param, op=RZ, - fm_type=BasisSet.FOURIER, + fm_type=BasisFeatureMap.FOURIER, reupload_scaling=ReuploadScaling.EXP, feature_range=feature_range, - target_range=(0.0, 2 * sympy.pi), + target_range=(0.0, 2 * pi), ) rlayer.tag = None return tag(chain(hlayer, rlayer), f"ExpFourierFM({param})") diff --git a/qadence/types.py b/qadence/types.py index 2848983cb..f5b91f6aa 100644 --- a/qadence/types.py +++ b/qadence/types.py @@ -34,7 +34,7 @@ "StateGeneratorType", "LTSOrder", "ReuploadScaling", - "BasisSet", + "BasisFeatureMap", "TensorType", "DiffMode", "BackendName", @@ -124,7 +124,7 @@ class LTSOrder(StrEnum): """ST4.""" -class BasisSet(StrEnum): +class BasisFeatureMap(StrEnum): """Basis set for feature maps.""" FOURIER = "Fourier" diff --git a/tests/constructors/test_feature_maps.py b/tests/constructors/test_feature_maps.py index 9bcb35f4b..bd375f5ed 100644 --- a/tests/constructors/test_feature_maps.py +++ b/tests/constructors/test_feature_maps.py @@ -10,7 +10,7 @@ from qadence import ( PHASE, RX, - BasisSet, + BasisFeatureMap, FeatureParameter, ReuploadScaling, X, @@ -40,14 +40,16 @@ @pytest.mark.parametrize("param_dict", [PARAM_DICT_0, PARAM_DICT_1]) -@pytest.mark.parametrize("fm_type", [BasisSet.FOURIER, BasisSet.CHEBYSHEV, sympy.asin]) +@pytest.mark.parametrize( + "fm_type", [BasisFeatureMap.FOURIER, BasisFeatureMap.CHEBYSHEV, sympy.asin] +) @pytest.mark.parametrize( "reupload_scaling", [ReuploadScaling.CONSTANT, ReuploadScaling.TOWER, ReuploadScaling.EXP, lambda i: 5 * i + 2], ) def test_feature_map_creation_and_run( param_dict: dict, - fm_type: BasisSet | type[sympy.Function], + fm_type: BasisFeatureMap | type[sympy.Function], reupload_scaling: ReuploadScaling | Callable, ) -> None: n_qubits = 4 @@ -62,23 +64,23 @@ def test_feature_map_creation_and_run( @pytest.mark.parametrize("n_qubits", [3, 4, 5]) -@pytest.mark.parametrize("fm_type", [BasisSet.FOURIER, BasisSet.CHEBYSHEV]) +@pytest.mark.parametrize("fm_type", [BasisFeatureMap.FOURIER, BasisFeatureMap.CHEBYSHEV]) @pytest.mark.parametrize( "reupload_scaling", [ReuploadScaling.TOWER, ReuploadScaling.CONSTANT, ReuploadScaling.EXP, "exp_down"], ) def test_feature_map_correctness( - n_qubits: int, fm_type: BasisSet, reupload_scaling: ReuploadScaling + n_qubits: int, fm_type: BasisFeatureMap, reupload_scaling: ReuploadScaling ) -> None: support = tuple(range(n_qubits)) # Preparing exact result - if fm_type == BasisSet.CHEBYSHEV: + if fm_type == 
BasisFeatureMap.CHEBYSHEV: xv = torch.linspace(-0.95, 0.95, 100) transformed_xv = torch.acos(xv) feature_range = (-1.0, 1.0) target_range = (-1.0, 1.0) - elif fm_type == BasisSet.FOURIER: + elif fm_type == BasisFeatureMap.FOURIER: xv = torch.linspace(0.0, 2 * torch.pi, 100) transformed_xv = xv feature_range = (0.0, 2 * torch.pi) diff --git a/tests/qadence/test_matrices.py b/tests/qadence/test_matrices.py index 53ffd94a3..bf9442c6f 100644 --- a/tests/qadence/test_matrices.py +++ b/tests/qadence/test_matrices.py @@ -57,7 +57,7 @@ Z, ) from qadence.states import equivalent_state, random_state, zero_state -from qadence.types import BasisSet, Interaction, ReuploadScaling +from qadence.types import BasisFeatureMap, Interaction, ReuploadScaling def _calc_mat_vec_wavefunction( @@ -253,14 +253,14 @@ def test_total_magnetization(n_qubits: int) -> None: @pytest.mark.parametrize("n_qubits", [1, 2, 4]) -@pytest.mark.parametrize("fm_type", [BasisSet.FOURIER, BasisSet.CHEBYSHEV]) +@pytest.mark.parametrize("fm_type", [BasisFeatureMap.FOURIER, BasisFeatureMap.CHEBYSHEV]) @pytest.mark.parametrize( "reupload_scaling", [ReuploadScaling.CONSTANT, ReuploadScaling.TOWER, ReuploadScaling.EXP] ) @pytest.mark.parametrize("op", [RX, RY, RZ, PHASE]) def test_feature_maps( n_qubits: int, - fm_type: BasisSet, + fm_type: BasisFeatureMap, reupload_scaling: ReuploadScaling, op: type[RX] | type[RY] | type[RZ] | type[PHASE], ) -> None: diff --git a/tests/qadence/test_measurements/test_tomography.py b/tests/qadence/test_measurements/test_tomography.py index dbed4bf41..1c035e2af 100644 --- a/tests/qadence/test_measurements/test_tomography.py +++ b/tests/qadence/test_measurements/test_tomography.py @@ -9,7 +9,7 @@ from hypothesis import given, settings from metrics import HIGH_ACCEPTANCE, LOW_ACCEPTANCE, MIDDLE_ACCEPTANCE # type: ignore -from qadence import BackendName, BasisSet, DiffMode +from qadence import BackendName, BasisFeatureMap, DiffMode from qadence.backends import backend_factory from qadence.blocks import ( AbstractBlock, @@ -251,23 +251,23 @@ def test_empirical_average() -> None: (QuantumCircuit(1, H(0)), {}, Z(0)), (QuantumCircuit(2, kron(H(0), H(1))), {}, kron(X(0), X(1))), ( - QuantumCircuit(4, feature_map(4, fm_type=BasisSet.CHEBYSHEV), hea(4, depth=2)), + QuantumCircuit(4, feature_map(4, fm_type=BasisFeatureMap.CHEBYSHEV), hea(4, depth=2)), {"phi": torch.rand(1)}, total_magnetization(4), ), ( - QuantumCircuit(4, feature_map(4, fm_type=BasisSet.CHEBYSHEV), hea(4, depth=2)), + QuantumCircuit(4, feature_map(4, fm_type=BasisFeatureMap.CHEBYSHEV), hea(4, depth=2)), {"phi": torch.rand(1)}, zz_hamiltonian(4), ), # ( - # QuantumCircuit(4, feature_map(4, fm_type=BasisSet.CHEBYSHEV), hea(4, depth=2)), + # QuantumCircuit(4, feature_map(4, fm_type=BasisFeatureMap.CHEBYSHEV), hea(4, depth=2)), # {"phi": torch.rand(1)}, # ising_hamiltonian(4), # HIGH_ACCEPTANCE, # ), ( - QuantumCircuit(4, feature_map(4, fm_type=BasisSet.CHEBYSHEV), hea(4, depth=2)), + QuantumCircuit(4, feature_map(4, fm_type=BasisFeatureMap.CHEBYSHEV), hea(4, depth=2)), {"phi": torch.rand(1)}, add( 0.5 * kron(X(0), Y(1), X(2), Y(3)), @@ -478,7 +478,7 @@ def test_forward_and_backward_passes_with_qnn(observable: AbstractBlock, accepta kwargs = {"n_shots": 1000000} # fm = fourier_feature_map(n_qubits) - fm = feature_map(n_qubits, fm_type=BasisSet.CHEBYSHEV) + fm = feature_map(n_qubits, fm_type=BasisFeatureMap.CHEBYSHEV) ansatz = hea(n_qubits, depth=2) circuit = QuantumCircuit(n_qubits, fm, ansatz) values = {"phi": torch.rand(batch_size, requires_grad=True)} 
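As a usage note for the backwards-compatibility shim above: the legacy string inputs keep working but log a deprecation warning, while the enumerations are the supported path. A minimal sketch (hypothetical snippet, not part of the test suite):

```python
# Hypothetical sketch of the deprecation shim behaviour (not part of this diff).
from qadence import feature_map
from qadence.types import BasisFeatureMap

fm_legacy = feature_map(3, fm_type="chebyshev")              # still works, logs a warning
fm_enum = feature_map(3, fm_type=BasisFeatureMap.CHEBYSHEV)  # preferred enum input
```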
@@ -530,7 +530,7 @@ def test_partial_derivatives_with_qnn(observable: AbstractBlock, acceptance: flo kwargs = {"n_shots": 100000} # fm = fourier_feature_map(n_qubits) - fm = feature_map(n_qubits, fm_type=BasisSet.CHEBYSHEV) + fm = feature_map(n_qubits, fm_type=BasisFeatureMap.CHEBYSHEV) ansatz = hea(n_qubits, depth=2) circuit = QuantumCircuit(n_qubits, fm, ansatz) values = {"phi": torch.rand(batch_size, requires_grad=True)} @@ -626,7 +626,7 @@ def test_high_order_derivatives_with_qnn(observable: AbstractBlock, acceptance: kwargs = {"n_shots": 100000} # fm = fourier_feature_map(n_qubits) - fm = feature_map(n_qubits, fm_type=BasisSet.CHEBYSHEV) + fm = feature_map(n_qubits, fm_type=BasisFeatureMap.CHEBYSHEV) ansatz = hea(n_qubits, depth=2) circuit = QuantumCircuit(n_qubits, fm, ansatz) values = {"phi": torch.rand(batch_size, requires_grad=True)} From ca355bffdd9ad2bc6ce69127913e182b836c65ff Mon Sep 17 00:00:00 2001 From: jpmoutinho Date: Fri, 13 Oct 2023 15:04:59 +0200 Subject: [PATCH 16/34] trainable --- qadence/constructors/feature_maps.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/qadence/constructors/feature_maps.py b/qadence/constructors/feature_maps.py index 6f0758e92..5d0515d85 100644 --- a/qadence/constructors/feature_maps.py +++ b/qadence/constructors/feature_maps.py @@ -108,8 +108,7 @@ def feature_map( if isinstance(param, Parameter): fparam = param - if fparam.trainable: - fparam.trainable = False + fparam.trainable = False else: fparam = FeatureParameter(param) From 60e70e8743e9937bca27d5f2fe969eee125de3ce Mon Sep 17 00:00:00 2001 From: jpmoutinho Date: Fri, 13 Oct 2023 15:10:45 +0200 Subject: [PATCH 17/34] scaling enum --- qadence/constructors/feature_maps.py | 26 ++++++++++++------------- qadence/types.py | 4 ++-- tests/constructors/test_feature_maps.py | 23 +++++++++++++--------- tests/qadence/test_matrices.py | 6 +++--- 4 files changed, 32 insertions(+), 27 deletions(-) diff --git a/qadence/constructors/feature_maps.py b/qadence/constructors/feature_maps.py index 5d0515d85..1f61bc63a 100644 --- a/qadence/constructors/feature_maps.py +++ b/qadence/constructors/feature_maps.py @@ -10,7 +10,7 @@ from qadence.logger import get_logger from qadence.operations import PHASE, RX, RY, RZ, H from qadence.parameters import FeatureParameter, Parameter -from qadence.types import BasisFeatureMap, ReuploadScaling, TParameter +from qadence.types import BasisFeatureMap, ScalingFeatureMap, TParameter logger = get_logger(__name__) @@ -28,9 +28,9 @@ def _set_range(fm_type: BasisFeatureMap | type[Function] | str) -> tuple[float, RS_FUNC_DICT = { - ReuploadScaling.CONSTANT: lambda i: 1, - ReuploadScaling.TOWER: lambda i: float(i + 1), - ReuploadScaling.EXP: lambda i: float(2**i), + ScalingFeatureMap.CONSTANT: lambda i: 1, + ScalingFeatureMap.TOWER: lambda i: float(i + 1), + ScalingFeatureMap.EXP: lambda i: float(2**i), } @@ -40,7 +40,7 @@ def feature_map( param: Parameter | str = "phi", op: RotationTypes = RX, fm_type: BasisFeatureMap | type[Function] | str = BasisFeatureMap.FOURIER, - reupload_scaling: ReuploadScaling | Callable | str = ReuploadScaling.CONSTANT, + reupload_scaling: ScalingFeatureMap | Callable | str = ScalingFeatureMap.CONSTANT, feature_range: tuple[float, float] | None = None, target_range: tuple[float, float] | None = None, multiplier: Parameter | TParameter | None = None, @@ -66,7 +66,7 @@ def feature_map( Example: ```python exec="on" source="material-block" result="json" - from qadence import feature_map, BasisFeatureMap, ReuploadScaling + from qadence 
import feature_map, BasisFeatureMap, ScalingFeatureMap fm = feature_map(3, fm_type=BasisFeatureMap.FOURIER) print(f"{fm = }") @@ -74,7 +74,7 @@ def feature_map( fm = feature_map(3, fm_type=BasisFeatureMap.CHEBYSHEV) print(f"{fm = }") - fm = feature_map(3, fm_type=BasisFeatureMap.FOURIER, reupload_scaling = ReuploadScaling.TOWER) + fm = feature_map(3, fm_type=BasisFeatureMap.FOURIER, reupload_scaling = ScalingFeatureMap.TOWER) print(f"{fm = }") ``` """ @@ -96,7 +96,7 @@ def feature_map( logger.warning( "Selecting `fm_type` as 'fourier', 'chebyshev' or 'tower' is deprecated. " "Please use the respective enumerations: 'fm_type = BasisFeatureMap.FOURIER', " - "'fm_type = BasisFeatureMap.CHEBYSHEV' or 'reupload_scaling = ReuploadScaling.TOWER'." + "'fm_type = BasisFeatureMap.CHEBYSHEV' or 'reupload_scaling = ScalingFeatureMap.TOWER'." ) if fm_type == "fourier": fm_type = BasisFeatureMap.FOURIER @@ -104,7 +104,7 @@ def feature_map( fm_type = BasisFeatureMap.CHEBYSHEV elif fm_type == "tower": fm_type = BasisFeatureMap.FOURIER - reupload_scaling = ReuploadScaling.TOWER + reupload_scaling = ScalingFeatureMap.TOWER if isinstance(param, Parameter): fparam = param @@ -151,10 +151,10 @@ def feature_map( if rs_func is None: raise NotImplementedError( f"Reupload scaling {reupload_scaling} not implemented; choose an item from " - f"the ReuploadScaling enum: {[rs.name for rs in ReuploadScaling]}, or your own " + f"the ScalingFeatureMap enum: {[rs.name for rs in ScalingFeatureMap]}, or your own " "python function with a single int arg as input and int or float output." ) - if isinstance(reupload_scaling, ReuploadScaling): + if isinstance(reupload_scaling, ScalingFeatureMap): rs_tag = reupload_scaling.value else: rs_tag = reupload_scaling @@ -217,7 +217,7 @@ def tower_feature_map( param=param, op=op, fm_type=BasisFeatureMap.CHEBYSHEV, - reupload_scaling=ReuploadScaling.TOWER, + reupload_scaling=ScalingFeatureMap.TOWER, ) return fm @@ -249,7 +249,7 @@ def exp_fourier_feature_map( param=param, op=RZ, fm_type=BasisFeatureMap.FOURIER, - reupload_scaling=ReuploadScaling.EXP, + reupload_scaling=ScalingFeatureMap.EXP, feature_range=feature_range, target_range=(0.0, 2 * pi), ) diff --git a/qadence/types.py b/qadence/types.py index f5b91f6aa..af959f619 100644 --- a/qadence/types.py +++ b/qadence/types.py @@ -33,7 +33,7 @@ "BackendName", "StateGeneratorType", "LTSOrder", - "ReuploadScaling", + "ScalingFeatureMap", "BasisFeatureMap", "TensorType", "DiffMode", @@ -133,7 +133,7 @@ class BasisFeatureMap(StrEnum): """Chebyshev polynomials of the first kind.""" -class ReuploadScaling(StrEnum): +class ScalingFeatureMap(StrEnum): """Scaling for data reuploads in feature maps.""" CONSTANT = "Constant" diff --git a/tests/constructors/test_feature_maps.py b/tests/constructors/test_feature_maps.py index bd375f5ed..e90a19b2a 100644 --- a/tests/constructors/test_feature_maps.py +++ b/tests/constructors/test_feature_maps.py @@ -12,7 +12,7 @@ RX, BasisFeatureMap, FeatureParameter, - ReuploadScaling, + ScalingFeatureMap, X, Z, exp_fourier_feature_map, @@ -45,12 +45,17 @@ ) @pytest.mark.parametrize( "reupload_scaling", - [ReuploadScaling.CONSTANT, ReuploadScaling.TOWER, ReuploadScaling.EXP, lambda i: 5 * i + 2], + [ + ScalingFeatureMap.CONSTANT, + ScalingFeatureMap.TOWER, + ScalingFeatureMap.EXP, + lambda i: 5 * i + 2, + ], ) def test_feature_map_creation_and_run( param_dict: dict, fm_type: BasisFeatureMap | type[sympy.Function], - reupload_scaling: ReuploadScaling | Callable, + reupload_scaling: ScalingFeatureMap | Callable, ) -> 
None: n_qubits = 4 @@ -67,10 +72,10 @@ def test_feature_map_creation_and_run( @pytest.mark.parametrize("fm_type", [BasisFeatureMap.FOURIER, BasisFeatureMap.CHEBYSHEV]) @pytest.mark.parametrize( "reupload_scaling", - [ReuploadScaling.TOWER, ReuploadScaling.CONSTANT, ReuploadScaling.EXP, "exp_down"], + [ScalingFeatureMap.TOWER, ScalingFeatureMap.CONSTANT, ScalingFeatureMap.EXP, "exp_down"], ) def test_feature_map_correctness( - n_qubits: int, fm_type: BasisFeatureMap, reupload_scaling: ReuploadScaling + n_qubits: int, fm_type: BasisFeatureMap, reupload_scaling: ScalingFeatureMap ) -> None: support = tuple(range(n_qubits)) @@ -86,17 +91,17 @@ def test_feature_map_correctness( feature_range = (0.0, 2 * torch.pi) target_range = (0.0, 2 * torch.pi) - if reupload_scaling == ReuploadScaling.CONSTANT: + if reupload_scaling == ScalingFeatureMap.CONSTANT: def scaling(j: int) -> float: return 1 - elif reupload_scaling == ReuploadScaling.TOWER: + elif reupload_scaling == ScalingFeatureMap.TOWER: def scaling(j: int) -> float: return float(j + 1) - elif reupload_scaling == ReuploadScaling.EXP: + elif reupload_scaling == ScalingFeatureMap.EXP: def scaling(j: int) -> float: return float(2**j) @@ -106,7 +111,7 @@ def scaling(j: int) -> float: def scaling(j: int) -> float: return float(2 ** (n_qubits - j - 1)) - reupload_scaling = ReuploadScaling.EXP + reupload_scaling = ScalingFeatureMap.EXP support = tuple(reversed(range(n_qubits))) target = torch.cat( diff --git a/tests/qadence/test_matrices.py b/tests/qadence/test_matrices.py index bf9442c6f..d0f0c3c45 100644 --- a/tests/qadence/test_matrices.py +++ b/tests/qadence/test_matrices.py @@ -57,7 +57,7 @@ Z, ) from qadence.states import equivalent_state, random_state, zero_state -from qadence.types import BasisFeatureMap, Interaction, ReuploadScaling +from qadence.types import BasisFeatureMap, Interaction, ScalingFeatureMap def _calc_mat_vec_wavefunction( @@ -255,13 +255,13 @@ def test_total_magnetization(n_qubits: int) -> None: @pytest.mark.parametrize("n_qubits", [1, 2, 4]) @pytest.mark.parametrize("fm_type", [BasisFeatureMap.FOURIER, BasisFeatureMap.CHEBYSHEV]) @pytest.mark.parametrize( - "reupload_scaling", [ReuploadScaling.CONSTANT, ReuploadScaling.TOWER, ReuploadScaling.EXP] + "reupload_scaling", [ScalingFeatureMap.CONSTANT, ScalingFeatureMap.TOWER, ScalingFeatureMap.EXP] ) @pytest.mark.parametrize("op", [RX, RY, RZ, PHASE]) def test_feature_maps( n_qubits: int, fm_type: BasisFeatureMap, - reupload_scaling: ReuploadScaling, + reupload_scaling: ScalingFeatureMap, op: type[RX] | type[RY] | type[RZ] | type[PHASE], ) -> None: x = Parameter("x", trainable=False) From 6763f6b24de4ca029f120df3e376a3d87984f70f Mon Sep 17 00:00:00 2001 From: jpmoutinho Date: Fri, 13 Oct 2023 15:20:41 +0200 Subject: [PATCH 18/34] type[union] fix for python 3.9 --- qadence/constructors/feature_maps.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qadence/constructors/feature_maps.py b/qadence/constructors/feature_maps.py index 1f61bc63a..242f98566 100644 --- a/qadence/constructors/feature_maps.py +++ b/qadence/constructors/feature_maps.py @@ -15,7 +15,7 @@ logger = get_logger(__name__) ROTATIONS = [RX, RY, RZ, PHASE] -RotationTypes = type[RX | RY | RZ | PHASE] +RotationTypes = type[RX] | type[RY] | type[RZ] | type[PHASE] def _set_range(fm_type: BasisFeatureMap | type[Function] | str) -> tuple[float, float]: From 7de9acc0dba4f344e89307b51793b979bc01698f Mon Sep 17 00:00:00 2001 From: jpmoutinho Date: Fri, 13 Oct 2023 15:25:40 +0200 Subject: [PATCH 19/34] 
docstring --- qadence/constructors/feature_maps.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/qadence/constructors/feature_maps.py b/qadence/constructors/feature_maps.py index 242f98566..dc6958ac6 100644 --- a/qadence/constructors/feature_maps.py +++ b/qadence/constructors/feature_maps.py @@ -52,15 +52,17 @@ def feature_map( support: Puts one feature-encoding rotation gate on every qubit in `support`. n_qubits in this case specifies the total overall qubits of the circuit, which may be wider than the support itself, but not narrower. - param: Parameter of the feature map; you can pass a string, sympy expression or Parameter; + param: Parameter of the feature map; you can pass a string or Parameter; it will be set as non-trainable (FeatureParameter) regardless. - op: Rotation operation of the feature map; choose from RX, RY, RZ, PHASE + op: Rotation operation of the feature map; choose from RX, RY, RZ or PHASE. fm_type: Basis set for data encoding; choose from `BasisFeatureMap.FOURIER` for Fourier encoding, or `BasisFeatureMap.CHEBYSHEV` for Chebyshev polynomials of the first kind. reupload_scaling: how the feature map scales the data that is re-uploaded for each qubit. + choose from `ScalingFeatureMap` enumeration or provide your own function with a single + int as input and int or float as output. feature_range: range of data that the input data is assumed to come from. target_range: range of data the data encoder assumes as the natural range. For example, - in Chebyshev polynomials it is (-1, 1), while for Fourier it may be chosen as (0, 2pi). + in Chebyshev polynomials it is (-1, 1), while for Fourier it may be chosen as (0, 2*pi). multiplier: overall multiplier; this is useful for reuploading the feature map serially with different scalings; can be a number or parameter/expression. From f17ec05853e8f225aac0b4d48e97388812d6962e Mon Sep 17 00:00:00 2001 From: jpmoutinho Date: Fri, 13 Oct 2023 15:29:14 +0200 Subject: [PATCH 20/34] update --- docs/development/draw.md | 2 +- docs/qml/qml_constructors.md | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/development/draw.md b/docs/development/draw.md index df1f63fc5..032007e9f 100644 --- a/docs/development/draw.md +++ b/docs/development/draw.md @@ -56,7 +56,7 @@ print(html_string(block)) # markdown-exec: hide ``` ```python exec="on" source="material-block" html="1" -from qadence import feature_map, hea, chain, ReuploadScaling +from qadence import feature_map, hea, chain block = chain(feature_map(4, reupload_scaling="Tower"), hea(4,2)) from qadence.draw import html_string # markdown-exec: hide diff --git a/docs/qml/qml_constructors.md b/docs/qml/qml_constructors.md index e5ca0353e..d6d54dc0a 100644 --- a/docs/qml/qml_constructors.md +++ b/docs/qml/qml_constructors.md @@ -15,14 +15,14 @@ The `feature_map` function provides the necessary tools to easily several types two main types of feature maps use a Fourier basis or a Chebyshev basis. 
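Besides the basis choice, the `multiplier` argument documented in the docstring above is meant for serial re-uploads, that is, chaining the same feature several times with different overall scalings to enrich the reachable spectrum. A hedged sketch, assuming the `feature_map` API at this point of the series:

```python
# Hedged sketch of serial re-uploading with two different multipliers
# (illustrative block names, not taken from the docs).
from qadence import chain, feature_map
from qadence.types import BasisFeatureMap

fm_1 = feature_map(2, param="x", fm_type=BasisFeatureMap.FOURIER, multiplier=1.0)
fm_2 = feature_map(2, param="x", fm_type=BasisFeatureMap.FOURIER, multiplier=2.0)
serial_fm = chain(fm_1, fm_2)  # same feature "x" uploaded twice with different scalings
```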
```python exec="on" source="material-block" html="1" session="fms" -from qadence import feature_map, BasisSet, chain +from qadence import feature_map, BasisFeatureMap, chain from qadence.draw import display n_qubits = 3 -fourier_fm = feature_map(n_qubits, fm_type=BasisSet.FOURIER) +fourier_fm = feature_map(n_qubits, fm_type=BasisFeatureMap.FOURIER) -chebyshev_fm = feature_map(n_qubits, fm_type=BasisSet.CHEBYSHEV) +chebyshev_fm = feature_map(n_qubits, fm_type=BasisFeatureMap.CHEBYSHEV) block = chain(fourier_fm, chebyshev_fm) from qadence.draw import html_string # markdown-exec: hide From 1591d560fc2190559554576ca9f9a57390d4a546 Mon Sep 17 00:00:00 2001 From: jpmoutinho Date: Fri, 13 Oct 2023 17:02:07 +0200 Subject: [PATCH 21/34] more docs --- docs/qml/index.md | 11 +++- docs/qml/ml_tools.md | 10 ++-- docs/qml/qml_constructors.md | 100 +++++++++++++++++++++++++++++++---- 3 files changed, 103 insertions(+), 18 deletions(-) diff --git a/docs/qml/index.md b/docs/qml/index.md index 8932ce7bf..5a89b78d3 100644 --- a/docs/qml/index.md +++ b/docs/qml/index.md @@ -1,8 +1,15 @@ -Variational algorithms on noisy devices and quantum machine learning (QML) [^1] in particular are -the target applications for Qadence. For this purpose, the library offers both flexible symbolic expressions for the +Variational algorithms on noisy devices and quantum machine learning (QML)[^1] in particular are one of the main +target applications for Qadence. For this purpose, the library offers both flexible symbolic expressions for the quantum circuit parameters via `sympy` (see [here](../tutorials/parameters.md) for more details) and native automatic differentiation via integration with [PyTorch](https://pytorch.org/) deep learning framework. +Furthermore, Qadence offers a wide range of utilities for helping building and researching quantum machine learning algorithms, including: + +* [a set of constructors](qml_constructors.md) for circuits commonly used in quantum machine learning such as feature maps and ansatze +* [a set of tools](ml_tools) for training and optimizing quantum neural networks and loading classical data into a QML algorithm + +## Some simple examples + Qadence symbolic parameter interface allows to create arbitrary feature maps to encode classical data into quantum circuits with an arbitrary non-linear function embedding for the input values: diff --git a/docs/qml/ml_tools.md b/docs/qml/ml_tools.md index 9195ca3f1..12f4538d4 100644 --- a/docs/qml/ml_tools.md +++ b/docs/qml/ml_tools.md @@ -1,6 +1,4 @@ -## Machine Learning Tools - -### Dataloaders +## Dataloaders When using `qadence`, you can supply classical data to a quantum machine learning algorithm by using a standard PyTorch `DataLoader` instance. Qadence also provides @@ -49,7 +47,7 @@ for i in range(n_epochs): ``` -### Optimization routines +## Optimization routines For training QML models, `qadence` also offers a few out-of-the-box routines for optimizing differentiable models like `QNN`s and `QuantumModel`s containing either *trainable* and/or *non-trainable* parameters @@ -102,7 +100,7 @@ config = TrainConfig( Let's see it in action with a simple example. -#### Fitting a funtion with a QNN using `ml_tools` +### Fitting a funtion with a QNN using `ml_tools` Let's look at a complete example of how to use `train_with_grad` now. @@ -164,7 +162,7 @@ plt.plot(model(input_values).detach().numpy()) For users who want to use the low-level API of `qadence`, here is the example from above written without `train_with_grad`. 
-#### Fitting a function - Low-level API +### Fitting a function - Low-level API ```python exec="on" source="material-block" result="json" from pathlib import Path diff --git a/docs/qml/qml_constructors.md b/docs/qml/qml_constructors.md index d6d54dc0a..afb54eee7 100644 --- a/docs/qml/qml_constructors.md +++ b/docs/qml/qml_constructors.md @@ -1,17 +1,11 @@ -Qadence offers a wide range of utilities for helping building and researching -quantum machine learning algorithms, including: - -* a set of constructors for circuits commonly used in quantum machine learning -* a set of tools for optimizing quantum neural networks and loading classical data into a QML algorithm - -## Quantum machine learning constructors +# Quantum machine learning constructors Besides the [arbitrary Hamiltonian constructors](../tutorials/hamiltonians.md), Qadence also provides a complete set of program constructors useful for digital-analog quantum machine learning programs. -### Feature maps +## Feature maps -The `feature_map` function provides the necessary tools to easily several types of data-encoding blocks using rotation gates. The +The `feature_map` function can easily create several types of data-encoding blocks. The two main types of feature maps use a Fourier basis or a Chebyshev basis. ```python exec="on" source="material-block" html="1" session="fms" @@ -29,7 +23,93 @@ from qadence.draw import html_string # markdown-exec: hide print(html_string(block, size="6,4")) # markdown-exec: hide ``` -### Hardware-efficient ansatz +A custom encoding function can also be passed with `sympy` + +```python exec="on" source="material-block" html="1" session="fms" +from sympy import asin, Function + +n_qubits = 3 + +# Using a pre-defined sympy Function +custom_fm_0 = feature_map(n_qubits, fm_type=asin) + +# Creating a custom sub-class of Function +class custom_func(Function): + @classmethod + def eval(cls, x): + return asin(x) + x**2 + +custom_fm_1 = feature_map(n_qubits, fm_type=custom_func) + +block = chain(custom_fm_0, custom_fm_1) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(block, size="6,4")) # markdown-exec: hide +``` + +Furthermore, the `reupload_scaling` argument can be used to change the scaling applied to each qubit +in the support of the feature map. The default scalings can be chosen from the `ScalingFeatureMap` enumeration. + +```python exec="on" source="material-block" html="1" session="fms" +from qadence import ScalingFeatureMap +from qadence.draw import display + +n_qubits = 5 + +# Default constant value +fm_constant = feature_map(n_qubits, fm_type=BasisFeatureMap.FOURIER, reupload_scaling=ScalingFeatureMap.CONSTANT) + +# Linearly increasing scaling +fm_tower = feature_map(n_qubits, fm_type=BasisFeatureMap.FOURIER, reupload_scaling=ScalingFeatureMap.TOWER) + +# Exponentially increasing scaling +fm_exp = feature_map(n_qubits, fm_type=BasisFeatureMap.FOURIER, reupload_scaling=ScalingFeatureMap.EXP) + +block = chain(fm_constant, fm_tower, fm_exp) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(block, size="6,4")) # markdown-exec: hide +``` + +A custom scaling can also be defined with a function with an `int` input and `int` or `float` output. 
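Any Python callable with that signature is accepted, not only named functions; the unit tests, for instance, pass a plain lambda. A small sketch mirroring that test parameterization:

```python
# Sketch mirroring the lambda-based custom scaling exercised in the unit tests.
from qadence import feature_map
from qadence.types import BasisFeatureMap

fm_lambda = feature_map(
    5, fm_type=BasisFeatureMap.CHEBYSHEV, reupload_scaling=lambda i: 5 * i + 2
)
```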
+ +```python exec="on" source="material-block" html="1" session="fms" +n_qubits = 5 + +def custom_scaling(i: int) -> int | float: + """Sqrt(i+1)""" + return (i+1) ** (0.5) + +# Custom scaling function +fm_custom = feature_map(n_qubits, fm_type=BasisFeatureMap.CHEBYSHEV, reupload_scaling=custom_scaling) + +from qadence.draw import html_string # markdown-exec: hide +print(html_string(fm_custom, size="6,4")) # markdown-exec: hide +``` + +A full description of the remaining arguments can be found in the [`feature_map` API reference][qadence.constructors.feature_map]. We provide an example below. + +```python exec="on" source="material-block" html="1" session="fms" +from qadence import RY + +n_qubits = 5 + +# Custom scaling function +fm_full = feature_map( + n_qubits = n_qubits, + support = tuple(reversed(range(n_qubits))), # Reverse the qubit support to run the scaling from bottom to top + param = "x", # Change the name of the parameter + op = RY, # Change the rotation gate between RX, RY, RZ or PHASE + fm_type = BasisFeatureMap.CHEBYSHEV, + reupload_scaling = ScalingFeatureMap.EXP, + feature_range = (-1.0, 2.0), # Range from which the input data comes from + target_range = (1.0, 3.0), # Range the encoder assumes as the natural range + multiplier = 5.0 # Extra multiplier, which can also be a Parameter +) + +from qadence.draw import html_string # markdown-exec: hide +print(html_string(fm_full, size="6,4")) # markdown-exec: hide +``` + +## Hardware-efficient ansatz Ansatze blocks for quantum machine-learning are typically built following the Hardware-Efficient Ansatz formalism (HEA). Both fully digital and digital-analog HEAs can easily be built with the `hea` function. By default, From 99770b9628426d0115da449fd4bfef98bc5be9ec Mon Sep 17 00:00:00 2001 From: jpmoutinho Date: Fri, 13 Oct 2023 17:09:47 +0200 Subject: [PATCH 22/34] revert to Type --- qadence/constructors/feature_maps.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/qadence/constructors/feature_maps.py b/qadence/constructors/feature_maps.py index dc6958ac6..2cdfe1b96 100644 --- a/qadence/constructors/feature_maps.py +++ b/qadence/constructors/feature_maps.py @@ -3,6 +3,7 @@ import inspect from collections.abc import Callable from math import isclose, pi +from typing import Type from sympy import Function, acos @@ -15,7 +16,7 @@ logger = get_logger(__name__) ROTATIONS = [RX, RY, RZ, PHASE] -RotationTypes = type[RX] | type[RY] | type[RZ] | type[PHASE] +RotationTypes = Type[RX | RY | RZ | PHASE] def _set_range(fm_type: BasisFeatureMap | type[Function] | str) -> tuple[float, float]: From 0f900c1d3d02ed88c39a1a1dc5df8538b60d7266 Mon Sep 17 00:00:00 2001 From: jpmoutinho Date: Fri, 13 Oct 2023 17:18:20 +0200 Subject: [PATCH 23/34] Revert to Union --- qadence/constructors/feature_maps.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/qadence/constructors/feature_maps.py b/qadence/constructors/feature_maps.py index 2cdfe1b96..c833f5fd5 100644 --- a/qadence/constructors/feature_maps.py +++ b/qadence/constructors/feature_maps.py @@ -3,7 +3,7 @@ import inspect from collections.abc import Callable from math import isclose, pi -from typing import Type +from typing import Union from sympy import Function, acos @@ -16,7 +16,7 @@ logger = get_logger(__name__) ROTATIONS = [RX, RY, RZ, PHASE] -RotationTypes = Type[RX | RY | RZ | PHASE] +RotationTypes = type[Union[RX | RY | RZ | PHASE]] def _set_range(fm_type: BasisFeatureMap | type[Function] | str) -> tuple[float, float]: From 
796ab9aae9b0fdcf086d0960627df59bec78bede Mon Sep 17 00:00:00 2001 From: jpmoutinho Date: Fri, 13 Oct 2023 17:24:27 +0200 Subject: [PATCH 24/34] fix --- qadence/constructors/feature_maps.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qadence/constructors/feature_maps.py b/qadence/constructors/feature_maps.py index c833f5fd5..ca93ea615 100644 --- a/qadence/constructors/feature_maps.py +++ b/qadence/constructors/feature_maps.py @@ -16,7 +16,7 @@ logger = get_logger(__name__) ROTATIONS = [RX, RY, RZ, PHASE] -RotationTypes = type[Union[RX | RY | RZ | PHASE]] +RotationTypes = type[Union[RX, RY, RZ, PHASE]] def _set_range(fm_type: BasisFeatureMap | type[Function] | str) -> tuple[float, float]: From ba1b4a9b5d9b6ca58034a16f78c970f845c4afb5 Mon Sep 17 00:00:00 2001 From: jpmoutinho Date: Fri, 13 Oct 2023 17:25:11 +0200 Subject: [PATCH 25/34] fix --- qadence/constructors/feature_maps.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qadence/constructors/feature_maps.py b/qadence/constructors/feature_maps.py index c833f5fd5..ca93ea615 100644 --- a/qadence/constructors/feature_maps.py +++ b/qadence/constructors/feature_maps.py @@ -16,7 +16,7 @@ logger = get_logger(__name__) ROTATIONS = [RX, RY, RZ, PHASE] -RotationTypes = type[Union[RX | RY | RZ | PHASE]] +RotationTypes = type[Union[RX, RY, RZ, PHASE]] def _set_range(fm_type: BasisFeatureMap | type[Function] | str) -> tuple[float, float]: From bbe8f1843aa6e1ad8bfe2647a7f3700d68ff3b5e Mon Sep 17 00:00:00 2001 From: jpmoutinho Date: Mon, 16 Oct 2023 12:39:27 +0200 Subject: [PATCH 26/34] tower to chebyshev --- qadence/constructors/feature_maps.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qadence/constructors/feature_maps.py b/qadence/constructors/feature_maps.py index ca93ea615..ffee66c12 100644 --- a/qadence/constructors/feature_maps.py +++ b/qadence/constructors/feature_maps.py @@ -106,7 +106,7 @@ def feature_map( elif fm_type == "chebyshev": fm_type = BasisFeatureMap.CHEBYSHEV elif fm_type == "tower": - fm_type = BasisFeatureMap.FOURIER + fm_type = BasisFeatureMap.CHEBYSHEV reupload_scaling = ScalingFeatureMap.TOWER if isinstance(param, Parameter): From 2f1db4e52596df18fd7da6f40489026b2930d6a5 Mon Sep 17 00:00:00 2001 From: jpmoutinho Date: Mon, 16 Oct 2023 12:57:01 +0200 Subject: [PATCH 27/34] update index.md --- docs/qml/index.md | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/docs/qml/index.md b/docs/qml/index.md index 5a89b78d3..33778af95 100644 --- a/docs/qml/index.md +++ b/docs/qml/index.md @@ -14,7 +14,7 @@ Qadence symbolic parameter interface allows to create arbitrary feature maps to encode classical data into quantum circuits with an arbitrary non-linear function embedding for the input values: -```python exec="on" source="material-block" html="1" result="json" session="qml" +```python exec="on" source="material-block" result="json" session="qml" import qadence as qd from qadence.operations import * import torch @@ -29,7 +29,7 @@ feature_map = qd.kron(RX(i, 2 * acos(fp)) for i in range(n_qubits)) # the name of the assigned to the feature parameter inputs = {"phi": torch.rand(3)} samples = qd.sample(feature_map, values=inputs) -print(samples) +print(samples[0]) ``` The [`constructors.feature_map`][qadence.constructors.feature_map] module provides @@ -53,22 +53,23 @@ model = qd.QNN(circuit, observable) # NOTE: the `QNN` is a torch.nn.Module assert isinstance(model, torch.nn.Module) +print(isinstance(model, torch.nn.Module)) # markdown-exec: hide 
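# Note: because QNN subclasses torch.nn.Module, it composes with standard
# PyTorch machinery (optimizers, nn containers, autograd); the differentiation
# example below relies on exactly this.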
 ```

 Differentiation works the same way as any other PyTorch module:

-```python exec="on" source="material-block" html="1" result="json" session="qml"
+```python exec="on" source="material-block" result="json" session="qml"
 values = {"phi": torch.rand(10, requires_grad=True)}

 # the forward pass of the quantum model returns the expectation
 # value of the input observable
 out = model(values)
-print(f"Quantum model output: {out}")
+print(f"Quantum model output: \n{out}\n")

 # you can compute the gradient with respect to inputs using
 # PyTorch autograd differentiation engine
 dout = torch.autograd.grad(out, values["phi"], torch.ones_like(out), create_graph=True)[0]
-print(f"First-order derivative w.r.t. the feature parameter: {dout}")
+print(f"First-order derivative w.r.t. the feature parameter: \n{dout}")

 # you can also call directly a backward pass to compute derivatives with respect
 # to the variational parameters and use it for implementing variational
 To run QML on real devices, Qadence offers generalized parameter shift rules (GPSR)
 for arbitrary quantum operations which can be selected when constructing the
 `QNN` model:

-```python exec="on" source="material-block" html="1" result="json" session="qml"
+```python exec="on" source="material-block" result="json" session="qml"
 model = qd.QNN(circuit, observable, diff_mode="gpsr")
 out = model(values)

 dout = torch.autograd.grad(out, values["phi"], torch.ones_like(out), create_graph=True)[0]
-print(f"First-order derivative w.r.t. the feature parameter: {dout}")
+print(f"First-order derivative w.r.t. the feature parameter: \n{dout}")
 ```

 See [here](../advanced_tutorials/differentiability.md) for more details on how the parameter

From e18e2414266993772804281b1430842eabd7a028 Mon Sep 17 00:00:00 2001
From: jpmoutinho
Date: Mon, 16 Oct 2023 13:43:51 +0200
Subject: [PATCH 28/34] ml_tools

---
 docs/qml/ml_tools.md | 25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)

diff --git a/docs/qml/ml_tools.md b/docs/qml/ml_tools.md
index 12f4538d4..03a2d8477 100644
--- a/docs/qml/ml_tools.md
+++ b/docs/qml/ml_tools.md
@@ -1,11 +1,11 @@
 ## Dataloaders

-When using `qadence`, you can supply classical data to a quantum machine learning
+When using Qadence, you can supply classical data to a quantum machine learning
 algorithm by using a standard PyTorch `DataLoader` instance. Qadence also provides
 the `DictDataLoader` convenience class which allows building dictionaries of
 `DataLoader` instances and easily iterating over them.
-```python exec="on" source="material-block" result="json" +```python exec="on" source="material-block" import torch from torch.utils.data import DataLoader, TensorDataset from qadence.ml_tools import DictDataLoader @@ -49,9 +49,9 @@ for i in range(n_epochs): ## Optimization routines -For training QML models, `qadence` also offers a few out-of-the-box routines for optimizing differentiable +For training QML models, Qadence also offers a few out-of-the-box routines for optimizing differentiable models like `QNN`s and `QuantumModel`s containing either *trainable* and/or *non-trainable* parameters -(you can refer to [this](../tutorials/parameters) for a refresh about different parameter types): +(you can refer to [the parameters tutorial](../tutorials/parameters.md) for a refresh about different parameter types): * [`train_with_grad`][qadence.ml_tools.train_with_grad] for gradient-based optimization using PyTorch native optimizers * [`train_gradient_free`][qadence.ml_tools.train_gradient_free] for gradient-free optimization using @@ -65,7 +65,7 @@ As every other training routine commonly used in Machine Learning, it requires However, in addition, it requires a `loss_fn` and a `TrainConfig`. A `loss_fn` is required to be a function which expects both a model and data and returns a tuple of (loss, metrics: ``), where `metrics` is a dict of scalars which can be customized too. -```python exec="on" source="material-block" result="json" +```python exec="on" source="material-block" import torch from itertools import count cnt = count() @@ -83,7 +83,7 @@ def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, d The [`TrainConfig`][qadence.ml_tools.config.TrainConfig] tells `train_with_grad` what batch_size should be used, how many epochs to train, in which intervals to print/log metrics and how often to store intermediate checkpoints. -```python exec="on" source="material-block" result="json" +```python exec="on" source="material-block" from qadence.ml_tools import TrainConfig batch_size = 5 @@ -104,7 +104,7 @@ Let's see it in action with a simple example. Let's look at a complete example of how to use `train_with_grad` now. -```python exec="on" source="material-block" result="json" +```python exec="on" source="material-block" html="1" from pathlib import Path import torch from itertools import count @@ -138,7 +138,7 @@ def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, d tmp_path = Path("/tmp") -n_epochs = 5 +n_epochs = 50 config = TrainConfig( folder=tmp_path, @@ -155,8 +155,11 @@ y = torch.sin(x) train_with_grad(model, (x, y), optimizer, config, loss_fn=loss_fn) -plt.plot(y.numpy()) -plt.plot(model(input_values).detach().numpy()) +plt.clf() # markdown-exec: hide +plt.plot(x.numpy(), y.numpy()) +plt.plot(x.numpy(), model(x).detach().numpy()) +from docs import docsutils # markdown-exec: hide +print(docsutils.fig_to_html(plt.gcf())) # markdown-exec: hide ``` For users who want to use the low-level API of `qadence`, here is the example from above @@ -164,7 +167,7 @@ written without `train_with_grad`. 
### Fitting a function - Low-level API -```python exec="on" source="material-block" result="json" +```python exec="on" source="material-block" from pathlib import Path import torch from itertools import count From 913dee4cf735a41c60f5f539f35b2ff6cc5223fb Mon Sep 17 00:00:00 2001 From: jpmoutinho Date: Tue, 17 Oct 2023 14:50:20 +0200 Subject: [PATCH 29/34] fix conflicts --- qadence/constructors/feature_maps.py | 89 ---------------------------- qadence/types.py | 8 --- 2 files changed, 97 deletions(-) diff --git a/qadence/constructors/feature_maps.py b/qadence/constructors/feature_maps.py index e171b77fb..b0754b377 100644 --- a/qadence/constructors/feature_maps.py +++ b/qadence/constructors/feature_maps.py @@ -1,10 +1,7 @@ from __future__ import annotations import inspect -<<<<<<< HEAD -======= import warnings ->>>>>>> main from collections.abc import Callable from math import isclose, pi from typing import Union @@ -15,11 +12,7 @@ from qadence.logger import get_logger from qadence.operations import PHASE, RX, RY, RZ, H from qadence.parameters import FeatureParameter, Parameter -<<<<<<< HEAD -from qadence.types import BasisFeatureMap, ScalingFeatureMap, TParameter -======= from qadence.types import BasisSet, ReuploadScaling, TParameter ->>>>>>> main logger = get_logger(__name__) @@ -27,32 +20,19 @@ RotationTypes = type[Union[RX, RY, RZ, PHASE]] -<<<<<<< HEAD -def _set_range(fm_type: BasisFeatureMap | type[Function] | str) -> tuple[float, float]: - if fm_type == BasisFeatureMap.FOURIER: - return (0.0, 2 * pi) - elif fm_type == BasisFeatureMap.CHEBYSHEV: -======= def _set_range(fm_type: BasisSet | type[Function] | str) -> tuple[float, float]: if fm_type == BasisSet.FOURIER: return (0.0, 2 * pi) elif fm_type == BasisSet.CHEBYSHEV: ->>>>>>> main return (-1.0, 1.0) else: return (0.0, 1.0) RS_FUNC_DICT = { -<<<<<<< HEAD - ScalingFeatureMap.CONSTANT: lambda i: 1, - ScalingFeatureMap.TOWER: lambda i: float(i + 1), - ScalingFeatureMap.EXP: lambda i: float(2**i), -======= ReuploadScaling.CONSTANT: lambda i: 1, ReuploadScaling.TOWER: lambda i: float(i + 1), ReuploadScaling.EXP: lambda i: float(2**i), ->>>>>>> main } @@ -61,13 +41,8 @@ def feature_map( support: tuple[int, ...] | None = None, param: Parameter | str = "phi", op: RotationTypes = RX, -<<<<<<< HEAD - fm_type: BasisFeatureMap | type[Function] | str = BasisFeatureMap.FOURIER, - reupload_scaling: ScalingFeatureMap | Callable | str = ScalingFeatureMap.CONSTANT, -======= fm_type: BasisSet | type[Function] | str = BasisSet.FOURIER, reupload_scaling: ReuploadScaling | Callable | str = ReuploadScaling.CONSTANT, ->>>>>>> main feature_range: tuple[float, float] | None = None, target_range: tuple[float, float] | None = None, multiplier: Parameter | TParameter | None = None, @@ -82,17 +57,10 @@ def feature_map( param: Parameter of the feature map; you can pass a string or Parameter; it will be set as non-trainable (FeatureParameter) regardless. op: Rotation operation of the feature map; choose from RX, RY, RZ or PHASE. -<<<<<<< HEAD - fm_type: Basis set for data encoding; choose from `BasisFeatureMap.FOURIER` for Fourier - encoding, or `BasisFeatureMap.CHEBYSHEV` for Chebyshev polynomials of the first kind. - reupload_scaling: how the feature map scales the data that is re-uploaded for each qubit. - choose from `ScalingFeatureMap` enumeration or provide your own function with a single -======= fm_type: Basis set for data encoding; choose from `BasisSet.FOURIER` for Fourier encoding, or `BasisSet.CHEBYSHEV` for Chebyshev polynomials of the first kind. 
reupload_scaling: how the feature map scales the data that is re-uploaded for each qubit. choose from `ReuploadScaling` enumeration or provide your own function with a single ->>>>>>> main int as input and int or float as output. feature_range: range of data that the input data is assumed to come from. target_range: range of data the data encoder assumes as the natural range. For example, @@ -102,17 +70,6 @@ def feature_map( Example: ```python exec="on" source="material-block" result="json" -<<<<<<< HEAD - from qadence import feature_map, BasisFeatureMap, ScalingFeatureMap - - fm = feature_map(3, fm_type=BasisFeatureMap.FOURIER) - print(f"{fm = }") - - fm = feature_map(3, fm_type=BasisFeatureMap.CHEBYSHEV) - print(f"{fm = }") - - fm = feature_map(3, fm_type=BasisFeatureMap.FOURIER, reupload_scaling = ScalingFeatureMap.TOWER) -======= from qadence import feature_map, BasisSet, ReuploadScaling fm = feature_map(3, fm_type=BasisSet.FOURIER) @@ -122,7 +79,6 @@ def feature_map( print(f"{fm = }") fm = feature_map(3, fm_type=BasisSet.FOURIER, reupload_scaling = ReuploadScaling.TOWER) ->>>>>>> main print(f"{fm = }") ``` """ @@ -175,35 +131,20 @@ def feature_map( scaled_fparam = scaling * fparam + shift # Transform feature parameter -<<<<<<< HEAD - if fm_type == BasisFeatureMap.FOURIER: - transformed_feature = scaled_fparam - elif fm_type == BasisFeatureMap.CHEBYSHEV: -======= if fm_type == BasisSet.FOURIER: transformed_feature = scaled_fparam elif fm_type == BasisSet.CHEBYSHEV: ->>>>>>> main transformed_feature = acos(scaled_fparam) elif inspect.isclass(fm_type) and issubclass(fm_type, Function): transformed_feature = fm_type(scaled_fparam) else: raise NotImplementedError( -<<<<<<< HEAD - f"Feature map type {fm_type} not implemented. Choose an item from the BasisFeatureMap " - f"enum: {[bs.name for bs in BasisFeatureMap]}, or your own sympy.Function to wrap " - "the given feature parameter with." - ) - - basis_tag = fm_type.value if isinstance(fm_type, BasisFeatureMap) else str(fm_type) -======= f"Feature map type {fm_type} not implemented. Choose an item from the BasisSet " f"enum: {[bs.name for bs in BasisSet]}, or your own sympy.Function to wrap " "the given feature parameter with." ) basis_tag = fm_type.value if isinstance(fm_type, BasisSet) else str(fm_type) ->>>>>>> main # Set reupload scaling function if callable(reupload_scaling): @@ -214,17 +155,10 @@ def feature_map( if rs_func is None: raise NotImplementedError( f"Reupload scaling {reupload_scaling} not implemented; choose an item from " -<<<<<<< HEAD - f"the ScalingFeatureMap enum: {[rs.name for rs in ScalingFeatureMap]}, or your own " - "python function with a single int arg as input and int or float output." - ) - if isinstance(reupload_scaling, ScalingFeatureMap): -======= f"the ReuploadScaling enum: {[rs.name for rs in ReuploadScaling]}, or your own " "python function with a single int arg as input and int or float output." ) if isinstance(reupload_scaling, ReuploadScaling): ->>>>>>> main rs_tag = reupload_scaling.value else: rs_tag = reupload_scaling @@ -252,15 +186,11 @@ def fourier_feature_map( n_qubits: number of qubits across which the FM is created param: The base name for the feature `Parameter` """ -<<<<<<< HEAD - fm = feature_map(n_qubits, support=support, param=param, op=op, fm_type=BasisFeatureMap.FOURIER) -======= warnings.warn( "Function 'fourier_feature_map' is deprecated. 
Please use 'feature_map' directly.", FutureWarning, ) fm = feature_map(n_qubits, support=support, param=param, op=op, fm_type=BasisSet.FOURIER) ->>>>>>> main return fm @@ -274,17 +204,11 @@ def chebyshev_feature_map( support (Iterable[int]): The qubit support param: The base name for the feature `Parameter` """ -<<<<<<< HEAD - fm = feature_map( - n_qubits, support=support, param=param, op=op, fm_type=BasisFeatureMap.CHEBYSHEV - ) -======= warnings.warn( "Function 'chebyshev_feature_map' is deprecated. Please use 'feature_map' directly.", FutureWarning, ) fm = feature_map(n_qubits, support=support, param=param, op=op, fm_type=BasisSet.CHEBYSHEV) ->>>>>>> main return fm @@ -297,25 +221,17 @@ def tower_feature_map( n_qubits: number of qubits across which the FM is created param: The base name for the feature `Parameter` """ -<<<<<<< HEAD -======= warnings.warn( "Function 'tower_feature_map' is deprecated. Please use feature_map directly.", FutureWarning, ) ->>>>>>> main fm = feature_map( n_qubits, support=support, param=param, op=op, -<<<<<<< HEAD - fm_type=BasisFeatureMap.CHEBYSHEV, - reupload_scaling=ScalingFeatureMap.TOWER, -======= fm_type=BasisSet.CHEBYSHEV, reupload_scaling=ReuploadScaling.TOWER, ->>>>>>> main ) return fm @@ -346,13 +262,8 @@ def exp_fourier_feature_map( support=support, param=param, op=RZ, -<<<<<<< HEAD - fm_type=BasisFeatureMap.FOURIER, - reupload_scaling=ScalingFeatureMap.EXP, -======= fm_type=BasisSet.FOURIER, reupload_scaling=ReuploadScaling.EXP, ->>>>>>> main feature_range=feature_range, target_range=(0.0, 2 * pi), ) diff --git a/qadence/types.py b/qadence/types.py index 355b34ef7..2848983cb 100644 --- a/qadence/types.py +++ b/qadence/types.py @@ -124,11 +124,7 @@ class LTSOrder(StrEnum): """ST4.""" -<<<<<<< HEAD -class BasisFeatureMap(StrEnum): -======= class BasisSet(StrEnum): ->>>>>>> main """Basis set for feature maps.""" FOURIER = "Fourier" @@ -137,11 +133,7 @@ class BasisSet(StrEnum): """Chebyshev polynomials of the first kind.""" -<<<<<<< HEAD -class ScalingFeatureMap(StrEnum): -======= class ReuploadScaling(StrEnum): ->>>>>>> main """Scaling for data reuploads in feature maps.""" CONSTANT = "Constant" From 7ade9979dea8a32b7493d9e938ae00cc2a93576a Mon Sep 17 00:00:00 2001 From: jpmoutinho Date: Tue, 17 Oct 2023 14:51:15 +0200 Subject: [PATCH 30/34] update docs --- docs/qml/qml_constructors.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/docs/qml/qml_constructors.md b/docs/qml/qml_constructors.md index afb54eee7..1250719c8 100644 --- a/docs/qml/qml_constructors.md +++ b/docs/qml/qml_constructors.md @@ -9,14 +9,14 @@ The `feature_map` function can easily create several types of data-encoding bloc two main types of feature maps use a Fourier basis or a Chebyshev basis. 
```python exec="on" source="material-block" html="1" session="fms" -from qadence import feature_map, BasisFeatureMap, chain +from qadence import feature_map, BasisSet, chain from qadence.draw import display n_qubits = 3 -fourier_fm = feature_map(n_qubits, fm_type=BasisFeatureMap.FOURIER) +fourier_fm = feature_map(n_qubits, fm_type=BasisSet.FOURIER) -chebyshev_fm = feature_map(n_qubits, fm_type=BasisFeatureMap.CHEBYSHEV) +chebyshev_fm = feature_map(n_qubits, fm_type=BasisSet.CHEBYSHEV) block = chain(fourier_fm, chebyshev_fm) from qadence.draw import html_string # markdown-exec: hide @@ -47,22 +47,22 @@ print(html_string(block, size="6,4")) # markdown-exec: hide ``` Furthermore, the `reupload_scaling` argument can be used to change the scaling applied to each qubit -in the support of the feature map. The default scalings can be chosen from the `ScalingFeatureMap` enumeration. +in the support of the feature map. The default scalings can be chosen from the `ReuploadScaling` enumeration. ```python exec="on" source="material-block" html="1" session="fms" -from qadence import ScalingFeatureMap +from qadence import ReuploadScaling from qadence.draw import display n_qubits = 5 # Default constant value -fm_constant = feature_map(n_qubits, fm_type=BasisFeatureMap.FOURIER, reupload_scaling=ScalingFeatureMap.CONSTANT) +fm_constant = feature_map(n_qubits, fm_type=BasisSet.FOURIER, reupload_scaling=ReuploadScaling.CONSTANT) # Linearly increasing scaling -fm_tower = feature_map(n_qubits, fm_type=BasisFeatureMap.FOURIER, reupload_scaling=ScalingFeatureMap.TOWER) +fm_tower = feature_map(n_qubits, fm_type=BasisSet.FOURIER, reupload_scaling=ReuploadScaling.TOWER) # Exponentially increasing scaling -fm_exp = feature_map(n_qubits, fm_type=BasisFeatureMap.FOURIER, reupload_scaling=ScalingFeatureMap.EXP) +fm_exp = feature_map(n_qubits, fm_type=BasisSet.FOURIER, reupload_scaling=ReuploadScaling.EXP) block = chain(fm_constant, fm_tower, fm_exp) from qadence.draw import html_string # markdown-exec: hide @@ -79,7 +79,7 @@ def custom_scaling(i: int) -> int | float: return (i+1) ** (0.5) # Custom scaling function -fm_custom = feature_map(n_qubits, fm_type=BasisFeatureMap.CHEBYSHEV, reupload_scaling=custom_scaling) +fm_custom = feature_map(n_qubits, fm_type=BasisSet.CHEBYSHEV, reupload_scaling=custom_scaling) from qadence.draw import html_string # markdown-exec: hide print(html_string(fm_custom, size="6,4")) # markdown-exec: hide @@ -98,8 +98,8 @@ fm_full = feature_map( support = tuple(reversed(range(n_qubits))), # Reverse the qubit support to run the scaling from bottom to top param = "x", # Change the name of the parameter op = RY, # Change the rotation gate between RX, RY, RZ or PHASE - fm_type = BasisFeatureMap.CHEBYSHEV, - reupload_scaling = ScalingFeatureMap.EXP, + fm_type = BasisSet.CHEBYSHEV, + reupload_scaling = ReuploadScaling.EXP, feature_range = (-1.0, 2.0), # Range from which the input data comes from target_range = (1.0, 3.0), # Range the encoder assumes as the natural range multiplier = 5.0 # Extra multiplier, which can also be a Parameter From df2296758038780613d4776ba1268c8de98343b2 Mon Sep 17 00:00:00 2001 From: jpmoutinho Date: Wed, 18 Oct 2023 11:31:41 +0200 Subject: [PATCH 31/34] comments --- docs/qml/index.md | 9 +++++---- docs/qml/ml_tools.md | 4 ++-- docs/qml/qml_constructors.md | 2 +- mkdocs.yml | 10 ++++------ 4 files changed, 12 insertions(+), 13 deletions(-) diff --git a/docs/qml/index.md b/docs/qml/index.md index 33778af95..838da66bc 100644 --- a/docs/qml/index.md +++ b/docs/qml/index.md @@ 
-22,14 +22,15 @@ from sympy import acos

 n_qubits = 4

+# Example feature map, also directly available with the `feature_map` function
 fp = qd.FeatureParameter("phi")
-feature_map = qd.kron(RX(i, 2 * acos(fp)) for i in range(n_qubits))
+fm = qd.kron(RX(i, acos(fp)) for i in range(n_qubits))

 # the key in the dictionary must correspond to
 # the name assigned to the feature parameter
 inputs = {"phi": torch.rand(3)}
-samples = qd.sample(feature_map, values=inputs)
-print(samples[0])
+samples = qd.sample(fm, values=inputs)
+print(samples[0]) # markdown-exec: hide
 ```

 The [`constructors.feature_map`][qadence.constructors.feature_map] module provides
@@ -64,7 +65,7 @@ values = {"phi": torch.rand(10, requires_grad=True)}

 # the forward pass of the quantum model returns the expectation
 # value of the input observable
 out = model(values)
-print(f"Quantum model output: \n{out}\n")
+print(f"Quantum model output: \n{out}\n") # markdown-exec: hide

 # you can compute the gradient with respect to inputs using
 # PyTorch autograd differentiation engine
diff --git a/docs/qml/ml_tools.md b/docs/qml/ml_tools.md
index 03a2d8477..8fd2b1b96 100644
--- a/docs/qml/ml_tools.md
+++ b/docs/qml/ml_tools.md
@@ -50,8 +50,8 @@ for i in range(n_epochs):
 ## Optimization routines

 For training QML models, Qadence also offers a few out-of-the-box routines for optimizing differentiable
-models like `QNN`s and `QuantumModel`s containing either *trainable* and/or *non-trainable* parameters
-(you can refer to [the parameters tutorial](../tutorials/parameters.md) for a refresh about different parameter types):
+models, _e.g._ `QNN`s and `QuantumModel`s containing either *trainable* and/or *non-trainable* parameters
+(see [the parameters tutorial](../tutorials/parameters.md) for a refresh about different parameter types):

 * [`train_with_grad`][qadence.ml_tools.train_with_grad] for gradient-based optimization using PyTorch native optimizers
 * [`train_gradient_free`][qadence.ml_tools.train_gradient_free] for gradient-free optimization using
diff --git a/docs/qml/qml_constructors.md b/docs/qml/qml_constructors.md
index 1250719c8..773ca6a26 100644
--- a/docs/qml/qml_constructors.md
+++ b/docs/qml/qml_constructors.md
@@ -145,7 +145,7 @@ from qadence.draw import html_string # markdown-exec: hide
 print(html_string(ansatz, size="8,4")) # markdown-exec: hide
 ```

-Having a truly *hardware-efficient* ansatz means that the entangling operation can be chosen according to each device's native interactions. Besides digital operations, in Qadence it is also possible to build digital-analog HEAs with the entanglement produced by the natural evolution of a set of interacting qubits, as natively implemented in neutral atom devices.
As with other digital-analog functions, this can be controlled with the `strategy` argument which can be chosen from the [`Strategy`](../qadence/types.md) enum type. Currently, only `Strategy.DIGITAL` and `Strategy.SDAQC` are available. By default, calling `strategy = Strategy.SDAQC` will use a global entangling Hamiltonian with Ising-like $NN$ interactions and constant interaction strength, ```python exec="on" source="material-block" html="1" session="ansatz" from qadence import Strategy diff --git a/mkdocs.yml b/mkdocs.yml index 5852e93d3..faf74e560 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -28,12 +28,10 @@ nav: - Variational quantum algorithms: - qml/index.md - - Tools for quantum machine learning: - - Constructors: qml/qml_constructors.md - - Training tools: qml/ml_tools.md - - Example applications: - - Quantum circuit learning: qml/qcl.md - - Solving MaxCut with QAOA: qml/qaoa.md + - Constructors: qml/qml_constructors.md + - Training tools: qml/ml_tools.md + - Quantum circuit learning: qml/qcl.md + - Solving MaxCut with QAOA: qml/qaoa.md - Advanced Tutorials: - Quantum circuits differentiation: advanced_tutorials/differentiability.md From 9b4bd6d2dc188c9f4776863826fdc1037417fff2 Mon Sep 17 00:00:00 2001 From: jpmoutinho Date: Wed, 18 Oct 2023 11:32:44 +0200 Subject: [PATCH 32/34] fix --- docs/qml/ml_tools.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/qml/ml_tools.md b/docs/qml/ml_tools.md index 8fd2b1b96..aeb7ad465 100644 --- a/docs/qml/ml_tools.md +++ b/docs/qml/ml_tools.md @@ -51,7 +51,7 @@ for i in range(n_epochs): For training QML models, Qadence also offers a few out-of-the-box routines for optimizing differentiable models, _e.g._ `QNN`s and `QuantumModel`s containing either *trainable* and/or *non-trainable* parameters -(see [the parameters tutorial](../tutorials/parameters.md) for a refresh about different parameter types): +(see [the parameters tutorial](../tutorials/parameters.md) for detailed information about parameter types): * [`train_with_grad`][qadence.ml_tools.train_with_grad] for gradient-based optimization using PyTorch native optimizers * [`train_gradient_free`][qadence.ml_tools.train_gradient_free] for gradient-free optimization using From af4aeeefa653be86876c75650ff4d2cea56cb858 Mon Sep 17 00:00:00 2001 From: jpmoutinho Date: Wed, 18 Oct 2023 11:34:34 +0200 Subject: [PATCH 33/34] more fix --- docs/qml/index.md | 2 +- docs/qml/ml_tools.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/qml/index.md b/docs/qml/index.md index 838da66bc..cfbe37228 100644 --- a/docs/qml/index.md +++ b/docs/qml/index.md @@ -30,7 +30,7 @@ fm = qd.kron(RX(i, acos(fp)) for i in range(n_qubits)) # the name of the assigned to the feature parameter inputs = {"phi": torch.rand(3)} samples = qd.sample(fm, values=inputs) -print(samples[0]) # markdown-exec: hide +print(f"samples = {samples[0]}") # markdown-exec: hide ``` The [`constructors.feature_map`][qadence.constructors.feature_map] module provides diff --git a/docs/qml/ml_tools.md b/docs/qml/ml_tools.md index aeb7ad465..6d7d717b5 100644 --- a/docs/qml/ml_tools.md +++ b/docs/qml/ml_tools.md @@ -50,7 +50,7 @@ for i in range(n_epochs): ## Optimization routines For training QML models, Qadence also offers a few out-of-the-box routines for optimizing differentiable -models, _e.g._ `QNN`s and `QuantumModel`s containing either *trainable* and/or *non-trainable* parameters +models, _e.g._ `QNN`s and `QuantumModel`, containing either *trainable* and/or *non-trainable* parameters (see [the 
parameters tutorial](../tutorials/parameters.md) for detailed information about parameter types): * [`train_with_grad`][qadence.ml_tools.train_with_grad] for gradient-based optimization using PyTorch native optimizers From 4c9134287f35bcf1fd918bd4bec431263152b1f4 Mon Sep 17 00:00:00 2001 From: jpmoutinho Date: Wed, 18 Oct 2023 11:50:16 +0200 Subject: [PATCH 34/34] fix --- docs/qml/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/qml/index.md b/docs/qml/index.md index cfbe37228..0945a80de 100644 --- a/docs/qml/index.md +++ b/docs/qml/index.md @@ -47,7 +47,7 @@ simple observable $X(0) \otimes X(1)$. We use the convenience `QNN` quantum mode ```python exec="on" source="material-block" result="json" session="qml" ansatz = qd.hea(n_qubits, strategy="sDAQC") -circuit = qd.QuantumCircuit(n_qubits, feature_map, ansatz) +circuit = qd.QuantumCircuit(n_qubits, fm, ansatz) observable = qd.kron(X(0), X(1)) model = qd.QNN(circuit, observable)
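
Taken together, these patches document one workflow: build a feature map with `feature_map`, compose it with a `hea` ansatz into a `QuantumCircuit`, wrap it in a `QNN`, and train with ordinary PyTorch. Below is a minimal, self-contained sketch of that workflow. It assumes the `qadence` API exactly as shown in the patched docs (`feature_map`, `BasisSet`, `ReuploadScaling`, `hea`, `QuantumCircuit`, `QNN`, `kron`); the hyperparameters, tensor shapes, and the sine-fitting target are illustrative choices, not taken from the patches.

```python
# Minimal end-to-end sketch; assumes the qadence API as documented in the
# patches above. Hyperparameters and shapes are illustrative only.
import torch
import qadence as qd
from qadence import BasisSet, ReuploadScaling
from qadence.operations import X

n_qubits = 4

# Chebyshev encoding with tower scaling, per the refreshed feature-map API
fm = qd.feature_map(
    n_qubits,
    param="phi",
    fm_type=BasisSet.CHEBYSHEV,
    reupload_scaling=ReuploadScaling.TOWER,
)

# Hardware-efficient ansatz and QNN wrapping, as in docs/qml/index.md
ansatz = qd.hea(n_qubits)
circuit = qd.QuantumCircuit(n_qubits, fm, ansatz)
observable = qd.kron(X(0), X(1))
model = qd.QNN(circuit, observable)

# `QNN` is a torch.nn.Module, so a plain PyTorch training loop works
x = torch.rand(20)  # inside the Chebyshev feature range (-1, 1)
y = torch.sin(x)

optimizer = torch.optim.Adam(model.parameters(), lr=0.05)
for epoch in range(100):
    optimizer.zero_grad()
    out = model({"phi": x})  # expectation values, one per input point
    loss = torch.nn.functional.mse_loss(out.squeeze(), y)
    loss.backward()
    optimizer.step()

print(f"final loss: {loss.item():.4f}")
```

The dict-valued forward call mirrors the `values = {"phi": ...}` usage in `docs/qml/index.md`; for a managed training loop with checkpointing and logging, `qadence.ml_tools.train_with_grad` together with a `TrainConfig`, as in `docs/qml/ml_tools.md`, is the drop-in alternative.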