From f5e7e6dccd154916b4192304f489ce0beb431284 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Thu, 15 Feb 2024 03:43:53 -0500 Subject: [PATCH 01/14] checkpoint Signed-off-by: Jinzhe Zeng --- deepmd/dpmodel/fitting/invar_fitting.py | 42 ++++++ deepmd/tf/env.py | 27 ++-- deepmd/tf/fit/ener.py | 104 ++++++++++++- deepmd/tf/fit/fitting.py | 134 +++++++++++++++++ source/tests/consistent/fitting/common.py | 33 ++++ source/tests/consistent/fitting/test_ener.py | 149 +++++++++++++++++++ 6 files changed, 471 insertions(+), 18 deletions(-) create mode 100644 source/tests/consistent/fitting/common.py create mode 100644 source/tests/consistent/fitting/test_ener.py diff --git a/deepmd/dpmodel/fitting/invar_fitting.py b/deepmd/dpmodel/fitting/invar_fitting.py index 58607a9f26..573b542174 100644 --- a/deepmd/dpmodel/fitting/invar_fitting.py +++ b/deepmd/dpmodel/fitting/invar_fitting.py @@ -371,3 +371,45 @@ def call( else: outs = self.nets[()](xx) + self.bias_atom_e[atype] return {self.var_name: outs} + + +class EnergyFittingNet(InvarFitting): + def __init__( + self, + ntypes: int, + dim_descrpt: int, + neuron: List[int] = [120, 120, 120], + resnet_dt: bool = True, + numb_fparam: int = 0, + numb_aparam: int = 0, + rcond: Optional[float] = None, + tot_ener_zero: bool = False, + trainable: Optional[List[bool]] = None, + atom_ener: Optional[List[float]] = None, + activation_function: str = "tanh", + precision: str = DEFAULT_PRECISION, + layer_name: Optional[List[Optional[str]]] = None, + use_aparam_as_mask: bool = False, + spin: Any = None, + distinguish_types: bool = False, + ): + super().__init__( + var_name="energy", + ntypes=ntypes, + dim_descrpt=dim_descrpt, + dim_out=1, + neuron=neuron, + resnet_dt=resnet_dt, + numb_fparam=numb_fparam, + numb_aparam=numb_aparam, + rcond=rcond, + tot_ener_zero=tot_ener_zero, + trainable=trainable, + atom_ener=atom_ener, + activation_function=activation_function, + precision=precision, + layer_name=layer_name, + use_aparam_as_mask=use_aparam_as_mask, + spin=spin, + distinguish_types=distinguish_types, + ) diff --git a/deepmd/tf/env.py b/deepmd/tf/env.py index fe5bb81bae..2afe5cc862 100644 --- a/deepmd/tf/env.py +++ b/deepmd/tf/env.py @@ -140,17 +140,22 @@ def dlopen_library(module: str, filename: str): r"filter_type_(all)/(idt)_(\d+)|" )[:-1] +# subpatterns: +# \1: layer index or "final" +# \2: type of centeral atom, optional +# the last: weight name FITTING_NET_PATTERN = str( - r"layer_\d+/matrix|" - r"layer_\d+_type_\d+/matrix|" - r"layer_\d+/bias|" - r"layer_\d+_type_\d+/bias|" - r"layer_\d+/idt|" - r"layer_\d+_type_\d+/idt|" - r"final_layer/matrix|" - r"final_layer_type_\d+/matrix|" - r"final_layer/bias|" - r"final_layer_type_\d+/bias|" + r"layer_(\d+)/(matrix)|" + r"layer_(\d+)_type_(\d+)/(matrix)|" + r"layer_(\d+)/(bias)|" + r"layer_(\d+)_type_(\d+)/(bias)|" + r"layer_(\d+)/(idt)|" + r"layer_(\d+)_type_(\d+)/(idt)|" + r"(final)_layer/(matrix)|" + r"(final)_layer_type_(\d+)/(matrix)|" + r"(final)_layer/(bias)|" + r"(final)_layer_type_(\d+)/(bias)|" + # TODO: not sure how to parse for shared layers... # layer_name r"share_.+_type_\d/matrix|" r"share_.+_type_\d/bias|" @@ -158,7 +163,7 @@ def dlopen_library(module: str, filename: str): r"share_.+/matrix|" r"share_.+/bias|" r"share_.+/idt|" -) +)[:-1] TYPE_EMBEDDING_PATTERN = str( r"type_embed_net+/matrix_\d+|" diff --git a/deepmd/tf/fit/ener.py b/deepmd/tf/fit/ener.py index 751e5091bd..ac5a2a7187 100644 --- a/deepmd/tf/fit/ener.py +++ b/deepmd/tf/fit/ener.py @@ -1,6 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import logging from typing import ( + TYPE_CHECKING, List, Optional, ) @@ -53,6 +54,11 @@ Spin, ) +if TYPE_CHECKING: + from deepmd.tf.descriptor import ( + Descriptor, + ) + log = logging.getLogger(__name__) @@ -130,7 +136,7 @@ class EnerFitting(Fitting): def __init__( self, - descrpt: tf.Tensor, + descrpt: "Descriptor", neuron: List[int] = [120, 120, 120], resnet_dt: bool = True, numb_fparam: int = 0, @@ -176,6 +182,7 @@ def __init__( self.ntypes_spin = self.spin.get_ntypes_spin() if self.spin is not None else 0 self.seed_shift = one_layer_rand_seed_shift() self.tot_ener_zero = tot_ener_zero + self.activation_function_name = activation_function self.fitting_activation_fn = get_activation_func(activation_function) self.fitting_precision = get_precision(precision) self.trainable = trainable @@ -202,16 +209,16 @@ def __init__( add_data_requirement( "fparam", self.numb_fparam, atomic=False, must=True, high_prec=False ) - self.fparam_avg = None - self.fparam_std = None - self.fparam_inv_std = None + self.fparam_avg = None + self.fparam_std = None + self.fparam_inv_std = None if self.numb_aparam > 0: add_data_requirement( "aparam", self.numb_aparam, atomic=True, must=True, high_prec=False ) - self.aparam_avg = None - self.aparam_std = None - self.aparam_inv_std = None + self.aparam_avg = None + self.aparam_std = None + self.aparam_inv_std = None self.fitting_net_variables = None self.mixed_prec = None @@ -921,3 +928,86 @@ def get_loss(self, loss: dict, lr) -> Loss: return EnerSpinLoss(**loss, use_spin=self.spin.use_spin) else: raise RuntimeError("unknown loss type") + + @classmethod + def deserialize(cls, data: dict, suffix: str): + """Deserialize the model. + + Parameters + ---------- + data : dict + The serialized data + + Returns + ------- + Model + The deserialized model + """ + fitting = cls(**data) + fitting.fitting_net_variables = cls.from_dp_variables( + data["@variables"]["networks"] + ) + fitting.bias_atom_e = data["@variables"]["bias_atom_e"] + if fitting.numb_fparam > 0: + fitting.fparam_avg = data["@variables"]["fparam_avg"] + fitting.fparam_inv_std = data["@variables"]["fparam_inv_std"] + if fitting.numb_aparam > 0: + fitting.aparam_avg = data["@variables"]["aparam_avg"] + fitting.aparam_inv_std = data["@variables"]["aparam_inv_std"] + return fitting + + def serialize(self, suffix: str) -> dict: + """Serialize the model. + + Returns + ------- + dict + The serialized data + """ + data = { + "var_name": "energy", + "ntypes": self.ntypes, + "dim_descrpt": self.dim_descrpt, + # very bad design: type embedding is not passed to the class + # TODO: refactor the class + "distinguish_types": True, + "dim_out": 1, + "neuron": self.n_neuron, + "resnet_dt": self.resnet_dt, + "numb_fparam": self.numb_fparam, + "numb_aparam": self.numb_aparam, + "rcond": self.rcond, + "tot_ener_zero": self.tot_ener_zero, + "trainable": self.trainable, + "atom_ener": self.atom_ener, + "activation_function": self.activation_function_name, + "precision": self.fitting_precision.name, + "layer_name": self.layer_name, + "use_aparam_as_mask": self.use_aparam_as_mask, + "spin": self.spin, + "nets": self.serialize_network( + ntypes=self.ntypes, + # TODO: consider type embeddings + ndim=1, + in_dim=self.dim_descrpt, + neuron=self.n_neuron, + activation_function=self.activation_function_name, + resnet_dt=self.resnet_dt, + variables=self.fitting_net_variables, + suffix=suffix, + ), + "@variables": { + "bias_atom_e": self.bias_atom_e, + "fparam_avg": self.fparam_avg, + "fparam_inv_std": self.fparam_inv_std, + "aparam_avg": self.aparam_avg, + "aparam_inv_std": self.aparam_inv_std, + }, + } + if self.numb_fparam > 0: + data["@variables"]["fparam_avg"] = self.fparam_avg + data["@variables"]["fparam_inv_std"] = self.fparam_inv_std + if self.numb_aparam > 0: + data["@variables"]["aparam_avg"] = self.aparam_avg + data["@variables"]["aparam_inv_std"] = self.aparam_inv_std + return data diff --git a/deepmd/tf/fit/fitting.py b/deepmd/tf/fit/fitting.py index 5d666a19f7..ca08832853 100644 --- a/deepmd/tf/fit/fitting.py +++ b/deepmd/tf/fit/fitting.py @@ -1,12 +1,19 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +import re from abc import ( abstractmethod, ) from typing import ( Callable, + List, ) +from deepmd.dpmodel.utils.network import ( + FittingNet, + NetworkCollection, +) from deepmd.tf.env import ( + FITTING_NET_PATTERN, tf, ) from deepmd.tf.loss.loss import ( @@ -102,3 +109,130 @@ def get_loss(self, loss: dict, lr) -> Loss: Loss the loss function """ + + def serialize_network( + self, + ntypes: int, + ndim: int, + in_dim: int, + neuron: List[int], + activation_function: str, + resnet_dt: bool, + variables: dict, + suffix: str = "", + ) -> dict: + """Serialize network. + + Parameters + ---------- + ntypes : int + The number of types + ndim : int + The dimension of elements + in_dim : int + The input dimension + neuron : List[int] + The neuron list + activation_function : str + The activation function + resnet_dt : bool + Whether to use resnet + variables : dict + The input variables + suffix : str, optional + The suffix of the scope + + Returns + ------- + dict + The converted network data + """ + fittings = NetworkCollection( + ntypes=ntypes, + ndim=ndim, + network_type="fitting_network", + ) + if suffix != "": + fitting_net_pattern = ( + FITTING_NET_PATTERN.replace("/(idt)", suffix + "/(idt)") + .replace("/(bias)", suffix + "/(bias)") + .replace("/(matrix)", suffix + "/(matrix)") + ) + else: + fitting_net_pattern = FITTING_NET_PATTERN + for key, value in variables.items(): + m = re.search(fitting_net_pattern, key) + m = [mm for mm in m.groups() if mm is not None] + layer_idx = int(m[0]) if m[0] != "final" else len(neuron) + weight_name = m[-1] + if ndim == 0: + network_idx = () + elif ndim == 1: + network_idx = (int(m[1]),) + else: + raise ValueError(f"Invalid ndim: {ndim}") + if fittings[network_idx] is None: + # initialize the network if it is not initialized + fittings[network_idx] = FittingNet( + in_dim=in_dim, + out_dim=1, + neuron=neuron, + activation_function=activation_function, + resnet_dt=resnet_dt, + precision=self.precision.name, + bias_out=True, + ) + assert fittings[network_idx] is not None + if weight_name == "idt": + value = value.ravel() + fittings[network_idx][layer_idx][weight_name] = value + return fittings.serialize() + + @classmethod + def deserialize_network(cls, data: dict, suffix: str = "") -> dict: + """Deserialize network. + + Parameters + ---------- + data : dict + The input network data + suffix : str, optional + The suffix of the scope + + Returns + ------- + variables : dict + The input variables + """ + embedding_net_variables = {} + embeddings = NetworkCollection.deserialize(data) + for ii in range(embeddings.ntypes**embeddings.ndim): + net_idx = [] + rest_ii = ii + for _ in range(embeddings.ndim): + net_idx.append(rest_ii % embeddings.ntypes) + rest_ii //= embeddings.ntypes + net_idx = tuple(net_idx) + if embeddings.ndim == 0: + key = "" + elif embeddings.ndim == 1: + key = "_type_" + key[5:] + else: + raise ValueError(f"Invalid ndim: {embeddings.ndim}") + network = embeddings[net_idx] + assert network is not None + for layer_idx, layer in enumerate(network.layers): + if layer_idx == len(network.layers) - 1: + layer_name = "final_layer" + else: + layer_name = f"layer_{layer_idx}" + embedding_net_variables[f"{layer_name}{key}{suffix}/matrix"] = layer.w + embedding_net_variables[f"{layer_name}{key}{suffix}/bias"] = layer.b + if layer.idt is not None: + embedding_net_variables[ + f"{layer_name}{key}{suffix}/idt" + ] = layer.idt.reshape(1, -1) + else: + # prevent keyError + embedding_net_variables[f"{layer_name}{key}{suffix}/idt_"] = 0.0 + return embedding_net_variables diff --git a/source/tests/consistent/fitting/common.py b/source/tests/consistent/fitting/common.py new file mode 100644 index 0000000000..5b901ecd7a --- /dev/null +++ b/source/tests/consistent/fitting/common.py @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + + +from ..common import ( + INSTALLED_PT, + INSTALLED_TF, +) + +if INSTALLED_PT: + pass +if INSTALLED_TF: + from deepmd.tf.env import ( + GLOBAL_TF_FLOAT_PRECISION, + tf, + ) + + +class FittingTest: + """Useful utilities for descriptor tests.""" + + def build_tf_fitting(self, obj, inputs, natoms, suffix): + t_inputs = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None], name="i_inputs") + t_natoms = tf.placeholder(tf.int32, natoms.shape, name="i_natoms") + t_des = obj.build( + t_inputs, + t_natoms, + {}, + suffix=suffix, + ) + return [t_des], { + t_inputs: inputs, + t_natoms: natoms, + } diff --git a/source/tests/consistent/fitting/test_ener.py b/source/tests/consistent/fitting/test_ener.py new file mode 100644 index 0000000000..1b57d0987f --- /dev/null +++ b/source/tests/consistent/fitting/test_ener.py @@ -0,0 +1,149 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest +from typing import ( + Any, + Tuple, +) + +import numpy as np + +from deepmd.dpmodel.descriptor.se_e2_a import DescrptSeA as DescrptSeADP +from deepmd.env import ( + GLOBAL_NP_FLOAT_PRECISION, +) + +from ..common import ( + INSTALLED_PT, + INSTALLED_TF, + CommonTest, + parameterized, +) +from .common import ( + DescriptorTest, +) + +if INSTALLED_PT: + from deepmd.pt.model.descriptor.se_a import DescrptSeA as DescrptSeAPT +else: + DescrptSeAPT = None +if INSTALLED_TF: + from deepmd.tf.descriptor.se_a import DescrptSeA as DescrptSeATF +else: + DescrptSeATF = None +from deepmd.utils.argcheck import ( + descrpt_se_a_args, +) + + +@parameterized( + (True, False), # resnet_dt + (True, False), # type_one_side + ([], [[0, 1]]), # excluded_types +) +class TestEner(CommonTest, DescriptorTest, unittest.TestCase): + @property + def data(self) -> dict: + ( + resnet_dt, + type_one_side, + excluded_types, + ) = self.param + return { + "sel": [10, 10], + "rcut_smth": 5.80, + "rcut": 6.00, + "neuron": [6, 12, 24], + "axis_neuron": 3, + "resnet_dt": resnet_dt, + "type_one_side": type_one_side, + "exclude_types": excluded_types, + "seed": 1145141919810, + } + + @property + def skip_pt(self) -> bool: + ( + resnet_dt, + type_one_side, + excluded_types, + ) = self.param + return not type_one_side or excluded_types != [] or CommonTest.skip_pt + + @property + def skip_dp(self) -> bool: + ( + resnet_dt, + type_one_side, + excluded_types, + ) = self.param + return not type_one_side or excluded_types != [] or CommonTest.skip_dp + + tf_class = DescrptSeATF + dp_class = DescrptSeADP + pt_class = DescrptSeAPT + args = descrpt_se_a_args() + + def setUp(self): + CommonTest.setUp(self) + + self.ntypes = 2 + self.coords = np.array( + [ + 12.83, + 2.56, + 2.18, + 12.09, + 2.87, + 2.74, + 00.25, + 3.32, + 1.68, + 3.36, + 3.00, + 1.81, + 3.51, + 2.51, + 2.60, + 4.27, + 3.22, + 1.56, + ], + dtype=GLOBAL_NP_FLOAT_PRECISION, + ) + self.atype = np.array([0, 1, 1, 0, 1, 1], dtype=np.int32) + self.box = np.array( + [13.0, 0.0, 0.0, 0.0, 13.0, 0.0, 0.0, 0.0, 13.0], + dtype=GLOBAL_NP_FLOAT_PRECISION, + ) + self.natoms = np.array([6, 6, 2, 4], dtype=np.int32) + + def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: + return self.build_tf_descriptor( + obj, + self.natoms, + self.coords, + self.atype, + self.box, + suffix, + ) + + def eval_dp(self, dp_obj: Any) -> Any: + return self.eval_dp_descriptor( + dp_obj, + self.natoms, + self.coords, + self.atype, + self.box, + ) + + def eval_pt(self, pt_obj: Any) -> Any: + return self.eval_pt_descriptor( + pt_obj, + self.natoms, + self.coords, + self.atype, + self.box, + ) + + def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: + return (ret[0],) From 6a65065faf6de42e9c4211d682548d0bbd8656a1 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sat, 17 Feb 2024 05:44:36 -0500 Subject: [PATCH 02/14] done Signed-off-by: Jinzhe Zeng --- deepmd/dpmodel/fitting/invar_fitting.py | 26 ++-- deepmd/pt/model/model/__init__.py | 2 + deepmd/pt/model/task/ener.py | 4 +- deepmd/pt/model/task/fitting.py | 6 +- deepmd/tf/fit/ener.py | 22 +-- deepmd/tf/fit/fitting.py | 32 ++-- deepmd/tf/model/model.py | 8 +- deepmd/tf/utils/graph.py | 6 +- source/tests/consistent/common.py | 4 +- source/tests/consistent/fitting/__init__.py | 1 + source/tests/consistent/fitting/common.py | 6 +- source/tests/consistent/fitting/test_ener.py | 152 +++++++++---------- 12 files changed, 143 insertions(+), 126 deletions(-) create mode 100644 source/tests/consistent/fitting/__init__.py diff --git a/deepmd/dpmodel/fitting/invar_fitting.py b/deepmd/dpmodel/fitting/invar_fitting.py index 573b542174..d4263b210a 100644 --- a/deepmd/dpmodel/fitting/invar_fitting.py +++ b/deepmd/dpmodel/fitting/invar_fitting.py @@ -138,7 +138,7 @@ def __init__( raise NotImplementedError("use_aparam_as_mask is not implemented") if layer_name is not None: raise NotImplementedError("layer_name is not implemented") - if atom_ener is not None: + if atom_ener is not None and atom_ener != []: raise NotImplementedError("atom_ener is not implemented") self.var_name = var_name @@ -152,6 +152,10 @@ def __init__( self.rcond = rcond self.tot_ener_zero = tot_ener_zero self.trainable = trainable + if self.trainable is None: + self.trainable = [True for ii in range(len(self.neuron) + 1)] + if isinstance(self.trainable, bool): + self.trainable = [self.trainable] * (len(self.neuron) + 1) self.atom_ener = atom_ener self.activation_function = activation_function self.precision = precision @@ -281,6 +285,8 @@ def deserialize(cls, data: dict) -> "InvarFitting": data = copy.deepcopy(data) variables = data.pop("@variables") nets = data.pop("nets") + data.pop("var_name") + data.pop("dim_out") obj = cls(**data) for kk in variables.keys(): obj[kk] = variables[kk] @@ -289,14 +295,14 @@ def deserialize(cls, data: dict) -> "InvarFitting": def call( self, - descriptor: np.array, - atype: np.array, - gr: Optional[np.array] = None, - g2: Optional[np.array] = None, - h2: Optional[np.array] = None, - fparam: Optional[np.array] = None, - aparam: Optional[np.array] = None, - ) -> Dict[str, np.array]: + descriptor: np.ndarray, + atype: np.ndarray, + gr: Optional[np.ndarray] = None, + g2: Optional[np.ndarray] = None, + h2: Optional[np.ndarray] = None, + fparam: Optional[np.ndarray] = None, + aparam: Optional[np.ndarray] = None, + ) -> Dict[str, np.ndarray]: """Calculate the fitting. Parameters @@ -392,6 +398,8 @@ def __init__( use_aparam_as_mask: bool = False, spin: Any = None, distinguish_types: bool = False, + # not used + seed: Optional[int] = None, ): super().__init__( var_name="energy", diff --git a/deepmd/pt/model/model/__init__.py b/deepmd/pt/model/model/__init__.py index 8199a8490b..81a3d9ffa0 100644 --- a/deepmd/pt/model/model/__init__.py +++ b/deepmd/pt/model/model/__init__.py @@ -54,6 +54,7 @@ def get_zbl_model(model_params): fitting_net["ntypes"] = descriptor.get_ntypes() fitting_net["distinguish_types"] = descriptor.distinguish_types() fitting_net["embedding_width"] = descriptor.get_dim_out() + fitting_net["dim_descrpt"] = descriptor.get_dim_out() grad_force = "direct" not in fitting_net["type"] if not grad_force: fitting_net["out_dim"] = descriptor.get_dim_emb() @@ -89,6 +90,7 @@ def get_model(model_params): fitting_net["ntypes"] = descriptor.get_ntypes() fitting_net["distinguish_types"] = descriptor.distinguish_types() fitting_net["embedding_width"] = descriptor.get_dim_out() + fitting_net["dim_descrpt"] = descriptor.get_dim_out() grad_force = "direct" not in fitting_net["type"] if not grad_force: fitting_net["out_dim"] = descriptor.get_dim_emb() diff --git a/deepmd/pt/model/task/ener.py b/deepmd/pt/model/task/ener.py index f1dad4c58d..cc8e25ad5d 100644 --- a/deepmd/pt/model/task/ener.py +++ b/deepmd/pt/model/task/ener.py @@ -196,7 +196,7 @@ class EnergyFittingNet(InvarFitting): def __init__( self, ntypes: int, - embedding_width: int, + dim_descrpt: int, neuron: List[int] = [128, 128, 128], bias_atom_e: Optional[torch.Tensor] = None, resnet_dt: bool = True, @@ -210,7 +210,7 @@ def __init__( super().__init__( "energy", ntypes, - embedding_width, + dim_descrpt, 1, neuron=neuron, bias_atom_e=bias_atom_e, diff --git a/deepmd/pt/model/task/fitting.py b/deepmd/pt/model/task/fitting.py index b2d8c875ce..3e3c6f88d4 100644 --- a/deepmd/pt/model/task/fitting.py +++ b/deepmd/pt/model/task/fitting.py @@ -520,8 +520,8 @@ def serialize(self) -> dict: # "spin": self.spin , ## NOTICE: not supported by far "tot_ener_zero": False, - "trainable": True, - "atom_ener": None, + "trainable": [True] * (len(self.neuron) + 1), + "atom_ener": [], "layer_name": None, "use_aparam_as_mask": False, "spin": None, @@ -532,6 +532,8 @@ def deserialize(cls, data: dict) -> "GeneralFitting": data = copy.deepcopy(data) variables = data.pop("@variables") nets = data.pop("nets") + data.pop("var_name") + data.pop("dim_out") obj = cls(**data) for kk in variables.keys(): obj[kk] = to_torch_tensor(variables[kk]) diff --git a/deepmd/tf/fit/ener.py b/deepmd/tf/fit/ener.py index ac5a2a7187..d32c784818 100644 --- a/deepmd/tf/fit/ener.py +++ b/deepmd/tf/fit/ener.py @@ -55,9 +55,7 @@ ) if TYPE_CHECKING: - from deepmd.tf.descriptor import ( - Descriptor, - ) + pass log = logging.getLogger(__name__) @@ -136,7 +134,8 @@ class EnerFitting(Fitting): def __init__( self, - descrpt: "Descriptor", + ntypes: int, + dim_descrpt: int, neuron: List[int] = [120, 120, 120], resnet_dt: bool = True, numb_fparam: int = 0, @@ -156,8 +155,8 @@ def __init__( ) -> None: """Constructor.""" # model param - self.ntypes = descrpt.get_ntypes() - self.dim_descrpt = descrpt.get_dim_out() + self.ntypes = ntypes + self.dim_descrpt = dim_descrpt self.use_aparam_as_mask = use_aparam_as_mask # args = ()\ # .add('numb_fparam', int, default = 0)\ @@ -944,8 +943,9 @@ def deserialize(cls, data: dict, suffix: str): The deserialized model """ fitting = cls(**data) - fitting.fitting_net_variables = cls.from_dp_variables( - data["@variables"]["networks"] + fitting.fitting_net_variables = cls.deserialize_network( + data["nets"], + suffix=suffix, ) fitting.bias_atom_e = data["@variables"]["bias_atom_e"] if fitting.numb_fparam > 0: @@ -1004,10 +1004,4 @@ def serialize(self, suffix: str) -> dict: "aparam_inv_std": self.aparam_inv_std, }, } - if self.numb_fparam > 0: - data["@variables"]["fparam_avg"] = self.fparam_avg - data["@variables"]["fparam_inv_std"] = self.fparam_inv_std - if self.numb_aparam > 0: - data["@variables"]["aparam_avg"] = self.aparam_avg - data["@variables"]["aparam_inv_std"] = self.aparam_inv_std return data diff --git a/deepmd/tf/fit/fitting.py b/deepmd/tf/fit/fitting.py index ca08832853..2307fb957d 100644 --- a/deepmd/tf/fit/fitting.py +++ b/deepmd/tf/fit/fitting.py @@ -204,35 +204,35 @@ def deserialize_network(cls, data: dict, suffix: str = "") -> dict: variables : dict The input variables """ - embedding_net_variables = {} - embeddings = NetworkCollection.deserialize(data) - for ii in range(embeddings.ntypes**embeddings.ndim): + fitting_net_variables = {} + fittings = NetworkCollection.deserialize(data) + for ii in range(fittings.ntypes**fittings.ndim): net_idx = [] rest_ii = ii - for _ in range(embeddings.ndim): - net_idx.append(rest_ii % embeddings.ntypes) - rest_ii //= embeddings.ntypes + for _ in range(fittings.ndim): + net_idx.append(rest_ii % fittings.ntypes) + rest_ii //= fittings.ntypes net_idx = tuple(net_idx) - if embeddings.ndim == 0: + if fittings.ndim == 0: key = "" - elif embeddings.ndim == 1: - key = "_type_" + key[5:] + elif fittings.ndim == 1: + key = "_type_" + str(net_idx[0]) else: - raise ValueError(f"Invalid ndim: {embeddings.ndim}") - network = embeddings[net_idx] + raise ValueError(f"Invalid ndim: {fittings.ndim}") + network = fittings[net_idx] assert network is not None for layer_idx, layer in enumerate(network.layers): if layer_idx == len(network.layers) - 1: layer_name = "final_layer" else: layer_name = f"layer_{layer_idx}" - embedding_net_variables[f"{layer_name}{key}{suffix}/matrix"] = layer.w - embedding_net_variables[f"{layer_name}{key}{suffix}/bias"] = layer.b + fitting_net_variables[f"{layer_name}{key}{suffix}/matrix"] = layer.w + fitting_net_variables[f"{layer_name}{key}{suffix}/bias"] = layer.b if layer.idt is not None: - embedding_net_variables[ + fitting_net_variables[ f"{layer_name}{key}{suffix}/idt" ] = layer.idt.reshape(1, -1) else: # prevent keyError - embedding_net_variables[f"{layer_name}{key}{suffix}/idt_"] = 0.0 - return embedding_net_variables + fitting_net_variables[f"{layer_name}{key}{suffix}/idt"] = 0.0 + return fitting_net_variables diff --git a/deepmd/tf/model/model.py b/deepmd/tf/model/model.py index eee138907f..ac970e0b53 100644 --- a/deepmd/tf/model/model.py +++ b/deepmd/tf/model/model.py @@ -631,7 +631,13 @@ def __init__( if isinstance(fitting_net, Fitting): self.fitting = fitting_net else: - self.fitting = Fitting(**fitting_net, descrpt=self.descrpt, spin=self.spin) + self.fitting = Fitting( + **fitting_net, + descrpt=self.descrpt, + spin=self.spin, + ntypes=self.descrpt.get_ntypes(), + dim_descrpt=self.descrpt.get_dim_out(), + ) self.rcut = self.descrpt.get_rcut() self.ntypes = self.descrpt.get_ntypes() diff --git a/deepmd/tf/utils/graph.py b/deepmd/tf/utils/graph.py index 7e67cf27a6..8c4b0fcc84 100644 --- a/deepmd/tf/utils/graph.py +++ b/deepmd/tf/utils/graph.py @@ -356,9 +356,9 @@ def get_fitting_net_nodes_from_graph_def( """ if suffix != "": fitting_net_pattern = ( - FITTING_NET_PATTERN.replace("/idt", suffix + "/idt") - .replace("/bias", suffix + "/bias") - .replace("/matrix", suffix + "/matrix") + FITTING_NET_PATTERN.replace("/(idt)", suffix + "/(idt)") + .replace("/(bias)", suffix + "/(bias)") + .replace("/(matrix)", suffix + "/(matrix)") ) else: fitting_net_pattern = FITTING_NET_PATTERN diff --git a/source/tests/consistent/common.py b/source/tests/consistent/common.py index e5633726ef..de6630ce35 100644 --- a/source/tests/consistent/common.py +++ b/source/tests/consistent/common.py @@ -59,6 +59,8 @@ class CommonTest(ABC): data: ClassVar[dict] """Arguments data.""" + addtional_data: ClassVar[dict] = {} + """Additional data that will not be checked.""" tf_class: ClassVar[Optional[type]] """TensorFlow model class.""" dp_class: ClassVar[Optional[type]] @@ -89,7 +91,7 @@ def init_backend_cls(self, cls) -> Any: base = Argument("arg", dict, sub_fields=self.args) data = base.normalize_value(self.data, trim_pattern="_*") base.check_value(data, strict=True) - return cls(**data) + return cls(**data, **self.addtional_data) @abstractmethod def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: diff --git a/source/tests/consistent/fitting/__init__.py b/source/tests/consistent/fitting/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/source/tests/consistent/fitting/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/source/tests/consistent/fitting/common.py b/source/tests/consistent/fitting/common.py index 5b901ecd7a..953e21a807 100644 --- a/source/tests/consistent/fitting/common.py +++ b/source/tests/consistent/fitting/common.py @@ -18,16 +18,18 @@ class FittingTest: """Useful utilities for descriptor tests.""" - def build_tf_fitting(self, obj, inputs, natoms, suffix): + def build_tf_fitting(self, obj, inputs, natoms, atype, suffix): t_inputs = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None], name="i_inputs") t_natoms = tf.placeholder(tf.int32, natoms.shape, name="i_natoms") + t_atype = tf.placeholder(tf.int32, [None], name="i_atype") t_des = obj.build( t_inputs, t_natoms, - {}, + {"atype": t_atype}, suffix=suffix, ) return [t_des], { t_inputs: inputs, t_natoms: natoms, + t_atype: atype, } diff --git a/source/tests/consistent/fitting/test_ener.py b/source/tests/consistent/fitting/test_ener.py index 1b57d0987f..4e562e9e42 100644 --- a/source/tests/consistent/fitting/test_ener.py +++ b/source/tests/consistent/fitting/test_ener.py @@ -7,7 +7,7 @@ import numpy as np -from deepmd.dpmodel.descriptor.se_e2_a import DescrptSeA as DescrptSeADP +from deepmd.dpmodel.fitting.invar_fitting import EnergyFittingNet as EnerFittingDP from deepmd.env import ( GLOBAL_NP_FLOAT_PRECISION, ) @@ -19,131 +19,131 @@ parameterized, ) from .common import ( - DescriptorTest, + FittingTest, ) if INSTALLED_PT: - from deepmd.pt.model.descriptor.se_a import DescrptSeA as DescrptSeAPT + import torch + + from deepmd.pt.model.task.ener import EnergyFittingNet as EnerFittingPT + from deepmd.pt.utils.env import DEVICE as PT_DEVICE else: - DescrptSeAPT = None + EnerFittingPT = object if INSTALLED_TF: - from deepmd.tf.descriptor.se_a import DescrptSeA as DescrptSeATF + from deepmd.tf.fit.ener import EnerFitting as EnerFittingTF else: - DescrptSeATF = None + EnerFittingTF = object from deepmd.utils.argcheck import ( - descrpt_se_a_args, + fitting_ener, ) @parameterized( (True, False), # resnet_dt - (True, False), # type_one_side - ([], [[0, 1]]), # excluded_types + ("float64", "float32"), # precision + (True, False), # distinguish_types ) -class TestEner(CommonTest, DescriptorTest, unittest.TestCase): +class TestEner(CommonTest, FittingTest, unittest.TestCase): @property def data(self) -> dict: ( resnet_dt, - type_one_side, - excluded_types, + precision, + distinguish_types, ) = self.param return { - "sel": [10, 10], - "rcut_smth": 5.80, - "rcut": 6.00, - "neuron": [6, 12, 24], - "axis_neuron": 3, + "neuron": [5, 5, 5], "resnet_dt": resnet_dt, - "type_one_side": type_one_side, - "exclude_types": excluded_types, - "seed": 1145141919810, + "precision": precision, + "seed": 20240217, } + @property + def skip_tf(self) -> bool: + ( + resnet_dt, + precision, + distinguish_types, + ) = self.param + # TODO: distinguish_types + return not distinguish_types or CommonTest.skip_pt + @property def skip_pt(self) -> bool: ( resnet_dt, - type_one_side, - excluded_types, + precision, + distinguish_types, ) = self.param - return not type_one_side or excluded_types != [] or CommonTest.skip_pt + # TODO: float32 has bug + return precision == "float32" or CommonTest.skip_pt @property def skip_dp(self) -> bool: ( resnet_dt, - type_one_side, - excluded_types, + precision, + distinguish_types, ) = self.param - return not type_one_side or excluded_types != [] or CommonTest.skip_dp + # TODO: float32 has bug + return precision == "float32" or CommonTest.skip_dp - tf_class = DescrptSeATF - dp_class = DescrptSeADP - pt_class = DescrptSeAPT - args = descrpt_se_a_args() + tf_class = EnerFittingTF + dp_class = EnerFittingDP + pt_class = EnerFittingPT + args = fitting_ener() def setUp(self): CommonTest.setUp(self) self.ntypes = 2 - self.coords = np.array( - [ - 12.83, - 2.56, - 2.18, - 12.09, - 2.87, - 2.74, - 00.25, - 3.32, - 1.68, - 3.36, - 3.00, - 1.81, - 3.51, - 2.51, - 2.60, - 4.27, - 3.22, - 1.56, - ], - dtype=GLOBAL_NP_FLOAT_PRECISION, - ) - self.atype = np.array([0, 1, 1, 0, 1, 1], dtype=np.int32) - self.box = np.array( - [13.0, 0.0, 0.0, 0.0, 13.0, 0.0, 0.0, 0.0, 13.0], - dtype=GLOBAL_NP_FLOAT_PRECISION, - ) self.natoms = np.array([6, 6, 2, 4], dtype=np.int32) + self.inputs = np.ones((1, 6, 20), dtype=GLOBAL_NP_FLOAT_PRECISION) + self.atype = np.array([0, 1, 1, 0, 1, 1], dtype=np.int32) + # inconsistent if not sorted + self.atype.sort() + + @property + def addtional_data(self) -> dict: + ( + resnet_dt, + precision, + distinguish_types, + ) = self.param + return { + "ntypes": self.ntypes, + "dim_descrpt": self.inputs.shape[-1], + "distinguish_types": distinguish_types, + } def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: - return self.build_tf_descriptor( + return self.build_tf_fitting( obj, + self.inputs.ravel(), self.natoms, - self.coords, self.atype, - self.box, suffix, ) - def eval_dp(self, dp_obj: Any) -> Any: - return self.eval_dp_descriptor( - dp_obj, - self.natoms, - self.coords, - self.atype, - self.box, + def eval_pt(self, pt_obj: Any) -> Any: + return ( + pt_obj( + torch.from_numpy(self.inputs).to(device=PT_DEVICE), + torch.from_numpy(self.atype).to(device=PT_DEVICE), + )["energy"] + .detach() + .cpu() + .numpy() ) - def eval_pt(self, pt_obj: Any) -> Any: - return self.eval_pt_descriptor( - pt_obj, - self.natoms, - self.coords, + def eval_dp(self, dp_obj: Any) -> Any: + return dp_obj( + self.inputs, self.atype, - self.box, - ) + )["energy"] def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: - return (ret[0],) + if backend == self.RefBackend.TF: + # shape is not same + ret = ret[0].reshape(-1, self.natoms[0], 1) + return (ret,) From d8d7873b8b774f4688f080e9f09c4e5efc0fd339 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sat, 17 Feb 2024 05:57:06 -0500 Subject: [PATCH 03/14] test fparam Signed-off-by: Jinzhe Zeng --- deepmd/tf/fit/ener.py | 2 +- source/tests/consistent/fitting/common.py | 17 ++++++++--- source/tests/consistent/fitting/test_ener.py | 31 ++++++++++++++++++++ 3 files changed, 45 insertions(+), 5 deletions(-) diff --git a/deepmd/tf/fit/ener.py b/deepmd/tf/fit/ener.py index d32c784818..418f16f84c 100644 --- a/deepmd/tf/fit/ener.py +++ b/deepmd/tf/fit/ener.py @@ -989,7 +989,7 @@ def serialize(self, suffix: str) -> dict: ntypes=self.ntypes, # TODO: consider type embeddings ndim=1, - in_dim=self.dim_descrpt, + in_dim=self.dim_descrpt + self.numb_fparam + self.numb_aparam, neuron=self.n_neuron, activation_function=self.activation_function_name, resnet_dt=self.resnet_dt, diff --git a/source/tests/consistent/fitting/common.py b/source/tests/consistent/fitting/common.py index 953e21a807..276e81dbc6 100644 --- a/source/tests/consistent/fitting/common.py +++ b/source/tests/consistent/fitting/common.py @@ -18,18 +18,27 @@ class FittingTest: """Useful utilities for descriptor tests.""" - def build_tf_fitting(self, obj, inputs, natoms, atype, suffix): + def build_tf_fitting(self, obj, inputs, natoms, atype, fparam, suffix): t_inputs = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None], name="i_inputs") t_natoms = tf.placeholder(tf.int32, natoms.shape, name="i_natoms") t_atype = tf.placeholder(tf.int32, [None], name="i_atype") - t_des = obj.build( + extras = {} + feed_dict = {} + if fparam is not None: + t_fparam = tf.placeholder( + GLOBAL_TF_FLOAT_PRECISION, [None], name="i_fparam" + ) + extras["fparam"] = t_fparam + feed_dict[t_fparam] = fparam + t_out = obj.build( t_inputs, t_natoms, - {"atype": t_atype}, + {"atype": t_atype, **extras}, suffix=suffix, ) - return [t_des], { + return [t_out], { t_inputs: inputs, t_natoms: natoms, t_atype: atype, + **feed_dict, } diff --git a/source/tests/consistent/fitting/test_ener.py b/source/tests/consistent/fitting/test_ener.py index 4e562e9e42..1b4fab0a54 100644 --- a/source/tests/consistent/fitting/test_ener.py +++ b/source/tests/consistent/fitting/test_ener.py @@ -42,6 +42,7 @@ (True, False), # resnet_dt ("float64", "float32"), # precision (True, False), # distinguish_types + (0, 1), # numb_fparam ) class TestEner(CommonTest, FittingTest, unittest.TestCase): @property @@ -50,11 +51,13 @@ def data(self) -> dict: resnet_dt, precision, distinguish_types, + numb_fparam, ) = self.param return { "neuron": [5, 5, 5], "resnet_dt": resnet_dt, "precision": precision, + "numb_fparam": numb_fparam, "seed": 20240217, } @@ -64,6 +67,7 @@ def skip_tf(self) -> bool: resnet_dt, precision, distinguish_types, + numb_fparam, ) = self.param # TODO: distinguish_types return not distinguish_types or CommonTest.skip_pt @@ -74,6 +78,7 @@ def skip_pt(self) -> bool: resnet_dt, precision, distinguish_types, + numb_fparam, ) = self.param # TODO: float32 has bug return precision == "float32" or CommonTest.skip_pt @@ -84,6 +89,7 @@ def skip_dp(self) -> bool: resnet_dt, precision, distinguish_types, + numb_fparam, ) = self.param # TODO: float32 has bug return precision == "float32" or CommonTest.skip_dp @@ -102,6 +108,7 @@ def setUp(self): self.atype = np.array([0, 1, 1, 0, 1, 1], dtype=np.int32) # inconsistent if not sorted self.atype.sort() + self.fparam = -np.ones((1,), dtype=GLOBAL_NP_FLOAT_PRECISION) @property def addtional_data(self) -> dict: @@ -109,6 +116,7 @@ def addtional_data(self) -> dict: resnet_dt, precision, distinguish_types, + numb_fparam, ) = self.param return { "ntypes": self.ntypes, @@ -117,19 +125,35 @@ def addtional_data(self) -> dict: } def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: + ( + resnet_dt, + precision, + distinguish_types, + numb_fparam, + ) = self.param return self.build_tf_fitting( obj, self.inputs.ravel(), self.natoms, self.atype, + self.fparam if numb_fparam else None, suffix, ) def eval_pt(self, pt_obj: Any) -> Any: + ( + resnet_dt, + precision, + distinguish_types, + numb_fparam, + ) = self.param return ( pt_obj( torch.from_numpy(self.inputs).to(device=PT_DEVICE), torch.from_numpy(self.atype).to(device=PT_DEVICE), + fparam=torch.from_numpy(self.fparam).to(device=PT_DEVICE) + if numb_fparam + else None, )["energy"] .detach() .cpu() @@ -137,9 +161,16 @@ def eval_pt(self, pt_obj: Any) -> Any: ) def eval_dp(self, dp_obj: Any) -> Any: + ( + resnet_dt, + precision, + distinguish_types, + numb_fparam, + ) = self.param return dp_obj( self.inputs, self.atype, + fparam=self.fparam if numb_fparam else None, )["energy"] def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: From 0085db68c5d06f5a1bdde7888baa00e932a2d574 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sat, 17 Feb 2024 06:27:43 -0500 Subject: [PATCH 04/14] fix tests Signed-off-by: Jinzhe Zeng --- source/tests/pt/model/test_fitting_net.py | 4 +++- source/tests/pt/model/test_model.py | 4 +++- source/tests/pt/test_stat.py | 4 +++- source/tests/tf/test_data_large_batch.py | 9 ++++++--- source/tests/tf/test_fitting_ener_type.py | 3 ++- source/tests/tf/test_fitting_stat.py | 3 ++- source/tests/tf/test_gen_stat_data.py | 13 +++++++++++-- source/tests/tf/test_layer_name.py | 3 ++- source/tests/tf/test_model_loc_frame.py | 6 +++++- source/tests/tf/test_model_multi.py | 3 ++- source/tests/tf/test_model_se_a.py | 9 ++++++--- source/tests/tf/test_model_se_a_aparam.py | 3 ++- source/tests/tf/test_model_se_a_ebd.py | 3 ++- source/tests/tf/test_model_se_a_ebd_v2.py | 3 ++- source/tests/tf/test_model_se_a_fparam.py | 3 ++- source/tests/tf/test_model_se_a_srtab.py | 3 ++- source/tests/tf/test_model_se_a_type.py | 3 ++- source/tests/tf/test_model_se_atten.py | 12 ++++++++---- source/tests/tf/test_model_se_r.py | 3 ++- source/tests/tf/test_model_se_t.py | 3 ++- source/tests/tf/test_model_spin.py | 3 ++- 21 files changed, 71 insertions(+), 29 deletions(-) diff --git a/source/tests/pt/model/test_fitting_net.py b/source/tests/pt/model/test_fitting_net.py index e12a397347..2bcfb7b64c 100644 --- a/source/tests/pt/model/test_fitting_net.py +++ b/source/tests/pt/model/test_fitting_net.py @@ -95,7 +95,9 @@ def setUp(self): cnt += self.natoms[i + 2] fake_d = FakeDescriptor(2, 30) - self.dp_fn = EnerFitting(fake_d, self.n_neuron) + self.dp_fn = EnerFitting( + fake_d.get_ntypes(), fake_d.get_dim_out(), self.n_neuron + ) self.dp_fn.bias_atom_e = rng.uniform(size=[self.ntypes]) def test_consistency(self): diff --git a/source/tests/pt/model/test_model.py b/source/tests/pt/model/test_model.py index efe013a8a1..856b48064b 100644 --- a/source/tests/pt/model/test_model.py +++ b/source/tests/pt/model/test_model.py @@ -195,7 +195,9 @@ def _get_dp_model(self): neuron=self.filter_neuron, axis_neuron=self.axis_neuron, ) - dp_fitting = EnerFitting(descrpt=dp_descrpt, neuron=self.n_neuron) + dp_fitting = EnerFitting( + dp_descrpt.get_ntypes(), dp_descrpt.get_dim_out(), neuron=self.n_neuron + ) return EnerModel( dp_descrpt, dp_fitting, diff --git a/source/tests/pt/test_stat.py b/source/tests/pt/test_stat.py index bc95575a5a..e7cb61be81 100644 --- a/source/tests/pt/test_stat.py +++ b/source/tests/pt/test_stat.py @@ -122,7 +122,9 @@ def my_merge(energy, natoms): energy = self.dp_sampled["energy"] natoms = self.dp_sampled["natoms_vec"] energy, natoms = my_merge(energy, natoms) - dp_fn = EnerFitting(self.dp_d, self.n_neuron) + dp_fn = EnerFitting( + self.dp_d.get_ntypes(), self.dp_d.get_dim_out(), self.n_neuron + ) dp_fn.compute_output_stats(self.dp_sampled) bias_atom_e = compute_output_bias(energy, natoms) self.assertTrue(np.allclose(dp_fn.bias_atom_e, bias_atom_e[:, 0])) diff --git a/source/tests/tf/test_data_large_batch.py b/source/tests/tf/test_data_large_batch.py index c31a4c4a9a..bc021a4494 100644 --- a/source/tests/tf/test_data_large_batch.py +++ b/source/tests/tf/test_data_large_batch.py @@ -112,7 +112,8 @@ def test_data_mixed_type(self): jdata["model"]["descriptor"].pop("type", None) jdata["model"]["descriptor"]["ntypes"] = 2 descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True) - jdata["model"]["fitting_net"]["descrpt"] = descrpt + jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() + jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) typeebd_param = jdata["model"]["type_embedding"] typeebd = TypeEmbedNet( @@ -308,7 +309,8 @@ def test_stripped_data_mixed_type(self): jdata["model"]["descriptor"]["ntypes"] = 2 jdata["model"]["descriptor"]["stripped_type_embedding"] = True descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True) - jdata["model"]["fitting_net"]["descrpt"] = descrpt + jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() + jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) typeebd_param = jdata["model"]["type_embedding"] typeebd = TypeEmbedNet( @@ -504,7 +506,8 @@ def test_compressible_data_mixed_type(self): jdata["model"]["descriptor"]["stripped_type_embedding"] = True jdata["model"]["descriptor"]["attn_layer"] = 0 descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True) - jdata["model"]["fitting_net"]["descrpt"] = descrpt + jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() + jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) typeebd_param = jdata["model"]["type_embedding"] typeebd = TypeEmbedNet( diff --git a/source/tests/tf/test_fitting_ener_type.py b/source/tests/tf/test_fitting_ener_type.py index 4dd6fb80a1..61f2607032 100644 --- a/source/tests/tf/test_fitting_ener_type.py +++ b/source/tests/tf/test_fitting_ener_type.py @@ -54,7 +54,8 @@ def test_fitting(self): jdata["model"]["descriptor"].pop("type", None) descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True) - jdata["model"]["fitting_net"]["descrpt"] = descrpt + jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() + jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']]) diff --git a/source/tests/tf/test_fitting_stat.py b/source/tests/tf/test_fitting_stat.py index 9e2408c57b..100868fd18 100644 --- a/source/tests/tf/test_fitting_stat.py +++ b/source/tests/tf/test_fitting_stat.py @@ -81,7 +81,8 @@ def test(self): # fitting = EnerFitting(jdata['fitting_net'], descrpt) descrpt = DescrptSeA(6.0, 5.8, [46, 92], neuron=[25, 50, 100], axis_neuron=16) fitting = EnerFitting( - descrpt, + descrpt.get_ntypes(), + descrpt.get_dim_out(), neuron=[240, 240, 240], resnet_dt=True, numb_fparam=2, diff --git a/source/tests/tf/test_gen_stat_data.py b/source/tests/tf/test_gen_stat_data.py index 18191eb21d..fe4ec36b24 100644 --- a/source/tests/tf/test_gen_stat_data.py +++ b/source/tests/tf/test_gen_stat_data.py @@ -119,7 +119,12 @@ def test_ener_shift(self): ener_shift0 = data.compute_energy_shift(rcond=1) all_stat = make_stat_input(data, 4, merge_sys=False) descrpt = DescrptSeA(6.0, 5.8, [46, 92], neuron=[25, 50, 100], axis_neuron=16) - fitting = EnerFitting(descrpt, neuron=[240, 240, 240], resnet_dt=True) + fitting = EnerFitting( + descrpt.get_ntypes(), + descrpt.get_dim_out(), + neuron=[240, 240, 240], + resnet_dt=True, + ) ener_shift1 = fitting._compute_output_stats(all_stat, rcond=1) np.testing.assert_almost_equal(ener_shift0, ener_shift1) @@ -131,7 +136,11 @@ def test_ener_shift_assigned(self): all_stat = make_stat_input(data, 4, merge_sys=False) descrpt = DescrptSeA(6.0, 5.8, [46, 92], neuron=[25, 50, 100], axis_neuron=16) fitting = EnerFitting( - descrpt, neuron=[240, 240, 240], resnet_dt=True, atom_ener=[ae0, None, None] + descrpt.get_ntypes(), + descrpt.get_dim_out(), + neuron=[240, 240, 240], + resnet_dt=True, + atom_ener=[ae0, None, None], ) ener_shift1 = fitting._compute_output_stats(all_stat, rcond=1) # check assigned energy diff --git a/source/tests/tf/test_layer_name.py b/source/tests/tf/test_layer_name.py index 8c2264315f..d1c06f3a0e 100644 --- a/source/tests/tf/test_layer_name.py +++ b/source/tests/tf/test_layer_name.py @@ -66,7 +66,8 @@ def test_model(self): fitting_type_dict[fitting_key] = item_fitting_type item_fitting_param.pop("type", None) item_fitting_param.pop("fit_diag", None) - item_fitting_param["descrpt"] = descrpt + item_fitting_param["ntypes"] = descrpt.get_ntypes() + item_fitting_param["dim_descript"] = descrpt.get_dim_out() if item_fitting_type == "ener": fitting_dict[fitting_key] = EnerFitting( **item_fitting_param, uniform_seed=True diff --git a/source/tests/tf/test_model_loc_frame.py b/source/tests/tf/test_model_loc_frame.py index f97e349145..84467b436a 100644 --- a/source/tests/tf/test_model_loc_frame.py +++ b/source/tests/tf/test_model_loc_frame.py @@ -53,7 +53,11 @@ def test_model(self): jdata["model"]["descriptor"].pop("_comment", None) descrpt = DescrptLocFrame(**jdata["model"]["descriptor"]) fitting = EnerFitting( - descrpt, neuron=[240, 120, 60, 30, 10], seed=1, uniform_seed=True + descrpt.get_ntypes(), + descrpt.get_dim_out(), + neuron=[240, 120, 60, 30, 10], + seed=1, + uniform_seed=True, ) model = EnerModel( descrpt, diff --git a/source/tests/tf/test_model_multi.py b/source/tests/tf/test_model_multi.py index c526b479a6..bd7881bfc6 100644 --- a/source/tests/tf/test_model_multi.py +++ b/source/tests/tf/test_model_multi.py @@ -68,7 +68,8 @@ def test_model(self): fitting_type_dict[fitting_key] = item_fitting_type item_fitting_param.pop("type", None) item_fitting_param.pop("fit_diag", None) - item_fitting_param["descrpt"] = descrpt + item_fitting_param["ntypes"] = descrpt.get_ntypes() + item_fitting_param["dim_descript"] = descrpt.get_dim_out() if item_fitting_type == "ener": fitting_dict[fitting_key] = EnerFitting( **item_fitting_param, uniform_seed=True diff --git a/source/tests/tf/test_model_se_a.py b/source/tests/tf/test_model_se_a.py index 57a8f4af52..bf947fc940 100644 --- a/source/tests/tf/test_model_se_a.py +++ b/source/tests/tf/test_model_se_a.py @@ -74,7 +74,8 @@ def test_model_atom_ener(self): jdata["model"]["descriptor"].pop("type", None) descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True) - jdata["model"]["fitting_net"]["descrpt"] = descrpt + jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() + jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) model = EnerModel(descrpt, fitting) @@ -154,7 +155,8 @@ def test_model(self): jdata["model"]["descriptor"].pop("type", None) descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True) - jdata["model"]["fitting_net"]["descrpt"] = descrpt + jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() + jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) model = EnerModel(descrpt, fitting) @@ -298,7 +300,8 @@ def test_model_atom_ener_type_embedding(self): typeebd = TypeEmbedNet(**jdata["model"]["type_embeding"]) jdata["model"]["descriptor"].pop("type", None) descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True) - jdata["model"]["fitting_net"]["descrpt"] = descrpt + jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() + jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) model = EnerModel(descrpt, fitting, typeebd=typeebd) diff --git a/source/tests/tf/test_model_se_a_aparam.py b/source/tests/tf/test_model_se_a_aparam.py index 6b37dfd459..0030104e20 100644 --- a/source/tests/tf/test_model_se_a_aparam.py +++ b/source/tests/tf/test_model_se_a_aparam.py @@ -53,7 +53,8 @@ def test_model(self): jdata["model"]["descriptor"].pop("type", None) descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True) - jdata["model"]["fitting_net"]["descrpt"] = descrpt + jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() + jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) model = EnerModel(descrpt, fitting) diff --git a/source/tests/tf/test_model_se_a_ebd.py b/source/tests/tf/test_model_se_a_ebd.py index b819c2ddc9..3d63db9adf 100644 --- a/source/tests/tf/test_model_se_a_ebd.py +++ b/source/tests/tf/test_model_se_a_ebd.py @@ -54,7 +54,8 @@ def test_model(self): descrpt = DescrptSeAEbd( **jdata["model"]["descriptor"], ) - jdata["model"]["fitting_net"]["descrpt"] = descrpt + jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() + jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() fitting = EnerFitting( **jdata["model"]["fitting_net"], ) diff --git a/source/tests/tf/test_model_se_a_ebd_v2.py b/source/tests/tf/test_model_se_a_ebd_v2.py index 0cc89f5151..73db73b56a 100644 --- a/source/tests/tf/test_model_se_a_ebd_v2.py +++ b/source/tests/tf/test_model_se_a_ebd_v2.py @@ -70,7 +70,8 @@ def test_model(self): descrpt = DescrptSeAEbdV2( **jdata["model"]["descriptor"], ) - jdata["model"]["fitting_net"]["descrpt"] = descrpt + jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() + jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() fitting = EnerFitting( **jdata["model"]["fitting_net"], ) diff --git a/source/tests/tf/test_model_se_a_fparam.py b/source/tests/tf/test_model_se_a_fparam.py index 806ae13582..09fc93336e 100644 --- a/source/tests/tf/test_model_se_a_fparam.py +++ b/source/tests/tf/test_model_se_a_fparam.py @@ -52,7 +52,8 @@ def test_model(self): jdata["model"]["descriptor"].pop("type", None) descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True) - jdata["model"]["fitting_net"]["descrpt"] = descrpt + jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() + jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) # descrpt = DescrptSeA(jdata['model']['descriptor']) # fitting = EnerFitting(jdata['model']['fitting_net'], descrpt) diff --git a/source/tests/tf/test_model_se_a_srtab.py b/source/tests/tf/test_model_se_a_srtab.py index 3a93349741..4fc71a3680 100644 --- a/source/tests/tf/test_model_se_a_srtab.py +++ b/source/tests/tf/test_model_se_a_srtab.py @@ -69,7 +69,8 @@ def test_model(self): jdata["model"]["descriptor"].pop("type", None) descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True) - jdata["model"]["fitting_net"]["descrpt"] = descrpt + jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() + jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) # descrpt = DescrptSeA(jdata['model']['descriptor']) # fitting = EnerFitting(jdata['model']['fitting_net'], descrpt) diff --git a/source/tests/tf/test_model_se_a_type.py b/source/tests/tf/test_model_se_a_type.py index 4b19378cf6..63a14b749e 100644 --- a/source/tests/tf/test_model_se_a_type.py +++ b/source/tests/tf/test_model_se_a_type.py @@ -55,7 +55,8 @@ def test_model(self): jdata["model"]["descriptor"].pop("type", None) descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True) - jdata["model"]["fitting_net"]["descrpt"] = descrpt + jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() + jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) typeebd_param = jdata["model"]["type_embedding"] typeebd = TypeEmbedNet( diff --git a/source/tests/tf/test_model_se_atten.py b/source/tests/tf/test_model_se_atten.py index ad6926e0da..b3d1da8927 100644 --- a/source/tests/tf/test_model_se_atten.py +++ b/source/tests/tf/test_model_se_atten.py @@ -67,7 +67,8 @@ def test_model(self): jdata["model"]["descriptor"].pop("type", None) jdata["model"]["descriptor"]["ntypes"] = 2 descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True) - jdata["model"]["fitting_net"]["descrpt"] = descrpt + jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() + jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) typeebd_param = jdata["model"]["type_embedding"] typeebd = TypeEmbedNet( @@ -292,7 +293,8 @@ def test_compressible_model(self): jdata["model"]["descriptor"]["stripped_type_embedding"] = True jdata["model"]["descriptor"]["attn_layer"] = 0 descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True) - jdata["model"]["fitting_net"]["descrpt"] = descrpt + jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() + jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) typeebd_param = jdata["model"]["type_embedding"] typeebd = TypeEmbedNet( @@ -519,7 +521,8 @@ def test_stripped_type_embedding_model(self): jdata["model"]["descriptor"]["stripped_type_embedding"] = True jdata["model"]["descriptor"]["attn_layer"] = 2 descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True) - jdata["model"]["fitting_net"]["descrpt"] = descrpt + jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() + jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) typeebd_param = jdata["model"]["type_embedding"] typeebd = TypeEmbedNet( @@ -757,7 +760,8 @@ def test_smoothness_of_stripped_type_embedding_smooth_model(self): jdata["model"]["descriptor"]["rcut"] = 6.0 jdata["model"]["descriptor"]["rcut_smth"] = 4.0 descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True) - jdata["model"]["fitting_net"]["descrpt"] = descrpt + jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() + jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) typeebd_param = jdata["model"]["type_embedding"] typeebd = TypeEmbedNet( diff --git a/source/tests/tf/test_model_se_r.py b/source/tests/tf/test_model_se_r.py index a635e6c3c4..c38cda7bc9 100644 --- a/source/tests/tf/test_model_se_r.py +++ b/source/tests/tf/test_model_se_r.py @@ -52,7 +52,8 @@ def test_model(self): jdata["model"]["descriptor"].pop("type", None) descrpt = DescrptSeR(**jdata["model"]["descriptor"], uniform_seed=True) - jdata["model"]["fitting_net"]["descrpt"] = descrpt + jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() + jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) # fitting = EnerFitting(jdata['model']['fitting_net'], descrpt) model = EnerModel(descrpt, fitting) diff --git a/source/tests/tf/test_model_se_t.py b/source/tests/tf/test_model_se_t.py index 881a0e06c4..b8563b5411 100644 --- a/source/tests/tf/test_model_se_t.py +++ b/source/tests/tf/test_model_se_t.py @@ -52,7 +52,8 @@ def test_model(self): jdata["model"]["descriptor"].pop("type", None) descrpt = DescrptSeT(**jdata["model"]["descriptor"], uniform_seed=True) - jdata["model"]["fitting_net"]["descrpt"] = descrpt + jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() + jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) model = EnerModel(descrpt, fitting) diff --git a/source/tests/tf/test_model_spin.py b/source/tests/tf/test_model_spin.py index 26100c19d0..597af6d268 100644 --- a/source/tests/tf/test_model_spin.py +++ b/source/tests/tf/test_model_spin.py @@ -71,7 +71,8 @@ def test_model_spin(self): descrpt_param["spin"] = spin descrpt = DescrptSeA(**descrpt_param, uniform_seed=True) fitting_param.pop("type", None) - fitting_param["descrpt"] = descrpt + fitting_param["ntypes"] = descrpt.get_ntypes() + fitting_param["dim_descript"] = descrpt.get_dim_out() fitting_param["spin"] = spin fitting = EnerFitting(**fitting_param, uniform_seed=True) model = EnerModel(descrpt, fitting, spin=spin) From 1e78ed586e76acfe23d0ebd97827d65fc7495e7f Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sat, 17 Feb 2024 06:36:41 -0500 Subject: [PATCH 05/14] fix typo Signed-off-by: Jinzhe Zeng --- source/tests/tf/test_data_large_batch.py | 6 +++--- source/tests/tf/test_fitting_ener_type.py | 2 +- source/tests/tf/test_layer_name.py | 2 +- source/tests/tf/test_model_multi.py | 2 +- source/tests/tf/test_model_se_a.py | 6 +++--- source/tests/tf/test_model_se_a_aparam.py | 2 +- source/tests/tf/test_model_se_a_ebd.py | 2 +- source/tests/tf/test_model_se_a_ebd_v2.py | 2 +- source/tests/tf/test_model_se_a_fparam.py | 2 +- source/tests/tf/test_model_se_a_srtab.py | 2 +- source/tests/tf/test_model_se_a_type.py | 2 +- source/tests/tf/test_model_se_atten.py | 8 ++++---- source/tests/tf/test_model_se_r.py | 2 +- source/tests/tf/test_model_se_t.py | 2 +- source/tests/tf/test_model_spin.py | 2 +- 15 files changed, 22 insertions(+), 22 deletions(-) diff --git a/source/tests/tf/test_data_large_batch.py b/source/tests/tf/test_data_large_batch.py index bc021a4494..53991fa7f2 100644 --- a/source/tests/tf/test_data_large_batch.py +++ b/source/tests/tf/test_data_large_batch.py @@ -113,7 +113,7 @@ def test_data_mixed_type(self): jdata["model"]["descriptor"]["ntypes"] = 2 descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True) jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() - jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() + jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) typeebd_param = jdata["model"]["type_embedding"] typeebd = TypeEmbedNet( @@ -310,7 +310,7 @@ def test_stripped_data_mixed_type(self): jdata["model"]["descriptor"]["stripped_type_embedding"] = True descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True) jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() - jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() + jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) typeebd_param = jdata["model"]["type_embedding"] typeebd = TypeEmbedNet( @@ -507,7 +507,7 @@ def test_compressible_data_mixed_type(self): jdata["model"]["descriptor"]["attn_layer"] = 0 descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True) jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() - jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() + jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) typeebd_param = jdata["model"]["type_embedding"] typeebd = TypeEmbedNet( diff --git a/source/tests/tf/test_fitting_ener_type.py b/source/tests/tf/test_fitting_ener_type.py index 61f2607032..f88692be74 100644 --- a/source/tests/tf/test_fitting_ener_type.py +++ b/source/tests/tf/test_fitting_ener_type.py @@ -55,7 +55,7 @@ def test_fitting(self): jdata["model"]["descriptor"].pop("type", None) descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True) jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() - jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() + jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']]) diff --git a/source/tests/tf/test_layer_name.py b/source/tests/tf/test_layer_name.py index d1c06f3a0e..089bd19dd1 100644 --- a/source/tests/tf/test_layer_name.py +++ b/source/tests/tf/test_layer_name.py @@ -67,7 +67,7 @@ def test_model(self): item_fitting_param.pop("type", None) item_fitting_param.pop("fit_diag", None) item_fitting_param["ntypes"] = descrpt.get_ntypes() - item_fitting_param["dim_descript"] = descrpt.get_dim_out() + item_fitting_param["dim_descrpt"] = descrpt.get_dim_out() if item_fitting_type == "ener": fitting_dict[fitting_key] = EnerFitting( **item_fitting_param, uniform_seed=True diff --git a/source/tests/tf/test_model_multi.py b/source/tests/tf/test_model_multi.py index bd7881bfc6..447ad24abe 100644 --- a/source/tests/tf/test_model_multi.py +++ b/source/tests/tf/test_model_multi.py @@ -69,7 +69,7 @@ def test_model(self): item_fitting_param.pop("type", None) item_fitting_param.pop("fit_diag", None) item_fitting_param["ntypes"] = descrpt.get_ntypes() - item_fitting_param["dim_descript"] = descrpt.get_dim_out() + item_fitting_param["dim_descrpt"] = descrpt.get_dim_out() if item_fitting_type == "ener": fitting_dict[fitting_key] = EnerFitting( **item_fitting_param, uniform_seed=True diff --git a/source/tests/tf/test_model_se_a.py b/source/tests/tf/test_model_se_a.py index bf947fc940..e60cb2307f 100644 --- a/source/tests/tf/test_model_se_a.py +++ b/source/tests/tf/test_model_se_a.py @@ -75,7 +75,7 @@ def test_model_atom_ener(self): jdata["model"]["descriptor"].pop("type", None) descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True) jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() - jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() + jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) model = EnerModel(descrpt, fitting) @@ -156,7 +156,7 @@ def test_model(self): jdata["model"]["descriptor"].pop("type", None) descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True) jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() - jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() + jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) model = EnerModel(descrpt, fitting) @@ -301,7 +301,7 @@ def test_model_atom_ener_type_embedding(self): jdata["model"]["descriptor"].pop("type", None) descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True) jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() - jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() + jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) model = EnerModel(descrpt, fitting, typeebd=typeebd) diff --git a/source/tests/tf/test_model_se_a_aparam.py b/source/tests/tf/test_model_se_a_aparam.py index 0030104e20..6bf059f8fa 100644 --- a/source/tests/tf/test_model_se_a_aparam.py +++ b/source/tests/tf/test_model_se_a_aparam.py @@ -54,7 +54,7 @@ def test_model(self): jdata["model"]["descriptor"].pop("type", None) descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True) jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() - jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() + jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) model = EnerModel(descrpt, fitting) diff --git a/source/tests/tf/test_model_se_a_ebd.py b/source/tests/tf/test_model_se_a_ebd.py index 3d63db9adf..599cce6386 100644 --- a/source/tests/tf/test_model_se_a_ebd.py +++ b/source/tests/tf/test_model_se_a_ebd.py @@ -55,7 +55,7 @@ def test_model(self): **jdata["model"]["descriptor"], ) jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() - jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() + jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out() fitting = EnerFitting( **jdata["model"]["fitting_net"], ) diff --git a/source/tests/tf/test_model_se_a_ebd_v2.py b/source/tests/tf/test_model_se_a_ebd_v2.py index 73db73b56a..22b3c3389d 100644 --- a/source/tests/tf/test_model_se_a_ebd_v2.py +++ b/source/tests/tf/test_model_se_a_ebd_v2.py @@ -71,7 +71,7 @@ def test_model(self): **jdata["model"]["descriptor"], ) jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() - jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() + jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out() fitting = EnerFitting( **jdata["model"]["fitting_net"], ) diff --git a/source/tests/tf/test_model_se_a_fparam.py b/source/tests/tf/test_model_se_a_fparam.py index 09fc93336e..4f94fc1655 100644 --- a/source/tests/tf/test_model_se_a_fparam.py +++ b/source/tests/tf/test_model_se_a_fparam.py @@ -53,7 +53,7 @@ def test_model(self): jdata["model"]["descriptor"].pop("type", None) descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True) jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() - jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() + jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) # descrpt = DescrptSeA(jdata['model']['descriptor']) # fitting = EnerFitting(jdata['model']['fitting_net'], descrpt) diff --git a/source/tests/tf/test_model_se_a_srtab.py b/source/tests/tf/test_model_se_a_srtab.py index 4fc71a3680..00f59668a0 100644 --- a/source/tests/tf/test_model_se_a_srtab.py +++ b/source/tests/tf/test_model_se_a_srtab.py @@ -70,7 +70,7 @@ def test_model(self): jdata["model"]["descriptor"].pop("type", None) descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True) jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() - jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() + jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) # descrpt = DescrptSeA(jdata['model']['descriptor']) # fitting = EnerFitting(jdata['model']['fitting_net'], descrpt) diff --git a/source/tests/tf/test_model_se_a_type.py b/source/tests/tf/test_model_se_a_type.py index 63a14b749e..9c0a07cc98 100644 --- a/source/tests/tf/test_model_se_a_type.py +++ b/source/tests/tf/test_model_se_a_type.py @@ -56,7 +56,7 @@ def test_model(self): jdata["model"]["descriptor"].pop("type", None) descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True) jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() - jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() + jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) typeebd_param = jdata["model"]["type_embedding"] typeebd = TypeEmbedNet( diff --git a/source/tests/tf/test_model_se_atten.py b/source/tests/tf/test_model_se_atten.py index b3d1da8927..13e4c554ca 100644 --- a/source/tests/tf/test_model_se_atten.py +++ b/source/tests/tf/test_model_se_atten.py @@ -68,7 +68,7 @@ def test_model(self): jdata["model"]["descriptor"]["ntypes"] = 2 descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True) jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() - jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() + jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) typeebd_param = jdata["model"]["type_embedding"] typeebd = TypeEmbedNet( @@ -294,7 +294,7 @@ def test_compressible_model(self): jdata["model"]["descriptor"]["attn_layer"] = 0 descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True) jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() - jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() + jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) typeebd_param = jdata["model"]["type_embedding"] typeebd = TypeEmbedNet( @@ -522,7 +522,7 @@ def test_stripped_type_embedding_model(self): jdata["model"]["descriptor"]["attn_layer"] = 2 descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True) jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() - jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() + jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) typeebd_param = jdata["model"]["type_embedding"] typeebd = TypeEmbedNet( @@ -761,7 +761,7 @@ def test_smoothness_of_stripped_type_embedding_smooth_model(self): jdata["model"]["descriptor"]["rcut_smth"] = 4.0 descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True) jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() - jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() + jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) typeebd_param = jdata["model"]["type_embedding"] typeebd = TypeEmbedNet( diff --git a/source/tests/tf/test_model_se_r.py b/source/tests/tf/test_model_se_r.py index c38cda7bc9..1e63922e19 100644 --- a/source/tests/tf/test_model_se_r.py +++ b/source/tests/tf/test_model_se_r.py @@ -53,7 +53,7 @@ def test_model(self): jdata["model"]["descriptor"].pop("type", None) descrpt = DescrptSeR(**jdata["model"]["descriptor"], uniform_seed=True) jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() - jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() + jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) # fitting = EnerFitting(jdata['model']['fitting_net'], descrpt) model = EnerModel(descrpt, fitting) diff --git a/source/tests/tf/test_model_se_t.py b/source/tests/tf/test_model_se_t.py index b8563b5411..d75fac2f07 100644 --- a/source/tests/tf/test_model_se_t.py +++ b/source/tests/tf/test_model_se_t.py @@ -53,7 +53,7 @@ def test_model(self): jdata["model"]["descriptor"].pop("type", None) descrpt = DescrptSeT(**jdata["model"]["descriptor"], uniform_seed=True) jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() - jdata["model"]["fitting_net"]["dim_descript"] = descrpt.get_dim_out() + jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out() fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) model = EnerModel(descrpt, fitting) diff --git a/source/tests/tf/test_model_spin.py b/source/tests/tf/test_model_spin.py index 597af6d268..5d20c76c35 100644 --- a/source/tests/tf/test_model_spin.py +++ b/source/tests/tf/test_model_spin.py @@ -72,7 +72,7 @@ def test_model_spin(self): descrpt = DescrptSeA(**descrpt_param, uniform_seed=True) fitting_param.pop("type", None) fitting_param["ntypes"] = descrpt.get_ntypes() - fitting_param["dim_descript"] = descrpt.get_dim_out() + fitting_param["dim_descrpt"] = descrpt.get_dim_out() fitting_param["spin"] = spin fitting = EnerFitting(**fitting_param, uniform_seed=True) model = EnerModel(descrpt, fitting, spin=spin) From 40da153a5b7b019793f9a6490de50b51aa61210c Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sat, 17 Feb 2024 06:40:20 -0500 Subject: [PATCH 06/14] fix errors Signed-off-by: Jinzhe Zeng --- deepmd/tf/model/multi.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/deepmd/tf/model/multi.py b/deepmd/tf/model/multi.py index 52bbcebf4d..833a700ebc 100644 --- a/deepmd/tf/model/multi.py +++ b/deepmd/tf/model/multi.py @@ -134,7 +134,11 @@ def __init__( fitting_dict[item] = item_fitting_param else: fitting_dict[item] = Fitting( - **item_fitting_param, descrpt=self.descrpt, spin=self.spin + **item_fitting_param, + descrpt=self.descrpt, + spin=self.spin, + ntypes=self.descrpt.get_ntypes(), + dim_descrpt=self.descrpt.get_dim_out(), ) # type embedding From bca0f44ed3e4a4346af97e8af21e3457343af647 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sat, 17 Feb 2024 17:27:06 -0500 Subject: [PATCH 07/14] only pop in the subclass Signed-off-by: Jinzhe Zeng --- deepmd/dpmodel/fitting/invar_fitting.py | 9 +++++++-- deepmd/pt/model/task/ener.py | 8 ++++++++ deepmd/pt/model/task/fitting.py | 2 -- 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/deepmd/dpmodel/fitting/invar_fitting.py b/deepmd/dpmodel/fitting/invar_fitting.py index 8915c67a6f..bc0f5d10d8 100644 --- a/deepmd/dpmodel/fitting/invar_fitting.py +++ b/deepmd/dpmodel/fitting/invar_fitting.py @@ -290,8 +290,6 @@ def deserialize(cls, data: dict) -> "InvarFitting": data = copy.deepcopy(data) variables = data.pop("@variables") nets = data.pop("nets") - data.pop("var_name") - data.pop("dim_out") obj = cls(**data) for kk in variables.keys(): obj[kk] = variables[kk] @@ -432,3 +430,10 @@ def __init__( spin=spin, distinguish_types=distinguish_types, ) + + @classmethod + def deserialize(cls, data: dict) -> "InvarFitting": + data = copy.deepcopy(data) + data.pop("var_name") + data.pop("dim_out") + return super().deserialize(data) diff --git a/deepmd/pt/model/task/ener.py b/deepmd/pt/model/task/ener.py index 5ed4fb5d3c..0c25defd33 100644 --- a/deepmd/pt/model/task/ener.py +++ b/deepmd/pt/model/task/ener.py @@ -1,4 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +import copy import logging from typing import ( List, @@ -235,6 +236,13 @@ def get_stat_name(cls, ntypes, type_name="ener", **kwargs): assert fitting_type in ["ener"] return f"stat_file_fitting_ener_ntypes{ntypes}.npz" + @classmethod + def deserialize(cls, data: dict) -> "GeneralFitting": + data = copy.deepcopy(data) + data.pop("var_name") + data.pop("dim_out") + return super().deserialize(data) + @Fitting.register("direct_force") @Fitting.register("direct_force_ener") diff --git a/deepmd/pt/model/task/fitting.py b/deepmd/pt/model/task/fitting.py index 0f2cf9e293..24a8d3456a 100644 --- a/deepmd/pt/model/task/fitting.py +++ b/deepmd/pt/model/task/fitting.py @@ -539,8 +539,6 @@ def deserialize(cls, data: dict) -> "GeneralFitting": data = copy.deepcopy(data) variables = data.pop("@variables") nets = data.pop("nets") - data.pop("var_name") - data.pop("dim_out") obj = cls(**data) for kk in variables.keys(): obj[kk] = to_torch_tensor(variables[kk]) From 3bcdc33e4074e923926f0e5544307ce37a52aba7 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sat, 17 Feb 2024 18:32:41 -0500 Subject: [PATCH 08/14] fix tests Signed-off-by: Jinzhe Zeng --- deepmd/dpmodel/fitting/invar_fitting.py | 1 + deepmd/tf/fit/ener.py | 1 + source/tests/consistent/fitting/test_ener.py | 4 ++-- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/deepmd/dpmodel/fitting/invar_fitting.py b/deepmd/dpmodel/fitting/invar_fitting.py index bc0f5d10d8..ac272039de 100644 --- a/deepmd/dpmodel/fitting/invar_fitting.py +++ b/deepmd/dpmodel/fitting/invar_fitting.py @@ -407,6 +407,7 @@ def __init__( use_aparam_as_mask: bool = False, spin: Any = None, distinguish_types: bool = False, + exclude_types: List[int] = [], # not used seed: Optional[int] = None, ): diff --git a/deepmd/tf/fit/ener.py b/deepmd/tf/fit/ener.py index 418f16f84c..8b4a573a58 100644 --- a/deepmd/tf/fit/ener.py +++ b/deepmd/tf/fit/ener.py @@ -985,6 +985,7 @@ def serialize(self, suffix: str) -> dict: "layer_name": self.layer_name, "use_aparam_as_mask": self.use_aparam_as_mask, "spin": self.spin, + "exclude_types": [], "nets": self.serialize_network( ntypes=self.ntypes, # TODO: consider type embeddings diff --git a/source/tests/consistent/fitting/test_ener.py b/source/tests/consistent/fitting/test_ener.py index 1b4fab0a54..d8bcbc5ad8 100644 --- a/source/tests/consistent/fitting/test_ener.py +++ b/source/tests/consistent/fitting/test_ener.py @@ -150,7 +150,7 @@ def eval_pt(self, pt_obj: Any) -> Any: return ( pt_obj( torch.from_numpy(self.inputs).to(device=PT_DEVICE), - torch.from_numpy(self.atype).to(device=PT_DEVICE), + torch.from_numpy(self.atype.reshape(1, -1)).to(device=PT_DEVICE), fparam=torch.from_numpy(self.fparam).to(device=PT_DEVICE) if numb_fparam else None, @@ -169,7 +169,7 @@ def eval_dp(self, dp_obj: Any) -> Any: ) = self.param return dp_obj( self.inputs, - self.atype, + self.atype.reshape(1, -1), fparam=self.fparam if numb_fparam else None, )["energy"] From 00fc31a9602b2b14488c22497b70011b7babcbf1 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sat, 17 Feb 2024 18:34:28 -0500 Subject: [PATCH 09/14] fix test Signed-off-by: Jinzhe Zeng --- source/tests/tf/test_model_multi.py | 1 + 1 file changed, 1 insertion(+) diff --git a/source/tests/tf/test_model_multi.py b/source/tests/tf/test_model_multi.py index 447ad24abe..b978dff1ab 100644 --- a/source/tests/tf/test_model_multi.py +++ b/source/tests/tf/test_model_multi.py @@ -68,6 +68,7 @@ def test_model(self): fitting_type_dict[fitting_key] = item_fitting_type item_fitting_param.pop("type", None) item_fitting_param.pop("fit_diag", None) + item_fitting_param["descrpt"] = descrpt item_fitting_param["ntypes"] = descrpt.get_ntypes() item_fitting_param["dim_descrpt"] = descrpt.get_dim_out() if item_fitting_type == "ener": From fb68528898cf178f8ad6a242b068bb54ff370e36 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sun, 18 Feb 2024 20:24:37 +0000 Subject: [PATCH 10/14] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- deepmd/pt/model/task/ener.py | 1 + 1 file changed, 1 insertion(+) diff --git a/deepmd/pt/model/task/ener.py b/deepmd/pt/model/task/ener.py index 26915ac660..3d0d3fc19d 100644 --- a/deepmd/pt/model/task/ener.py +++ b/deepmd/pt/model/task/ener.py @@ -240,6 +240,7 @@ def deserialize(cls, data: dict) -> "GeneralFitting": data.pop("dim_out") return super().deserialize(data) + @Fitting.register("direct_force") @Fitting.register("direct_force_ener") @fitting_check_output From b212131956135dabd1c775968635042083afb644 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sun, 18 Feb 2024 17:02:13 -0500 Subject: [PATCH 11/14] loose rtol for float32 Signed-off-by: Jinzhe Zeng --- source/tests/consistent/common.py | 14 +++++----- source/tests/consistent/fitting/test_ener.py | 27 ++++++++++++-------- 2 files changed, 24 insertions(+), 17 deletions(-) diff --git a/source/tests/consistent/common.py b/source/tests/consistent/common.py index de6630ce35..abf2507ef5 100644 --- a/source/tests/consistent/common.py +++ b/source/tests/consistent/common.py @@ -75,6 +75,8 @@ class CommonTest(ABC): """Whether to skip the TensorFlow model.""" skip_pt: ClassVar[bool] = not INSTALLED_PT """Whether to skip the PyTorch model.""" + rtol = 1e-10 + """Relative tolerance for comparing the return value. Override for float32.""" def setUp(self): self.unique_id = uuid4().hex @@ -239,7 +241,7 @@ def test_tf_consistent_with_ref(self): ret2 = self.extract_ret(ret2, self.RefBackend.TF) np.testing.assert_equal(data1, data2) for rr1, rr2 in zip(ret1, ret2): - np.testing.assert_allclose(rr1, rr2) + np.testing.assert_allclose(rr1, rr2, rtol=self.rtol) def test_tf_self_consistent(self): """Test whether TF is self consistent.""" @@ -253,7 +255,7 @@ def test_tf_self_consistent(self): ret2, data2 = self.get_tf_ret_serialization_from_cls(obj2) np.testing.assert_equal(data1, data2) for rr1, rr2 in zip(ret1, ret2): - np.testing.assert_allclose(rr1, rr2) + np.testing.assert_allclose(rr1, rr2, rtol=self.rtol) def test_dp_consistent_with_ref(self): """Test whether DP and reference are consistent.""" @@ -270,7 +272,7 @@ def test_dp_consistent_with_ref(self): data2 = dp_obj.serialize() np.testing.assert_equal(data1, data2) for rr1, rr2 in zip(ret1, ret2): - np.testing.assert_allclose(rr1, rr2) + np.testing.assert_allclose(rr1, rr2, rtol=self.rtol) def test_dp_self_consistent(self): """Test whether DP is self consistent.""" @@ -283,7 +285,7 @@ def test_dp_self_consistent(self): np.testing.assert_equal(data1, data2) for rr1, rr2 in zip(ret1, ret2): if isinstance(rr1, np.ndarray) and isinstance(rr2, np.ndarray): - np.testing.assert_allclose(rr1, rr2) + np.testing.assert_allclose(rr1, rr2, rtol=self.rtol) else: self.assertEqual(rr1, rr2) @@ -302,7 +304,7 @@ def test_pt_consistent_with_ref(self): data2 = obj.serialize() np.testing.assert_equal(data1, data2) for rr1, rr2 in zip(ret1, ret2): - np.testing.assert_allclose(rr1, rr2) + np.testing.assert_allclose(rr1, rr2, rtol=self.rtol) def test_pt_self_consistent(self): """Test whether PT is self consistent.""" @@ -315,7 +317,7 @@ def test_pt_self_consistent(self): np.testing.assert_equal(data1, data2) for rr1, rr2 in zip(ret1, ret2): if isinstance(rr1, np.ndarray) and isinstance(rr2, np.ndarray): - np.testing.assert_allclose(rr1, rr2) + np.testing.assert_allclose(rr1, rr2, rtol=self.rtol) else: self.assertEqual(rr1, rr2) diff --git a/source/tests/consistent/fitting/test_ener.py b/source/tests/consistent/fitting/test_ener.py index 732087f3af..e122d31633 100644 --- a/source/tests/consistent/fitting/test_ener.py +++ b/source/tests/consistent/fitting/test_ener.py @@ -83,17 +83,6 @@ def skip_pt(self) -> bool: # TODO: float32 has bug return precision == "float32" or CommonTest.skip_pt - @property - def skip_dp(self) -> bool: - ( - resnet_dt, - precision, - distinguish_types, - numb_fparam, - ) = self.param - # TODO: float32 has bug - return precision == "float32" or CommonTest.skip_dp - tf_class = EnerFittingTF dp_class = EnerFittingDP pt_class = EnerFittingPT @@ -178,3 +167,19 @@ def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: # shape is not same ret = ret[0].reshape(-1, self.natoms[0], 1) return (ret,) + + @property + def rtol(self) -> float: + """Relative tolerance for comparing the return value.""" + ( + resnet_dt, + precision, + distinguish_types, + numb_fparam, + ) = self.param + if precision == "float64": + return 1e-10 + elif precision == "float32": + return 1e-6 + else: + raise ValueError(f"Unknown precision: {precision}") From 563d14677599bfff5c559862de2df565e043d27d Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sun, 18 Feb 2024 17:26:41 -0500 Subject: [PATCH 12/14] 1e-6 still too strict. use 1e-5 Signed-off-by: Jinzhe Zeng --- source/tests/consistent/fitting/test_ener.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/tests/consistent/fitting/test_ener.py b/source/tests/consistent/fitting/test_ener.py index e122d31633..1e303159c8 100644 --- a/source/tests/consistent/fitting/test_ener.py +++ b/source/tests/consistent/fitting/test_ener.py @@ -180,6 +180,6 @@ def rtol(self) -> float: if precision == "float64": return 1e-10 elif precision == "float32": - return 1e-6 + return 1e-5 else: raise ValueError(f"Unknown precision: {precision}") From 2158d2459ac4ee06a8e26f2293a00b0c83977767 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sun, 18 Feb 2024 17:28:11 -0500 Subject: [PATCH 13/14] fix dipole fitting Signed-off-by: Jinzhe Zeng --- deepmd/dpmodel/fitting/dipole_fitting.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deepmd/dpmodel/fitting/dipole_fitting.py b/deepmd/dpmodel/fitting/dipole_fitting.py index 64cad75b62..d40639b1cd 100644 --- a/deepmd/dpmodel/fitting/dipole_fitting.py +++ b/deepmd/dpmodel/fitting/dipole_fitting.py @@ -102,7 +102,7 @@ def __init__( raise NotImplementedError("use_aparam_as_mask is not implemented") if layer_name is not None: raise NotImplementedError("layer_name is not implemented") - if atom_ener is not None: + if atom_ener is not None and atom_ener != []: raise NotImplementedError("atom_ener is not implemented") self.dim_rot_mat = dim_rot_mat From 673d98dde0aac9d5dc9e5b1a54463248dd5f91fc Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sun, 18 Feb 2024 18:32:06 -0500 Subject: [PATCH 14/14] change 1e-5 to 1e-4 Signed-off-by: Jinzhe Zeng --- source/tests/consistent/fitting/test_ener.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/tests/consistent/fitting/test_ener.py b/source/tests/consistent/fitting/test_ener.py index 1e303159c8..222c4d84a5 100644 --- a/source/tests/consistent/fitting/test_ener.py +++ b/source/tests/consistent/fitting/test_ener.py @@ -180,6 +180,6 @@ def rtol(self) -> float: if precision == "float64": return 1e-10 elif precision == "float32": - return 1e-5 + return 1e-4 else: raise ValueError(f"Unknown precision: {precision}")