Skip to content

Commit

Permalink
fix_ci_uniform
Browse files Browse the repository at this point in the history
  • Loading branch information
zhangyubo0722 authored and TingquanGao committed Sep 25, 2024
1 parent 7285248 commit febeb0b
Show file tree
Hide file tree
Showing 5 changed files with 37 additions and 23 deletions.
2 changes: 2 additions & 0 deletions paddlets/models/anomaly/dl/anomaly_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
from paddlets.metrics import (MetricContainer, Metric)
from paddlets.models.data_adapter import DataAdapter
from paddlets.models.utils import check_tsdataset, to_tsdataset, build_network_input_spec
from paddlets.utils.utils import convert_and_remove_types
from paddlets.datasets import TSDataset
from paddlets.logger import raise_if, raise_if_not, raise_log, Logger

Expand Down Expand Up @@ -880,6 +881,7 @@ def save(self,
model_meta.update(data_info)
if model_name is not None:
model_meta['Global'] = {'model_name': model_name}
model_meta = convert_and_remove_types(model_meta)
yaml.dump(model_meta, f)
except Exception as e:
raise_log(
Expand Down
2 changes: 2 additions & 0 deletions paddlets/models/classify/dl/paddle_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
import paddle.nn.functional as F
from paddle.nn import CrossEntropyLoss
from sklearn.utils import check_random_state
from paddlets.utils.utils import convert_and_remove_types
import numpy as np
import paddle

Expand Down Expand Up @@ -720,6 +721,7 @@ def save(self,
model_meta.update(data_info)
if model_name is not None:
model_meta['Global'] = {'model_name': model_name}
model_meta = convert_and_remove_types(model_meta)
yaml.dump(model_meta, f)
except Exception as e:
raise_log(
Expand Down
2 changes: 2 additions & 0 deletions paddlets/models/forecasting/dl/paddle_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@

from paddlets.models.base import BaseModel
from paddlets.models.utils import build_network_input_spec
from paddlets.utils.utils import convert_and_remove_types
from paddlets import TSDataset
from paddlets.logger import raise_if, raise_if_not, raise_log

Expand Down Expand Up @@ -200,6 +201,7 @@ def save(self,
model_meta.update(data_info)
if model_name is not None:
model_meta['Global'] = {'model_name': model_name}
model_meta = convert_and_remove_types(model_meta)
yaml.dump(model_meta, f)
except Exception as e:
raise_log(
Expand Down
30 changes: 20 additions & 10 deletions paddlets/utils/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -489,15 +489,25 @@ def build_ts_infer_input(tsdataset: TSDataset,
return res


def convert_floats(obj):
    """Recursively replace ``np.float32`` leaves with builtin ``float``.

    Walks dicts and lists; any other value (including other numpy
    scalar types) is returned unchanged.
    """
    if isinstance(obj, np.float32):
        return float(obj)
    if isinstance(obj, list):
        return [convert_floats(item) for item in obj]
    if isinstance(obj, dict):
        return {key: convert_floats(value) for key, value in obj.items()}
    return obj
def convert_and_remove_types(data):
    """Recursively sanitize *data* so it can be safely serialized (yaml/json).

    - dict: rebuilt with each value converted; entries whose value is a
      *class object* (``isinstance(v, type)``) are dropped, since type
      objects are not serializable.
    - list: each element converted recursively.
    - numpy arrays become nested Python lists; numpy scalar types
      (float32/64, int32/64, bool_, str_) become the corresponding
      builtin Python types.
    - anything else is returned unchanged.
    """
    if isinstance(data, dict):
        return {
            k: convert_and_remove_types(v)
            for k, v in data.items() if not isinstance(v, type)
        }
    elif isinstance(data, list):
        return [convert_and_remove_types(v) for v in data]
    elif isinstance(data, np.ndarray):
        return data.tolist()
    elif isinstance(data, (np.float32, np.float64)):
        return float(data)
    elif isinstance(data, (np.int32, np.int64)):
        return int(data)
    elif isinstance(data, np.bool_):
        return bool(data)
    # NOTE: np.unicode_ (an alias of np.str_) was removed in NumPy 2.0;
    # referencing it raises AttributeError, so only np.str_ is checked.
    elif isinstance(data, np.str_):
        return str(data)
    return data


def update_train_results(save_path, score, model_name="", done_flag=True):
Expand Down Expand Up @@ -526,6 +536,6 @@ def update_train_results(save_path, score, model_name="", done_flag=True):
"inference", f"inference.{tag}"
if tag != "inference_config" else "inference.yml")

train_results = convert_floats(train_results)
train_results = convert_and_remove_types(train_results)
with open(train_results_path, "w") as fp:
json.dump(train_results, fp)
24 changes: 11 additions & 13 deletions tools/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
import argparse
import warnings
import joblib
import copy

import paddle
from paddlets.utils.config import Config
Expand All @@ -14,6 +15,7 @@
from paddlets.utils.manager import MODELS
from paddlets.metrics import MSE, MAE
from paddlets.utils import backtest
from paddlets.utils.utils import convert_and_remove_types
from paddlets.logger import Logger
from paddlets.utils.utils import set_print_mem_info, update_train_results
from export import export
Expand Down Expand Up @@ -43,8 +45,7 @@ def parse_args():
type=str,
default=None)
# Runtime params
parser.add_argument(
'--seq_len', help='input length in training.', type=int)
parser.add_argument('--seq_len', help='input length in training.', type=int)
parser.add_argument(
'--predict_len', help='output length in training.', type=int)
parser.add_argument('--epoch', help='Iterations in training.', type=int)
Expand All @@ -54,10 +55,7 @@ def parse_args():

# Other params
parser.add_argument(
'--seed',
help='Set the random seed in training.',
default=42,
type=int)
'--seed', help='Set the random seed in training.', default=42, type=int)
parser.add_argument(
'--opts', help='Update the key-value pairs of all options.', nargs='+')

Expand Down Expand Up @@ -233,8 +231,7 @@ def main(args):
ts_train, ts_val, ts_test = get_dataset(dataset['name'], split,
seq_len, info_params)
else:
ts_train = get_dataset(dataset['name'], split, seq_len,
info_params)
ts_train = get_dataset(dataset['name'], split, seq_len, info_params)

if cfg.model['name'] in ['TimesNetModel', 'Nonstationary_Transformer'
] and args.device == 'xpu':
Expand All @@ -243,8 +240,7 @@ def main(args):
if cfg.model['name'] == 'PP-TS':
from paddlets.ensemble import WeightingEnsembleForecaster
estimators = []
for model_name, model_cfg in cfg.model['model_cfg']['Ensemble'].items(
):
for model_name, model_cfg in cfg.model['model_cfg']['Ensemble'].items():
model_cfg = Config(
model_cfg,
seq_len=seq_len,
Expand Down Expand Up @@ -315,8 +311,8 @@ def main(args):
if dataset['name'] != 'TSDataset':
ts_all = get_dataset(dataset['name'])
ts_all = time_feature_generator.fit_transform(ts_all)
ts_train._known_cov = ts_all._known_cov[split['train'][0]:
split['train'][1]]
ts_train._known_cov = ts_all._known_cov[split['train'][0]:split[
'train'][1]]
if ts_val is not None:
ts_val._known_cov = ts_all._known_cov[split['val'][
0] - seq_len:split['val'][1]]
Expand Down Expand Up @@ -394,7 +390,9 @@ def main(args):
}, open(cfg_output_dir + '/score.json', 'w'))
if cfg.dic.get("uniform_output_enabled", False):
with open(os.path.join(args.save_dir, "config.yaml"), "w") as f:
yaml.dump(cfg.dic, f)
dict_to_dump = copy.deepcopy(cfg.dic)
dict_to_dump = convert_and_remove_types(dict_to_dump)
yaml.dump(dict_to_dump, f)
export(args, model)
update_train_results(
args.save_dir,
Expand Down

0 comments on commit febeb0b

Please sign in to comment.