train.py
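"""Entry point for training and testing a PPO agent.

Parses hyperparameters from the command line, trains an agent via
ppo.train, saves the model and results, then reloads the model and
evaluates it via ppo.test.

Example invocation (a sketch; it assumes ppo.py and rl_utils.py sit
next to this script):

    python train.py --env_name CartPole-v0 --device cpu --train_eps 200
"""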
import os
import argparse
import datetime

from rl_utils import save_args, save_results, plot_rewards
import ppo
def str2bool(v):
    """Convert a command-line string to a bool.

    argparse's type=bool treats any non-empty string (including
    "False") as True, so boolean flags need an explicit converter.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', '0'):
        return False
    raise argparse.ArgumentTypeError(f"boolean value expected, got {v!r}")


if __name__ == '__main__':
    # Avoid "OMP: Error #15: Initializing libiomp5md.dll, but found
    # libiomp5md.dll already initialized."
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
    # Directory containing this script
    curr_path = os.path.dirname(os.path.abspath(__file__))
    # Timestamp used to tag the output directories
    curr_time = datetime.datetime.now().strftime("%Y_%m_%d-%H_%M_%S")
    # Hyperparameter settings
    parser = argparse.ArgumentParser(description="hyperparameters")
    parser.add_argument('--algo_name', default='PPO', type=str, help="name of algorithm")
    parser.add_argument('--env_name', default='CartPole-v0', type=str, help="name of environment")
    parser.add_argument('--continuous', default=False, type=str2bool,
                        help="whether the action space is continuous")  # PPO handles both continuous and discrete action spaces
    parser.add_argument('--train_eps', default=200, type=int, help="episodes of training")
    parser.add_argument('--test_eps', default=20, type=int, help="episodes of testing")
    parser.add_argument('--gamma', default=0.99, type=float, help="discount factor")
    parser.add_argument('--batch_size', default=5, type=int)  # batch size for mini-batch SGD
    parser.add_argument('--n_epochs', default=4, type=int)
    parser.add_argument('--actor_lr', default=0.0003, type=float, help="learning rate of actor net")
    parser.add_argument('--critic_lr', default=0.0003, type=float, help="learning rate of critic net")
    parser.add_argument('--gae_lambda', default=0.95, type=float)
    parser.add_argument('--policy_clip', default=0.2, type=float)  # clip parameter in PPO-clip, typically around 0.1-0.2
    parser.add_argument('--update_fre', default=20, type=int)
    parser.add_argument('--hidden_dim', default=256, type=int)
    parser.add_argument('--device', default='cuda', type=str, help="cpu or cuda")
    parser.add_argument('--seed', default=520, type=int, help="random seed")
    parser.add_argument('--show_fig', default=False, type=str2bool, help="whether to show the figure")
    parser.add_argument('--save_fig', default=True, type=str2bool, help="whether to save the figure")
    parser.add_argument('--train_render', default=False, type=str2bool,
                        help="whether to render the environment during training")
    parser.add_argument('--test_render', default=True, type=str2bool,
                        help="whether to render the environment during testing")
    args = parser.parse_args()
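    # A brief note on the two PPO-specific knobs above (standard PPO; how
    # ppo.py uses them internally is assumed): --policy_clip is the eps in
    # the clipped surrogate objective
    #     L(theta) = E[min(r_t(theta) * A_t, clip(r_t(theta), 1 - eps, 1 + eps) * A_t)]
    # where r_t(theta) = pi_theta(a_t|s_t) / pi_theta_old(a_t|s_t), and
    # --gae_lambda is the lambda in generalized advantage estimation,
    #     A_t = sum_l (gamma * lambda)^l * delta_{t+l},
    #     delta_t = r_t + gamma * V(s_{t+1}) - V(s_t).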
    default_args = {'result_path': f"{curr_path}/outputs/{args.env_name}/{curr_time}/results/",
                    'model_path': f"{curr_path}/outputs/{args.env_name}/{curr_time}/models/",
                    }
    # Merge the parsed arguments and the default paths into a single dict
    arg_dict = {**vars(args), **default_args}
    print("Hyperparameter dict:", arg_dict)
    # Create the environment and agent from the hyperparameters above
    env, agent = ppo.create_env_agent(arg_dict)
    # Pass in the hyperparameters, environment, and agent, then start training
    res_dic = ppo.train(arg_dict, env, agent)
    print("Result dict returned by training:", res_dic)
    # Save the model, arguments, results, and reward curve
    agent.save_model(path=arg_dict['model_path'])
    save_args(arg_dict, path=arg_dict['result_path'])
    save_results(res_dic, tag='train', path=arg_dict['result_path'])
    plot_rewards(res_dic['rewards'], arg_dict, path=arg_dict['result_path'], tag="train")
    # =================================================================================================
    # Create a fresh environment and agent for testing
    print("=" * 300)
    env, agent = ppo.create_env_agent(arg_dict)
    # Load the saved agent
    agent.load_model(path=arg_dict['model_path'])
    res_dic = ppo.test(arg_dict, env, agent)
    save_results(res_dic, tag='test', path=arg_dict['result_path'])
    plot_rewards(res_dic['rewards'], arg_dict, path=arg_dict['result_path'], tag="test")