From 21a265c63f0899e1b7a188da80ffe31d862ca4bc Mon Sep 17 00:00:00 2001
From: Artur Niederfahrenhorst
Date: Wed, 17 May 2023 00:47:02 +0200
Subject: [PATCH] [RLlib] Add missing `sampler_results` key to fetch min
 desired reward in RLlib release tests (#35354)

Signed-off-by: Artur Niederfahrenhorst
---
 rllib/utils/test_utils.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/rllib/utils/test_utils.py b/rllib/utils/test_utils.py
index 6d1db91141047..fd57836fb47b8 100644
--- a/rllib/utils/test_utils.py
+++ b/rllib/utils/test_utils.py
@@ -786,9 +786,9 @@ def should_check_eval(experiment):
         check_eval = should_check_eval(e)
 
         episode_reward_key = (
-            "episode_reward_mean"
+            "sampler_results/episode_reward_mean"
             if not check_eval
-            else "evaluation/episode_reward_mean"
+            else "evaluation/sampler_results/episode_reward_mean"
         )
 
         # For smoke-tests, we just run for n min.
@@ -904,14 +904,18 @@ def should_check_eval(experiment):
         if check_eval:
             episode_reward_mean = np.mean(
                 [
-                    t.metric_analysis["evaluation/episode_reward_mean"]["max"]
+                    t.metric_analysis[
+                        "evaluation/sampler_results/episode_reward_mean"
+                    ]["max"]
                     for t in trials_for_experiment
                 ]
             )
         else:
             episode_reward_mean = np.mean(
                 [
-                    t.metric_analysis["episode_reward_mean"]["max"]
+                    t.metric_analysis["sampler_results/episode_reward_mean"][
+                        "max"
+                    ]
                     for t in trials_for_experiment
                 ]
             )
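
Note (not part of the patch): the change swaps the flat `episode_reward_mean`
metric key for a "/"-separated path into the nested result dict that RLlib
reports, with rollout metrics under `sampler_results` and, for evaluation
runs, under `evaluation/sampler_results`. Below is a minimal sketch of how
such a key resolves against a nested result dict; the `get_nested` helper
and the sample values are hypothetical illustrations, not code from the
patch or from Tune's actual `metric_analysis` machinery.

    def get_nested(result: dict, key: str):
        """Resolve a "/"-separated metric key against a nested result dict."""
        node = result
        for part in key.split("/"):
            node = node[part]  # raises KeyError if the path does not exist
        return node

    # Toy result dict mirroring the nesting the new keys assume.
    result = {
        "sampler_results": {"episode_reward_mean": 42.0},
        "evaluation": {"sampler_results": {"episode_reward_mean": 40.5}},
    }

    assert get_nested(result, "sampler_results/episode_reward_mean") == 42.0
    assert get_nested(
        result, "evaluation/sampler_results/episode_reward_mean"
    ) == 40.5

    # The old flat key no longer resolves against this structure:
    # get_nested(result, "episode_reward_mean") raises KeyError, because
    # "episode_reward_mean" only exists nested, not at the top level.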