Commit 4e37d0a: Fix tests
notoraptor committed Nov 30, 2022
1 parent 71dae0a commit 4e37d0a
Showing 5 changed files with 56 additions and 66 deletions.
4 changes: 2 additions & 2 deletions src/orion/benchmark/__init__.py
@@ -209,7 +209,7 @@ def get_experiments(self, silent=True):
"""Return all the experiments submitted in benchmark"""
experiment_table = []
for study in self.studies:
- for exp in study.get_experiments():
+ for _, exp in study.get_experiments():
exp_column = dict()
stats = exp.stats
exp_column["Algorithm"] = list(exp.configuration["algorithms"].keys())[
@@ -482,7 +482,7 @@ def get_experiments(self, algorithms=None):
for repetition_index, experiment in self.experiments_info:
if (
algorithms is None
- or type(experiment.algorithms.algorithm).__name__.lower() in algorithms
+ or list(experiment.algorithms.configuration.keys())[0] in algorithms
):
exps.append((repetition_index, experiment))
return exps
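
For context, a minimal usage sketch (not part of the commit) of the updated Study.get_experiments(), which now yields (repetition_index, experiment) pairs instead of bare experiment clients; `study` is assumed to be an already-populated orion.benchmark Study instance:

    # Hypothetical sketch: unpack the (repetition_index, experiment) pairs now
    # returned by Study.get_experiments(); filtering by algorithm name relies on
    # the first key of experiment.algorithms.configuration, as in the diff above.
    for repetition_index, experiment in study.get_experiments(algorithms=["tpe"]):
        print(repetition_index, list(experiment.algorithms.configuration.keys())[0])
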
6 changes: 3 additions & 3 deletions tests/functional/benchmark/test_benchmark_flow.py
@@ -10,8 +10,8 @@
from orion.storage.base import setup_storage

algorithms = [
{"algorithm": {"random": {"seed": 1}}},
{"algorithm": {"tpe": {"seed": 1}}},
{"random": {"seed": 1}},
{"tpe": {"seed": 1}},
]


@@ -74,7 +74,7 @@ def test_simple():

status = benchmark.status()

- experiments = benchmark.experiments()
+ experiments = benchmark.get_experiments()

assert len(experiments) == len(algorithms) * repetitions * len(assessments) * len(
tasks
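
As a reference, a sketch of the flat algorithm-configuration format these tests now use: each entry is either a plain algorithm name or a {name: kwargs} mapping, without the former {"algorithm": ...} wrapper. The exact entries below are illustrative:

    # Illustrative new-style algorithm configurations: no {"algorithm": ...} wrapper.
    algorithms = [
        "tpe",
        {"random": {"seed": 1}},
        {"gridsearch": {"n_values": 50}},
    ]
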
70 changes: 31 additions & 39 deletions tests/unittests/benchmark/test_assessments.py
@@ -21,12 +21,12 @@ class TestAverageRank:
def test_creation(self):
"""Test creation"""
ar1 = AverageRank()
- assert ar1.task_num == 1
- assert ar1.configuration == {"AverageRank": {"task_num": 1}}
+ assert ar1.repetitions == 1
+ assert ar1.configuration == {"AverageRank": {"repetitions": 1}}

- ar2 = AverageRank(task_num=5)
- assert ar2.task_num == 5
- assert ar2.configuration == {"AverageRank": {"task_num": 5}}
+ ar2 = AverageRank(repetitions=5)
+ assert ar2.repetitions == 5
+ assert ar2.configuration == {"AverageRank": {"repetitions": 5}}

def test_analysis(self, experiment_config, trial_config):
"""Test assessment plot"""
@@ -37,23 +37,20 @@ def test_analysis(self, experiment_config, trial_config):
experiment,
_,
):
- figure = ar1.analysis("task_name", [experiment])
+ figure = ar1.analysis("task_name", [(0, experiment)])

- assert (
- type(figure["AverageRank"]["task_name"]["rankings"])
- is plotly.graph_objects.Figure
- )
+ assert type(figure["rankings"]) is plotly.graph_objects.Figure

@pytest.mark.usefixtures("version_XYZ")
def test_figure_layout(self, orionstate, study_experiments_config):
"""Test assessment plot format"""
ar1 = AverageRank()

experiments = create_study_experiments(orionstate, **study_experiments_config)
- figure = ar1.analysis("task_name", experiments)
+ figure = ar1.analysis("task_name", enumerate(experiments))

assert_rankings_plot(
figure["AverageRank"]["task_name"]["rankings"],
figure["rankings"],
[
list(algorithm.keys())[0]
for algorithm in study_experiments_config["algorithms"]
@@ -69,12 +66,12 @@ class TestAverageResult:
def test_creation(self):
"""Test creation"""
ar1 = AverageResult()
- assert ar1.task_num == 1
- assert ar1.configuration == {"AverageResult": {"task_num": 1}}
+ assert ar1.repetitions == 1
+ assert ar1.configuration == {"AverageResult": {"repetitions": 1}}

- ar2 = AverageResult(task_num=5)
- assert ar2.task_num == 5
- assert ar2.configuration == {"AverageResult": {"task_num": 5}}
+ ar2 = AverageResult(repetitions=5)
+ assert ar2.repetitions == 5
+ assert ar2.configuration == {"AverageResult": {"repetitions": 5}}

def test_analysis(self, experiment_config, trial_config):
"""Test assessment plot"""
@@ -85,23 +82,20 @@ def test_analysis(self, experiment_config, trial_config):
_,
experiment,
):
- figure = ar1.analysis("task_name", [experiment])
+ figure = ar1.analysis("task_name", [(0, experiment)])

- assert (
- type(figure["AverageResult"]["task_name"]["regrets"])
- is plotly.graph_objects.Figure
- )
+ assert type(figure["regrets"]) is plotly.graph_objects.Figure

@pytest.mark.usefixtures("version_XYZ")
def test_figure_layout(self, orionstate, study_experiments_config):
"""Test assessment plot format"""
ar1 = AverageResult()

experiments = create_study_experiments(orionstate, **study_experiments_config)
- figure = ar1.analysis("task_name", experiments)
+ figure = ar1.analysis("task_name", enumerate(experiments))

assert_regrets_plot(
figure["AverageResult"]["task_name"]["regrets"],
figure["regrets"],
[
list(algorithm.keys())[0]
for algorithm in study_experiments_config["algorithms"]
@@ -118,54 +112,52 @@ def test_creation(self):
"""Test creation"""
pa1 = ParallelAssessment()
assert pa1.workers == [1, 2, 4]
- assert pa1.task_num == 3
+ assert pa1.repetitions == 3

- pa2 = ParallelAssessment(task_num=2)
+ pa2 = ParallelAssessment(repetitions=2)
assert pa2.workers == [1, 1, 2, 2, 4, 4]
- assert pa2.task_num == 6
+ assert pa2.repetitions == 6

pa3 = ParallelAssessment(executor="joblib", backend="threading")
assert pa1.workers == [1, 2, 4]
- assert pa1.task_num == 3
+ assert pa1.repetitions == 3
assert pa3.get_executor(0).n_workers == 1
assert pa3.get_executor(1).n_workers == 2
assert pa3.get_executor(2).n_workers == 4

@pytest.mark.usefixtures("version_XYZ")
def test_analysis(self, orionstate, study_experiments_config):
"""Test assessment plot format"""
- task_num = 2
+ repetitions = 2
n_workers = [1, 2, 4]
- pa1 = ParallelAssessment(task_num=task_num, n_workers=n_workers)
+ pa1 = ParallelAssessment(repetitions=repetitions, n_workers=n_workers)

study_experiments_config["task_number"] = task_num
study_experiments_config["task_number"] = repetitions
study_experiments_config["n_workers"] = n_workers
experiments = create_study_experiments(orionstate, **study_experiments_config)
- figure = pa1.analysis("task_name", experiments)
+ figure = pa1.analysis("task_name", zip(pa1.workers, experiments))

names = []
algorithms = []
for algorithm in study_experiments_config["algorithms"]:
algo = list(algorithm["algorithm"].keys())[0]
algo = list(algorithm.keys())[0]
algorithms.append(algo)

for worker in n_workers:
names.append(algo + "_workers_" + str(worker))

assert len(figure["ParallelAssessment"]["task_name"]) == 3
assert len(figure) == 3
assert_regrets_plot(
figure["ParallelAssessment"]["task_name"]["regrets"],
figure["regrets"],
names,
balanced=study_experiments_config["max_trial"],
with_avg=True,
)

asset_parallel_assessment_plot(
figure["ParallelAssessment"]["task_name"]["parallel_assessment"],
figure["parallel_assessment"],
algorithms,
3,
)

- assert_durations_plot(
- figure["ParallelAssessment"]["task_name"]["durations"], names
- )
+ assert_durations_plot(figure["durations"], names)
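
Taken together, a minimal sketch of the assessment API these tests now exercise, assuming `experiments` is a list of completed experiment clients for one task and that the assessment classes are importable from orion.benchmark.assessment as elsewhere in the test suite:

    # Assessments are now parameterized by `repetitions` rather than `task_num`,
    # and analysis() takes (repetition_index, experiment) pairs and returns a flat
    # dict of plotly figures keyed by plot name ("regrets", "rankings", ...).
    from orion.benchmark.assessment import AverageResult

    assessment = AverageResult(repetitions=len(experiments))
    assert assessment.configuration == {
        "AverageResult": {"repetitions": len(experiments)}
    }

    figures = assessment.analysis("task_name", enumerate(experiments))
    figures["regrets"].show()  # a plotly.graph_objects.Figure
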
29 changes: 13 additions & 16 deletions tests/unittests/benchmark/test_benchmark.py
@@ -48,8 +48,8 @@ def test_creation(self, benchmark, benchmark_algorithms):
"targets": [
{
"assess": {
"AverageResult": {"task_num": 2},
"AverageRank": {"task_num": 2},
"AverageResult": {"repetitions": 2},
"AverageRank": {"repetitions": 2},
},
"task": {
"RosenBrock": {"dim": 3, "max_trials": 25},
@@ -98,7 +98,7 @@ def test_status(
"""Test to get the status of a benchmark"""
experiments = create_study_experiments(orionstate, **study_experiments_config)

- study.experiments_info = experiments
+ study.experiments_info = list(enumerate(experiments))

benchmark.studies = [study]

@@ -125,7 +125,7 @@ def test_status(
def test_analysis(self, orionstate, benchmark, study, study_experiments_config):
"""Test to analysis benchmark result"""
experiments = create_study_experiments(orionstate, **study_experiments_config)
- study.experiments_info = experiments
+ study.experiments_info = list(enumerate(experiments))

benchmark.studies = [study]

@@ -149,7 +149,7 @@ def test_experiments(
"""Test to get experiments list of a benchmark"""
experiments = create_study_experiments(orionstate, **study_experiments_config)

- study.experiments_info = experiments
+ study.experiments_info = list(enumerate(experiments))

benchmark.studies = [study]

@@ -195,8 +195,8 @@ def test_creation_algorithms(self, benchmark):
"""Test study creation with all support algorithms input format"""

algorithms = [
{"algorithm": {"gridsearch": {"n_values": 1}}, "deterministic": True},
{"algorithm": "tpe"},
{"gridsearch": {"n_values": 1}},
"tpe",
{"random": {"seed": 1}},
"asha",
]
@@ -224,7 +224,7 @@ def test_setup_experiments(self, study):
study.setup_experiments()

assert len(study.experiments_info) == 4
- assert isinstance(study.experiments_info[0], ExperimentClient)
+ assert isinstance(study.experiments_info[0][1], ExperimentClient)

def test_execute(self, study):
"""Test to execute a study"""
@@ -249,7 +249,7 @@ def test_status(
"""Test to get status of a study"""
experiments = create_study_experiments(orionstate, **study_experiments_config)

- study.experiments_info = experiments
+ study.experiments_info = list(enumerate(experiments))

assert study.status() == [
{
@@ -280,14 +280,11 @@ def test_analysis(
"""Test to get the ploty figure of a study"""
experiments = create_study_experiments(orionstate, **study_experiments_config)

- study.experiments_info = experiments
+ study.experiments_info = list(enumerate(experiments))

figure = study.analysis()

- assert (
- type(figure[study.assess_name][study.task_name]["regrets"])
- is plotly.graph_objects.Figure
- )
+ assert type(figure["regrets"]) is plotly.graph_objects.Figure

def test_experiments(
self, orionstate, study, study_experiments_config, task_number
@@ -296,9 +293,9 @@ def test_experiments(
algo_num = len(study_experiments_config["algorithms"])
experiments = create_study_experiments(orionstate, **study_experiments_config)

- study.experiments_info = experiments
+ study.experiments_info = list(enumerate(experiments))

experiments = study.get_experiments()

assert len(experiments) == study_experiments_config["task_number"] * algo_num
- assert isinstance(experiments[0], ExperimentClient)
+ assert isinstance(experiments[0][1], ExperimentClient)
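
A small sketch of the data layout these tests now assume for Study.experiments_info, namely a list of (repetition_index, ExperimentClient) pairs; `experiments` is assumed to come from the create_study_experiments fixture helper, and the ExperimentClient import path below is an assumption:

    # experiments_info now stores (repetition_index, client) pairs, so both the
    # assignment in the tests and the assertions unpack tuples.
    from orion.client.experiment import ExperimentClient  # assumed import path

    study.experiments_info = list(enumerate(experiments))
    repetition_index, client = study.experiments_info[0]
    assert isinstance(client, ExperimentClient)
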
13 changes: 7 additions & 6 deletions tests/unittests/benchmark/test_benchmark_client.py
@@ -115,8 +115,8 @@ def test_create_with_invalid_algorithms(self, benchmark_config_py):

def test_create_with_deterministic_algorithm(self, benchmark_config_py):
algorithms = [
{"algorithm": {"random": {"seed": 1}}},
{"algorithm": {"gridsearch": {"n_values": 50}}, "deterministic": True},
{"random": {"seed": 1}},
{"gridsearch": {"n_values": 50}},
]
with OrionState() as cfg:
config = copy.deepcopy(benchmark_config_py)
@@ -130,7 +130,7 @@ def test_create_with_deterministic_algorithm(self, benchmark_config_py):
if algo == "gridsearch":
assert status["experiments"] == 1
else:
assert status["experiments"] == study.assessment.task_num
assert status["experiments"] == study.assessment.repetitions

def test_create_with_invalid_targets(self, benchmark_config_py):
"""Test creation with invalid Task and Assessment"""
@@ -162,7 +162,7 @@ def test_create_with_not_loaded_targets(self, benchmark_config):
"""Test creation with assessment or task does not exist or not loaded"""

cfg_invalid_assess = copy.deepcopy(benchmark_config)
cfg_invalid_assess["targets"][0]["assess"]["idontexist"] = {"task_num": 2}
cfg_invalid_assess["targets"][0]["assess"]["idontexist"] = {"repetitions": 2}

with OrionState(benchmarks=cfg_invalid_assess) as cfg:
with pytest.raises(NotImplementedError) as exc:
@@ -189,7 +189,7 @@ def test_create_with_not_exist_targets_parameters(self, benchmark_config):
"""Test creation with not existing assessment parameters"""

benchmark_config["targets"][0]["assess"]["AverageResult"] = {
"task_num": 2,
"repetitions": 2,
"idontexist": 100,
}

@@ -306,7 +306,8 @@ def submit(*args, c=count, **kwargs):

config["executor"] = executor
bm1 = get_or_create_benchmark(cfg.storage, **config)
- client = bm1.studies[0].experiments_info[0][1]
+ # This line now fails because bm1.studies[0].experiments_info has length zero.
+ # client = bm1.studies[0].experiments_info[0][1]

count.value = 0
bm1.process(n_workers=2)
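
Finally, a sketch of a benchmark targets entry in the updated configuration vocabulary, where assessments are configured with `repetitions` instead of `task_num`; the values mirror the test fixtures above and are illustrative:

    # Illustrative benchmark target: assessments now take `repetitions`.
    targets = [
        {
            "assess": {
                "AverageResult": {"repetitions": 2},
                "AverageRank": {"repetitions": 2},
            },
            "task": {
                "RosenBrock": {"dim": 3, "max_trials": 25},
            },
        }
    ]
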
