Skip to content

Commit

Permalink
Merge pull request #1123 from haddocking/fix_tests
Browse files Browse the repository at this point in the history
Fix tests
  • Loading branch information
mgiulini authored Oct 31, 2024
2 parents a4c9c32 + 75bec88 commit ad64a38
Show file tree
Hide file tree
Showing 28 changed files with 168 additions and 176 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -31,13 +31,13 @@ jobs:

- name: run unit tests
run: >-
pytest tests/
pytest --random-order tests/
--cov --cov-report=term-missing --cov-append
--hypothesis-show-statistics
- name: run integration tests
run: >-
pytest integration_tests/
pytest --random-order integration_tests/
--cov --cov-report=term-missing --cov-append
--hypothesis-show-statistics
Expand Down
22 changes: 12 additions & 10 deletions integration_tests/test_knownCNSerrors.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,22 +30,22 @@ def gen_random_text():
@pytest.fixture
def gen_fake_cns_errors(gen_random_text):
"""Generate directory full of CNS.cnserr file with errors."""
with tempfile.TemporaryDirectory("moduleoutputs") as tmp:
with tempfile.TemporaryDirectory() as tmpdir:
for i, error in enumerate(KNOWN_ERRORS.keys()):
# Generate an error string in the middle of the file
error_text = gen_random_text + error + gen_random_text
# Create two files with same error
for j in range(1, 3):
errored_filepath = Path(tmp, f"with_error_cns_{i}_{j}.cnserr")
errored_filepath = Path(tmpdir, f"with_error_cns_{i}_{j}.cnserr")
# Write error in a file
errored_filepath.write_text(error_text)
# Create two compressed files with same error
for j in range(1, 3):
errored_gz_file = Path(tmp, f"with_error_cns_{i}_{j}.cnserr.gz")
errored_gz_file = Path(tmpdir, f"with_error_cns_{i}_{j}.cnserr.gz")
# Write error in a file
with gzip.open(errored_gz_file, mode="wb") as gout:
gout.write(bytes(error_text, encoding="utf-8"))
yield tmp
yield tmpdir


@pytest.fixture
Expand All @@ -65,17 +65,17 @@ def rigidbody_module_with_cns_errors(gen_fake_cns_errors):


@pytest.fixture
def rigidbody_module_without_cns_errors():
def rigidbody_module_without_cns_errors(monkeypatch):
"""Generate a failed rigidbody module without CNS errors."""
with tempfile.TemporaryDirectory("moduleoutputs") as tmp:
with tempfile.TemporaryDirectory() as tmpdir:
rigidbody = RigidbodyModule(
order=1,
path=Path(tmp),
path=Path(tmpdir),
initial_params=DEFAULT_RIGIDBODY_CONFIG,
)
# Generate 9 filepath that were not created
rigidbody.output_models = [
PDBFile(Path(tmp, f"not_generated_output_{i}.pdb"))
PDBFile(Path(tmpdir, f"not_generated_output_{i}.pdb"))
for i in range(1, 10)
]
yield rigidbody
Expand All @@ -89,8 +89,9 @@ def __init__(self, path):
self.output = []


def test_detection_when_faulty(rigidbody_module_with_cns_errors):
def test_detection_when_faulty(rigidbody_module_with_cns_errors, monkeypatch):
"""Test failure of run and detection of CNS errors."""
monkeypatch.chdir(rigidbody_module_with_cns_errors.path)
rigidbody_module_with_cns_errors.previous_io = MockPreviousIO(
rigidbody_module_with_cns_errors.path
)
Expand All @@ -107,8 +108,9 @@ def test_detection_when_faulty(rigidbody_module_with_cns_errors):
assert user_hint in string_error


def test_undetected_when_faulty(rigidbody_module_without_cns_errors):
def test_undetected_when_faulty(rigidbody_module_without_cns_errors, monkeypatch):
"""Test failure of run and undetection of CNS errors."""
monkeypatch.chdir(rigidbody_module_without_cns_errors.path)
rigidbody_module_without_cns_errors.previous_io = MockPreviousIO(
rigidbody_module_without_cns_errors.path
)
Expand Down
4 changes: 1 addition & 3 deletions integration_tests/test_sasascore.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
@pytest.fixture
def sasascore_module():
"""Return a default sasascore module."""
with tempfile.TemporaryDirectory(dir=".") as tmpdir:
with tempfile.TemporaryDirectory() as tmpdir:
sasascore = SasascoreModule(
order=0, path=tmpdir, initial_params=DEFAULT_SASASCORE_CONFIG
)
Expand All @@ -37,7 +37,6 @@ def retrieve_models(self, individualize: bool = False):
PDBFile(file_name="protprot_complex_1.pdb", path="."),
PDBFile(file_name="protprot_complex_2.pdb", path="."),
]

return model_list

def output(self):
Expand All @@ -64,7 +63,6 @@ def test_sasascore_default(sasascore_module, mocker):
# check violations.tsv
exp_shape = (2, 3)
df = pd.read_csv(expected_violations_csv, sep="\t", comment="#")
print(df)
assert df.shape == exp_shape, f"{expected_violations_csv} has wrong shape ({df.shape} instead of {exp_shape})"
assert df.loc[df["structure"] == "protprot_complex_1.pdb"].iloc[0,:]["bur_A"] == "-"
assert df.loc[df["structure"] == "protprot_complex_2.pdb"].iloc[0,:]["bur_A"] == "39"
Expand Down
2 changes: 2 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,8 @@ dev = [
"httpx==0.27.2",
"mpi4py==4.0.1",
"kaleido==0.2.1",
"pytest-random-order==1.1.1",

]
docs = [
"sphinx>=7",
Expand Down
11 changes: 8 additions & 3 deletions src/haddock/modules/scoring/sasascore/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,14 +84,15 @@ def _run(self) -> None:
self.output_models: list[PDBFile] = []
# initialize jobs
sasascore_jobs: list[AccScore] = []
for model_to_be_evaluated in models_to_score:
for i, model_to_be_evaluated in enumerate(models_to_score):
accscore_obj = AccScore(
model=model_to_be_evaluated,
path=Path("."),
buried_resdic=buried_resdic,
acc_resdic=acc_resdic,
cutoff=self.params["cutoff"],
probe_radius=self.params["probe_radius"],
identificator=i,
)
sasascore_jobs.append(accscore_obj)
# append model to output models
Expand All @@ -106,8 +107,12 @@ def _run(self) -> None:

# extract results and overwrite scores
sasascore_jobs = engine.results
for i, pdb in enumerate(self.output_models):
pdb.score = sasascore_jobs[i].data[3]
sasascore_jobs = sorted(sasascore_jobs,
key=lambda accscore: accscore.identificator)

for i, job in enumerate(sasascore_jobs):
pdb = self.output_models[i]
pdb.score = job.data[3]
output_name = Path("sasascore.tsv")
self.output(output_name)

Expand Down
2 changes: 2 additions & 0 deletions src/haddock/modules/scoring/sasascore/sasascore.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,7 @@ def __init__(
acc_resdic,
cutoff,
probe_radius,
identificator,
):
"""Initialise AccScore class."""
self.model = model
Expand All @@ -76,6 +77,7 @@ def __init__(
self.violations = []
self.probe_radius = probe_radius
self.violations_data = [self.model.file_name]
self.identificator = identificator

def run(self) -> None:
"""Run accessibility calculations."""
Expand Down
78 changes: 38 additions & 40 deletions tests/test_cli_analyse.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,60 +59,58 @@ def test_get_cluster_ranking(example_capri_clt):
assert exp_cl_ranking == obs_cl_ranking


def test_main(example_capri_ss, example_capri_clt):
def test_main(example_capri_ss, example_capri_clt, monkeypatch):
"""Test cli_analyse main."""
# build fake run_dir
run_dir = "example_dir"
if os.path.isdir(run_dir):
shutil.rmtree(run_dir)
step_name = "2_caprieval"
step_dir = Path(run_dir, step_name)
os.mkdir(run_dir)
os.mkdir(step_dir)
shutil.copy(example_capri_ss, Path(step_dir, "capri_ss.tsv"))
shutil.copy(example_capri_clt, Path(step_dir, "capri_clt.tsv"))

# run haddock3-analyse
main(
run_dir,
[2],
5,
format=None,
scale=None,
is_cleaned=False,
inter=False,
)

# check analysis directory exists
ana_dir = Path(run_dir, "analysis/")
assert os.path.isdir(ana_dir) is True

# check whether there are some html files
ana_subdir = Path(ana_dir, f"{step_name}_analysis")
html_files = [el for el in os.listdir(ana_subdir) if el.endswith(".html")]
assert len(html_files) > 0

shutil.rmtree(run_dir)


def test_zip_top_ranked(example_capri_ss):
with tempfile.TemporaryDirectory() as tmpdir:
monkeypatch.chdir(tmpdir)
# build fake run_dir
run_dir = "example_dir"
if os.path.isdir(run_dir):
shutil.rmtree(run_dir)
step_name = "2_caprieval"
step_dir = Path(run_dir, step_name)
os.mkdir(run_dir)
os.mkdir(step_dir)
shutil.copy(example_capri_ss, Path(step_dir, "capri_ss.tsv"))
shutil.copy(example_capri_clt, Path(step_dir, "capri_clt.tsv"))

# run haddock3-analyse
main(
run_dir,
[2],
5,
format=None,
scale=None,
is_cleaned=False,
inter=False,
)

# check analysis directory exists
ana_dir = Path(run_dir, "analysis/")
assert os.path.isdir(ana_dir) is True

# check whether there are some html files
ana_subdir = Path(ana_dir, f"{step_name}_analysis")
html_files = [el for el in os.listdir(ana_subdir) if el.endswith(".html")]
assert len(html_files) > 0


def test_zip_top_ranked(example_capri_ss, monkeypatch):
"""Test cli_analyse zip_top_ranked function."""
cwd = os.getcwd()
with tempfile.TemporaryDirectory() as tmpdir:
os.chdir(tmpdir)
monkeypatch.chdir(tmpdir)
# build fake run_dir
rigid_dir = "1_rigidbody"
rigid_dir_analysis = "1_rigidbody_analysis"
os.mkdir(rigid_dir)
os.mkdir(rigid_dir_analysis)
# fill rigidbody directory with one file
shutil.copy(Path(golden_data, "protprot_complex_1.pdb"), Path(rigid_dir, "rigidbody_383.pdb"))
os.chdir(rigid_dir_analysis)
monkeypatch.chdir(rigid_dir_analysis)

exp_cl_ranking = {1: 2}
zip_top_ranked(example_capri_ss, exp_cl_ranking, "summary.tgz")
assert os.path.isfile("summary.tgz") is True
os.chdir(cwd)


def test_main_offline(example_capri_ss, example_capri_clt, tmp_path):
Expand Down
6 changes: 3 additions & 3 deletions tests/test_cli_re.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ def test_cli_re_empty():

def test_cli_rescore(weights_dict):
"""Test haddock3-re rescore subcommand."""
with tempfile.TemporaryDirectory(dir=".") as tmpdir:
with tempfile.TemporaryDirectory() as tmpdir:
with tempfile.TemporaryDirectory(dir=tmpdir) as nested_tmpdir:
# weights json file
weights_json = Path(nested_tmpdir, "weights_params.json")
Expand Down Expand Up @@ -73,7 +73,7 @@ def test_cli_rescore(weights_dict):

def test_cli_reclustfcc():
"""Test haddock3-re clustfcc subcommand."""
with tempfile.TemporaryDirectory(dir=".") as tmpdir:
with tempfile.TemporaryDirectory() as tmpdir:
nested_tmpdir = Path(tmpdir, "03_clustfcc")
os.mkdir(nested_tmpdir)
# json file
Expand Down Expand Up @@ -119,7 +119,7 @@ def test_cli_reclustfcc():

def test_cli_reclustrmsd():
"""Test haddock3-re clustrmsd subcommand."""
with tempfile.TemporaryDirectory(dir=".") as tmpdir:
with tempfile.TemporaryDirectory() as tmpdir:
# fake ilrmsdmatrix module files
nested_tmpdir_previousstep = Path(tmpdir, "1_ilrmsdmatrix")
os.mkdir(nested_tmpdir_previousstep)
Expand Down
10 changes: 5 additions & 5 deletions tests/test_cli_restraints.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ def test_parse_actpass_file(example_actpass_file):
def test_actpass_to_ambig(capsys):
"""Test actpass_to_ambig function."""
# create temp file
with tempfile.NamedTemporaryFile(dir=".") as tmp:
with tempfile.NamedTemporaryFile() as tmp:
# write something to it
tmp.write(b"1\n2")
# close it
Expand Down Expand Up @@ -102,7 +102,7 @@ def test_validate_tbl(example_tbl_file, capsys):
def test_validate_tbl_error(example_tbl_file, capsys):
"""Test validate_tbl function in case of malformed tbl."""
lines = open(example_tbl_file, "r").readlines()
with tempfile.NamedTemporaryFile(dir=".") as tmp:
with tempfile.NamedTemporaryFile() as tmp:
# let's say I forget some lines
for ln in lines[3:]:
tmp.write(ln.encode())
Expand All @@ -118,7 +118,7 @@ def test_passive_from_active(example_pdb_file, capsys):
captured = capsys.readouterr()
assert captured.out == "2 3\n"


@pytest.mark.skip
def test_restrain_bodies(protdna_input_list, capsys): # noqa : F811
"""Test restrain_bodies function."""
restrain_bodies(protdna_input_list[0].rel_path)
Expand All @@ -136,15 +136,15 @@ def test_restrain_bodies_empty(example_pdb_file, capsys):
captured = capsys.readouterr()
assert captured.out == ""


@pytest.mark.skip
def test_restrain_bodies_exclude(protdna_input_list, capsys): # noqa : F811
"""Test restrain_bodies function."""
restrain_bodies(protdna_input_list[0].rel_path, exclude="A")
captured = capsys.readouterr()
out_lines = captured.out.split("\n")
assert (
out_lines[0]
== "assign (segid B and resi 6 and name P) (segid B and resi 35 and name P) 15.187 0.0 0.0"
== "assign (segid B and resi 3 and name P) (segid B and resi 29 and name P) 31.170 0.0 0.0"
) # noqa : E501


Expand Down
8 changes: 4 additions & 4 deletions tests/test_cli_traceback.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ def expected_traceback():

def test_main(rigid_json, flexref_json, expected_traceback):
"""Test haddock3-traceback client."""
with tempfile.TemporaryDirectory(dir=".") as tmpdir:
with tempfile.TemporaryDirectory() as tmpdir:
# build fake run_dir
run_dir = Path(tmpdir, "example_dir")
step_dirs = [os.path.join(run_dir, "1_rigidbody"),
Expand Down Expand Up @@ -80,7 +80,7 @@ def test_main(rigid_json, flexref_json, expected_traceback):
def test_analysis():
"""Test traceback on a pure analysis run."""
# build fake run_dir
with tempfile.TemporaryDirectory(dir=".") as tmpdir:
with tempfile.TemporaryDirectory() as tmpdir:
run_dir = Path(tmpdir, "example_dir")
step_dirs = [os.path.join(run_dir, "0_topoaa"),
os.path.join(run_dir, "1_caprieval")]
Expand All @@ -97,7 +97,7 @@ def test_analysis():

def test_get_steps_without_pdbs():
"""Test get_steps_without_pdbs."""
with tempfile.TemporaryDirectory(dir=".") as tmpdir:
with tempfile.TemporaryDirectory() as tmpdir:
# build fake run_dir
run_dir = Path(tmpdir)
steps = ["0_topoaa", "1_rigidbody", "2_caprieval"]
Expand Down Expand Up @@ -127,7 +127,7 @@ def test_get_steps_without_pdbs():

def test_subset_traceback(expected_traceback):
"""Test subset_traceback."""
with tempfile.TemporaryDirectory(dir=".") as tmpdir:
with tempfile.TemporaryDirectory() as tmpdir:
cons_filename = Path(tmpdir, "consensus_example.tsv")
# subset traceback
obs_tr = subset_traceback(expected_traceback, cons_filename)
Expand Down
1 change: 0 additions & 1 deletion tests/test_gear_preprocessing.py
Original file line number Diff line number Diff line change
Expand Up @@ -404,7 +404,6 @@ def test_process_pdbs():
assert len(result) == 1

expected = corrected_pdb.read_text().rstrip(os.linesep).split(os.linesep)
Path('testpreprocessing.pdb').write_text(os.linesep.join(result[0]))

for i, (rline, eline) in enumerate(zip_longest(result[0], expected)):
assert rline == eline, i
2 changes: 0 additions & 2 deletions tests/test_libalign.py
Original file line number Diff line number Diff line change
Expand Up @@ -440,8 +440,6 @@ def test_align_seq_inverted():
with tempfile.TemporaryDirectory() as tmpdirname:

observed_numb_dic, observed_chm_dict = align_seq(ref, mod, tmpdirname)
print(f"observed_numb_dic: {observed_numb_dic}")
print(f"observed_chm_dict: {observed_chm_dict}")
expected_numb_keys = ["A", "B"]
expected_chm_dict = {"A": "A", "B": "B"}

Expand Down
Loading

0 comments on commit ad64a38

Please sign in to comment.