-
Notifications
You must be signed in to change notification settings - Fork 320
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge branch 'master' into database/show_upgrade_versions
- Loading branch information
Showing
7 changed files
with
233 additions
and
1 deletion.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
139 changes: 139 additions & 0 deletions
139
qcodes/tests/dataset/legacy_DB_generation/generate_version_3.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,139 @@ | ||
# Generate version 3 database files for qcodes' test suite to consume | ||
|
||
import os | ||
import numpy as np | ||
|
||
# NB: it's important that we do not import anything from qcodes before we | ||
# do the git magic (which we do below), hence the relative import here | ||
import utils as utils | ||
|
||
|
||
# Absolute path to the shared db-file fixture directory: two levels up
# from this script, then into fixtures/db_files.
_this_file = os.path.realpath(__file__)
fixturepath = os.path.join(os.path.dirname(os.path.dirname(_this_file)),
                           'fixtures', 'db_files')
|
||
|
||
def generate_empty_DB_file():
    """
    Create the minimal version-3 database fixture: a .db file with no
    runs in it at all.
    """
    # Import lazily: per the note at the top of this module, qcodes must
    # not be imported before the caller has performed the git checkout.
    import qcodes.dataset.sqlite_base as sqlite_base

    target_dir = os.path.join(fixturepath, 'version3')
    os.makedirs(target_dir, exist_ok=True)
    db_path = os.path.join(target_dir, 'empty.db')

    # Start from a clean slate if a previous generation left a file behind
    if os.path.exists(db_path):
        os.remove(db_path)

    # Connecting creates the file (and its schema) on disk
    sqlite_base.connect(db_path)
|
||
|
||
def generate_DB_file_with_some_runs_having_not_run_descriptions():
    """
    Generate a .db-file with a handful of runs some of which lack run
    description or have it as empty object (based on a real case).

    Generated runs:
        #1: run with parameters and correct run description
        #2: run with parameters but run description is NULL
        #3: run with parameters but run description is empty RunDescriber
        #4: run without parameters but run description is NULL

    Raises:
        RuntimeError: if the freshly created runs do not get the
            sequential run ids this generator relies on.
    """
    v3fixturepath = os.path.join(fixturepath, 'version3')
    os.makedirs(v3fixturepath, exist_ok=True)
    path = os.path.join(v3fixturepath, 'some_runs_without_run_description.db')

    if os.path.exists(path):
        os.remove(path)

    # Import lazily: qcodes must not be imported before the caller has
    # checked out the old version of the code (see module-level note).
    from qcodes.dataset.measurements import Measurement
    from qcodes.dataset.experiment_container import Experiment
    from qcodes import Parameter
    from qcodes.dataset.descriptions import RunDescriber
    from qcodes.dataset.dependencies import InterDependencies

    exp = Experiment(path_to_db=path,
                     name='experiment_1',
                     sample_name='no_sample_1')
    conn = exp.conn

    # Now make some parameters to use in measurements
    params = [Parameter(f'p{n}', label=f'Parameter {n}',
                        unit=f'unit {n}', set_cmd=None, get_cmd=None)
              for n in range(5)]

    # Set up a measurement: p2/p3 depend on p0/p1, p4 is measured
    # against setpoints p2 and p3

    meas = Measurement(exp)
    meas.register_parameter(params[0])
    meas.register_parameter(params[1])
    meas.register_parameter(params[2], basis=(params[0],))
    meas.register_parameter(params[3], basis=(params[1],))
    meas.register_parameter(params[4], setpoints=(params[2], params[3]))

    # Initially make 3 correct runs

    run_ids = []

    for _ in range(3):

        with meas.run() as datasaver:

            for x in np.random.rand(10):
                for y in np.random.rand(10):
                    z = np.random.rand()
                    datasaver.add_result((params[2], x),
                                         (params[3], y),
                                         (params[4], z))

        run_ids.append(datasaver.run_id)

    # Explicit check instead of `assert`: this must not be stripped
    # when the script is run with `python -O`
    if run_ids != [1, 2, 3]:
        raise RuntimeError('Run ids of generated runs are not as '
                           'expected after generating runs #1-3')

    # Formulate SQL query for adjusting run_description column.
    # NB: plain string (no f-prefix needed); the `?` are DB-API
    # placeholders, not interpolation.
    set_run_description_sql = """
        UPDATE runs
        SET run_description = ?
        WHERE run_id == ?
        """

    # Make run_description of run #2 NULL

    conn.execute(set_run_description_sql, (None, run_ids[1]))
    conn.commit()  # just to be sure

    # Make run_description of run #3 equivalent to an empty RunDescriber

    empty_run_description = RunDescriber(InterDependencies()).to_json()
    conn.execute(set_run_description_sql, (empty_run_description, run_ids[2]))
    conn.commit()  # just to be sure

    # Set up a measurement without parameters, and create run #4 out of it

    meas_no_params = Measurement(exp)

    with meas_no_params.run() as datasaver:
        pass

    run_ids.append(datasaver.run_id)

    if run_ids != [1, 2, 3, 4]:
        raise RuntimeError('Run ids of generated runs are not as '
                           'expected after generating run #4')

    # Make run_description of run #4 NULL

    conn.execute(set_run_description_sql, (None, run_ids[3]))
    conn.commit()  # just to be sure
|
||
|
||
if __name__ == '__main__':
    # Each generator in this tuple is run by utils against the
    # checked-out version-3 snapshot of the codebase.
    generators = (
        generate_empty_DB_file,
        generate_DB_file_with_some_runs_having_not_run_descriptions,
    )

    # pylint: disable=E1101
    utils.checkout_to_old_version_and_run_generators(version=3,
                                                     gens=generators)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,45 @@ | ||
import os | ||
|
||
import pytest | ||
|
||
import qcodes | ||
from qcodes.dataset.data_set import DataSet | ||
from qcodes.dataset.dependencies import InterDependencies | ||
from qcodes.dataset.descriptions import RunDescriber | ||
from qcodes.dataset.sqlite_base import _fix_wrong_run_descriptions, \ | ||
get_user_version | ||
from qcodes.tests.dataset.temporary_databases import temporarily_copied_DB | ||
|
||
# Directory holding the db-file fixtures, next to the dataset test package
fixturepath = os.path.join(os.path.dirname(qcodes.tests.dataset.__file__),
                           'fixtures')
|
||
|
||
def test_fix_wrong_run_descriptions():
    """
    Run _fix_wrong_run_descriptions against the version-3 fixture db and
    verify that the broken runs come out with sensible descriptions.
    """
    v3fixpath = os.path.join(fixturepath, 'db_files', 'version3')

    if not os.path.exists(v3fixpath):
        pytest.skip(
            "No db-file fixtures found. You can generate test db-files"
            " using the scripts in the legacy_DB_generation folder")

    dbname_old = os.path.join(v3fixpath, 'some_runs_without_run_description.db')

    with temporarily_copied_DB(dbname_old, debug=False, version=3) as conn:

        assert get_user_version(conn) == 3

        # Run #1 has an intact description; the fixer should reproduce
        # it for the broken-but-parameterised runs #2 and #3
        reference_description = DataSet(conn=conn, run_id=1).description
        blank_description = RunDescriber(InterDependencies())

        _fix_wrong_run_descriptions(conn, [1, 2, 3, 4])

        for broken_run_id in (2, 3):
            repaired = DataSet(conn=conn, run_id=broken_run_id)
            assert reference_description == repaired.description

        # Run #4 never had parameters, so its repaired description is
        # the empty one
        assert blank_description == DataSet(conn=conn, run_id=4).description