From f18d6d0fc262911cfda6d2c9f8da37fbaaf53431 Mon Sep 17 00:00:00 2001
From: Musa-Sina-Ertugrul
Date: Thu, 25 Apr 2024 12:52:37 +0300
Subject: [PATCH] Solving merge problems #1268

---
 .../framework/composite/composite_analysis.py |  42 +--
 .../framework/experiment_data.py              | 334 ++++--------------
 test/framework/test_composite.py              |  15 +-
 3 files changed, 70 insertions(+), 321 deletions(-)

diff --git a/qiskit_experiments/framework/composite/composite_analysis.py b/qiskit_experiments/framework/composite/composite_analysis.py
index b16ca627a9..3150637d31 100644
--- a/qiskit_experiments/framework/composite/composite_analysis.py
+++ b/qiskit_experiments/framework/composite/composite_analysis.py
@@ -117,43 +117,11 @@ def copy(self):
     def _run_analysis(self, experiment_data: ExperimentData):
         child_data = experiment_data.child_data()
-
-<<<<<<< HEAD
-<<<<<<< HEAD
-<<<<<<< HEAD
         if len(child_data) == 0:
             # Child data is automatically created when composite result data is added.
             # Validate that child data size matches with number of analysis entries.
             experiment_data.create_child_data()
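+            # For example, a two-component parallel experiment is expected to end
+            # up with two child containers here, one per entry in
+            # ``metadata["component_metadata"]`` (see ``create_child_data`` below).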
-=======
-
-<<<<<<< HEAD
-        experiment_data._add_data(component_expdata,experiment_data.data())
->>>>>>> 2dbba8ac (Passed test new start)
-=======
-        experiment_data.add_data(experiment_data.data())
->>>>>>> 0bd3a186 (Updated add_data and deprecated _add_data #1268)
-=======
-        marginalized_data = self._marginalized_component_data(experiment_data.data())
-
-        for sub_expdata, sub_data in zip(component_expdata, marginalized_data):
-            # Clear any previously stored data and add marginalized data
-            sub_expdata._result_data.clear()
-            sub_expdata.add_data(sub_data)
->>>>>>> c79e888e (Updated add_data, _run_analysis, composite_test #1268)
-
-        if len(self._analyses) != len(child_data):
-            raise("analysis length and experiment lenggth are not same")
-
-        for sub_analysis, sub_data in zip(self._analyses, child_data):
-            # Since copy for replace result is handled at the parent level
-            # we always run with replace result on component analysis
-<<<<<<< HEAD
-            sub_analysis.run(sub_data, replace_results=True)
-=======
-            self._analyses[i].run(sub_expdata, replace_results=True)
->>>>>>> 0bd3a186 (Updated add_data and deprecated _add_data #1268)
-=======
+        if len(self._analyses) != len(child_data):
+            # Child data is automatically created when composite result data is added.
+            # Validate that child data size matches with number of analysis entries.
@@ -167,28 +135,22 @@ def _run_analysis(self, experiment_data: ExperimentData):
             # Since copy for replace result is handled at the parent level
             # we always run with replace result on component analysis
             sub_analysis.run(sub_data, replace_results=True)
->>>>>>> a3abf4d2 (Fix marginalize problems)
-
         # Analysis is running in parallel so we add loop to wait
         # for all component analysis to finish before returning
         # the parent experiment analysis results
         for sub_data in child_data:
             sub_data.block_for_results()
-
         # Optionally flatten results from all component experiments
         # for adding to the main experiment data container
         if self._flatten_results:
             analysis_results, figures = self._combine_results(child_data)
-<<<<<<< HEAD
-
-=======
->>>>>>> a3abf4d2 (Fix marginalize problems)
             for res in analysis_results:
                 # Override experiment ID because entries are flattened
                 res.experiment_id = experiment_data.experiment_id
             return analysis_results, figures

         return [], []
+
     def _set_flatten_results(self):
         """Recursively set flatten_results to True for all composite components."""
         self._flatten_results = True

diff --git a/qiskit_experiments/framework/experiment_data.py b/qiskit_experiments/framework/experiment_data.py
index 07c836e2e0..83d446213b 100644
--- a/qiskit_experiments/framework/experiment_data.py
+++ b/qiskit_experiments/framework/experiment_data.py
@@ -19,18 +19,12 @@
 from typing import Dict, Optional, List, Union, Any, Callable, Tuple, Iterator, TYPE_CHECKING
 from datetime import datetime, timezone
 from concurrent import futures
-<<<<<<< HEAD
 from functools import wraps, partial
-=======
 from threading import Event
-<<<<<<< HEAD
 from functools import wraps, singledispatch
->>>>>>> 0bd3a186 (Updated add_data and deprecated _add_data #1268)
 from collections import deque, defaultdict
-=======
 from functools import wraps, singledispatch, partial
 from collections import deque
->>>>>>> a3abf4d2 (Fix marginalize problems)
 import contextlib
 import copy
 import uuid
@@ -46,12 +40,9 @@
 from qiskit.result import Result
 from qiskit.result import marginal_distribution
 from qiskit.result.postprocess import format_counts_memory
-<<<<<<< HEAD
 from qiskit.result import marginal_distribution
 from qiskit.result.postprocess import format_counts_memory
 from qiskit.result.utils import marginal_memory
-=======
->>>>>>> a3abf4d2 (Fix marginalize problems)
 from qiskit.providers.jobstatus import JobStatus, JOB_FINAL_STATES
 from qiskit.exceptions import QiskitError
 from qiskit.providers import Job, Backend, Provider
@@ -74,13 +65,10 @@
 from qiskit_experiments.framework.analysis_result_data import AnalysisResultData
 from qiskit_experiments.framework.analysis_result_table import AnalysisResultTable
 from qiskit_experiments.framework import BackendData
-<<<<<<< HEAD
 from qiskit_experiments.framework.containers import ArtifactData
 from qiskit_experiments.framework import ExperimentStatus, AnalysisStatus, AnalysisCallback
 from qiskit_experiments.framework.package_deps import qiskit_version
-=======
 from qiskit_experiments.exceptions import AnalysisError
->>>>>>> 1ed676e0 (Updated add_data #1268)
 from qiskit_experiments.database_service.exceptions import (
     ExperimentDataError,
     ExperimentEntryNotFound,
@@ -723,8 +711,7 @@ def source(self) -> Dict:

     # Data addition and deletion

-<<<<<<< HEAD
-    def _add_data(
+    def add_data(
         self,
         data: Union[Result, List[Result], Dict, List[Dict]],
     ) -> None:
@@ -741,6 +728,7 @@ def _add_data(
         Raises:
             TypeError: If the input data type is invalid.
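+
+        Example:
+            A minimal sketch of the dictionary form; the counts and metadata
+            shown here are illustrative assumptions, not values from this patch::
+
+                exp_data.add_data(
+                    {
+                        "counts": {"00": 500, "11": 524},
+                        "metadata": {"experiment_type": "FakeExperiment"},
+                    }
+                )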
""" + if any(not future.done() for future in self._analysis_futures.values()): LOG.warning( "Not all analysis has finished running. Adding new data may " @@ -750,193 +738,83 @@ def _add_data( data = [data] # Directly add non-job data - - with self._result_data.lock: - tmp_exp_data = ExperimentData() - composite_flag = False - experiment_seperator = defaultdict(lambda : ExperimentData()) - for datum in data: - if isinstance(datum, dict): - if "metadata" in datum and "composite_metadata" in datum["metadata"]: - composite_flag = True -<<<<<<< HEAD -<<<<<<< HEAD - marginalized_data = self._marginalized_component_data([datum]) - for inner_datum in marginalized_data: - #print(inner_datum) - if "experiment_type" in inner_datum[0]["metadata"]: - if inner_datum[0]["metadata"]["experiment_type"] in experiment_seperator: - experiment_seperator[inner_datum[0]["metadata"]["experiment_type"]].add_data(inner_datum[0]) - else: - experiment_seperator[inner_datum[0]["metadata"]["experiment_type"]] = ExperimentData() - experiment_seperator[inner_datum[0]["metadata"]["experiment_type"]].add_data(inner_datum[0]) -<<<<<<< HEAD -<<<<<<< HEAD - else: - self._result_data.append(datum) -======= - - self._result_data.append(datum) -======= - -======= - experiment_seperator[datum["metadata"]["experiment_type"]].add_data(datum["metadata"]["composite_metadata"]) -<<<<<<< HEAD - ->>>>>>> 0bd3a186 (Updated add_data and deprecated _add_data #1268) - elif "composite_metadata" in datum: - composite_flag = True - experiment_seperator[datum["experiment_type"]].add_data(datum["composite_metadata"]) - -<<<<<<< HEAD + for datum in data: + if isinstance(datum, dict): + with self._result_data.lock: self._result_data.append(datum) ->>>>>>> 9eb2dba0 (Updated add_data tests passed #1268) -======= -======= -======= - experiment_seperator[datum["metadata"]["composite_index"]].add_data(datum["metadata"]["composite_metadata"]) ->>>>>>> 8f212786 (commit before second approach) - marginalized_datum = self._marginalized_component_data([datum]) - for inner_datum in marginalized_datum: - for inner_inner_datum in inner_datum: - experiment_seperator[datum["metadata"]["composite_index"]].add_data([inner_inner_datum]) - elif "composite_metadata" in datum: - composite_flag = True - experiment_seperator[datum["composite_index"]].add_data(datum["composite_metadata"]) - marginalized_datum = self._marginalized_component_data([datum]) - for inner_datum in marginalized_datum: - for inner_inner_datum in inner_datum: - experiment_seperator[datum["composite_index"]].add_data([inner_inner_datum]) - ->>>>>>> dd257a28 (Updated add_data #1268) - if datum not in self._result_data: - self._result_data.append(datum) ->>>>>>> 0bd3a186 (Updated add_data and deprecated _add_data #1268) - ->>>>>>> e7f46c3a (Updated add_data tests passed #1268) - elif isinstance(datum, Result): - self._add_result_data(datum) - else: - raise TypeError(f"Invalid data type {type(datum)}.") - - if composite_flag: - - tmp_exp_data._set_child_data(list(experiment_seperator.values())) -<<<<<<< HEAD - if self._child_data.values() != []: - self.add_child_data(tmp_exp_data) - else: - self._set_child_data([tmp_exp_data]) + elif isinstance(datum, Result): + self._add_result_data(datum) + else: + raise TypeError(f"Invalid data type {type(datum)}.") + self.create_child_data() + self._init_children_data() - def __add_data( - self, - data: Union[Result, List[Result], Dict, List[Dict]], - ) -> None: - """Add experiment data. 

-    def __add_data(
-        self,
-        data: Union[Result, List[Result], Dict, List[Dict]],
-    ) -> None:
-        """Add experiment data.
+    @property
+    def __retrieve_self_attrs_as_dict(self) -> dict:
+        """Return the attributes shared with bootstrapped child containers."""
+        return {
+            "backend": self.backend,
+            "tags": self.tags,
+            "auto_save": self.auto_save,
+            "service": self.service,
+            "provider": self.provider,
+            "backend_name": self.backend_name,
+            "notes": self.notes,
+            "start_datetime": self.start_datetime,
+            "verbose": self.verbose,
+            "source": self.source,
+            "share_level": self.share_level,
+            "experiment_type": self.experiment_type,
+        }

-        Args:
-            data: Experiment data to add. Several types are accepted for convenience:
+    def create_child_data(self) -> "ExperimentData":  # pylint: disable=inconsistent-return-statements

-            * Result: Add data from this ``Result`` object.
-            * List[Result]: Add data from the ``Result`` objects.
-            * Dict: Add this data.
-            * List[Dict]: Add this list of data.
+        """Bootstrap child experiment data containers from result metadata.

-        Raises:
-            TypeError: If the input data type is invalid.
+        Returns:
+            The current instance, populated with the child experiment data.
         """
-        if any(not future.done() for future in self._analysis_futures.values()):
-            LOG.warning(
-                "Not all analysis has finished running. Adding new data may "
-                "create unexpected analysis results."
-            )
-        if not isinstance(data, list):
-            data = [data]
-        # Directly add non-job data
-        for datum in data:
-            if isinstance(datum, dict):
-                with self._result_data.lock:
-                    self._result_data.append(datum)
-            elif isinstance(datum, Result):
-                if datum["metadata"]:
-                    self._set_child_data(datum["metadata"]._metadata())
-                else:
-                    self._add_result_data(datum)
-            else:
-                raise TypeError(f"Invalid data type {type(datum)}.")
-=======
-                self.add_child_data(tmp_exp_data)
->>>>>>> c79e888e (Updated add_data, _run_analysis, composite_test #1268)
+        if (component_metadata := self.metadata.get("component_metadata", None)) is None:
+            return

-=======
->>>>>>> 745669fd (Tests passed second approach, Updated add_data #1268)
-    def add_data(
-        self,
-        data: Union[Result, List[Result], Dict, List[Dict]],
-    ) -> None:
-        """Add experiment data.
+        while (new_idx := len(self._child_data)) < len(component_metadata):
+            child_data = ExperimentData(**self.__retrieve_self_attrs_as_dict)
+            # Add automatically generated component experiment metadata
+            try:
+                this_data = component_metadata[new_idx].copy()
+                child_data.metadata.update(this_data)
+            except (KeyError, IndexError):
+                pass
+            try:
+                component_type = self.metadata["component_types"][new_idx]
+                child_data.experiment_type = component_type
+            except (KeyError, IndexError):
+                pass
+            self.add_child_data(child_data)

-        Args:
-            data: Experiment data to add. Several types are accepted for convenience:
+        return self
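+
+    # For example (illustrative metadata, not from this patch): with
+    #   metadata = {"component_types": ["T1", "T2Ramsey"],
+    #               "component_metadata": [{...}, {...}]}
+    # ``create_child_data`` adds two children that share this container's
+    # backend, tags, and service settings, so ``child_data(0)`` holds the T1
+    # component and ``child_data(1)`` the T2Ramsey component.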

-            * Result: Add data from this ``Result`` object.
-            * List[Result]: Add data from the ``Result`` objects.
-            * Dict: Add this data.
-            * List[Dict]: Add this list of data.
+    def _init_children_data(self):  # pylint: disable=inconsistent-return-statements

-        Raises:
-            TypeError: If the input data type is invalid.
+        """Bootstrap the child experiment data containers' result data.

-        if any(not future.done() for future in self._analysis_futures.values()):
-            LOG.warning(
-                "Not all analysis has finished running. Adding new data may "
-                "create unexpected analysis results."
-            )
-        if not isinstance(data, list):
-            data = [data]
+        Returns:
+            The current instance, to allow method chaining.
+        """

-        # Directly add non-job data
-        for datum in data:
-            if isinstance(datum, dict):
-                self._add_canonical_dict_data(datum)
-            elif isinstance(datum, Result):
-                self._add_result_data(datum)
-            else:
-                raise TypeError(f"Invalid data type {type(datum)}.")
+        if self.metadata.get("component_metadata", None) is None:
+            return

-    def _add_canonical_dict_data(self, data: dict):
-        """A common subroutine to store result dictionary in canonical format.
+        with self._result_data.lock:
+            for data in self._result_data:
+                for idx, sub_data in self._decompose_component_data(data):
+                    # NOTE: Skip entries already present in the child container;
+                    # duplicates can occur when this method is re-run over
+                    # previously stored results.
+                    if sub_data not in self.child_data(idx).data():
+                        self.child_data(idx).add_data(sub_data)

-        Args:
-            data: A single formatted entry of experiment results.
-                ExperimentData expects this data dictionary to include keys such as
-                metadata, counts, memory and so forth.
+        return self
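+
+    # Sketch of what ``_decompose_component_data`` yields (illustrative values,
+    # not from this patch): for a composite datum with counts
+    # {"00": 10, "01": 5, "10": 7, "11": 3} and metadata
+    # {"composite_index": [0, 1], "composite_clbits": [[0], [1]]},
+    # component 0 receives counts {"0": 17, "1": 8} (marginalized onto clbit 0)
+    # and component 1 receives counts {"0": 15, "1": 10} (marginalized onto clbit 1).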

-        """
-        if "metadata" in data and "composite_metadata" in data["metadata"]:
-            composite_index = data["metadata"]["composite_index"]
-            max_index = max(composite_index)
-            with self._child_data.lock:
-                while (new_idx := len(self._child_data)) <= max_index:
-                    child_data = ExperimentData()
-                    # Add automatically generated component experiment metadata
-                    try:
-                        component_metadata = self.metadata["component_metadata"][new_idx].copy()
-                        child_data.metadata.update(component_metadata)
-                    except (KeyError, IndexError):
-                        pass
-                    try:
-                        component_type = self.metadata["component_types"][new_idx]
-                        child_data.experiment_type = component_type
-                    except (KeyError, IndexError):
-                        pass
-                    self.add_child_data(child_data)
-            for idx, sub_data in self._decompose_component_data(data):
-                self.child_data(idx).add_data(sub_data)
-        else:
-            with self._result_data.lock:
-                self._result_data.append(data)

     @staticmethod
     def _decompose_component_data(
@@ -973,58 +851,6 @@ def _decompose_component_data(
         else:
             formatted_mem = None

-<<<<<<< HEAD
-        # Pre-process the memory if any to avoid redundant calls to format_counts_memory
-        f_memory = None
-        if (
-            "memory" in datum
-            and composite_clbits is not None
-            and isinstance(datum["memory"][0], str)
-        ):
-            f_memory = marginal_memory(datum["memory"], composite_clbits)
-
-        if "composite_index" not in metadata:
-            continue
-
-        for i, index in enumerate(metadata["composite_index"]):
-            if index not in marginalized_data:
-                # Initialize data list for marginalized
-                marginalized_data[index] = []
-            sub_data = {"metadata": metadata["composite_metadata"][i]}
-            if "counts" in datum:
-                if composite_clbits is not None:
-                    sub_data["counts"] = marginal_distribution(
-                        counts=datum["counts"],
-                        indices=composite_clbits[i],
-                    )
-                else:
-                    sub_data["counts"] = datum["counts"]
-            if "memory" in datum:
-                if composite_clbits is not None:
-                    # level 2
-                    if f_memory is not None:
-                        idx = slice(
-                            -1 - composite_clbits[i][-1], -composite_clbits[i][0] or None
-                        )
-                        sub_data["memory"] = [shot[idx] for shot in f_memory]
-                    # level 1
-                    else:
-                        mem = np.array(datum["memory"])
-
-                        # Averaged level 1 data
-                        if len(mem.shape) == 2:
-                            sub_data["memory"] = mem[composite_clbits[i]].tolist()
-                        # Single-shot level 1 data
-                        if len(mem.shape) == 3:
-                            sub_data["memory"] = mem[:, composite_clbits[i]].tolist()
-                else:
-                    sub_data["memory"] = datum["memory"]
-            marginalized_data[index].append(sub_data)
-
-        # Sort by index
-        return [marginalized_data[i] for i in sorted(marginalized_data.keys())]
-
-=======
         for i, exp_idx in enumerate(metadata["composite_index"]):
             sub_data = tmp_sub_data.copy()
             try:
@@ -1061,7 +887,6 @@ def _decompose_component_data(
                 sub_data["memory"] = composite_data["memory"]

             yield exp_idx, sub_data
->>>>>>> a3abf4d2 (Fix marginalize problems)

     def add_jobs(
         self,
         jobs: Union[Job, List[Job]],
@@ -1316,11 +1141,6 @@ def _add_result_data(self, result: Result, job_id: Optional[str] = None) -> None:
         if job_id not in self._jobs:
             self._jobs[job_id] = None
             self.job_ids.append(job_id)
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-
->>>>>>> a3abf4d2 (Fix marginalize problems)
         for i, _ in enumerate(result.results):
             data = result.data(i)
             data["job_id"] = job_id
             if "counts" in data:
                 # Format to Counts object rather than hex dict
                 data["counts"] = result.get_counts(i)
             expr_result = result.results[i]
             if hasattr(expr_result, "header") and hasattr(expr_result.header, "metadata"):
                 data["metadata"] = expr_result.header.metadata
             data["shots"] = expr_result.shots
             data["meas_level"] = expr_result.meas_level
             if hasattr(expr_result, "meas_return"):
                 data["meas_return"] = expr_result.meas_return
-<<<<<<< HEAD
             self.add_data(data)
-=======
-        with self._result_data.lock:
-            # Lock data while adding all result data
-            results = []
-            for i, _ in enumerate(result.results):
-                data = result.data(i)
-                data["job_id"] = job_id
-                if "counts" in data:
-                    # Format to Counts object rather than hex dict
-                    data["counts"] = result.get_counts(i)
-                expr_result = result.results[i]
-                if hasattr(expr_result, "header") and hasattr(expr_result.header, "metadata"):
-                    data["metadata"] = expr_result.header.metadata
-                data["shots"] = expr_result.shots
-                data["meas_level"] = expr_result.meas_level
-                if hasattr(expr_result, "meas_return"):
-                    data["meas_return"] = expr_result.meas_return
-                results.append(data)
-
-            self.add_data(results)
->>>>>>> 5e4b9d2d (Updated add_data and _add_result_data, deprecated _add_data #1268)
-=======
-            self._add_canonical_dict_data(data)
->>>>>>> a3abf4d2 (Fix marginalize problems)
+
     def _retrieve_data(self):
         """Retrieve job data if missing experiment data."""

diff --git a/test/framework/test_composite.py b/test/framework/test_composite.py
index ad39059f5d..fba6bbe467 100644
--- a/test/framework/test_composite.py
+++ b/test/framework/test_composite.py
@@ -14,11 +14,13 @@

 import copy
 import uuid
+from itertools import tee
 from test.fake_experiment import FakeExperiment, FakeAnalysis
 from test.base import QiskitExperimentsTestCase
 from unittest import mock

 from ddt import ddt, data
+import pandas as pd
 from qiskit import QuantumCircuit
 from qiskit.result import Result
@@ -27,6 +29,7 @@

 from qiskit_ibm_experiment import IBMExperimentService

+from qiskit_experiments.database_service import Qubit
 from qiskit_experiments.exceptions import QiskitError
 from qiskit_experiments.test.utils import FakeJob
 from qiskit_experiments.test.fake_backend import FakeBackend
@@ -948,7 +951,6 @@ def test_batch_transpile_options_integrated(self):
         expdata = self.batch2.run(backend, noise_model=noise_model, shots=1000)
         self.assertExperimentDone(expdata)
-<<<<<<< HEAD

         self.assertEqual(expdata.child_data(0).analysis_results("non-zero counts").value, 8)
         self.assertEqual(
             expdata.child_data(1).child_data(0).analysis_results("non-zero counts").value, 16
         )
         self.assertEqual(
             expdata.child_data(1).child_data(1).analysis_results("non-zero counts").value, 4
         )
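+        # Index sketch: batch2's second child is itself a batch, so its inner
+        # components are reached as child_data(1).child_data(0) and
+        # child_data(1).child_data(1), not child_data(1) and child_data(2).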
-=======
-
-        self.assertEqual(expdata.child_data(0).analysis_results(0).value, 8)
-<<<<<<< HEAD
-        self.assertEqual(expdata.child_data(1).child_data(1).analysis_results(0).value, 16)
-        self.assertEqual(expdata.child_data(1).child_data(2).analysis_results(0).value, 4)
->>>>>>> c79e888e (Updated add_data, _run_analysis, composite_test #1268)
-=======
-        self.assertEqual(expdata.child_data(1).child_data(0).analysis_results(0).value, 16)
-        self.assertEqual(expdata.child_data(1).child_data(1).analysis_results(0).value, 4)
->>>>>>> 73db5bde (Tests passed , Finished second approach add_data #1268)

     def test_separate_jobs(self):
         """Test the separate_job experiment option"""