diff --git a/crates/accelerate/src/edge_collections.rs b/crates/accelerate/src/edge_collections.rs index 2fd06e5116f2..50d75a4ba01f 100644 --- a/crates/accelerate/src/edge_collections.rs +++ b/crates/accelerate/src/edge_collections.rs @@ -12,14 +12,15 @@ use numpy::IntoPyArray; use pyo3::prelude::*; -use pyo3::Python; + +use crate::nlayout::PhysicalQubit; /// A simple container that contains a vector representing edges in the /// coupling map that are found to be optimal by the swap mapper. #[pyclass(module = "qiskit._accelerate.stochastic_swap")] #[derive(Clone, Debug)] pub struct EdgeCollection { - pub edges: Vec<usize>, + pub edges: Vec<PhysicalQubit>, } impl Default for EdgeCollection { @@ -42,7 +43,7 @@ impl EdgeCollection { /// edge_start (int): The beginning edge. /// edge_end (int): The end of the edge. #[pyo3(text_signature = "(self, edge_start, edge_end, /)")] - pub fn add(&mut self, edge_start: usize, edge_end: usize) { + pub fn add(&mut self, edge_start: PhysicalQubit, edge_end: PhysicalQubit) { self.edges.push(edge_start); self.edges.push(edge_end); } @@ -57,11 +58,11 @@ impl EdgeCollection { self.edges.clone().into_pyarray(py).into() } - fn __getstate__(&self) -> Vec<usize> { + fn __getstate__(&self) -> Vec<PhysicalQubit> { self.edges.clone() } - fn __setstate__(&mut self, state: Vec<usize>) { + fn __setstate__(&mut self, state: Vec<PhysicalQubit>) { self.edges = state } } diff --git a/crates/accelerate/src/error_map.rs b/crates/accelerate/src/error_map.rs index d699d383a7d0..607813d3930b 100644 --- a/crates/accelerate/src/error_map.rs +++ b/crates/accelerate/src/error_map.rs @@ -13,6 +13,8 @@ use pyo3::exceptions::PyIndexError; use pyo3::prelude::*; +use crate::nlayout::PhysicalQubit; + use hashbrown::HashMap; /// A mapping that represents the avg error rate for a particular edge in @@ -34,7 +36,7 @@ use hashbrown::HashMap; #[pyclass(mapping, module = "qiskit._accelerate.error_map")] #[derive(Clone, Debug)] pub struct ErrorMap { - pub error_map: HashMap<[usize; 2], f64>, + pub error_map: HashMap<[PhysicalQubit; 2], f64>, } #[pymethods] @@ -60,26 +62,26 @@ impl ErrorMap { /// construct the error map iteratively with :meth:`.add_error` instead of /// constructing an intermediate dict and using this constructor. #[staticmethod] - fn from_dict(error_map: HashMap<[usize; 2], f64>) -> Self { + fn from_dict(error_map: HashMap<[PhysicalQubit; 2], f64>) -> Self { ErrorMap { error_map } } - fn add_error(&mut self, index: [usize; 2], error_rate: f64) { + fn add_error(&mut self, index: [PhysicalQubit; 2], error_rate: f64) { self.error_map.insert(index, error_rate); } - // The pickle protocol methods can't return `HashMap<[usize; 2], f64>` to Python, because by - // PyO3's natural conversion as of 0.17.3 it will attempt to construct a `dict[list[int], - // float]`, where `list[int]` is unhashable in Python. + // The pickle protocol methods can't return `HashMap<[T; 2], f64>` to Python, because by PyO3's + // natural conversion as of 0.17.3 it will attempt to construct a `dict[list[T], float]`, where + // `list[T]` is unhashable in Python.
- fn __getstate__(&self) -> HashMap<(usize, usize), f64> { + fn __getstate__(&self) -> HashMap<(PhysicalQubit, PhysicalQubit), f64> { self.error_map .iter() .map(|([a, b], value)| ((*a, *b), *value)) .collect() } - fn __setstate__(&mut self, state: HashMap<[usize; 2], f64>) { + fn __setstate__(&mut self, state: HashMap<[PhysicalQubit; 2], f64>) { self.error_map = state; } @@ -87,18 +89,18 @@ impl ErrorMap { Ok(self.error_map.len()) } - fn __getitem__(&self, key: [usize; 2]) -> PyResult<f64> { + fn __getitem__(&self, key: [PhysicalQubit; 2]) -> PyResult<f64> { match self.error_map.get(&key) { Some(data) => Ok(*data), None => Err(PyIndexError::new_err("No node found for index")), } } - fn __contains__(&self, key: [usize; 2]) -> PyResult<bool> { + fn __contains__(&self, key: [PhysicalQubit; 2]) -> PyResult<bool> { Ok(self.error_map.contains_key(&key)) } - fn get(&self, py: Python, key: [usize; 2], default: Option<f64>) -> PyObject { + fn get(&self, py: Python, key: [PhysicalQubit; 2], default: Option<f64>) -> PyObject { match self.error_map.get(&key).copied() { Some(val) => val.to_object(py), None => match default { diff --git a/crates/accelerate/src/nlayout.rs b/crates/accelerate/src/nlayout.rs index 87ae47a7fb43..871d3696ddcc 100644 --- a/crates/accelerate/src/nlayout.rs +++ b/crates/accelerate/src/nlayout.rs @@ -11,9 +11,76 @@ // that they have been altered from the originals. use pyo3::prelude::*; +use pyo3::types::PyList; use hashbrown::HashMap; +/// A newtype for the different categories of qubits used within layouts. This is to enforce +/// significantly more type safety when dealing with mixtures of physical and virtual qubits, as we +/// typically are when dealing with layouts. In Rust space, `NLayout` only works in terms of the +/// correct newtype, meaning that it's not possible to accidentally pass the wrong type of qubit to +/// a lookup. We can't enforce the same rules on integers in Python space without runtime +/// overhead, so we just allow conversion to and from any valid `PyLong`. +macro_rules! qubit_newtype { + ($id: ident) => { + #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] + pub struct $id(u32); + + impl $id { + #[inline] + pub fn new(val: u32) -> Self { + Self(val) + } + #[inline] + pub fn index(&self) -> usize { + self.0 as usize + } + } + + impl pyo3::IntoPy<PyObject> for $id { + fn into_py(self, py: Python<'_>) -> PyObject { + self.0.into_py(py) + } + } + impl pyo3::ToPyObject for $id { + fn to_object(&self, py: Python<'_>) -> PyObject { + self.0.to_object(py) + } + } + + impl pyo3::FromPyObject<'_> for $id { + fn extract(ob: &PyAny) -> PyResult<Self> { + Ok(Self(ob.extract()?)) + } + } + + unsafe impl numpy::Element for $id { + const IS_COPY: bool = true; + + fn get_dtype(py: Python<'_>) -> &numpy::PyArrayDescr { + u32::get_dtype(py) + } + } + }; +} + +qubit_newtype!(PhysicalQubit); +impl PhysicalQubit { + /// Get the virtual qubit that currently corresponds to this index of physical qubit in the + /// given layout. + pub fn to_virt(self, layout: &NLayout) -> VirtualQubit { + layout.phys_to_virt[self.index()] + } +} +qubit_newtype!(VirtualQubit); +impl VirtualQubit { + /// Get the physical qubit that currently corresponds to this index of virtual qubit in the + /// given layout.
+ pub fn to_phys(self, layout: &NLayout) -> PhysicalQubit { + layout.virt_to_phys[self.index()] + } +} + /// An unsigned integer Vector based layout class /// /// This class tracks the layout (or mapping between virtual qubits in the the @@ -27,90 +94,80 @@ use hashbrown::HashMap; #[pyclass(module = "qiskit._accelerate.stochastic_swap")] #[derive(Clone, Debug)] pub struct NLayout { - pub logic_to_phys: Vec<usize>, - pub phys_to_logic: Vec<usize>, -} - -impl NLayout { - pub fn swap(&mut self, idx1: usize, idx2: usize) { - self.phys_to_logic.swap(idx1, idx2); - self.logic_to_phys[self.phys_to_logic[idx1]] = idx1; - self.logic_to_phys[self.phys_to_logic[idx2]] = idx2; - } + virt_to_phys: Vec<PhysicalQubit>, + phys_to_virt: Vec<VirtualQubit>, } #[pymethods] impl NLayout { #[new] - #[pyo3(text_signature = "(qubit_indices, logical_qubits, physical_qubits, /)")] fn new( - qubit_indices: HashMap<usize, usize>, - logical_qubits: usize, + qubit_indices: HashMap<VirtualQubit, PhysicalQubit>, + virtual_qubits: usize, physical_qubits: usize, ) -> Self { let mut res = NLayout { - logic_to_phys: vec![std::usize::MAX; logical_qubits], - phys_to_logic: vec![std::usize::MAX; physical_qubits], + virt_to_phys: vec![PhysicalQubit(std::u32::MAX); virtual_qubits], + phys_to_virt: vec![VirtualQubit(std::u32::MAX); physical_qubits], }; - for (key, value) in qubit_indices { - res.logic_to_phys[key] = value; - res.phys_to_logic[value] = key; + for (virt, phys) in qubit_indices { + res.virt_to_phys[virt.index()] = phys; + res.phys_to_virt[phys.index()] = virt; } res } - fn __getstate__(&self) -> [Vec<usize>; 2] { - [self.logic_to_phys.clone(), self.phys_to_logic.clone()] + fn __getstate__(&self) -> (Vec<PhysicalQubit>, Vec<VirtualQubit>) { + (self.virt_to_phys.clone(), self.phys_to_virt.clone()) } - fn __setstate__(&mut self, state: [Vec<usize>; 2]) { - self.logic_to_phys = state[0].clone(); - self.phys_to_logic = state[1].clone(); + fn __setstate__(&mut self, state: (Vec<PhysicalQubit>, Vec<VirtualQubit>)) { + self.virt_to_phys = state.0; + self.phys_to_virt = state.1; } - /// Return the layout mapping + /// Return the layout mapping. /// /// .. note:: /// - /// this copies the data from Rust to Python and has linear - /// overhead based on the number of qubits. + /// This copies the data from Rust to Python and has linear overhead based on the number of + /// qubits. /// /// Returns: - /// list: A list of 2 element lists in the form: - /// ``[[logical_qubit, physical_qubit], ...]``. Where the logical qubit - /// is the index in the qubit index in the circuit. + /// list: A list of 2 element lists in the form ``[(virtual_qubit, physical_qubit), ...]``, + /// where the virtual qubit is the index in the qubit index in the circuit.
/// #[pyo3(text_signature = "(self, /)")] - fn layout_mapping(&self) -> Vec<[usize; 2]> { - (0..self.logic_to_phys.len()) - .map(|i| [i, self.logic_to_phys[i]]) - .collect() + fn layout_mapping(&self, py: Python<'_>) -> Py<PyList> { + PyList::new(py, self.iter_virtual()).into() } - /// Get physical bit from logical bit - #[pyo3(text_signature = "(self, logical_bit, /)")] - fn logical_to_physical(&self, logical_bit: usize) -> usize { - self.logic_to_phys[logical_bit] + /// Get physical bit from virtual bit + #[pyo3(text_signature = "(self, virtual, /)")] + pub fn virtual_to_physical(&self, r#virtual: VirtualQubit) -> PhysicalQubit { + self.virt_to_phys[r#virtual.index()] } - /// Get logical bit from physical bit - #[pyo3(text_signature = "(self, physical_bit, /)")] - pub fn physical_to_logical(&self, physical_bit: usize) -> usize { - self.phys_to_logic[physical_bit] + /// Get virtual bit from physical bit + #[pyo3(text_signature = "(self, physical, /)")] + pub fn physical_to_virtual(&self, physical: PhysicalQubit) -> VirtualQubit { + self.phys_to_virt[physical.index()] } /// Swap the specified virtual qubits #[pyo3(text_signature = "(self, bit_a, bit_b, /)")] - pub fn swap_logical(&mut self, bit_a: usize, bit_b: usize) { - self.logic_to_phys.swap(bit_a, bit_b); - self.phys_to_logic[self.logic_to_phys[bit_a]] = bit_a; - self.phys_to_logic[self.logic_to_phys[bit_b]] = bit_b; + pub fn swap_virtual(&mut self, bit_a: VirtualQubit, bit_b: VirtualQubit) { + self.virt_to_phys.swap(bit_a.index(), bit_b.index()); + self.phys_to_virt[self.virt_to_phys[bit_a.index()].index()] = bit_a; + self.phys_to_virt[self.virt_to_phys[bit_b.index()].index()] = bit_b; } /// Swap the specified physical qubits #[pyo3(text_signature = "(self, bit_a, bit_b, /)")] - pub fn swap_physical(&mut self, bit_a: usize, bit_b: usize) { - self.swap(bit_a, bit_b) + pub fn swap_physical(&mut self, bit_a: PhysicalQubit, bit_b: PhysicalQubit) { + self.phys_to_virt.swap(bit_a.index(), bit_b.index()); + self.virt_to_phys[self.phys_to_virt[bit_a.index()].index()] = bit_a; + self.virt_to_phys[self.phys_to_virt[bit_b.index()].index()] = bit_b; } pub fn copy(&self) -> NLayout { @@ -118,23 +175,44 @@ impl NLayout { } #[staticmethod] - pub fn generate_trivial_layout(num_qubits: usize) -> Self { + pub fn generate_trivial_layout(num_qubits: u32) -> Self { NLayout { - logic_to_phys: (0..num_qubits).collect(), - phys_to_logic: (0..num_qubits).collect(), + virt_to_phys: (0..num_qubits).map(PhysicalQubit).collect(), + phys_to_virt: (0..num_qubits).map(VirtualQubit).collect(), } } #[staticmethod] - pub fn from_logical_to_physical(logic_to_phys: Vec<usize>) -> Self { - let mut phys_to_logic = vec![std::usize::MAX; logic_to_phys.len()]; - for (logic, phys) in logic_to_phys.iter().enumerate() { - phys_to_logic[*phys] = logic; - } - NLayout { - logic_to_phys, - phys_to_logic, + pub fn from_virtual_to_physical(virt_to_phys: Vec<PhysicalQubit>) -> PyResult<Self> { + let mut phys_to_virt = vec![VirtualQubit(std::u32::MAX); virt_to_phys.len()]; + for (virt, phys) in virt_to_phys.iter().enumerate() { + phys_to_virt[phys.index()] = VirtualQubit(virt.try_into()?); } + Ok(NLayout { + virt_to_phys, + phys_to_virt, + }) + } +} + +impl NLayout { + /// Iterator of `(VirtualQubit, PhysicalQubit)` pairs, in order of the `VirtualQubit` indices.
+ pub fn iter_virtual( + &'_ self, + ) -> impl ExactSizeIterator<Item = (VirtualQubit, PhysicalQubit)> + '_ { + self.virt_to_phys + .iter() + .enumerate() + .map(|(v, p)| (VirtualQubit::new(v as u32), *p)) + } + /// Iterator of `(PhysicalQubit, VirtualQubit)` pairs, in order of the `PhysicalQubit` indices. + pub fn iter_physical( + &'_ self, + ) -> impl ExactSizeIterator<Item = (PhysicalQubit, VirtualQubit)> + '_ { + self.phys_to_virt + .iter() + .enumerate() + .map(|(p, v)| (PhysicalQubit::new(p as u32), *v)) } } diff --git a/crates/accelerate/src/sabre_layout.rs b/crates/accelerate/src/sabre_layout.rs index c347c2ae4980..799407e0adf7 100644 --- a/crates/accelerate/src/sabre_layout.rs +++ b/crates/accelerate/src/sabre_layout.rs @@ -21,7 +21,7 @@ use rand_pcg::Pcg64Mcg; use rayon::prelude::*; use crate::getenv_use_multiple_threads; -use crate::nlayout::NLayout; +use crate::nlayout::{NLayout, PhysicalQubit}; use crate::sabre_swap::neighbor_table::NeighborTable; use crate::sabre_swap::sabre_dag::SabreDAG; use crate::sabre_swap::swap_map::SwapMap; @@ -114,15 +114,16 @@ fn layout_trial( max_iterations: usize, num_swap_trials: usize, run_swap_in_parallel: bool, -) -> (NLayout, Vec<usize>, SabreResult) { - let num_physical_qubits = distance_matrix.shape()[0]; +) -> (NLayout, Vec<PhysicalQubit>, SabreResult) { + let num_physical_qubits: u32 = distance_matrix.shape()[0].try_into().unwrap(); let mut rng = Pcg64Mcg::seed_from_u64(seed); // Pick a random initial layout including a full ancilla allocation. let mut initial_layout = { - let mut physical_qubits: Vec<usize> = (0..num_physical_qubits).collect(); + let mut physical_qubits: Vec<PhysicalQubit> = + (0..num_physical_qubits).map(PhysicalQubit::new).collect(); physical_qubits.shuffle(&mut rng); - NLayout::from_logical_to_physical(physical_qubits) + NLayout::from_virtual_to_physical(physical_qubits).unwrap() }; // Sabre routing currently enforces that control-flow blocks return to their starting layout, @@ -175,9 +176,8 @@ fn layout_trial( Some(run_swap_in_parallel), ); let final_permutation = initial_layout - .phys_to_logic - .iter() - .map(|initial| final_layout.logic_to_phys[*initial]) + .iter_physical() + .map(|(_, virt)| virt.to_phys(&final_layout)) .collect(); (initial_layout, final_permutation, sabre_result) } diff --git a/crates/accelerate/src/sabre_swap/layer.rs b/crates/accelerate/src/sabre_swap/layer.rs index 5724f4137201..6c792b5364cd 100644 --- a/crates/accelerate/src/sabre_swap/layer.rs +++ b/crates/accelerate/src/sabre_swap/layer.rs @@ -15,56 +15,56 @@ use indexmap::IndexMap; use ndarray::prelude::*; use rustworkx_core::petgraph::prelude::*; -use crate::nlayout::NLayout; +use crate::nlayout::{NLayout, VirtualQubit}; /// A container for the current non-routable parts of the front layer. This only ever holds /// two-qubit gates; the only reason a 0q- or 1q operation can be unroutable is because it has an /// unsatisfied 2q predecessor, which disqualifies it from being in the front layer. pub struct FrontLayer { /// Map of the (index to the) node to the qubits it acts on. - nodes: IndexMap<NodeIndex, [usize; 2], ahash::RandomState>, + nodes: IndexMap<NodeIndex, [VirtualQubit; 2], ahash::RandomState>, /// Map of each qubit to the node that acts on it and the other qubit that node acts on, if this /// qubit is active (otherwise `None`). - qubits: Vec<Option<(NodeIndex, usize)>>, + qubits: Vec<Option<(NodeIndex, VirtualQubit)>>, } impl FrontLayer { - pub fn new(num_qubits: usize) -> Self { + pub fn new(num_qubits: u32) -> Self { FrontLayer { // This is the maximum capacity of the front layer, since each qubit must be one of a // pair, and can only have one gate in the layer.
nodes: IndexMap::with_capacity_and_hasher( - num_qubits / 2, + num_qubits as usize / 2, ahash::RandomState::default(), ), - qubits: vec![None; num_qubits], + qubits: vec![None; num_qubits as usize], } } /// Add a node into the front layer, with the two qubits it operates on. - pub fn insert(&mut self, index: NodeIndex, qubits: [usize; 2]) { + pub fn insert(&mut self, index: NodeIndex, qubits: [VirtualQubit; 2]) { let [a, b] = qubits; - self.qubits[a] = Some((index, b)); - self.qubits[b] = Some((index, a)); + self.qubits[a.index()] = Some((index, b)); + self.qubits[b.index()] = Some((index, a)); self.nodes.insert(index, qubits); } /// Remove a node from the front layer. pub fn remove(&mut self, index: &NodeIndex) { let [q0, q1] = self.nodes.remove(index).unwrap(); - self.qubits[q0] = None; - self.qubits[q1] = None; + self.qubits[q0.index()] = None; + self.qubits[q1.index()] = None; } /// Query whether a qubit has an active node. #[inline] - pub fn is_active(&self, qubit: usize) -> bool { - self.qubits[qubit].is_some() + pub fn is_active(&self, qubit: VirtualQubit) -> bool { + self.qubits[qubit.index()].is_some() } /// Calculate the score _difference_ caused by this swap, compared to not making the swap. #[inline] - pub fn score(&self, swap: [usize; 2], layout: &NLayout, dist: &ArrayView2) -> f64 { + pub fn score(&self, swap: [VirtualQubit; 2], layout: &NLayout, dist: &ArrayView2) -> f64 { if self.is_empty() { return 0.0; } @@ -76,13 +76,15 @@ impl FrontLayer { // equal anyway, so not affect the score. let [a, b] = swap; let mut total = 0.0; - if let Some((_, c)) = self.qubits[a] { - let p_c = layout.logic_to_phys[c]; - total += dist[[layout.logic_to_phys[b], p_c]] - dist[[layout.logic_to_phys[a], p_c]] + if let Some((_, c)) = self.qubits[a.index()] { + let p_c = c.to_phys(layout); + total += dist[[b.to_phys(layout).index(), p_c.index()]] + - dist[[a.to_phys(layout).index(), p_c.index()]] } - if let Some((_, c)) = self.qubits[b] { - let p_c = layout.logic_to_phys[c]; - total += dist[[layout.logic_to_phys[a], p_c]] - dist[[layout.logic_to_phys[b], p_c]] + if let Some((_, c)) = self.qubits[b.index()] { + let p_c = c.to_phys(layout); + total += dist[[a.to_phys(layout).index(), p_c.index()]] + - dist[[b.to_phys(layout).index(), p_c.index()]] } total / self.nodes.len() as f64 } @@ -93,7 +95,7 @@ impl FrontLayer { return 0.0; } self.iter() - .map(|(_, &[l_a, l_b])| dist[[layout.logic_to_phys[l_a], layout.logic_to_phys[l_b]]]) + .map(|(_, &[a, b])| dist[[a.to_phys(layout).index(), b.to_phys(layout).index()]]) .sum::() / self.nodes.len() as f64 } @@ -103,23 +105,23 @@ impl FrontLayer { pub fn routable_after( &self, routable: &mut Vec, - swap: &[usize; 2], + swap: &[VirtualQubit; 2], layout: &NLayout, coupling: &DiGraph<(), ()>, ) { let [a, b] = *swap; - if let Some((node, c)) = self.qubits[a] { + if let Some((node, c)) = self.qubits[a.index()] { if coupling.contains_edge( - NodeIndex::new(layout.logic_to_phys[b]), - NodeIndex::new(layout.logic_to_phys[c]), + NodeIndex::new(b.to_phys(layout).index()), + NodeIndex::new(c.to_phys(layout).index()), ) { routable.push(node); } } - if let Some((node, c)) = self.qubits[b] { + if let Some((node, c)) = self.qubits[b.index()] { if coupling.contains_edge( - NodeIndex::new(layout.logic_to_phys[a]), - NodeIndex::new(layout.logic_to_phys[c]), + NodeIndex::new(a.to_phys(layout).index()), + NodeIndex::new(c.to_phys(layout).index()), ) { routable.push(node); } @@ -133,7 +135,7 @@ impl FrontLayer { } /// Iterator over the nodes and the pair of qubits they act on. 
- pub fn iter(&self) -> impl Iterator { + pub fn iter(&self) -> impl Iterator { self.nodes.iter() } @@ -143,7 +145,7 @@ impl FrontLayer { } /// Iterator over the qubits that have active nodes on them. - pub fn iter_active(&self) -> impl Iterator { + pub fn iter_active(&self) -> impl Iterator { self.nodes.values().flatten() } } @@ -152,24 +154,24 @@ impl FrontLayer { /// qubit. This does not have `remove` method (and its data structures aren't optimised for fast /// removal), since the extended set is built from scratch each time a new gate is routed. pub struct ExtendedSet { - nodes: IndexMap, - qubits: Vec>, + nodes: IndexMap, + qubits: Vec>, } impl ExtendedSet { - pub fn new(num_qubits: usize, max_size: usize) -> Self { + pub fn new(num_qubits: u32, max_size: usize) -> Self { ExtendedSet { nodes: IndexMap::with_capacity_and_hasher(max_size, ahash::RandomState::default()), - qubits: vec![Vec::new(); num_qubits], + qubits: vec![Vec::new(); num_qubits as usize], } } /// Add a node and its active qubits to the extended set. - pub fn insert(&mut self, index: NodeIndex, qubits: &[usize; 2]) -> bool { + pub fn insert(&mut self, index: NodeIndex, qubits: &[VirtualQubit; 2]) -> bool { let [a, b] = *qubits; if self.nodes.insert(index, *qubits).is_none() { - self.qubits[a].push(b); - self.qubits[b].push(a); + self.qubits[a.index()].push(b); + self.qubits[b.index()].push(a); true } else { false @@ -177,29 +179,29 @@ impl ExtendedSet { } /// Calculate the score of applying the given swap, relative to not applying it. - pub fn score(&self, swap: [usize; 2], layout: &NLayout, dist: &ArrayView2) -> f64 { + pub fn score(&self, swap: [VirtualQubit; 2], layout: &NLayout, dist: &ArrayView2) -> f64 { if self.nodes.is_empty() { return 0.0; } - let [l_a, l_b] = swap; - let p_a = layout.logic_to_phys[l_a]; - let p_b = layout.logic_to_phys[l_b]; + let [a, b] = swap; + let p_a = a.to_phys(layout); + let p_b = b.to_phys(layout); let mut total = 0.0; - for &l_other in self.qubits[l_a].iter() { + for other in self.qubits[a.index()].iter() { // If the other qubit is also active then the score won't have changed, but since the // distance is absolute, we'd double count rather than ignore if we didn't skip it. - if l_other == l_b { + if *other == b { continue; } - let p_other = layout.logic_to_phys[l_other]; - total += dist[[p_b, p_other]] - dist[[p_a, p_other]]; + let p_other = other.to_phys(layout); + total += dist[[p_b.index(), p_other.index()]] - dist[[p_a.index(), p_other.index()]]; } - for &l_other in self.qubits[l_b].iter() { - if l_other == l_a { + for other in self.qubits[b.index()].iter() { + if *other == a { continue; } - let p_other = layout.logic_to_phys[l_other]; - total += dist[[p_a, p_other]] - dist[[p_b, p_other]]; + let p_other = other.to_phys(layout); + total += dist[[p_a.index(), p_other.index()]] - dist[[p_b.index(), p_other.index()]]; } total / self.nodes.len() as f64 } @@ -211,7 +213,7 @@ impl ExtendedSet { } self.nodes .values() - .map(|&[l_a, l_b]| dist[[layout.logic_to_phys[l_a], layout.logic_to_phys[l_b]]]) + .map(|&[a, b]| dist[[a.to_phys(layout).index(), b.to_phys(layout).index()]]) .sum::() / self.nodes.len() as f64 } @@ -219,8 +221,8 @@ impl ExtendedSet { /// Clear all nodes from the extended set. 
pub fn clear(&mut self) { for &[a, b] in self.nodes.values() { - self.qubits[a].clear(); - self.qubits[b].clear(); + self.qubits[a.index()].clear(); + self.qubits[b.index()].clear(); } self.nodes.clear() } diff --git a/crates/accelerate/src/sabre_swap/mod.rs b/crates/accelerate/src/sabre_swap/mod.rs index 12c21cbd1ce1..792313699bfa 100644 --- a/crates/accelerate/src/sabre_swap/mod.rs +++ b/crates/accelerate/src/sabre_swap/mod.rs @@ -38,7 +38,7 @@ use rustworkx_core::token_swapper::token_swapper; use std::cmp::Ordering; use crate::getenv_use_multiple_threads; -use crate::nlayout::NLayout; +use crate::nlayout::{NLayout, PhysicalQubit, VirtualQubit}; use layer::{ExtendedSet, FrontLayer}; use neighbor_table::NeighborTable; @@ -120,7 +120,7 @@ impl NodeBlockResults { pub struct BlockResult { #[pyo3(get)] pub result: SabreResult, - pub swap_epilogue: Vec<[usize; 2]>, + pub swap_epilogue: Vec<[VirtualQubit; 2]>, } #[pymethods] @@ -146,14 +146,14 @@ fn obtain_swaps<'a>( front_layer: &'a FrontLayer, neighbors: &'a NeighborTable, layout: &'a NLayout, -) -> impl Iterator + 'a { +) -> impl Iterator + 'a { front_layer.iter_active().flat_map(move |&v| { - neighbors.neighbors[layout.logic_to_phys[v]] + neighbors[v.to_phys(layout)] .iter() - .filter_map(move |&neighbor| { - let virtual_neighbor = layout.phys_to_logic[neighbor]; - if virtual_neighbor > v || !front_layer.is_active(virtual_neighbor) { - Some([v, virtual_neighbor]) + .filter_map(move |p_neighbor| { + let neighbor = p_neighbor.to_virt(layout); + if neighbor > v || !front_layer.is_active(neighbor) { + Some([v, neighbor]) } else { None } @@ -196,16 +196,6 @@ fn populate_extended_set( } } -fn cmap_from_neighor_table(neighbor_table: &NeighborTable) -> DiGraph<(), ()> { - DiGraph::<(), ()>::from_edges(neighbor_table.neighbors.iter().enumerate().flat_map( - |(u, targets)| { - targets - .iter() - .map(move |v| (NodeIndex::new(u), NodeIndex::new(*v))) - }, - )) -} - /// Run sabre swap on a circuit /// /// Returns: @@ -218,7 +208,7 @@ fn cmap_from_neighor_table(neighbor_table: &NeighborTable) -> DiGraph<(), ()> { #[pyfunction] pub fn build_swap_map( py: Python, - num_qubits: usize, + num_qubits: u32, dag: &SabreDAG, neighbor_table: &NeighborTable, distance_matrix: PyReadonlyArray2, @@ -246,17 +236,18 @@ pub fn build_swap_map( res.node_block_results, PyArray::from_iter( py, - initial_layout - .phys_to_logic - .iter() - .map(|initial| final_layout.logic_to_phys[*initial]), + (0..num_qubits).map(|phys| { + PhysicalQubit::new(phys) + .to_virt(initial_layout) + .to_phys(&final_layout) + }), ) .into(), ) } pub fn build_swap_map_inner( - num_qubits: usize, + num_qubits: u32, dag: &SabreDAG, neighbor_table: &NeighborTable, dist: &ArrayView2, @@ -270,7 +261,7 @@ pub fn build_swap_map_inner( Some(run_in_parallel) => run_in_parallel, None => getenv_use_multiple_threads() && num_trials > 1, }; - let coupling_graph: DiGraph<(), ()> = cmap_from_neighor_table(neighbor_table); + let coupling_graph = neighbor_table.coupling_graph(); let outer_rng = match seed { Some(seed) => Pcg64Mcg::seed_from_u64(seed), None => Pcg64Mcg::from_entropy(), @@ -327,7 +318,7 @@ pub fn build_swap_map_inner( } fn swap_map_trial( - num_qubits: usize, + num_qubits: u32, dag: &SabreDAG, neighbor_table: &NeighborTable, dist: &ArrayView2, @@ -336,15 +327,15 @@ fn swap_map_trial( seed: u64, initial_layout: &NLayout, ) -> (SabreResult, NLayout) { - let max_iterations_without_progress = 10 * neighbor_table.neighbors.len(); - let mut out_map: HashMap> = HashMap::new(); + let 
max_iterations_without_progress = 10 * num_qubits as usize; + let mut out_map: HashMap> = HashMap::new(); let mut gate_order = Vec::with_capacity(dag.dag.node_count()); let mut front_layer = FrontLayer::new(num_qubits); let mut extended_set = ExtendedSet::new(num_qubits, EXTENDED_SET_SIZE); let mut required_predecessors: Vec = vec![0; dag.dag.node_count()]; let mut layout = initial_layout.clone(); let mut num_search_steps: u8 = 0; - let mut qubits_decay: Vec = vec![1.; num_qubits]; + let mut qubits_decay: Vec = vec![1.; num_qubits as usize]; let mut rng = Pcg64Mcg::seed_from_u64(seed); let mut node_block_results: HashMap> = HashMap::with_capacity(dag.node_blocks.len()); @@ -393,7 +384,7 @@ fn swap_map_trial( // each iteration of this loop, we route either one or two gates. let mut routable_nodes = Vec::::with_capacity(2); while !front_layer.is_empty() { - let mut current_swaps: Vec<[usize; 2]> = Vec::new(); + let mut current_swaps: Vec<[VirtualQubit; 2]> = Vec::new(); // Swap-mapping loop. This is the main part of the algorithm, which we repeat until we // either successfully route a node, or exceed the maximum number of attempts. while routable_nodes.is_empty() && current_swaps.len() <= max_iterations_without_progress { @@ -409,14 +400,14 @@ fn swap_map_trial( ); front_layer.routable_after(&mut routable_nodes, &best_swap, &layout, coupling_graph); current_swaps.push(best_swap); - layout.swap_logical(best_swap[0], best_swap[1]); + layout.swap_virtual(best_swap[0], best_swap[1]); num_search_steps += 1; if num_search_steps >= DECAY_RESET_INTERVAL { qubits_decay.fill(1.); num_search_steps = 0; } else { - qubits_decay[best_swap[0]] += DECAY_RATE; - qubits_decay[best_swap[1]] += DECAY_RATE; + qubits_decay[best_swap[0].index()] += DECAY_RATE; + qubits_decay[best_swap[1].index()] += DECAY_RATE; } } // If we exceeded the number of allowed attempts without successfully routing a node, we @@ -431,7 +422,7 @@ fn swap_map_trial( let (node, qubits) = closest_operation(&front_layer, &layout, dist); swaps_to_route(&mut current_swaps, &qubits, &layout, coupling_graph); for &[a, b] in current_swaps.iter() { - layout.swap_logical(a, b); + layout.swap_virtual(a, b); } routable_nodes.push(node); } @@ -472,12 +463,12 @@ fn swap_map_trial( fn update_route( seed: u64, nodes: &[NodeIndex], - swaps: Vec<[usize; 2]>, + swaps: Vec<[VirtualQubit; 2]>, dag: &SabreDAG, layout: &NLayout, coupling: &DiGraph<(), ()>, gate_order: &mut Vec, - out_map: &mut HashMap>, + out_map: &mut HashMap>, front_layer: &mut FrontLayer, extended_set: &mut ExtendedSet, required_predecessors: &mut [u32], @@ -516,16 +507,14 @@ fn gen_swap_epilogue( mut from_layout: NLayout, to_layout: &NLayout, seed: u64, -) -> Vec<[usize; 2]> { +) -> Vec<[VirtualQubit; 2]> { // Map physical location in from_layout to physical location in to_layout let mapping: HashMap = from_layout - .logic_to_phys - .iter() - .enumerate() - .map(|(v, p)| { + .iter_physical() + .map(|(p, v)| { ( - NodeIndex::new(*p), - NodeIndex::new(to_layout.logic_to_phys[v]), + NodeIndex::new(p.index()), + NodeIndex::new(v.to_phys(to_layout).index()), ) }) .collect(); @@ -542,11 +531,10 @@ fn gen_swap_epilogue( swaps .into_iter() .map(|(l, r)| { - let ret = [ - from_layout.phys_to_logic[l.index()], - from_layout.phys_to_logic[r.index()], - ]; - from_layout.swap_physical(l.index(), r.index()); + let p_l = PhysicalQubit::new(l.index().try_into().unwrap()); + let p_r = PhysicalQubit::new(r.index().try_into().unwrap()); + let ret = [p_l.to_virt(&from_layout), p_r.to_virt(&from_layout)]; + 
from_layout.swap_physical(p_l, p_r); ret }) .collect() @@ -602,8 +590,8 @@ fn route_reachable_nodes( // placed in the gate order. [a, b] if !coupling.contains_edge( - NodeIndex::new(layout.logic_to_phys[a]), - NodeIndex::new(layout.logic_to_phys[b]), + NodeIndex::new(a.to_phys(layout).index()), + NodeIndex::new(b.to_phys(layout).index()), ) => { // 2Q op that cannot be placed. Add it to the front layer @@ -628,11 +616,11 @@ fn route_reachable_nodes( } /// Walk through the swaps in the given vector, undoing them on the layout and removing them. -fn undo_swaps(swaps: &mut Vec<[usize; 2]>, layout: &mut NLayout) { +fn undo_swaps(swaps: &mut Vec<[VirtualQubit; 2]>, layout: &mut NLayout) { swaps .drain(..) .rev() - .for_each(|swap| layout.swap_logical(swap[0], swap[1])); + .for_each(|swap| layout.swap_virtual(swap[0], swap[1])); } /// Find the node index and its associated virtual qubits that is currently the closest to being @@ -641,68 +629,52 @@ fn closest_operation( front_layer: &FrontLayer, layout: &NLayout, dist: &ArrayView2, -) -> (NodeIndex, [usize; 2]) { +) -> (NodeIndex, [VirtualQubit; 2]) { let (&node, qubits) = front_layer .iter() - .map(|(node, qubits)| { - ( - node, - [ - layout.logic_to_phys[qubits[0]], - layout.logic_to_phys[qubits[1]], - ], - ) - }) + .map(|(node, qubits)| (node, [qubits[0].to_phys(layout), qubits[1].to_phys(layout)])) .min_by(|(_, qubits_a), (_, qubits_b)| { - dist[*qubits_a] - .partial_cmp(&dist[*qubits_b]) + dist[[qubits_a[0].index(), qubits_a[1].index()]] + .partial_cmp(&dist[[qubits_b[0].index(), qubits_b[1].index()]]) .unwrap_or(Ordering::Equal) }) .unwrap(); - ( - node, - [ - layout.phys_to_logic[qubits[0]], - layout.phys_to_logic[qubits[1]], - ], - ) + (node, [qubits[0].to_virt(layout), qubits[1].to_virt(layout)]) } /// Add the minimal set of swaps to the `swaps` vector that bring the two `qubits` together so that /// a 2q gate on them could be routed. 
fn swaps_to_route( - swaps: &mut Vec<[usize; 2]>, - qubits: &[usize; 2], + swaps: &mut Vec<[VirtualQubit; 2]>, + qubits: &[VirtualQubit; 2], layout: &NLayout, coupling_graph: &DiGraph<(), ()>, ) { let mut shortest_paths: DictMap> = DictMap::new(); - let u = layout.logic_to_phys[qubits[0]]; - let v = layout.logic_to_phys[qubits[1]]; (dijkstra( coupling_graph, - NodeIndex::::new(u), - Some(NodeIndex::::new(v)), + NodeIndex::new(qubits[0].to_phys(layout).index()), + Some(NodeIndex::new(qubits[1].to_phys(layout).index())), |_| Ok(1.), Some(&mut shortest_paths), ) as PyResult>>) .unwrap(); - let shortest_path: Vec = shortest_paths - .get(&NodeIndex::new(v)) + let shortest_path = shortest_paths + .get(&NodeIndex::new(qubits[1].to_phys(layout).index())) .unwrap() .iter() - .map(|n| n.index()) - .collect(); + .map(|n| PhysicalQubit::new(n.index() as u32)) + .collect::>(); // Insert greedy swaps along that shortest path let split: usize = shortest_path.len() / 2; let forwards = &shortest_path[1..split]; let backwards = &shortest_path[split..shortest_path.len() - 1]; swaps.reserve(shortest_path.len() - 2); for swap in forwards { - swaps.push([qubits[0], layout.phys_to_logic[*swap]]); + swaps.push([qubits[0], swap.to_virt(layout)]); } for swap in backwards.iter().rev() { - swaps.push([qubits[1], layout.phys_to_logic[*swap]]); + swaps.push([qubits[1], swap.to_virt(layout)]); } } @@ -716,9 +688,9 @@ fn choose_best_swap( qubits_decay: &[f64], heuristic: &Heuristic, rng: &mut Pcg64Mcg, -) -> [usize; 2] { +) -> [VirtualQubit; 2] { let mut min_score = f64::MAX; - let mut best_swaps: Vec<[usize; 2]> = Vec::new(); + let mut best_swaps: Vec<[VirtualQubit; 2]> = Vec::new(); // The decay heuristic is the only one that actually needs the absolute score. let absolute_score = match heuristic { Heuristic::Decay => { @@ -735,7 +707,7 @@ fn choose_best_swap( + EXTENDED_SET_WEIGHT * extended_set.score(swap, layout, dist) } Heuristic::Decay => { - qubits_decay[swap[0]].max(qubits_decay[swap[1]]) + qubits_decay[swap[0].index()].max(qubits_decay[swap[1].index()]) * (absolute_score + layer.score(swap, layout, dist) + EXTENDED_SET_WEIGHT * extended_set.score(swap, layout, dist)) diff --git a/crates/accelerate/src/sabre_swap/neighbor_table.rs b/crates/accelerate/src/sabre_swap/neighbor_table.rs index 528d90a4c8cf..b0e12c655140 100644 --- a/crates/accelerate/src/sabre_swap/neighbor_table.rs +++ b/crates/accelerate/src/sabre_swap/neighbor_table.rs @@ -15,6 +15,9 @@ use ndarray::prelude::*; use numpy::PyReadonlyArray2; use pyo3::prelude::*; use rayon::prelude::*; +use rustworkx_core::petgraph::prelude::*; + +use crate::nlayout::PhysicalQubit; /// A simple container that contains a vector of vectors representing /// neighbors of each node in the coupling map @@ -29,30 +32,51 @@ use rayon::prelude::*; #[pyclass(module = "qiskit._accelerate.sabre_swap")] #[derive(Clone, Debug)] pub struct NeighborTable { - pub neighbors: Vec>, + neighbors: Vec>, +} + +impl NeighborTable { + /// Regenerate a Rust-space coupling graph from the table. 
+ pub fn coupling_graph(&self) -> DiGraph<(), ()> { + DiGraph::from_edges(self.neighbors.iter().enumerate().flat_map(|(u, targets)| { + targets + .iter() + .map(move |v| (NodeIndex::new(u), NodeIndex::new(v.index()))) + })) + } +} + +impl std::ops::Index for NeighborTable { + type Output = [PhysicalQubit]; + + fn index(&self, index: PhysicalQubit) -> &Self::Output { + &self.neighbors[index.index()] + } } + #[pymethods] impl NeighborTable { #[new] #[pyo3(text_signature = "(/, adjacency_matrix=None)")] - pub fn new(adjacency_matrix: Option>) -> Self { + pub fn new(adjacency_matrix: Option>) -> PyResult { let run_in_parallel = getenv_use_multiple_threads(); let neighbors = match adjacency_matrix { Some(adjacency_matrix) => { let adj_mat = adjacency_matrix.as_array(); - let build_neighbors = |row: ArrayView1| -> Vec { + let build_neighbors = |row: ArrayView1| -> PyResult> { row.iter() .enumerate() - .filter_map( - |(row_index, value)| { - if *value == 0. { - None - } else { - Some(row_index) - } - }, - ) + .filter_map(|(row_index, value)| { + if *value == 0. { + None + } else { + Some(match row_index.try_into() { + Ok(index) => Ok(PhysicalQubit::new(index)), + Err(err) => Err(err.into()), + }) + } + }) .collect() }; if run_in_parallel { @@ -60,24 +84,24 @@ impl NeighborTable { .axis_iter(Axis(0)) .into_par_iter() .map(|row| build_neighbors(row)) - .collect() + .collect::>()? } else { adj_mat .axis_iter(Axis(0)) .map(|row| build_neighbors(row)) - .collect() + .collect::>()? } } None => Vec::new(), }; - NeighborTable { neighbors } + Ok(NeighborTable { neighbors }) } - fn __getstate__(&self) -> Vec> { + fn __getstate__(&self) -> Vec> { self.neighbors.clone() } - fn __setstate__(&mut self, state: Vec>) { + fn __setstate__(&mut self, state: Vec>) { self.neighbors = state } } diff --git a/crates/accelerate/src/sabre_swap/sabre_dag.rs b/crates/accelerate/src/sabre_swap/sabre_dag.rs index 945ddfd7778a..d511a789c59c 100644 --- a/crates/accelerate/src/sabre_swap/sabre_dag.rs +++ b/crates/accelerate/src/sabre_swap/sabre_dag.rs @@ -15,11 +15,13 @@ use hashbrown::HashSet; use pyo3::prelude::*; use rustworkx_core::petgraph::prelude::*; +use crate::nlayout::VirtualQubit; + /// Named access to the node elements in the [SabreDAG]. #[derive(Clone, Debug)] pub struct DAGNode { pub py_node_id: usize, - pub qubits: Vec, + pub qubits: Vec, } /// A DAG representation of the logical circuit to be routed. 
This represents the same dataflow @@ -38,7 +40,7 @@ pub struct SabreDAG { pub num_clbits: usize, pub dag: DiGraph, pub first_layer: Vec, - pub nodes: Vec<(usize, Vec, HashSet)>, + pub nodes: Vec<(usize, Vec, HashSet)>, pub node_blocks: HashMap>, } @@ -49,7 +51,7 @@ impl SabreDAG { pub fn new( num_qubits: usize, num_clbits: usize, - nodes: Vec<(usize, Vec, HashSet)>, + nodes: Vec<(usize, Vec, HashSet)>, node_blocks: HashMap>, ) -> Self { let mut qubit_pos: Vec> = vec![None; num_qubits]; @@ -65,11 +67,11 @@ impl SabreDAG { }); let mut is_front = true; for x in qargs { - if let Some(predecessor) = qubit_pos[*x] { + if let Some(predecessor) = qubit_pos[x.index()] { is_front = false; dag.add_edge(predecessor, gate_index, ()); } - qubit_pos[*x] = Some(gate_index); + qubit_pos[x.index()] = Some(gate_index); } for x in cargs { if let Some(predecessor) = clbit_pos[*x] { diff --git a/crates/accelerate/src/sabre_swap/swap_map.rs b/crates/accelerate/src/sabre_swap/swap_map.rs index 2b6c78b57b9e..eafecaf1d189 100644 --- a/crates/accelerate/src/sabre_swap/swap_map.rs +++ b/crates/accelerate/src/sabre_swap/swap_map.rs @@ -14,11 +14,13 @@ use hashbrown::HashMap; use pyo3::exceptions::PyIndexError; use pyo3::prelude::*; +use crate::nlayout::VirtualQubit; + /// A container for required swaps before a gate qubit #[pyclass(module = "qiskit._accelerate.sabre_swap")] #[derive(Clone, Debug)] pub struct SwapMap { - pub map: HashMap>, + pub map: HashMap>, } #[pymethods] @@ -32,7 +34,7 @@ impl SwapMap { self.map.contains_key(&object) } - pub fn __getitem__(&self, object: usize) -> PyResult> { + pub fn __getitem__(&self, object: usize) -> PyResult> { match self.map.get(&object) { Some(val) => Ok(val.clone()), None => Err(PyIndexError::new_err(format!( diff --git a/crates/accelerate/src/stochastic_swap.rs b/crates/accelerate/src/stochastic_swap.rs index e6078933ca72..6d5ed693be42 100644 --- a/crates/accelerate/src/stochastic_swap.rs +++ b/crates/accelerate/src/stochastic_swap.rs @@ -33,20 +33,21 @@ use rand_pcg::Pcg64Mcg; use crate::edge_collections::EdgeCollection; use crate::getenv_use_multiple_threads; -use crate::nlayout::NLayout; +use crate::nlayout::{NLayout, PhysicalQubit, VirtualQubit}; #[inline] fn compute_cost( dist: &ArrayView2, layout: &NLayout, - gates: &[usize], + gates: &[VirtualQubit], num_gates: usize, ) -> f64 { (0..num_gates) - .map(|kk| { - let ii = layout.logic_to_phys[gates[2 * kk]]; - let jj = layout.logic_to_phys[gates[2 * kk + 1]]; - dist[[ii, jj]] + .map(|gate| { + dist[[ + gates[2 * gate].to_phys(layout).index(), + gates[2 * gate + 1].to_phys(layout).index(), + ]] }) .sum() } @@ -79,11 +80,11 @@ fn compute_random_scaling( fn swap_trial( num_qubits: usize, int_layout: &NLayout, - int_qubit_subset: &[usize], - gates: &[usize], + int_qubit_subset: &[VirtualQubit], + gates: &[VirtualQubit], cdist: ArrayView2, cdist2: ArrayView2, - edges: &[usize], + edges: &[PhysicalQubit], seed: u64, trial_num: u64, locked_best_possible: Option<&RwLock<&mut Option<(u64, f64, EdgeCollection, NLayout)>>>, @@ -112,10 +113,10 @@ fn swap_trial( let mut new_cost: f64; let mut dist: f64; - let mut optimal_start: usize = std::usize::MAX; - let mut optimal_end: usize = std::usize::MAX; - let mut optimal_start_qubit = std::usize::MAX; - let mut optimal_end_qubit = std::usize::MAX; + let mut optimal_start = PhysicalQubit::new(std::u32::MAX); + let mut optimal_end = PhysicalQubit::new(std::u32::MAX); + let mut optimal_start_qubit = VirtualQubit::new(std::u32::MAX); + let mut optimal_end_qubit = 
VirtualQubit::new(std::u32::MAX); let mut scale = Array2::zeros((num_qubits, num_qubits)); @@ -128,7 +129,7 @@ fn swap_trial( compute_random_scaling(&mut scale, &cdist2, &rand_arr, num_qubits); - let input_qubit_set: HashSet = int_qubit_subset.iter().copied().collect(); + let input_qubit_set = int_qubit_subset.iter().copied().collect::>(); while depth_step < depth_max { let mut qubit_set = input_qubit_set.clone(); @@ -139,11 +140,11 @@ fn swap_trial( for idx in 0..num_edges { let start_edge = edges[2 * idx]; let end_edge = edges[2 * idx + 1]; - let start_qubit = trial_layout.phys_to_logic[start_edge]; - let end_qubit = trial_layout.phys_to_logic[end_edge]; + let start_qubit = start_edge.to_virt(&trial_layout); + let end_qubit = end_edge.to_virt(&trial_layout); if qubit_set.contains(&start_qubit) && qubit_set.contains(&end_qubit) { // Try this edge to reduce cost - trial_layout.swap(start_edge, end_edge); + trial_layout.swap_physical(start_edge, end_edge); // compute objective function new_cost = compute_cost(&scale.view(), &trial_layout, gates, num_gates); // record progress if we succeed @@ -156,7 +157,7 @@ fn swap_trial( optimal_start_qubit = start_qubit; optimal_end_qubit = end_qubit; } - trial_layout.swap(start_edge, end_edge); + trial_layout.swap_physical(start_edge, end_edge); } } // After going over all edges @@ -242,11 +243,11 @@ pub fn swap_trials( num_trials: u64, num_qubits: usize, int_layout: &NLayout, - int_qubit_subset: PyReadonlyArray1, - int_gates: PyReadonlyArray1, + int_qubit_subset: PyReadonlyArray1, + int_gates: PyReadonlyArray1, cdist: PyReadonlyArray2, cdist2: PyReadonlyArray2, - edges: PyReadonlyArray1, + edges: PyReadonlyArray1, seed: Option, ) -> PyResult<(Option, Option, usize)> { let int_qubit_subset_arr = int_qubit_subset.as_slice()?; diff --git a/crates/accelerate/src/vf2_layout.rs b/crates/accelerate/src/vf2_layout.rs index dbac7e4ab9b7..65817f4ac477 100644 --- a/crates/accelerate/src/vf2_layout.rs +++ b/crates/accelerate/src/vf2_layout.rs @@ -18,7 +18,7 @@ use pyo3::wrap_pyfunction; use rayon::prelude::*; use crate::error_map::ErrorMap; -use crate::nlayout::NLayout; +use crate::nlayout::{NLayout, VirtualQubit}; const PARALLEL_THRESHOLD: usize = 50; @@ -29,23 +29,21 @@ const PARALLEL_THRESHOLD: usize = 50; )] pub fn score_layout( bit_list: PyReadonlyArray1, - edge_list: IndexMap<[usize; 2], i32>, + edge_list: IndexMap<[VirtualQubit; 2], i32>, error_map: &ErrorMap, layout: &NLayout, strict_direction: bool, run_in_parallel: bool, ) -> PyResult { let bit_counts = bit_list.as_slice()?; - let edge_filter_map = |(index_arr, gate_count): (&[usize; 2], &i32)| -> Option { - let mut error = error_map.error_map.get(&[ - layout.logic_to_phys[index_arr[0]], - layout.logic_to_phys[index_arr[1]], - ]); + let edge_filter_map = |(index_arr, gate_count): (&[VirtualQubit; 2], &i32)| -> Option { + let mut error = error_map + .error_map + .get(&[index_arr[0].to_phys(layout), index_arr[1].to_phys(layout)]); if !strict_direction && error.is_none() { - error = error_map.error_map.get(&[ - layout.logic_to_phys[index_arr[1]], - layout.logic_to_phys[index_arr[0]], - ]); + error = error_map + .error_map + .get(&[index_arr[1].to_phys(layout), index_arr[0].to_phys(layout)]); } error.map(|error| { if !error.is_nan() { @@ -55,9 +53,9 @@ pub fn score_layout( } }) }; - let bit_filter_map = |(index, gate_counts): (usize, &i32)| -> Option { - let bit_index = layout.logic_to_phys[index]; - let error = error_map.error_map.get(&[bit_index, bit_index]); + let bit_filter_map = |(v_bit_index, 
gate_counts): (usize, &i32)| -> Option { + let p_bit = VirtualQubit::new(v_bit_index.try_into().unwrap()).to_phys(layout); + let error = error_map.error_map.get(&[p_bit, p_bit]); error.map(|error| { if !error.is_nan() { diff --git a/qiskit/transpiler/passes/routing/sabre_swap.py b/qiskit/transpiler/passes/routing/sabre_swap.py index 1f7aed674a4c..77a47e068b88 100644 --- a/qiskit/transpiler/passes/routing/sabre_swap.py +++ b/qiskit/transpiler/passes/routing/sabre_swap.py @@ -360,10 +360,10 @@ def apply_swaps(dest_dag, swaps, layout): # outermost DAG, since the scope binding occurred as the `SabreDAG` objects were built # up; they're all provided to Sabre routing as full-width already. qubits = ( - physical_qubits[layout.logical_to_physical(a)], - physical_qubits[layout.logical_to_physical(b)], + physical_qubits[layout.virtual_to_physical(a)], + physical_qubits[layout.virtual_to_physical(b)], ) - layout.swap_logical(a, b) + layout.swap_virtual(a, b) dest_dag.apply_operation_back(SwapGate(), qubits, ()) def recurse(dest_dag, source_dag, result, root_logical_map, layout): @@ -380,7 +380,7 @@ def recurse(dest_dag, source_dag, result, root_logical_map, layout): dest_dag.apply_operation_back( node.op, [ - physical_qubits[layout.logical_to_physical(root_logical_map[q])] + physical_qubits[layout.virtual_to_physical(root_logical_map[q])] for q in node.qargs ], node.cargs, diff --git a/qiskit/transpiler/passes/routing/stochastic_swap.py b/qiskit/transpiler/passes/routing/stochastic_swap.py index 5bc7c97f31f7..81304b4b4e4b 100644 --- a/qiskit/transpiler/passes/routing/stochastic_swap.py +++ b/qiskit/transpiler/passes/routing/stochastic_swap.py @@ -202,13 +202,13 @@ def _layer_permutation(self, dag, layer_partition, layout, qubit_subset, couplin cdist2 = coupling._dist_matrix**2 int_qubit_subset = np.fromiter( (dag.find_bit(bit).index for bit in qubit_subset), - dtype=np.uintp, + dtype=np.uint32, count=len(qubit_subset), ) int_gates = np.fromiter( (dag.find_bit(bit).index for gate in gates for bit in gate), - dtype=np.uintp, + dtype=np.uint32, count=2 * len(gates), ) @@ -218,7 +218,7 @@ def _layer_permutation(self, dag, layer_partition, layout, qubit_subset, couplin trial_circuit = DAGCircuit() # SWAP circuit for slice of swaps in this trial trial_circuit.add_qubits(layout.get_virtual_bits()) - edges = np.asarray(coupling.get_edges(), dtype=np.uintp).ravel() + edges = np.asarray(coupling.get_edges(), dtype=np.uint32).ravel() cdist = coupling._dist_matrix best_edges, best_layout, best_depth = stochastic_swap_rs.swap_trials( trials,