Skip to content

Commit 524fc74

Browse files
committed
Skip decomposer construction in consolidate blocks with force_consolidate
In the recently merged Qiskit#16075 a default decomposer was added to the consolidate block pass if the `force_consolidate` flag was set. This was intended to enable running the pass in the absence of the options required to configure a decomposer. If the force consolidate flag is set we don't need a two qubit decomposer configured since we're always going to consolidate a 2q block of > 1 gate. However, that fix was incomplete because we don't want to use a decomposer if force_consolidate is set to True. The 2q decomposer is used to compute the number of basis gates for a 2q decomposition of the unitary based on the unitary's coordinates in the weyl chamber. This basis gate count is then used as a heuristic in the pass on whether to consolidate the block to a UnitaryGate or not. Specifically, if the number of basis gates for the weyl coordinates is less than the number of basis gates in the block then we consolidate the block into a `UnitaryGate`. With `force_consolidate=True` all of this work, including the initialization of the decomposers (which isn't free), is wasted since we know we're going to consolidate. This commit adjusts the logic in the pass so that the decomposer is set to None when `force_consolidate=True` and the pass skips all usage of the decomposer internally when force_consolidate is set to True. Internally the rust code of the pass isn't handling the interface as ideally as I would like. The rust interface I'd like is to define the decomposer interface as something like: ```rust enum DecomposerInput { ForceConsolidate, Decomposer(DecomposerType), } ``` and removing the dedicated force_consolidate argument to make the use of a decomposer and force consolidate mutally exclusive. However, this isn't compatible with the pyo3 boundary layer. 
Since the inner function of consolidate blocks is, oddly, the `py_` function, to avoid a larger refactor this commit opts to simply make the decomposer argument an `Option<DecomposerType>` and perform a runtime check on its value together with `force_consolidate`. This has some additional overhead, but in practice it's negligible.
1 parent 56beeb8 commit 524fc74

2 files changed

Lines changed: 62 additions & 30 deletions

File tree

crates/transpiler/src/passes/consolidate_blocks.rs

Lines changed: 51 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -223,7 +223,7 @@ impl PhysQargsMap {
223223
#[pyo3(name = "consolidate_blocks", signature = (dag, decomposer, basis_gate_name, force_consolidate, target=None, basis_gates=None, blocks=None, runs=None, qubit_map=None))]
224224
fn py_run_consolidate_blocks(
225225
dag: &mut DAGCircuit,
226-
decomposer: DecomposerType,
226+
decomposer: Option<DecomposerType>,
227227
basis_gate_name: &str,
228228
force_consolidate: bool,
229229
target: Option<&Target>,
@@ -232,6 +232,11 @@ fn py_run_consolidate_blocks(
232232
runs: Option<Vec<Vec<usize>>>,
233233
qubit_map: Option<Vec<PhysicalQubit>>,
234234
) -> PyResult<()> {
235+
// If we don't have a decomposer and force consolidate is not set then there is not any
236+
// consolidation to do.
237+
if decomposer.is_none() && !force_consolidate {
238+
return Ok(());
239+
}
235240
// The node indices that enter from `blocks` and `runs` come from Python space, and we can't
236241
// trust that they come from a correct analysis (or the block/run collection might have been
237242
// invalidated). Rather than panicking, we should raise Python-space exceptions. We don't have
@@ -408,14 +413,23 @@ fn py_run_consolidate_blocks(
408413
];
409414
let matrix = blocks_to_matrix(dag, &block, block_index_map).ok();
410415
if let Some(matrix) = matrix {
411-
let num_basis_gates = match decomposer {
412-
DecomposerType::TwoQubitBasis(ref decomp) => decomp.num_basis_gates_inner(
413-
nalgebra_array_view::<Complex64, U4, U4>(matrix.as_view()),
414-
)?,
415-
DecomposerType::TwoQubitControlledU(ref decomp) => decomp
416-
.num_basis_gates_inner(nalgebra_array_view::<Complex64, U4, U4>(
417-
matrix.as_view(),
418-
))?,
416+
let num_basis_gates = if let Some(ref decomposer) = decomposer {
417+
match decomposer {
418+
DecomposerType::TwoQubitBasis(decomp) => decomp.num_basis_gates_inner(
419+
nalgebra_array_view::<Complex64, U4, U4>(matrix.as_view()),
420+
)?,
421+
DecomposerType::TwoQubitControlledU(decomp) => decomp
422+
.num_basis_gates_inner(nalgebra_array_view::<Complex64, U4, U4>(
423+
matrix.as_view(),
424+
))?,
425+
}
426+
} else {
427+
// If we don't have a decomposer set we have force_consolidate set.
428+
// This value doesn't matter since we're always going to consolidate,
429+
// but usize::MAX is selected as a safeguard against a logic bug since
430+
// with that value we'll only consolidate if force_consolidate is true
431+
// in the absence of a decomposer.
432+
usize::MAX
419433
};
420434

421435
if force_consolidate
@@ -559,19 +573,34 @@ pub fn run_consolidate_blocks(
559573
target: Option<&Target>,
560574
) -> PyResult<()> {
561575
let approximation_degree = approximation_degree.unwrap_or(1.0);
562-
let (decomposer, basis_gate) = get_decomposer_and_basis_gate(target, approximation_degree);
563-
py_run_consolidate_blocks(
564-
dag,
565-
decomposer,
566-
basis_gate.name(),
567-
force_consolidate,
568-
target,
569-
None,
570-
None,
571-
None,
572-
// TODO: this doesn't handle the possibility of control-flow operations yet.
573-
None,
574-
)
576+
if force_consolidate {
577+
py_run_consolidate_blocks(
578+
dag,
579+
None,
580+
"cx",
581+
force_consolidate,
582+
target,
583+
None,
584+
None,
585+
None,
586+
// TODO: this doesn't handle the possibility of control-flow operations yet.
587+
None,
588+
)
589+
} else {
590+
let (decomposer, basis_gate) = get_decomposer_and_basis_gate(target, approximation_degree);
591+
py_run_consolidate_blocks(
592+
dag,
593+
Some(decomposer),
594+
basis_gate.name(),
595+
force_consolidate,
596+
target,
597+
None,
598+
None,
599+
None,
600+
// TODO: this doesn't handle the possibility of control-flow operations yet.
601+
None,
602+
)
603+
}
575604
}
576605

577606
pub fn consolidate_blocks_mod(m: &Bound<PyModule>) -> PyResult<()> {

qiskit/transpiler/passes/optimization/consolidate_blocks.py

Lines changed: 11 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -123,24 +123,23 @@ def __init__(
123123
basis_fidelity=approximation_degree or 1.0,
124124
)
125125
self.basis_gate_name = next(iter(kak_gates))
126-
elif force_consolidate:
127-
# if we haven't found a decomposer but consolidation is forced,
128-
# pick a default
129-
self.decomposer = TwoQubitBasisDecomposer(CXGate())
130-
self.basis_gate_name = "cx"
131126
else:
132127
self.decomposer = None
133-
else:
128+
self.basis_gate_name = "cx"
129+
elif not force_consolidate:
134130
self.decomposer = TwoQubitBasisDecomposer(CXGate())
135131
self.basis_gate_name = "cx"
132+
else:
133+
self.decomposer = None
134+
self.basis_gate_name = "cx"
136135

137136
def run(self, dag):
138137
"""Run the ConsolidateBlocks pass on `dag`.
139138
140139
Iterate over each block and replace it with an equivalent Unitary
141140
on the same wires.
142141
"""
143-
if self.decomposer is None:
142+
if not self.force_consolidate and self.decomposer is None:
144143
return dag
145144

146145
blocks = self.property_set["block_list"]
@@ -153,9 +152,13 @@ def run(self, dag):
153152
qubit_map = self.property_set.get(self._QUBIT_MAP_KEY, None)
154153
if qubit_map is None:
155154
qubit_map = list(range(dag.num_qubits()))
155+
if self.decomposer is not None:
156+
decomposer = self.decomposer._inner_decomposer
157+
else:
158+
decomposer = None
156159
consolidate_blocks(
157160
dag,
158-
self.decomposer._inner_decomposer,
161+
decomposer,
159162
self.basis_gate_name,
160163
self.force_consolidate,
161164
target=self.target,

0 commit comments

Comments
 (0)