-
Notifications
You must be signed in to change notification settings - Fork 2.9k
Expand file tree
/
Copy pathmod.rs
More file actions
4506 lines (4260 loc) · 185 KB
/
mod.rs
File metadata and controls
4506 lines (4260 loc) · 185 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// This code is part of Qiskit.
//
// (C) Copyright IBM 2024
//
// This code is licensed under the Apache License, Version 2.0. You may
// obtain a copy of this license in the LICENSE.txt file in the root directory
// of this source tree or at https://www.apache.org/licenses/LICENSE-2.0.
//
// Any modifications or derivative works of this code must retain this
// copyright notice, and modified files need to carry a notice indicating
// that they have been altered from the originals.
mod lookup;
use hashbrown::HashSet;
use indexmap::IndexSet;
use itertools::Itertools;
use lookup::conjugate_bitterm;
use ndarray::Array2;
use num_complex::Complex64;
use num_traits::Zero;
use numpy::{
PyArray1, PyArray2, PyArrayDescr, PyArrayDescrMethods, PyArrayLike1, PyArrayMethods,
PyReadonlyArray1, PyReadonlyArray2, PyUntypedArrayMethods,
};
use pyo3::{
IntoPyObjectExt, PyErr,
exceptions::{PyRuntimeError, PyTypeError, PyValueError, PyZeroDivisionError},
intern,
prelude::*,
sync::PyOnceLock,
types::{IntoPyDict, PyList, PyString, PyTuple, PyType},
};
use std::{
cmp::Ordering,
collections::btree_map,
ops::{AddAssign, DivAssign, MulAssign, SubAssign},
sync::{Arc, RwLock, RwLockReadGuard},
};
use thiserror::Error;
use qiskit_util::py::{
ImportOnceCell, PySequenceIndex, SequenceIndex, imports::NUMPY_COPY_ONLY_IF_NEEDED,
};
static PAULI_TYPE: ImportOnceCell = ImportOnceCell::new("qiskit.quantum_info", "Pauli");
static PAULI_LIST_TYPE: ImportOnceCell = ImportOnceCell::new("qiskit.quantum_info", "PauliList");
static SPARSE_PAULI_OP_TYPE: ImportOnceCell =
ImportOnceCell::new("qiskit.quantum_info", "SparsePauliOp");
static BIT_TERM_PY_ENUM: PyOnceLock<Py<PyType>> = PyOnceLock::new();
static BIT_TERM_INTO_PY: PyOnceLock<[Option<Py<PyAny>>; 16]> = PyOnceLock::new();
/// Named handle to the alphabet of single-qubit terms.
///
/// This is just the Rust-space representation. We make a separate Python-space `enum.IntEnum` to
/// represent the same information, since we enforce strongly typed interactions in Rust, including
/// not allowing the stored values to be outside the valid `BitTerm`\ s, but doing so in Python would
/// make it very difficult to use the class efficiently with Numpy array views. We attach this
/// sister class of `BitTerm` to `SparseObservable` as a scoped class.
///
/// # Representation
///
/// The `u8` representation and the exact numerical values of these are part of the public API. The
/// low two bits are the symplectic Pauli representation of the required measurement basis with Z in
/// the Lsb0 and X in the Lsb1 (e.g. X and its eigenstate projectors all have their two low bits as
/// `0b10`). The high two bits are `00` for the operator, `10` for the projector to the positive
/// eigenstate, and `01` for the projector to the negative eigenstate.
///
/// The `0b00_00` representation thus ends up being the natural representation of the `I` operator,
/// but this is never stored, and is not named in the enumeration.
///
/// This operator does not store phase terms of $-i$. `BitTerm::Y` has `(1, 1)` as its `(z, x)`
/// representation, and represents exactly the Pauli Y operator; any additional phase is stored only
/// in a corresponding coefficient.
///
/// # Dev notes
///
/// This type is required to be `u8`, but it's a subtype of `u8` because not all `u8` are valid
/// `BitTerm`\ s. For interop with Python space, we accept Numpy arrays of `u8` to represent this,
/// which we transmute into slices of `BitTerm`, after checking that all the values are correct (or
/// skipping the check if Python space promises that it upheld the checks).
///
/// We deliberately _don't_ impl `numpy::Element` for `BitTerm` (which would let us accept and
/// return `PyArray1<BitTerm>` at Python-space boundaries) so that it's clear when we're doing
/// the transmute, and we have to be explicit about the safety of that.
// The discriminants follow the encoding in the type-level docs: the low two bits are the
// symplectic (Z in Lsb0, X in Lsb1) pair, and the high two bits are `00` for the operator,
// `10` for the positive-eigenstate projector and `01` for the negative-eigenstate projector.
#[repr(u8)]
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub enum BitTerm {
    /// Pauli X operator.
    X = 0b00_10,
    /// Projector to the positive eigenstate of Pauli X.
    Plus = 0b10_10,
    /// Projector to the negative eigenstate of Pauli X.
    Minus = 0b01_10,
    /// Pauli Y operator.
    Y = 0b00_11,
    /// Projector to the positive eigenstate of Pauli Y.
    Right = 0b10_11,
    /// Projector to the negative eigenstate of Pauli Y.
    Left = 0b01_11,
    /// Pauli Z operator.
    Z = 0b00_01,
    /// Projector to the positive eigenstate of Pauli Z.
    Zero = 0b10_01,
    /// Projector to the negative eigenstate of Pauli Z.
    One = 0b01_01,
}
impl From<BitTerm> for u8 {
    fn from(term: BitTerm) -> u8 {
        // `BitTerm` is `#[repr(u8)]`, so this cast just exposes the stored discriminant.
        term as u8
    }
}
// SAFETY: `BitTerm` is `#[repr(u8)]`, and `is_valid_bit_pattern` admits exactly the nine
// declared discriminants, so a checked cast can never produce an invalid enum value.
unsafe impl ::bytemuck::CheckedBitPattern for BitTerm {
    type Bits = u8;

    #[inline(always)]
    fn is_valid_bit_pattern(bits: &Self::Bits) -> bool {
        let bits = *bits;
        // A valid letter fits in four bits, never has both high ("sign") bits set, and always
        // has at least one of the low symplectic (Z, X) bits set — bare `I` is never stored.
        bits < 0b1_00_00 && bits & 0b11_00 != 0b11_00 && bits & 0b00_11 != 0
    }
}

// SAFETY: `BitTerm` is a fieldless `#[repr(u8)]` enum, so it has no padding or uninitialised
// bytes.
unsafe impl ::bytemuck::NoUninit for BitTerm {}
impl BitTerm {
    /// The single-letter string used for this `BitTerm` in Python-space applications.
    #[inline]
    pub fn py_label(&self) -> &'static str {
        // Note: these labels are part of the stable Python API and should not be changed.
        match self {
            BitTerm::X => "X",
            BitTerm::Y => "Y",
            BitTerm::Z => "Z",
            BitTerm::Plus => "+",
            BitTerm::Minus => "-",
            BitTerm::Right => "r",
            BitTerm::Left => "l",
            BitTerm::Zero => "0",
            BitTerm::One => "1",
        }
    }

    /// The name of this `BitTerm`, which is how Python space refers to the integer constant.
    #[inline]
    pub fn py_name(&self) -> &'static str {
        // Note: these names are part of the stable Python API and should not be changed.
        match self {
            BitTerm::X => "X",
            BitTerm::Y => "Y",
            BitTerm::Z => "Z",
            BitTerm::Plus => "PLUS",
            BitTerm::Minus => "MINUS",
            BitTerm::Right => "RIGHT",
            BitTerm::Left => "LEFT",
            BitTerm::Zero => "ZERO",
            BitTerm::One => "ONE",
        }
    }

    /// Attempt to convert a `u8` into `BitTerm`.
    ///
    /// Unlike the implementation of `TryFrom<u8>`, this allows `b'I'` as an alphabet letter,
    /// returning `Ok(None)` for it.  All other letters outside the alphabet return the complete
    /// error condition.
    #[inline]
    fn try_from_u8(value: u8) -> Result<Option<Self>, BitTermFromU8Error> {
        match value {
            // The identity is part of the label alphabet but is never stored as a `BitTerm`.
            b'I' => Ok(None),
            b'X' => Ok(Some(BitTerm::X)),
            b'Y' => Ok(Some(BitTerm::Y)),
            b'Z' => Ok(Some(BitTerm::Z)),
            b'+' => Ok(Some(BitTerm::Plus)),
            b'-' => Ok(Some(BitTerm::Minus)),
            b'r' => Ok(Some(BitTerm::Right)),
            b'l' => Ok(Some(BitTerm::Left)),
            b'0' => Ok(Some(BitTerm::Zero)),
            b'1' => Ok(Some(BitTerm::One)),
            _ => Err(BitTermFromU8Error(value)),
        }
    }

    /// Does this term include an X component in its ZX representation?
    ///
    /// This is true for the operators and eigenspace projectors associated with X and Y.
    pub fn has_x_component(&self) -> bool {
        let bits = *self as u8;
        bits & (Self::X as u8) != 0
    }

    /// Does this term include a Z component in its ZX representation?
    ///
    /// This is true for the operators and eigenspace projectors associated with Y and Z.
    pub fn has_z_component(&self) -> bool {
        let bits = *self as u8;
        bits & (Self::Z as u8) != 0
    }

    /// Is this term an eigenstate projector (as opposed to a bare Pauli operator)?
    pub fn is_projector(&self) -> bool {
        matches!(
            self,
            BitTerm::Plus
                | BitTerm::Minus
                | BitTerm::Right
                | BitTerm::Left
                | BitTerm::Zero
                | BitTerm::One
        )
    }
}
/// Expand a single-qubit letter into a sum of (sign, Pauli-or-identity) components.
///
/// `None` stands for the identity; the `bool` appears to be the sign of the component.  The
/// projector expansions list `(I ± P)` without the 1/2 normalisation, which is presumably
/// applied by the caller — TODO confirm against the call sites.
fn bit_term_as_pauli(bit: &BitTerm) -> &'static [(bool, Option<BitTerm>)] {
    match bit {
        // The Pauli operators expand to themselves.
        BitTerm::X => &[(true, Some(BitTerm::X))],
        BitTerm::Y => &[(true, Some(BitTerm::Y))],
        BitTerm::Z => &[(true, Some(BitTerm::Z))],
        // Positive-eigenstate projectors: identity plus the matching Pauli.
        BitTerm::Plus => &[(true, None), (true, Some(BitTerm::X))],
        BitTerm::Right => &[(true, None), (true, Some(BitTerm::Y))],
        BitTerm::Zero => &[(true, None), (true, Some(BitTerm::Z))],
        // Negative-eigenstate projectors: identity minus the matching Pauli.
        BitTerm::Minus => &[(true, None), (false, Some(BitTerm::X))],
        BitTerm::Left => &[(true, None), (false, Some(BitTerm::Y))],
        BitTerm::One => &[(true, None), (false, Some(BitTerm::Z))],
    }
}
/// The error type for a failed conversion into `BitTerm`.
///
/// The wrapped value is the offending byte.
#[derive(Error, Debug)]
#[error("{0} is not a valid letter of the single-qubit alphabet")]
pub struct BitTermFromU8Error(u8);
// `BitTerm` allows safe `as` casting into `u8`. This is the reverse, which is fallible, because
// `BitTerm` is a value-wise subtype of `u8`.
impl ::std::convert::TryFrom<u8> for BitTerm {
    type Error = BitTermFromU8Error;

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        // The bytemuck checked cast enforces `is_valid_bit_pattern`; repackage its error into
        // ours so the offending byte is reported.
        match ::bytemuck::checked::try_cast(value) {
            Ok(term) => Ok(term),
            Err(_) => Err(BitTermFromU8Error(value)),
        }
    }
}
/// Error cases stemming from data coherence at the point of entry into `SparseObservable` from
/// user-provided arrays.
///
/// These most typically appear during [from_raw_parts], but can also be introduced by various
/// remapping arithmetic functions.
///
/// These are generally associated with the Python-space `ValueError` because all of the
/// `TypeError`-related ones are statically forbidden (within Rust) by the language, and conversion
/// failures on entry to Rust from Python space will automatically raise `TypeError`.
#[derive(Error, Debug)]
pub enum CoherenceError {
    /// `boundaries` must have exactly one more element than `coeffs`.
    #[error("`boundaries` ({boundaries}) must be one element longer than `coeffs` ({coeffs})")]
    MismatchedTermCount { coeffs: usize, boundaries: usize },
    /// `bit_terms` and `indices` are parallel arrays and must have equal lengths.
    #[error("`bit_terms` ({bit_terms}) and `indices` ({indices}) must be the same length")]
    MismatchedItemCount { bit_terms: usize, indices: usize },
    /// `boundaries` must always begin with an explicit 0.
    #[error("the first item of `boundaries` ({0}) must be 0")]
    BadInitialBoundary(usize),
    /// The final boundary must cover the whole of `bit_terms`/`indices`.
    #[error(
        "the last item of `boundaries` ({last}) must match the length of `bit_terms` and `indices` ({items})"
    )]
    BadFinalBoundary { last: usize, items: usize },
    /// A qubit index was at least the stated number of qubits.
    #[error("all qubit indices must be less than the number of qubits")]
    BitIndexTooHigh,
    /// Consecutive `boundaries` entries decreased, describing a negative-length term.
    #[error("the values in `boundaries` include backwards slices")]
    DecreasingBoundaries,
    /// Within a single term, `indices` must be sorted ascending.
    #[error("the values in `indices` are not term-wise increasing")]
    UnsortedIndices,
    /// A single term listed the same qubit more than once.
    #[error("the input contains duplicate qubits")]
    DuplicateIndices,
    /// A caller-supplied qubit mapping was shorter than the qubits actually used.
    #[error("the provided qubit mapping does not account for all contained qubits")]
    IndexMapTooSmall,
    /// An operation attempted to reduce the qubit count below what the data requires.
    #[error("cannot shrink the qubit count in an observable from {current} to {target}")]
    NotEnoughQubits { current: usize, target: usize },
}
/// An error related to processing of a string label (both dense and sparse).
#[derive(Error, Debug)]
pub enum LabelError {
    /// A dense label's length must equal the operator's qubit count.
    #[error("label with length {label} cannot be added to a {num_qubits}-qubit operator")]
    WrongLengthDense { num_qubits: u32, label: usize },
    /// A sparse label must supply exactly one index per letter.
    #[error("label with length {label} does not match indices of length {indices}")]
    WrongLengthIndices { label: usize, indices: usize },
    /// A supplied qubit index was out of range for the operator.
    #[error("index {index} is out of range for a {num_qubits}-qubit operator")]
    BadIndex { index: u32, num_qubits: u32 },
    /// The same qubit index appeared twice in one specifier.
    #[error("index {index} is duplicated in a single specifier")]
    DuplicateIndex { index: u32 },
    /// A letter outside the supported single-qubit alphabet was encountered.
    #[error("labels must only contain letters from the alphabet 'IXYZ+-rl01'")]
    OutsideAlphabet,
}
/// Errors from arithmetic operations that combine two operators, such as mismatched operand
/// sizes or unsupported operations.
#[derive(Error, Debug)]
pub enum ArithmeticError {
    #[error("mismatched numbers of qubits: {left}, {right}")]
    MismatchedQubits { left: u32, right: u32 },
    #[error("invalid operation: {0}")]
    InvalidOperation(String),
    #[error("duplicate indices in qargs")]
    DuplicatedIndex,
    #[error("{0}")]
    OutOfBounds(String),
}
/// One part of the type of the iteration value from [PairwiseOrdered].
///
/// The struct iterates over two sorted lists, and returns values from the left iterator, the right
/// iterator, or both simultaneously, depending on some "ordering" key attached to each. This
/// `enum` is to pass on the information on which iterator is being returned from.
enum Paired<T> {
    /// The key was present only in the left iterator.
    Left(T),
    /// The key was present only in the right iterator.
    Right(T),
    /// The key was present in both iterators; the values are in (left, right) order.
    Both(T, T),
}
/// An iterator combinator that zip-merges two sorted iterators.
///
/// This is created by [pairwise_ordered]; see that method for the description.
struct PairwiseOrdered<C, T, I1, I2>
where
    C: Ord,
    I1: Iterator<Item = (C, T)>,
    I2: Iterator<Item = (C, T)>,
{
    /// Left input stream; `Peekable` so `next` can compare the heads before consuming either.
    left: ::std::iter::Peekable<I1>,
    /// Right input stream, also peekable for the same reason.
    right: ::std::iter::Peekable<I2>,
}
impl<C, T, I1, I2> Iterator for PairwiseOrdered<C, T, I1, I2>
where
    C: Ord,
    I1: Iterator<Item = (C, T)>,
    I2: Iterator<Item = (C, T)>,
{
    type Item = (C, Paired<T>);

    fn next(&mut self) -> Option<Self::Item> {
        // Peek both heads to decide which side(s) to consume; equal keys are merged into a
        // single `Paired::Both` output item.
        let order = match (self.left.peek(), self.right.peek()) {
            (None, None) => return None,
            (Some(_), None) => Ordering::Less,
            (None, Some(_)) => Ordering::Greater,
            (Some((left, _)), Some((right, _))) => left.cmp(right),
        };
        match order {
            Ordering::Less => self.left.next().map(|(i, value)| (i, Paired::Left(value))),
            Ordering::Greater => self
                .right
                .next()
                .map(|(i, value)| (i, Paired::Right(value))),
            Ordering::Equal => {
                let (index, left) = self.left.next().unwrap();
                let (_, right) = self.right.next().unwrap();
                Some((index, Paired::Both(left, right)))
            }
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let left = self.left.size_hint();
        let right = self.right.size_hint();
        // Each output item consumes at least one element from one side and at most one from
        // each, so the shortest possible output pairs every key (the `max` of the lower bounds),
        // while the longest shares no keys at all: the *sum* of the upper bounds, not their
        // `max` as previously reported — an upper bound smaller than the true count violates
        // the `Iterator::size_hint` contract.
        (
            left.0.max(right.0),
            left.1
                .and_then(|left| right.1.and_then(|right| left.checked_add(right))),
        )
    }
}
/// An iterator combinator that zip-merges two sorted iterators.
///
/// The two iterators must yield the same items, where each item comprises some totally ordered
/// index, and an associated value. Both input iterators must be sorted in terms of the index. The
/// output iteration is over 2-tuples, also in sorted order, of the seen ordered index values, and a
/// [Paired] object indicating which iterator (or both) the values were drawn from.
fn pairwise_ordered<C, T, I1, I2>(
    left: I1,
    right: I2,
) -> PairwiseOrdered<C, T, <I1 as IntoIterator>::IntoIter, <I2 as IntoIterator>::IntoIter>
where
    C: Ord,
    I1: IntoIterator<Item = (C, T)>,
    I2: IntoIterator<Item = (C, T)>,
{
    // Peekable wrappers let the merge inspect each head without consuming it.
    let left = left.into_iter().peekable();
    let right = right.into_iter().peekable();
    PairwiseOrdered { left, right }
}
/// An observable over Pauli bases that stores its data in a qubit-sparse format.
///
/// See [PySparseObservable] for detailed docs.
///
/// The fields obey the data-coherence invariants documented on each; these are validated on
/// construction by [Self::new] and assumed, unsafely, by [Self::new_unchecked].
#[derive(Clone, Debug, PartialEq)]
pub struct SparseObservable {
    /// The number of qubits the operator acts on. This is not inferable from any other shape or
    /// values, since identities are not stored explicitly.
    num_qubits: u32,
    /// The coefficients of each abstract term in the sum. This has as many elements as terms in
    /// the sum.
    coeffs: Vec<Complex64>,
    /// A flat list of single-qubit terms. This is more naturally a list of lists, but is stored
    /// flat for memory usage and locality reasons, with the sublists denoted by `boundaries.`
    bit_terms: Vec<BitTerm>,
    /// A flat list of the qubit indices that the corresponding entries in `bit_terms` act on. This
    /// list must always be term-wise sorted, where a term is a sublist as denoted by `boundaries`.
    indices: Vec<u32>,
    /// Indices that partition `bit_terms` and `indices` into sublists for each individual term in
    /// the sum. `boundaries[0]..boundaries[1]` is the range of indices into `bit_terms` and
    /// `indices` that correspond to the first term of the sum. All unspecified qubit indices are
    /// implicitly the identity. This is one item longer than `coeffs`, since `boundaries[0]` is
    /// always an explicit zero (for algorithmic ease).
    boundaries: Vec<usize>,
}
impl SparseObservable {
/// Create a new observable from the raw components that make it up.
///
/// This checks the input values for data coherence on entry. If you are certain you have the
/// correct values, you can call `new_unchecked` instead.
pub fn new(
num_qubits: u32,
coeffs: Vec<Complex64>,
bit_terms: Vec<BitTerm>,
indices: Vec<u32>,
boundaries: Vec<usize>,
) -> Result<Self, CoherenceError> {
if coeffs.len() + 1 != boundaries.len() {
return Err(CoherenceError::MismatchedTermCount {
coeffs: coeffs.len(),
boundaries: boundaries.len(),
});
}
if bit_terms.len() != indices.len() {
return Err(CoherenceError::MismatchedItemCount {
bit_terms: bit_terms.len(),
indices: indices.len(),
});
}
// We already checked that `boundaries` is at least length 1.
if *boundaries.first().unwrap() != 0 {
return Err(CoherenceError::BadInitialBoundary(boundaries[0]));
}
if *boundaries.last().unwrap() != indices.len() {
return Err(CoherenceError::BadFinalBoundary {
last: *boundaries.last().unwrap(),
items: indices.len(),
});
}
for (&left, &right) in boundaries[..].iter().zip(&boundaries[1..]) {
if right < left {
return Err(CoherenceError::DecreasingBoundaries);
}
let indices = &indices[left..right];
if !indices.is_empty() {
for (index_left, index_right) in indices[..].iter().zip(&indices[1..]) {
if index_left == index_right {
return Err(CoherenceError::DuplicateIndices);
} else if index_left > index_right {
return Err(CoherenceError::UnsortedIndices);
}
}
}
if indices.last().map(|&ix| ix >= num_qubits).unwrap_or(false) {
return Err(CoherenceError::BitIndexTooHigh);
}
}
// SAFETY: we've just done the coherence checks.
Ok(unsafe { Self::new_unchecked(num_qubits, coeffs, bit_terms, indices, boundaries) })
}
    /// Create a new observable from the raw components without checking data coherence.
    ///
    /// See [Self::new] for the checked equivalent.
    ///
    /// # Safety
    ///
    /// It is up to the caller to ensure that the data-coherence requirements, as enumerated in the
    /// struct-level documentation, have been upheld.
    #[inline(always)]
    pub unsafe fn new_unchecked(
        num_qubits: u32,
        coeffs: Vec<Complex64>,
        bit_terms: Vec<BitTerm>,
        indices: Vec<u32>,
        boundaries: Vec<usize>,
    ) -> Self {
        Self {
            num_qubits,
            coeffs,
            bit_terms,
            indices,
            boundaries,
        }
    }
/// Create a zero operator with pre-allocated space for the given number of summands and
/// single-qubit bit terms.
#[inline]
pub fn with_capacity(num_qubits: u32, num_terms: usize, num_bit_terms: usize) -> Self {
Self {
num_qubits,
coeffs: Vec::with_capacity(num_terms),
bit_terms: Vec::with_capacity(num_bit_terms),
indices: Vec::with_capacity(num_bit_terms),
boundaries: {
let mut boundaries = Vec::with_capacity(num_terms + 1);
boundaries.push(0);
boundaries
},
}
}
/// Get an iterator over the individual terms of the operator.
///
/// Recall that two [SparseObservable]s that have different term orders can still represent the
/// same object. Use [canonicalize] to apply a canonical ordering to the terms.
pub fn iter(&'_ self) -> impl ExactSizeIterator<Item = SparseTermView<'_>> + '_ {
self.coeffs.iter().enumerate().map(|(i, coeff)| {
let start = self.boundaries[i];
let end = self.boundaries[i + 1];
SparseTermView {
num_qubits: self.num_qubits,
coeff: *coeff,
bit_terms: &self.bit_terms[start..end],
indices: &self.indices[start..end],
}
})
}
    /// Get an iterator over the individual terms of the operator that allows in-place mutation.
    ///
    /// The length and indices of these views cannot be mutated, since both would allow breaking
    /// data coherence.
    pub fn iter_mut(&mut self) -> IterMut<'_> {
        // Delegates to the `From`/`Into` conversion defined on `IterMut`.
        self.into()
    }
    /// Get the number of qubits the observable is defined on.
    #[inline]
    pub fn num_qubits(&self) -> u32 {
        self.num_qubits
    }
    /// Get the number of terms in the observable.
    #[inline]
    pub fn num_terms(&self) -> usize {
        self.coeffs.len()
    }
    /// Get the coefficients of the terms.
    #[inline]
    pub fn coeffs(&self) -> &[Complex64] {
        &self.coeffs
    }
    /// Get a mutable slice of the coefficients.
    ///
    /// Unlike [indices_mut] and [boundaries_mut], this is safe: mutating coefficient values (the
    /// slice's length cannot change) does not affect any data-coherence requirement.
    #[inline]
    pub fn coeffs_mut(&mut self) -> &mut [Complex64] {
        &mut self.coeffs
    }
    /// Get the indices of each [BitTerm].
    #[inline]
    pub fn indices(&self) -> &[u32] {
        &self.indices
    }
    /// Get a mutable slice of the indices.
    ///
    /// # Safety
    ///
    /// Modifying the indices can cause an incoherent state of the [SparseObservable].
    /// It should be ensured that the indices are consistent with the coeffs, bit_terms, and
    /// boundaries.
    #[inline]
    pub unsafe fn indices_mut(&mut self) -> &mut [u32] {
        &mut self.indices
    }
    /// Get the boundaries of each term.
    #[inline]
    pub fn boundaries(&self) -> &[usize] {
        &self.boundaries
    }
    /// Get a mutable slice of the boundaries.
    ///
    /// # Safety
    ///
    /// Modifying the boundaries can cause an incoherent state of the [SparseObservable].
    /// It should be ensured that the boundaries are sorted and the length/elements are consistent
    /// with the coeffs, bit_terms, and indices.
    #[inline]
    pub unsafe fn boundaries_mut(&mut self) -> &mut [usize] {
        &mut self.boundaries
    }
    /// Get the [BitTerm]s in the observable.
    #[inline]
    pub fn bit_terms(&self) -> &[BitTerm] {
        &self.bit_terms
    }
    /// Get a mutable slice of the bit terms.
    #[inline]
    pub fn bit_terms_mut(&mut self) -> &mut [BitTerm] {
        &mut self.bit_terms
    }
/// Create a zero operator on ``num_qubits`` qubits.
pub fn zero(num_qubits: u32) -> Self {
Self::with_capacity(num_qubits, 0, 0)
}
/// Create an identity operator on ``num_qubits`` qubits.
pub fn identity(num_qubits: u32) -> Self {
Self {
num_qubits,
coeffs: vec![Complex64::new(1.0, 0.0)],
bit_terms: vec![],
indices: vec![],
boundaries: vec![0, 0],
}
}
/// Clear all the terms from this operator, making it equal to the zero operator again.
///
/// This does not change the capacity of the internal allocations, so subsequent addition or
/// subtraction operations may not need to reallocate.
pub fn clear(&mut self) {
self.coeffs.clear();
self.bit_terms.clear();
self.indices.clear();
self.boundaries.truncate(1);
}
/// Reduce the observable to its canonical form.
///
/// This sums like terms, removing them if the final complex coefficient's absolute value is
/// less than or equal to the tolerance. The terms are reordered to some canonical ordering.
///
/// This function is idempotent.
pub fn canonicalize(&self, tol: f64) -> SparseObservable {
let mut terms = btree_map::BTreeMap::new();
for term in self.iter() {
terms
.entry((term.indices, term.bit_terms))
.and_modify(|c| *c += term.coeff)
.or_insert(term.coeff);
}
let mut out = SparseObservable::zero(self.num_qubits);
for ((indices, bit_terms), coeff) in terms {
if coeff.norm_sqr() <= tol * tol {
continue;
}
out.coeffs.push(coeff);
out.bit_terms.extend_from_slice(bit_terms);
out.indices.extend_from_slice(indices);
out.boundaries.push(out.indices.len());
}
out
}
/// Tensor product of `self` with `other`.
///
/// The bit ordering is defined such that the qubit indices of `other` will remain the same, and
/// the indices of `self` will be offset by the number of qubits in `other`. This is the same
/// convention as used by the rest of Qiskit's `quantum_info` operators.
///
/// Put another way, in the simplest case of two observables formed of dense labels:
///
/// ```
/// let mut left = SparseObservable::zero(5);
/// left.add_dense_label("IXY+Z", Complex64::new(1.0, 0.0));
/// let mut right = SparseObservable::zero(6);
/// right.add_dense_label("IIrl01", Complex64::new(1.0, 0.0));
///
/// // The result is the concatenation of the two labels.
/// let mut out = SparseObservable::zero(11);
/// out.add_dense_label("IXY+ZIIrl01", Complex64::new(1.0, 0.0));
///
/// assert_eq!(left.tensor(right), out);
/// ```
pub fn tensor(&self, other: &SparseObservable) -> SparseObservable {
let mut out = SparseObservable::with_capacity(
self.num_qubits + other.num_qubits,
self.coeffs.len() * other.coeffs.len(),
other.coeffs.len() * self.bit_terms.len() + self.coeffs.len() * other.bit_terms.len(),
);
let mut self_indices = Vec::new();
for self_term in self.iter() {
self_indices.clear();
self_indices.extend(self_term.indices.iter().map(|i| i + other.num_qubits));
for other_term in other.iter() {
out.coeffs.push(self_term.coeff * other_term.coeff);
out.indices.extend_from_slice(other_term.indices);
out.indices.extend_from_slice(&self_indices);
out.bit_terms.extend_from_slice(other_term.bit_terms);
out.bit_terms.extend_from_slice(self_term.bit_terms);
out.boundaries.push(out.bit_terms.len());
}
}
out
}
/// Calculate the adjoint of this observable.
///
/// This is well defined in the abstract mathematical sense. All the terms of the single-qubit
/// alphabet are self-adjoint, so the result of this operation is the same observable, except
/// its coefficients are all their complex conjugates.
pub fn adjoint(&self) -> SparseObservable {
SparseObservable {
num_qubits: self.num_qubits,
coeffs: self.coeffs.iter().map(|c| c.conj()).collect(),
bit_terms: self.bit_terms.clone(),
indices: self.indices.clone(),
boundaries: self.boundaries.clone(),
}
}
/// Calculate the transpose.
///
/// This operation transposes the individual bit terms but does not directly act
/// on the coefficients.
pub fn transpose(&self) -> SparseObservable {
let mut out = self.clone();
for term in out.iter_mut() {
for bit_term in term.bit_terms {
match bit_term {
BitTerm::Y => {
*term.coeff = -*term.coeff;
}
BitTerm::Right => {
*bit_term = BitTerm::Left;
}
BitTerm::Left => {
*bit_term = BitTerm::Right;
}
_ => (),
}
}
}
out
}
/// Calculate the complex conjugate.
///
/// This operation equals transposing the observable and complex conjugating the coefficients.
pub fn conjugate(&self) -> SparseObservable {
let mut out = self.transpose();
for coeff in out.coeffs.iter_mut() {
*coeff = coeff.conj();
}
out
}
    /// Compose another [SparseObservable] onto this one.
    ///
    /// In terms of operator algebras, composition corresponds to left-multiplication:
    /// ``let c = a.compose(b);`` corresponds to $C = B A$.
    ///
    /// Beware that this function can cause exponential explosion of the memory usage of the
    /// observable, as the alphabet of [SparseObservable] is not closed under composition; the
    /// composition of two single-bit terms can be a sum, which multiplies the total number of
    /// terms. This memory usage is not _necessarily_ inherent to the resultant observable, but
    /// finding an efficient re-factorization of the sum is generally equally computationally hard.
    /// It's better to use domain knowledge of your observables to minimize the number of terms that
    /// ever exist, rather than trying to simplify them after the fact.
    ///
    /// # Panics
    ///
    /// If `self` and `other` have different numbers of qubits.
    pub fn compose(&self, other: &SparseObservable) -> SparseObservable {
        if other.num_qubits != self.num_qubits {
            panic!(
                "operand ({}) has a different number of qubits to the base ({})",
                other.num_qubits, self.num_qubits
            );
        }
        let mut out = SparseObservable::zero(self.num_qubits);
        // The product of one term from each operand can expand into a sum of terms (see the doc
        // comment above); `term_state` is reused across all pairs to iterate each expansion.
        let mut term_state = compose::Iter::new(self.num_qubits);
        // Cartesian product over the terms of both operands, `other` on the left (B A).
        for left in other.iter() {
            for right in self.iter() {
                term_state.load_from(
                    left.coeff * right.coeff,
                    left.indices
                        .iter()
                        .copied()
                        .zip(left.bit_terms.iter().copied()),
                    right
                        .indices
                        .iter()
                        .copied()
                        .zip(right.bit_terms.iter().copied()),
                );
                // Reserve using the sizes the expansion reports before pushing its terms.
                out.boundaries.reserve(term_state.num_terms());
                out.coeffs.reserve(term_state.num_terms());
                out.indices
                    .reserve(term_state.num_terms() * term_state.term_len());
                out.bit_terms
                    .reserve(term_state.num_terms() * term_state.term_len());
                while let Some(term) = term_state.next() {
                    out.add_term(term)
                        .expect("qubit counts were checked during initialisation");
                }
            }
        }
        out
    }
    /// Compose another [SparseObservable] onto this one, remapping the qubits.
    ///
    /// The `qubit_fn` is called for each qubit in each term in `other` to determine which qubit in
    /// `self` it corresponds to; this should typically be a very cheap function to call, like a
    /// getter from a slice.
    ///
    /// See [compose] for further information.
    ///
    /// # Panics
    ///
    /// If `other` has more qubits than `self`, if the `qubit_fn` returns a qubit index greater
    /// or equal to the number of qubits in `self`, or if `qubit_fn` returns a duplicate index (this
    /// is only guaranteed to be detected if an individual term contains duplicates).
    pub fn compose_map(
        &self,
        other: &SparseObservable,
        mut qubit_fn: impl FnMut(u32) -> u32,
    ) -> SparseObservable {
        if other.num_qubits > self.num_qubits {
            panic!(
                "operand has more qubits ({}) than the base ({})",
                other.num_qubits, self.num_qubits
            );
        }
        let mut out = SparseObservable::zero(self.num_qubits);
        let mut mapped_term = btree_map::BTreeMap::<u32, BitTerm>::new();
        let mut term_state = compose::Iter::new(self.num_qubits);
        // This choice of loop ordering is because we already know that `self`'s `indices` are
        // sorted, but there's no reason that the output of `qubit_fn` should maintain order,
        // and this way round, we sort as few times as possible.
        for left in other.iter() {
            // Remap the qubits of this `other` term; the `BTreeMap` re-sorts by the new indices
            // and exposes any duplicates `qubit_fn` produced within the term.
            mapped_term.clear();
            for (index, term) in left.indices.iter().zip(left.bit_terms) {
                let qubit = qubit_fn(*index);
                if qubit >= self.num_qubits {
                    panic!("remapped {index} to {qubit}, which is out of bounds");
                }
                if mapped_term.insert(qubit, *term).is_some() {
                    panic!("duplicate qubit {qubit} in remapped term");
                };
            }
            for right in self.iter() {
                term_state.load_from(
                    left.coeff * right.coeff,
                    mapped_term.iter().map(|(k, v)| (*k, *v)),
                    right
                        .indices
                        .iter()
                        .copied()
                        .zip(right.bit_terms.iter().copied()),
                );
                // Reserve using the sizes the expansion reports before pushing its terms.
                out.boundaries.reserve(term_state.num_terms());
                out.coeffs.reserve(term_state.num_terms());
                out.indices
                    .reserve(term_state.num_terms() * term_state.term_len());
                out.bit_terms
                    .reserve(term_state.num_terms() * term_state.term_len());
                while let Some(term) = term_state.next() {
                    out.add_term(term)
                        .expect("qubit counts were checked during initialisation");
                }
            }
        }
        out
    }
/// Evolve this [SparseObservable] by another one.
///
/// In terms of operator algebra, evolution corresponds to conjugation:
/// ``let out = q.evolve(p);`` corresponds to $P^\dagger Q P$.
///
/// This implements Heisenberg-picture evolution of the observable. Unlike a
/// literal implementation via two full compositions, this method performs the
/// conjugation directly at the single-qubit level using a fixed lookup table
/// for $P^\dagger Q P$. This avoids materializing any intermediate
/// [SparseObservable] and computes the evolved observable in a single pass.
///
/// Currently, this method supports evolution only by a *single-term* [SparseObservable].
pub fn evolve(
&self,
op: &SparseObservable,
qargs: Option<&[u32]>,
) -> Result<SparseObservable, ArithmeticError> {
if op.num_terms() != 1 {
return Err(ArithmeticError::InvalidOperation(
"evolve only supports single-term operators".to_string(),
));
}
let t = op.iter().next().unwrap();
let op_coeff = t.coeff;
let mut layout = vec![None; self.num_qubits as usize];
if let Some(qargs) = qargs {
if op.num_qubits > self.num_qubits {
return Err(ArithmeticError::OutOfBounds(format!(
"operator has more qubits ({}) than the base ({})",
op.num_qubits, self.num_qubits
)));
}
// Handling the zero-qubit scalar edge case (Identity Operator evolution).
if op.num_qubits == 0 {
let scalar = op.coeffs()[0];
return Ok(self * (scalar.conj() * scalar));
}
if qargs.len() != op.num_qubits as usize {
return Err(ArithmeticError::OutOfBounds(format!(
"qargs has length {}, but operator has {} qubit(s)",
qargs.len(),
op.num_qubits
)));
}
let qargs_set = HashSet::<&u32>::from_iter(qargs.iter());
if qargs_set.len() != qargs.len() {
return Err(ArithmeticError::DuplicatedIndex);
}
if let Some(&max_q) = qargs.iter().max() {
if max_q >= self.num_qubits {
return Err(ArithmeticError::OutOfBounds(
"qargs contains out-of-range qubits".to_string(),
));
}
}
// This maps operator bit terms to observable qubits via qargs, considering
// qargs[i] specifies which observable qubit (at index i), the next operator qubit
// in consideration acts on. Operator qubits are numbered 0 to (num_qubits - 1),
// where qubit 0 is considered, the rightmost (least significant) qubit.
for (op_qubit, &self_qubit) in qargs.iter().enumerate() {
if let Some(bit_term_idx) = t.indices.iter().position(|&q| q as usize == op_qubit) {
layout[self_qubit as usize] = Some(t.bit_terms[bit_term_idx]);
}
}
} else {
if self.num_qubits != op.num_qubits {
return Err(ArithmeticError::MismatchedQubits {
left: self.num_qubits,
right: op.num_qubits,
});
}
for (q, bt) in t.indices.iter().zip(t.bit_terms.iter()) {
layout[*q as usize] = Some(*bt);
}
}
let mut out = SparseObservable::zero(self.num_qubits);
for term in self.iter() {
let mut frontier = vec![(term.coeff, Vec::<u32>::new(), Vec::<BitTerm>::new())];
let mut term_map = vec![None; self.num_qubits as usize];
for (i, &q) in term.indices.iter().enumerate() {
term_map[q as usize] = Some(term.bit_terms[i]);
}
for (q, &op_bt) in layout.iter().enumerate() {
let term_bt = term_map[q];
let mut next_frontier = Vec::new();
for (coeff, indices, bit_terms) in frontier {
match (op_bt, term_bt) {
(None, None) => {
next_frontier.push((coeff, indices, bit_terms));
}
(None, Some(bt)) => {
let mut indices = indices;
let mut bit_terms = bit_terms;
indices.push(q as u32);
bit_terms.push(bt);
next_frontier.push((coeff, indices, bit_terms));
}
(Some(_), None) => {
next_frontier.push((coeff, indices, bit_terms));
}
(Some(p), Some(qbt)) => {
let outputs = conjugate_bitterm(p, qbt);
for &(c, new_bt) in outputs {
let mut new_indices = indices.clone();
let mut new_bit_terms = bit_terms.clone();
new_indices.push(q as u32);
new_bit_terms.push(new_bt);
next_frontier.push((coeff * c, new_indices, new_bit_terms));
}
}
}
}
frontier = next_frontier;
if frontier.is_empty() {
break;
}
}
for (coeff, indices, bit_terms) in frontier {
if coeff == Complex64::new(0.0, 0.0) {
continue;
}
out.coeffs.push(op_coeff.conj() * coeff * op_coeff);
out.indices.extend(indices);
out.bit_terms.extend(bit_terms);
out.boundaries.push(out.indices.len());