Skip to content

Commit a028d2b

Browse files
authored
Generate opset23 with opgen (#2226)
1 parent 147e428 commit a028d2b

10 files changed

Lines changed: 2293 additions & 76 deletions

File tree

onnxscript/onnx_opset/__init__.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@
3939
from onnxscript.onnx_opset._impl.opset20 import Opset20
4040
from onnxscript.onnx_opset._impl.opset21 import Opset21
4141
from onnxscript.onnx_opset._impl.opset22 import Opset22
42+
from onnxscript.onnx_opset._impl.opset23 import Opset23
4243
from onnxscript.onnx_opset._impl.opset_ai_onnx_ml1 import Opset_ai_onnx_ml1
4344
from onnxscript.onnx_opset._impl.opset_ai_onnx_ml2 import Opset_ai_onnx_ml2
4445
from onnxscript.onnx_opset._impl.opset_ai_onnx_ml3 import Opset_ai_onnx_ml3
@@ -73,6 +74,7 @@
7374
"opset20",
7475
"opset21",
7576
"opset22",
77+
"opset23",
7678
"opset_ai_onnx_ml1",
7779
"opset_ai_onnx_ml2",
7880
"opset_ai_onnx_ml3",
@@ -110,6 +112,7 @@
110112
opset20 = Opset20()
111113
opset21 = Opset21()
112114
opset22 = Opset22()
115+
opset23 = Opset23()
113116
opset_ai_onnx_ml1 = Opset_ai_onnx_ml1()
114117
opset_ai_onnx_ml2 = Opset_ai_onnx_ml2()
115118
opset_ai_onnx_ml3 = Opset_ai_onnx_ml3()
@@ -205,6 +208,10 @@
205208
"",
206209
22,
207210
): opset22,
211+
(
212+
"",
213+
23,
214+
): opset23,
208215
(
209216
"ai.onnx.ml",
210217
1,

onnxscript/onnx_opset/_impl/opset13.py

Lines changed: 18 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -334,6 +334,8 @@ def Clip(
334334
Clip operator limits the given input within an interval. The interval is
335335
specified by the inputs 'min' and 'max'. They default to
336336
numeric_limits::lowest() and numeric_limits::max(), respectively.
337+
When 'min' is greater than 'max', the clip operator sets all the 'input' values to
338+
the value of 'max'. Thus, this is equivalent to 'Min(max, Max(input, min))'.
337339
338340
339341
Args:
@@ -875,7 +877,22 @@ def Gather(self, data: T_Gather, indices: Tind_Gather, *, axis: int = 0) -> T_Ga
875877
entries of the axis dimension of `data` (by default outer-most one as axis=0) indexed by `indices`, and concatenates
876878
them in an output tensor of rank q + (r - 1).
877879
878-
If `axis = 0`, let `k = indices[i_{0}, ..., i_{q-1}]`
880+
It is an indexing operation that indexes into the input `data` along a single (specified) axis.
881+
Each entry in `indices` produces a `r-1` dimensional slice of the input tensor.
882+
The entire operation produces, conceptually, a `q`-dimensional tensor of `r-1` dimensional slices,
883+
which is arranged into a `q + (r-1)`-dimensional tensor, with the `q` dimensions taking the
884+
place of the original `axis` that is being indexed into.
885+
886+
The following few examples illustrate how `Gather` works for specific shapes of `data`,
887+
`indices`, and given value of `axis`:
888+
| data shape | indices shape | axis | output shape | output equation |
889+
| --- | --- | --- | --- | --- |
890+
| (P, Q) | ( ) (a scalar) | 0 | (Q) | output[q] = data[indices, q] |
891+
| (P, Q, R) | ( ) (a scalar) | 1 | (P, R) | output[p, r] = data[p, indices, r] |
892+
| (P, Q) | (R, S) | 0 | (R, S, Q) | output[r, s, q] = data[indices[r, s], q] |
893+
| (P, Q) | (R, S) | 1 | (P, R, S) | output[p, r, s] = data[p, indices[r, s]] |
894+
895+
More generally, if `axis = 0`, let `k = indices[i_{0}, ..., i_{q-1}]`
879896
then `output[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[k , j_{0}, ..., j_{r-2}]`:
880897
881898
::

onnxscript/onnx_opset/_impl/opset18.py

Lines changed: 11 additions & 64 deletions
Original file line numberDiff line numberDiff line change
@@ -169,12 +169,18 @@ def CenterCropPad(
169169
170170
Center crop or pad an input to given dimensions.
171171
172-
The crop/pad dimensions can be specified for a subset of the `axes`. Non-specified dimensions will not be
173-
cropped or padded.
172+
The crop/pad dimensions can be specified for a subset of the `axes`; unspecified dimensions will remain unchanged.
174173
175-
If the input dimensions are bigger than the crop shape, a centered cropping window is extracted from the input.
176-
If the input dimensions are smaller than the crop shape, the input is padded on each side equally,
177-
so that the input is centered in the output.
174+
If the input dimensions are larger than the target crop dimensions, a centered cropping window will be extracted
175+
from the input. The starting value for the cropping window is rounded down, which means that if the difference
176+
between the input shape and the crop shape is odd, the cropping window will be shifted half a pixel to the left
177+
of the input center.
178+
179+
If the input dimensions are smaller than the target crop dimensions, the input will be padded equally on both sides
180+
to center it in the output. In cases where the total number of padding pixels is odd, an additional pixel will be
181+
added to the right side.
182+
183+
The padding value used is zero.
178184
179185
180186
Args:
@@ -286,65 +292,6 @@ def Col2Im(
286292
strides=strides,
287293
)
288294

289-
T_GroupNormalization = TypeVar("T_GroupNormalization", BFLOAT16, DOUBLE, FLOAT, FLOAT16)
290-
291-
def GroupNormalization(
292-
self,
293-
X: T_GroupNormalization,
294-
scale: T_GroupNormalization,
295-
bias: T_GroupNormalization,
296-
*,
297-
epsilon: float = 9.999999747378752e-06,
298-
num_groups: int,
299-
) -> T_GroupNormalization:
300-
r"""[🌐 GroupNormalization(18)](https://onnx.ai/onnx/operators/onnx__GroupNormalization.html#groupnormalization-18 "Online Documentation")
301-
302-
303-
A GroupNormalization function. Carries out group normalization as described in
304-
the paper https://arxiv.org/abs/1803.08494
305-
306-
This operator transforms input according to
307-
::
308-
309-
y = scale * (x - mean) / sqrt(variance + epsilon) + bias,
310-
311-
312-
where the mean and variance are computed per instance per group of channels, and
313-
`scale` and `bias` should be specified for each group of channels. The number of
314-
groups `num_groups` should be divisible by the number of channels so that there are
315-
an equal number of channels per group.
316-
317-
When the number of groups is the same as the number of channels, this operator is
318-
equivalent to InstanceNormalization. When there is only one group, this operator
319-
is equivalent to LayerNormalization.
320-
321-
322-
Args:
323-
X: (differentiable) Input data tensor. Dimensions for image cases are `(N x
324-
C x H x W)`, where `N` is the batch size, `C` is the number of channels,
325-
and `H` and `W` are the height and width of the data. Statistics are
326-
computed for every group of channels over `C`, `H`, and `W`. For
327-
non-image cases, the dimensions are in the form of `(N x C x D1 x D2 ...
328-
Dn)`.
329-
330-
scale: (differentiable) Scale tensor of shape `(num_groups)`.
331-
332-
bias: (differentiable) Bias tensor of shape `(num_groups)`.
333-
334-
epsilon: The epsilon value to use to avoid division by zero.
335-
336-
num_groups: The number of groups of channels. It should be a divisor of the
337-
number of channels `C`.
338-
"""
339-
340-
schema = get_schema("GroupNormalization", 18, "")
341-
op = Op(self, "GroupNormalization", schema)
342-
return op(
343-
*self._prepare_inputs(schema, X, scale, bias),
344-
epsilon=epsilon,
345-
num_groups=num_groups,
346-
)
347-
348295
T_LpPool = TypeVar("T_LpPool", DOUBLE, FLOAT, FLOAT16)
349296

350297
def LpPool(

onnxscript/onnx_opset/_impl/opset2.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,6 @@
1919

2020
from onnxscript.onnx_opset._impl.opset1 import Opset1
2121
from onnxscript.onnx_types import (
22-
BFLOAT16,
2322
BOOL,
2423
COMPLEX64,
2524
COMPLEX128,
@@ -43,7 +42,7 @@ class Opset2(Opset1):
4342
def __new__(cls):
4443
return Opset.__new__(cls, "", 2)
4544

46-
T_GlobalLpPool = TypeVar("T_GlobalLpPool", BFLOAT16, DOUBLE, FLOAT, FLOAT16)
45+
T_GlobalLpPool = TypeVar("T_GlobalLpPool", DOUBLE, FLOAT, FLOAT16)
4746

4847
def GlobalLpPool(self, X: T_GlobalLpPool, *, p: int = 2) -> T_GlobalLpPool:
4948
r"""[🌐 GlobalLpPool(2)](https://onnx.ai/onnx/operators/onnx__GlobalLpPool.html#globallppool-2 "Online Documentation")

onnxscript/onnx_opset/_impl/opset21.py

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -422,7 +422,6 @@ def DequantizeLinear(
422422
must have the same shape, determining the quantization's granularity: a scalar for per-tensor/per-layer quantization,
423423
a 1-D tensor for per-axis quantization, or have a rank identical to the input for blocked quantization.
424424
See QuantizeLinear for details on quantization granularity.
425-
426425
`x_zero_point` and `x` must have the same type. `x` and `y` must have the same shape. In the case of dequantizing
427426
`int32`, there's no zero point (zero point is supposed to be 0).
428427
`zero-point` is usually not used in the case of float8 types quantization, but the dequantization formula remains the same
@@ -535,7 +534,7 @@ def GroupNormalization(
535534
536535
537536
where the mean and variance are computed per instance per group of channels, and
538-
`scale` and `bias` should be specified for each group of channels. The number of
537+
`scale` and `bias` should be specified for each channel. The number of
539538
groups `num_groups` should be divisible by the number of channels so that there are
540539
an equal number of channels per group.
541540
@@ -1340,20 +1339,16 @@ def QuantizeLinear(
13401339
The linear quantization operator consumes a high-precision tensor, a scale, and a zero point to compute the
13411340
low-precision/quantized tensor. The scale factor and zero point must have the same shape, determining the quantization
13421341
granularity. The quantization formula is `y = saturate((x / y_scale) + y_zero_point)`.
1343-
13441342
Saturation is done according to:
13451343
- uint16: [0, 65535]
13461344
- int16: [-32768, 32767]
13471345
- uint8: [0, 255]
13481346
- int8: [-128, 127]
13491347
- uint4: [0, 15]
13501348
- int4: [-8, 7]
1351-
13521349
For `(x / y_scale)`, it rounds to the nearest even. Refer to https://en.wikipedia.org/wiki/Rounding for details.
1353-
13541350
`y_zero_point` and `y` must have the same type. `y_zero_point` is usually not used for quantization to float8 types, but the quantization
13551351
formula remains the same for consistency, and the type of the attribute `y_zero_point` still determines the quantization type.
1356-
13571352
There are three supported quantization granularities, determined by the shape of `y_scale`.
13581353
In all cases, `y_zero_point` must have the same shape as `y_scale`.
13591354
- Per-tensor (per-layer) quantization: `y_scale` is a scalar.

onnxscript/onnx_opset/_impl/opset22.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -989,7 +989,7 @@ def GlobalAveragePool(self, X: T_GlobalAveragePool) -> T_GlobalAveragePool:
989989
op = Op(self, "GlobalAveragePool", schema)
990990
return op(*self._prepare_inputs(schema, X))
991991

992-
T_GlobalLpPool = TypeVar("T_GlobalLpPool", DOUBLE, FLOAT, FLOAT16)
992+
T_GlobalLpPool = TypeVar("T_GlobalLpPool", BFLOAT16, DOUBLE, FLOAT, FLOAT16)
993993

994994
def GlobalLpPool(self, X: T_GlobalLpPool, *, p: int = 2) -> T_GlobalLpPool:
995995
r"""[🌐 GlobalLpPool(22)](https://onnx.ai/onnx/operators/onnx__GlobalLpPool.html#globallppool-22 "Online Documentation")

0 commit comments

Comments (0)