Skip to content
Open
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
56 changes: 56 additions & 0 deletions onnxscript/function_libs/torch_lib/ops/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
from __future__ import annotations

import math
import string
from typing import Any, Optional, Sequence, Tuple, Union

import numpy as np
Expand Down Expand Up @@ -56,6 +57,7 @@
_INT64_MAX = 9223372036854775807
_INT64_MIN = -9223372036854775808
_MATH_PI = math.pi
_EINSUM_SYMBOLS = string.ascii_letters


@torch_op("aten::_local_scalar_dense", trace_only=True)
Expand Down Expand Up @@ -1192,6 +1194,60 @@ def aten_bernoulli_p(self: TTensor, p: float) -> TTensor:
return op.CastLike(sampled, self)


def _get_einsum_symbol(dim: int) -> str:
if dim >= len(_EINSUM_SYMBOLS):
raise ValueError("aten::_trilinear only supports up to 52 dimensions")
return _EINSUM_SYMBOLS[dim]


def _build_trilinear_subscript(total_dim: int, expanded_dims: Sequence[int]) -> str:
expanded_dims_set = set(expanded_dims)
return "".join(
_get_einsum_symbol(dim) for dim in range(total_dim) if dim not in expanded_dims_set
)
Comment thread
WineChord marked this conversation as resolved.
Outdated


def _build_trilinear_equation(
    total_dim: int,
    expand1: Sequence[int],
    expand2: Sequence[int],
    expand3: Sequence[int],
    sumdim: Sequence[int],
) -> str:
    """Assemble the three-operand einsum equation for aten::_trilinear.

    Each input subscript drops the dimensions that operand was expanded
    over; the output subscript drops the dimensions summed out. Example:
    ``"ac,ab,ad->a"`` style equations of the form ``in1,in2,in3->out``.
    """
    # The output is simply "all dims except sumdim", which is the same
    # filtering rule used for each input's expanded dims.
    lhs = ",".join(
        _build_trilinear_subscript(total_dim, expanded)
        for expanded in (expand1, expand2, expand3)
    )
    rhs = _build_trilinear_subscript(total_dim, sumdim)
    return f"{lhs}->{rhs}"


@torch_op("aten::_trilinear", trace_only=True)
def aten__trilinear(
    i1: TReal,
    i2: TReal,
    i3: TReal,
    expand1: Sequence[int],
    expand2: Sequence[int],
    expand3: Sequence[int],
    sumdim: Sequence[int],
    unroll_dim: int = 1,
) -> TReal:
    """_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor"""

    # unroll_dim only tunes eager-mode performance; it does not change the result.
    del unroll_dim

    # Prefer the symbolic ``rank`` attribute when present; fall back to the
    # static shape length otherwise.
    rank = getattr(i1, "rank", None)
    if rank is None:
        rank = len(i1.shape)
    # After expansion every operand conceptually has rank + len(expand1) dims
    # (all three expand lists pad the operands to the same total rank --
    # assumed per aten::_trilinear's contract; TODO confirm).
    total_dim = rank + len(expand1)
    equation = _build_trilinear_equation(total_dim, expand1, expand2, expand3, sumdim)
    return op.Einsum(i1, i2, i3, equation=equation)


@torch_op("aten::bilinear", trace_only=True)
def aten_bilinear(
input1: TensorType,
Expand Down
36 changes: 36 additions & 0 deletions tests/function_libs/torch_lib/extra_opinfo.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,35 @@ def sample_inputs_bilinear(op_info, device, dtype, requires_grad, **kwargs):
yield opinfo_core.SampleInput(input1, args=(input2, weight, None))


def sample_inputs__trilinear(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for aten._trilinear using bilinear's internal call pattern."""
    del op_info
    del kwargs

    make_arg = functools.partial(
        torch_testing.make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
    )

    # Fixed expand/sum pattern mirroring how bilinear invokes _trilinear.
    expand1 = (1, 3)
    expand2 = (0,)
    expand3 = (1, 2)
    sumdim = (2, 3)

    shapes = (
        (2, 3, 4, 5),
        (1, 2, 2, 1),
        (3, 5, 2, 4),
    )
    for batch_size, in1_features, in2_features, out_features in shapes:
        # Tensors are created in the same order as consumed by the op:
        # (input1, weight, input2), keeping RNG consumption stable.
        input1 = make_arg((batch_size, in1_features))
        weight = make_arg((out_features, in1_features, in2_features))
        input2 = make_arg((batch_size, in2_features))
        yield opinfo_core.SampleInput(
            input1,
            args=(weight, input2, expand1, expand2, expand3, sumdim, 1),
        )


def sample_inputs_bernoulli_p(op_info, device, dtype, requires_grad, **kwargs):
del op_info

Expand Down Expand Up @@ -2516,6 +2545,13 @@ def __init__(self):
sample_inputs_func=sample_inputs_bilinear,
supports_out=False,
),
opinfo_core.OpInfo(
"ops.aten._trilinear.default",
aten_name="_trilinear.default",
dtypes=common_dtype.floating_types(),
sample_inputs_func=sample_inputs__trilinear,
supports_out=False,
),
opinfo_core.OpInfo(
"ops.aten.bernoulli.p",
aten_name="bernoulli.p",
Expand Down
5 changes: 5 additions & 0 deletions tests/function_libs/torch_lib/ops_test_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -601,6 +601,11 @@ def _where_input_wrangler(
TorchLibOpInfo(
"bilinear", core_ops.aten_bilinear, tolerance={torch.float32: (2e-5, 2e-5)}
),
TorchLibOpInfo(
"ops.aten._trilinear.default",
core_ops.aten__trilinear,
tolerance={torch.float32: (2e-5, 2e-5)},
),
TorchLibOpInfo(
# This string is a unique ID. In extra_opinfo.py, we
# also define test data for this ID with
Expand Down
Loading