[MLIR][TORCH] Add aten.special.expm1 op lowering (#3878)
This commit adds support for the torch.aten.special_expm1 op by
decomposing it into the torch.aten.expm1 op.

---------

Signed-off-by: Vivek Khandelwal <[email protected]>
vivekkhandelwal1 authored Dec 11, 2024
1 parent 31b912e commit 5a5cc6b
Showing 8 changed files with 112 additions and 6 deletions.
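
At the PyTorch level, torch.special.expm1 is documented as an alias of torch.expm1, which computes exp(x) - 1 without the cancellation error of the naive formula near zero; the 1:1 decomposition added here relies on that equivalence. A quick eager-mode illustration (not part of the commit):

import torch

x = torch.tensor([1e-10, 0.5, 2.0], dtype=torch.float64)

# The alias agrees with the base op exactly.
assert torch.equal(torch.special.expm1(x), torch.expm1(x))

# expm1 exists because exp(x) - 1 loses precision near zero:
print(torch.expm1(x[0]).item())      # 1e-10, accurate
print((torch.exp(x[0]) - 1).item())  # ~1.0000000827e-10, rounding error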
23 changes: 23 additions & 0 deletions include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td
@@ -4610,6 +4610,29 @@ def Torch_AtenTrunc_Op : Torch_Op<"aten.trunc_", [
}];
}

def Torch_AtenSpecialExpm1Op : Torch_Op<"aten.special_expm1", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `aten::special_expm1 : (Tensor) -> (Tensor)`";
let arguments = (ins
AnyTorchTensorType:$self
);
let results = (outs
AnyTorchOptionalTensorType:$result
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult AtenSpecialExpm1Op::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 1, 1);
}
void AtenSpecialExpm1Op::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 1, 1);
}
}];
}

def Torch_AtenSignOp : Torch_Op<"aten.sign", [
AllowsTypeRefinement,
HasValueSemantics,
9 changes: 9 additions & 0 deletions lib/Dialect/Torch/Transforms/AbstractInterpLibrary.cpp
@@ -6495,6 +6495,10 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
" %0 = call @__torch__.torch.jit._shape_functions.unary(%arg0) : (!torch.list<int>) -> !torch.list<int>\n"
" return %0 : !torch.list<int>\n"
" }\n"
" func.func @\"__torch_mlir_shape_fn.aten.special_expm1\"(%arg0: !torch.list<int>) -> !torch.list<int> {\n"
" %0 = call @__torch__.torch.jit._shape_functions.unary(%arg0) : (!torch.list<int>) -> !torch.list<int>\n"
" return %0 : !torch.list<int>\n"
" }\n"
" func.func @\"__torch_mlir_shape_fn.aten.isfinite\"(%arg0: !torch.list<int>) -> !torch.list<int> {\n"
" return %arg0 : !torch.list<int>\n"
" }\n"
@@ -11589,6 +11593,11 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
" %1 = call @__torch__._get_dtype_of_floating_point_op(%0#1) : (!torch.int) -> !torch.int\n"
" return %1 : !torch.int\n"
" }\n"
" func.func @\"__torch_mlir_dtype_fn.aten.special_expm1\"(%arg0: !torch.tuple<int, int>) -> !torch.int {\n"
" %0:2 = torch.prim.TupleUnpack %arg0 : !torch.tuple<int, int> -> !torch.int, !torch.int\n"
" %1 = call @__torch__._get_dtype_of_floating_point_op(%0#1) : (!torch.int) -> !torch.int\n"
" return %1 : !torch.int\n"
" }\n"
" func.func @\"__torch_mlir_dtype_fn.aten.isfinite\"(%arg0: !torch.tuple<int, int>) -> !torch.int {\n"
" %int11 = torch.constant.int 11\n"
" return %int11 : !torch.int\n"
14 changes: 14 additions & 0 deletions lib/Dialect/Torch/Transforms/DecomposeComplexOps.cpp
@@ -11177,6 +11177,19 @@ class DecomposeTorchvisionNmsOp : public OpRewritePattern<TorchvisionNmsOp> {
};
} // namespace

namespace {
class DecomposeAtenSpecialExpm1Op
: public OpRewritePattern<AtenSpecialExpm1Op> {
public:
using OpRewritePattern<AtenSpecialExpm1Op>::OpRewritePattern;
LogicalResult matchAndRewrite(AtenSpecialExpm1Op op,
PatternRewriter &rewriter) const override {
rewriter.replaceOpWithNewOp<AtenExpm1Op>(op, op.getType(), op.getSelf());
return success();
}
};
} // namespace

namespace {
class DecomposeComplexOpsPass
: public DecomposeComplexOpsBase<DecomposeComplexOpsPass> {
@@ -11462,6 +11475,7 @@ class DecomposeComplexOpsPass
addPatternIfTargetOpIsIllegal<DecomposeAtenThresholdOp>(patterns);
addPatternIfTargetOpIsIllegal<DecomposeAtenFloatPowerTensorTensorOp>(
patterns);
addPatternIfTargetOpIsIllegal<DecomposeAtenSpecialExpm1Op>(patterns);

addPatternIfTargetOpIsIllegal<
DecomposeAtenFMaxMinOp<AtenFmaxOp, AtenMaximumOp>>(patterns);
1 change: 1 addition & 0 deletions lib/Dialect/Torch/Transforms/LowerToBackendContract.cpp
@@ -569,6 +569,7 @@ static void markDecomposedOpsAsIllegal(MLIRContext *context,
target.addIllegalOp<AtenLinalgNormOp>();
target.addIllegalOp<AtenFminOp>();
target.addIllegalOp<AtenFmaxOp>();
target.addIllegalOp<AtenSpecialExpm1Op>();

for (auto &opName : backendLegalOpsSet) {
target.addLegalOp(
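
Because the op is now marked illegal for the backend contract, the decomposition pattern above must fire during lowering, so any torch.aten.special_expm1 is rewritten to torch.aten.expm1 before a backend sees it. A rough end-to-end sketch, assuming the pt1 torch_mlir.torchscript entry point (the import location has moved between releases):

import torch
from torch_mlir import torchscript  # pt1 API; adjust the import for your release

class SpecialExpm1(torch.nn.Module):
    def forward(self, x):
        return torch.special.expm1(x)

# The Torch-dialect output should contain torch.aten.expm1,
# not torch.aten.special_expm1, after decomposition.
module = torchscript.compile(SpecialExpm1(), torch.randn(3, 4), output_type="torch")
print(module)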
12 changes: 8 additions & 4 deletions projects/pt1/e2e_testing/xfail_sets.py
@@ -500,8 +500,6 @@
"AdaptiveMaxPool1dStatic_basic",
"CrossEntropyLossModule_basic",
"CrossEntropyLossNoReductionModule_basic",
"ElementwiseExpm1IntModule_basic",
"ElementwiseExpm1Module_basic",
"IsInfiniteModule_basic",
"InterpolateDynamicModule_sizes_nearest",
"IouOfModule_basic",
@@ -909,8 +907,6 @@
"AtenItemIntOpModule_basic",
"CrossEntropyLossModule_basic",
"CrossEntropyLossNoReductionModule_basic",
"ElementwiseExpm1IntModule_basic",
"ElementwiseExpm1Module_basic",
"InterpolateDynamicModule_sizes_nearest",
"IouOfModule_basic",
"IscloseStaticModuleTrue_basic",
@@ -1209,6 +1205,8 @@
"ElementwiseRsqrtModule_basic",
"ElementwiseSigmoidModule_basic",
"ElementwiseSinModule_basic",
"ElementwiseSpecialExpm1IntModule_basic",
"ElementwiseSpecialExpm1Module_basic",
"ElementwiseSqrtModule_basic",
"ElementwiseTanIntModule_basic",
"ElementwiseTanModule_basic",
@@ -2951,6 +2949,8 @@
"ElementwiseEluNonDefaultModule_basic",
"ElementwiseExpm1IntModule_basic",
"ElementwiseExpm1Module_basic",
"ElementwiseSpecialExpm1IntModule_basic",
"ElementwiseSpecialExpm1Module_basic",
"ElementwiseFmodTensor_Int_basic",
"ElementwiseCreateComplexModule_basic",
"ElementwiseMulTensorComplexModule_basic",
@@ -3662,6 +3662,8 @@
"ElementwiseQuantizePerTensorUIntModule_basic",
"ElementwiseSinhIntModule_basic",
"ElementwiseSinhModule_basic",
"ElementwiseSpecialExpm1IntModule_basic",
"ElementwiseSpecialExpm1Module_basic",
"ElementwiseToDtypeF32ToI64Module_basic",
"ElementwiseToDtypeI64ToUI8Module_basic",
"ElementwiseWhereScalarOtherStaticModule_basic",
@@ -4355,6 +4357,8 @@
"ElementwiseSinIntModule_basic",
"ElementwiseSinhIntModule_basic",
"ElementwiseSinhModule_basic",
"ElementwiseSpecialExpm1IntModule_basic",
"ElementwiseSpecialExpm1Module_basic",
"ElementwiseSqrtIntModule_basic",
"ElementwiseSubScalarIntModule_basic",
"ElementwiseTanIntModule_basic",
8 changes: 8 additions & 0 deletions projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/abstract_interp_lib_gen.py
@@ -222,6 +222,9 @@ def aten〇exp2〡shape(self: List[int]) -> List[int]:
def aten〇expm1〡shape(self: List[int]) -> List[int]:
return upstream_shape_functions.unary(self)

def aten〇special_expm1〡shape(self: List[int]) -> List[int]:
return upstream_shape_functions.unary(self)

def aten〇isfinite〡shape(self: List[int]) -> List[int]:
return self

@@ -2717,6 +2720,11 @@ def aten〇expm1〡dtype(self_rank_dtype: Tuple[int, int]) -> int:
self_rank, self_dtype = self_rank_dtype
return _get_dtype_of_floating_point_op(self_dtype)

@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1))
def aten〇special_expm1〡dtype(self_rank_dtype: Tuple[int, int]) -> int:
self_rank, self_dtype = self_rank_dtype
return _get_dtype_of_floating_point_op(self_dtype)

def aten〇isfinite〡dtype(self_rank_dtype: Tuple[int, int]) -> int:
return torch.bool

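
Both new interp functions mirror the existing expm1 entries: the shape function is the standard unary passthrough, and the dtype function goes through _get_dtype_of_floating_point_op, which keeps floating-point dtypes and promotes integer dtypes to the default float dtype. The expected behavior, checked against eager PyTorch (an illustrative sketch, assuming the default dtype is float32):

import torch

# Floating-point inputs keep their dtype.
assert torch.special.expm1(torch.randn(2, dtype=torch.float64)).dtype == torch.float64

# Integer inputs promote to the default floating-point dtype (float32 here).
assert torch.special.expm1(torch.arange(4, dtype=torch.int32)).dtype == torch.float32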
1 change: 1 addition & 0 deletions projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/torch_ods_gen.py
@@ -452,6 +452,7 @@ def emit_with_mutating_variants(key, **kwargs):
emit_with_mutating_variants("aten::ceil : (Tensor) -> (Tensor)", has_folder=True)
emit_with_mutating_variants("aten::round : (Tensor) -> (Tensor)", has_folder=True)
emit_with_mutating_variants("aten::trunc : (Tensor) -> (Tensor)", has_folder=True)
emit("aten::special_expm1 : (Tensor) -> (Tensor)")
emit_with_mutating_variants(
"aten::sign : (Tensor) -> (Tensor)", has_canonicalizer=True
)
50 changes: 48 additions & 2 deletions projects/pt1/python/torch_mlir_e2e_test/test_suite/elementwise.py
@@ -5207,7 +5207,7 @@ def __init__(self):
]
)
def forward(self, a):
-        return torch.special.expm1(a)
+        return torch.expm1(a)


@register_test_case(module_factory=lambda: ElementwiseExpm1Module())
@@ -5230,7 +5230,7 @@ def __init__(self):
]
)
def forward(self, a):
-        return torch.special.expm1(a)
+        return torch.expm1(a)


@register_test_case(module_factory=lambda: ElementwiseExpm1IntModule())
@@ -5241,6 +5241,52 @@ def ElementwiseExpm1IntModule_basic(module, tu: TestUtils):
# ==============================================================================


class ElementwiseSpecialExpm1Module(torch.nn.Module):
def __init__(self):
super().__init__()

@export
@annotate_args(
[
None,
([-1, -1], torch.float32, True),
]
)
def forward(self, a):
return torch.special.expm1(a)


@register_test_case(module_factory=lambda: ElementwiseSpecialExpm1Module())
def ElementwiseSpecialExpm1Module_basic(module, tu: TestUtils):
module.forward(tu.rand(3, 4))


# ==============================================================================


class ElementwiseSpecialExpm1IntModule(torch.nn.Module):
def __init__(self):
super().__init__()

@export
@annotate_args(
[
None,
([-1, -1], torch.int32, True),
]
)
def forward(self, a):
return torch.special.expm1(a)


@register_test_case(module_factory=lambda: ElementwiseSpecialExpm1IntModule())
def ElementwiseSpecialExpm1IntModule_basic(module, tu: TestUtils):
module.forward(tu.randint(3, 4, low=1, high=10).to(torch.int32))
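
# Illustrative sketch, not part of the commit: the eager-mode equivalent of the
# integer test above; after promotion, special_expm1 must match the expm1 op it
# decomposes into:
#   a = torch.randint(1, 10, (3, 4), dtype=torch.int32)
#   assert torch.allclose(torch.special.expm1(a), torch.expm1(a.to(torch.float32)))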


# ==============================================================================


class ElementwiseRad2DegModule(torch.nn.Module):
def __init__(self):
super().__init__()
