[mlir][linalg] Do not check if added dimensions are static in linalg.broadcast.

Added dimensions can be either static or dynamic. Mapped dimensions must be the same in the input and the init.

Differential Revision: https://reviews.llvm.org/D138291
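For illustration only (not part of this commit): with the relaxed verifier, IR like the removed negative test below now passes verification, because init dim 1 is an added dimension and may be dynamic. The function name is hypothetical:

func.func @broadcast_added_dynamic_ok(
    %input: tensor<4x16xf32>, %init: tensor<4x?x16xf32>)
    -> tensor<4x?x16xf32> {
  // Input dims 0 and 1 map to init dims 0 and 2; init dim 1 (the '?') is
  // added by the broadcast and no longer has to be static.
  %bcast = linalg.broadcast
      ins(%input:tensor<4x16xf32>)
      outs(%init:tensor<4x?x16xf32>)
      dimensions = [0, 2]
  func.return %bcast : tensor<4x?x16xf32>
}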
Oleg Shyshkov 2022-11-18 14:45:33 +01:00
parent d731d6df64
commit 244105f791
2 changed files with 1 addition and 20 deletions


@@ -1543,13 +1543,7 @@ LogicalResult BroadcastOp::verify() {
   }
 
   for (const auto &[idx, inputDimIdx] : llvm::enumerate(reverseDimMap)) {
-    if (inputDimIdx == kUnmappedDim) {
-      // This dimensions is being added. Should be statically known.
-      if (ShapedType::isDynamic(initShape[idx]))
-        return emitOpError()
-               << "init dim " << idx
-               << " can't be dynamic, because it's not matched to input";
-    } else {
+    if (inputDimIdx != kUnmappedDim) {
       // This dimensions is mapped from the input. Init and input dims should
       // match.
       if (inputShape[inputDimIdx] != initShape[idx])


@@ -728,19 +728,6 @@ func.func @broadcast_mapped_dim_mismatch(
 
 // -----
 
-func.func @broadcast_added_dynamic_mismatch(
-    %input: tensor<4x16xf32>, %init: tensor<4x?x16xf32>)
-    -> tensor<4x?x16xf32> {
-  // expected-error @+1 {{'linalg.broadcast' op init dim 1 can't be dynamic, because it's not matched to input}}
-  %bcast = linalg.broadcast
-      ins(%input:tensor<4x16xf32>)
-      outs(%init:tensor<4x?x16xf32>)
-      dimensions = [0, 2]
-  func.return %bcast : tensor<4x?x16xf32>
-}
-
-// -----
-
 func.func @broadcast_size_1_extension_not_supported(
     %input: tensor<1x16xf32>, %init: tensor<4x?x16xf32>)
     -> tensor<4x?x16xf32> {