[RISCV][Clang] Replace all undef value with poison
Address remaining work that dates back to discussion in D126745 Reviewed By: craig.topper Differential Revision: https://reviews.llvm.org/D134513
This commit is contained in:
parent
de3efd1b4c
commit
75279aeecd
|
@ -193,7 +193,7 @@ class RVVBuiltin<string suffix, string prototype, string type_range,
|
|||
// The policy scheme for unmasked intrinsic IR.
|
||||
// It could be NonePolicy, HasPassthruOperand or HasPolicyOperand.
|
||||
// HasPassthruOperand: Has a passthru operand to decide tail policy. If it is
|
||||
// undef, tail policy is tail agnostic, otherwise policy is tail undisturbed.
|
||||
// poison, tail policy is tail agnostic, otherwise policy is tail undisturbed.
|
||||
// HasPolicyOperand: Has a policy operand. 1 is tail agnostic and 0 is tail
|
||||
// undisturbed.
|
||||
PolicyScheme UnMaskedPolicyScheme = NonePolicy;
|
||||
|
@ -628,7 +628,7 @@ multiclass RVVVLEFFBuiltin<list<string> types> {
|
|||
ManualCodegen = [{
|
||||
{
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC)
|
||||
Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
|
||||
Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
|
||||
IntrinsicTypes = {ResultType, Ops[3]->getType()};
|
||||
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
|
||||
Value *NewVL = Ops[2];
|
||||
|
@ -649,7 +649,7 @@ multiclass RVVVLEFFBuiltin<list<string> types> {
|
|||
// Move mask to right before vl.
|
||||
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC)
|
||||
Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
|
||||
Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
|
||||
Ops.push_back(ConstantInt::get(Ops.back()->getType(), DefaultPolicy));
|
||||
IntrinsicTypes = {ResultType, Ops[4]->getType()};
|
||||
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
|
||||
|
@ -853,7 +853,7 @@ multiclass RVVUnitStridedSegLoad<string op> {
|
|||
SmallVector<llvm::Value*, 10> Operands;
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC) {
|
||||
for (unsigned I = 0; I < NF; ++I)
|
||||
Operands.push_back(llvm::UndefValue::get(ResultType));
|
||||
Operands.push_back(llvm::PoisonValue::get(ResultType));
|
||||
Operands.push_back(Ops[NF]);
|
||||
Operands.push_back(Ops[NF + 1]);
|
||||
} else {
|
||||
|
@ -884,7 +884,7 @@ multiclass RVVUnitStridedSegLoad<string op> {
|
|||
SmallVector<llvm::Value*, 12> Operands;
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC) {
|
||||
for (unsigned I = 0; I < NF; ++I)
|
||||
Operands.push_back(llvm::UndefValue::get(ResultType));
|
||||
Operands.push_back(llvm::PoisonValue::get(ResultType));
|
||||
Operands.push_back(Ops[NF + 1]);
|
||||
Operands.push_back(Ops[NF]);
|
||||
Operands.push_back(Ops[NF + 2]);
|
||||
|
@ -945,7 +945,7 @@ multiclass RVVUnitStridedSegLoadFF<string op> {
|
|||
Value *NewVL;
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC) {
|
||||
for (unsigned I = 0; I < NF; ++I)
|
||||
Operands.push_back(llvm::UndefValue::get(ResultType));
|
||||
Operands.push_back(llvm::PoisonValue::get(ResultType));
|
||||
Operands.push_back(Ops[NF]);
|
||||
Operands.push_back(Ops[NF + 2]);
|
||||
NewVL = Ops[NF + 1];
|
||||
|
@ -980,7 +980,7 @@ multiclass RVVUnitStridedSegLoadFF<string op> {
|
|||
Value *NewVL;
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC) {
|
||||
for (unsigned I = 0; I < NF; ++I)
|
||||
Operands.push_back(llvm::UndefValue::get(ResultType));
|
||||
Operands.push_back(llvm::PoisonValue::get(ResultType));
|
||||
Operands.push_back(Ops[NF + 1]);
|
||||
Operands.push_back(Ops[NF]);
|
||||
Operands.push_back(Ops[NF + 3]);
|
||||
|
@ -1043,7 +1043,7 @@ multiclass RVVStridedSegLoad<string op> {
|
|||
SmallVector<llvm::Value*, 12> Operands;
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC) {
|
||||
for (unsigned I = 0; I < NF; ++I)
|
||||
Operands.push_back(llvm::UndefValue::get(ResultType));
|
||||
Operands.push_back(llvm::PoisonValue::get(ResultType));
|
||||
Operands.push_back(Ops[NF]);
|
||||
Operands.push_back(Ops[NF + 1]);
|
||||
Operands.push_back(Ops[NF + 2]);
|
||||
|
@ -1076,7 +1076,7 @@ multiclass RVVStridedSegLoad<string op> {
|
|||
SmallVector<llvm::Value*, 12> Operands;
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC) {
|
||||
for (unsigned I = 0; I < NF; ++I)
|
||||
Operands.push_back(llvm::UndefValue::get(ResultType));
|
||||
Operands.push_back(llvm::PoisonValue::get(ResultType));
|
||||
Operands.push_back(Ops[NF + 1]);
|
||||
Operands.push_back(Ops[NF + 2]);
|
||||
Operands.push_back(Ops[NF]);
|
||||
|
@ -1133,7 +1133,7 @@ multiclass RVVIndexedSegLoad<string op> {
|
|||
SmallVector<llvm::Value*, 12> Operands;
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC) {
|
||||
for (unsigned I = 0; I < NF; ++I)
|
||||
Operands.push_back(llvm::UndefValue::get(ResultType));
|
||||
Operands.push_back(llvm::PoisonValue::get(ResultType));
|
||||
Operands.push_back(Ops[NF]);
|
||||
Operands.push_back(Ops[NF + 1]);
|
||||
Operands.push_back(Ops[NF + 2]);
|
||||
|
@ -1167,7 +1167,7 @@ multiclass RVVIndexedSegLoad<string op> {
|
|||
SmallVector<llvm::Value*, 12> Operands;
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC) {
|
||||
for (unsigned I = 0; I < NF; ++I)
|
||||
Operands.push_back(llvm::UndefValue::get(ResultType));
|
||||
Operands.push_back(llvm::PoisonValue::get(ResultType));
|
||||
Operands.push_back(Ops[NF + 1]);
|
||||
Operands.push_back(Ops[NF + 2]);
|
||||
Operands.push_back(Ops[NF]);
|
||||
|
@ -1363,7 +1363,7 @@ multiclass RVVPseudoUnaryBuiltin<string IR, string type_range> {
|
|||
ManualCodegen = [{
|
||||
{
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC)
|
||||
Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
|
||||
Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
|
||||
auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
|
||||
Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy));
|
||||
// passthru, op1, op2, vl
|
||||
|
@ -1375,7 +1375,7 @@ multiclass RVVPseudoUnaryBuiltin<string IR, string type_range> {
|
|||
{
|
||||
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC)
|
||||
Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
|
||||
Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
|
||||
auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
|
||||
Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy));
|
||||
Ops.push_back(ConstantInt::get(Ops.back()->getType(), DefaultPolicy));
|
||||
|
@ -1396,7 +1396,7 @@ multiclass RVVPseudoVNotBuiltin<string IR, string type_range> {
|
|||
ManualCodegen = [{
|
||||
{
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC)
|
||||
Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
|
||||
Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
|
||||
auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
|
||||
Ops.insert(Ops.begin() + 2,
|
||||
llvm::Constant::getAllOnesValue(ElemTy));
|
||||
|
@ -1411,7 +1411,7 @@ multiclass RVVPseudoVNotBuiltin<string IR, string type_range> {
|
|||
{
|
||||
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC)
|
||||
Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
|
||||
Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
|
||||
auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
|
||||
Ops.insert(Ops.begin() + 2,
|
||||
llvm::Constant::getAllOnesValue(ElemTy));
|
||||
|
@ -1453,7 +1453,7 @@ multiclass RVVPseudoVFUnaryBuiltin<string IR, string type_range> {
|
|||
ManualCodegen = [{
|
||||
{
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC)
|
||||
Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
|
||||
Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
|
||||
// op1, po2, vl
|
||||
IntrinsicTypes = {ResultType,
|
||||
Ops[1]->getType(), Ops[2]->getType()};
|
||||
|
@ -1465,7 +1465,7 @@ multiclass RVVPseudoVFUnaryBuiltin<string IR, string type_range> {
|
|||
{
|
||||
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC)
|
||||
Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
|
||||
Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
|
||||
Ops.insert(Ops.begin() + 2, Ops[1]);
|
||||
Ops.push_back(ConstantInt::get(Ops.back()->getType(), DefaultPolicy));
|
||||
// maskedoff, op1, op2, mask, vl
|
||||
|
@ -1489,7 +1489,7 @@ multiclass RVVPseudoVWCVTBuiltin<string IR, string MName, string type_range,
|
|||
ManualCodegen = [{
|
||||
{
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC)
|
||||
Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
|
||||
Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
|
||||
auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
|
||||
Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy));
|
||||
// passtru, op1, op2, vl
|
||||
|
@ -1504,7 +1504,7 @@ multiclass RVVPseudoVWCVTBuiltin<string IR, string MName, string type_range,
|
|||
{
|
||||
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC)
|
||||
Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
|
||||
Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
|
||||
auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
|
||||
Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy));
|
||||
Ops.push_back(ConstantInt::get(Ops.back()->getType(), DefaultPolicy));
|
||||
|
@ -1532,7 +1532,7 @@ multiclass RVVPseudoVNCVTBuiltin<string IR, string MName, string type_range,
|
|||
ManualCodegen = [{
|
||||
{
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC)
|
||||
Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
|
||||
Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
|
||||
Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(Ops.back()->getType()));
|
||||
// passthru, op1, xlen, vl
|
||||
IntrinsicTypes = {ResultType,
|
||||
|
@ -1546,7 +1546,7 @@ multiclass RVVPseudoVNCVTBuiltin<string IR, string MName, string type_range,
|
|||
{
|
||||
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC)
|
||||
Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
|
||||
Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
|
||||
Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(Ops.back()->getType()));
|
||||
Ops.push_back(ConstantInt::get(Ops.back()->getType(), DefaultPolicy));
|
||||
// maskedoff, op1, xlen, mask, vl
|
||||
|
@ -1904,9 +1904,9 @@ let HasMasked = false,
|
|||
MaskedPolicyScheme = NonePolicy,
|
||||
ManualCodegen = [{
|
||||
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
|
||||
// insert undef passthru
|
||||
// insert poison passthru
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC)
|
||||
Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
|
||||
Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
|
||||
IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()};
|
||||
}] in {
|
||||
defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "csil",
|
||||
|
@ -2048,9 +2048,9 @@ let HasMasked = false,
|
|||
MaskedPolicyScheme = NonePolicy,
|
||||
ManualCodegen = [{
|
||||
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
|
||||
// insert undef passthru
|
||||
// insert poison passthru
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC)
|
||||
Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
|
||||
Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
|
||||
IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()};
|
||||
}] in {
|
||||
defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "xfd",
|
||||
|
@ -2248,9 +2248,9 @@ let IsPrototypeDefaultTU = true,
|
|||
MaskedPolicyScheme = NonePolicy,
|
||||
ManualCodegen = [{
|
||||
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
|
||||
// insert undef passthru
|
||||
// insert poison passthru
|
||||
if (DefaultPolicy == TAIL_AGNOSTIC)
|
||||
Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
|
||||
Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
|
||||
IntrinsicTypes = {ResultType, Ops.back()->getType()};
|
||||
}] in {
|
||||
// signed and floating type
|
||||
|
@ -2288,7 +2288,7 @@ let HasMasked = false, HasVL = false, IRName = "" in {
|
|||
let Name = "vundefined", SupportOverloading = false,
|
||||
MaskedPolicyScheme = NonePolicy,
|
||||
ManualCodegen = [{
|
||||
return llvm::UndefValue::get(ResultType);
|
||||
return llvm::PoisonValue::get(ResultType);
|
||||
}] in {
|
||||
def vundefined : RVVBuiltin<"v", "v", "csilxfd">;
|
||||
def vundefined_u : RVVBuiltin<"Uv", "Uv", "csil">;
|
||||
|
@ -2320,7 +2320,7 @@ let HasMasked = false, HasVL = false, IRName = "" in {
|
|||
ManualCodegen = [{
|
||||
ID = Intrinsic::vector_insert;
|
||||
IntrinsicTypes = {ResultType, Ops[0]->getType()};
|
||||
Ops.push_back(llvm::UndefValue::get(ResultType));
|
||||
Ops.push_back(llvm::PoisonValue::get(ResultType));
|
||||
std::swap(Ops[0], Ops[1]);
|
||||
Ops.push_back(ConstantInt::get(Int64Ty, 0));
|
||||
return Builder.CreateCall(CGM.getIntrinsic(ID, IntrinsicTypes), Ops, "");
|
||||
|
|
|
@ -511,7 +511,7 @@ vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, v
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vcompress_vm_i32mf2_ta(vbool64_t mask, vint32mf2_t src, size_t vl) {
|
||||
|
@ -520,7 +520,7 @@ vint32mf2_t test_vcompress_vm_i32mf2_ta(vbool64_t mask, vint32mf2_t src, size_t
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vcompress_vm_u32mf2_ta(vbool64_t mask, vuint32mf2_t src, size_t vl) {
|
||||
|
@ -529,7 +529,7 @@ vuint32mf2_t test_vcompress_vm_u32mf2_ta(vbool64_t mask, vuint32mf2_t src, size_
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vcompress_vm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
|
||||
|
@ -16,7 +16,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) {
|
||||
|
@ -25,7 +25,7 @@ vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) {
|
||||
|
@ -34,7 +34,7 @@ vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) {
|
||||
|
@ -43,7 +43,7 @@ vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) {
|
||||
|
@ -52,7 +52,7 @@ vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) {
|
||||
|
@ -61,7 +61,7 @@ vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) {
|
||||
|
@ -70,7 +70,7 @@ vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) {
|
||||
|
@ -79,7 +79,7 @@ vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfabs_v_f64m8 (vfloat64m8_t op1, size_t vl) {
|
||||
|
@ -178,7 +178,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, siz
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfabs_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) {
|
||||
|
@ -205,7 +205,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vflo
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tama(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfabs_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1,
|
||||
|
@ -17,7 +17,7 @@ vfloat32mf2_t test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2,
|
||||
|
@ -27,7 +27,7 @@ vfloat32m1_t test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2,
|
||||
|
@ -37,7 +37,7 @@ vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2,
|
||||
|
@ -47,7 +47,7 @@ vfloat32m4_t test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2,
|
||||
|
@ -57,7 +57,7 @@ vfloat32m8_t test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1,
|
||||
|
@ -67,7 +67,7 @@ vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1,
|
||||
|
@ -77,7 +77,7 @@ vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1,
|
||||
|
@ -87,7 +87,7 @@ vfloat64m4_t test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfmerge_vfm_f64m8(vbool8_t mask, vfloat64m8_t op1, double op2,
|
||||
|
@ -106,7 +106,7 @@ vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vf
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfmerge_vfm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
|
||||
|
@ -16,7 +16,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfneg_v_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) {
|
||||
|
@ -25,7 +25,7 @@ vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfneg_v_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) {
|
||||
|
@ -34,7 +34,7 @@ vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfneg_v_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) {
|
||||
|
@ -43,7 +43,7 @@ vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfneg_v_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) {
|
||||
|
@ -52,7 +52,7 @@ vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfneg_v_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) {
|
||||
|
@ -61,7 +61,7 @@ vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfneg_v_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) {
|
||||
|
@ -70,7 +70,7 @@ vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfneg_v_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) {
|
||||
|
@ -79,7 +79,7 @@ vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfneg_v_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfneg_v_f64m8 (vfloat64m8_t op1, size_t vl) {
|
||||
|
@ -178,7 +178,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, siz
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfneg_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) {
|
||||
|
@ -205,7 +205,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vflo
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tama(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfneg_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.nxv1i8(<vscale x 2 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.nxv1i8(<vscale x 2 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf4_t test_vlmul_ext_v_i8mf8_i8mf4(vint8mf8_t op1) {
|
||||
|
@ -16,7 +16,7 @@ vint8mf4_t test_vlmul_ext_v_i8mf8_i8mf4(vint8mf8_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.vector.insert.nxv4i8.nxv1i8(<vscale x 4 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.vector.insert.nxv4i8.nxv1i8(<vscale x 4 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf2_t test_vlmul_ext_v_i8mf8_i8mf2(vint8mf8_t op1) {
|
||||
|
@ -25,7 +25,7 @@ vint8mf2_t test_vlmul_ext_v_i8mf8_i8mf2(vint8mf8_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.nxv1i8(<vscale x 8 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.nxv1i8(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m1_t test_vlmul_ext_v_i8mf8_i8m1(vint8mf8_t op1) {
|
||||
|
@ -34,7 +34,7 @@ vint8m1_t test_vlmul_ext_v_i8mf8_i8m1(vint8mf8_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv1i8(<vscale x 16 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv1i8(<vscale x 16 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m2_t test_vlmul_ext_v_i8mf8_i8m2(vint8mf8_t op1) {
|
||||
|
@ -43,7 +43,7 @@ vint8m2_t test_vlmul_ext_v_i8mf8_i8m2(vint8mf8_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv1i8(<vscale x 32 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv1i8(<vscale x 32 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m4_t test_vlmul_ext_v_i8mf8_i8m4(vint8mf8_t op1) {
|
||||
|
@ -52,7 +52,7 @@ vint8m4_t test_vlmul_ext_v_i8mf8_i8m4(vint8mf8_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv1i8(<vscale x 64 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv1i8(<vscale x 64 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m8_t test_vlmul_ext_v_i8mf8_i8m8(vint8mf8_t op1) {
|
||||
|
@ -61,7 +61,7 @@ vint8m8_t test_vlmul_ext_v_i8mf8_i8m8(vint8mf8_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.vector.insert.nxv4i8.nxv2i8(<vscale x 4 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.vector.insert.nxv4i8.nxv2i8(<vscale x 4 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf2_t test_vlmul_ext_v_i8mf4_i8mf2(vint8mf4_t op1) {
|
||||
|
@ -70,7 +70,7 @@ vint8mf2_t test_vlmul_ext_v_i8mf4_i8mf2(vint8mf4_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.nxv2i8(<vscale x 8 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.nxv2i8(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m1_t test_vlmul_ext_v_i8mf4_i8m1(vint8mf4_t op1) {
|
||||
|
@ -79,7 +79,7 @@ vint8m1_t test_vlmul_ext_v_i8mf4_i8m1(vint8mf4_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv2i8(<vscale x 16 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv2i8(<vscale x 16 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m2_t test_vlmul_ext_v_i8mf4_i8m2(vint8mf4_t op1) {
|
||||
|
@ -88,7 +88,7 @@ vint8m2_t test_vlmul_ext_v_i8mf4_i8m2(vint8mf4_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv2i8(<vscale x 32 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv2i8(<vscale x 32 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m4_t test_vlmul_ext_v_i8mf4_i8m4(vint8mf4_t op1) {
|
||||
|
@ -97,7 +97,7 @@ vint8m4_t test_vlmul_ext_v_i8mf4_i8m4(vint8mf4_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv2i8(<vscale x 64 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv2i8(<vscale x 64 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m8_t test_vlmul_ext_v_i8mf4_i8m8(vint8mf4_t op1) {
|
||||
|
@ -106,7 +106,7 @@ vint8m8_t test_vlmul_ext_v_i8mf4_i8m8(vint8mf4_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.nxv4i8(<vscale x 8 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.nxv4i8(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m1_t test_vlmul_ext_v_i8mf2_i8m1(vint8mf2_t op1) {
|
||||
|
@ -115,7 +115,7 @@ vint8m1_t test_vlmul_ext_v_i8mf2_i8m1(vint8mf2_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv4i8(<vscale x 16 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv4i8(<vscale x 16 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m2_t test_vlmul_ext_v_i8mf2_i8m2(vint8mf2_t op1) {
|
||||
|
@ -124,7 +124,7 @@ vint8m2_t test_vlmul_ext_v_i8mf2_i8m2(vint8mf2_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv4i8(<vscale x 32 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv4i8(<vscale x 32 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m4_t test_vlmul_ext_v_i8mf2_i8m4(vint8mf2_t op1) {
|
||||
|
@ -133,7 +133,7 @@ vint8m4_t test_vlmul_ext_v_i8mf2_i8m4(vint8mf2_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv4i8(<vscale x 64 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv4i8(<vscale x 64 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m8_t test_vlmul_ext_v_i8mf2_i8m8(vint8mf2_t op1) {
|
||||
|
@ -142,7 +142,7 @@ vint8m8_t test_vlmul_ext_v_i8mf2_i8m8(vint8mf2_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m2_t test_vlmul_ext_v_i8m1_i8m2(vint8m1_t op1) {
|
||||
|
@ -151,7 +151,7 @@ vint8m2_t test_vlmul_ext_v_i8m1_i8m2(vint8m1_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m4_t test_vlmul_ext_v_i8m1_i8m4(vint8m1_t op1) {
|
||||
|
@ -160,7 +160,7 @@ vint8m4_t test_vlmul_ext_v_i8m1_i8m4(vint8m1_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m8_t test_vlmul_ext_v_i8m1_i8m8(vint8m1_t op1) {
|
||||
|
@ -169,7 +169,7 @@ vint8m8_t test_vlmul_ext_v_i8m1_i8m8(vint8m1_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m4_t test_vlmul_ext_v_i8m2_i8m4(vint8m2_t op1) {
|
||||
|
@ -178,7 +178,7 @@ vint8m4_t test_vlmul_ext_v_i8m2_i8m4(vint8m2_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m8_t test_vlmul_ext_v_i8m2_i8m8(vint8m2_t op1) {
|
||||
|
@ -187,7 +187,7 @@ vint8m8_t test_vlmul_ext_v_i8m2_i8m8(vint8m2_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m4_i8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m8_t test_vlmul_ext_v_i8m4_i8m8(vint8m4_t op1) {
|
||||
|
@ -196,7 +196,7 @@ vint8m8_t test_vlmul_ext_v_i8m4_i8m8(vint8m4_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.vector.insert.nxv2i16.nxv1i16(<vscale x 2 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.vector.insert.nxv2i16.nxv1i16(<vscale x 2 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf2_t test_vlmul_ext_v_i16mf4_i16mf2(vint16mf4_t op1) {
|
||||
|
@ -205,7 +205,7 @@ vint16mf2_t test_vlmul_ext_v_i16mf4_i16mf2(vint16mf4_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.nxv1i16(<vscale x 4 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.nxv1i16(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m1_t test_vlmul_ext_v_i16mf4_i16m1(vint16mf4_t op1) {
|
||||
|
@ -214,7 +214,7 @@ vint16m1_t test_vlmul_ext_v_i16mf4_i16m1(vint16mf4_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m2_t test_vlmul_ext_v_i16mf4_i16m2(vint16mf4_t op1) {
|
||||
|
@ -223,7 +223,7 @@ vint16m2_t test_vlmul_ext_v_i16mf4_i16m2(vint16mf4_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv1i16(<vscale x 16 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv1i16(<vscale x 16 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m4_t test_vlmul_ext_v_i16mf4_i16m4(vint16mf4_t op1) {
|
||||
|
@ -232,7 +232,7 @@ vint16m4_t test_vlmul_ext_v_i16mf4_i16m4(vint16mf4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv1i16(<vscale x 32 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv1i16(<vscale x 32 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vlmul_ext_v_i16mf4_i16m8(vint16mf4_t op1) {
@ -241,7 +241,7 @@ vint16m8_t test_vlmul_ext_v_i16mf4_i16m8(vint16mf4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.nxv2i16(<vscale x 4 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.nxv2i16(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vlmul_ext_v_i16mf2_i16m1(vint16mf2_t op1) {
@ -250,7 +250,7 @@ vint16m1_t test_vlmul_ext_v_i16mf2_i16m1(vint16mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv2i16(<vscale x 8 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv2i16(<vscale x 8 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vlmul_ext_v_i16mf2_i16m2(vint16mf2_t op1) {
@ -259,7 +259,7 @@ vint16m2_t test_vlmul_ext_v_i16mf2_i16m2(vint16mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv2i16(<vscale x 16 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv2i16(<vscale x 16 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vlmul_ext_v_i16mf2_i16m4(vint16mf2_t op1) {
@ -268,7 +268,7 @@ vint16m4_t test_vlmul_ext_v_i16mf2_i16m4(vint16mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv2i16(<vscale x 32 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv2i16(<vscale x 32 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vlmul_ext_v_i16mf2_i16m8(vint16mf2_t op1) {
@ -277,7 +277,7 @@ vint16m8_t test_vlmul_ext_v_i16mf2_i16m8(vint16mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vlmul_ext_v_i16m1_i16m2(vint16m1_t op1) {
@ -286,7 +286,7 @@ vint16m2_t test_vlmul_ext_v_i16m1_i16m2(vint16m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vlmul_ext_v_i16m1_i16m4(vint16m1_t op1) {
@ -295,7 +295,7 @@ vint16m4_t test_vlmul_ext_v_i16m1_i16m4(vint16m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vlmul_ext_v_i16m1_i16m8(vint16m1_t op1) {
@ -304,7 +304,7 @@ vint16m8_t test_vlmul_ext_v_i16m1_i16m8(vint16m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vlmul_ext_v_i16m2_i16m4(vint16m2_t op1) {
@ -313,7 +313,7 @@ vint16m4_t test_vlmul_ext_v_i16m2_i16m4(vint16m2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vlmul_ext_v_i16m2_i16m8(vint16m2_t op1) {
@ -322,7 +322,7 @@ vint16m8_t test_vlmul_ext_v_i16m2_i16m8(vint16m2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m4_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vlmul_ext_v_i16m4_i16m8(vint16m4_t op1) {
@ -331,7 +331,7 @@ vint16m8_t test_vlmul_ext_v_i16m4_i16m8(vint16m4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.nxv1i32(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.nxv1i32(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vlmul_ext_v_i32mf2_i32m1(vint32mf2_t op1) {
@ -340,7 +340,7 @@ vint32m1_t test_vlmul_ext_v_i32mf2_i32m1(vint32mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv1i32(<vscale x 4 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv1i32(<vscale x 4 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vlmul_ext_v_i32mf2_i32m2(vint32mf2_t op1) {
@ -349,7 +349,7 @@ vint32m2_t test_vlmul_ext_v_i32mf2_i32m2(vint32mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv1i32(<vscale x 8 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv1i32(<vscale x 8 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vlmul_ext_v_i32mf2_i32m4(vint32mf2_t op1) {
@ -358,7 +358,7 @@ vint32m4_t test_vlmul_ext_v_i32mf2_i32m4(vint32mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv1i32(<vscale x 16 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv1i32(<vscale x 16 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vlmul_ext_v_i32mf2_i32m8(vint32mf2_t op1) {
@ -367,7 +367,7 @@ vint32m8_t test_vlmul_ext_v_i32mf2_i32m8(vint32mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vlmul_ext_v_i32m1_i32m2(vint32m1_t op1) {
@ -376,7 +376,7 @@ vint32m2_t test_vlmul_ext_v_i32m1_i32m2(vint32m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vlmul_ext_v_i32m1_i32m4(vint32m1_t op1) {
@ -385,7 +385,7 @@ vint32m4_t test_vlmul_ext_v_i32m1_i32m4(vint32m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vlmul_ext_v_i32m1_i32m8(vint32m1_t op1) {
@ -394,7 +394,7 @@ vint32m8_t test_vlmul_ext_v_i32m1_i32m8(vint32m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vlmul_ext_v_i32m2_i32m4(vint32m2_t op1) {
@ -403,7 +403,7 @@ vint32m4_t test_vlmul_ext_v_i32m2_i32m4(vint32m2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vlmul_ext_v_i32m2_i32m8(vint32m2_t op1) {
@ -412,7 +412,7 @@ vint32m8_t test_vlmul_ext_v_i32m2_i32m8(vint32m2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m4_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vlmul_ext_v_i32m4_i32m8(vint32m4_t op1) {
@ -421,7 +421,7 @@ vint32m8_t test_vlmul_ext_v_i32m4_i32m8(vint32m4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vlmul_ext_v_i64m1_i64m2(vint64m1_t op1) {
@ -430,7 +430,7 @@ vint64m2_t test_vlmul_ext_v_i64m1_i64m2(vint64m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vlmul_ext_v_i64m1_i64m4(vint64m1_t op1) {
@ -439,7 +439,7 @@ vint64m4_t test_vlmul_ext_v_i64m1_i64m4(vint64m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vlmul_ext_v_i64m1_i64m8(vint64m1_t op1) {
@ -448,7 +448,7 @@ vint64m8_t test_vlmul_ext_v_i64m1_i64m8(vint64m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vlmul_ext_v_i64m2_i64m4(vint64m2_t op1) {
@ -457,7 +457,7 @@ vint64m4_t test_vlmul_ext_v_i64m2_i64m4(vint64m2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vlmul_ext_v_i64m2_i64m8(vint64m2_t op1) {
@ -466,7 +466,7 @@ vint64m8_t test_vlmul_ext_v_i64m2_i64m8(vint64m2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m4_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vlmul_ext_v_i64m4_i64m8(vint64m4_t op1) {
@ -475,7 +475,7 @@ vint64m8_t test_vlmul_ext_v_i64m4_i64m8(vint64m4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.nxv1i8(<vscale x 2 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.nxv1i8(<vscale x 2 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vlmul_ext_v_u8mf8_u8mf4(vuint8mf8_t op1) {
@ -484,7 +484,7 @@ vuint8mf4_t test_vlmul_ext_v_u8mf8_u8mf4(vuint8mf8_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.vector.insert.nxv4i8.nxv1i8(<vscale x 4 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.vector.insert.nxv4i8.nxv1i8(<vscale x 4 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vlmul_ext_v_u8mf8_u8mf2(vuint8mf8_t op1) {
@ -493,7 +493,7 @@ vuint8mf2_t test_vlmul_ext_v_u8mf8_u8mf2(vuint8mf8_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.nxv1i8(<vscale x 8 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.nxv1i8(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vlmul_ext_v_u8mf8_u8m1(vuint8mf8_t op1) {
@ -502,7 +502,7 @@ vuint8m1_t test_vlmul_ext_v_u8mf8_u8m1(vuint8mf8_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv1i8(<vscale x 16 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv1i8(<vscale x 16 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vlmul_ext_v_u8mf8_u8m2(vuint8mf8_t op1) {
@ -511,7 +511,7 @@ vuint8m2_t test_vlmul_ext_v_u8mf8_u8m2(vuint8mf8_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv1i8(<vscale x 32 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv1i8(<vscale x 32 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vlmul_ext_v_u8mf8_u8m4(vuint8mf8_t op1) {
@ -520,7 +520,7 @@ vuint8m4_t test_vlmul_ext_v_u8mf8_u8m4(vuint8mf8_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv1i8(<vscale x 64 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv1i8(<vscale x 64 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vlmul_ext_v_u8mf8_u8m8(vuint8mf8_t op1) {
@ -529,7 +529,7 @@ vuint8m8_t test_vlmul_ext_v_u8mf8_u8m8(vuint8mf8_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.vector.insert.nxv4i8.nxv2i8(<vscale x 4 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.vector.insert.nxv4i8.nxv2i8(<vscale x 4 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vlmul_ext_v_u8mf4_u8mf2(vuint8mf4_t op1) {
@ -538,7 +538,7 @@ vuint8mf2_t test_vlmul_ext_v_u8mf4_u8mf2(vuint8mf4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.nxv2i8(<vscale x 8 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.nxv2i8(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vlmul_ext_v_u8mf4_u8m1(vuint8mf4_t op1) {
@ -547,7 +547,7 @@ vuint8m1_t test_vlmul_ext_v_u8mf4_u8m1(vuint8mf4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv2i8(<vscale x 16 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv2i8(<vscale x 16 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vlmul_ext_v_u8mf4_u8m2(vuint8mf4_t op1) {
@ -556,7 +556,7 @@ vuint8m2_t test_vlmul_ext_v_u8mf4_u8m2(vuint8mf4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv2i8(<vscale x 32 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv2i8(<vscale x 32 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vlmul_ext_v_u8mf4_u8m4(vuint8mf4_t op1) {
@ -565,7 +565,7 @@ vuint8m4_t test_vlmul_ext_v_u8mf4_u8m4(vuint8mf4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv2i8(<vscale x 64 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv2i8(<vscale x 64 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vlmul_ext_v_u8mf4_u8m8(vuint8mf4_t op1) {
@ -574,7 +574,7 @@ vuint8m8_t test_vlmul_ext_v_u8mf4_u8m8(vuint8mf4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.nxv4i8(<vscale x 8 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.nxv4i8(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vlmul_ext_v_u8mf2_u8m1(vuint8mf2_t op1) {
@ -583,7 +583,7 @@ vuint8m1_t test_vlmul_ext_v_u8mf2_u8m1(vuint8mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv4i8(<vscale x 16 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv4i8(<vscale x 16 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vlmul_ext_v_u8mf2_u8m2(vuint8mf2_t op1) {
@ -592,7 +592,7 @@ vuint8m2_t test_vlmul_ext_v_u8mf2_u8m2(vuint8mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv4i8(<vscale x 32 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv4i8(<vscale x 32 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vlmul_ext_v_u8mf2_u8m4(vuint8mf2_t op1) {
@ -601,7 +601,7 @@ vuint8m4_t test_vlmul_ext_v_u8mf2_u8m4(vuint8mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv4i8(<vscale x 64 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv4i8(<vscale x 64 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vlmul_ext_v_u8mf2_u8m8(vuint8mf2_t op1) {
@ -610,7 +610,7 @@ vuint8m8_t test_vlmul_ext_v_u8mf2_u8m8(vuint8mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vlmul_ext_v_u8m1_u8m2(vuint8m1_t op1) {
@ -619,7 +619,7 @@ vuint8m2_t test_vlmul_ext_v_u8m1_u8m2(vuint8m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vlmul_ext_v_u8m1_u8m4(vuint8m1_t op1) {
@ -628,7 +628,7 @@ vuint8m4_t test_vlmul_ext_v_u8m1_u8m4(vuint8m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vlmul_ext_v_u8m1_u8m8(vuint8m1_t op1) {
@ -637,7 +637,7 @@ vuint8m8_t test_vlmul_ext_v_u8m1_u8m8(vuint8m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vlmul_ext_v_u8m2_u8m4(vuint8m2_t op1) {
@ -646,7 +646,7 @@ vuint8m4_t test_vlmul_ext_v_u8m2_u8m4(vuint8m2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vlmul_ext_v_u8m2_u8m8(vuint8m2_t op1) {
@ -655,7 +655,7 @@ vuint8m8_t test_vlmul_ext_v_u8m2_u8m8(vuint8m2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m4_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vlmul_ext_v_u8m4_u8m8(vuint8m4_t op1) {
@ -664,7 +664,7 @@ vuint8m8_t test_vlmul_ext_v_u8m4_u8m8(vuint8m4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.vector.insert.nxv2i16.nxv1i16(<vscale x 2 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.vector.insert.nxv2i16.nxv1i16(<vscale x 2 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vlmul_ext_v_u16mf4_u16mf2(vuint16mf4_t op1) {
@ -673,7 +673,7 @@ vuint16mf2_t test_vlmul_ext_v_u16mf4_u16mf2(vuint16mf4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.nxv1i16(<vscale x 4 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.nxv1i16(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vlmul_ext_v_u16mf4_u16m1(vuint16mf4_t op1) {
@ -682,7 +682,7 @@ vuint16m1_t test_vlmul_ext_v_u16mf4_u16m1(vuint16mf4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vlmul_ext_v_u16mf4_u16m2(vuint16mf4_t op1) {
@ -691,7 +691,7 @@ vuint16m2_t test_vlmul_ext_v_u16mf4_u16m2(vuint16mf4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv1i16(<vscale x 16 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv1i16(<vscale x 16 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vlmul_ext_v_u16mf4_u16m4(vuint16mf4_t op1) {
@ -700,7 +700,7 @@ vuint16m4_t test_vlmul_ext_v_u16mf4_u16m4(vuint16mf4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv1i16(<vscale x 32 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv1i16(<vscale x 32 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vlmul_ext_v_u16mf4_u16m8(vuint16mf4_t op1) {
@ -709,7 +709,7 @@ vuint16m8_t test_vlmul_ext_v_u16mf4_u16m8(vuint16mf4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.nxv2i16(<vscale x 4 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.nxv2i16(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vlmul_ext_v_u16mf2_u16m1(vuint16mf2_t op1) {
@ -718,7 +718,7 @@ vuint16m1_t test_vlmul_ext_v_u16mf2_u16m1(vuint16mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv2i16(<vscale x 8 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv2i16(<vscale x 8 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vlmul_ext_v_u16mf2_u16m2(vuint16mf2_t op1) {
@ -727,7 +727,7 @@ vuint16m2_t test_vlmul_ext_v_u16mf2_u16m2(vuint16mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv2i16(<vscale x 16 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv2i16(<vscale x 16 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vlmul_ext_v_u16mf2_u16m4(vuint16mf2_t op1) {
@ -736,7 +736,7 @@ vuint16m4_t test_vlmul_ext_v_u16mf2_u16m4(vuint16mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv2i16(<vscale x 32 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv2i16(<vscale x 32 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vlmul_ext_v_u16mf2_u16m8(vuint16mf2_t op1) {
@ -745,7 +745,7 @@ vuint16m8_t test_vlmul_ext_v_u16mf2_u16m8(vuint16mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vlmul_ext_v_u16m1_u16m2(vuint16m1_t op1) {
@ -754,7 +754,7 @@ vuint16m2_t test_vlmul_ext_v_u16m1_u16m2(vuint16m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vlmul_ext_v_u16m1_u16m4(vuint16m1_t op1) {
@ -763,7 +763,7 @@ vuint16m4_t test_vlmul_ext_v_u16m1_u16m4(vuint16m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vlmul_ext_v_u16m1_u16m8(vuint16m1_t op1) {
@ -772,7 +772,7 @@ vuint16m8_t test_vlmul_ext_v_u16m1_u16m8(vuint16m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vlmul_ext_v_u16m2_u16m4(vuint16m2_t op1) {
@ -781,7 +781,7 @@ vuint16m4_t test_vlmul_ext_v_u16m2_u16m4(vuint16m2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vlmul_ext_v_u16m2_u16m8(vuint16m2_t op1) {
@ -790,7 +790,7 @@ vuint16m8_t test_vlmul_ext_v_u16m2_u16m8(vuint16m2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m4_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vlmul_ext_v_u16m4_u16m8(vuint16m4_t op1) {
@ -799,7 +799,7 @@ vuint16m8_t test_vlmul_ext_v_u16m4_u16m8(vuint16m4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.nxv1i32(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.nxv1i32(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vlmul_ext_v_u32mf2_u32m1(vuint32mf2_t op1) {
@ -808,7 +808,7 @@ vuint32m1_t test_vlmul_ext_v_u32mf2_u32m1(vuint32mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv1i32(<vscale x 4 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv1i32(<vscale x 4 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vlmul_ext_v_u32mf2_u32m2(vuint32mf2_t op1) {
@ -817,7 +817,7 @@ vuint32m2_t test_vlmul_ext_v_u32mf2_u32m2(vuint32mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv1i32(<vscale x 8 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv1i32(<vscale x 8 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vlmul_ext_v_u32mf2_u32m4(vuint32mf2_t op1) {
@ -826,7 +826,7 @@ vuint32m4_t test_vlmul_ext_v_u32mf2_u32m4(vuint32mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv1i32(<vscale x 16 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv1i32(<vscale x 16 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vlmul_ext_v_u32mf2_u32m8(vuint32mf2_t op1) {
@ -835,7 +835,7 @@ vuint32m8_t test_vlmul_ext_v_u32mf2_u32m8(vuint32mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vlmul_ext_v_u32m1_u32m2(vuint32m1_t op1) {
@ -844,7 +844,7 @@ vuint32m2_t test_vlmul_ext_v_u32m1_u32m2(vuint32m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vlmul_ext_v_u32m1_u32m4(vuint32m1_t op1) {
@ -853,7 +853,7 @@ vuint32m4_t test_vlmul_ext_v_u32m1_u32m4(vuint32m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vlmul_ext_v_u32m1_u32m8(vuint32m1_t op1) {
@ -862,7 +862,7 @@ vuint32m8_t test_vlmul_ext_v_u32m1_u32m8(vuint32m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vlmul_ext_v_u32m2_u32m4(vuint32m2_t op1) {
@ -871,7 +871,7 @@ vuint32m4_t test_vlmul_ext_v_u32m2_u32m4(vuint32m2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vlmul_ext_v_u32m2_u32m8(vuint32m2_t op1) {
@ -880,7 +880,7 @@ vuint32m8_t test_vlmul_ext_v_u32m2_u32m8(vuint32m2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m4_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vlmul_ext_v_u32m4_u32m8(vuint32m4_t op1) {
@ -889,7 +889,7 @@ vuint32m8_t test_vlmul_ext_v_u32m4_u32m8(vuint32m4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vlmul_ext_v_u64m1_u64m2(vuint64m1_t op1) {
@ -898,7 +898,7 @@ vuint64m2_t test_vlmul_ext_v_u64m1_u64m2(vuint64m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vlmul_ext_v_u64m1_u64m4(vuint64m1_t op1) {
@ -907,7 +907,7 @@ vuint64m4_t test_vlmul_ext_v_u64m1_u64m4(vuint64m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vlmul_ext_v_u64m1_u64m8(vuint64m1_t op1) {
@ -916,7 +916,7 @@ vuint64m8_t test_vlmul_ext_v_u64m1_u64m8(vuint64m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vlmul_ext_v_u64m2_u64m4(vuint64m2_t op1) {
@ -925,7 +925,7 @@ vuint64m4_t test_vlmul_ext_v_u64m2_u64m4(vuint64m2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vlmul_ext_v_u64m2_u64m8(vuint64m2_t op1) {
@ -934,7 +934,7 @@ vuint64m8_t test_vlmul_ext_v_u64m2_u64m8(vuint64m2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m4_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t op1) {
@ -943,7 +943,7 @@ vuint64m8_t test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.nxv1f32(<vscale x 2 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.nxv1f32(<vscale x 2 x float> poison, <vscale x 1 x float> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vlmul_ext_v_f32mf2_f32m1(vfloat32mf2_t op1) {
@ -952,7 +952,7 @@ vfloat32m1_t test_vlmul_ext_v_f32mf2_f32m1(vfloat32mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.nxv1f32(<vscale x 4 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.nxv1f32(<vscale x 4 x float> poison, <vscale x 1 x float> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vlmul_ext_v_f32mf2_f32m2(vfloat32mf2_t op1) {
@ -961,7 +961,7 @@ vfloat32m2_t test_vlmul_ext_v_f32mf2_f32m2(vfloat32mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.vector.insert.nxv8f32.nxv1f32(<vscale x 8 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.vector.insert.nxv8f32.nxv1f32(<vscale x 8 x float> poison, <vscale x 1 x float> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vlmul_ext_v_f32mf2_f32m4(vfloat32mf2_t op1) {
@ -970,7 +970,7 @@ vfloat32m4_t test_vlmul_ext_v_f32mf2_f32m4(vfloat32mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv1f32(<vscale x 16 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv1f32(<vscale x 16 x float> poison, <vscale x 1 x float> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vlmul_ext_v_f32mf2_f32m8(vfloat32mf2_t op1) {
@ -979,7 +979,7 @@ vfloat32m8_t test_vlmul_ext_v_f32mf2_f32m8(vfloat32mf2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> poison, <vscale x 2 x float> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vlmul_ext_v_f32m1_f32m2(vfloat32m1_t op1) {
@ -988,7 +988,7 @@ vfloat32m2_t test_vlmul_ext_v_f32m1_f32m2(vfloat32m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.vector.insert.nxv8f32.nxv2f32(<vscale x 8 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.vector.insert.nxv8f32.nxv2f32(<vscale x 8 x float> poison, <vscale x 2 x float> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vlmul_ext_v_f32m1_f32m4(vfloat32m1_t op1) {
@ -997,7 +997,7 @@ vfloat32m4_t test_vlmul_ext_v_f32m1_f32m4(vfloat32m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv2f32(<vscale x 16 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv2f32(<vscale x 16 x float> poison, <vscale x 2 x float> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vlmul_ext_v_f32m1_f32m8(vfloat32m1_t op1) {
@ -1006,7 +1006,7 @@ vfloat32m8_t test_vlmul_ext_v_f32m1_f32m8(vfloat32m1_t op1) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> poison, <vscale x 4 x float> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vlmul_ext_v_f32m2_f32m4(vfloat32m2_t op1) {
@ -1015,7 +1015,7 @@ vfloat32m4_t test_vlmul_ext_v_f32m2_f32m4(vfloat32m2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> poison, <vscale x 4 x float> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vlmul_ext_v_f32m2_f32m8(vfloat32m2_t op1) {
@ -1024,7 +1024,7 @@ vfloat32m8_t test_vlmul_ext_v_f32m2_f32m8(vfloat32m2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m4_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv8f32(<vscale x 16 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv8f32(<vscale x 16 x float> poison, <vscale x 8 x float> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vlmul_ext_v_f32m4_f32m8(vfloat32m4_t op1) {
@ -1033,7 +1033,7 @@ vfloat32m8_t test_vlmul_ext_v_f32m4_f32m8(vfloat32m4_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.nxv1f64(<vscale x 2 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.nxv1f64(<vscale x 2 x double> poison, <vscale x 1 x double> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vlmul_ext_v_f64m1_f64m2(vfloat64m1_t op1) {
@ -1042,7 +1042,7 @@ vfloat64m2_t test_vlmul_ext_v_f64m1_f64m2(vfloat64m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.vector.insert.nxv4f64.nxv1f64(<vscale x 4 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.vector.insert.nxv4f64.nxv1f64(<vscale x 4 x double> poison, <vscale x 1 x double> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vlmul_ext_v_f64m1_f64m4(vfloat64m1_t op1) {
@ -1051,7 +1051,7 @@ vfloat64m4_t test_vlmul_ext_v_f64m1_f64m4(vfloat64m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nxv1f64(<vscale x 8 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nxv1f64(<vscale x 8 x double> poison, <vscale x 1 x double> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vlmul_ext_v_f64m1_f64m8(vfloat64m1_t op1) {
@ -1060,7 +1060,7 @@ vfloat64m8_t test_vlmul_ext_v_f64m1_f64m8(vfloat64m1_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.vector.insert.nxv4f64.nxv2f64(<vscale x 4 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.vector.insert.nxv4f64.nxv2f64(<vscale x 4 x double> poison, <vscale x 2 x double> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vlmul_ext_v_f64m2_f64m4(vfloat64m2_t op1) {
@ -1069,7 +1069,7 @@ vfloat64m4_t test_vlmul_ext_v_f64m2_f64m4(vfloat64m2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nxv2f64(<vscale x 8 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nxv2f64(<vscale x 8 x double> poison, <vscale x 2 x double> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vlmul_ext_v_f64m2_f64m8(vfloat64m2_t op1) {
@ -1078,7 +1078,7 @@ vfloat64m8_t test_vlmul_ext_v_f64m2_f64m8(vfloat64m2_t op1) {
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m4_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nxv4f64(<vscale x 8 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nxv4f64(<vscale x 8 x double> poison, <vscale x 4 x double> [[OP1:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vlmul_ext_v_f64m4_f64m8(vfloat64m4_t op1) {
File diff suppressed because it is too large
@ -6951,7 +6951,7 @@ void test_vloxseg2ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -7319,7 +7319,7 @@ void test_vlseg2e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
|
|||
|
||||
// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2_ta(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg2ff.nxv1i32.i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg2ff.nxv1i32.i32(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, i32* [[BASE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1
@ -7330,7 +7330,7 @@ void test_vlseg2e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
//
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, i32* [[BASE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
@ -7397,7 +7397,7 @@ void test_vlseg2e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_
// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2_tama(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 3)
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 3)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1
@ -7408,7 +7408,7 @@ void test_vlseg2e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_
//
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
@ -3661,7 +3661,7 @@ void test_vlsseg2e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
@ -3700,7 +3700,7 @@ void test_vlsseg2e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -6951,7 +6951,7 @@ void test_vluxseg2ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_tama(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -7,7 +7,7 @@
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
|
||||
|
@ -17,7 +17,7 @@ vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf8_t test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2,
|
||||
|
@ -27,7 +27,7 @@ vint8mf8_t test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
|
||||
|
@ -37,7 +37,7 @@ vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2,
|
||||
|
@ -47,7 +47,7 @@ vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
|
||||
|
@ -57,7 +57,7 @@ vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2,
|
||||
|
@ -67,7 +67,7 @@ vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2,
|
||||
|
@ -77,7 +77,7 @@ vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2,
|
||||
|
@ -87,7 +87,7 @@ vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2,
|
||||
|
@ -97,7 +97,7 @@ vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2,
|
||||
|
@ -107,7 +107,7 @@ vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2,
|
||||
|
@ -117,7 +117,7 @@ vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2,
|
||||
|
@ -127,7 +127,7 @@ vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2,
|
||||
|
@ -137,7 +137,7 @@ vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2,
|
||||
|
@ -147,7 +147,7 @@ vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1,
|
||||
|
@ -157,7 +157,7 @@ vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2,
|
||||
|
@ -167,7 +167,7 @@ vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1,
|
||||
|
@ -177,7 +177,7 @@ vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2,
|
||||
|
@ -187,7 +187,7 @@ vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
|
||||
|
@ -197,7 +197,7 @@ vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2,
|
||||
|
@ -207,7 +207,7 @@ vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
|
||||
|
@ -217,7 +217,7 @@ vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2,
|
||||
|
@ -227,7 +227,7 @@ vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
|
||||
|
@ -237,7 +237,7 @@ vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2,
|
||||
|
@ -247,7 +247,7 @@ vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
|
||||
|
@ -257,7 +257,7 @@ vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2,
|
||||
|
@ -267,7 +267,7 @@ vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1,
|
||||
|
@ -277,7 +277,7 @@ vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2,
|
||||
|
@ -287,7 +287,7 @@ vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
|
||||
|
@ -297,7 +297,7 @@ vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2,
|
||||
|
@ -307,7 +307,7 @@ vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
|
||||
|
@ -317,7 +317,7 @@ vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2,
|
||||
|
@ -327,7 +327,7 @@ vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
|
||||
|
@ -337,7 +337,7 @@ vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2,
|
||||
|
@ -347,7 +347,7 @@ vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
|
||||
|
@ -357,7 +357,7 @@ vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2,
|
||||
|
@ -367,7 +367,7 @@ vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
|
||||
|
@ -377,7 +377,7 @@ vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2,
|
||||
|
@ -387,7 +387,7 @@ vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
|
||||
|
@ -397,7 +397,7 @@ vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2,
|
||||
|
@ -407,7 +407,7 @@ vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
|
||||
|
@ -417,7 +417,7 @@ vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2,
|
||||
|
@ -427,7 +427,7 @@ vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
|
||||
|
@ -437,7 +437,7 @@ vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2,
|
||||
|
@ -447,7 +447,7 @@ vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1,
|
||||
|
@ -457,7 +457,7 @@ vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2,
|
||||
|
@ -467,7 +467,7 @@ vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1,
|
||||
|
@ -477,7 +477,7 @@ vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2,
|
||||
|
@ -487,7 +487,7 @@ vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1,
|
||||
|
@ -497,7 +497,7 @@ vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
|
||||
|
@ -507,7 +507,7 @@ vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2,
|
||||
|
@ -517,7 +517,7 @@ vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2,
@ -527,7 +527,7 @@ vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2,
@ -537,7 +537,7 @@ vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2,
@ -547,7 +547,7 @@ vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2,
@ -557,7 +557,7 @@ vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2,
@ -567,7 +567,7 @@ vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2,
@ -577,7 +577,7 @@ vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2,
@ -587,7 +587,7 @@ vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1,
@ -597,7 +597,7 @@ vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1,
@ -607,7 +607,7 @@ vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1,
@ -617,7 +617,7 @@ vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1,
@ -627,7 +627,7 @@ vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1,
@ -637,7 +637,7 @@ vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2,
@ -647,7 +647,7 @@ vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1,
@ -657,7 +657,7 @@ vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
@ -667,7 +667,7 @@ vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1,
@ -677,7 +677,7 @@ vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
@ -687,7 +687,7 @@ vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1,
@ -697,7 +697,7 @@ vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
@ -707,7 +707,7 @@ vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1,
@ -717,7 +717,7 @@ vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1,
@ -727,7 +727,7 @@ vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1,
@ -737,7 +737,7 @@ vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2,
@ -747,7 +747,7 @@ vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1,
@ -757,7 +757,7 @@ vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2,
@ -767,7 +767,7 @@ vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1,
@ -777,7 +777,7 @@ vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
@ -787,7 +787,7 @@ vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1,
@ -797,7 +797,7 @@ vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
@ -807,7 +807,7 @@ vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1,
@ -817,7 +817,7 @@ vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2,
@ -827,7 +827,7 @@ vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1,
@ -837,7 +837,7 @@ vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2,
@ -847,7 +847,7 @@ vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1,
@ -857,7 +857,7 @@ vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2,
@ -867,7 +867,7 @@ vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1,
@ -877,7 +877,7 @@ vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2,
@ -887,7 +887,7 @@ vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1,
@ -897,7 +897,7 @@ vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1,
@ -907,7 +907,7 @@ vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1,
@ -917,7 +917,7 @@ vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1,
@ -927,7 +927,7 @@ vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1,
@ -937,7 +937,7 @@ vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1,
@ -947,7 +947,7 @@ vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1,
@ -957,7 +957,7 @@ vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1,
@ -967,7 +967,7 @@ vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vmerge_vvm_f64m8(vbool8_t mask, vfloat64m8_t op1,
@ -1013,7 +1013,7 @@ vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, vuint
// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmerge_vvm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@ -1022,7 +1022,7 @@ vint32mf2_t test_vmerge_vvm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, vint32mf2
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmerge_vxm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@ -1031,7 +1031,7 @@ vint32mf2_t test_vmerge_vxm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, int32_t o
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@ -1040,7 +1040,7 @@ vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, vuint32
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmerge_vxm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
@ -1058,7 +1058,7 @@ vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vfl
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vmerge_vvm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vncvt_x_x_w_i8mf8 (vint16mf4_t src, size_t vl) {
@ -15,7 +15,7 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8 (vint16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vncvt_x_x_w_i8mf4 (vint16mf2_t src, size_t vl) {
@ -24,7 +24,7 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4 (vint16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vncvt_x_x_w_i8mf2 (vint16m1_t src, size_t vl) {
@ -33,7 +33,7 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2 (vint16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vncvt_x_x_w_i8m1 (vint16m2_t src, size_t vl) {
@ -42,7 +42,7 @@ vint8m1_t test_vncvt_x_x_w_i8m1 (vint16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vncvt_x_x_w_i8m2 (vint16m4_t src, size_t vl) {
@ -51,7 +51,7 @@ vint8m2_t test_vncvt_x_x_w_i8m2 (vint16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vncvt_x_x_w_i8m4 (vint16m8_t src, size_t vl) {
@ -60,7 +60,7 @@ vint8m4_t test_vncvt_x_x_w_i8m4 (vint16m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vncvt_x_x_w_u8mf8 (vuint16mf4_t src, size_t vl) {
@ -69,7 +69,7 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8 (vuint16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vncvt_x_x_w_u8mf4 (vuint16mf2_t src, size_t vl) {
@ -78,7 +78,7 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4 (vuint16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vncvt_x_x_w_u8mf2 (vuint16m1_t src, size_t vl) {
@ -87,7 +87,7 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2 (vuint16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vncvt_x_x_w_u8m1 (vuint16m2_t src, size_t vl) {
@ -96,7 +96,7 @@ vuint8m1_t test_vncvt_x_x_w_u8m1 (vuint16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vncvt_x_x_w_u8m2 (vuint16m4_t src, size_t vl) {
@ -105,7 +105,7 @@ vuint8m2_t test_vncvt_x_x_w_u8m2 (vuint16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vncvt_x_x_w_u8m4 (vuint16m8_t src, size_t vl) {
@ -114,7 +114,7 @@ vuint8m4_t test_vncvt_x_x_w_u8m4 (vuint16m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vncvt_x_x_w_i16mf4 (vint32mf2_t src, size_t vl) {
@ -123,7 +123,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4 (vint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vncvt_x_x_w_i16mf2 (vint32m1_t src, size_t vl) {
@ -132,7 +132,7 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2 (vint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vncvt_x_x_w_i16m1 (vint32m2_t src, size_t vl) {
@ -141,7 +141,7 @@ vint16m1_t test_vncvt_x_x_w_i16m1 (vint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vncvt_x_x_w_i16m2 (vint32m4_t src, size_t vl) {
@ -150,7 +150,7 @@ vint16m2_t test_vncvt_x_x_w_i16m2 (vint32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vncvt_x_x_w_i16m4 (vint32m8_t src, size_t vl) {
@ -159,7 +159,7 @@ vint16m4_t test_vncvt_x_x_w_i16m4 (vint32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vncvt_x_x_w_u16mf4 (vuint32mf2_t src, size_t vl) {
@ -168,7 +168,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4 (vuint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vncvt_x_x_w_u16mf2 (vuint32m1_t src, size_t vl) {
@ -177,7 +177,7 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2 (vuint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vncvt_x_x_w_u16m1 (vuint32m2_t src, size_t vl) {
@ -186,7 +186,7 @@ vuint16m1_t test_vncvt_x_x_w_u16m1 (vuint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vncvt_x_x_w_u16m2 (vuint32m4_t src, size_t vl) {
@ -195,7 +195,7 @@ vuint16m2_t test_vncvt_x_x_w_u16m2 (vuint32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vncvt_x_x_w_u16m4 (vuint32m8_t src, size_t vl) {
@ -204,7 +204,7 @@ vuint16m4_t test_vncvt_x_x_w_u16m4 (vuint32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vncvt_x_x_w_i32mf2 (vint64m1_t src, size_t vl) {
@@ -213,7 +213,7 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2 (vint64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vncvt_x_x_w_i32m1 (vint64m2_t src, size_t vl) {
@@ -222,7 +222,7 @@ vint32m1_t test_vncvt_x_x_w_i32m1 (vint64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vncvt_x_x_w_i32m2 (vint64m4_t src, size_t vl) {
@@ -231,7 +231,7 @@ vint32m2_t test_vncvt_x_x_w_i32m2 (vint64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vncvt_x_x_w_i32m4 (vint64m8_t src, size_t vl) {
@@ -240,7 +240,7 @@ vint32m4_t test_vncvt_x_x_w_i32m4 (vint64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vncvt_x_x_w_u32mf2 (vuint64m1_t src, size_t vl) {
@@ -249,7 +249,7 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2 (vuint64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vncvt_x_x_w_u32m1 (vuint64m2_t src, size_t vl) {
@@ -258,7 +258,7 @@ vuint32m1_t test_vncvt_x_x_w_u32m1 (vuint64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vncvt_x_x_w_u32m2 (vuint64m4_t src, size_t vl) {
@@ -267,7 +267,7 @@ vuint32m2_t test_vncvt_x_x_w_u32m2 (vuint64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vncvt_x_x_w_u32m4 (vuint64m8_t src, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4_tu(vuint16mf4_t merge, vuint32mf2_t src, si
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf4_t test_vncvt_x_x_w_i16mf4_ta(vint32mf2_t src, size_t vl) {
@@ -573,7 +573,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4_ta(vint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf4_t test_vncvt_x_x_w_u16mf4_ta(vuint32mf2_t src, size_t vl) {
@@ -618,7 +618,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t merge, vu
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tama(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf4_t test_vncvt_x_x_w_i16mf4_tama(vbool64_t mask, vint32mf2_t src, size_t vl) {
@@ -627,7 +627,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4_tama(vbool64_t mask, vint32mf2_t src, size_t
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tama(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf4_t test_vncvt_x_x_w_u16mf4_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) {
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i8mf8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) {
@@ -16,7 +16,7 @@ vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) {
@@ -25,7 +25,7 @@ vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) {
@@ -34,7 +34,7 @@ vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) {
@@ -43,7 +43,7 @@ vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) {
@@ -52,7 +52,7 @@ vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) {
@@ -61,7 +61,7 @@ vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) {
@@ -79,7 +79,7 @@ vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) {
@@ -88,7 +88,7 @@ vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) {
@@ -97,7 +97,7 @@ vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) {
@@ -106,7 +106,7 @@ vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) {
@@ -115,7 +115,7 @@ vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) {
@@ -124,7 +124,7 @@ vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) {
@@ -133,7 +133,7 @@ vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) {
@@ -142,7 +142,7 @@ vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) {
@@ -151,7 +151,7 @@ vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) {
@@ -160,7 +160,7 @@ vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) {
@@ -169,7 +169,7 @@ vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) {
@@ -178,7 +178,7 @@ vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) {
@@ -187,7 +187,7 @@ vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) {
@@ -196,7 +196,7 @@ vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vneg_v_i64m8 (vint64m8_t op1, size_t vl) {
@@ -412,7 +412,7 @@ vint32mf2_t test_vneg_v_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t vl)
// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vneg_v_i32mf2_ta(vint32mf2_t op1, size_t vl) {
@@ -439,7 +439,7 @@ vint32mf2_t test_vneg_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2
// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tama(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vneg_v_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t vl) {
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vnot_v_i8mf8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf8_t test_vnot_v_i8mf8 (vint8mf8_t op1, size_t vl) {
@@ -16,7 +16,7 @@ vint8mf8_t test_vnot_v_i8mf8 (vint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf4_t test_vnot_v_i8mf4 (vint8mf4_t op1, size_t vl) {
@@ -25,7 +25,7 @@ vint8mf4_t test_vnot_v_i8mf4 (vint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf2_t test_vnot_v_i8mf2 (vint8mf2_t op1, size_t vl) {
@@ -34,7 +34,7 @@ vint8mf2_t test_vnot_v_i8mf2 (vint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m1_t test_vnot_v_i8m1 (vint8m1_t op1, size_t vl) {
@@ -43,7 +43,7 @@ vint8m1_t test_vnot_v_i8m1 (vint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m2_t test_vnot_v_i8m2 (vint8m2_t op1, size_t vl) {
@@ -52,7 +52,7 @@ vint8m2_t test_vnot_v_i8m2 (vint8m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m4_t test_vnot_v_i8m4 (vint8m4_t op1, size_t vl) {
@@ -61,7 +61,7 @@ vint8m4_t test_vnot_v_i8m4 (vint8m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m8_t test_vnot_v_i8m8 (vint8m8_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vint8m8_t test_vnot_v_i8m8 (vint8m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf4_t test_vnot_v_i16mf4 (vint16mf4_t op1, size_t vl) {
@@ -79,7 +79,7 @@ vint16mf4_t test_vnot_v_i16mf4 (vint16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf2_t test_vnot_v_i16mf2 (vint16mf2_t op1, size_t vl) {
@@ -88,7 +88,7 @@ vint16mf2_t test_vnot_v_i16mf2 (vint16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m1_t test_vnot_v_i16m1 (vint16m1_t op1, size_t vl) {
@@ -97,7 +97,7 @@ vint16m1_t test_vnot_v_i16m1 (vint16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m2_t test_vnot_v_i16m2 (vint16m2_t op1, size_t vl) {
@@ -106,7 +106,7 @@ vint16m2_t test_vnot_v_i16m2 (vint16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m4_t test_vnot_v_i16m4 (vint16m4_t op1, size_t vl) {
@@ -115,7 +115,7 @@ vint16m4_t test_vnot_v_i16m4 (vint16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m8_t test_vnot_v_i16m8 (vint16m8_t op1, size_t vl) {
@@ -124,7 +124,7 @@ vint16m8_t test_vnot_v_i16m8 (vint16m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vnot_v_i32mf2 (vint32mf2_t op1, size_t vl) {
@@ -133,7 +133,7 @@ vint32mf2_t test_vnot_v_i32mf2 (vint32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vnot_v_i32m1 (vint32m1_t op1, size_t vl) {
@@ -142,7 +142,7 @@ vint32m1_t test_vnot_v_i32m1 (vint32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vnot_v_i32m2 (vint32m2_t op1, size_t vl) {
@@ -151,7 +151,7 @@ vint32m2_t test_vnot_v_i32m2 (vint32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vnot_v_i32m4 (vint32m4_t op1, size_t vl) {
@@ -160,7 +160,7 @@ vint32m4_t test_vnot_v_i32m4 (vint32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vnot_v_i32m8 (vint32m8_t op1, size_t vl) {
@@ -169,7 +169,7 @@ vint32m8_t test_vnot_v_i32m8 (vint32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vnot_v_i64m1 (vint64m1_t op1, size_t vl) {
@@ -178,7 +178,7 @@ vint64m1_t test_vnot_v_i64m1 (vint64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vnot_v_i64m2 (vint64m2_t op1, size_t vl) {
@@ -187,7 +187,7 @@ vint64m2_t test_vnot_v_i64m2 (vint64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vnot_v_i64m4 (vint64m4_t op1, size_t vl) {
@@ -196,7 +196,7 @@ vint64m4_t test_vnot_v_i64m4 (vint64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vnot_v_i64m8 (vint64m8_t op1, size_t vl) {
@@ -205,7 +205,7 @@ vint64m8_t test_vnot_v_i64m8 (vint64m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u8mf8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf8_t test_vnot_v_u8mf8 (vuint8mf8_t op1, size_t vl) {
@@ -214,7 +214,7 @@ vuint8mf8_t test_vnot_v_u8mf8 (vuint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u8mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf4_t test_vnot_v_u8mf4 (vuint8mf4_t op1, size_t vl) {
@@ -223,7 +223,7 @@ vuint8mf4_t test_vnot_v_u8mf4 (vuint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf2_t test_vnot_v_u8mf2 (vuint8mf2_t op1, size_t vl) {
@@ -232,7 +232,7 @@ vuint8mf2_t test_vnot_v_u8mf2 (vuint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m1_t test_vnot_v_u8m1 (vuint8m1_t op1, size_t vl) {
@@ -241,7 +241,7 @@ vuint8m1_t test_vnot_v_u8m1 (vuint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m2_t test_vnot_v_u8m2 (vuint8m2_t op1, size_t vl) {
@@ -250,7 +250,7 @@ vuint8m2_t test_vnot_v_u8m2 (vuint8m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m4_t test_vnot_v_u8m4 (vuint8m4_t op1, size_t vl) {
@@ -259,7 +259,7 @@ vuint8m4_t test_vnot_v_u8m4 (vuint8m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m8_t test_vnot_v_u8m8 (vuint8m8_t op1, size_t vl) {
@@ -268,7 +268,7 @@ vuint8m8_t test_vnot_v_u8m8 (vuint8m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf4_t test_vnot_v_u16mf4 (vuint16mf4_t op1, size_t vl) {
@@ -277,7 +277,7 @@ vuint16mf4_t test_vnot_v_u16mf4 (vuint16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf2_t test_vnot_v_u16mf2 (vuint16mf2_t op1, size_t vl) {
@@ -286,7 +286,7 @@ vuint16mf2_t test_vnot_v_u16mf2 (vuint16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m1_t test_vnot_v_u16m1 (vuint16m1_t op1, size_t vl) {
@@ -295,7 +295,7 @@ vuint16m1_t test_vnot_v_u16m1 (vuint16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m2_t test_vnot_v_u16m2 (vuint16m2_t op1, size_t vl) {
@@ -304,7 +304,7 @@ vuint16m2_t test_vnot_v_u16m2 (vuint16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m4_t test_vnot_v_u16m4 (vuint16m4_t op1, size_t vl) {
@@ -313,7 +313,7 @@ vuint16m4_t test_vnot_v_u16m4 (vuint16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m8_t test_vnot_v_u16m8 (vuint16m8_t op1, size_t vl) {
@@ -322,7 +322,7 @@ vuint16m8_t test_vnot_v_u16m8 (vuint16m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vnot_v_u32mf2 (vuint32mf2_t op1, size_t vl) {
@@ -331,7 +331,7 @@ vuint32mf2_t test_vnot_v_u32mf2 (vuint32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vnot_v_u32m1 (vuint32m1_t op1, size_t vl) {
@@ -340,7 +340,7 @@ vuint32m1_t test_vnot_v_u32m1 (vuint32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vnot_v_u32m2 (vuint32m2_t op1, size_t vl) {
@@ -349,7 +349,7 @@ vuint32m2_t test_vnot_v_u32m2 (vuint32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vnot_v_u32m4 (vuint32m4_t op1, size_t vl) {
@@ -358,7 +358,7 @@ vuint32m4_t test_vnot_v_u32m4 (vuint32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vnot_v_u32m8 (vuint32m8_t op1, size_t vl) {
@@ -367,7 +367,7 @@ vuint32m8_t test_vnot_v_u32m8 (vuint32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vnot_v_u64m1 (vuint64m1_t op1, size_t vl) {
@@ -376,7 +376,7 @@ vuint64m1_t test_vnot_v_u64m1 (vuint64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vnot_v_u64m2 (vuint64m2_t op1, size_t vl) {
@@ -385,7 +385,7 @@ vuint64m2_t test_vnot_v_u64m2 (vuint64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vnot_v_u64m4 (vuint64m4_t op1, size_t vl) {
@@ -394,7 +394,7 @@ vuint64m4_t test_vnot_v_u64m4 (vuint64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vnot_v_u64m8 (vuint64m8_t op1, size_t vl) {
@@ -817,7 +817,7 @@ vuint32mf2_t test_vnot_v_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t
// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vnot_v_i32mf2_ta(vint32mf2_t op1, size_t vl) {
@@ -826,7 +826,7 @@ vint32mf2_t test_vnot_v_i32mf2_ta(vint32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vnot_v_u32mf2_ta(vuint32mf2_t op1, size_t vl) {
@@ -871,7 +871,7 @@ vuint32mf2_t test_vnot_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32
// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tama(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vnot_v_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t vl) {
@@ -880,7 +880,7 @@ vint32mf2_t test_vnot_v_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t vl)
// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tama(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vnot_v_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t vl) {
@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) {
@ -15,7 +15,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) {
@ -24,7 +24,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) {
@ -33,7 +33,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) {
@ -42,7 +42,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) {
@ -51,7 +51,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) {
@ -60,7 +60,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) {
@ -69,7 +69,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) {
@ -78,7 +78,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) {
@ -87,7 +87,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) {
@ -96,7 +96,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) {
@ -105,7 +105,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) {
@ -114,7 +114,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) {
|
||||
|
@ -123,7 +123,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) {
|
||||
|
@ -132,7 +132,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) {
|
||||
|
@ -141,7 +141,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) {
|
||||
|
@ -150,7 +150,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) {
|
||||
|
@ -159,7 +159,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) {
|
||||
|
@ -168,7 +168,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) {
|
||||
|
@ -177,7 +177,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) {
|
||||
|
@ -186,7 +186,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) {
|
||||
|
@ -195,7 +195,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) {
|
||||
|
@ -204,7 +204,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) {
|
||||
|
@ -213,7 +213,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) {
|
||||
|
@ -222,7 +222,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) {
|
||||
|
@ -231,7 +231,7 @@ vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) {
|
||||
|
@ -240,7 +240,7 @@ vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) {
|
||||
|
@ -249,7 +249,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) {
|
||||
|
@ -258,7 +258,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) {
|
||||
|
@ -267,7 +267,7 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vwcvtu_x_x_v_u64m8 (vuint32m4_t src, size_t vl) {
|
||||
|
@ -564,7 +564,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1_tu(vuint64m1_t merge, vuint32mf2_t src, size
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vwcvt_x_x_v_i64m1_ta(vint32mf2_t src, size_t vl) {
|
||||
|
@ -573,7 +573,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1_ta(vint32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vwcvtu_x_x_v_u64m1_ta(vuint32mf2_t src, size_t vl) {
|
||||
|
@ -618,7 +618,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuin
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tama(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vwcvt_x_x_v_i64m1_tama(vbool64_t mask, vint32mf2_t src, size_t vl) {
|
||||
|
@ -627,7 +627,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1_tama(vbool64_t mask, vint32mf2_t src, size_t v
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tama(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vwcvtu_x_x_v_u64m1_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) {
|
||||
|
|
|
@ -566,7 +566,7 @@ vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, v
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vcompress_vm_i32mf2_ta(vbool64_t mask, vint32mf2_t src, size_t vl) {
|
||||
|
@ -575,7 +575,7 @@ vint32mf2_t test_vcompress_vm_i32mf2_ta(vbool64_t mask, vint32mf2_t src, size_t
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vcompress_vm_u32mf2_ta(vbool64_t mask, vuint32mf2_t src, size_t vl) {
|
||||
|
@ -584,7 +584,7 @@ vuint32mf2_t test_vcompress_vm_u32mf2_ta(vbool64_t mask, vuint32mf2_t src, size_
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vcompress_vm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
|
||||
|
@ -17,7 +17,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) {
|
||||
|
@ -26,7 +26,7 @@ vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) {
|
||||
|
@ -35,7 +35,7 @@ vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) {
|
||||
|
@ -44,7 +44,7 @@ vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) {
|
||||
|
@ -53,7 +53,7 @@ vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) {
|
||||
|
@ -62,7 +62,7 @@ vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) {
|
||||
|
@ -71,7 +71,7 @@ vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) {
|
||||
|
@ -80,7 +80,7 @@ vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfabs_v_f64m8 (vfloat64m8_t op1, size_t vl) {
|
||||
|
@ -170,7 +170,7 @@ vfloat64m8_t test_vfabs_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf4_t test_vfabs_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
|
||||
|
@ -179,7 +179,7 @@ vfloat16mf4_t test_vfabs_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf2_t test_vfabs_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
|
||||
|
@ -188,7 +188,7 @@ vfloat16mf2_t test_vfabs_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_vfabs_v_f16m1 (vfloat16m1_t op1, size_t vl) {
|
||||
|
@ -197,7 +197,7 @@ vfloat16m1_t test_vfabs_v_f16m1 (vfloat16m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_vfabs_v_f16m2 (vfloat16m2_t op1, size_t vl) {
|
||||
|
@ -206,7 +206,7 @@ vfloat16m2_t test_vfabs_v_f16m2 (vfloat16m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m4_t test_vfabs_v_f16m4 (vfloat16m4_t op1, size_t vl) {
|
||||
|
@ -215,7 +215,7 @@ vfloat16m4_t test_vfabs_v_f16m4 (vfloat16m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m8_t test_vfabs_v_f16m8 (vfloat16m8_t op1, size_t vl) {
|
||||
|
@ -287,7 +287,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, siz
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfabs_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) {
|
||||
|
@ -314,7 +314,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vflo
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tama(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfabs_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1,
|
||||
|
@ -18,7 +18,7 @@ vfloat32mf2_t test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2,
|
||||
|
@ -28,7 +28,7 @@ vfloat32m1_t test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2,
|
||||
|
@ -38,7 +38,7 @@ vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2,
|
||||
|
@ -48,7 +48,7 @@ vfloat32m4_t test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2,
|
||||
|
@ -58,7 +58,7 @@ vfloat32m8_t test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1,
|
||||
|
@ -68,7 +68,7 @@ vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1,
|
||||
|
@ -78,7 +78,7 @@ vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1,
|
||||
|
@ -88,7 +88,7 @@ vfloat64m4_t test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfmerge_vfm_f64m8(vbool8_t mask, vfloat64m8_t op1, double op2,
|
||||
|
@ -98,7 +98,7 @@ vfloat64m8_t test_vfmerge_vfm_f64m8(vbool8_t mask, vfloat64m8_t op1, double op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf4_t test_vfmerge_vfm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
|
||||
|
@ -107,7 +107,7 @@ vfloat16mf4_t test_vfmerge_vfm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, _Float1
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf2_t test_vfmerge_vfm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
|
||||
|
@ -116,7 +116,7 @@ vfloat16mf2_t test_vfmerge_vfm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, _Float1
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_vfmerge_vfm_f16m1(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
|
||||
|
@ -125,7 +125,7 @@ vfloat16m1_t test_vfmerge_vfm_f16m1(vbool16_t mask, vfloat16m1_t op1, _Float16 o
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_vfmerge_vfm_f16m2(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
|
||||
|
@ -134,7 +134,7 @@ vfloat16m2_t test_vfmerge_vfm_f16m2(vbool8_t mask, vfloat16m2_t op1, _Float16 op
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m4_t test_vfmerge_vfm_f16m4(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
|
||||
|
@ -143,7 +143,7 @@ vfloat16m4_t test_vfmerge_vfm_f16m4(vbool4_t mask, vfloat16m4_t op1, _Float16 op
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m8_t test_vfmerge_vfm_f16m8(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
|
||||
|
@ -161,7 +161,7 @@ vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vf
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfmerge_vfm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
|
||||
|
@ -17,7 +17,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) {
@ -26,7 +26,7 @@ vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) {
@ -35,7 +35,7 @@ vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) {
@ -44,7 +44,7 @@ vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) {
@ -53,7 +53,7 @@ vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) {
@ -62,7 +62,7 @@ vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) {
@ -71,7 +71,7 @@ vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) {
@ -80,7 +80,7 @@ vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfneg_v_f64m8 (vfloat64m8_t op1, size_t vl) {
@ -170,7 +170,7 @@ vfloat64m8_t test_vfneg_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat
// CHECK-RV64-LABEL: @test_vfneg_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfneg_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
@ -179,7 +179,7 @@ vfloat16mf4_t test_vfneg_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfneg_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
@ -188,7 +188,7 @@ vfloat16mf2_t test_vfneg_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfneg_v_f16m1 (vfloat16m1_t op1, size_t vl) {
@ -197,7 +197,7 @@ vfloat16m1_t test_vfneg_v_f16m1 (vfloat16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfneg_v_f16m2 (vfloat16m2_t op1, size_t vl) {
@ -206,7 +206,7 @@ vfloat16m2_t test_vfneg_v_f16m2 (vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfneg_v_f16m4 (vfloat16m4_t op1, size_t vl) {
@ -215,7 +215,7 @@ vfloat16m4_t test_vfneg_v_f16m4 (vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfneg_v_f16m8 (vfloat16m8_t op1, size_t vl) {
@ -287,7 +287,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, siz
// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfneg_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) {
@ -314,7 +314,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vflo
// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfneg_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
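For reference, every unmasked vfneg case above follows the same pattern: the builtin takes no merge operand, so it is tail agnostic and the passthru argument of the generated @llvm.riscv.vfsgnjn call changes from undef to poison while the rest of the IR stays the same. A minimal sketch of the kind of C source such a CHECK block is generated from, assuming the un-prefixed intrinsic names used by these tests:

#include <riscv_vector.h>

// Unmasked, tail-agnostic negation: no merge operand is passed, so the
// compiler now materializes a poison passthru for the vfsgnjn intrinsic.
vfloat32m1_t negate(vfloat32m1_t op1, size_t vl) {
  return vfneg_v_f32m1(op1, vl);
}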
@ -9,7 +9,7 @@
// CHECK-RV64-LABEL: @test_vle8ff_v_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@ -22,7 +22,7 @@ vint8mf8_t test_vle8ff_v_i8mf8(const int8_t *base, size_t *new_vl, size_t vl) {
// CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@ -35,7 +35,7 @@ vint8mf4_t test_vle8ff_v_i8mf4(const int8_t *base, size_t *new_vl, size_t vl) {
// CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@ -48,7 +48,7 @@ vint8mf2_t test_vle8ff_v_i8mf2(const int8_t *base, size_t *new_vl, size_t vl) {
// CHECK-RV64-LABEL: @test_vle8ff_v_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@ -61,7 +61,7 @@ vint8m1_t test_vle8ff_v_i8m1(const int8_t *base, size_t *new_vl, size_t vl) {
// CHECK-RV64-LABEL: @test_vle8ff_v_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@ -74,7 +74,7 @@ vint8m2_t test_vle8ff_v_i8m2(const int8_t *base, size_t *new_vl, size_t vl) {
// CHECK-RV64-LABEL: @test_vle8ff_v_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@ -87,7 +87,7 @@ vint8m4_t test_vle8ff_v_i8m4(const int8_t *base, size_t *new_vl, size_t vl) {
// CHECK-RV64-LABEL: @test_vle8ff_v_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@ -100,7 +100,7 @@ vint8m8_t test_vle8ff_v_i8m8(const int8_t *base, size_t *new_vl, size_t vl) {
// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@ -113,7 +113,7 @@ vuint8mf8_t test_vle8ff_v_u8mf8(const uint8_t *base, size_t *new_vl, size_t vl)
// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@ -126,7 +126,7 @@ vuint8mf4_t test_vle8ff_v_u8mf4(const uint8_t *base, size_t *new_vl, size_t vl)
// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@ -139,7 +139,7 @@ vuint8mf2_t test_vle8ff_v_u8mf2(const uint8_t *base, size_t *new_vl, size_t vl)
// CHECK-RV64-LABEL: @test_vle8ff_v_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@ -152,7 +152,7 @@ vuint8m1_t test_vle8ff_v_u8m1(const uint8_t *base, size_t *new_vl, size_t vl) {
// CHECK-RV64-LABEL: @test_vle8ff_v_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@ -165,7 +165,7 @@ vuint8m2_t test_vle8ff_v_u8m2(const uint8_t *base, size_t *new_vl, size_t vl) {
// CHECK-RV64-LABEL: @test_vle8ff_v_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@ -178,7 +178,7 @@ vuint8m4_t test_vle8ff_v_u8m4(const uint8_t *base, size_t *new_vl, size_t vl) {
// CHECK-RV64-LABEL: @test_vle8ff_v_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
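The vle8ff hunks above show the same substitution for fault-only-first loads: the unmasked builtins carry no merge operand, so the passthru of @llvm.riscv.vleff becomes poison, and the second member of the returned { data, new_vl } pair is stored through the new_vl pointer. A minimal usage sketch under the same un-prefixed naming assumption:

#include <riscv_vector.h>

// Fault-only-first load: returns the loaded vector and reports how many
// elements were actually read back through new_vl.
vint8mf8_t load_ff(const int8_t *base, size_t *new_vl, size_t vl) {
  return vle8ff_v_i8mf8(base, new_vl, vl);
}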
@ -373,7 +373,7 @@ vuint8m8_t test_vle8ff_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8
|
|||
// CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i16>, i64 } @llvm.riscv.vleff.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i16>, i64 } @llvm.riscv.vleff.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -386,7 +386,7 @@ vint16mf4_t test_vle16ff_v_i16mf4(const int16_t *base, size_t *new_vl, size_t vl
|
|||
// CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i16>, i64 } @llvm.riscv.vleff.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i16>, i64 } @llvm.riscv.vleff.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -399,7 +399,7 @@ vint16mf2_t test_vle16ff_v_i16mf2(const int16_t *base, size_t *new_vl, size_t vl
|
|||
// CHECK-RV64-LABEL: @test_vle16ff_v_i16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i16>, i64 } @llvm.riscv.vleff.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i16>, i64 } @llvm.riscv.vleff.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -412,7 +412,7 @@ vint16m1_t test_vle16ff_v_i16m1(const int16_t *base, size_t *new_vl, size_t vl)
|
|||
// CHECK-RV64-LABEL: @test_vle16ff_v_i16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i16>, i64 } @llvm.riscv.vleff.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i16>, i64 } @llvm.riscv.vleff.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -425,7 +425,7 @@ vint16m2_t test_vle16ff_v_i16m2(const int16_t *base, size_t *new_vl, size_t vl)
|
|||
// CHECK-RV64-LABEL: @test_vle16ff_v_i16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i16>, i64 } @llvm.riscv.vleff.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i16>, i64 } @llvm.riscv.vleff.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -438,7 +438,7 @@ vint16m4_t test_vle16ff_v_i16m4(const int16_t *base, size_t *new_vl, size_t vl)
|
|||
// CHECK-RV64-LABEL: @test_vle16ff_v_i16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i16>, i64 } @llvm.riscv.vleff.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i16>, i64 } @llvm.riscv.vleff.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -451,7 +451,7 @@ vint16m8_t test_vle16ff_v_i16m8(const int16_t *base, size_t *new_vl, size_t vl)
|
|||
// CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i16>, i64 } @llvm.riscv.vleff.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i16>, i64 } @llvm.riscv.vleff.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -464,7 +464,7 @@ vuint16mf4_t test_vle16ff_v_u16mf4(const uint16_t *base, size_t *new_vl, size_t
|
|||
// CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i16>, i64 } @llvm.riscv.vleff.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i16>, i64 } @llvm.riscv.vleff.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -477,7 +477,7 @@ vuint16mf2_t test_vle16ff_v_u16mf2(const uint16_t *base, size_t *new_vl, size_t
|
|||
// CHECK-RV64-LABEL: @test_vle16ff_v_u16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i16>, i64 } @llvm.riscv.vleff.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i16>, i64 } @llvm.riscv.vleff.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -490,7 +490,7 @@ vuint16m1_t test_vle16ff_v_u16m1(const uint16_t *base, size_t *new_vl, size_t vl
|
|||
// CHECK-RV64-LABEL: @test_vle16ff_v_u16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i16>, i64 } @llvm.riscv.vleff.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i16>, i64 } @llvm.riscv.vleff.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -503,7 +503,7 @@ vuint16m2_t test_vle16ff_v_u16m2(const uint16_t *base, size_t *new_vl, size_t vl
|
|||
// CHECK-RV64-LABEL: @test_vle16ff_v_u16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i16>, i64 } @llvm.riscv.vleff.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i16>, i64 } @llvm.riscv.vleff.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -516,7 +516,7 @@ vuint16m4_t test_vle16ff_v_u16m4(const uint16_t *base, size_t *new_vl, size_t vl
|
|||
// CHECK-RV64-LABEL: @test_vle16ff_v_u16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i16>, i64 } @llvm.riscv.vleff.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i16>, i64 } @llvm.riscv.vleff.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -685,7 +685,7 @@ vuint16m8_t test_vle16ff_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const u
|
|||
// CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -698,7 +698,7 @@ vint32mf2_t test_vle32ff_v_i32mf2(const int32_t *base, size_t *new_vl, size_t vl
|
|||
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -711,7 +711,7 @@ vint32m1_t test_vle32ff_v_i32m1(const int32_t *base, size_t *new_vl, size_t vl)
|
|||
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i32>, i64 } @llvm.riscv.vleff.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i32>, i64 } @llvm.riscv.vleff.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -724,7 +724,7 @@ vint32m2_t test_vle32ff_v_i32m2(const int32_t *base, size_t *new_vl, size_t vl)
|
|||
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i32>, i64 } @llvm.riscv.vleff.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i32>, i64 } @llvm.riscv.vleff.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -737,7 +737,7 @@ vint32m4_t test_vle32ff_v_i32m4(const int32_t *base, size_t *new_vl, size_t vl)
|
|||
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i32>, i64 } @llvm.riscv.vleff.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i32>, i64 } @llvm.riscv.vleff.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i32>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i32>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -750,7 +750,7 @@ vint32m8_t test_vle32ff_v_i32m8(const int32_t *base, size_t *new_vl, size_t vl)
|
|||
// CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -763,7 +763,7 @@ vuint32mf2_t test_vle32ff_v_u32mf2(const uint32_t *base, size_t *new_vl, size_t
|
|||
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -776,7 +776,7 @@ vuint32m1_t test_vle32ff_v_u32m1(const uint32_t *base, size_t *new_vl, size_t vl
|
|||
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i32>, i64 } @llvm.riscv.vleff.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i32>, i64 } @llvm.riscv.vleff.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -789,7 +789,7 @@ vuint32m2_t test_vle32ff_v_u32m2(const uint32_t *base, size_t *new_vl, size_t vl
|
|||
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i32>, i64 } @llvm.riscv.vleff.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i32>, i64 } @llvm.riscv.vleff.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -802,7 +802,7 @@ vuint32m4_t test_vle32ff_v_u32m4(const uint32_t *base, size_t *new_vl, size_t vl
|
|||
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i32>, i64 } @llvm.riscv.vleff.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i32>, i64 } @llvm.riscv.vleff.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i32>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i32>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -815,7 +815,7 @@ vuint32m8_t test_vle32ff_v_u32m8(const uint32_t *base, size_t *new_vl, size_t vl
|
|||
// CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x float>, i64 } @llvm.riscv.vleff.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x float>, i64 } @llvm.riscv.vleff.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -828,7 +828,7 @@ vfloat32mf2_t test_vle32ff_v_f32mf2(const float *base, size_t *new_vl, size_t vl
|
|||
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x float>, i64 } @llvm.riscv.vleff.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x float>, i64 } @llvm.riscv.vleff.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -841,7 +841,7 @@ vfloat32m1_t test_vle32ff_v_f32m1(const float *base, size_t *new_vl, size_t vl)
|
|||
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x float>, i64 } @llvm.riscv.vleff.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x float>, i64 } @llvm.riscv.vleff.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -854,7 +854,7 @@ vfloat32m2_t test_vle32ff_v_f32m2(const float *base, size_t *new_vl, size_t vl)
|
|||
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x float>, i64 } @llvm.riscv.vleff.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x float>, i64 } @llvm.riscv.vleff.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -867,7 +867,7 @@ vfloat32m4_t test_vle32ff_v_f32m4(const float *base, size_t *new_vl, size_t vl)
|
|||
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x float>, i64 } @llvm.riscv.vleff.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x float>, i64 } @llvm.riscv.vleff.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x float>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x float>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -1075,7 +1075,7 @@ vfloat32m8_t test_vle32ff_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const
|
|||
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -1088,7 +1088,7 @@ vint64m1_t test_vle64ff_v_i64m1(const int64_t *base, size_t *new_vl, size_t vl)
|
|||
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -1101,7 +1101,7 @@ vint64m2_t test_vle64ff_v_i64m2(const int64_t *base, size_t *new_vl, size_t vl)
|
|||
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i64>, i64 } @llvm.riscv.vleff.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i64>, i64 } @llvm.riscv.vleff.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -1114,7 +1114,7 @@ vint64m4_t test_vle64ff_v_i64m4(const int64_t *base, size_t *new_vl, size_t vl)
|
|||
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i64>, i64 } @llvm.riscv.vleff.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i64>, i64 } @llvm.riscv.vleff.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -1127,7 +1127,7 @@ vint64m8_t test_vle64ff_v_i64m8(const int64_t *base, size_t *new_vl, size_t vl)
|
|||
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -1140,7 +1140,7 @@ vuint64m1_t test_vle64ff_v_u64m1(const uint64_t *base, size_t *new_vl, size_t vl
|
|||
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -1153,7 +1153,7 @@ vuint64m2_t test_vle64ff_v_u64m2(const uint64_t *base, size_t *new_vl, size_t vl
|
|||
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i64>, i64 } @llvm.riscv.vleff.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i64>, i64 } @llvm.riscv.vleff.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -1166,7 +1166,7 @@ vuint64m4_t test_vle64ff_v_u64m4(const uint64_t *base, size_t *new_vl, size_t vl
|
|||
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i64>, i64 } @llvm.riscv.vleff.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i64>, i64 } @llvm.riscv.vleff.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -1179,7 +1179,7 @@ vuint64m8_t test_vle64ff_v_u64m8(const uint64_t *base, size_t *new_vl, size_t vl
|
|||
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -1192,7 +1192,7 @@ vfloat64m1_t test_vle64ff_v_f64m1(const double *base, size_t *new_vl, size_t vl)
|
|||
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x double>, i64 } @llvm.riscv.vleff.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x double>, i64 } @llvm.riscv.vleff.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -1205,7 +1205,7 @@ vfloat64m2_t test_vle64ff_v_f64m2(const double *base, size_t *new_vl, size_t vl)
|
|||
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x double>, i64 } @llvm.riscv.vleff.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x double>, i64 } @llvm.riscv.vleff.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
|
||||
|
@ -1218,7 +1218,7 @@ vfloat64m4_t test_vle64ff_v_f64m4(const double *base, size_t *new_vl, size_t vl)
|
|||
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x double>, i64 } @llvm.riscv.vleff.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x double>, i64 } @llvm.riscv.vleff.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x double>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x double>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@@ -1387,7 +1387,7 @@ vfloat64m8_t test_vle64ff_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const
// CHECK-RV64-LABEL: @test_vle16ff_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x half>, i64 } @llvm.riscv.vleff.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x half>, i64 } @llvm.riscv.vleff.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@@ -1400,7 +1400,7 @@ vfloat16mf4_t test_vle16ff_v_f16mf4(const _Float16 *base, size_t *new_vl, size_t
// CHECK-RV64-LABEL: @test_vle16ff_v_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x half>, i64 } @llvm.riscv.vleff.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x half>, i64 } @llvm.riscv.vleff.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@@ -1413,7 +1413,7 @@ vfloat16mf2_t test_vle16ff_v_f16mf2(const _Float16 *base, size_t *new_vl, size_t
// CHECK-RV64-LABEL: @test_vle16ff_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x half>, i64 } @llvm.riscv.vleff.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x half>, i64 } @llvm.riscv.vleff.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@@ -1426,7 +1426,7 @@ vfloat16m1_t test_vle16ff_v_f16m1(const _Float16 *base, size_t *new_vl, size_t v
// CHECK-RV64-LABEL: @test_vle16ff_v_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x half>, i64 } @llvm.riscv.vleff.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x half>, i64 } @llvm.riscv.vleff.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@@ -1439,7 +1439,7 @@ vfloat16m2_t test_vle16ff_v_f16m2(const _Float16 *base, size_t *new_vl, size_t v
// CHECK-RV64-LABEL: @test_vle16ff_v_f16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x half>, i64 } @llvm.riscv.vleff.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x half>, i64 } @llvm.riscv.vleff.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@@ -1452,7 +1452,7 @@ vfloat16m4_t test_vle16ff_v_f16m4(const _Float16 *base, size_t *new_vl, size_t v
// CHECK-RV64-LABEL: @test_vle16ff_v_f16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x half>, i64 } @llvm.riscv.vleff.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x half>, i64 } @llvm.riscv.vleff.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x half>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x half>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@@ -1582,7 +1582,7 @@ vfloat32mf2_t test_vle32ff_v_f32mf2_tu(vfloat32mf2_t merge, const float *base, s
// CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@@ -1595,7 +1595,7 @@ vint32mf2_t test_vle32ff_v_i32mf2_ta(const int32_t *base, size_t *new_vl, size_t
// CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@@ -1608,7 +1608,7 @@ vuint32mf2_t test_vle32ff_v_u32mf2_ta(const uint32_t *base, size_t *new_vl, size
// CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x float>, i64 } @llvm.riscv.vleff.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x float>, i64 } @llvm.riscv.vleff.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float>* [[TMP0]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@@ -1699,7 +1699,7 @@ vfloat32mf2_t test_vle32ff_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, co
// CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2_tama(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@@ -1712,7 +1712,7 @@ vint32mf2_t test_vle32ff_v_i32mf2_tama(vbool64_t mask, const int32_t *base, size
// CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2_tama(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
@@ -1725,7 +1725,7 @@ vuint32mf2_t test_vle32ff_v_u32mf2_tama(vbool64_t mask, const uint32_t *base, si
// CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2_tama(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x float>, i64 } @llvm.riscv.vleff.mask.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x float>, i64 } @llvm.riscv.vleff.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, i64 } [[TMP1]], 0
|
||||
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, i64 } [[TMP1]], 1
|
||||
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -7481,7 +7481,7 @@ void test_vloxseg2ei64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.nxv4f16.nxv4i8.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -7494,7 +7494,7 @@ void test_vloxseg2ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float1
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg3.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg3.nxv4f16.nxv4i8.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -7509,7 +7509,7 @@ void test_vloxseg3ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg4.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg4.nxv4f16.nxv4i8.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -7526,7 +7526,7 @@ void test_vloxseg4ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg5.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg5.nxv4f16.nxv4i8.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -7545,7 +7545,7 @@ void test_vloxseg5ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg6.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg6.nxv4f16.nxv4i8.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -7566,7 +7566,7 @@ void test_vloxseg6ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg7.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg7.nxv4f16.nxv4i8.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -7589,7 +7589,7 @@ void test_vloxseg7ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg8.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg8.nxv4f16.nxv4i8.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -7614,7 +7614,7 @@ void test_vloxseg8ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg2.nxv8f16.nxv8i8.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg2.nxv8f16.nxv8i8.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@@ -7627,7 +7627,7 @@ void test_vloxseg2ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float1
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg3.nxv8f16.nxv8i8.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg3.nxv8f16.nxv8i8.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@@ -7642,7 +7642,7 @@ void test_vloxseg3ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.nxv8f16.nxv8i8.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.nxv8f16.nxv8i8.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@@ -7659,7 +7659,7 @@ void test_vloxseg4ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vloxseg2.nxv16f16.nxv16i8.i64(<vscale x 16 x half> undef, <vscale x 16 x half> undef, half* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vloxseg2.nxv16f16.nxv16i8.i64(<vscale x 16 x half> poison, <vscale x 16 x half> poison, half* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
@@ -7672,7 +7672,7 @@ void test_vloxseg2ei8_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float1
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.nxv4f16.nxv4i16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -7685,7 +7685,7 @@ void test_vloxseg2ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg3.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg3.nxv4f16.nxv4i16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -7700,7 +7700,7 @@ void test_vloxseg3ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg4.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg4.nxv4f16.nxv4i16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -7717,7 +7717,7 @@ void test_vloxseg4ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg5.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg5.nxv4f16.nxv4i16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -7736,7 +7736,7 @@ void test_vloxseg5ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg6.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg6.nxv4f16.nxv4i16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -7757,7 +7757,7 @@ void test_vloxseg6ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg7.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg7.nxv4f16.nxv4i16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -7780,7 +7780,7 @@ void test_vloxseg7ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg8.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg8.nxv4f16.nxv4i16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -7805,7 +7805,7 @@ void test_vloxseg8ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg2.nxv8f16.nxv8i16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg2.nxv8f16.nxv8i16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@@ -7818,7 +7818,7 @@ void test_vloxseg2ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg3.nxv8f16.nxv8i16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg3.nxv8f16.nxv8i16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@@ -7833,7 +7833,7 @@ void test_vloxseg3ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.nxv8f16.nxv8i16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.nxv8f16.nxv8i16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@@ -7850,7 +7850,7 @@ void test_vloxseg4ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vloxseg2.nxv16f16.nxv16i16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> undef, half* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vloxseg2.nxv16f16.nxv16i16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> poison, half* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
@@ -7863,7 +7863,7 @@ void test_vloxseg2ei16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -7876,7 +7876,7 @@ void test_vloxseg2ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg3.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg3.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -7891,7 +7891,7 @@ void test_vloxseg3ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg4.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg4.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -7908,7 +7908,7 @@ void test_vloxseg4ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg5.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg5.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -7927,7 +7927,7 @@ void test_vloxseg5ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg6.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg6.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -7948,7 +7948,7 @@ void test_vloxseg6ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg7.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg7.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -7971,7 +7971,7 @@ void test_vloxseg7ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg8.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg8.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -7996,7 +7996,7 @@ void test_vloxseg8ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg2.nxv8f16.nxv8i32.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg2.nxv8f16.nxv8i32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@@ -8009,7 +8009,7 @@ void test_vloxseg2ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg3.nxv8f16.nxv8i32.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg3.nxv8f16.nxv8i32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@@ -8024,7 +8024,7 @@ void test_vloxseg3ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.nxv8f16.nxv8i32.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.nxv8f16.nxv8i32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@@ -8041,7 +8041,7 @@ void test_vloxseg4ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64(<vscale x 16 x half> undef, <vscale x 16 x half> undef, half* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> poison, half* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
@@ -8054,7 +8054,7 @@ void test_vloxseg2ei32_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.nxv4f16.nxv4i64.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.nxv4f16.nxv4i64.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -8067,7 +8067,7 @@ void test_vloxseg2ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg3.nxv4f16.nxv4i64.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg3.nxv4f16.nxv4i64.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -8082,7 +8082,7 @@ void test_vloxseg3ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg4.nxv4f16.nxv4i64.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg4.nxv4f16.nxv4i64.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -8099,7 +8099,7 @@ void test_vloxseg4ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg5.nxv4f16.nxv4i64.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg5.nxv4f16.nxv4i64.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -8118,7 +8118,7 @@ void test_vloxseg5ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg6.nxv4f16.nxv4i64.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg6.nxv4f16.nxv4i64.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -8139,7 +8139,7 @@ void test_vloxseg6ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg7.nxv4f16.nxv4i64.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg7.nxv4f16.nxv4i64.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -8162,7 +8162,7 @@ void test_vloxseg7ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg8.nxv4f16.nxv4i64.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg8.nxv4f16.nxv4i64.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -8187,7 +8187,7 @@ void test_vloxseg8ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg2.nxv8f16.nxv8i64.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg2.nxv8f16.nxv8i64.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@@ -8200,7 +8200,7 @@ void test_vloxseg2ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg3.nxv8f16.nxv8i64.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg3.nxv8f16.nxv8i64.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@@ -8215,7 +8215,7 @@ void test_vloxseg3ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.nxv8f16.nxv8i64.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.nxv8f16.nxv8i64.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@@ -6925,7 +6925,7 @@ void test_vloxseg8ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.nxv1f16.nxv1i8.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -6938,7 +6938,7 @@ void test_vloxseg2ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Flo
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.nxv1f16.nxv1i8.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -6953,7 +6953,7 @@ void test_vloxseg3ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg4.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg4.nxv1f16.nxv1i8.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -6970,7 +6970,7 @@ void test_vloxseg4ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.nxv1f16.nxv1i8.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -6989,7 +6989,7 @@ void test_vloxseg5ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.nxv1f16.nxv1i8.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7010,7 +7010,7 @@ void test_vloxseg6ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg7.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg7.nxv1f16.nxv1i8.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7033,7 +7033,7 @@ void test_vloxseg7ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg8.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg8.nxv1f16.nxv1i8.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7058,7 +7058,7 @@ void test_vloxseg8ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg2.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg2.nxv2f16.nxv2i8.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7071,7 +7071,7 @@ void test_vloxseg2ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Flo
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg3.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg3.nxv2f16.nxv2i8.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7086,7 +7086,7 @@ void test_vloxseg3ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg4.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg4.nxv2f16.nxv2i8.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7103,7 +7103,7 @@ void test_vloxseg4ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg5.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg5.nxv2f16.nxv2i8.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7122,7 +7122,7 @@ void test_vloxseg5ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg6.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg6.nxv2f16.nxv2i8.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7143,7 +7143,7 @@ void test_vloxseg6ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg7.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg7.nxv2f16.nxv2i8.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7166,7 +7166,7 @@ void test_vloxseg7ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg8.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg8.nxv2f16.nxv2i8.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7191,7 +7191,7 @@ void test_vloxseg8ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.nxv1f16.nxv1i16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7204,7 +7204,7 @@ void test_vloxseg2ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Fl
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.nxv1f16.nxv1i16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7219,7 +7219,7 @@ void test_vloxseg3ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg4.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg4.nxv1f16.nxv1i16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7236,7 +7236,7 @@ void test_vloxseg4ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.nxv1f16.nxv1i16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7255,7 +7255,7 @@ void test_vloxseg5ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.nxv1f16.nxv1i16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7276,7 +7276,7 @@ void test_vloxseg6ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg7.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg7.nxv1f16.nxv1i16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7299,7 +7299,7 @@ void test_vloxseg7ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg8.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg8.nxv1f16.nxv1i16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7324,7 +7324,7 @@ void test_vloxseg8ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg2.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg2.nxv2f16.nxv2i16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7337,7 +7337,7 @@ void test_vloxseg2ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Fl
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg3.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg3.nxv2f16.nxv2i16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7352,7 +7352,7 @@ void test_vloxseg3ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg4.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg4.nxv2f16.nxv2i16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7369,7 +7369,7 @@ void test_vloxseg4ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg5.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg5.nxv2f16.nxv2i16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7388,7 +7388,7 @@ void test_vloxseg5ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg6.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg6.nxv2f16.nxv2i16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7409,7 +7409,7 @@ void test_vloxseg6ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg7.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg7.nxv2f16.nxv2i16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7432,7 +7432,7 @@ void test_vloxseg7ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg8.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg8.nxv2f16.nxv2i16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7457,7 +7457,7 @@ void test_vloxseg8ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7470,7 +7470,7 @@ void test_vloxseg2ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Fl
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
|
||||
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
|
||||
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7485,7 +7485,7 @@ void test_vloxseg3ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg4.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg4.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@ -7502,7 +7502,7 @@ void test_vloxseg4ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@ -7521,7 +7521,7 @@ void test_vloxseg5ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@ -7542,7 +7542,7 @@ void test_vloxseg6ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg7.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg7.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@ -7565,7 +7565,7 @@ void test_vloxseg7ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg8.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg8.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@ -7590,7 +7590,7 @@ void test_vloxseg8ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg2.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg2.nxv2f16.nxv2i32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7603,7 +7603,7 @@ void test_vloxseg2ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Fl
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg3.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg3.nxv2f16.nxv2i32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7618,7 +7618,7 @@ void test_vloxseg3ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg4.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg4.nxv2f16.nxv2i32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7635,7 +7635,7 @@ void test_vloxseg4ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg5.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg5.nxv2f16.nxv2i32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7654,7 +7654,7 @@ void test_vloxseg5ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg6.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg6.nxv2f16.nxv2i32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7675,7 +7675,7 @@ void test_vloxseg6ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg7.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg7.nxv2f16.nxv2i32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7698,7 +7698,7 @@ void test_vloxseg7ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg8.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg8.nxv2f16.nxv2i32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7723,7 +7723,7 @@ void test_vloxseg8ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.nxv1f16.nxv1i64.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.nxv1f16.nxv1i64.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@ -7736,7 +7736,7 @@ void test_vloxseg2ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Fl
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.nxv1f16.nxv1i64.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.nxv1f16.nxv1i64.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@ -7751,7 +7751,7 @@ void test_vloxseg3ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg4.nxv1f16.nxv1i64.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg4.nxv1f16.nxv1i64.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@ -7768,7 +7768,7 @@ void test_vloxseg4ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.nxv1f16.nxv1i64.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.nxv1f16.nxv1i64.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@ -7787,7 +7787,7 @@ void test_vloxseg5ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.nxv1f16.nxv1i64.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.nxv1f16.nxv1i64.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@ -7808,7 +7808,7 @@ void test_vloxseg6ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg7.nxv1f16.nxv1i64.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg7.nxv1f16.nxv1i64.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@ -7831,7 +7831,7 @@ void test_vloxseg7ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg8.nxv1f16.nxv1i64.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg8.nxv1f16.nxv1i64.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@ -7856,7 +7856,7 @@ void test_vloxseg8ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg2.nxv2f16.nxv2i64.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg2.nxv2f16.nxv2i64.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7869,7 +7869,7 @@ void test_vloxseg2ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Fl
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg3.nxv2f16.nxv2i64.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg3.nxv2f16.nxv2i64.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7884,7 +7884,7 @@ void test_vloxseg3ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg4.nxv2f16.nxv2i64.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg4.nxv2f16.nxv2i64.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7901,7 +7901,7 @@ void test_vloxseg4ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg5.nxv2f16.nxv2i64.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg5.nxv2f16.nxv2i64.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7920,7 +7920,7 @@ void test_vloxseg5ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg6.nxv2f16.nxv2i64.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg6.nxv2f16.nxv2i64.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7941,7 +7941,7 @@ void test_vloxseg6ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg7.nxv2f16.nxv2i64.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg7.nxv2f16.nxv2i64.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7964,7 +7964,7 @@ void test_vloxseg7ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg8.nxv2f16.nxv2i64.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg8.nxv2f16.nxv2i64.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -9079,7 +9079,7 @@ void test_vloxseg2ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -7481,7 +7481,7 @@ void test_vluxseg2ei64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg2.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg2.nxv4f16.nxv4i8.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -7494,7 +7494,7 @@ void test_vluxseg2ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float1
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg3.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg3.nxv4f16.nxv4i8.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -7509,7 +7509,7 @@ void test_vluxseg3ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg4.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg4.nxv4f16.nxv4i8.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -7526,7 +7526,7 @@ void test_vluxseg4ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg5.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg5.nxv4f16.nxv4i8.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -7545,7 +7545,7 @@ void test_vluxseg5ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg6.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg6.nxv4f16.nxv4i8.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -7566,7 +7566,7 @@ void test_vluxseg6ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg7.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg7.nxv4f16.nxv4i8.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -7589,7 +7589,7 @@ void test_vluxseg7ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg8.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg8.nxv4f16.nxv4i8.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -7614,7 +7614,7 @@ void test_vluxseg8ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg2.nxv8f16.nxv8i8.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg2.nxv8f16.nxv8i8.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@ -7627,7 +7627,7 @@ void test_vluxseg2ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float1
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg3.nxv8f16.nxv8i8.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg3.nxv8f16.nxv8i8.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@ -7642,7 +7642,7 @@ void test_vluxseg3ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg4.nxv8f16.nxv8i8.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg4.nxv8f16.nxv8i8.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@ -7659,7 +7659,7 @@ void test_vluxseg4ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vluxseg2.nxv16f16.nxv16i8.i64(<vscale x 16 x half> undef, <vscale x 16 x half> undef, half* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vluxseg2.nxv16f16.nxv16i8.i64(<vscale x 16 x half> poison, <vscale x 16 x half> poison, half* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
@ -7672,7 +7672,7 @@ void test_vluxseg2ei8_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float1
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg2.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg2.nxv4f16.nxv4i16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -7685,7 +7685,7 @@ void test_vluxseg2ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg3.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg3.nxv4f16.nxv4i16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -7700,7 +7700,7 @@ void test_vluxseg3ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg4.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg4.nxv4f16.nxv4i16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -7717,7 +7717,7 @@ void test_vluxseg4ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg5.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg5.nxv4f16.nxv4i16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -7736,7 +7736,7 @@ void test_vluxseg5ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg6.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg6.nxv4f16.nxv4i16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -7757,7 +7757,7 @@ void test_vluxseg6ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg7.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg7.nxv4f16.nxv4i16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -7780,7 +7780,7 @@ void test_vluxseg7ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg8.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg8.nxv4f16.nxv4i16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -7805,7 +7805,7 @@ void test_vluxseg8ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg2.nxv8f16.nxv8i16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg2.nxv8f16.nxv8i16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@ -7818,7 +7818,7 @@ void test_vluxseg2ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg3.nxv8f16.nxv8i16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg3.nxv8f16.nxv8i16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@ -7833,7 +7833,7 @@ void test_vluxseg3ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg4.nxv8f16.nxv8i16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg4.nxv8f16.nxv8i16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@ -7850,7 +7850,7 @@ void test_vluxseg4ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vluxseg2.nxv16f16.nxv16i16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> undef, half* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vluxseg2.nxv16f16.nxv16i16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> poison, half* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
@ -7863,7 +7863,7 @@ void test_vluxseg2ei16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg2.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg2.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -7876,7 +7876,7 @@ void test_vluxseg2ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg3.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg3.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -7891,7 +7891,7 @@ void test_vluxseg3ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg4.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg4.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -7908,7 +7908,7 @@ void test_vluxseg4ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg5.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg5.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -7927,7 +7927,7 @@ void test_vluxseg5ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg6.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg6.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -7948,7 +7948,7 @@ void test_vluxseg6ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg7.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg7.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -7971,7 +7971,7 @@ void test_vluxseg7ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg8.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg8.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -7996,7 +7996,7 @@ void test_vluxseg8ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg2.nxv8f16.nxv8i32.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg2.nxv8f16.nxv8i32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@ -8009,7 +8009,7 @@ void test_vluxseg2ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg3.nxv8f16.nxv8i32.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg3.nxv8f16.nxv8i32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@ -8024,7 +8024,7 @@ void test_vluxseg3ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg4.nxv8f16.nxv8i32.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg4.nxv8f16.nxv8i32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@ -8041,7 +8041,7 @@ void test_vluxseg4ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vluxseg2.nxv16f16.nxv16i32.i64(<vscale x 16 x half> undef, <vscale x 16 x half> undef, half* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vluxseg2.nxv16f16.nxv16i32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> poison, half* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
@ -8054,7 +8054,7 @@ void test_vluxseg2ei32_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg2.nxv4f16.nxv4i64.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg2.nxv4f16.nxv4i64.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -8067,7 +8067,7 @@ void test_vluxseg2ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg3.nxv4f16.nxv4i64.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg3.nxv4f16.nxv4i64.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -8082,7 +8082,7 @@ void test_vluxseg3ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg4.nxv4f16.nxv4i64.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg4.nxv4f16.nxv4i64.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -8099,7 +8099,7 @@ void test_vluxseg4ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg5.nxv4f16.nxv4i64.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg5.nxv4f16.nxv4i64.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -8118,7 +8118,7 @@ void test_vluxseg5ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg6.nxv4f16.nxv4i64.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg6.nxv4f16.nxv4i64.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -8139,7 +8139,7 @@ void test_vluxseg6ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg7.nxv4f16.nxv4i64.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg7.nxv4f16.nxv4i64.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -8162,7 +8162,7 @@ void test_vluxseg7ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg8.nxv4f16.nxv4i64.i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg8.nxv4f16.nxv4i64.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@ -8187,7 +8187,7 @@ void test_vluxseg8ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg2.nxv8f16.nxv8i64.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg2.nxv8f16.nxv8i64.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@ -8200,7 +8200,7 @@ void test_vluxseg2ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg3.nxv8f16.nxv8i64.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg3.nxv8f16.nxv8i64.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@ -8215,7 +8215,7 @@ void test_vluxseg3ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg4.nxv8f16.nxv8i64.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg4.nxv8f16.nxv8i64.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@ -6912,7 +6912,7 @@ void test_vluxseg8ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.nxv1f16.nxv1i8.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@ -6925,7 +6925,7 @@ void test_vluxseg2ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Flo
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.nxv1f16.nxv1i8.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@ -6940,7 +6940,7 @@ void test_vluxseg3ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg4.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg4.nxv1f16.nxv1i8.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@ -6957,7 +6957,7 @@ void test_vluxseg4ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg5.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg5.nxv1f16.nxv1i8.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@ -6976,7 +6976,7 @@ void test_vluxseg5ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg6.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg6.nxv1f16.nxv1i8.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@ -6997,7 +6997,7 @@ void test_vluxseg6ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg7.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg7.nxv1f16.nxv1i8.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@ -7020,7 +7020,7 @@ void test_vluxseg7ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg8.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg8.nxv1f16.nxv1i8.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@ -7045,7 +7045,7 @@ void test_vluxseg8ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg2.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg2.nxv2f16.nxv2i8.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7058,7 +7058,7 @@ void test_vluxseg2ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Flo
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg3.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg3.nxv2f16.nxv2i8.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7073,7 +7073,7 @@ void test_vluxseg3ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg4.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg4.nxv2f16.nxv2i8.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7090,7 +7090,7 @@ void test_vluxseg4ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg5.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg5.nxv2f16.nxv2i8.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7109,7 +7109,7 @@ void test_vluxseg5ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg6.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg6.nxv2f16.nxv2i8.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7130,7 +7130,7 @@ void test_vluxseg6ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg7.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg7.nxv2f16.nxv2i8.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7153,7 +7153,7 @@ void test_vluxseg7ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg8.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg8.nxv2f16.nxv2i8.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7178,7 +7178,7 @@ void test_vluxseg8ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.nxv1f16.nxv1i16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7191,7 +7191,7 @@ void test_vluxseg2ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Fl
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.nxv1f16.nxv1i16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7206,7 +7206,7 @@ void test_vluxseg3ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg4.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg4.nxv1f16.nxv1i16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7223,7 +7223,7 @@ void test_vluxseg4ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg5.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg5.nxv1f16.nxv1i16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7242,7 +7242,7 @@ void test_vluxseg5ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg6.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg6.nxv1f16.nxv1i16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7263,7 +7263,7 @@ void test_vluxseg6ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg7.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg7.nxv1f16.nxv1i16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7286,7 +7286,7 @@ void test_vluxseg7ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg8.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg8.nxv1f16.nxv1i16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7311,7 +7311,7 @@ void test_vluxseg8ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg2.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg2.nxv2f16.nxv2i16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7324,7 +7324,7 @@ void test_vluxseg2ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Fl
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg3.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg3.nxv2f16.nxv2i16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7339,7 +7339,7 @@ void test_vluxseg3ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg4.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg4.nxv2f16.nxv2i16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7356,7 +7356,7 @@ void test_vluxseg4ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg5.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg5.nxv2f16.nxv2i16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7375,7 +7375,7 @@ void test_vluxseg5ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg6.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg6.nxv2f16.nxv2i16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7396,7 +7396,7 @@ void test_vluxseg6ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg7.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg7.nxv2f16.nxv2i16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7419,7 +7419,7 @@ void test_vluxseg7ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg8.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg8.nxv2f16.nxv2i16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7444,7 +7444,7 @@ void test_vluxseg8ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
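The two-field indexed forms follow the same pattern; the block above is the ei32 f16mf4 case. A hedged sketch of a matching test body is shown below: the v0/v1 output pointers and the const _Float16 *base parameter are visible in the hunk headers, while the vuint32mf2_t index type and size_t vl are assumed from the intrinsic's naming convention.

#include <stddef.h>
#include <riscv_vector.h>

// Sketch only: both passthru operands of @llvm.riscv.vluxseg2 become poison
// after this change, since the call supplies no passthru arguments.
void test_vluxseg2ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1,
                                const _Float16 *base, vuint32mf2_t bindex,
                                size_t vl) {
  vluxseg2ei32_v_f16mf4(v0, v1, base, bindex, vl);
}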
@@ -7457,7 +7457,7 @@ void test_vluxseg2ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Fl
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7472,7 +7472,7 @@ void test_vluxseg3ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg4.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg4.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7489,7 +7489,7 @@ void test_vluxseg4ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg5.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg5.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7508,7 +7508,7 @@ void test_vluxseg5ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg6.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg6.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7529,7 +7529,7 @@ void test_vluxseg6ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg7.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg7.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7552,7 +7552,7 @@ void test_vluxseg7ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg8.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg8.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7577,7 +7577,7 @@ void test_vluxseg8ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg2.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg2.nxv2f16.nxv2i32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7590,7 +7590,7 @@ void test_vluxseg2ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Fl
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg3.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg3.nxv2f16.nxv2i32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7605,7 +7605,7 @@ void test_vluxseg3ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg4.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg4.nxv2f16.nxv2i32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7622,7 +7622,7 @@ void test_vluxseg4ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg5.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg5.nxv2f16.nxv2i32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7641,7 +7641,7 @@ void test_vluxseg5ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg6.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg6.nxv2f16.nxv2i32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7662,7 +7662,7 @@ void test_vluxseg6ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg7.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg7.nxv2f16.nxv2i32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7685,7 +7685,7 @@ void test_vluxseg7ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg8.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg8.nxv2f16.nxv2i32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -7710,7 +7710,7 @@ void test_vluxseg8ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.nxv1f16.nxv1i64.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.nxv1f16.nxv1i64.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7723,7 +7723,7 @@ void test_vluxseg2ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Fl
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.nxv1f16.nxv1i64.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.nxv1f16.nxv1i64.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7738,7 +7738,7 @@ void test_vluxseg3ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg4.nxv1f16.nxv1i64.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg4.nxv1f16.nxv1i64.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7755,7 +7755,7 @@ void test_vluxseg4ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg5.nxv1f16.nxv1i64.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg5.nxv1f16.nxv1i64.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7774,7 +7774,7 @@ void test_vluxseg5ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg6.nxv1f16.nxv1i64.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg6.nxv1f16.nxv1i64.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7795,7 +7795,7 @@ void test_vluxseg6ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg7.nxv1f16.nxv1i64.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg7.nxv1f16.nxv1i64.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7818,7 +7818,7 @@ void test_vluxseg7ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg8.nxv1f16.nxv1i64.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg8.nxv1f16.nxv1i64.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -7843,7 +7843,7 @@ void test_vluxseg8ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg2.nxv2f16.nxv2i64.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg2.nxv2f16.nxv2i64.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7856,7 +7856,7 @@ void test_vluxseg2ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Fl
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg3.nxv2f16.nxv2i64.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg3.nxv2f16.nxv2i64.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7871,7 +7871,7 @@ void test_vluxseg3ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg4.nxv2f16.nxv2i64.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg4.nxv2f16.nxv2i64.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7888,7 +7888,7 @@ void test_vluxseg4ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg5.nxv2f16.nxv2i64.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg5.nxv2f16.nxv2i64.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7907,7 +7907,7 @@ void test_vluxseg5ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg6.nxv2f16.nxv2i64.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg6.nxv2f16.nxv2i64.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7928,7 +7928,7 @@ void test_vluxseg6ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg7.nxv2f16.nxv2i64.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg7.nxv2f16.nxv2i64.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -7951,7 +7951,7 @@ void test_vluxseg7ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg8.nxv2f16.nxv2i64.i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg8.nxv2f16.nxv2i64.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@ -9079,7 +9079,7 @@ void test_vluxseg2ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
File diff suppressed because it is too large
@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
@ -18,7 +18,7 @@ vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2,
@ -28,7 +28,7 @@ vint8mf8_t test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
@ -38,7 +38,7 @@ vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2,
@ -48,7 +48,7 @@ vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
|
||||
|
@ -58,7 +58,7 @@ vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2,
|
||||
|
@ -68,7 +68,7 @@ vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2,
|
||||
|
@ -78,7 +78,7 @@ vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2,
|
||||
|
@ -88,7 +88,7 @@ vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2,
|
||||
|
@ -98,7 +98,7 @@ vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2,
|
||||
|
@ -108,7 +108,7 @@ vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2,
|
||||
|
@ -118,7 +118,7 @@ vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2,
|
||||
|
@ -128,7 +128,7 @@ vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2,
|
||||
|
@ -138,7 +138,7 @@ vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2,
|
||||
|
@ -148,7 +148,7 @@ vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1,
|
||||
|
@ -158,7 +158,7 @@ vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2,
|
||||
|
@ -168,7 +168,7 @@ vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1,
|
||||
|
@ -178,7 +178,7 @@ vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2,
|
||||
|
@ -188,7 +188,7 @@ vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
|
||||
|
@ -198,7 +198,7 @@ vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2,
|
||||
|
@ -208,7 +208,7 @@ vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
|
||||
|
@ -218,7 +218,7 @@ vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2,
|
||||
|
@ -228,7 +228,7 @@ vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
|
||||
|
@ -238,7 +238,7 @@ vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2,
|
||||
|
@ -248,7 +248,7 @@ vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
|
||||
|
@ -258,7 +258,7 @@ vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2,
|
||||
|
@ -268,7 +268,7 @@ vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1,
|
||||
|
@ -278,7 +278,7 @@ vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2,
|
||||
|
@ -288,7 +288,7 @@ vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
|
||||
|
@ -298,7 +298,7 @@ vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2,
|
||||
|
@ -308,7 +308,7 @@ vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
|
||||
|
@ -318,7 +318,7 @@ vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2,
|
||||
|
@ -328,7 +328,7 @@ vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
|
||||
|
@ -338,7 +338,7 @@ vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2,
|
||||
|
@ -348,7 +348,7 @@ vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
|
||||
|
@ -358,7 +358,7 @@ vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2,
|
||||
|
@ -368,7 +368,7 @@ vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
|
||||
|
@ -378,7 +378,7 @@ vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2,
|
||||
|
@ -388,7 +388,7 @@ vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
|
||||
|
@ -398,7 +398,7 @@ vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2,
|
||||
|
@ -408,7 +408,7 @@ vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
|
||||
|
@ -418,7 +418,7 @@ vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2,
|
||||
|
@ -428,7 +428,7 @@ vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
|
||||
|
@ -438,7 +438,7 @@ vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2,
|
||||
|
@ -448,7 +448,7 @@ vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1,
|
||||
|
@ -458,7 +458,7 @@ vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2,
|
||||
|
@ -468,7 +468,7 @@ vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1,
|
||||
|
@ -478,7 +478,7 @@ vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2,
|
||||
|
@ -488,7 +488,7 @@ vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1,
|
||||
|
@ -498,7 +498,7 @@ vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
|
||||
|
@ -508,7 +508,7 @@ vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2,
|
||||
|
@ -518,7 +518,7 @@ vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2,
|
||||
|
@ -528,7 +528,7 @@ vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2,
|
||||
|
@ -538,7 +538,7 @@ vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2,
|
||||
|
@ -548,7 +548,7 @@ vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2,
|
||||
|
@ -558,7 +558,7 @@ vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2,
|
||||
|
@ -568,7 +568,7 @@ vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2,
@ -578,7 +578,7 @@ vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2,
@ -588,7 +588,7 @@ vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1,
@ -598,7 +598,7 @@ vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1,
@ -608,7 +608,7 @@ vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1,
@ -618,7 +618,7 @@ vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1,
@ -628,7 +628,7 @@ vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1,
@ -638,7 +638,7 @@ vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2,
@ -648,7 +648,7 @@ vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1,
@ -658,7 +658,7 @@ vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
@ -668,7 +668,7 @@ vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1,
@ -678,7 +678,7 @@ vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
@ -688,7 +688,7 @@ vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1,
@ -698,7 +698,7 @@ vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
@ -708,7 +708,7 @@ vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1,
@ -718,7 +718,7 @@ vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1,
@ -728,7 +728,7 @@ vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1,
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1,
|
||||
|
@ -738,7 +738,7 @@ vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2,
|
||||
|
@ -748,7 +748,7 @@ vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1,
|
||||
|
@ -758,7 +758,7 @@ vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2,
|
||||
|
@ -768,7 +768,7 @@ vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1,
|
||||
|
@ -778,7 +778,7 @@ vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
|
||||
|
@ -788,7 +788,7 @@ vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1,
|
||||
|
@ -798,7 +798,7 @@ vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
|
||||
|
@ -808,7 +808,7 @@ vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1,
|
||||
|
@ -818,7 +818,7 @@ vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2,
|
||||
|
@ -828,7 +828,7 @@ vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1,
|
||||
|
@ -838,7 +838,7 @@ vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2,
|
||||
|
@ -848,7 +848,7 @@ vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1,
|
||||
|
@ -858,7 +858,7 @@ vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2,
|
||||
|
@ -868,7 +868,7 @@ vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1,
|
||||
|
@ -878,7 +878,7 @@ vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2,
|
||||
|
@ -888,7 +888,7 @@ vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1,
|
||||
|
@ -898,7 +898,7 @@ vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1,
|
||||
|
@ -908,7 +908,7 @@ vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1,
|
||||
|
@ -918,7 +918,7 @@ vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1,
|
||||
|
@ -928,7 +928,7 @@ vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1,
|
||||
|
@ -938,7 +938,7 @@ vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1,
|
||||
|
@ -948,7 +948,7 @@ vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1,
|
||||
|
@ -958,7 +958,7 @@ vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1,
|
||||
|
@ -968,7 +968,7 @@ vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vmerge_vvm_f64m8(vbool8_t mask, vfloat64m8_t op1,
|
||||
|
@ -978,7 +978,7 @@ vfloat64m8_t test_vmerge_vvm_f64m8(vbool8_t mask, vfloat64m8_t op1,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf4_t test_vmerge_vvm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
|
||||
|
@ -987,7 +987,7 @@ vfloat16mf4_t test_vmerge_vvm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, vfloat16
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf2_t test_vmerge_vvm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
|
||||
|
@ -996,7 +996,7 @@ vfloat16mf2_t test_vmerge_vvm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, vfloat16
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_vmerge_vvm_f16m1(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
|
||||
|
@ -1005,7 +1005,7 @@ vfloat16m1_t test_vmerge_vvm_f16m1(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_vmerge_vvm_f16m2(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
|
||||
|
@ -1014,7 +1014,7 @@ vfloat16m2_t test_vmerge_vvm_f16m2(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m4_t test_vmerge_vvm_f16m4(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
|
||||
|
@ -1023,7 +1023,7 @@ vfloat16m4_t test_vmerge_vvm_f16m4(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m8_t test_vmerge_vvm_f16m8(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
|
||||
|
@ -1068,7 +1068,7 @@ vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, vuint
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vmerge_vvm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
|
||||
|
@ -1077,7 +1077,7 @@ vint32mf2_t test_vmerge_vvm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, vint32mf2
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vmerge_vxm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
|
||||
|
@ -1086,7 +1086,7 @@ vint32mf2_t test_vmerge_vxm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, int32_t o
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
|
||||
|
@ -1095,7 +1095,7 @@ vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, vuint32
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vmerge_vxm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
|
||||
|
@ -1113,7 +1113,7 @@ vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vfl
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_ta(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vmerge_vvm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf8_t test_vncvt_x_x_w_i8mf8 (vint16mf4_t src, size_t vl) {
|
||||
|
@ -15,7 +15,7 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8 (vint16mf4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf4_t test_vncvt_x_x_w_i8mf4 (vint16mf2_t src, size_t vl) {
|
||||
|
@ -24,7 +24,7 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4 (vint16mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf2_t test_vncvt_x_x_w_i8mf2 (vint16m1_t src, size_t vl) {
|
||||
|
@ -33,7 +33,7 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2 (vint16m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m1_t test_vncvt_x_x_w_i8m1 (vint16m2_t src, size_t vl) {
|
||||
|
@ -42,7 +42,7 @@ vint8m1_t test_vncvt_x_x_w_i8m1 (vint16m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m2_t test_vncvt_x_x_w_i8m2 (vint16m4_t src, size_t vl) {
|
||||
|
@ -51,7 +51,7 @@ vint8m2_t test_vncvt_x_x_w_i8m2 (vint16m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m4_t test_vncvt_x_x_w_i8m4 (vint16m8_t src, size_t vl) {
|
||||
|
@ -60,7 +60,7 @@ vint8m4_t test_vncvt_x_x_w_i8m4 (vint16m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf8_t test_vncvt_x_x_w_u8mf8 (vuint16mf4_t src, size_t vl) {
|
||||
|
@ -69,7 +69,7 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8 (vuint16mf4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf4_t test_vncvt_x_x_w_u8mf4 (vuint16mf2_t src, size_t vl) {
|
||||
|
@ -78,7 +78,7 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4 (vuint16mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf2_t test_vncvt_x_x_w_u8mf2 (vuint16m1_t src, size_t vl) {
|
||||
|
@ -87,7 +87,7 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2 (vuint16m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m1_t test_vncvt_x_x_w_u8m1 (vuint16m2_t src, size_t vl) {
|
||||
|
@ -96,7 +96,7 @@ vuint8m1_t test_vncvt_x_x_w_u8m1 (vuint16m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m2_t test_vncvt_x_x_w_u8m2 (vuint16m4_t src, size_t vl) {
|
||||
|
@ -105,7 +105,7 @@ vuint8m2_t test_vncvt_x_x_w_u8m2 (vuint16m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m4_t test_vncvt_x_x_w_u8m4 (vuint16m8_t src, size_t vl) {
|
||||
|
@ -114,7 +114,7 @@ vuint8m4_t test_vncvt_x_x_w_u8m4 (vuint16m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf4_t test_vncvt_x_x_w_i16mf4 (vint32mf2_t src, size_t vl) {
|
||||
|
@ -123,7 +123,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4 (vint32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf2_t test_vncvt_x_x_w_i16mf2 (vint32m1_t src, size_t vl) {
|
||||
|
@ -132,7 +132,7 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2 (vint32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m1_t test_vncvt_x_x_w_i16m1 (vint32m2_t src, size_t vl) {
|
||||
|
@ -141,7 +141,7 @@ vint16m1_t test_vncvt_x_x_w_i16m1 (vint32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m2_t test_vncvt_x_x_w_i16m2 (vint32m4_t src, size_t vl) {
|
||||
|
@ -150,7 +150,7 @@ vint16m2_t test_vncvt_x_x_w_i16m2 (vint32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m4_t test_vncvt_x_x_w_i16m4 (vint32m8_t src, size_t vl) {
|
||||
|
@ -159,7 +159,7 @@ vint16m4_t test_vncvt_x_x_w_i16m4 (vint32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf4_t test_vncvt_x_x_w_u16mf4 (vuint32mf2_t src, size_t vl) {
|
||||
|
@ -168,7 +168,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4 (vuint32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf2_t test_vncvt_x_x_w_u16mf2 (vuint32m1_t src, size_t vl) {
|
||||
|
@ -177,7 +177,7 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2 (vuint32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m1_t test_vncvt_x_x_w_u16m1 (vuint32m2_t src, size_t vl) {
@ -186,7 +186,7 @@ vuint16m1_t test_vncvt_x_x_w_u16m1 (vuint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vncvt_x_x_w_u16m2 (vuint32m4_t src, size_t vl) {
@ -195,7 +195,7 @@ vuint16m2_t test_vncvt_x_x_w_u16m2 (vuint32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vncvt_x_x_w_u16m4 (vuint32m8_t src, size_t vl) {
@ -204,7 +204,7 @@ vuint16m4_t test_vncvt_x_x_w_u16m4 (vuint32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vncvt_x_x_w_i32mf2 (vint64m1_t src, size_t vl) {
@ -213,7 +213,7 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2 (vint64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vncvt_x_x_w_i32m1 (vint64m2_t src, size_t vl) {
@ -222,7 +222,7 @@ vint32m1_t test_vncvt_x_x_w_i32m1 (vint64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vncvt_x_x_w_i32m2 (vint64m4_t src, size_t vl) {
@ -231,7 +231,7 @@ vint32m2_t test_vncvt_x_x_w_i32m2 (vint64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vncvt_x_x_w_i32m4 (vint64m8_t src, size_t vl) {
@ -240,7 +240,7 @@ vint32m4_t test_vncvt_x_x_w_i32m4 (vint64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vncvt_x_x_w_u32mf2 (vuint64m1_t src, size_t vl) {
@ -249,7 +249,7 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2 (vuint64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vncvt_x_x_w_u32m1 (vuint64m2_t src, size_t vl) {
@ -258,7 +258,7 @@ vuint32m1_t test_vncvt_x_x_w_u32m1 (vuint64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vncvt_x_x_w_u32m2 (vuint64m4_t src, size_t vl) {
@ -267,7 +267,7 @@ vuint32m2_t test_vncvt_x_x_w_u32m2 (vuint64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vncvt_x_x_w_u32m4 (vuint64m8_t src, size_t vl) {
@ -564,7 +564,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4_tu(vuint16mf4_t merge, vuint32mf2_t src, si
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vncvt_x_x_w_i16mf4_ta(vint32mf2_t src, size_t vl) {
@ -573,7 +573,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4_ta(vint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vncvt_x_x_w_u16mf4_ta(vuint32mf2_t src, size_t vl) {
@ -618,7 +618,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t merge, vu
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vncvt_x_x_w_i16mf4_tama(vbool64_t mask, vint32mf2_t src, size_t vl) {
@ -627,7 +627,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4_tama(vbool64_t mask, vint32mf2_t src, size_t
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vncvt_x_x_w_u16mf4_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) {
@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) {
@ -16,7 +16,7 @@ vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) {
@ -25,7 +25,7 @@ vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) {
@ -34,7 +34,7 @@ vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) {
@ -43,7 +43,7 @@ vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) {
@ -52,7 +52,7 @@ vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) {
@ -61,7 +61,7 @@ vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) {
@ -70,7 +70,7 @@ vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) {
@ -79,7 +79,7 @@ vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) {
@ -88,7 +88,7 @@ vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) {
@ -97,7 +97,7 @@ vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) {
@ -106,7 +106,7 @@ vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) {
@ -115,7 +115,7 @@ vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) {
@ -124,7 +124,7 @@ vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) {
@ -133,7 +133,7 @@ vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) {
@ -142,7 +142,7 @@ vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) {
@ -151,7 +151,7 @@ vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) {
@ -160,7 +160,7 @@ vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) {
@ -169,7 +169,7 @@ vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) {
@ -178,7 +178,7 @@ vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) {
@ -187,7 +187,7 @@ vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) {
@ -196,7 +196,7 @@ vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vneg_v_i64m8 (vint64m8_t op1, size_t vl) {
@ -412,7 +412,7 @@ vint32mf2_t test_vneg_v_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t vl)
// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2_ta(vint32mf2_t op1, size_t vl) {
@ -439,7 +439,7 @@ vint32mf2_t test_vneg_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2
// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t vl) {
@ -7,7 +7,7 @@
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_i8mf8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf8_t test_vnot_v_i8mf8 (vint8mf8_t op1, size_t vl) {
|
||||
|
@ -16,7 +16,7 @@ vint8mf8_t test_vnot_v_i8mf8 (vint8mf8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_i8mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf4_t test_vnot_v_i8mf4 (vint8mf4_t op1, size_t vl) {
|
||||
|
@ -25,7 +25,7 @@ vint8mf4_t test_vnot_v_i8mf4 (vint8mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_i8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf2_t test_vnot_v_i8mf2 (vint8mf2_t op1, size_t vl) {
|
||||
|
@ -34,7 +34,7 @@ vint8mf2_t test_vnot_v_i8mf2 (vint8mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_i8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m1_t test_vnot_v_i8m1 (vint8m1_t op1, size_t vl) {
|
||||
|
@ -43,7 +43,7 @@ vint8m1_t test_vnot_v_i8m1 (vint8m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_i8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m2_t test_vnot_v_i8m2 (vint8m2_t op1, size_t vl) {
|
||||
|
@ -52,7 +52,7 @@ vint8m2_t test_vnot_v_i8m2 (vint8m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_i8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m4_t test_vnot_v_i8m4 (vint8m4_t op1, size_t vl) {
|
||||
|
@ -61,7 +61,7 @@ vint8m4_t test_vnot_v_i8m4 (vint8m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_i8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m8_t test_vnot_v_i8m8 (vint8m8_t op1, size_t vl) {
|
||||
|
@ -70,7 +70,7 @@ vint8m8_t test_vnot_v_i8m8 (vint8m8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_i16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf4_t test_vnot_v_i16mf4 (vint16mf4_t op1, size_t vl) {
|
||||
|
@ -79,7 +79,7 @@ vint16mf4_t test_vnot_v_i16mf4 (vint16mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_i16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf2_t test_vnot_v_i16mf2 (vint16mf2_t op1, size_t vl) {
|
||||
|
@ -88,7 +88,7 @@ vint16mf2_t test_vnot_v_i16mf2 (vint16mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_i16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m1_t test_vnot_v_i16m1 (vint16m1_t op1, size_t vl) {
|
||||
|
@ -97,7 +97,7 @@ vint16m1_t test_vnot_v_i16m1 (vint16m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_i16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m2_t test_vnot_v_i16m2 (vint16m2_t op1, size_t vl) {
|
||||
|
@ -106,7 +106,7 @@ vint16m2_t test_vnot_v_i16m2 (vint16m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_i16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m4_t test_vnot_v_i16m4 (vint16m4_t op1, size_t vl) {
|
||||
|
@ -115,7 +115,7 @@ vint16m4_t test_vnot_v_i16m4 (vint16m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_i16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m8_t test_vnot_v_i16m8 (vint16m8_t op1, size_t vl) {
|
||||
|
@ -124,7 +124,7 @@ vint16m8_t test_vnot_v_i16m8 (vint16m8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_i32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vnot_v_i32mf2 (vint32mf2_t op1, size_t vl) {
|
||||
|
@ -133,7 +133,7 @@ vint32mf2_t test_vnot_v_i32mf2 (vint32mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vnot_v_i32m1 (vint32m1_t op1, size_t vl) {
|
||||
|
@ -142,7 +142,7 @@ vint32m1_t test_vnot_v_i32m1 (vint32m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vnot_v_i32m2 (vint32m2_t op1, size_t vl) {
|
||||
|
@ -151,7 +151,7 @@ vint32m2_t test_vnot_v_i32m2 (vint32m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vnot_v_i32m4 (vint32m4_t op1, size_t vl) {
|
||||
|
@ -160,7 +160,7 @@ vint32m4_t test_vnot_v_i32m4 (vint32m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vnot_v_i32m8 (vint32m8_t op1, size_t vl) {
|
||||
|
@ -169,7 +169,7 @@ vint32m8_t test_vnot_v_i32m8 (vint32m8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vnot_v_i64m1 (vint64m1_t op1, size_t vl) {
|
||||
|
@ -178,7 +178,7 @@ vint64m1_t test_vnot_v_i64m1 (vint64m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vnot_v_i64m2 (vint64m2_t op1, size_t vl) {
|
||||
|
@ -187,7 +187,7 @@ vint64m2_t test_vnot_v_i64m2 (vint64m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vnot_v_i64m4 (vint64m4_t op1, size_t vl) {
|
||||
|
@ -196,7 +196,7 @@ vint64m4_t test_vnot_v_i64m4 (vint64m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vnot_v_i64m8 (vint64m8_t op1, size_t vl) {
|
||||
|
@ -205,7 +205,7 @@ vint64m8_t test_vnot_v_i64m8 (vint64m8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_u8mf8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf8_t test_vnot_v_u8mf8 (vuint8mf8_t op1, size_t vl) {
|
||||
|
@ -214,7 +214,7 @@ vuint8mf8_t test_vnot_v_u8mf8 (vuint8mf8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_u8mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf4_t test_vnot_v_u8mf4 (vuint8mf4_t op1, size_t vl) {
|
||||
|
@ -223,7 +223,7 @@ vuint8mf4_t test_vnot_v_u8mf4 (vuint8mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_u8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf2_t test_vnot_v_u8mf2 (vuint8mf2_t op1, size_t vl) {
|
||||
|
@ -232,7 +232,7 @@ vuint8mf2_t test_vnot_v_u8mf2 (vuint8mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_u8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m1_t test_vnot_v_u8m1 (vuint8m1_t op1, size_t vl) {
|
||||
|
@ -241,7 +241,7 @@ vuint8m1_t test_vnot_v_u8m1 (vuint8m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_u8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m2_t test_vnot_v_u8m2 (vuint8m2_t op1, size_t vl) {
|
||||
|
@ -250,7 +250,7 @@ vuint8m2_t test_vnot_v_u8m2 (vuint8m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_u8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m4_t test_vnot_v_u8m4 (vuint8m4_t op1, size_t vl) {
|
||||
|
@ -259,7 +259,7 @@ vuint8m4_t test_vnot_v_u8m4 (vuint8m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_u8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m8_t test_vnot_v_u8m8 (vuint8m8_t op1, size_t vl) {
|
||||
|
@ -268,7 +268,7 @@ vuint8m8_t test_vnot_v_u8m8 (vuint8m8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_u16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf4_t test_vnot_v_u16mf4 (vuint16mf4_t op1, size_t vl) {
|
||||
|
@ -277,7 +277,7 @@ vuint16mf4_t test_vnot_v_u16mf4 (vuint16mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_u16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf2_t test_vnot_v_u16mf2 (vuint16mf2_t op1, size_t vl) {
|
||||
|
@ -286,7 +286,7 @@ vuint16mf2_t test_vnot_v_u16mf2 (vuint16mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_u16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m1_t test_vnot_v_u16m1 (vuint16m1_t op1, size_t vl) {
|
||||
|
@ -295,7 +295,7 @@ vuint16m1_t test_vnot_v_u16m1 (vuint16m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_u16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m2_t test_vnot_v_u16m2 (vuint16m2_t op1, size_t vl) {
|
||||
|
@ -304,7 +304,7 @@ vuint16m2_t test_vnot_v_u16m2 (vuint16m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_u16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m4_t test_vnot_v_u16m4 (vuint16m4_t op1, size_t vl) {
|
||||
|
@ -313,7 +313,7 @@ vuint16m4_t test_vnot_v_u16m4 (vuint16m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_u16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m8_t test_vnot_v_u16m8 (vuint16m8_t op1, size_t vl) {
|
||||
|
@ -322,7 +322,7 @@ vuint16m8_t test_vnot_v_u16m8 (vuint16m8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_u32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vnot_v_u32mf2 (vuint32mf2_t op1, size_t vl) {
|
||||
|
@ -331,7 +331,7 @@ vuint32mf2_t test_vnot_v_u32mf2 (vuint32mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vnot_v_u32m1 (vuint32m1_t op1, size_t vl) {
|
||||
|
@ -340,7 +340,7 @@ vuint32m1_t test_vnot_v_u32m1 (vuint32m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vnot_v_u32m2 (vuint32m2_t op1, size_t vl) {
|
||||
|
@ -349,7 +349,7 @@ vuint32m2_t test_vnot_v_u32m2 (vuint32m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vnot_v_u32m4 (vuint32m4_t op1, size_t vl) {
|
||||
|
@ -358,7 +358,7 @@ vuint32m4_t test_vnot_v_u32m4 (vuint32m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vnot_v_u32m8 (vuint32m8_t op1, size_t vl) {
|
||||
|
@ -367,7 +367,7 @@ vuint32m8_t test_vnot_v_u32m8 (vuint32m8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vnot_v_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnot_v_u64m1 (vuint64m1_t op1, size_t vl) {

@@ -376,7 +376,7 @@ vuint64m1_t test_vnot_v_u64m1 (vuint64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnot_v_u64m2 (vuint64m2_t op1, size_t vl) {

@@ -385,7 +385,7 @@ vuint64m2_t test_vnot_v_u64m2 (vuint64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnot_v_u64m4 (vuint64m4_t op1, size_t vl) {

@@ -394,7 +394,7 @@ vuint64m4_t test_vnot_v_u64m4 (vuint64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnot_v_u64m8 (vuint64m8_t op1, size_t vl) {

@@ -817,7 +817,7 @@ vuint32mf2_t test_vnot_v_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t
// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnot_v_i32mf2_ta(vint32mf2_t op1, size_t vl) {

@@ -826,7 +826,7 @@ vint32mf2_t test_vnot_v_i32mf2_ta(vint32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnot_v_u32mf2_ta(vuint32mf2_t op1, size_t vl) {

@@ -871,7 +871,7 @@ vuint32mf2_t test_vnot_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32
// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnot_v_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t vl) {

@@ -880,7 +880,7 @@ vint32mf2_t test_vnot_v_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t vl)
// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnot_v_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t vl) {

@@ -8,325 +8,325 @@
// CHECK-RV64-LABEL: @test_vundefined_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> undef
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> poison
//
vint8mf8_t test_vundefined_i8mf8() { return vundefined_i8mf8(); }

// CHECK-RV64-LABEL: @test_vundefined_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> undef
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> poison
//
vint8mf4_t test_vundefined_i8mf4() { return vundefined_i8mf4(); }

// CHECK-RV64-LABEL: @test_vundefined_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> undef
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> poison
//
vint8mf2_t test_vundefined_i8mf2() { return vundefined_i8mf2(); }

// CHECK-RV64-LABEL: @test_vundefined_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> undef
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> poison
//
vint8m1_t test_vundefined_i8m1() { return vundefined_i8m1(); }

// CHECK-RV64-LABEL: @test_vundefined_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> undef
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> poison
//
vint8m2_t test_vundefined_i8m2() { return vundefined_i8m2(); }

// CHECK-RV64-LABEL: @test_vundefined_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> undef
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> poison
//
vint8m4_t test_vundefined_i8m4() { return vundefined_i8m4(); }

// CHECK-RV64-LABEL: @test_vundefined_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> undef
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> poison
//
vint8m8_t test_vundefined_i8m8() { return vundefined_i8m8(); }

// CHECK-RV64-LABEL: @test_vundefined_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> undef
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> poison
//
vint16mf4_t test_vundefined_i16mf4() { return vundefined_i16mf4(); }

// CHECK-RV64-LABEL: @test_vundefined_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> undef
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> poison
//
vint16mf2_t test_vundefined_i16mf2() { return vundefined_i16mf2(); }

// CHECK-RV64-LABEL: @test_vundefined_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> undef
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> poison
//
vint16m1_t test_vundefined_i16m1() { return vundefined_i16m1(); }

// CHECK-RV64-LABEL: @test_vundefined_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> undef
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> poison
//
vint16m2_t test_vundefined_i16m2() { return vundefined_i16m2(); }

// CHECK-RV64-LABEL: @test_vundefined_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> undef
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> poison
//
vint16m4_t test_vundefined_i16m4() { return vundefined_i16m4(); }

// CHECK-RV64-LABEL: @test_vundefined_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> undef
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> poison
//
vint16m8_t test_vundefined_i16m8() { return vundefined_i16m8(); }

// CHECK-RV64-LABEL: @test_vundefined_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> undef
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> poison
//
vint32mf2_t test_vundefined_i32mf2() { return vundefined_i32mf2(); }

// CHECK-RV64-LABEL: @test_vundefined_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> undef
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> poison
//
vint32m1_t test_vundefined_i32m1() { return vundefined_i32m1(); }

// CHECK-RV64-LABEL: @test_vundefined_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> undef
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> poison
//
vint32m2_t test_vundefined_i32m2() { return vundefined_i32m2(); }

// CHECK-RV64-LABEL: @test_vundefined_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> undef
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> poison
//
vint32m4_t test_vundefined_i32m4() { return vundefined_i32m4(); }

// CHECK-RV64-LABEL: @test_vundefined_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> undef
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> poison
//
vint32m8_t test_vundefined_i32m8() { return vundefined_i32m8(); }

// CHECK-RV64-LABEL: @test_vundefined_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> undef
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> poison
//
vint64m1_t test_vundefined_i64m1() { return vundefined_i64m1(); }

// CHECK-RV64-LABEL: @test_vundefined_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> undef
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> poison
//
vint64m2_t test_vundefined_i64m2() { return vundefined_i64m2(); }

// CHECK-RV64-LABEL: @test_vundefined_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> undef
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> poison
//
vint64m4_t test_vundefined_i64m4() { return vundefined_i64m4(); }

// CHECK-RV64-LABEL: @test_vundefined_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> undef
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> poison
//
vint64m8_t test_vundefined_i64m8() { return vundefined_i64m8(); }

// CHECK-RV64-LABEL: @test_vundefined_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> undef
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> poison
//
vuint8mf8_t test_vundefined_u8mf8() { return vundefined_u8mf8(); }

// CHECK-RV64-LABEL: @test_vundefined_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> undef
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> poison
//
vuint8mf4_t test_vundefined_u8mf4() { return vundefined_u8mf4(); }

// CHECK-RV64-LABEL: @test_vundefined_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> undef
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> poison
//
vuint8mf2_t test_vundefined_u8mf2() { return vundefined_u8mf2(); }

// CHECK-RV64-LABEL: @test_vundefined_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> undef
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> poison
//
vuint8m1_t test_vundefined_u8m1() { return vundefined_u8m1(); }

// CHECK-RV64-LABEL: @test_vundefined_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> undef
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> poison
//
vuint8m2_t test_vundefined_u8m2() { return vundefined_u8m2(); }

// CHECK-RV64-LABEL: @test_vundefined_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> undef
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> poison
//
vuint8m4_t test_vundefined_u8m4() { return vundefined_u8m4(); }

// CHECK-RV64-LABEL: @test_vundefined_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> undef
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> poison
//
vuint8m8_t test_vundefined_u8m8() { return vundefined_u8m8(); }

// CHECK-RV64-LABEL: @test_vundefined_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> undef
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> poison
//
vuint16mf4_t test_vundefined_u16mf4() { return vundefined_u16mf4(); }

// CHECK-RV64-LABEL: @test_vundefined_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> undef
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> poison
//
vuint16mf2_t test_vundefined_u16mf2() { return vundefined_u16mf2(); }

// CHECK-RV64-LABEL: @test_vundefined_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> undef
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> poison
//
vuint16m1_t test_vundefined_u16m1() { return vundefined_u16m1(); }

// CHECK-RV64-LABEL: @test_vundefined_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> undef
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> poison
//
vuint16m2_t test_vundefined_u16m2() { return vundefined_u16m2(); }

// CHECK-RV64-LABEL: @test_vundefined_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> undef
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> poison
//
vuint16m4_t test_vundefined_u16m4() { return vundefined_u16m4(); }

// CHECK-RV64-LABEL: @test_vundefined_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> undef
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> poison
//
vuint16m8_t test_vundefined_u16m8() { return vundefined_u16m8(); }

// CHECK-RV64-LABEL: @test_vundefined_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> undef
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> poison
//
vuint32mf2_t test_vundefined_u32mf2() { return vundefined_u32mf2(); }

// CHECK-RV64-LABEL: @test_vundefined_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> undef
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> poison
//
vuint32m1_t test_vundefined_u32m1() { return vundefined_u32m1(); }

// CHECK-RV64-LABEL: @test_vundefined_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> undef
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> poison
//
vuint32m2_t test_vundefined_u32m2() { return vundefined_u32m2(); }

// CHECK-RV64-LABEL: @test_vundefined_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> undef
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> poison
//
vuint32m4_t test_vundefined_u32m4() { return vundefined_u32m4(); }

// CHECK-RV64-LABEL: @test_vundefined_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> undef
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> poison
//
vuint32m8_t test_vundefined_u32m8() { return vundefined_u32m8(); }

// CHECK-RV64-LABEL: @test_vundefined_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> undef
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> poison
//
vuint64m1_t test_vundefined_u64m1() { return vundefined_u64m1(); }

// CHECK-RV64-LABEL: @test_vundefined_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> undef
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> poison
//
vuint64m2_t test_vundefined_u64m2() { return vundefined_u64m2(); }

// CHECK-RV64-LABEL: @test_vundefined_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> undef
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> poison
//
vuint64m4_t test_vundefined_u64m4() { return vundefined_u64m4(); }

// CHECK-RV64-LABEL: @test_vundefined_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> undef
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> poison
//
vuint64m8_t test_vundefined_u64m8() { return vundefined_u64m8(); }

// CHECK-RV64-LABEL: @test_vundefined_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 1 x float> undef
// CHECK-RV64-NEXT: ret <vscale x 1 x float> poison
//
vfloat32mf2_t test_vundefined_f32mf2() { return vundefined_f32mf2(); }

// CHECK-RV64-LABEL: @test_vundefined_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 2 x float> undef
// CHECK-RV64-NEXT: ret <vscale x 2 x float> poison
//
vfloat32m1_t test_vundefined_f32m1() { return vundefined_f32m1(); }

// CHECK-RV64-LABEL: @test_vundefined_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 4 x float> undef
// CHECK-RV64-NEXT: ret <vscale x 4 x float> poison
//
vfloat32m2_t test_vundefined_f32m2() { return vundefined_f32m2(); }

// CHECK-RV64-LABEL: @test_vundefined_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 8 x float> undef
// CHECK-RV64-NEXT: ret <vscale x 8 x float> poison
//
vfloat32m4_t test_vundefined_f32m4() { return vundefined_f32m4(); }

// CHECK-RV64-LABEL: @test_vundefined_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 16 x float> undef
// CHECK-RV64-NEXT: ret <vscale x 16 x float> poison
//
vfloat32m8_t test_vundefined_f32m8() { return vundefined_f32m8(); }

// CHECK-RV64-LABEL: @test_vundefined_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 1 x double> undef
// CHECK-RV64-NEXT: ret <vscale x 1 x double> poison
//
vfloat64m1_t test_vundefined_f64m1() { return vundefined_f64m1(); }

// CHECK-RV64-LABEL: @test_vundefined_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 2 x double> undef
// CHECK-RV64-NEXT: ret <vscale x 2 x double> poison
//
vfloat64m2_t test_vundefined_f64m2() { return vundefined_f64m2(); }

// CHECK-RV64-LABEL: @test_vundefined_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 4 x double> undef
// CHECK-RV64-NEXT: ret <vscale x 4 x double> poison
//
vfloat64m4_t test_vundefined_f64m4() { return vundefined_f64m4(); }

// CHECK-RV64-LABEL: @test_vundefined_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 8 x double> undef
// CHECK-RV64-NEXT: ret <vscale x 8 x double> poison
//
vfloat64m8_t test_vundefined_f64m8() { return vundefined_f64m8(); }

// CHECK-RV64-LABEL: @test_vundefined_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 1 x half> undef
// CHECK-RV64-NEXT: ret <vscale x 1 x half> poison
//
vfloat16mf4_t test_vundefined_f16mf4 () {
return vundefined_f16mf4();

@@ -334,7 +334,7 @@ vfloat16mf4_t test_vundefined_f16mf4 () {
// CHECK-RV64-LABEL: @test_vundefined_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 2 x half> undef
// CHECK-RV64-NEXT: ret <vscale x 2 x half> poison
//
vfloat16mf2_t test_vundefined_f16mf2 () {
return vundefined_f16mf2();

@@ -342,7 +342,7 @@ vfloat16mf2_t test_vundefined_f16mf2 () {
// CHECK-RV64-LABEL: @test_vundefined_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 4 x half> undef
// CHECK-RV64-NEXT: ret <vscale x 4 x half> poison
//
vfloat16m1_t test_vundefined_f16m1 () {
return vundefined_f16m1();

@@ -350,7 +350,7 @@ vfloat16m1_t test_vundefined_f16m1 () {
// CHECK-RV64-LABEL: @test_vundefined_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 8 x half> undef
// CHECK-RV64-NEXT: ret <vscale x 8 x half> poison
//
vfloat16m2_t test_vundefined_f16m2 () {
return vundefined_f16m2();

@@ -358,7 +358,7 @@ vfloat16m2_t test_vundefined_f16m2 () {
// CHECK-RV64-LABEL: @test_vundefined_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 16 x half> undef
// CHECK-RV64-NEXT: ret <vscale x 16 x half> poison
//
vfloat16m4_t test_vundefined_f16m4 () {
return vundefined_f16m4();

@@ -366,7 +366,7 @@ vfloat16m4_t test_vundefined_f16m4 () {
// CHECK-RV64-LABEL: @test_vundefined_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: ret <vscale x 32 x half> undef
// CHECK-RV64-NEXT: ret <vscale x 32 x half> poison
//
vfloat16m8_t test_vundefined_f16m8 () {
return vundefined_f16m8();

@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) {

@@ -15,7 +15,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) {

@@ -24,7 +24,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) {

@@ -33,7 +33,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) {

@@ -42,7 +42,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) {

@@ -51,7 +51,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) {

@@ -60,7 +60,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) {

@@ -69,7 +69,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) {

@@ -78,7 +78,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) {

@@ -87,7 +87,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) {

@@ -96,7 +96,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) {

@@ -105,7 +105,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) {

@@ -114,7 +114,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) {

@@ -123,7 +123,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) {

@@ -132,7 +132,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) {

@@ -141,7 +141,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) {

@@ -150,7 +150,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) {

@@ -159,7 +159,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) {

@@ -168,7 +168,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) {

@@ -177,7 +177,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) {

@@ -186,7 +186,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) {

@@ -195,7 +195,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) {

@@ -204,7 +204,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) {

@@ -213,7 +213,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) {

@@ -222,7 +222,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) {

@@ -231,7 +231,7 @@ vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) {

@@ -240,7 +240,7 @@ vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) {

@@ -249,7 +249,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) {

@@ -258,7 +258,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) {

@@ -267,7 +267,7 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwcvtu_x_x_v_u64m8 (vuint32m4_t src, size_t vl) {

@@ -564,7 +564,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1_tu(vuint64m1_t merge, vuint32mf2_t src, size
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwcvt_x_x_v_i64m1_ta(vint32mf2_t src, size_t vl) {

@@ -573,7 +573,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1_ta(vint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwcvtu_x_x_v_u64m1_ta(vuint32mf2_t src, size_t vl) {

@@ -618,7 +618,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuin
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwcvt_x_x_v_i64m1_tama(vbool64_t mask, vint32mf2_t src, size_t vl) {

@@ -627,7 +627,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1_tama(vbool64_t mask, vint32mf2_t src, size_t v
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwcvtu_x_x_v_u64m1_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) {