[X86] Fix various type mismatches in intrinsic headers and intrinsic tests that cause extra bitcasts to be emitted in the IR.

Found via imprecise grepping of the -O0 IR. There could still be more bugs out there.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@336487 91177308-0d34-0410-b5e6-96231b3b80d8
Craig Topper 2018-07-07 17:03:32 +00:00
parent 27dd885739
commit 1d754e855c
7 changed files with 80 additions and 80 deletions
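The pattern behind every hunk below is the same: an intrinsic builds its zero/undef/passthrough argument with a helper of the wrong element type (or declares the wrong return/parameter type), and the vector cast that papers over the mismatch shows up as a spurious bitcast in the unoptimized IR. A minimal sketch of the effect (a hypothetical reduction, not code from this patch; assumes clang with -O0 -S -emit-llvm):

#include <immintrin.h>

/* Mismatched helper: _mm_setzero_pd() yields <2 x double>, so the
   (__v4sf) vector cast is emitted as a bitcast from <2 x double> to
   <4 x float> in the -O0 IR. */
__m128 zero_via_pd(void) {
  return (__m128)(__v4sf)_mm_setzero_pd();
}

/* Matching helper: _mm_setzero_ps() is already <4 x float>, so no
   bitcast is needed. */
__m128 zero_via_ps(void) {
  return (__m128)(__v4sf)_mm_setzero_ps();
}

Both functions return zero either way; the stray bitcast is purely an IR-hygiene issue, which is why it only turns up when grepping the -O0 output.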


@@ -3374,7 +3374,7 @@ _mm512_maskz_permutex2var_epi64(__mmask8 __U, __m512i __A, __m512i __I,
 #define _mm512_extractf64x4_pd(A, I) \
 (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
-(__v4df)_mm256_undefined_si256(), \
+(__v4df)_mm256_undefined_pd(), \
 (__mmask8)-1)
 #define _mm512_mask_extractf64x4_pd(W, U, A, imm) \
@@ -5544,7 +5544,7 @@ _mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B)
 {
 return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
 (__v4sf) __B,
-(__v4sf) _mm_setzero_pd (),
+(__v4sf) _mm_setzero_ps (),
 (__mmask8) __U,
 _MM_FROUND_CUR_DIRECTION);
 }
@@ -5634,7 +5634,7 @@ _mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B)
 (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
 (__v4sf)(__m128)(B), \
 (int)(((D)<<2) | (C)), \
-(__v4sf)_mm_setzero_pd(), \
+(__v4sf)_mm_setzero_ps(), \
 (__mmask8)(U), \
 _MM_FROUND_CUR_DIRECTION)
@@ -6721,24 +6721,24 @@ _mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, int __B)
 (__v8df)_mm512_setzero_pd())
 #define _mm512_shuffle_ps(A, B, M) \
-(__m512d)__builtin_shufflevector((__v16sf)(__m512)(A), \
-(__v16sf)(__m512)(B), \
-0 + (((M) >> 0) & 0x3), \
-0 + (((M) >> 2) & 0x3), \
-16 + (((M) >> 4) & 0x3), \
-16 + (((M) >> 6) & 0x3), \
-4 + (((M) >> 0) & 0x3), \
-4 + (((M) >> 2) & 0x3), \
-20 + (((M) >> 4) & 0x3), \
-20 + (((M) >> 6) & 0x3), \
-8 + (((M) >> 0) & 0x3), \
-8 + (((M) >> 2) & 0x3), \
-24 + (((M) >> 4) & 0x3), \
-24 + (((M) >> 6) & 0x3), \
-12 + (((M) >> 0) & 0x3), \
-12 + (((M) >> 2) & 0x3), \
-28 + (((M) >> 4) & 0x3), \
-28 + (((M) >> 6) & 0x3))
+(__m512)__builtin_shufflevector((__v16sf)(__m512)(A), \
+(__v16sf)(__m512)(B), \
+0 + (((M) >> 0) & 0x3), \
+0 + (((M) >> 2) & 0x3), \
+16 + (((M) >> 4) & 0x3), \
+16 + (((M) >> 6) & 0x3), \
+4 + (((M) >> 0) & 0x3), \
+4 + (((M) >> 2) & 0x3), \
+20 + (((M) >> 4) & 0x3), \
+20 + (((M) >> 6) & 0x3), \
+8 + (((M) >> 0) & 0x3), \
+8 + (((M) >> 2) & 0x3), \
+24 + (((M) >> 4) & 0x3), \
+24 + (((M) >> 6) & 0x3), \
+12 + (((M) >> 0) & 0x3), \
+12 + (((M) >> 2) & 0x3), \
+28 + (((M) >> 4) & 0x3), \
+28 + (((M) >> 6) & 0x3))
 #define _mm512_mask_shuffle_ps(W, U, A, B, M) \
 (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
@@ -7651,7 +7651,7 @@ _mm512_maskz_getexp_ps (__mmask16 __U, __m512 __A)
 (__mmask8)(mask), (int)(scale))
 #define _mm512_i64gather_epi32(index, addr, scale) \
-(__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_ps(), \
+(__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_si256(), \
 (int const *)(addr), \
 (__v8di)(__m512i)(index), \
 (__mmask8)-1, (int)(scale))
@@ -7675,7 +7675,7 @@ _mm512_maskz_getexp_ps (__mmask16 __U, __m512 __A)
 (__mmask8)(mask), (int)(scale))
 #define _mm512_i64gather_epi64(index, addr, scale) \
-(__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_pd(), \
+(__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_epi32(), \
 (long long const *)(addr), \
 (__v8di)(__m512i)(index), (__mmask8)-1, \
 (int)(scale))
@@ -7825,16 +7825,16 @@ _mm_mask_fmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
 }
 #define _mm_fmadd_round_ss(A, B, C, R) \
-(__m128d)__builtin_ia32_vfmaddss3_mask((__v2df)(__m128d)(A), \
-(__v2df)(__m128d)(B), \
-(__v2df)(__m128d)(C), (__mmask8)-1, \
-(int)(R))
+(__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
+(__v4sf)(__m128)(B), \
+(__v4sf)(__m128)(C), (__mmask8)-1, \
+(int)(R))
 #define _mm_mask_fmadd_round_ss(W, U, A, B, R) \
-(__m128d)__builtin_ia32_vfmaddss3_mask((__v2df)(__m128d)(W), \
-(__v2df)(__m128d)(A), \
-(__v2df)(__m128d)(B), (__mmask8)(U), \
-(int)(R))
+(__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
+(__v4sf)(__m128)(A), \
+(__v4sf)(__m128)(B), (__mmask8)(U), \
+(int)(R))
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_maskz_fmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
@@ -8780,7 +8780,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_maskz_expand_epi64 ( __mmask8 __U, __m512i __A)
 {
 return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A,
-(__v8di) _mm512_setzero_pd (),
+(__v8di) _mm512_setzero_si512 (),
 (__mmask8) __U);
 }
@@ -8812,7 +8812,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_maskz_expandloadu_epi64(__mmask8 __U, void const *__P)
 {
 return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P,
-(__v8di) _mm512_setzero_pd(),
+(__v8di) _mm512_setzero_si512(),
 (__mmask8) __U);
 }
@@ -8844,7 +8844,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_maskz_expandloadu_epi32(__mmask16 __U, void const *__P)
 {
 return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P,
-(__v16si) _mm512_setzero_ps(),
+(__v16si) _mm512_setzero_si512(),
 (__mmask16) __U);
 }
@@ -8876,7 +8876,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_maskz_expand_epi32 (__mmask16 __U, __m512i __A)
 {
 return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A,
-(__v16si) _mm512_setzero_ps(),
+(__v16si) _mm512_setzero_si512(),
 (__mmask16) __U);
 }
@@ -8917,16 +8917,16 @@ _mm512_maskz_cvtps_pd (__mmask8 __U, __m256 __A)
 (__v8df)_mm512_setzero_pd());
 }
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS
 _mm512_cvtpslo_pd (__m512 __A)
 {
-return (__m512) _mm512_cvtps_pd(_mm512_castps512_ps256(__A));
+return (__m512d) _mm512_cvtps_pd(_mm512_castps512_ps256(__A));
 }
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS
 _mm512_mask_cvtpslo_pd (__m512d __W, __mmask8 __U, __m512 __A)
 {
-return (__m512) _mm512_mask_cvtps_pd(__W, __U, _mm512_castps512_ps256(__A));
+return (__m512d) _mm512_mask_cvtps_pd(__W, __U, _mm512_castps512_ps256(__A));
 }
 static __inline__ __m512d __DEFAULT_FN_ATTRS


@@ -3582,7 +3582,7 @@ _mm256_maskz_scalef_ps (__mmask8 __U, __m256 __A, __m256 __B) {
 _mm_maskz_sqrt_ps(__mmask8 __U, __m128 __A) {
 return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
 (__v4sf)_mm_sqrt_ps(__A),
-(__v4sf)_mm_setzero_pd());
+(__v4sf)_mm_setzero_ps());
 }
 static __inline__ __m256 __DEFAULT_FN_ATTRS
@@ -6413,7 +6413,7 @@ _mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, int __imm)
 #define _mm256_mask_shuffle_f64x2(W, U, A, B, imm) \
 (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
 (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
-(__v4df)(__m256)(W))
+(__v4df)(__m256d)(W))
 #define _mm256_maskz_shuffle_f64x2(U, A, B, imm) \
 (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
@@ -6427,7 +6427,7 @@ _mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, int __imm)
 #define _mm256_mask_shuffle_i32x4(W, U, A, B, imm) \
 (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
 (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
-(__v8si)(__m256)(W))
+(__v8si)(__m256i)(W))
 #define _mm256_maskz_shuffle_i32x4(U, A, B, imm) \
 (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
@@ -6441,7 +6441,7 @@ _mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, int __imm)
 #define _mm256_mask_shuffle_i64x2(W, U, A, B, imm) \
 (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
 (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
-(__v4di)(__m256)(W))
+(__v4di)(__m256i)(W))
 #define _mm256_maskz_shuffle_i64x2(U, A, B, imm) \
@@ -7872,7 +7872,7 @@ _mm256_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
 (__v4df)_mm256_setzero_pd())
 #define _mm256_permutex_epi64(X, C) \
-(__m256d)__builtin_ia32_permdi256((__v4di)(__m256i)(X), (int)(C))
+(__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(X), (int)(C))
 #define _mm256_mask_permutex_epi64(W, U, X, C) \
 (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \

@@ -4966,7 +4966,7 @@ _mm256_set_m128 (__m128 __hi, __m128 __lo)
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_set_m128d (__m128d __hi, __m128d __lo)
 {
-return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+return (__m256d) __builtin_shufflevector((__v2df)__lo, (__v2df)__hi, 0, 1, 2, 3);
 }
 /// Constructs a 256-bit integer vector by concatenating two 128-bit
@@ -4986,7 +4986,7 @@ _mm256_set_m128d (__m128d __hi, __m128d __lo)
 static __inline __m256i __DEFAULT_FN_ATTRS
 _mm256_set_m128i (__m128i __hi, __m128i __lo)
 {
-return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+return (__m256i) __builtin_shufflevector((__v2di)__lo, (__v2di)__hi, 0, 1, 2, 3);
 }
 /// Constructs a 256-bit floating-point vector of [8 x float] by
@@ -5032,7 +5032,7 @@ _mm256_setr_m128 (__m128 __lo, __m128 __hi)
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_setr_m128d (__m128d __lo, __m128d __hi)
 {
-return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+return (__m256d)_mm256_set_m128d(__hi, __lo);
 }
 /// Constructs a 256-bit integer vector by concatenating two 128-bit
@@ -5053,7 +5053,7 @@ _mm256_setr_m128d (__m128d __lo, __m128d __hi)
 static __inline __m256i __DEFAULT_FN_ATTRS
 _mm256_setr_m128i (__m128i __lo, __m128i __hi)
 {
-return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+return (__m256i)_mm256_set_m128i(__hi, __lo);
 }
 #undef __DEFAULT_FN_ATTRS
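For clarity, what the corrected _mm256_set_m128d boils down to (a sketch using clang's vector extensions, mirroring the hunk above rather than quoting the header verbatim): the concatenation now happens directly on <2 x double> values, so the IR is a single shufflevector on doubles instead of two bitcasts wrapped around a <4 x float> shuffle.

#include <immintrin.h>

/* lo supplies elements 0-1 of the result and hi supplies elements 2-3;
   shuffle indices 0,1 select from the first operand (lo) and 2,3 from
   the second (hi). */
static __m256d concat_m128d(__m128d hi, __m128d lo) {
  return (__m256d)__builtin_shufflevector((__v2df)lo, (__v2df)hi, 0, 1, 2, 3);
}

The setr variants then just forward to the set variants with the arguments swapped, as the hunks above show.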


@@ -1570,13 +1570,13 @@ __m256 test_mm256_set_m128(__m128 A, __m128 B) {
 __m256d test_mm256_set_m128d(__m128d A, __m128d B) {
 // CHECK-LABEL: test_mm256_set_m128d
-// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
 return _mm256_set_m128d(A, B);
 }
 __m256i test_mm256_set_m128i(__m128i A, __m128i B) {
 // CHECK-LABEL: test_mm256_set_m128i
-// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
 return _mm256_set_m128i(A, B);
 }
@@ -1796,13 +1796,13 @@ __m256 test_mm256_setr_m128(__m128 A, __m128 B) {
 __m256d test_mm256_setr_m128d(__m128d A, __m128d B) {
 // CHECK-LABEL: test_mm256_setr_m128d
-// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
 return _mm256_setr_m128d(A, B);
 }
 __m256i test_mm256_setr_m128i(__m128i A, __m128i B) {
 // CHECK-LABEL: test_mm256_setr_m128i
-// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
 return _mm256_setr_m128i(A, B);
 }
@@ -1990,7 +1990,7 @@ int test_mm256_testc_ps(__m256 A, __m256 B) {
 return _mm256_testc_ps(A, B);
 }
-int test_mm256_testc_si256(__m256 A, __m256 B) {
+int test_mm256_testc_si256(__m256i A, __m256i B) {
 // CHECK-LABEL: test_mm256_testc_si256
 // CHECK: call i32 @llvm.x86.avx.ptestc.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
 return _mm256_testc_si256(A, B);
@@ -2020,7 +2020,7 @@ int test_mm256_testnzc_ps(__m256 A, __m256 B) {
 return _mm256_testnzc_ps(A, B);
 }
-int test_mm256_testnzc_si256(__m256 A, __m256 B) {
+int test_mm256_testnzc_si256(__m256i A, __m256i B) {
 // CHECK-LABEL: test_mm256_testnzc_si256
 // CHECK: call i32 @llvm.x86.avx.ptestnzc.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
 return _mm256_testnzc_si256(A, B);
@@ -2050,7 +2050,7 @@ int test_mm256_testz_ps(__m256 A, __m256 B) {
 return _mm256_testz_ps(A, B);
 }
-int test_mm256_testz_si256(__m256 A, __m256 B) {
+int test_mm256_testz_si256(__m256i A, __m256i B) {
 // CHECK-LABEL: test_mm256_testz_si256
 // CHECK: call i32 @llvm.x86.avx.ptestz.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
 return _mm256_testz_si256(A, B);


@@ -171,13 +171,13 @@ __m256 test_mm256_set_m128(__m128 hi, __m128 lo) {
 __m256d test_mm256_set_m128d(__m128d hi, __m128d lo) {
 // CHECK-LABEL: @test_mm256_set_m128d
-// CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+// CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3>
 return _mm256_set_m128d(hi, lo);
 }
 __m256i test_mm256_set_m128i(__m128i hi, __m128i lo) {
 // CHECK-LABEL: @test_mm256_set_m128i
-// CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+// CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3>
 return _mm256_set_m128i(hi, lo);
 }
@@ -189,12 +189,12 @@ __m256 test_mm256_setr_m128(__m128 hi, __m128 lo) {
 __m256d test_mm256_setr_m128d(__m128d hi, __m128d lo) {
 // CHECK-LABEL: @test_mm256_setr_m128d
-// CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+// CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3>
 return _mm256_setr_m128d(lo, hi);
 }
 __m256i test_mm256_setr_m128i(__m128i hi, __m128i lo) {
 // CHECK-LABEL: @test_mm256_setr_m128i
-// CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+// CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3>
 return _mm256_setr_m128i(lo, hi);
 }


@@ -691,13 +691,13 @@ __m128d test_mm512_maskz_range_round_sd(__mmask8 __U, __m128d __A, __m128d __B)
 return _mm_maskz_range_round_sd(__U, __A, __B, 4, 8);
 }
-__m128d test_mm512_range_round_ss(__m128d __A, __m128d __B) {
+__m128 test_mm512_range_round_ss(__m128 __A, __m128 __B) {
 // CHECK-LABEL: @test_mm512_range_round_ss
 // CHECK: @llvm.x86.avx512.mask.range.ss
 return _mm_range_round_ss(__A, __B, 4, 8);
 }
-__m128d test_mm512_mask_range_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+__m128 test_mm512_mask_range_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
 // CHECK-LABEL: @test_mm512_mask_range_round_ss
 // CHECK: @llvm.x86.avx512.mask.range.ss
 return _mm_mask_range_round_ss(__W, __U, __A, __B, 4, 8);
@@ -727,13 +727,13 @@ __m128d test_mm_maskz_range_sd(__mmask8 __U, __m128d __A, __m128d __B) {
 return _mm_maskz_range_sd(__U, __A, __B, 4);
 }
-__m128d test_mm_range_ss(__m128d __A, __m128d __B) {
+__m128 test_mm_range_ss(__m128 __A, __m128 __B) {
 // CHECK-LABEL: @test_mm_range_ss
 // CHECK: @llvm.x86.avx512.mask.range.ss
 return _mm_range_ss(__A, __B, 4);
 }
-__m128d test_mm_mask_range_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+__m128 test_mm_mask_range_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
 // CHECK-LABEL: @test_mm_mask_range_ss
 // CHECK: @llvm.x86.avx512.mask.range.ss
 return _mm_mask_range_ss(__W, __U, __A, __B, 4);
@@ -1256,13 +1256,13 @@ __mmask16 test_mm512_fpclass_ps_mask(__m512 __A) {
 return _mm512_fpclass_ps_mask(__A, 4);
 }
-__mmask8 test_mm_fpclass_sd_mask(__m128 __A) {
+__mmask8 test_mm_fpclass_sd_mask(__m128d __A) {
 // CHECK-LABEL: @test_mm_fpclass_sd_mask
 // CHECK: @llvm.x86.avx512.mask.fpclass.sd
 return _mm_fpclass_sd_mask (__A, 2);
 }
-__mmask8 test_mm_mask_fpclass_sd_mask(__mmask8 __U, __m128 __A) {
+__mmask8 test_mm_mask_fpclass_sd_mask(__mmask8 __U, __m128d __A) {
 // CHECK-LABEL: @test_mm_mask_fpclass_sd_mask
 // CHECK: @llvm.x86.avx512.mask.fpclass.sd
 return _mm_mask_fpclass_sd_mask (__U, __A, 2);


@@ -2378,14 +2378,14 @@ __m128 test_mm512_extractf32x4_ps(__m512 a)
 return _mm512_extractf32x4_ps(a, 1);
 }
-__m128 test_mm512_mask_extractf32x4_ps(__m128 __W, __mmask8 __U,__m512d __A){
+__m128 test_mm512_mask_extractf32x4_ps(__m128 __W, __mmask8 __U,__m512 __A){
 // CHECK-LABEL:@test_mm512_mask_extractf32x4_ps
 // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
 // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
 return _mm512_mask_extractf32x4_ps( __W, __U, __A, 1);
 }
-__m128 test_mm512_maskz_extractf32x4_ps( __mmask8 __U,__m512d __A){
+__m128 test_mm512_maskz_extractf32x4_ps( __mmask8 __U,__m512 __A){
 // CHECK-LABEL:@test_mm512_maskz_extractf32x4_ps
 // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
 // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
@@ -5341,19 +5341,19 @@ __m128 test_mm_roundscale_ss(__m128 __A, __m128 __B) {
 return _mm_roundscale_ss(__A, __B, 3);
 }
-__m128 test_mm_mask_roundscale_ss(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){
+__m128 test_mm_mask_roundscale_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){
 // CHECK-LABEL: @test_mm_mask_roundscale_ss
 // CHECK: @llvm.x86.avx512.mask.rndscale.ss
 return _mm_mask_roundscale_ss(__W,__U,__A,__B,3);
 }
-__m128 test_mm_maskz_roundscale_round_ss( __mmask8 __U, __m128d __A, __m128d __B){
+__m128 test_mm_maskz_roundscale_round_ss( __mmask8 __U, __m128 __A, __m128 __B){
 // CHECK-LABEL: @test_mm_maskz_roundscale_round_ss
 // CHECK: @llvm.x86.avx512.mask.rndscale.ss
 return _mm_maskz_roundscale_round_ss(__U,__A,__B,3,_MM_FROUND_CUR_DIRECTION);
 }
-__m128 test_mm_maskz_roundscale_ss(__mmask8 __U, __m128d __A, __m128d __B){
+__m128 test_mm_maskz_roundscale_ss(__mmask8 __U, __m128 __A, __m128 __B){
 // CHECK-LABEL: @test_mm_maskz_roundscale_ss
 // CHECK: @llvm.x86.avx512.mask.rndscale.ss
 return _mm_maskz_roundscale_ss(__U,__A,__B,3);
@@ -6077,24 +6077,24 @@ __m512 test_mm512_maskz_broadcast_f32x4(__mmask16 __M, float const* __A) {
 return _mm512_maskz_broadcast_f32x4(__M, _mm_loadu_ps(__A));
 }
-__m512d test_mm512_broadcast_f64x4(float const* __A) {
+__m512d test_mm512_broadcast_f64x4(double const* __A) {
 // CHECK-LABEL: @test_mm512_broadcast_f64x4
 // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
-return _mm512_broadcast_f64x4(_mm256_loadu_ps(__A));
+return _mm512_broadcast_f64x4(_mm256_loadu_pd(__A));
 }
-__m512d test_mm512_mask_broadcast_f64x4(__m512d __O, __mmask8 __M, float const* __A) {
+__m512d test_mm512_mask_broadcast_f64x4(__m512d __O, __mmask8 __M, double const* __A) {
 // CHECK-LABEL: @test_mm512_mask_broadcast_f64x4
 // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
 // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
-return _mm512_mask_broadcast_f64x4(__O, __M, _mm256_loadu_ps(__A));
+return _mm512_mask_broadcast_f64x4(__O, __M, _mm256_loadu_pd(__A));
 }
-__m512d test_mm512_maskz_broadcast_f64x4(__mmask8 __M, float const* __A) {
+__m512d test_mm512_maskz_broadcast_f64x4(__mmask8 __M, double const* __A) {
 // CHECK-LABEL: @test_mm512_maskz_broadcast_f64x4
 // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
 // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
-return _mm512_maskz_broadcast_f64x4(__M, _mm256_loadu_ps(__A));
+return _mm512_maskz_broadcast_f64x4(__M, _mm256_loadu_pd(__A));
 }
__m512i test_mm512_broadcast_i32x4(__m128i const* __A) {
@@ -8630,7 +8630,7 @@ __m512d test_mm512_castpd128_pd512(__m128d __A) {
 return _mm512_castpd128_pd512(__A);
 }
-__m512d test_mm512_set1_epi8(char d)
+__m512i test_mm512_set1_epi8(char d)
 {
 // CHECK-LABEL: @test_mm512_set1_epi8
 // CHECK: insertelement <64 x i8> {{.*}}, i32 0
@@ -8645,7 +8645,7 @@ __m512d test_mm512_set1_epi8(char d)
 return _mm512_set1_epi8(d);
 }
-__m512d test_mm512_set1_epi16(short d)
+__m512i test_mm512_set1_epi16(short d)
 {
 // CHECK-LABEL: @test_mm512_set1_epi16
 // CHECK: insertelement <32 x i16> {{.*}}, i32 0
@@ -8702,14 +8702,14 @@ __m512i test_mm512_setr4_epi32(int e0, int e1, int e2, int e3)
 return _mm512_setr4_epi64(e0, e1, e2, e3);
 }
-__m512i test_mm512_setr4_pd(double e0, double e1, double e2, double e3)
+__m512d test_mm512_setr4_pd(double e0, double e1, double e2, double e3)
 {
 // CHECK-LABEL: @test_mm512_setr4_pd
 // CHECK: insertelement <8 x double> {{.*}}, i32 7
 return _mm512_setr4_pd(e0,e1,e2,e3);
 }
-__m512i test_mm512_setr4_ps(float e0, float e1, float e2, float e3)
+__m512 test_mm512_setr4_ps(float e0, float e1, float e2, float e3)
 {
 // CHECK-LABEL: @test_mm512_setr4_ps
 // CHECK: insertelement <16 x float> {{.*}}, i32 15