diff options
author | Michael Zuckerman <Michael.zuckerman@intel.com> | 2017-04-04 13:29:53 +0000 |
---|---|---|
committer | Michael Zuckerman <Michael.zuckerman@intel.com> | 2017-04-04 13:29:53 +0000 |
commit | 410778090360bf67a7360b42ed4e4f5e0c23c674 (patch) | |
tree | d59d818a4e16ffb8f14e596f8972cd223b853574 | |
parent | b90806f91c608d403c80f50ab8e052e756b9d8c2 (diff) |
[X86][Clang] Converting _mm{|256|512}_movm_epi{8|16|32|64} LLVM IR calls into generic intrinsics.
This patch is part two of two reviews, one for Clang and the other for LLVM.
In this patch, I covered the Clang side by introducing the intrinsics to the front end.
This is done by creating a generic replacement (a bitcast of the mask to a vector of i1 followed by a sign extension) instead of a target-specific intrinsic call.
Differential Revision: https://reviews.llvm.org/D31394
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@299431 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r-- | lib/CodeGen/CGBuiltin.cpp | 22 | ||||
-rw-r--r-- | test/CodeGen/avx512bw-builtins.c | 6 | ||||
-rw-r--r-- | test/CodeGen/avx512dq-builtins.c | 6 | ||||
-rw-r--r-- | test/CodeGen/avx512vlbw-builtins.c | 12 | ||||
-rw-r--r-- | test/CodeGen/avx512vldq-builtins.c | 15 |
5 files changed, 49 insertions, 12 deletions
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp index 463dda4213..a23fa9e470 100644 --- a/lib/CodeGen/CGBuiltin.cpp +++ b/lib/CodeGen/CGBuiltin.cpp @@ -7168,6 +7168,13 @@ static Value *EmitX86MinMax(CodeGenFunction &CGF, ICmpInst::Predicate Pred, return EmitX86Select(CGF, Ops[3], Res, Ops[2]); } +static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op, + llvm::Type *DstTy) { + unsigned NumberOfElements = DstTy->getVectorNumElements(); + Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements); + return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2"); +} + Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E) { if (BuiltinID == X86::BI__builtin_ms_va_start || @@ -7466,6 +7473,21 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, case X86::BI__builtin_ia32_storesd128_mask: { return EmitX86MaskedStore(*this, Ops, 16); } + + case X86::BI__builtin_ia32_cvtmask2b128: + case X86::BI__builtin_ia32_cvtmask2b256: + case X86::BI__builtin_ia32_cvtmask2b512: + case X86::BI__builtin_ia32_cvtmask2w128: + case X86::BI__builtin_ia32_cvtmask2w256: + case X86::BI__builtin_ia32_cvtmask2w512: + case X86::BI__builtin_ia32_cvtmask2d128: + case X86::BI__builtin_ia32_cvtmask2d256: + case X86::BI__builtin_ia32_cvtmask2d512: + case X86::BI__builtin_ia32_cvtmask2q128: + case X86::BI__builtin_ia32_cvtmask2q256: + case X86::BI__builtin_ia32_cvtmask2q512: + return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType())); + case X86::BI__builtin_ia32_movdqa32store128_mask: case X86::BI__builtin_ia32_movdqa64store128_mask: case X86::BI__builtin_ia32_storeaps128_mask: diff --git a/test/CodeGen/avx512bw-builtins.c b/test/CodeGen/avx512bw-builtins.c index 80b7a6d757..97ddbd5a87 100644 --- a/test/CodeGen/avx512bw-builtins.c +++ b/test/CodeGen/avx512bw-builtins.c @@ -1543,13 +1543,15 @@ __mmask64 test_mm512_movepi8_mask(__m512i __A) { __m512i test_mm512_movm_epi8(__mmask64 __A) { // CHECK-LABEL: @test_mm512_movm_epi8 - // CHECK: 
@llvm.x86.avx512.cvtmask2b.512 + // CHECK: %2 = bitcast i64 %1 to <64 x i1> + // CHECK: %vpmovm2.i = sext <64 x i1> %2 to <64 x i8> return _mm512_movm_epi8(__A); } __m512i test_mm512_movm_epi16(__mmask32 __A) { // CHECK-LABEL: @test_mm512_movm_epi16 - // CHECK: @llvm.x86.avx512.cvtmask2w.512 + // CHECK: %2 = bitcast i32 %1 to <32 x i1> + // CHECK: %vpmovm2.i = sext <32 x i1> %2 to <32 x i16> return _mm512_movm_epi16(__A); } diff --git a/test/CodeGen/avx512dq-builtins.c b/test/CodeGen/avx512dq-builtins.c index b4166ad7c4..55e47d6db8 100644 --- a/test/CodeGen/avx512dq-builtins.c +++ b/test/CodeGen/avx512dq-builtins.c @@ -929,13 +929,15 @@ __mmask16 test_mm512_movepi32_mask(__m512i __A) { __m512i test_mm512_movm_epi32(__mmask16 __A) { // CHECK-LABEL: @test_mm512_movm_epi32 - // CHECK: @llvm.x86.avx512.cvtmask2d.512 + // CHECK: %2 = bitcast i16 %1 to <16 x i1> + // CHECK: %vpmovm2.i = sext <16 x i1> %2 to <16 x i32> return _mm512_movm_epi32(__A); } __m512i test_mm512_movm_epi64(__mmask8 __A) { // CHECK-LABEL: @test_mm512_movm_epi64 - // CHECK: @llvm.x86.avx512.cvtmask2q.512 + // CHECK: %2 = bitcast i8 %1 to <8 x i1> + // CHECK: %vpmovm2.i = sext <8 x i1> %2 to <8 x i64> return _mm512_movm_epi64(__A); } diff --git a/test/CodeGen/avx512vlbw-builtins.c b/test/CodeGen/avx512vlbw-builtins.c index fe363c9ffd..9fc3851ad0 100644 --- a/test/CodeGen/avx512vlbw-builtins.c +++ b/test/CodeGen/avx512vlbw-builtins.c @@ -2521,25 +2521,29 @@ __mmask32 test_mm256_movepi8_mask(__m256i __A) { __m128i test_mm_movm_epi8(__mmask16 __A) { // CHECK-LABEL: @test_mm_movm_epi8 - // CHECK: @llvm.x86.avx512.cvtmask2b.128 + // CHECK: %2 = bitcast i16 %1 to <16 x i1> + // CHECK: %vpmovm2.i = sext <16 x i1> %2 to <16 x i8> return _mm_movm_epi8(__A); } __m256i test_mm256_movm_epi8(__mmask32 __A) { // CHECK-LABEL: @test_mm256_movm_epi8 - // CHECK: @llvm.x86.avx512.cvtmask2b.256 + // CHECK: %2 = bitcast i32 %1 to <32 x i1> + // CHECK: %vpmovm2.i = sext <32 x i1> %2 to <32 x i8> return 
_mm256_movm_epi8(__A); } __m128i test_mm_movm_epi16(__mmask8 __A) { // CHECK-LABEL: @test_mm_movm_epi16 - // CHECK: @llvm.x86.avx512.cvtmask2w.128 + // CHECK: %2 = bitcast i8 %1 to <8 x i1> + // CHECK: %vpmovm2.i = sext <8 x i1> %2 to <8 x i16> return _mm_movm_epi16(__A); } __m256i test_mm256_movm_epi16(__mmask16 __A) { // CHECK-LABEL: @test_mm256_movm_epi16 - // CHECK: @llvm.x86.avx512.cvtmask2w.256 + // CHECK: %2 = bitcast i16 %1 to <16 x i1> + // CHECK: %vpmovm2.i = sext <16 x i1> %2 to <16 x i16> return _mm256_movm_epi16(__A); } diff --git a/test/CodeGen/avx512vldq-builtins.c b/test/CodeGen/avx512vldq-builtins.c index a36892465b..483eae8674 100644 --- a/test/CodeGen/avx512vldq-builtins.c +++ b/test/CodeGen/avx512vldq-builtins.c @@ -865,25 +865,32 @@ __mmask8 test_mm256_movepi32_mask(__m256i __A) { __m128i test_mm_movm_epi32(__mmask8 __A) { // CHECK-LABEL: @test_mm_movm_epi32 - // CHECK: @llvm.x86.avx512.cvtmask2d.128 + // CHECK: %2 = bitcast i8 %1 to <8 x i1> + // CHECK: %extract.i = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + // CHECK: %vpmovm2.i = sext <4 x i1> %extract.i to <4 x i32> return _mm_movm_epi32(__A); } __m256i test_mm256_movm_epi32(__mmask8 __A) { // CHECK-LABEL: @test_mm256_movm_epi32 - // CHECK: @llvm.x86.avx512.cvtmask2d.256 + // CHECK: %2 = bitcast i8 %1 to <8 x i1> + // CHECK: %vpmovm2.i = sext <8 x i1> %2 to <8 x i32> return _mm256_movm_epi32(__A); } __m128i test_mm_movm_epi64(__mmask8 __A) { // CHECK-LABEL: @test_mm_movm_epi64 - // CHECK: @llvm.x86.avx512.cvtmask2q.128 + // CHECK: %2 = bitcast i8 %1 to <8 x i1> + // CHECK: %extract.i = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1> + // CHECK: %vpmovm2.i = sext <2 x i1> %extract.i to <2 x i64> return _mm_movm_epi64(__A); } __m256i test_mm256_movm_epi64(__mmask8 __A) { // CHECK-LABEL: @test_mm256_movm_epi64 - // CHECK: @llvm.x86.avx512.cvtmask2q.256 + // CHECK: %2 = bitcast i8 %1 to <8 x i1> + // CHECK: %extract.i = shufflevector <8 x 
i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + // CHECK: %vpmovm2.i = sext <4 x i1> %extract.i to <4 x i64> return _mm256_movm_epi64(__A); } |