path: root/lib/Headers/avx512vlbwintrin.h
author    Craig Topper <craig.topper@intel.com>    2018-05-31 00:51:20 +0000
committer Craig Topper <craig.topper@intel.com>    2018-05-31 00:51:20 +0000
commit    5eee836d45d8c29236a6836b581a17f9a36a926e (patch)
tree      8358766521184eafe0d1a83a79e7f5d77ce0f6a7 /lib/Headers/avx512vlbwintrin.h
parent    c1d322ac64b3ca72e442b2471da33357b7255350 (diff)
[X86] Remove __extension__ from macro intrinsics when it's not needed.
I think this is a holdover from when we used to declare variables inside the macros, and it has been copied and pasted forward for years every time a new macro intrinsic gets added. Interestingly, this made some IRGen tests slightly more optimized: we now return a zeroinitializer directly instead of going through a store+load. It also removed a bogus error message on another test.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@333613 91177308-0d34-0410-b5e6-96231b3b80d8
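To illustrate the pattern being changed, here is a minimal, self-contained sketch (hypothetical OLD_STYLE/NEW_STYLE names, not from the header) of the two macro styles. The removed form wraps the result in a GNU statement expression, which needs __extension__ to silence -pedantic and forces the value through a compound statement; the added form is a plain cast expression, so nothing stands between the builtin's result and its use.

#include <stdint.h>

/* OLD_STYLE mirrors the removed form: a GNU statement expression.   */
#define OLD_STYLE(x) __extension__ ({ (uint16_t)((x) + 1); })

/* NEW_STYLE mirrors the added form: a plain expression, no extension. */
#define NEW_STYLE(x) (uint16_t)((x) + 1)

int main(void) {
  uint16_t a = OLD_STYLE(41);  /* 42, produced via a statement expression */
  uint16_t b = NEW_STYLE(41);  /* 42, produced as a plain cast expression */
  return a == b ? 0 : 1;       /* both styles yield the same value        */
}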
Diffstat (limited to 'lib/Headers/avx512vlbwintrin.h')
-rw-r--r-- lib/Headers/avx512vlbwintrin.h | 136
1 file changed, 68 insertions(+), 68 deletions(-)
diff --git a/lib/Headers/avx512vlbwintrin.h b/lib/Headers/avx512vlbwintrin.h
index a18e2c98a4..a04ef6f17c 100644
--- a/lib/Headers/avx512vlbwintrin.h
+++ b/lib/Headers/avx512vlbwintrin.h
@@ -33,85 +33,85 @@
/* Integer compare */
-#define _mm_cmp_epi8_mask(a, b, p) __extension__ ({ \
+#define _mm_cmp_epi8_mask(a, b, p) \
(__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
(__v16qi)(__m128i)(b), (int)(p), \
- (__mmask16)-1); })
+ (__mmask16)-1)
-#define _mm_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \
+#define _mm_mask_cmp_epi8_mask(m, a, b, p) \
(__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
(__v16qi)(__m128i)(b), (int)(p), \
- (__mmask16)(m)); })
+ (__mmask16)(m))
-#define _mm_cmp_epu8_mask(a, b, p) __extension__ ({ \
+#define _mm_cmp_epu8_mask(a, b, p) \
(__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
(__v16qi)(__m128i)(b), (int)(p), \
- (__mmask16)-1); })
+ (__mmask16)-1)
-#define _mm_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \
+#define _mm_mask_cmp_epu8_mask(m, a, b, p) \
(__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
(__v16qi)(__m128i)(b), (int)(p), \
- (__mmask16)(m)); })
+ (__mmask16)(m))
-#define _mm256_cmp_epi8_mask(a, b, p) __extension__ ({ \
+#define _mm256_cmp_epi8_mask(a, b, p) \
(__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
(__v32qi)(__m256i)(b), (int)(p), \
- (__mmask32)-1); })
+ (__mmask32)-1)
-#define _mm256_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \
+#define _mm256_mask_cmp_epi8_mask(m, a, b, p) \
(__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
(__v32qi)(__m256i)(b), (int)(p), \
- (__mmask32)(m)); })
+ (__mmask32)(m))
-#define _mm256_cmp_epu8_mask(a, b, p) __extension__ ({ \
+#define _mm256_cmp_epu8_mask(a, b, p) \
(__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
(__v32qi)(__m256i)(b), (int)(p), \
- (__mmask32)-1); })
+ (__mmask32)-1)
-#define _mm256_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \
+#define _mm256_mask_cmp_epu8_mask(m, a, b, p) \
(__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
(__v32qi)(__m256i)(b), (int)(p), \
- (__mmask32)(m)); })
+ (__mmask32)(m))
-#define _mm_cmp_epi16_mask(a, b, p) __extension__ ({ \
+#define _mm_cmp_epi16_mask(a, b, p) \
(__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
(__v8hi)(__m128i)(b), (int)(p), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \
+#define _mm_mask_cmp_epi16_mask(m, a, b, p) \
(__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
(__v8hi)(__m128i)(b), (int)(p), \
- (__mmask8)(m)); })
+ (__mmask8)(m))
-#define _mm_cmp_epu16_mask(a, b, p) __extension__ ({ \
+#define _mm_cmp_epu16_mask(a, b, p) \
(__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
(__v8hi)(__m128i)(b), (int)(p), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \
+#define _mm_mask_cmp_epu16_mask(m, a, b, p) \
(__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
(__v8hi)(__m128i)(b), (int)(p), \
- (__mmask8)(m)); })
+ (__mmask8)(m))
-#define _mm256_cmp_epi16_mask(a, b, p) __extension__ ({ \
+#define _mm256_cmp_epi16_mask(a, b, p) \
(__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
(__v16hi)(__m256i)(b), (int)(p), \
- (__mmask16)-1); })
+ (__mmask16)-1)
-#define _mm256_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \
+#define _mm256_mask_cmp_epi16_mask(m, a, b, p) \
(__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
(__v16hi)(__m256i)(b), (int)(p), \
- (__mmask16)(m)); })
+ (__mmask16)(m))
-#define _mm256_cmp_epu16_mask(a, b, p) __extension__ ({ \
+#define _mm256_cmp_epu16_mask(a, b, p) \
(__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
(__v16hi)(__m256i)(b), (int)(p), \
- (__mmask16)-1); })
+ (__mmask16)-1)
-#define _mm256_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \
+#define _mm256_mask_cmp_epu16_mask(m, a, b, p) \
(__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
(__v16hi)(__m256i)(b), (int)(p), \
- (__mmask16)(m)); })
+ (__mmask16)(m))
#define _mm_cmpeq_epi8_mask(A, B) \
_mm_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ)
@@ -1833,47 +1833,47 @@ _mm256_maskz_cvtepu8_epi16 (__mmask16 __U, __m128i __A)
}
-#define _mm_mask_shufflehi_epi16(W, U, A, imm) __extension__ ({ \
+#define _mm_mask_shufflehi_epi16(W, U, A, imm) \
(__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
(__v8hi)_mm_shufflehi_epi16((A), (imm)), \
- (__v8hi)(__m128i)(W)); })
+ (__v8hi)(__m128i)(W))
-#define _mm_maskz_shufflehi_epi16(U, A, imm) __extension__ ({ \
+#define _mm_maskz_shufflehi_epi16(U, A, imm) \
(__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
(__v8hi)_mm_shufflehi_epi16((A), (imm)), \
- (__v8hi)_mm_setzero_si128()); })
+ (__v8hi)_mm_setzero_si128())
-#define _mm256_mask_shufflehi_epi16(W, U, A, imm) __extension__ ({ \
+#define _mm256_mask_shufflehi_epi16(W, U, A, imm) \
(__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
- (__v16hi)(__m256i)(W)); })
+ (__v16hi)(__m256i)(W))
-#define _mm256_maskz_shufflehi_epi16(U, A, imm) __extension__ ({ \
+#define _mm256_maskz_shufflehi_epi16(U, A, imm) \
(__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
- (__v16hi)_mm256_setzero_si256()); })
+ (__v16hi)_mm256_setzero_si256())
-#define _mm_mask_shufflelo_epi16(W, U, A, imm) __extension__ ({ \
+#define _mm_mask_shufflelo_epi16(W, U, A, imm) \
(__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
(__v8hi)_mm_shufflelo_epi16((A), (imm)), \
- (__v8hi)(__m128i)(W)); })
+ (__v8hi)(__m128i)(W))
-#define _mm_maskz_shufflelo_epi16(U, A, imm) __extension__ ({ \
+#define _mm_maskz_shufflelo_epi16(U, A, imm) \
(__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
(__v8hi)_mm_shufflelo_epi16((A), (imm)), \
- (__v8hi)_mm_setzero_si128()); })
+ (__v8hi)_mm_setzero_si128())
-#define _mm256_mask_shufflelo_epi16(W, U, A, imm) __extension__ ({ \
+#define _mm256_mask_shufflelo_epi16(W, U, A, imm) \
(__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_shufflelo_epi16((A), \
(imm)), \
- (__v16hi)(__m256i)(W)); })
+ (__v16hi)(__m256i)(W))
-#define _mm256_maskz_shufflelo_epi16(U, A, imm) __extension__ ({ \
+#define _mm256_maskz_shufflelo_epi16(U, A, imm) \
(__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_shufflelo_epi16((A), \
(imm)), \
- (__v16hi)_mm256_setzero_si256()); })
+ (__v16hi)_mm256_setzero_si256())
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_sllv_epi16(__m256i __A, __m256i __B)
@@ -2693,61 +2693,61 @@ _mm256_mask_permutexvar_epi16 (__m256i __W, __mmask16 __M, __m256i __A,
(__v16hi)__W);
}
-#define _mm_mask_alignr_epi8(W, U, A, B, N) __extension__ ({ \
+#define _mm_mask_alignr_epi8(W, U, A, B, N) \
(__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
(__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \
- (__v16qi)(__m128i)(W)); })
+ (__v16qi)(__m128i)(W))
-#define _mm_maskz_alignr_epi8(U, A, B, N) __extension__ ({ \
+#define _mm_maskz_alignr_epi8(U, A, B, N) \
(__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
(__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \
- (__v16qi)_mm_setzero_si128()); })
+ (__v16qi)_mm_setzero_si128())
-#define _mm256_mask_alignr_epi8(W, U, A, B, N) __extension__ ({ \
+#define _mm256_mask_alignr_epi8(W, U, A, B, N) \
(__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
(__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \
- (__v32qi)(__m256i)(W)); })
+ (__v32qi)(__m256i)(W))
-#define _mm256_maskz_alignr_epi8(U, A, B, N) __extension__ ({ \
+#define _mm256_maskz_alignr_epi8(U, A, B, N) \
(__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
(__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \
- (__v32qi)_mm256_setzero_si256()); })
+ (__v32qi)_mm256_setzero_si256())
-#define _mm_dbsad_epu8(A, B, imm) __extension__ ({ \
+#define _mm_dbsad_epu8(A, B, imm) \
(__m128i)__builtin_ia32_dbpsadbw128_mask((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(imm), \
(__v8hi)_mm_setzero_si128(), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_dbsad_epu8(W, U, A, B, imm) __extension__ ({ \
+#define _mm_mask_dbsad_epu8(W, U, A, B, imm) \
(__m128i)__builtin_ia32_dbpsadbw128_mask((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(imm), \
(__v8hi)(__m128i)(W), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm_maskz_dbsad_epu8(U, A, B, imm) __extension__ ({ \
+#define _mm_maskz_dbsad_epu8(U, A, B, imm) \
(__m128i)__builtin_ia32_dbpsadbw128_mask((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(imm), \
(__v8hi)_mm_setzero_si128(), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_dbsad_epu8(A, B, imm) __extension__ ({ \
+#define _mm256_dbsad_epu8(A, B, imm) \
(__m256i)__builtin_ia32_dbpsadbw256_mask((__v32qi)(__m256i)(A), \
(__v32qi)(__m256i)(B), (int)(imm), \
(__v16hi)_mm256_setzero_si256(), \
- (__mmask16)-1); })
+ (__mmask16)-1)
-#define _mm256_mask_dbsad_epu8(W, U, A, B, imm) __extension__ ({ \
+#define _mm256_mask_dbsad_epu8(W, U, A, B, imm) \
(__m256i)__builtin_ia32_dbpsadbw256_mask((__v32qi)(__m256i)(A), \
(__v32qi)(__m256i)(B), (int)(imm), \
(__v16hi)(__m256i)(W), \
- (__mmask16)(U)); })
+ (__mmask16)(U))
-#define _mm256_maskz_dbsad_epu8(U, A, B, imm) __extension__ ({ \
+#define _mm256_maskz_dbsad_epu8(U, A, B, imm) \
(__m256i)__builtin_ia32_dbpsadbw256_mask((__v32qi)(__m256i)(A), \
(__v32qi)(__m256i)(B), (int)(imm), \
(__v16hi)_mm256_setzero_si256(), \
- (__mmask16)(U)); })
+ (__mmask16)(U))
#undef __DEFAULT_FN_ATTRS
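Since the intrinsics above are macros that now expand to plain expressions, a minimal usage sketch (assuming AVX512VL and AVX512BW are enabled, e.g. clang -mavx512vl -mavx512bw) shows they still behave as ordinary expressions after the change:

#include <immintrin.h>

int main(void) {
  __m128i a = _mm_set1_epi8(1);
  __m128i b = _mm_set1_epi8(2);
  /* Compare all 16 bytes for equality; since every lane differs, the mask is 0. */
  __mmask16 eq = _mm_cmp_epi8_mask(a, b, _MM_CMPINT_EQ);
  return eq == 0 ? 0 : 1;
}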