24 #error "Never use <avx512fintrin.h> directly; include <immintrin.h> instead."
27 #ifndef __AVX512FINTRIN_H
28 #define __AVX512FINTRIN_H
34 typedef long long __v8di __attribute__((__vector_size__(64)));
38 typedef unsigned char __v64qu __attribute__((__vector_size__(64)));
39 typedef unsigned short __v32hu __attribute__((__vector_size__(64)));
40 typedef unsigned long long __v8du __attribute__((__vector_size__(64)));
41 typedef unsigned int __v16su __attribute__((__vector_size__(64)));
45 typedef long long __m512i __attribute__((__vector_size__(64)));
51 #define _MM_FROUND_TO_NEAREST_INT 0x00
52 #define _MM_FROUND_TO_NEG_INF 0x01
53 #define _MM_FROUND_TO_POS_INF 0x02
54 #define _MM_FROUND_TO_ZERO 0x03
55 #define _MM_FROUND_CUR_DIRECTION 0x04
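/* Rounding-control immediates for the *_round_* intrinsics below.
   _MM_FROUND_CUR_DIRECTION uses whatever rounding mode is currently set in
   MXCSR; the _MM_FROUND_TO_* values select an explicit mode for just that
   one operation. */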
163 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512f")))
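/* Every intrinsic in this file is declared with __DEFAULT_FN_ATTRS: it is
   always inlined, carries no debug info of its own, and requires the
   "avx512f" target feature to be enabled. */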
170 return (__m512i)(__v8di){ 0, 0, 0, 0, 0, 0, 0, 0 };
173 #define _mm512_setzero_epi32 _mm512_setzero_si512
178 return (__m512d)__builtin_ia32_undef512();
184 return (__m512)__builtin_ia32_undef512();
190 return (__m512)__builtin_ia32_undef512();
196 return (__m512i)__builtin_ia32_undef512();
202 return (__m512i)__builtin_shufflevector((__v4si) __A,
204 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
210 return (__m512i)__builtin_ia32_selectd_512(__M,
218 return (__m512i)__builtin_ia32_selectd_512(__M,
226 return (__m512i)__builtin_shufflevector((__v2di) __A,
228 0, 0, 0, 0, 0, 0, 0, 0);
234 return (__m512i)__builtin_ia32_selectq_512(__M,
243 return (__m512i)__builtin_ia32_selectq_512(__M,
251 return (__m512i) __builtin_ia32_pbroadcastd512_gpr_mask (__A,
261 return (__m512i) __builtin_ia32_pbroadcastq512_gpr_mask (__A,
266 return (__m512i) __builtin_ia32_pbroadcastq512_mem_mask (__A,
276 return (__m512){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
277 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
280 #define _mm512_setzero _mm512_setzero_ps
285 return (__m512d){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
291 return (__m512){ __w, __w, __w, __w, __w, __w, __w, __w,
292 __w, __w, __w, __w, __w, __w, __w, __w };
298 return (__m512d){ __w, __w, __w, __w, __w, __w, __w, __w };
304 return (__m512i)(__v64qi){ __w, __w, __w, __w, __w, __w, __w, __w,
305 __w, __w, __w, __w, __w, __w, __w, __w,
306 __w, __w, __w, __w, __w, __w, __w, __w,
307 __w, __w, __w, __w, __w, __w, __w, __w,
308 __w, __w, __w, __w, __w, __w, __w, __w,
309 __w, __w, __w, __w, __w, __w, __w, __w,
310 __w, __w, __w, __w, __w, __w, __w, __w,
311 __w, __w, __w, __w, __w, __w, __w, __w };
317 return (__m512i)(__v32hi){ __w, __w, __w, __w, __w, __w, __w, __w,
318 __w, __w, __w, __w, __w, __w, __w, __w,
319 __w, __w, __w, __w, __w, __w, __w, __w,
320 __w, __w, __w, __w, __w, __w, __w, __w };
326 return (__m512i)(__v16si){ __s, __s, __s, __s, __s, __s, __s, __s,
327 __s, __s, __s, __s, __s, __s, __s, __s };
333 return (__m512i)(__v8di){ __d, __d, __d, __d, __d, __d, __d, __d };
339 return (__m512)__builtin_shufflevector((__v4sf) __A,
341 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
347 return (__m512i)(__v16si)
348 { __D, __C, __B, __A, __D, __C, __B, __A,
349 __D, __C, __B, __A, __D, __C, __B, __A };
356 return (__m512i) (__v8di)
357 { __D, __C, __B, __A, __D, __C, __B, __A };
364 { __D, __C, __B, __A, __D, __C, __B, __A };
371 { __D, __C, __B, __A, __D, __C, __B, __A,
372 __D, __C, __B, __A, __D, __C, __B, __A };
375 #define _mm512_setr4_epi32(e0,e1,e2,e3) \
376 _mm512_set4_epi32((e3),(e2),(e1),(e0))
378 #define _mm512_setr4_epi64(e0,e1,e2,e3) \
379 _mm512_set4_epi64((e3),(e2),(e1),(e0))
381 #define _mm512_setr4_pd(e0,e1,e2,e3) \
382 _mm512_set4_pd((e3),(e2),(e1),(e0))
384 #define _mm512_setr4_ps(e0,e1,e2,e3) \
385 _mm512_set4_ps((e3),(e2),(e1),(e0))
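/* The setr4 macros are the "reversed" counterparts of set4: they forward to
   the corresponding set4 macro with the four arguments in the opposite
   order, so e0 ends up in the lowest element of each repeated group. */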
390 return (__m512d)__builtin_shufflevector((__v2df) __A,
392 0, 0, 0, 0, 0, 0, 0, 0);
400 return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
406 return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7,
407 -1, -1, -1, -1, -1, -1, -1, -1);
413 return __builtin_shufflevector(__a, __a, 0, 1);
419 return __builtin_shufflevector(__A, __A, 0, 1, 2, 3);
425 return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
431 return __builtin_shufflevector(__A, __A, 0, 1, 2, 3, 4, 5, 6, 7);
437 return (__m512) (__A);
443 return (__m512i) (__A);
449 return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
455 return (__m512d) (__A);
461 return (__m512i) (__A);
467 return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
473 return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
479 return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1);
485 return (__m512) (__A);
491 return (__m512d) (__A);
497 return (__m128i)__builtin_shufflevector(__A, __A , 0, 1);
503 return (__m256i)__builtin_shufflevector(__A, __A , 0, 1, 2, 3);
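/* The 512-bit bitwise and arithmetic operations below are expressed as plain
   C vector operators where possible; the masked (_mask_*) and zero-masked
   (_maskz_*) forms blend the result with the pass-through vector (or with
   zero) via the __builtin_ia32_select*_512 builtins, one mask bit per
   element. */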
510 return (__m512i)((__v16su)__a & (__v16su)__b);
516 return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
531 return (__m512i)((__v8du)__a & (__v8du)__b);
537 return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __k,
552 return (__m512i)(~(__v8du)(__A) & (__v8du)__B);
558 return (__m512i)(~(__v16su)(__A) & (__v16su)__B);
564 return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
579 return (__m512i)(~(__v8du)(__A) & (__v8du)__B);
585 return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
600 return (__m512i)((__v16su)__a | (__v16su)__b);
606 return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
620 return (__m512i)((__v8du)__a | (__v8du)__b);
626 return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k,
640 return (__m512i)((__v16su)__a ^ (__v16su)__b);
646 return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
660 return (__m512i)((__v8du)__a ^ (__v8du)__b);
666 return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k,
680 return (__m512i)((__v8du)__a & (__v8du)__b);
686 return (__m512i)((__v8du)__a | (__v8du)__b);
692 return (__m512i)((__v8du)__a ^ (__v8du)__b);
700 return (__m512d)((__v8df)__a + (__v8df)__b);
706 return (__m512)((__v16sf)__a + (__v16sf)__b);
712 return (__m512d)((__v8df)__a * (__v8df)__b);
718 return (__m512)((__v16sf)__a * (__v16sf)__b);
724 return (__m512d)((__v8df)__a - (__v8df)__b);
730 return (__m512)((__v16sf)__a - (__v16sf)__b);
736 return (__m512i) ((__v8du) __A + (__v8du) __B);
742 return (__m512i) __builtin_ia32_paddq512_mask ((__v8di) __A,
751 return (__m512i) __builtin_ia32_paddq512_mask ((__v8di) __A,
761 return (__m512i) ((__v8du) __A - (__v8du) __B);
767 return (__m512i) __builtin_ia32_psubq512_mask ((__v8di) __A,
776 return (__m512i) __builtin_ia32_psubq512_mask ((__v8di) __A,
786 return (__m512i) ((__v16su) __A + (__v16su) __B);
792 return (__m512i) __builtin_ia32_paddd512_mask ((__v16si) __A,
801 return (__m512i) __builtin_ia32_paddd512_mask ((__v16si) __A,
811 return (__m512i) ((__v16su) __A - (__v16su) __B);
817 return (__m512i) __builtin_ia32_psubd512_mask ((__v16si) __A,
826 return (__m512i) __builtin_ia32_psubd512_mask ((__v16si) __A,
833 #define _mm512_mask_max_round_pd(W, U, A, B, R) __extension__ ({ \
834 (__m512d)__builtin_ia32_maxpd512_mask((__v8df)(__m512d)(A), \
835 (__v8df)(__m512d)(B), \
836 (__v8df)(__m512d)(W), (__mmask8)(U), \
839 #define _mm512_maskz_max_round_pd(U, A, B, R) __extension__ ({ \
840 (__m512d)__builtin_ia32_maxpd512_mask((__v8df)(__m512d)(A), \
841 (__v8df)(__m512d)(B), \
842 (__v8df)_mm512_setzero_pd(), \
843 (__mmask8)(U), (int)(R)); })
845 #define _mm512_max_round_pd(A, B, R) __extension__ ({ \
846 (__m512d)__builtin_ia32_maxpd512_mask((__v8df)(__m512d)(A), \
847 (__v8df)(__m512d)(B), \
848 (__v8df)_mm512_undefined_pd(), \
849 (__mmask8)-1, (int)(R)); })
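/* Naming convention used throughout this file: the plain form computes the
   full-width result, _mask_ merges it into W under the write-mask U,
   _maskz_ zeroes the elements whose mask bit is clear, and the *_round_*
   forms take an extra rounding-mode immediate R.  A minimal usage sketch
   (variable names here are illustrative only):

     __m512d a = _mm512_set1_pd(1.5), b = _mm512_set1_pd(2.5);
     __m512d m = _mm512_max_round_pd(a, b, _MM_FROUND_CUR_DIRECTION);
*/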
854 return (__m512d) __builtin_ia32_maxpd512_mask ((__v8df) __A,
865 return (__m512d) __builtin_ia32_maxpd512_mask ((__v8df) __A,
875 return (__m512d) __builtin_ia32_maxpd512_mask ((__v8df) __A,
883 #define _mm512_mask_max_round_ps(W, U, A, B, R) __extension__ ({ \
884 (__m512)__builtin_ia32_maxps512_mask((__v16sf)(__m512)(A), \
885 (__v16sf)(__m512)(B), \
886 (__v16sf)(__m512)(W), (__mmask16)(U), \
889 #define _mm512_maskz_max_round_ps(U, A, B, R) __extension__ ({ \
890 (__m512)__builtin_ia32_maxps512_mask((__v16sf)(__m512)(A), \
891 (__v16sf)(__m512)(B), \
892 (__v16sf)_mm512_setzero_ps(), \
893 (__mmask16)(U), (int)(R)); })
895 #define _mm512_max_round_ps(A, B, R) __extension__ ({ \
896 (__m512)__builtin_ia32_maxps512_mask((__v16sf)(__m512)(A), \
897 (__v16sf)(__m512)(B), \
898 (__v16sf)_mm512_undefined_ps(), \
899 (__mmask16)-1, (int)(R)); })
904 return (__m512) __builtin_ia32_maxps512_mask ((__v16sf) __A,
915 return (__m512) __builtin_ia32_maxps512_mask ((__v16sf) __A,
925 return (__m512) __builtin_ia32_maxps512_mask ((__v16sf) __A,
935 return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A,
944 return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A,
951 #define _mm_max_round_ss(A, B, R) __extension__ ({ \
952 (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
953 (__v4sf)(__m128)(B), \
954 (__v4sf)_mm_setzero_ps(), \
955 (__mmask8)-1, (int)(R)); })
957 #define _mm_mask_max_round_ss(W, U, A, B, R) __extension__ ({ \
958 (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
959 (__v4sf)(__m128)(B), \
960 (__v4sf)(__m128)(W), (__mmask8)(U), \
963 #define _mm_maskz_max_round_ss(U, A, B, R) __extension__ ({ \
964 (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
965 (__v4sf)(__m128)(B), \
966 (__v4sf)_mm_setzero_ps(), \
967 (__mmask8)(U), (int)(R)); })
971 return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A,
980 return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A,
987 #define _mm_max_round_sd(A, B, R) __extension__ ({ \
988 (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
989 (__v2df)(__m128d)(B), \
990 (__v2df)_mm_setzero_pd(), \
991 (__mmask8)-1, (int)(R)); })
993 #define _mm_mask_max_round_sd(W, U, A, B, R) __extension__ ({ \
994 (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
995 (__v2df)(__m128d)(B), \
996 (__v2df)(__m128d)(W), \
997 (__mmask8)(U), (int)(R)); })
999 #define _mm_maskz_max_round_sd(U, A, B, R) __extension__ ({ \
1000 (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
1001 (__v2df)(__m128d)(B), \
1002 (__v2df)_mm_setzero_pd(), \
1003 (__mmask8)(U), (int)(R)); })
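/* The scalar _ss/_sd forms operate on element 0 only; the upper elements of
   the result come from the first source operand, and for the masked forms
   element 0 is taken from W (or zeroed) when the low mask bit is clear. */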
1005 static __inline __m512i
1009 return (__m512i) __builtin_ia32_pmaxsd512_mask ((__v16si) __A,
1019 return (__m512i) __builtin_ia32_pmaxsd512_mask ((__v16si) __A,
1021 (__v16si) __W, __M);
1027 return (__m512i) __builtin_ia32_pmaxsd512_mask ((__v16si) __A,
1037 return (__m512i) __builtin_ia32_pmaxud512_mask ((__v16si) __A,
1047 return (__m512i) __builtin_ia32_pmaxud512_mask ((__v16si) __A,
1049 (__v16si) __W, __M);
1055 return (__m512i) __builtin_ia32_pmaxud512_mask ((__v16si) __A,
1065 return (__m512i) __builtin_ia32_pmaxsq512_mask ((__v8di) __A,
1075 return (__m512i) __builtin_ia32_pmaxsq512_mask ((__v8di) __A,
1083 return (__m512i) __builtin_ia32_pmaxsq512_mask ((__v8di) __A,
1093 return (__m512i) __builtin_ia32_pmaxuq512_mask ((__v8di) __A,
1103 return (__m512i) __builtin_ia32_pmaxuq512_mask ((__v8di) __A,
1111 return (__m512i) __builtin_ia32_pmaxuq512_mask ((__v8di) __A,
1118 #define _mm512_mask_min_round_pd(W, U, A, B, R) __extension__ ({ \
1119 (__m512d)__builtin_ia32_minpd512_mask((__v8df)(__m512d)(A), \
1120 (__v8df)(__m512d)(B), \
1121 (__v8df)(__m512d)(W), (__mmask8)(U), \
1124 #define _mm512_maskz_min_round_pd(U, A, B, R) __extension__ ({ \
1125 (__m512d)__builtin_ia32_minpd512_mask((__v8df)(__m512d)(A), \
1126 (__v8df)(__m512d)(B), \
1127 (__v8df)_mm512_setzero_pd(), \
1128 (__mmask8)(U), (int)(R)); })
1130 #define _mm512_min_round_pd(A, B, R) __extension__ ({ \
1131 (__m512d)__builtin_ia32_minpd512_mask((__v8df)(__m512d)(A), \
1132 (__v8df)(__m512d)(B), \
1133 (__v8df)_mm512_undefined_pd(), \
1134 (__mmask8)-1, (int)(R)); })
1139 return (__m512d) __builtin_ia32_minpd512_mask ((__v8df) __A,
1150 return (__m512d) __builtin_ia32_minpd512_mask ((__v8df) __A,
1157 #define _mm512_mask_min_round_ps(W, U, A, B, R) __extension__ ({ \
1158 (__m512)__builtin_ia32_minps512_mask((__v16sf)(__m512)(A), \
1159 (__v16sf)(__m512)(B), \
1160 (__v16sf)(__m512)(W), (__mmask16)(U), \
1163 #define _mm512_maskz_min_round_ps(U, A, B, R) __extension__ ({ \
1164 (__m512)__builtin_ia32_minps512_mask((__v16sf)(__m512)(A), \
1165 (__v16sf)(__m512)(B), \
1166 (__v16sf)_mm512_setzero_ps(), \
1167 (__mmask16)(U), (int)(R)); })
1169 #define _mm512_min_round_ps(A, B, R) __extension__ ({ \
1170 (__m512)__builtin_ia32_minps512_mask((__v16sf)(__m512)(A), \
1171 (__v16sf)(__m512)(B), \
1172 (__v16sf)_mm512_undefined_ps(), \
1173 (__mmask16)-1, (int)(R)); })
1178 return (__m512d) __builtin_ia32_minpd512_mask ((__v8df) __A,
1189 return (__m512) __builtin_ia32_minps512_mask ((__v16sf) __A,
1200 return (__m512) __builtin_ia32_minps512_mask ((__v16sf) __A,
1210 return (__m512) __builtin_ia32_minps512_mask ((__v16sf) __A,
1220 return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A,
1229 return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A,
1236 #define _mm_min_round_ss(A, B, R) __extension__ ({ \
1237 (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
1238 (__v4sf)(__m128)(B), \
1239 (__v4sf)_mm_setzero_ps(), \
1240 (__mmask8)-1, (int)(R)); })
1242 #define _mm_mask_min_round_ss(W, U, A, B, R) __extension__ ({ \
1243 (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
1244 (__v4sf)(__m128)(B), \
1245 (__v4sf)(__m128)(W), (__mmask8)(U), \
1248 #define _mm_maskz_min_round_ss(U, A, B, R) __extension__ ({ \
1249 (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
1250 (__v4sf)(__m128)(B), \
1251 (__v4sf)_mm_setzero_ps(), \
1252 (__mmask8)(U), (int)(R)); })
1256 return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A,
1265 return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A,
1272 #define _mm_min_round_sd(A, B, R) __extension__ ({ \
1273 (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
1274 (__v2df)(__m128d)(B), \
1275 (__v2df)_mm_setzero_pd(), \
1276 (__mmask8)-1, (int)(R)); })
1278 #define _mm_mask_min_round_sd(W, U, A, B, R) __extension__ ({ \
1279 (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
1280 (__v2df)(__m128d)(B), \
1281 (__v2df)(__m128d)(W), \
1282 (__mmask8)(U), (int)(R)); })
1284 #define _mm_maskz_min_round_sd(U, A, B, R) __extension__ ({ \
1285 (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
1286 (__v2df)(__m128d)(B), \
1287 (__v2df)_mm_setzero_pd(), \
1288 (__mmask8)(U), (int)(R)); })
1290 static __inline __m512i
1294 return (__m512i) __builtin_ia32_pminsd512_mask ((__v16si) __A,
1304 return (__m512i) __builtin_ia32_pminsd512_mask ((__v16si) __A,
1306 (__v16si) __W, __M);
1312 return (__m512i) __builtin_ia32_pminsd512_mask ((__v16si) __A,
1322 return (__m512i) __builtin_ia32_pminud512_mask ((__v16si) __A,
1332 return (__m512i) __builtin_ia32_pminud512_mask ((__v16si) __A,
1334 (__v16si) __W, __M);
1340 return (__m512i) __builtin_ia32_pminud512_mask ((__v16si) __A,
1350 return (__m512i) __builtin_ia32_pminsq512_mask ((__v8di) __A,
1360 return (__m512i) __builtin_ia32_pminsq512_mask ((__v8di) __A,
1368 return (__m512i) __builtin_ia32_pminsq512_mask ((__v8di) __A,
1378 return (__m512i) __builtin_ia32_pminuq512_mask ((__v8di) __A,
1388 return (__m512i) __builtin_ia32_pminuq512_mask ((__v8di) __A,
1396 return (__m512i) __builtin_ia32_pminuq512_mask ((__v8di) __A,
1406 return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X,
1416 return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X,
1424 return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X,
1434 return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X,
1444 return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X,
1452 return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X,
1462 return (__m512i) ((__v16su) __A * (__v16su) __B);
1468 return (__m512i) __builtin_ia32_pmulld512_mask ((__v16si) __A,
1478 return (__m512i) __builtin_ia32_pmulld512_mask ((__v16si) __A,
1480 (__v16si) __W, __M);
1483 #define _mm512_mask_sqrt_round_pd(W, U, A, R) __extension__ ({ \
1484 (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)(__m512d)(A), \
1485 (__v8df)(__m512d)(W), (__mmask8)(U), \
1488 #define _mm512_maskz_sqrt_round_pd(U, A, R) __extension__ ({ \
1489 (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)(__m512d)(A), \
1490 (__v8df)_mm512_setzero_pd(), \
1491 (__mmask8)(U), (int)(R)); })
1493 #define _mm512_sqrt_round_pd(A, R) __extension__ ({ \
1494 (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)(__m512d)(A), \
1495 (__v8df)_mm512_undefined_pd(), \
1496 (__mmask8)-1, (int)(R)); })
1501 return (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)__a,
1510 return (__m512d) __builtin_ia32_sqrtpd512_mask ((__v8df) __A,
1519 return (__m512d) __builtin_ia32_sqrtpd512_mask ((__v8df) __A,
1526 #define _mm512_mask_sqrt_round_ps(W, U, A, R) __extension__ ({ \
1527 (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)(__m512)(A), \
1528 (__v16sf)(__m512)(W), (__mmask16)(U), \
1531 #define _mm512_maskz_sqrt_round_ps(U, A, R) __extension__ ({ \
1532 (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)(__m512)(A), \
1533 (__v16sf)_mm512_setzero_ps(), \
1534 (__mmask16)(U), (int)(R)); })
1536 #define _mm512_sqrt_round_ps(A, R) __extension__ ({ \
1537 (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)(__m512)(A), \
1538 (__v16sf)_mm512_undefined_ps(), \
1539 (__mmask16)-1, (int)(R)); })
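/* The rsqrt14 and rcp14 families further below do not compute an exact
   result: they return hardware approximations of 1/sqrt(x) and 1/x with a
   maximum relative error of 2^-14. */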
1544 return (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)__a,
1553 return (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)__A,
1562 return (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)__A,
1571 return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
1579 return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
1587 return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
1596 return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
1605 return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
1613 return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
1622 return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
1632 return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
1641 return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
1650 return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __A,
1660 return (__m128d) __builtin_ia32_rsqrt14sd_mask ( (__v2df) __A,
1669 return (__m128d) __builtin_ia32_rsqrt14sd_mask ( (__v2df) __A,
1678 return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
1687 return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
1695 return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
1704 return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
1713 return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
1721 return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
1730 return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
1740 return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
1749 return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
1758 return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __A,
1768 return (__m128d) __builtin_ia32_rcp14sd_mask ( (__v2df) __A,
1777 return (__m128d) __builtin_ia32_rcp14sd_mask ( (__v2df) __A,
1786 return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
1795 return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
1804 return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
1813 return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
1822 return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
1831 return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
1840 return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
1849 return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
1858 return (__m512i) __builtin_ia32_pabsq512_mask ((__v8di) __A,
1867 return (__m512i) __builtin_ia32_pabsq512_mask ((__v8di) __A,
1875 return (__m512i) __builtin_ia32_pabsq512_mask ((__v8di) __A,
1884 return (__m512i) __builtin_ia32_pabsd512_mask ((__v16si) __A,
1893 return (__m512i) __builtin_ia32_pabsd512_mask ((__v16si) __A,
1901 return (__m512i) __builtin_ia32_pabsd512_mask ((__v16si) __A,
1909 return (__m128) __builtin_ia32_addss_round_mask ((__v4sf) __A,
1918 return (__m128) __builtin_ia32_addss_round_mask ((__v4sf) __A,
1925 #define _mm_add_round_ss(A, B, R) __extension__ ({ \
1926 (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
1927 (__v4sf)(__m128)(B), \
1928 (__v4sf)_mm_setzero_ps(), \
1929 (__mmask8)-1, (int)(R)); })
1931 #define _mm_mask_add_round_ss(W, U, A, B, R) __extension__ ({ \
1932 (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
1933 (__v4sf)(__m128)(B), \
1934 (__v4sf)(__m128)(W), (__mmask8)(U), \
1937 #define _mm_maskz_add_round_ss(U, A, B, R) __extension__ ({ \
1938 (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
1939 (__v4sf)(__m128)(B), \
1940 (__v4sf)_mm_setzero_ps(), \
1941 (__mmask8)(U), (int)(R)); })
1945 return (__m128d) __builtin_ia32_addsd_round_mask ((__v2df) __A,
1954 return (__m128d) __builtin_ia32_addsd_round_mask ((__v2df) __A,
1960 #define _mm_add_round_sd(A, B, R) __extension__ ({ \
1961 (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
1962 (__v2df)(__m128d)(B), \
1963 (__v2df)_mm_setzero_pd(), \
1964 (__mmask8)-1, (int)(R)); })
1966 #define _mm_mask_add_round_sd(W, U, A, B, R) __extension__ ({ \
1967 (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
1968 (__v2df)(__m128d)(B), \
1969 (__v2df)(__m128d)(W), \
1970 (__mmask8)(U), (int)(R)); })
1972 #define _mm_maskz_add_round_sd(U, A, B, R) __extension__ ({ \
1973 (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
1974 (__v2df)(__m128d)(B), \
1975 (__v2df)_mm_setzero_pd(), \
1976 (__mmask8)(U), (int)(R)); })
1980 return (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A,
1989 return (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A,
1998 return (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A,
2007 return (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A,
2014 #define _mm512_add_round_pd(A, B, R) __extension__ ({ \
2015 (__m512d)__builtin_ia32_addpd512_mask((__v8df)(__m512d)(A), \
2016 (__v8df)(__m512d)(B), \
2017 (__v8df)_mm512_setzero_pd(), \
2018 (__mmask8)-1, (int)(R)); })
2020 #define _mm512_mask_add_round_pd(W, U, A, B, R) __extension__ ({ \
2021 (__m512d)__builtin_ia32_addpd512_mask((__v8df)(__m512d)(A), \
2022 (__v8df)(__m512d)(B), \
2023 (__v8df)(__m512d)(W), (__mmask8)(U), \
2026 #define _mm512_maskz_add_round_pd(U, A, B, R) __extension__ ({ \
2027 (__m512d)__builtin_ia32_addpd512_mask((__v8df)(__m512d)(A), \
2028 (__v8df)(__m512d)(B), \
2029 (__v8df)_mm512_setzero_pd(), \
2030 (__mmask8)(U), (int)(R)); })
2032 #define _mm512_add_round_ps(A, B, R) __extension__ ({ \
2033 (__m512)__builtin_ia32_addps512_mask((__v16sf)(__m512)(A), \
2034 (__v16sf)(__m512)(B), \
2035 (__v16sf)_mm512_setzero_ps(), \
2036 (__mmask16)-1, (int)(R)); })
2038 #define _mm512_mask_add_round_ps(W, U, A, B, R) __extension__ ({ \
2039 (__m512)__builtin_ia32_addps512_mask((__v16sf)(__m512)(A), \
2040 (__v16sf)(__m512)(B), \
2041 (__v16sf)(__m512)(W), (__mmask16)(U), \
2044 #define _mm512_maskz_add_round_ps(U, A, B, R) __extension__ ({ \
2045 (__m512)__builtin_ia32_addps512_mask((__v16sf)(__m512)(A), \
2046 (__v16sf)(__m512)(B), \
2047 (__v16sf)_mm512_setzero_ps(), \
2048 (__mmask16)(U), (int)(R)); })
2052 return (__m128) __builtin_ia32_subss_round_mask ((__v4sf) __A,
2061 return (__m128) __builtin_ia32_subss_round_mask ((__v4sf) __A,
2067 #define _mm_sub_round_ss(A, B, R) __extension__ ({ \
2068 (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
2069 (__v4sf)(__m128)(B), \
2070 (__v4sf)_mm_setzero_ps(), \
2071 (__mmask8)-1, (int)(R)); })
2073 #define _mm_mask_sub_round_ss(W, U, A, B, R) __extension__ ({ \
2074 (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
2075 (__v4sf)(__m128)(B), \
2076 (__v4sf)(__m128)(W), (__mmask8)(U), \
2079 #define _mm_maskz_sub_round_ss(U, A, B, R) __extension__ ({ \
2080 (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
2081 (__v4sf)(__m128)(B), \
2082 (__v4sf)_mm_setzero_ps(), \
2083 (__mmask8)(U), (int)(R)); })
2087 return (__m128d) __builtin_ia32_subsd_round_mask ((__v2df) __A,
2096 return (__m128d) __builtin_ia32_subsd_round_mask ((__v2df) __A,
2103 #define _mm_sub_round_sd(A, B, R) __extension__ ({ \
2104 (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
2105 (__v2df)(__m128d)(B), \
2106 (__v2df)_mm_setzero_pd(), \
2107 (__mmask8)-1, (int)(R)); })
2109 #define _mm_mask_sub_round_sd(W, U, A, B, R) __extension__ ({ \
2110 (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
2111 (__v2df)(__m128d)(B), \
2112 (__v2df)(__m128d)(W), \
2113 (__mmask8)(U), (int)(R)); })
2115 #define _mm_maskz_sub_round_sd(U, A, B, R) __extension__ ({ \
2116 (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
2117 (__v2df)(__m128d)(B), \
2118 (__v2df)_mm_setzero_pd(), \
2119 (__mmask8)(U), (int)(R)); })
2123 return (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A,
2132 return (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A,
2142 return (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A,
2151 return (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A,
2159 #define _mm512_sub_round_pd(A, B, R) __extension__ ({ \
2160 (__m512d)__builtin_ia32_subpd512_mask((__v8df)(__m512d)(A), \
2161 (__v8df)(__m512d)(B), \
2162 (__v8df)_mm512_setzero_pd(), \
2163 (__mmask8)-1, (int)(R)); })
2165 #define _mm512_mask_sub_round_pd(W, U, A, B, R) __extension__ ({ \
2166 (__m512d)__builtin_ia32_subpd512_mask((__v8df)(__m512d)(A), \
2167 (__v8df)(__m512d)(B), \
2168 (__v8df)(__m512d)(W), (__mmask8)(U), \
2171 #define _mm512_maskz_sub_round_pd(U, A, B, R) __extension__ ({ \
2172 (__m512d)__builtin_ia32_subpd512_mask((__v8df)(__m512d)(A), \
2173 (__v8df)(__m512d)(B), \
2174 (__v8df)_mm512_setzero_pd(), \
2175 (__mmask8)(U), (int)(R)); })
2177 #define _mm512_sub_round_ps(A, B, R) __extension__ ({ \
2178 (__m512)__builtin_ia32_subps512_mask((__v16sf)(__m512)(A), \
2179 (__v16sf)(__m512)(B), \
2180 (__v16sf)_mm512_setzero_ps(), \
2181 (__mmask16)-1, (int)(R)); })
2183 #define _mm512_mask_sub_round_ps(W, U, A, B, R) __extension__ ({ \
2184 (__m512)__builtin_ia32_subps512_mask((__v16sf)(__m512)(A), \
2185 (__v16sf)(__m512)(B), \
2186 (__v16sf)(__m512)(W), (__mmask16)(U), \
2189 #define _mm512_maskz_sub_round_ps(U, A, B, R) __extension__ ({ \
2190 (__m512)__builtin_ia32_subps512_mask((__v16sf)(__m512)(A), \
2191 (__v16sf)(__m512)(B), \
2192 (__v16sf)_mm512_setzero_ps(), \
2193 (__mmask16)(U), (int)(R)); })
2197 return (__m128) __builtin_ia32_mulss_round_mask ((__v4sf) __A,
2206 return (__m128) __builtin_ia32_mulss_round_mask ((__v4sf) __A,
2212 #define _mm_mul_round_ss(A, B, R) __extension__ ({ \
2213 (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
2214 (__v4sf)(__m128)(B), \
2215 (__v4sf)_mm_setzero_ps(), \
2216 (__mmask8)-1, (int)(R)); })
2218 #define _mm_mask_mul_round_ss(W, U, A, B, R) __extension__ ({ \
2219 (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
2220 (__v4sf)(__m128)(B), \
2221 (__v4sf)(__m128)(W), (__mmask8)(U), \
2224 #define _mm_maskz_mul_round_ss(U, A, B, R) __extension__ ({ \
2225 (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
2226 (__v4sf)(__m128)(B), \
2227 (__v4sf)_mm_setzero_ps(), \
2228 (__mmask8)(U), (int)(R)); })
2232 return (__m128d) __builtin_ia32_mulsd_round_mask ((__v2df) __A,
2241 return (__m128d) __builtin_ia32_mulsd_round_mask ((__v2df) __A,
2248 #define _mm_mul_round_sd(A, B, R) __extension__ ({ \
2249 (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
2250 (__v2df)(__m128d)(B), \
2251 (__v2df)_mm_setzero_pd(), \
2252 (__mmask8)-1, (int)(R)); })
2254 #define _mm_mask_mul_round_sd(W, U, A, B, R) __extension__ ({ \
2255 (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
2256 (__v2df)(__m128d)(B), \
2257 (__v2df)(__m128d)(W), \
2258 (__mmask8)(U), (int)(R)); })
2260 #define _mm_maskz_mul_round_sd(U, A, B, R) __extension__ ({ \
2261 (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
2262 (__v2df)(__m128d)(B), \
2263 (__v2df)_mm_setzero_pd(), \
2264 (__mmask8)(U), (int)(R)); })
2268 return (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A,
2277 return (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A,
2287 return (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A,
2296 return (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A,
2304 #define _mm512_mul_round_pd(A, B, R) __extension__ ({ \
2305 (__m512d)__builtin_ia32_mulpd512_mask((__v8df)(__m512d)(A), \
2306 (__v8df)(__m512d)(B), \
2307 (__v8df)_mm512_setzero_pd(), \
2308 (__mmask8)-1, (int)(R)); })
2310 #define _mm512_mask_mul_round_pd(W, U, A, B, R) __extension__ ({ \
2311 (__m512d)__builtin_ia32_mulpd512_mask((__v8df)(__m512d)(A), \
2312 (__v8df)(__m512d)(B), \
2313 (__v8df)(__m512d)(W), (__mmask8)(U), \
2316 #define _mm512_maskz_mul_round_pd(U, A, B, R) __extension__ ({ \
2317 (__m512d)__builtin_ia32_mulpd512_mask((__v8df)(__m512d)(A), \
2318 (__v8df)(__m512d)(B), \
2319 (__v8df)_mm512_setzero_pd(), \
2320 (__mmask8)(U), (int)(R)); })
2322 #define _mm512_mul_round_ps(A, B, R) __extension__ ({ \
2323 (__m512)__builtin_ia32_mulps512_mask((__v16sf)(__m512)(A), \
2324 (__v16sf)(__m512)(B), \
2325 (__v16sf)_mm512_setzero_ps(), \
2326 (__mmask16)-1, (int)(R)); })
2328 #define _mm512_mask_mul_round_ps(W, U, A, B, R) __extension__ ({ \
2329 (__m512)__builtin_ia32_mulps512_mask((__v16sf)(__m512)(A), \
2330 (__v16sf)(__m512)(B), \
2331 (__v16sf)(__m512)(W), (__mmask16)(U), \
2334 #define _mm512_maskz_mul_round_ps(U, A, B, R) __extension__ ({ \
2335 (__m512)__builtin_ia32_mulps512_mask((__v16sf)(__m512)(A), \
2336 (__v16sf)(__m512)(B), \
2337 (__v16sf)_mm512_setzero_ps(), \
2338 (__mmask16)(U), (int)(R)); })
2342 return (__m128) __builtin_ia32_divss_round_mask ((__v4sf) __A,
2351 return (__m128) __builtin_ia32_divss_round_mask ((__v4sf) __A,
2358 #define _mm_div_round_ss(A, B, R) __extension__ ({ \
2359 (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
2360 (__v4sf)(__m128)(B), \
2361 (__v4sf)_mm_setzero_ps(), \
2362 (__mmask8)-1, (int)(R)); })
2364 #define _mm_mask_div_round_ss(W, U, A, B, R) __extension__ ({ \
2365 (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
2366 (__v4sf)(__m128)(B), \
2367 (__v4sf)(__m128)(W), (__mmask8)(U), \
2370 #define _mm_maskz_div_round_ss(U, A, B, R) __extension__ ({ \
2371 (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
2372 (__v4sf)(__m128)(B), \
2373 (__v4sf)_mm_setzero_ps(), \
2374 (__mmask8)(U), (int)(R)); })
2378 return (__m128d) __builtin_ia32_divsd_round_mask ((__v2df) __A,
2387 return (__m128d) __builtin_ia32_divsd_round_mask ((__v2df) __A,
2394 #define _mm_div_round_sd(A, B, R) __extension__ ({ \
2395 (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
2396 (__v2df)(__m128d)(B), \
2397 (__v2df)_mm_setzero_pd(), \
2398 (__mmask8)-1, (int)(R)); })
2400 #define _mm_mask_div_round_sd(W, U, A, B, R) __extension__ ({ \
2401 (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
2402 (__v2df)(__m128d)(B), \
2403 (__v2df)(__m128d)(W), \
2404 (__mmask8)(U), (int)(R)); })
2406 #define _mm_maskz_div_round_sd(U, A, B, R) __extension__ ({ \
2407 (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
2408 (__v2df)(__m128d)(B), \
2409 (__v2df)_mm_setzero_pd(), \
2410 (__mmask8)(U), (int)(R)); })
2415 return (__m512d)((__v8df)__a/(__v8df)__b);
2420 return (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A,
2429 return (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A,
2440 return (__m512)((__v16sf)__a/(__v16sf)__b);
2445 return (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A,
2454 return (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A,
2462 #define _mm512_div_round_pd(A, B, R) __extension__ ({ \
2463 (__m512d)__builtin_ia32_divpd512_mask((__v8df)(__m512d)(A), \
2464 (__v8df)(__m512d)(B), \
2465 (__v8df)_mm512_setzero_pd(), \
2466 (__mmask8)-1, (int)(R)); })
2468 #define _mm512_mask_div_round_pd(W, U, A, B, R) __extension__ ({ \
2469 (__m512d)__builtin_ia32_divpd512_mask((__v8df)(__m512d)(A), \
2470 (__v8df)(__m512d)(B), \
2471 (__v8df)(__m512d)(W), (__mmask8)(U), \
2474 #define _mm512_maskz_div_round_pd(U, A, B, R) __extension__ ({ \
2475 (__m512d)__builtin_ia32_divpd512_mask((__v8df)(__m512d)(A), \
2476 (__v8df)(__m512d)(B), \
2477 (__v8df)_mm512_setzero_pd(), \
2478 (__mmask8)(U), (int)(R)); })
2480 #define _mm512_div_round_ps(A, B, R) __extension__ ({ \
2481 (__m512)__builtin_ia32_divps512_mask((__v16sf)(__m512)(A), \
2482 (__v16sf)(__m512)(B), \
2483 (__v16sf)_mm512_setzero_ps(), \
2484 (__mmask16)-1, (int)(R)); })
2486 #define _mm512_mask_div_round_ps(W, U, A, B, R) __extension__ ({ \
2487 (__m512)__builtin_ia32_divps512_mask((__v16sf)(__m512)(A), \
2488 (__v16sf)(__m512)(B), \
2489 (__v16sf)(__m512)(W), (__mmask16)(U), \
2492 #define _mm512_maskz_div_round_ps(U, A, B, R) __extension__ ({ \
2493 (__m512)__builtin_ia32_divps512_mask((__v16sf)(__m512)(A), \
2494 (__v16sf)(__m512)(B), \
2495 (__v16sf)_mm512_setzero_ps(), \
2496 (__mmask16)(U), (int)(R)); })
2498 #define _mm512_roundscale_ps(A, B) __extension__ ({ \
2499 (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(B), \
2500 (__v16sf)(__m512)(A), (__mmask16)-1, \
2501 _MM_FROUND_CUR_DIRECTION); })
2503 #define _mm512_mask_roundscale_ps(A, B, C, imm) __extension__ ({\
2504 (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
2505 (__v16sf)(__m512)(A), (__mmask16)(B), \
2506 _MM_FROUND_CUR_DIRECTION); })
2508 #define _mm512_maskz_roundscale_ps(A, B, imm) __extension__ ({\
2509 (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
2510 (__v16sf)_mm512_setzero_ps(), \
2512 _MM_FROUND_CUR_DIRECTION); })
2514 #define _mm512_mask_roundscale_round_ps(A, B, C, imm, R) __extension__ ({ \
2515 (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
2516 (__v16sf)(__m512)(A), (__mmask16)(B), \
2519 #define _mm512_maskz_roundscale_round_ps(A, B, imm, R) __extension__ ({ \
2520 (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
2521 (__v16sf)_mm512_setzero_ps(), \
2522 (__mmask16)(A), (int)(R)); })
2524 #define _mm512_roundscale_round_ps(A, imm, R) __extension__ ({ \
2525 (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(imm), \
2526 (__v16sf)_mm512_undefined_ps(), \
2527 (__mmask16)-1, (int)(R)); })
2529 #define _mm512_roundscale_pd(A, B) __extension__ ({ \
2530 (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(B), \
2531 (__v8df)(__m512d)(A), (__mmask8)-1, \
2532 _MM_FROUND_CUR_DIRECTION); })
2534 #define _mm512_mask_roundscale_pd(A, B, C, imm) __extension__ ({\
2535 (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
2536 (__v8df)(__m512d)(A), (__mmask8)(B), \
2537 _MM_FROUND_CUR_DIRECTION); })
2539 #define _mm512_maskz_roundscale_pd(A, B, imm) __extension__ ({\
2540 (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
2541 (__v8df)_mm512_setzero_pd(), \
2543 _MM_FROUND_CUR_DIRECTION); })
2545 #define _mm512_mask_roundscale_round_pd(A, B, C, imm, R) __extension__ ({ \
2546 (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
2547 (__v8df)(__m512d)(A), (__mmask8)(B), \
2550 #define _mm512_maskz_roundscale_round_pd(A, B, imm, R) __extension__ ({ \
2551 (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
2552 (__v8df)_mm512_setzero_pd(), \
2553 (__mmask8)(A), (int)(R)); })
2555 #define _mm512_roundscale_round_pd(A, imm, R) __extension__ ({ \
2556 (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(imm), \
2557 (__v8df)_mm512_undefined_pd(), \
2558 (__mmask8)-1, (int)(R)); })
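/* roundscale rounds each element to a number of fraction bits encoded in the
   upper four bits of the immediate; with zero fraction bits the
   _MM_FROUND_TO_* values defined above give the familiar round-to-nearest,
   floor, ceil and truncate behaviors. */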
2560 #define _mm512_fmadd_round_pd(A, B, C, R) __extension__ ({ \
2561 (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
2562 (__v8df)(__m512d)(B), \
2563 (__v8df)(__m512d)(C), (__mmask8)-1, \
2567 #define _mm512_mask_fmadd_round_pd(A, U, B, C, R) __extension__ ({ \
2568 (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
2569 (__v8df)(__m512d)(B), \
2570 (__v8df)(__m512d)(C), \
2571 (__mmask8)(U), (int)(R)); })
2574 #define _mm512_mask3_fmadd_round_pd(A, B, C, U, R) __extension__ ({ \
2575 (__m512d)__builtin_ia32_vfmaddpd512_mask3((__v8df)(__m512d)(A), \
2576 (__v8df)(__m512d)(B), \
2577 (__v8df)(__m512d)(C), \
2578 (__mmask8)(U), (int)(R)); })
2581 #define _mm512_maskz_fmadd_round_pd(U, A, B, C, R) __extension__ ({ \
2582 (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
2583 (__v8df)(__m512d)(B), \
2584 (__v8df)(__m512d)(C), \
2585 (__mmask8)(U), (int)(R)); })
2588 #define _mm512_fmsub_round_pd(A, B, C, R) __extension__ ({ \
2589 (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
2590 (__v8df)(__m512d)(B), \
2591 -(__v8df)(__m512d)(C), \
2592 (__mmask8)-1, (int)(R)); })
2595 #define _mm512_mask_fmsub_round_pd(A, U, B, C, R) __extension__ ({ \
2596 (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
2597 (__v8df)(__m512d)(B), \
2598 -(__v8df)(__m512d)(C), \
2599 (__mmask8)(U), (int)(R)); })
2602 #define _mm512_maskz_fmsub_round_pd(U, A, B, C, R) __extension__ ({ \
2603 (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
2604 (__v8df)(__m512d)(B), \
2605 -(__v8df)(__m512d)(C), \
2606 (__mmask8)(U), (int)(R)); })
2609 #define _mm512_fnmadd_round_pd(A, B, C, R) __extension__ ({ \
2610 (__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
2611 (__v8df)(__m512d)(B), \
2612 (__v8df)(__m512d)(C), (__mmask8)-1, \
2616 #define _mm512_mask3_fnmadd_round_pd(A, B, C, U, R) __extension__ ({ \
2617 (__m512d)__builtin_ia32_vfmaddpd512_mask3(-(__v8df)(__m512d)(A), \
2618 (__v8df)(__m512d)(B), \
2619 (__v8df)(__m512d)(C), \
2620 (__mmask8)(U), (int)(R)); })
2623 #define _mm512_maskz_fnmadd_round_pd(U, A, B, C, R) __extension__ ({ \
2624 (__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
2625 (__v8df)(__m512d)(B), \
2626 (__v8df)(__m512d)(C), \
2627 (__mmask8)(U), (int)(R)); })
2630 #define _mm512_fnmsub_round_pd(A, B, C, R) __extension__ ({ \
2631 (__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
2632 (__v8df)(__m512d)(B), \
2633 -(__v8df)(__m512d)(C), \
2634 (__mmask8)-1, (int)(R)); })
2637 #define _mm512_maskz_fnmsub_round_pd(U, A, B, C, R) __extension__ ({ \
2638 (__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
2639 (__v8df)(__m512d)(B), \
2640 -(__v8df)(__m512d)(C), \
2641 (__mmask8)(U), (int)(R)); })
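/* There is no separate fused multiply-subtract builtin for most of these
   forms: fmsub, fnmadd and fnmsub are obtained from the same vfmaddpd/vfmaddps
   builtin by negating the A and/or C operands, as the macro bodies above
   show. */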
2647 return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
2657 return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
2667 return (__m512d) __builtin_ia32_vfmaddpd512_mask3 ((__v8df) __A,
2677 return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A,
2687 return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
2697 return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
2707 return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A,
2717 return (__m512d) __builtin_ia32_vfmaddpd512_mask (-(__v8df) __A,
2727 return (__m512d) __builtin_ia32_vfmaddpd512_mask3 (-(__v8df) __A,
2737 return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A,
2747 return (__m512d) __builtin_ia32_vfmaddpd512_mask (-(__v8df) __A,
2757 return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A,
2764 #define _mm512_fmadd_round_ps(A, B, C, R) __extension__ ({ \
2765 (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
2766 (__v16sf)(__m512)(B), \
2767 (__v16sf)(__m512)(C), (__mmask16)-1, \
2771 #define _mm512_mask_fmadd_round_ps(A, U, B, C, R) __extension__ ({ \
2772 (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
2773 (__v16sf)(__m512)(B), \
2774 (__v16sf)(__m512)(C), \
2775 (__mmask16)(U), (int)(R)); })
2778 #define _mm512_mask3_fmadd_round_ps(A, B, C, U, R) __extension__ ({ \
2779 (__m512)__builtin_ia32_vfmaddps512_mask3((__v16sf)(__m512)(A), \
2780 (__v16sf)(__m512)(B), \
2781 (__v16sf)(__m512)(C), \
2782 (__mmask16)(U), (int)(R)); })
2785 #define _mm512_maskz_fmadd_round_ps(U, A, B, C, R) __extension__ ({ \
2786 (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
2787 (__v16sf)(__m512)(B), \
2788 (__v16sf)(__m512)(C), \
2789 (__mmask16)(U), (int)(R)); })
2792 #define _mm512_fmsub_round_ps(A, B, C, R) __extension__ ({ \
2793 (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
2794 (__v16sf)(__m512)(B), \
2795 -(__v16sf)(__m512)(C), \
2796 (__mmask16)-1, (int)(R)); })
2799 #define _mm512_mask_fmsub_round_ps(A, U, B, C, R) __extension__ ({ \
2800 (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
2801 (__v16sf)(__m512)(B), \
2802 -(__v16sf)(__m512)(C), \
2803 (__mmask16)(U), (int)(R)); })
2806 #define _mm512_maskz_fmsub_round_ps(U, A, B, C, R) __extension__ ({ \
2807 (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
2808 (__v16sf)(__m512)(B), \
2809 -(__v16sf)(__m512)(C), \
2810 (__mmask16)(U), (int)(R)); })
2813 #define _mm512_fnmadd_round_ps(A, B, C, R) __extension__ ({ \
2814 (__m512)__builtin_ia32_vfmaddps512_mask(-(__v16sf)(__m512)(A), \
2815 (__v16sf)(__m512)(B), \
2816 (__v16sf)(__m512)(C), (__mmask16)-1, \
2820 #define _mm512_mask3_fnmadd_round_ps(A, B, C, U, R) __extension__ ({ \
2821 (__m512)__builtin_ia32_vfmaddps512_mask3(-(__v16sf)(__m512)(A), \
2822 (__v16sf)(__m512)(B), \
2823 (__v16sf)(__m512)(C), \
2824 (__mmask16)(U), (int)(R)); })
2827 #define _mm512_maskz_fnmadd_round_ps(U, A, B, C, R) __extension__ ({ \
2828 (__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
2829 (__v16sf)(__m512)(B), \
2830 (__v16sf)(__m512)(C), \
2831 (__mmask16)(U), (int)(R)); })
2834 #define _mm512_fnmsub_round_ps(A, B, C, R) __extension__ ({ \
2835 (__m512)__builtin_ia32_vfmaddps512_mask(-(__v16sf)(__m512)(A), \
2836 (__v16sf)(__m512)(B), \
2837 -(__v16sf)(__m512)(C), \
2838 (__mmask16)-1, (int)(R)); })
2841 #define _mm512_maskz_fnmsub_round_ps(U, A, B, C, R) __extension__ ({ \
2842 (__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
2843 (__v16sf)(__m512)(B), \
2844 -(__v16sf)(__m512)(C), \
2845 (__mmask16)(U), (int)(R)); })
2851 return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
2861 return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
2871 return (__m512) __builtin_ia32_vfmaddps512_mask3 ((__v16sf) __A,
2881 return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A,
2891 return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
2901 return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
2911 return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A,
2921 return (__m512) __builtin_ia32_vfmaddps512_mask (-(__v16sf) __A,
2931 return (__m512) __builtin_ia32_vfmaddps512_mask3 (-(__v16sf) __A,
2941 return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A,
2951 return (__m512) __builtin_ia32_vfmaddps512_mask (-(__v16sf) __A,
2961 return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A,
2968 #define _mm512_fmaddsub_round_pd(A, B, C, R) __extension__ ({ \
2969 (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
2970 (__v8df)(__m512d)(B), \
2971 (__v8df)(__m512d)(C), \
2972 (__mmask8)-1, (int)(R)); })
2975 #define _mm512_mask_fmaddsub_round_pd(A, U, B, C, R) __extension__ ({ \
2976 (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
2977 (__v8df)(__m512d)(B), \
2978 (__v8df)(__m512d)(C), \
2979 (__mmask8)(U), (int)(R)); })
2982 #define _mm512_mask3_fmaddsub_round_pd(A, B, C, U, R) __extension__ ({ \
2983 (__m512d)__builtin_ia32_vfmaddsubpd512_mask3((__v8df)(__m512d)(A), \
2984 (__v8df)(__m512d)(B), \
2985 (__v8df)(__m512d)(C), \
2986 (__mmask8)(U), (int)(R)); })
2989 #define _mm512_maskz_fmaddsub_round_pd(U, A, B, C, R) __extension__ ({ \
2990 (__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
2991 (__v8df)(__m512d)(B), \
2992 (__v8df)(__m512d)(C), \
2993 (__mmask8)(U), (int)(R)); })
2996 #define _mm512_fmsubadd_round_pd(A, B, C, R) __extension__ ({ \
2997 (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
2998 (__v8df)(__m512d)(B), \
2999 -(__v8df)(__m512d)(C), \
3000 (__mmask8)-1, (int)(R)); })
3003 #define _mm512_mask_fmsubadd_round_pd(A, U, B, C, R) __extension__ ({ \
3004 (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
3005 (__v8df)(__m512d)(B), \
3006 -(__v8df)(__m512d)(C), \
3007 (__mmask8)(U), (int)(R)); })
3010 #define _mm512_maskz_fmsubadd_round_pd(U, A, B, C, R) __extension__ ({ \
3011 (__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
3012 (__v8df)(__m512d)(B), \
3013 -(__v8df)(__m512d)(C), \
3014 (__mmask8)(U), (int)(R)); })
3020 return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
3030 return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
3040 return (__m512d) __builtin_ia32_vfmaddsubpd512_mask3 ((__v8df) __A,
3050 return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A,
3060 return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
3070 return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
3080 return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A,
3087 #define _mm512_fmaddsub_round_ps(A, B, C, R) __extension__ ({ \
3088 (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
3089 (__v16sf)(__m512)(B), \
3090 (__v16sf)(__m512)(C), \
3091 (__mmask16)-1, (int)(R)); })
3094 #define _mm512_mask_fmaddsub_round_ps(A, U, B, C, R) __extension__ ({ \
3095 (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
3096 (__v16sf)(__m512)(B), \
3097 (__v16sf)(__m512)(C), \
3098 (__mmask16)(U), (int)(R)); })
3101 #define _mm512_mask3_fmaddsub_round_ps(A, B, C, U, R) __extension__ ({ \
3102 (__m512)__builtin_ia32_vfmaddsubps512_mask3((__v16sf)(__m512)(A), \
3103 (__v16sf)(__m512)(B), \
3104 (__v16sf)(__m512)(C), \
3105 (__mmask16)(U), (int)(R)); })
3108 #define _mm512_maskz_fmaddsub_round_ps(U, A, B, C, R) __extension__ ({ \
3109 (__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
3110 (__v16sf)(__m512)(B), \
3111 (__v16sf)(__m512)(C), \
3112 (__mmask16)(U), (int)(R)); })
3115 #define _mm512_fmsubadd_round_ps(A, B, C, R) __extension__ ({ \
3116 (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
3117 (__v16sf)(__m512)(B), \
3118 -(__v16sf)(__m512)(C), \
3119 (__mmask16)-1, (int)(R)); })
3122 #define _mm512_mask_fmsubadd_round_ps(A, U, B, C, R) __extension__ ({ \
3123 (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
3124 (__v16sf)(__m512)(B), \
3125 -(__v16sf)(__m512)(C), \
3126 (__mmask16)(U), (int)(R)); })
3129 #define _mm512_maskz_fmsubadd_round_ps(U, A, B, C, R) __extension__ ({ \
3130 (__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
3131 (__v16sf)(__m512)(B), \
3132 -(__v16sf)(__m512)(C), \
3133 (__mmask16)(U), (int)(R)); })
3139 return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
3149 return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
3159 return (__m512) __builtin_ia32_vfmaddsubps512_mask3 ((__v16sf) __A,
3169 return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A,
3179 return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
3189 return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
3199 return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A,
3206 #define _mm512_mask3_fmsub_round_pd(A, B, C, U, R) __extension__ ({ \
3207 (__m512d)__builtin_ia32_vfmsubpd512_mask3((__v8df)(__m512d)(A), \
3208 (__v8df)(__m512d)(B), \
3209 (__v8df)(__m512d)(C), \
3210 (__mmask8)(U), (int)(R)); })
3216 return (__m512d) __builtin_ia32_vfmsubpd512_mask3 ((__v8df) __A,
3223 #define _mm512_mask3_fmsub_round_ps(A, B, C, U, R) __extension__ ({ \
3224 (__m512)__builtin_ia32_vfmsubps512_mask3((__v16sf)(__m512)(A), \
3225 (__v16sf)(__m512)(B), \
3226 (__v16sf)(__m512)(C), \
3227 (__mmask16)(U), (int)(R)); })
3233 return (__m512) __builtin_ia32_vfmsubps512_mask3 ((__v16sf) __A,
3240 #define _mm512_mask3_fmsubadd_round_pd(A, B, C, U, R) __extension__ ({ \
3241 (__m512d)__builtin_ia32_vfmsubaddpd512_mask3((__v8df)(__m512d)(A), \
3242 (__v8df)(__m512d)(B), \
3243 (__v8df)(__m512d)(C), \
3244 (__mmask8)(U), (int)(R)); })
3250 return (__m512d) __builtin_ia32_vfmsubaddpd512_mask3 ((__v8df) __A,
3257 #define _mm512_mask3_fmsubadd_round_ps(A, B, C, U, R) __extension__ ({ \
3258 (__m512)__builtin_ia32_vfmsubaddps512_mask3((__v16sf)(__m512)(A), \
3259 (__v16sf)(__m512)(B), \
3260 (__v16sf)(__m512)(C), \
3261 (__mmask16)(U), (int)(R)); })
3267 return (__m512) __builtin_ia32_vfmsubaddps512_mask3 ((__v16sf) __A,
3274 #define _mm512_mask_fnmadd_round_pd(A, U, B, C, R) __extension__ ({ \
3275 (__m512d)__builtin_ia32_vfnmaddpd512_mask((__v8df)(__m512d)(A), \
3276 (__v8df)(__m512d)(B), \
3277 (__v8df)(__m512d)(C), \
3278 (__mmask8)(U), (int)(R)); })
3284 return (__m512d) __builtin_ia32_vfnmaddpd512_mask ((__v8df) __A,
3291 #define _mm512_mask_fnmadd_round_ps(A, U, B, C, R) __extension__ ({ \
3292 (__m512)__builtin_ia32_vfnmaddps512_mask((__v16sf)(__m512)(A), \
3293 (__v16sf)(__m512)(B), \
3294 (__v16sf)(__m512)(C), \
3295 (__mmask16)(U), (int)(R)); })
3301 return (__m512) __builtin_ia32_vfnmaddps512_mask ((__v16sf) __A,
3308 #define _mm512_mask_fnmsub_round_pd(A, U, B, C, R) __extension__ ({ \
3309 (__m512d)__builtin_ia32_vfnmsubpd512_mask((__v8df)(__m512d)(A), \
3310 (__v8df)(__m512d)(B), \
3311 (__v8df)(__m512d)(C), \
3312 (__mmask8)(U), (int)(R)); })
3315 #define _mm512_mask3_fnmsub_round_pd(A, B, C, U, R) __extension__ ({ \
3316 (__m512d)__builtin_ia32_vfnmsubpd512_mask3((__v8df)(__m512d)(A), \
3317 (__v8df)(__m512d)(B), \
3318 (__v8df)(__m512d)(C), \
3319 (__mmask8)(U), (int)(R)); })
3325 return (__m512d) __builtin_ia32_vfnmsubpd512_mask ((__v8df) __A,
3335 return (__m512d) __builtin_ia32_vfnmsubpd512_mask3 ((__v8df) __A,
3342 #define _mm512_mask_fnmsub_round_ps(A, U, B, C, R) __extension__ ({ \
3343 (__m512)__builtin_ia32_vfnmsubps512_mask((__v16sf)(__m512)(A), \
3344 (__v16sf)(__m512)(B), \
3345 (__v16sf)(__m512)(C), \
3346 (__mmask16)(U), (int)(R)); })
3349 #define _mm512_mask3_fnmsub_round_ps(A, B, C, U, R) __extension__ ({ \
3350 (__m512)__builtin_ia32_vfnmsubps512_mask3((__v16sf)(__m512)(A), \
3351 (__v16sf)(__m512)(B), \
3352 (__v16sf)(__m512)(C), \
3353 (__mmask16)(U), (int)(R)); })
3359 return (__m512) __builtin_ia32_vfnmsubps512_mask ((__v16sf) __A,
3369 return (__m512) __builtin_ia32_vfnmsubps512_mask3 ((__v16sf) __A,
3383 return (__m512i) __builtin_ia32_vpermt2vard512_mask ((__v16si) __I
3392 __m512i __I, __m512i __B)
3394 return (__m512i) __builtin_ia32_vpermt2vard512_mask ((__v16si) __I
3403 __m512i __I, __m512i __B)
3405 return (__m512i) __builtin_ia32_vpermt2vard512_maskz ((__v16si) __I
3415 return (__m512i) __builtin_ia32_vpermt2varq512_mask ((__v8di) __I
3426 return (__m512i) __builtin_ia32_vpermt2varq512_mask ((__v8di) __I
3436 __m512i __I, __m512i __B)
3438 return (__m512i) __builtin_ia32_vpermt2varq512_maskz ((__v8di) __I
3445 #define _mm512_alignr_epi64(A, B, I) __extension__ ({ \
3446 (__m512i)__builtin_ia32_alignq512_mask((__v8di)(__m512i)(A), \
3447 (__v8di)(__m512i)(B), (int)(I), \
3448 (__v8di)_mm512_setzero_si512(), \
3451 #define _mm512_mask_alignr_epi64(W, U, A, B, imm) __extension__({\
3452 (__m512i)__builtin_ia32_alignq512_mask((__v8di)(__m512i)(A), \
3453 (__v8di)(__m512i)(B), (int)(imm), \
3454 (__v8di)(__m512i)(W), \
3457 #define _mm512_maskz_alignr_epi64(U, A, B, imm) __extension__({\
3458 (__m512i)__builtin_ia32_alignq512_mask((__v8di)(__m512i)(A), \
3459 (__v8di)(__m512i)(B), (int)(imm), \
3460 (__v8di)_mm512_setzero_si512(), \
3463 #define _mm512_alignr_epi32(A, B, I) __extension__ ({ \
3464 (__m512i)__builtin_ia32_alignd512_mask((__v16si)(__m512i)(A), \
3465 (__v16si)(__m512i)(B), (int)(I), \
3466 (__v16si)_mm512_setzero_si512(), \
3469 #define _mm512_mask_alignr_epi32(W, U, A, B, imm) __extension__ ({\
3470 (__m512i)__builtin_ia32_alignd512_mask((__v16si)(__m512i)(A), \
3471 (__v16si)(__m512i)(B), (int)(imm), \
3472 (__v16si)(__m512i)(W), \
3475 #define _mm512_maskz_alignr_epi32(U, A, B, imm) __extension__({\
3476 (__m512i)__builtin_ia32_alignd512_mask((__v16si)(__m512i)(A), \
3477 (__v16si)(__m512i)(B), (int)(imm), \
3478 (__v16si)_mm512_setzero_si512(), \
3482 #define _mm512_extractf64x4_pd(A, I) __extension__ ({ \
3483 (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
3484 (__v4df)_mm256_setzero_pd(), \
3487 #define _mm512_mask_extractf64x4_pd(W, U, A, imm) __extension__ ({\
3488 (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
3489 (__v4df)(__m256d)(W), \
3492 #define _mm512_maskz_extractf64x4_pd(U, A, imm) __extension__ ({\
3493 (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
3494 (__v4df)_mm256_setzero_pd(), \
3497 #define _mm512_extractf32x4_ps(A, I) __extension__ ({ \
3498 (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \
3499 (__v4sf)_mm_setzero_ps(), \
3502 #define _mm512_mask_extractf32x4_ps(W, U, A, imm) __extension__ ({\
3503 (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
3504 (__v4sf)(__m128)(W), \
3507 #define _mm512_maskz_extractf32x4_ps(U, A, imm) __extension__ ({\
3508 (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
3509 (__v4sf)_mm_setzero_ps(), \
3516 return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
3524 return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
3532 return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
3540 return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
3547 #define _mm512_cmp_round_ps_mask(A, B, P, R) __extension__ ({ \
3548 (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
3549 (__v16sf)(__m512)(B), (int)(P), \
3550 (__mmask16)-1, (int)(R)); })
3552 #define _mm512_mask_cmp_round_ps_mask(U, A, B, P, R) __extension__ ({ \
3553 (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
3554 (__v16sf)(__m512)(B), (int)(P), \
3555 (__mmask16)(U), (int)(R)); })
3557 #define _mm512_cmp_ps_mask(A, B, P) \
3558 _mm512_cmp_round_ps_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
3560 #define _mm512_mask_cmp_ps_mask(U, A, B, P) \
3561 _mm512_mask_cmp_round_ps_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION)
3563 #define _mm512_cmp_round_pd_mask(A, B, P, R) __extension__ ({ \
3564 (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
3565 (__v8df)(__m512d)(B), (int)(P), \
3566 (__mmask8)-1, (int)(R)); })
3568 #define _mm512_mask_cmp_round_pd_mask(U, A, B, P, R) __extension__ ({ \
3569 (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
3570 (__v8df)(__m512d)(B), (int)(P), \
3571 (__mmask8)(U), (int)(R)); })
3573 #define _mm512_cmp_pd_mask(A, B, P) \
3574 _mm512_cmp_round_pd_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
3576 #define _mm512_mask_cmp_pd_mask(U, A, B, P) \
3577 _mm512_mask_cmp_round_pd_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION)
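/* The cmp_*_mask macros compare element-wise using one of the 32 _CMP_*
   predicates (e.g. _CMP_LT_OS) available via <immintrin.h> and return the
   outcome as a bit-per-element mask value. */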
3581 #define _mm512_cvtt_roundps_epu32(A, R) __extension__ ({ \
3582 (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
3583 (__v16si)_mm512_undefined_epi32(), \
3584 (__mmask16)-1, (int)(R)); })
3586 #define _mm512_mask_cvtt_roundps_epu32(W, U, A, R) __extension__ ({ \
3587 (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
3588 (__v16si)(__m512i)(W), \
3589 (__mmask16)(U), (int)(R)); })
3591 #define _mm512_maskz_cvtt_roundps_epu32(U, A, R) __extension__ ({ \
3592 (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
3593 (__v16si)_mm512_setzero_si512(), \
3594 (__mmask16)(U), (int)(R)); })
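/* cvtt ("convert with truncation") conversions always round toward zero when
   the value does not fit exactly; the plain cvt conversions honor the
   rounding mode (the R immediate or the current MXCSR setting). */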
3600 return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
3610 return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
3619 return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
3625 #define _mm512_cvt_roundepi32_ps(A, R) __extension__ ({ \
3626 (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
3627 (__v16sf)_mm512_setzero_ps(), \
3628 (__mmask16)-1, (int)(R)); })
3630 #define _mm512_mask_cvt_roundepi32_ps(W, U, A, R) __extension__ ({ \
3631 (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
3632 (__v16sf)(__m512)(W), \
3633 (__mmask16)(U), (int)(R)); })
3635 #define _mm512_maskz_cvt_roundepi32_ps(U, A, R) __extension__ ({ \
3636 (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
3637 (__v16sf)_mm512_setzero_ps(), \
3638 (__mmask16)(U), (int)(R)); })
3640 #define _mm512_cvt_roundepu32_ps(A, R) __extension__ ({ \
3641 (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
3642 (__v16sf)_mm512_setzero_ps(), \
3643 (__mmask16)-1, (int)(R)); })
3645 #define _mm512_mask_cvt_roundepu32_ps(W, U, A, R) __extension__ ({ \
3646 (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
3647 (__v16sf)(__m512)(W), \
3648 (__mmask16)(U), (int)(R)); })
3650 #define _mm512_maskz_cvt_roundepu32_ps(U, A, R) __extension__ ({ \
3651 (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
3652 (__v16sf)_mm512_setzero_ps(), \
3653 (__mmask16)(U), (int)(R)); })
3658 return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A,
3667 return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A,
3676 return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A,
3685 return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A,
3694 return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A,
3702 return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A,
3710 return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A,
3719 return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A,
3728 return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A,
3737 return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A,
3746 return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A,
3754 return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A,
3759 #define _mm512_cvt_roundpd_ps(A, R) __extension__ ({ \
3760 (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
3761 (__v8sf)_mm256_setzero_ps(), \
3762 (__mmask8)-1, (int)(R)); })
3764 #define _mm512_mask_cvt_roundpd_ps(W, U, A, R) __extension__ ({ \
3765 (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
3766 (__v8sf)(__m256)(W), (__mmask8)(U), \
3769 #define _mm512_maskz_cvt_roundpd_ps(U, A, R) __extension__ ({ \
3770 (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
3771 (__v8sf)_mm256_setzero_ps(), \
3772 (__mmask8)(U), (int)(R)); })
3777 return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
3786 return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
3795 return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
3801 #define _mm512_cvt_roundps_ph(A, I) __extension__ ({ \
3802 (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
3803 (__v16hi)_mm256_undefined_si256(), \
3806 #define _mm512_mask_cvt_roundps_ph(U, W, A, I) __extension__ ({ \
3807 (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
3808 (__v16hi)(__m256i)(U), \
3811 #define _mm512_maskz_cvt_roundps_ph(W, A, I) __extension__ ({ \
3812 (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
3813 (__v16hi)_mm256_setzero_si256(), \
3816 #define _mm512_cvtps_ph(A, I) __extension__ ({ \
3817 (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
3818 (__v16hi)_mm256_setzero_si256(), \
3821 #define _mm512_mask_cvtps_ph(U, W, A, I) __extension__ ({ \
3822 (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
3823 (__v16hi)(__m256i)(U), \
3826 #define _mm512_maskz_cvtps_ph(W, A, I) __extension__ ({\
3827 (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
3828 (__v16hi)_mm256_setzero_si256(), \
3831 #define _mm512_cvt_roundph_ps(A, R) __extension__ ({ \
3832 (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
3833 (__v16sf)_mm512_undefined_ps(), \
3834 (__mmask16)-1, (int)(R)); })
3836 #define _mm512_mask_cvt_roundph_ps(W, U, A, R) __extension__ ({ \
3837 (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
3838 (__v16sf)(__m512)(W), \
3839 (__mmask16)(U), (int)(R)); })
3841 #define _mm512_maskz_cvt_roundph_ps(U, A, R) __extension__ ({ \
3842 (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
3843 (__v16sf)_mm512_setzero_ps(), \
3844 (__mmask16)(U), (int)(R)); })
3850 return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
3860 return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
3869 return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
3875 #define _mm512_cvtt_roundpd_epi32(A, R) __extension__ ({ \
3876 (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
3877 (__v8si)_mm256_setzero_si256(), \
3878 (__mmask8)-1, (int)(R)); })
3880 #define _mm512_mask_cvtt_roundpd_epi32(W, U, A, R) __extension__ ({ \
3881 (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
3882 (__v8si)(__m256i)(W), \
3883 (__mmask8)(U), (int)(R)); })
3885 #define _mm512_maskz_cvtt_roundpd_epi32(U, A, R) __extension__ ({ \
3886 (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
3887 (__v8si)_mm256_setzero_si256(), \
3888 (__mmask8)(U), (int)(R)); })
3893 return (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df) __a,
3902 return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A,
3911 return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A,
3917 #define _mm512_cvtt_roundps_epi32(A, R) __extension__ ({ \
3918 (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
3919 (__v16si)_mm512_setzero_si512(), \
3920 (__mmask16)-1, (int)(R)); })
3922 #define _mm512_mask_cvtt_roundps_epi32(W, U, A, R) __extension__ ({ \
3923 (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
3924 (__v16si)(__m512i)(W), \
3925 (__mmask16)(U), (int)(R)); })
3927 #define _mm512_maskz_cvtt_roundps_epi32(U, A, R) __extension__ ({ \
3928 (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
3929 (__v16si)_mm512_setzero_si512(), \
3930 (__mmask16)(U), (int)(R)); })
3936 __builtin_ia32_cvttps2dq512_mask((__v16sf) __a,
3944 return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A,
3953 return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A,
3959 #define _mm512_cvt_roundps_epi32(A, R) __extension__ ({ \
3960 (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
3961 (__v16si)_mm512_setzero_si512(), \
3962 (__mmask16)-1, (int)(R)); })
3964 #define _mm512_mask_cvt_roundps_epi32(W, U, A, R) __extension__ ({ \
3965 (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
3966 (__v16si)(__m512i)(W), \
3967 (__mmask16)(U), (int)(R)); })
3969 #define _mm512_maskz_cvt_roundps_epi32(U, A, R) __extension__ ({ \
3970 (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
3971 (__v16si)_mm512_setzero_si512(), \
3972 (__mmask16)(U), (int)(R)); })
3977 return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
3986 return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
3995 return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
4002 #define _mm512_cvt_roundpd_epi32(A, R) __extension__ ({ \
4003 (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
4004 (__v8si)_mm256_setzero_si256(), \
4005 (__mmask8)-1, (int)(R)); })
4007 #define _mm512_mask_cvt_roundpd_epi32(W, U, A, R) __extension__ ({ \
4008 (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
4009 (__v8si)(__m256i)(W), \
4010 (__mmask8)(U), (int)(R)); })
4012 #define _mm512_maskz_cvt_roundpd_epi32(U, A, R) __extension__ ({ \
4013 (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
4014 (__v8si)_mm256_setzero_si256(), \
4015 (__mmask8)(U), (int)(R)); })
4020 return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
4030 return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
4039 return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
4046 #define _mm512_cvt_roundps_epu32(A, R) __extension__ ({ \
4047 (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
4048 (__v16si)_mm512_setzero_si512(), \
4049 (__mmask16)-1, (int)(R)); })
4051 #define _mm512_mask_cvt_roundps_epu32(W, U, A, R) __extension__ ({ \
4052 (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
4053 (__v16si)(__m512i)(W), \
4054 (__mmask16)(U), (int)(R)); })
4056 #define _mm512_maskz_cvt_roundps_epu32(U, A, R) __extension__ ({ \
4057 (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
4058 (__v16si)_mm512_setzero_si512(), \
4059 (__mmask16)(U), (int)(R)); })
4064   return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
4074 return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
4083 return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
4090 #define _mm512_cvt_roundpd_epu32(A, R) __extension__ ({ \
4091 (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
4092 (__v8si)_mm256_setzero_si256(), \
4093 (__mmask8)-1, (int)(R)); })
4095 #define _mm512_mask_cvt_roundpd_epu32(W, U, A, R) __extension__ ({ \
4096 (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
4098 (__mmask8)(U), (int)(R)); })
4100 #define _mm512_maskz_cvt_roundpd_epu32(U, A, R) __extension__ ({ \
4101 (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
4102 (__v8si)_mm256_setzero_si256(), \
4103 (__mmask8)(U), (int)(R)); })
4108 return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
4118 return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
4127 return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
4139 return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b,
4140 1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
4146 return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
4154 return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
4162 return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b,
4163 0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
4169 return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
4177 return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
4185 return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b,
4187 2+4, 18+4, 3+4, 19+4,
4188 2+8, 18+8, 3+8, 19+8,
4189 2+12, 18+12, 3+12, 19+12);
4195 return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
4203 return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
4211 return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b,
4213 0+4, 16+4, 1+4, 17+4,
4214 0+8, 16+8, 1+8, 17+8,
4215 0+12, 16+12, 1+12, 17+12);
4221 return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
4229 return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
4237 return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B,
4239 2+4, 18+4, 3+4, 19+4,
4240 2+8, 18+8, 3+8, 19+8,
4241 2+12, 18+12, 3+12, 19+12);
4247 return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
4255 return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
4263 return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B,
4265 0+4, 16+4, 1+4, 17+4,
4266 0+8, 16+8, 1+8, 17+8,
4267 0+12, 16+12, 1+12, 17+12);
4273 return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
4281 return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
4289 return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B,
4290 1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
4296 return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
4304 return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
4312 return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B,
4313 0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
4319 return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
4327 return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
4337 return (__mmask16) __builtin_ia32_ptestmd512 ((__v16si) __A,
4345 return (__mmask16) __builtin_ia32_ptestmd512 ((__v16si) __A,
4346 (__v16si) __B, __U);
4352 return (__mmask8) __builtin_ia32_ptestmq512 ((__v8di) __A,
4360 return (__mmask8) __builtin_ia32_ptestmq512 ((__v8di) __A, (__v8di) __B, __U);
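/* Unaligned masked loads and stores.  The _mask_ loads merge loaded elements
   into the passthrough vector for set mask bits, the _maskz_ loads zero the
   unselected lanes, and the masked stores write back only the selected lanes;
   none of these require 64-byte alignment.  Illustrative sketch, with a
   hypothetical int buffer buf of at least 16 elements:
     __m512i lo8 = _mm512_maskz_loadu_epi32(0x00FF, buf);
   Lanes 8..15 of lo8 are zero because their mask bits are clear. */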
4369   return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *) __P,
4378   return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *) __P,
4387   return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *)__P,
4396   return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *) __P,
4404   return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *)__P,
4413   return (__m512) __builtin_ia32_loadups512_mask ((const float *) __P,
4421   return (__m512) __builtin_ia32_loadups512_mask ((const float *)__P,
4430   return (__m512d) __builtin_ia32_loadupd512_mask ((const double *) __P,
4438   return (__m512d) __builtin_ia32_loadupd512_mask ((const double *)__P,
4450   return ((struct __loadu_pd*)__p)->__v;
4459   return ((struct __loadu_ps*)__p)->__v;
4465   return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__p,
4474   return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *) __P,
4482   return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__P,
4491   return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__p,
4500   return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *) __P,
4508   return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__P,
4517 return *(__m512i *) __P;
4523 return *(__m512i *) __P;
4529 return *(__m512i *) __P;
4537   __builtin_ia32_storedqudi512_mask ((long long *)__P, (__v8di) __A,
4544   __builtin_ia32_storedqusi512_mask ((int *) __P, (__v16si) __A,
4551   __builtin_ia32_storedqusi512_mask ((int *)__P, (__v16si) __A,
4558   __builtin_ia32_storeupd512_mask ((double *)__P, (__v8df) __A, (__mmask8) __U);
4564   __builtin_ia32_storeupd512_mask((double *)__P, (__v8df)__A, (__mmask8)-1);
4570   __builtin_ia32_storeups512_mask ((float *)__P, (__v16sf) __A,
4577   __builtin_ia32_storeups512_mask((float *)__P, (__v16sf)__A, (__mmask16)-1);
4583 __builtin_ia32_storeapd512_mask ((__v8df *)__P, (__v8df) __A, (__mmask8) __U);
4589 *(__m512d*)__P = __A;
4595 __builtin_ia32_storeaps512_mask ((__v16sf *)__P, (__v16sf) __A,
4602 *(__m512*)__P = __A;
4608 *(__m512i *) __P = __A;
4614 *(__m512i *) __P = __A;
4620 *(__m512i *) __P = __A;
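/* _mm512_knot returns the bitwise complement of a 16-bit mask.  The integer
   compare wrappers below all expand to the generic (u)cmpd/(u)cmpq builtins
   with a fixed predicate code (0 = EQ, 1 = LT, 2 = LE, 4 = NE, 5 = NLT i.e. >=,
   6 = NLE i.e. >), the same encoding that the _mm512_cmp_ep*_mask macros
   further down accept directly.  Illustrative use, assuming __m512i values
   x and y:
     __mmask16 eq = _mm512_cmpeq_epi32_mask(x, y); */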
4628 return __builtin_ia32_knothi(__M);
4635 return (__mmask16)__builtin_ia32_pcmpeqd512_mask((__v16si)__a, (__v16si)__b,
4641 return (__mmask16)__builtin_ia32_pcmpeqd512_mask((__v16si)__a, (__v16si)__b,
4647 return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 0,
4653 return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 0,
4659 return (__mmask8)__builtin_ia32_pcmpeqq512_mask((__v8di)__a, (__v8di)__b,
4665 return (__mmask8)__builtin_ia32_pcmpeqq512_mask((__v8di)__a, (__v8di)__b,
4671 return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 0,
4677 return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 0,
4683 return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 5,
4689 return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 5,
4695 return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 5,
4701 return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 5,
4707 return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 5,
4713 return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 5,
4719 return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 5,
4725 return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 5,
4731 return (__mmask16)__builtin_ia32_pcmpgtd512_mask((__v16si)__a, (__v16si)__b,
4737 return (__mmask16)__builtin_ia32_pcmpgtd512_mask((__v16si)__a, (__v16si)__b,
4743 return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 6,
4749 return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 6,
4755 return (__mmask8)__builtin_ia32_pcmpgtq512_mask((__v8di)__a, (__v8di)__b,
4761 return (__mmask8)__builtin_ia32_pcmpgtq512_mask((__v8di)__a, (__v8di)__b,
4767 return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 6,
4773 return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 6,
4779 return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 2,
4785 return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 2,
4791 return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 2,
4797 return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 2,
4803 return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 2,
4809 return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 2,
4815 return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 2,
4821 return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 2,
4827 return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 1,
4833 return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 1,
4839 return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 1,
4845 return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 1,
4851 return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 1,
4857 return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 1,
4863 return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 1,
4869 return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 1,
4875 return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 4,
4881 return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 4,
4887 return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 4,
4893 return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 4,
4899 return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 4,
4905 return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 4,
4911 return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 4,
4917 return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 4,
4924 return (__m512i) __builtin_ia32_pmovsxbd512_mask ((__v16qi) __A,
4933 return (__m512i) __builtin_ia32_pmovsxbd512_mask ((__v16qi) __A,
4941 return (__m512i) __builtin_ia32_pmovsxbd512_mask ((__v16qi) __A,
4950 return (__m512i) __builtin_ia32_pmovsxbq512_mask ((__v16qi) __A,
4959 return (__m512i) __builtin_ia32_pmovsxbq512_mask ((__v16qi) __A,
4967 return (__m512i) __builtin_ia32_pmovsxbq512_mask ((__v16qi) __A,
4976 return (__m512i) __builtin_ia32_pmovsxdq512_mask ((__v8si) __X,
4985 return (__m512i) __builtin_ia32_pmovsxdq512_mask ((__v8si) __X,
4993 return (__m512i) __builtin_ia32_pmovsxdq512_mask ((__v8si) __X,
5002 return (__m512i) __builtin_ia32_pmovsxwd512_mask ((__v16hi) __A,
5011 return (__m512i) __builtin_ia32_pmovsxwd512_mask ((__v16hi) __A,
5019 return (__m512i) __builtin_ia32_pmovsxwd512_mask ((__v16hi) __A,
5028 return (__m512i) __builtin_ia32_pmovsxwq512_mask ((__v8hi) __A,
5037 return (__m512i) __builtin_ia32_pmovsxwq512_mask ((__v8hi) __A,
5045 return (__m512i) __builtin_ia32_pmovsxwq512_mask ((__v8hi) __A,
5054 return (__m512i) __builtin_ia32_pmovzxbd512_mask ((__v16qi) __A,
5063 return (__m512i) __builtin_ia32_pmovzxbd512_mask ((__v16qi) __A,
5071 return (__m512i) __builtin_ia32_pmovzxbd512_mask ((__v16qi) __A,
5080 return (__m512i) __builtin_ia32_pmovzxbq512_mask ((__v16qi) __A,
5089 return (__m512i) __builtin_ia32_pmovzxbq512_mask ((__v16qi) __A,
5097 return (__m512i) __builtin_ia32_pmovzxbq512_mask ((__v16qi) __A,
5106 return (__m512i) __builtin_ia32_pmovzxdq512_mask ((__v8si) __X,
5115 return (__m512i) __builtin_ia32_pmovzxdq512_mask ((__v8si) __X,
5123 return (__m512i) __builtin_ia32_pmovzxdq512_mask ((__v8si) __X,
5132 return (__m512i) __builtin_ia32_pmovzxwd512_mask ((__v16hi) __A,
5141 return (__m512i) __builtin_ia32_pmovzxwd512_mask ((__v16hi) __A,
5149 return (__m512i) __builtin_ia32_pmovzxwd512_mask ((__v16hi) __A,
5158 return (__m512i) __builtin_ia32_pmovzxwq512_mask ((__v8hi) __A,
5167 return (__m512i) __builtin_ia32_pmovzxwq512_mask ((__v8hi) __A,
5175 return (__m512i) __builtin_ia32_pmovzxwq512_mask ((__v8hi) __A,
5184 return (__m512i) __builtin_ia32_prorvd512_mask ((__v16si) __A,
5194 return (__m512i) __builtin_ia32_prorvd512_mask ((__v16si) __A,
5203 return (__m512i) __builtin_ia32_prorvd512_mask ((__v16si) __A,
5213 return (__m512i) __builtin_ia32_prorvq512_mask ((__v8di) __A,
5223 return (__m512i) __builtin_ia32_prorvq512_mask ((__v8di) __A,
5232 return (__m512i) __builtin_ia32_prorvq512_mask ((__v8di) __A,
5241 #define _mm512_cmp_epi32_mask(a, b, p) __extension__ ({ \
5242 (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
5243 (__v16si)(__m512i)(b), (int)(p), \
5246 #define _mm512_cmp_epu32_mask(a, b, p) __extension__ ({ \
5247 (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
5248 (__v16si)(__m512i)(b), (int)(p), \
5251 #define _mm512_cmp_epi64_mask(a, b, p) __extension__ ({ \
5252 (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
5253 (__v8di)(__m512i)(b), (int)(p), \
5256 #define _mm512_cmp_epu64_mask(a, b, p) __extension__ ({ \
5257 (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
5258 (__v8di)(__m512i)(b), (int)(p), \
5261 #define _mm512_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \
5262 (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
5263 (__v16si)(__m512i)(b), (int)(p), \
5266 #define _mm512_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \
5267 (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
5268 (__v16si)(__m512i)(b), (int)(p), \
5271 #define _mm512_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \
5272 (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
5273 (__v8di)(__m512i)(b), (int)(p), \
5276 #define _mm512_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \
5277 (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
5278 (__v8di)(__m512i)(b), (int)(p), \
5281 #define _mm512_rol_epi32(a, b) __extension__ ({ \
5282 (__m512i)__builtin_ia32_prold512_mask((__v16si)(__m512i)(a), (int)(b), \
5283 (__v16si)_mm512_setzero_si512(), \
5286 #define _mm512_mask_rol_epi32(W, U, a, b) __extension__ ({ \
5287 (__m512i)__builtin_ia32_prold512_mask((__v16si)(__m512i)(a), (int)(b), \
5288 (__v16si)(__m512i)(W), \
5291 #define _mm512_maskz_rol_epi32(U, a, b) __extension__ ({ \
5292 (__m512i)__builtin_ia32_prold512_mask((__v16si)(__m512i)(a), (int)(b), \
5293 (__v16si)_mm512_setzero_si512(), \
5296 #define _mm512_rol_epi64(a, b) __extension__ ({ \
5297 (__m512i)__builtin_ia32_prolq512_mask((__v8di)(__m512i)(a), (int)(b), \
5298 (__v8di)_mm512_setzero_si512(), \
5301 #define _mm512_mask_rol_epi64(W, U, a, b) __extension__ ({ \
5302 (__m512i)__builtin_ia32_prolq512_mask((__v8di)(__m512i)(a), (int)(b), \
5303 (__v8di)(__m512i)(W), (__mmask8)(U)); })
5305 #define _mm512_maskz_rol_epi64(U, a, b) __extension__ ({ \
5306 (__m512i)__builtin_ia32_prolq512_mask((__v8di)(__m512i)(a), (int)(b), \
5307 (__v8di)_mm512_setzero_si512(), \
5312 return (__m512i) __builtin_ia32_prolvd512_mask ((__v16si) __A,
5322 return (__m512i) __builtin_ia32_prolvd512_mask ((__v16si) __A,
5331 return (__m512i) __builtin_ia32_prolvd512_mask ((__v16si) __A,
5341 return (__m512i) __builtin_ia32_prolvq512_mask ((__v8di) __A,
5351 return (__m512i) __builtin_ia32_prolvq512_mask ((__v8di) __A,
5360 return (__m512i) __builtin_ia32_prolvq512_mask ((__v8di) __A,
5367 #define _mm512_ror_epi32(A, B) __extension__ ({ \
5368 (__m512i)__builtin_ia32_prord512_mask((__v16si)(__m512i)(A), (int)(B), \
5369 (__v16si)_mm512_setzero_si512(), \
5372 #define _mm512_mask_ror_epi32(W, U, A, B) __extension__ ({ \
5373 (__m512i)__builtin_ia32_prord512_mask((__v16si)(__m512i)(A), (int)(B), \
5374 (__v16si)(__m512i)(W), \
5377 #define _mm512_maskz_ror_epi32(U, A, B) __extension__ ({ \
5378 (__m512i)__builtin_ia32_prord512_mask((__v16si)(__m512i)(A), (int)(B), \
5379 (__v16si)_mm512_setzero_si512(), \
5382 #define _mm512_ror_epi64(A, B) __extension__ ({ \
5383 (__m512i)__builtin_ia32_prorq512_mask((__v8di)(__m512i)(A), (int)(B), \
5384 (__v8di)_mm512_setzero_si512(), \
5387 #define _mm512_mask_ror_epi64(W, U, A, B) __extension__ ({ \
5388 (__m512i)__builtin_ia32_prorq512_mask((__v8di)(__m512i)(A), (int)(B), \
5389 (__v8di)(__m512i)(W), (__mmask8)(U)); })
5391 #define _mm512_maskz_ror_epi64(U, A, B) __extension__ ({ \
5392 (__m512i)__builtin_ia32_prorq512_mask((__v8di)(__m512i)(A), (int)(B), \
5393 (__v8di)_mm512_setzero_si512(), \
5396 #define _mm512_slli_epi32(A, B) __extension__ ({ \
5397 (__m512i)__builtin_ia32_pslldi512_mask((__v16si)(__m512i)(A), (int)(B), \
5398 (__v16si)_mm512_setzero_si512(), \
5401 #define _mm512_mask_slli_epi32(W, U, A, B) __extension__ ({ \
5402 (__m512i)__builtin_ia32_pslldi512_mask((__v16si)(__m512i)(A), (int)(B), \
5403 (__v16si)(__m512i)(W), \
5406 #define _mm512_maskz_slli_epi32(U, A, B) __extension__ ({ \
5407 (__m512i)__builtin_ia32_pslldi512_mask((__v16si)(__m512i)(A), (int)(B), \
5408 (__v16si)_mm512_setzero_si512(), \
5411 #define _mm512_slli_epi64(A, B) __extension__ ({ \
5412 (__m512i)__builtin_ia32_psllqi512_mask((__v8di)(__m512i)(A), (int)(B), \
5413 (__v8di)_mm512_setzero_si512(), \
5416 #define _mm512_mask_slli_epi64(W, U, A, B) __extension__ ({ \
5417 (__m512i)__builtin_ia32_psllqi512_mask((__v8di)(__m512i)(A), (int)(B), \
5418 (__v8di)(__m512i)(W), \
5421 #define _mm512_maskz_slli_epi64(U, A, B) __extension__ ({ \
5422 (__m512i)__builtin_ia32_psllqi512_mask((__v8di)(__m512i)(A), (int)(B), \
5423 (__v8di)_mm512_setzero_si512(), \
5428 #define _mm512_srli_epi32(A, B) __extension__ ({ \
5429 (__m512i)__builtin_ia32_psrldi512_mask((__v16si)(__m512i)(A), (int)(B), \
5430 (__v16si)_mm512_setzero_si512(), \
5433 #define _mm512_mask_srli_epi32(W, U, A, B) __extension__ ({ \
5434 (__m512i)__builtin_ia32_psrldi512_mask((__v16si)(__m512i)(A), (int)(B), \
5435 (__v16si)(__m512i)(W), \
5438 #define _mm512_maskz_srli_epi32(U, A, B) __extension__ ({ \
5439 (__m512i)__builtin_ia32_psrldi512_mask((__v16si)(__m512i)(A), (int)(B), \
5440 (__v16si)_mm512_setzero_si512(), \
5443 #define _mm512_srli_epi64(A, B) __extension__ ({ \
5444 (__m512i)__builtin_ia32_psrlqi512_mask((__v8di)(__m512i)(A), (int)(B), \
5445 (__v8di)_mm512_setzero_si512(), \
5448 #define _mm512_mask_srli_epi64(W, U, A, B) __extension__ ({ \
5449 (__m512i)__builtin_ia32_psrlqi512_mask((__v8di)(__m512i)(A), (int)(B), \
5450 (__v8di)(__m512i)(W), \
5453 #define _mm512_maskz_srli_epi64(U, A, B) __extension__ ({ \
5454 (__m512i)__builtin_ia32_psrlqi512_mask((__v8di)(__m512i)(A), (int)(B), \
5455 (__v8di)_mm512_setzero_si512(), \
5461   return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P,
5469   return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P,
5478 __builtin_ia32_movdqa32store512_mask ((__v16si *) __P, (__v16si) __A,
5485 return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
5493 return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
5501 return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
5509 return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
5517   return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P,
5525   return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P,
5534 __builtin_ia32_movdqa64store512_mask ((__v8di *) __P, (__v8di) __A,
5541 return (__m512d)__builtin_shufflevector((__v8df)__A, (__v8df)__A,
5542 0, 0, 2, 2, 4, 4, 6, 6);
5548 return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
5556 return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
5561 #define _mm512_fixupimm_round_pd(A, B, C, imm, R) __extension__ ({ \
5562 (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
5563 (__v8df)(__m512d)(B), \
5564 (__v8di)(__m512i)(C), (int)(imm), \
5565 (__mmask8)-1, (int)(R)); })
5567 #define _mm512_mask_fixupimm_round_pd(A, U, B, C, imm, R) __extension__ ({ \
5568 (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
5569 (__v8df)(__m512d)(B), \
5570 (__v8di)(__m512i)(C), (int)(imm), \
5571 (__mmask8)(U), (int)(R)); })
5573 #define _mm512_fixupimm_pd(A, B, C, imm) __extension__ ({ \
5574 (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
5575 (__v8df)(__m512d)(B), \
5576 (__v8di)(__m512i)(C), (int)(imm), \
5578 _MM_FROUND_CUR_DIRECTION); })
5580 #define _mm512_mask_fixupimm_pd(A, U, B, C, imm) __extension__ ({ \
5581 (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
5582 (__v8df)(__m512d)(B), \
5583 (__v8di)(__m512i)(C), (int)(imm), \
5585 _MM_FROUND_CUR_DIRECTION); })
5587 #define _mm512_maskz_fixupimm_round_pd(U, A, B, C, imm, R) __extension__ ({ \
5588 (__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
5589 (__v8df)(__m512d)(B), \
5590 (__v8di)(__m512i)(C), \
5591 (int)(imm), (__mmask8)(U), \
5594 #define _mm512_maskz_fixupimm_pd(U, A, B, C, imm) __extension__ ({ \
5595 (__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
5596 (__v8df)(__m512d)(B), \
5597 (__v8di)(__m512i)(C), \
5598 (int)(imm), (__mmask8)(U), \
5599 _MM_FROUND_CUR_DIRECTION); })
5601 #define _mm512_fixupimm_round_ps(A, B, C, imm, R) __extension__ ({ \
5602 (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
5603 (__v16sf)(__m512)(B), \
5604 (__v16si)(__m512i)(C), (int)(imm), \
5605 (__mmask16)-1, (int)(R)); })
5607 #define _mm512_mask_fixupimm_round_ps(A, U, B, C, imm, R) __extension__ ({ \
5608 (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
5609 (__v16sf)(__m512)(B), \
5610 (__v16si)(__m512i)(C), (int)(imm), \
5611 (__mmask16)(U), (int)(R)); })
5613 #define _mm512_fixupimm_ps(A, B, C, imm) __extension__ ({ \
5614 (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
5615 (__v16sf)(__m512)(B), \
5616 (__v16si)(__m512i)(C), (int)(imm), \
5618 _MM_FROUND_CUR_DIRECTION); })
5620 #define _mm512_mask_fixupimm_ps(A, U, B, C, imm) __extension__ ({ \
5621 (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
5622 (__v16sf)(__m512)(B), \
5623 (__v16si)(__m512i)(C), (int)(imm), \
5625 _MM_FROUND_CUR_DIRECTION); })
5627 #define _mm512_maskz_fixupimm_round_ps(U, A, B, C, imm, R) __extension__ ({ \
5628 (__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
5629 (__v16sf)(__m512)(B), \
5630 (__v16si)(__m512i)(C), \
5631 (int)(imm), (__mmask16)(U), \
5634 #define _mm512_maskz_fixupimm_ps(U, A, B, C, imm) __extension__ ({ \
5635 (__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
5636 (__v16sf)(__m512)(B), \
5637 (__v16si)(__m512i)(C), \
5638 (int)(imm), (__mmask16)(U), \
5639 _MM_FROUND_CUR_DIRECTION); })
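/* The _sd/_ss fixupimm forms below apply the same fix-up step to the lowest
   element only, with the upper elements of the result taken from the first
   source operand; C again supplies the per-class fix-up table and imm tunes
   the exception behaviour. */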
5641 #define _mm_fixupimm_round_sd(A, B, C, imm, R) __extension__ ({ \
5642 (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
5643 (__v2df)(__m128d)(B), \
5644 (__v2di)(__m128i)(C), (int)(imm), \
5645 (__mmask8)-1, (int)(R)); })
5647 #define _mm_mask_fixupimm_round_sd(A, U, B, C, imm, R) __extension__ ({ \
5648 (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
5649 (__v2df)(__m128d)(B), \
5650 (__v2di)(__m128i)(C), (int)(imm), \
5651 (__mmask8)(U), (int)(R)); })
5653 #define _mm_fixupimm_sd(A, B, C, imm) __extension__ ({ \
5654 (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
5655 (__v2df)(__m128d)(B), \
5656 (__v2di)(__m128i)(C), (int)(imm), \
5658 _MM_FROUND_CUR_DIRECTION); })
5660 #define _mm_mask_fixupimm_sd(A, U, B, C, imm) __extension__ ({ \
5661 (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
5662 (__v2df)(__m128d)(B), \
5663 (__v2di)(__m128i)(C), (int)(imm), \
5665 _MM_FROUND_CUR_DIRECTION); })
5667 #define _mm_maskz_fixupimm_round_sd(U, A, B, C, imm, R) __extension__ ({ \
5668 (__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
5669 (__v2df)(__m128d)(B), \
5670 (__v2di)(__m128i)(C), (int)(imm), \
5671 (__mmask8)(U), (int)(R)); })
5673 #define _mm_maskz_fixupimm_sd(U, A, B, C, imm) __extension__ ({ \
5674 (__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
5675 (__v2df)(__m128d)(B), \
5676 (__v2di)(__m128i)(C), (int)(imm), \
5678 _MM_FROUND_CUR_DIRECTION); })
5680 #define _mm_fixupimm_round_ss(A, B, C, imm, R) __extension__ ({ \
5681 (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
5682 (__v4sf)(__m128)(B), \
5683 (__v4si)(__m128i)(C), (int)(imm), \
5684 (__mmask8)-1, (int)(R)); })
5686 #define _mm_mask_fixupimm_round_ss(A, U, B, C, imm, R) __extension__ ({ \
5687 (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
5688 (__v4sf)(__m128)(B), \
5689 (__v4si)(__m128i)(C), (int)(imm), \
5690 (__mmask8)(U), (int)(R)); })
5692 #define _mm_fixupimm_ss(A, B, C, imm) __extension__ ({ \
5693 (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
5694 (__v4sf)(__m128)(B), \
5695 (__v4si)(__m128i)(C), (int)(imm), \
5697 _MM_FROUND_CUR_DIRECTION); })
5699 #define _mm_mask_fixupimm_ss(A, U, B, C, imm) __extension__ ({ \
5700 (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
5701 (__v4sf)(__m128)(B), \
5702 (__v4si)(__m128i)(C), (int)(imm), \
5704 _MM_FROUND_CUR_DIRECTION); })
5706 #define _mm_maskz_fixupimm_round_ss(U, A, B, C, imm, R) __extension__ ({ \
5707 (__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
5708 (__v4sf)(__m128)(B), \
5709 (__v4si)(__m128i)(C), (int)(imm), \
5710 (__mmask8)(U), (int)(R)); })
5712 #define _mm_maskz_fixupimm_ss(U, A, B, C, imm) __extension__ ({ \
5713 (__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
5714 (__v4sf)(__m128)(B), \
5715 (__v4si)(__m128i)(C), (int)(imm), \
5717 _MM_FROUND_CUR_DIRECTION); })
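/* getexp extracts the exponent of the lower source element as a floating-point
   value (roughly floor(log2(|b0|))), with the upper elements copied from the
   first operand.  The macro forms take an explicit SAE/rounding argument R;
   the function forms below use the current rounding mode. */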
5719 #define _mm_getexp_round_sd(A, B, R) __extension__ ({ \
5720 (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
5721 (__v2df)(__m128d)(B), \
5722 (__v2df)_mm_setzero_pd(), \
5723 (__mmask8)-1, (int)(R)); })
5729 return (__m128d) __builtin_ia32_getexpsd128_round_mask ((__v2df) __A,
5736 return (__m128d) __builtin_ia32_getexpsd128_round_mask ( (__v2df) __A,
5743 #define _mm_mask_getexp_round_sd(W, U, A, B, R) __extension__ ({\
5744 (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
5745 (__v2df)(__m128d)(B), \
5746 (__v2df)(__m128d)(W), \
5747 (__mmask8)(U), (int)(R)); })
5752 return (__m128d) __builtin_ia32_getexpsd128_round_mask ( (__v2df) __A,
5759 #define _mm_maskz_getexp_round_sd(U, A, B, R) __extension__ ({\
5760 (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
5761 (__v2df)(__m128d)(B), \
5762 (__v2df)_mm_setzero_pd(), \
5763 (__mmask8)(U), (int)(R)); })
5765 #define _mm_getexp_round_ss(A, B, R) __extension__ ({ \
5766 (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
5767 (__v4sf)(__m128)(B), \
5768 (__v4sf)_mm_setzero_ps(), \
5769 (__mmask8)-1, (int)(R)); })
5774 return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
5781 return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
5788 #define _mm_mask_getexp_round_ss(W, U, A, B, R) __extension__ ({\
5789 (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
5790 (__v4sf)(__m128)(B), \
5791 (__v4sf)(__m128)(W), \
5792 (__mmask8)(U), (int)(R)); })
5797 return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
5804 #define _mm_maskz_getexp_round_ss(U, A, B, R) __extension__ ({\
5805 (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
5806 (__v4sf)(__m128)(B), \
5807 (__v4sf)_mm_setzero_ps(), \
5808 (__mmask8)(U), (int)(R)); })
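/* getmant normalizes the mantissa of the lower element.  C selects the
   normalization interval and D the sign control; the expansions below pack the
   two into one immediate as ((D) << 2) | (C), so the _MM_MANT_NORM_* and
   _MM_MANT_SIGN_* enumerators defined earlier in this header are the intended
   arguments. */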
5810 #define _mm_getmant_round_sd(A, B, C, D, R) __extension__ ({ \
5811 (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
5812 (__v2df)(__m128d)(B), \
5813 (int)(((D)<<2) | (C)), \
5814 (__v2df)_mm_setzero_pd(), \
5815 (__mmask8)-1, (int)(R)); })
5817 #define _mm_getmant_sd(A, B, C, D) __extension__ ({ \
5818 (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
5819 (__v2df)(__m128d)(B), \
5820 (int)(((D)<<2) | (C)), \
5821 (__v2df)_mm_setzero_pd(), \
5823 _MM_FROUND_CUR_DIRECTION); })
5825 #define _mm_mask_getmant_sd(W, U, A, B, C, D) __extension__ ({\
5826 (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
5827 (__v2df)(__m128d)(B), \
5828 (int)(((D)<<2) | (C)), \
5829 (__v2df)(__m128d)(W), \
5831 _MM_FROUND_CUR_DIRECTION); })
5833 #define _mm_mask_getmant_round_sd(W, U, A, B, C, D, R) __extension__ ({\
5834 (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
5835 (__v2df)(__m128d)(B), \
5836 (int)(((D)<<2) | (C)), \
5837 (__v2df)(__m128d)(W), \
5838 (__mmask8)(U), (int)(R)); })
5840 #define _mm_maskz_getmant_sd(U, A, B, C, D) __extension__ ({\
5841 (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
5842 (__v2df)(__m128d)(B), \
5843 (int)(((D)<<2) | (C)), \
5844 (__v2df)_mm_setzero_pd(), \
5846 _MM_FROUND_CUR_DIRECTION); })
5848 #define _mm_maskz_getmant_round_sd(U, A, B, C, D, R) __extension__ ({\
5849 (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
5850 (__v2df)(__m128d)(B), \
5851 (int)(((D)<<2) | (C)), \
5852 (__v2df)_mm_setzero_pd(), \
5853 (__mmask8)(U), (int)(R)); })
5855 #define _mm_getmant_round_ss(A, B, C, D, R) __extension__ ({ \
5856 (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
5857 (__v4sf)(__m128)(B), \
5858 (int)(((D)<<2) | (C)), \
5859 (__v4sf)_mm_setzero_ps(), \
5860 (__mmask8)-1, (int)(R)); })
5862 #define _mm_getmant_ss(A, B, C, D) __extension__ ({ \
5863 (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
5864 (__v4sf)(__m128)(B), \
5865 (int)(((D)<<2) | (C)), \
5866 (__v4sf)_mm_setzero_ps(), \
5868 _MM_FROUND_CUR_DIRECTION); })
5870 #define _mm_mask_getmant_ss(W, U, A, B, C, D) __extension__ ({\
5871 (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
5872 (__v4sf)(__m128)(B), \
5873 (int)(((D)<<2) | (C)), \
5874 (__v4sf)(__m128)(W), \
5876 _MM_FROUND_CUR_DIRECTION); })
5878 #define _mm_mask_getmant_round_ss(W, U, A, B, C, D, R) __extension__ ({\
5879 (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
5880 (__v4sf)(__m128)(B), \
5881 (int)(((D)<<2) | (C)), \
5882 (__v4sf)(__m128)(W), \
5883 (__mmask8)(U), (int)(R)); })
5885 #define _mm_maskz_getmant_ss(U, A, B, C, D) __extension__ ({\
5886 (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
5887 (__v4sf)(__m128)(B), \
5888 (int)(((D)<<2) | (C)), \
5889                                            (__v4sf)_mm_setzero_ps(), \
5891 _MM_FROUND_CUR_DIRECTION); })
5893 #define _mm_maskz_getmant_round_ss(U, A, B, C, D, R) __extension__ ({\
5894 (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
5895 (__v4sf)(__m128)(B), \
5896 (int)(((D)<<2) | (C)), \
5897 (__v4sf)_mm_setzero_ps(), \
5898 (__mmask8)(U), (int)(R)); })
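/* _mm_comi_round_sd/_ss compare the lower elements under predicate P and
   return the boolean result as an int, with R supplying the SAE control.  The
   _mm_cvt_round* macros that follow convert the lower scalar element to a
   32- or 64-bit signed or unsigned integer under an explicit rounding mode;
   the _mm_cvtt_round* variants further down truncate instead. */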
5906 #define _mm_comi_round_sd(A, B, P, R) __extension__ ({\
5907 (int)__builtin_ia32_vcomisd((__v2df)(__m128d)(A), (__v2df)(__m128d)(B), \
5908 (int)(P), (int)(R)); })
5910 #define _mm_comi_round_ss(A, B, P, R) __extension__ ({\
5911 (int)__builtin_ia32_vcomiss((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \
5912 (int)(P), (int)(R)); })
5914 #define _mm_cvt_roundsd_si64(A, R) __extension__ ({ \
5915 (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)); })
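/* Below: the two-source permutes (vpermi2var*, where the index vector selects
   elements from either source) and the 512-bit shifts.  The sll/sra/srl forms
   shift every lane by the count held in the low 64 bits of a 128-bit vector,
   while the sllv/srav/srlv forms shift each lane by the corresponding element
   of the count vector. */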
5919 __mmask16 __U, __m512i __B)
5921 return (__m512i) __builtin_ia32_vpermi2vard512_mask ((__v16si) __A,
5931 return (__m512i) __builtin_ia32_pslld512_mask ((__v16si) __A,
5941 return (__m512i) __builtin_ia32_pslld512_mask ((__v16si) __A,
5950 return (__m512i) __builtin_ia32_pslld512_mask ((__v16si) __A,
5960 return (__m512i) __builtin_ia32_psllq512_mask ((__v8di) __A,
5970 return (__m512i) __builtin_ia32_psllq512_mask ((__v8di) __A,
5979 return (__m512i) __builtin_ia32_psllq512_mask ((__v8di) __A,
5989 return (__m512i) __builtin_ia32_psllv16si_mask ((__v16si) __X,
5999 return (__m512i) __builtin_ia32_psllv16si_mask ((__v16si) __X,
6008 return (__m512i) __builtin_ia32_psllv16si_mask ((__v16si) __X,
6018 return (__m512i) __builtin_ia32_psllv8di_mask ((__v8di) __X,
6028 return (__m512i) __builtin_ia32_psllv8di_mask ((__v8di) __X,
6037 return (__m512i) __builtin_ia32_psllv8di_mask ((__v8di) __X,
6047 return (__m512i) __builtin_ia32_psrad512_mask ((__v16si) __A,
6057 return (__m512i) __builtin_ia32_psrad512_mask ((__v16si) __A,
6066 return (__m512i) __builtin_ia32_psrad512_mask ((__v16si) __A,
6076 return (__m512i) __builtin_ia32_psraq512_mask ((__v8di) __A,
6086 return (__m512i) __builtin_ia32_psraq512_mask ((__v8di) __A,
6095 return (__m512i) __builtin_ia32_psraq512_mask ((__v8di) __A,
6105 return (__m512i) __builtin_ia32_psrav16si_mask ((__v16si) __X,
6115 return (__m512i) __builtin_ia32_psrav16si_mask ((__v16si) __X,
6124 return (__m512i) __builtin_ia32_psrav16si_mask ((__v16si) __X,
6134 return (__m512i) __builtin_ia32_psrav8di_mask ((__v8di) __X,
6144 return (__m512i) __builtin_ia32_psrav8di_mask ((__v8di) __X,
6153 return (__m512i) __builtin_ia32_psrav8di_mask ((__v8di) __X,
6163 return (__m512i) __builtin_ia32_psrld512_mask ((__v16si) __A,
6173 return (__m512i) __builtin_ia32_psrld512_mask ((__v16si) __A,
6182 return (__m512i) __builtin_ia32_psrld512_mask ((__v16si) __A,
6192 return (__m512i) __builtin_ia32_psrlq512_mask ((__v8di) __A,
6202 return (__m512i) __builtin_ia32_psrlq512_mask ((__v8di) __A,
6211 return (__m512i) __builtin_ia32_psrlq512_mask ((__v8di) __A,
6221 return (__m512i) __builtin_ia32_psrlv16si_mask ((__v16si) __X,
6231 return (__m512i) __builtin_ia32_psrlv16si_mask ((__v16si) __X,
6240 return (__m512i) __builtin_ia32_psrlv16si_mask ((__v16si) __X,
6250 return (__m512i) __builtin_ia32_psrlv8di_mask ((__v8di) __X,
6260 return (__m512i) __builtin_ia32_psrlv8di_mask ((__v8di) __X,
6269 return (__m512i) __builtin_ia32_psrlv8di_mask ((__v8di) __X,
6276 #define _mm512_ternarylogic_epi32(A, B, C, imm) __extension__ ({ \
6277 (__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
6278 (__v16si)(__m512i)(B), \
6279 (__v16si)(__m512i)(C), (int)(imm), \
6282 #define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm) __extension__ ({ \
6283 (__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
6284 (__v16si)(__m512i)(B), \
6285 (__v16si)(__m512i)(C), (int)(imm), \
6288 #define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm) __extension__ ({ \
6289 (__m512i)__builtin_ia32_pternlogd512_maskz((__v16si)(__m512i)(A), \
6290 (__v16si)(__m512i)(B), \
6291 (__v16si)(__m512i)(C), \
6292 (int)(imm), (__mmask16)(U)); })
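/* The ternarylogic immediate is an 8-entry truth table: for each bit position
   the bits taken from A, B and C form a 3-bit index (A supplies the most
   significant bit) and the indexed bit of imm is the result.  For example,
   imm = 0xE8 yields the bitwise majority of A, B and C.  The _epi64 forms
   below apply the same table to 64-bit lanes. */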
6294 #define _mm512_ternarylogic_epi64(A, B, C, imm) __extension__ ({ \
6295 (__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
6296 (__v8di)(__m512i)(B), \
6297 (__v8di)(__m512i)(C), (int)(imm), \
6300 #define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm) __extension__ ({ \
6301 (__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
6302 (__v8di)(__m512i)(B), \
6303 (__v8di)(__m512i)(C), (int)(imm), \
6306 #define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm) __extension__ ({ \
6307 (__m512i)__builtin_ia32_pternlogq512_maskz((__v8di)(__m512i)(A), \
6308 (__v8di)(__m512i)(B), \
6309 (__v8di)(__m512i)(C), (int)(imm), \
6312 #define _mm_cvt_roundsd_i64(A, R) __extension__ ({ \
6313 (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)); })
6315 #define _mm_cvt_roundsd_si32(A, R) __extension__ ({ \
6316 (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)); })
6318 #define _mm_cvt_roundsd_i32(A, R) __extension__ ({ \
6319 (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)); })
6321 #define _mm_cvt_roundsd_u32(A, R) __extension__ ({ \
6322 (unsigned int)__builtin_ia32_vcvtsd2usi32((__v2df)(__m128d)(A), (int)(R)); })
6327   return (unsigned) __builtin_ia32_vcvtsd2usi32 ((__v2df) __A,
6331 #define _mm_cvt_roundsd_u64(A, R) __extension__ ({ \
6332 (unsigned long long)__builtin_ia32_vcvtsd2usi64((__v2df)(__m128d)(A), \
6338   return (unsigned long long) __builtin_ia32_vcvtsd2usi64 ((__v2df)
6343 #define _mm_cvt_roundss_si32(A, R) __extension__ ({ \
6344 (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)); })
6346 #define _mm_cvt_roundss_i32(A, R) __extension__ ({ \
6347 (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)); })
6349 #define _mm_cvt_roundss_si64(A, R) __extension__ ({ \
6350 (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)); })
6352 #define _mm_cvt_roundss_i64(A, R) __extension__ ({ \
6353 (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)); })
6355 #define _mm_cvt_roundss_u32(A, R) __extension__ ({ \
6356 (unsigned int)__builtin_ia32_vcvtss2usi32((__v4sf)(__m128)(A), (int)(R)); })
6361   return (unsigned) __builtin_ia32_vcvtss2usi32 ((__v4sf) __A,
6365 #define _mm_cvt_roundss_u64(A, R) __extension__ ({ \
6366 (unsigned long long)__builtin_ia32_vcvtss2usi64((__v4sf)(__m128)(A), \
6372   return (unsigned long long) __builtin_ia32_vcvtss2usi64 ((__v4sf)
6377 #define _mm_cvtt_roundsd_i32(A, R) __extension__ ({ \
6378 (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)); })
6380 #define _mm_cvtt_roundsd_si32(A, R) __extension__ ({ \
6381 (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)); })
6386   return (int) __builtin_ia32_vcvttsd2si32 ((__v2df) __A,
6390 #define _mm_cvtt_roundsd_si64(A, R) __extension__ ({ \
6391 (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)); })
6393 #define _mm_cvtt_roundsd_i64(A, R) __extension__ ({ \
6394 (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)); })
6399   return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A,
6403 #define _mm_cvtt_roundsd_u32(A, R) __extension__ ({ \
6404 (unsigned int)__builtin_ia32_vcvttsd2usi32((__v2df)(__m128d)(A), (int)(R)); })
6409   return (unsigned) __builtin_ia32_vcvttsd2usi32 ((__v2df) __A,
6413 #define _mm_cvtt_roundsd_u64(A, R) __extension__ ({ \
6414 (unsigned long long)__builtin_ia32_vcvttsd2usi64((__v2df)(__m128d)(A), \
6420   return (unsigned long long) __builtin_ia32_vcvttsd2usi64 ((__v2df)
6425 #define _mm_cvtt_roundss_i32(A, R) __extension__ ({ \
6426 (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)); })
6428 #define _mm_cvtt_roundss_si32(A, R) __extension__ ({ \
6429 (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)); })
6434   return (int) __builtin_ia32_vcvttss2si32 ((__v4sf) __A,
6438 #define _mm_cvtt_roundss_i64(A, R) __extension__ ({ \
6439 (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)); })
6441 #define _mm_cvtt_roundss_si64(A, R) __extension__ ({ \
6442 (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)); })
6447   return (long long) __builtin_ia32_vcvttss2si64 ((__v4sf) __A,
6451 #define _mm_cvtt_roundss_u32(A, R) __extension__ ({ \
6452 (unsigned int)__builtin_ia32_vcvttss2usi32((__v4sf)(__m128)(A), (int)(R)); })
6457   return (unsigned) __builtin_ia32_vcvttss2usi32 ((__v4sf) __A,
6461 #define _mm_cvtt_roundss_u64(A, R) __extension__ ({ \
6462 (unsigned long long)__builtin_ia32_vcvttss2usi64((__v4sf)(__m128)(A), \
6468   return (unsigned long long) __builtin_ia32_vcvttss2usi64 ((__v4sf)
6477 return (__m512d) __builtin_ia32_vpermi2varpd512_mask ((__v8df) __A,
6488 return (__m512) __builtin_ia32_vpermi2varps512_mask ((__v16sf) __A,
6497 __mmask8 __U, __m512i __B)
6499 return (__m512i) __builtin_ia32_vpermi2varq512_mask ((__v8di) __A,
6506 #define _mm512_permute_pd(X, C) __extension__ ({ \
6507 (__m512d)__builtin_shufflevector((__v8df)(__m512d)(X), \
6508 (__v8df)_mm512_undefined_pd(), \
6509 0 + (((C) >> 0) & 0x1), \
6510 0 + (((C) >> 1) & 0x1), \
6511 2 + (((C) >> 2) & 0x1), \
6512 2 + (((C) >> 3) & 0x1), \
6513 4 + (((C) >> 4) & 0x1), \
6514 4 + (((C) >> 5) & 0x1), \
6515 6 + (((C) >> 6) & 0x1), \
6516 6 + (((C) >> 7) & 0x1)); })
6518 #define _mm512_mask_permute_pd(W, U, X, C) __extension__ ({ \
6519 (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
6520 (__v8df)_mm512_permute_pd((X), (C)), \
6521 (__v8df)(__m512d)(W)); })
6523 #define _mm512_maskz_permute_pd(U, X, C) __extension__ ({ \
6524 (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
6525 (__v8df)_mm512_permute_pd((X), (C)), \
6526 (__v8df)_mm512_setzero_pd()); })
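/* _mm512_permute_ps applies the same 8-bit selector to every 128-bit lane:
   each 2-bit field of C picks one of the four floats within that lane, exactly
   as the shuffle indices in the expansion below spell out. */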
6528 #define _mm512_permute_ps(X, C) __extension__ ({ \
6529 (__m512)__builtin_shufflevector((__v16sf)(__m512)(X), \
6530 (__v16sf)_mm512_undefined_ps(), \
6531 0 + (((C) >> 0) & 0x3), \
6532 0 + (((C) >> 2) & 0x3), \
6533 0 + (((C) >> 4) & 0x3), \
6534 0 + (((C) >> 6) & 0x3), \
6535 4 + (((C) >> 0) & 0x3), \
6536 4 + (((C) >> 2) & 0x3), \
6537 4 + (((C) >> 4) & 0x3), \
6538 4 + (((C) >> 6) & 0x3), \
6539 8 + (((C) >> 0) & 0x3), \
6540 8 + (((C) >> 2) & 0x3), \
6541 8 + (((C) >> 4) & 0x3), \
6542 8 + (((C) >> 6) & 0x3), \
6543 12 + (((C) >> 0) & 0x3), \
6544 12 + (((C) >> 2) & 0x3), \
6545 12 + (((C) >> 4) & 0x3), \
6546 12 + (((C) >> 6) & 0x3)); })
6548 #define _mm512_mask_permute_ps(W, U, X, C) __extension__ ({ \
6549 (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
6550 (__v16sf)_mm512_permute_ps((X), (C)), \
6551 (__v16sf)(__m512)(W)); })
6553 #define _mm512_maskz_permute_ps(U, X, C) __extension__ ({ \
6554 (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
6555 (__v16sf)_mm512_permute_ps((X), (C)), \
6556 (__v16sf)_mm512_setzero_ps()); })
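/* The permutevar (vpermilvar) forms below perform the same within-lane
   selection as _mm512_permute_pd/_ps, except that the selector for each
   destination element comes from a control vector rather than from an
   immediate. */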
6561 return (__m512d) __builtin_ia32_vpermilvarpd512_mask ((__v8df) __A,
6571 return (__m512d) __builtin_ia32_vpermilvarpd512_mask ((__v8df) __A,
6580 return (__m512d) __builtin_ia32_vpermilvarpd512_mask ((__v8df) __A,
6590 return (__m512) __builtin_ia32_vpermilvarps512_mask ((__v16sf) __A,
6600 return (__m512) __builtin_ia32_vpermilvarps512_mask ((__v16sf) __A,
6609 return (__m512) __builtin_ia32_vpermilvarps512_mask ((__v16sf) __A,
6619 return (__m512d) __builtin_ia32_vpermt2varpd512_mask ((__v8di) __I
6629 return (__m512d) __builtin_ia32_vpermt2varpd512_mask ((__v8di) __I
6640 return (__m512d) __builtin_ia32_vpermt2varpd512_maskz ((__v8di) __I
6650 return (__m512) __builtin_ia32_vpermt2varps512_mask ((__v16si) __I
6660 return (__m512) __builtin_ia32_vpermt2varps512_mask ((__v16si) __I
6671 return (__m512) __builtin_ia32_vpermt2varps512_maskz ((__v16si) __I
6681 return (__mmask16) __builtin_ia32_ptestnmd512 ((__v16si) __A,
6689 return (__mmask16) __builtin_ia32_ptestnmd512 ((__v16si) __A,
6690 (__v16si) __B, __U);
6696 return (__mmask8) __builtin_ia32_ptestnmq512 ((__v8di) __A,
6704 return (__mmask8) __builtin_ia32_ptestnmq512 ((__v8di) __A,
6708 #define _mm512_cvtt_roundpd_epu32(A, R) __extension__ ({ \
6709 (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
6710 (__v8si)_mm256_undefined_si256(), \
6711 (__mmask8)-1, (int)(R)); })
6713 #define _mm512_mask_cvtt_roundpd_epu32(W, U, A, R) __extension__ ({ \
6714 (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
6715 (__v8si)(__m256i)(W), \
6716 (__mmask8)(U), (int)(R)); })
6718 #define _mm512_maskz_cvtt_roundpd_epu32(U, A, R) __extension__ ({ \
6719 (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
6720 (__v8si)_mm256_setzero_si256(), \
6721 (__mmask8)(U), (int)(R)); })
6726 return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
6736 return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
6745 return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
6752 #define _mm_roundscale_round_sd(A, B, imm, R) __extension__ ({ \
6753 (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
6754 (__v2df)(__m128d)(B), \
6755 (__v2df)_mm_setzero_pd(), \
6756 (__mmask8)-1, (int)(imm), \
6759 #define _mm_roundscale_sd(A, B, imm) __extension__ ({ \
6760 (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
6761 (__v2df)(__m128d)(B), \
6762 (__v2df)_mm_setzero_pd(), \
6763 (__mmask8)-1, (int)(imm), \
6764 _MM_FROUND_CUR_DIRECTION); })
6766 #define _mm_mask_roundscale_sd(W, U, A, B, imm) __extension__ ({ \
6767 (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
6768 (__v2df)(__m128d)(B), \
6769 (__v2df)(__m128d)(W), \
6770 (__mmask8)(U), (int)(imm), \
6771 _MM_FROUND_CUR_DIRECTION); })
6773 #define _mm_mask_roundscale_round_sd(W, U, A, B, I, R) __extension__ ({ \
6774 (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
6775 (__v2df)(__m128d)(B), \
6776 (__v2df)(__m128d)(W), \
6777 (__mmask8)(U), (int)(I), \
6780 #define _mm_maskz_roundscale_sd(U, A, B, I) __extension__ ({ \
6781 (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
6782 (__v2df)(__m128d)(B), \
6783 (__v2df)_mm_setzero_pd(), \
6784 (__mmask8)(U), (int)(I), \
6785 _MM_FROUND_CUR_DIRECTION); })
6787 #define _mm_maskz_roundscale_round_sd(U, A, B, I, R) __extension__ ({ \
6788 (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
6789 (__v2df)(__m128d)(B), \
6790 (__v2df)_mm_setzero_pd(), \
6791 (__mmask8)(U), (int)(I), \
6794 #define _mm_roundscale_round_ss(A, B, imm, R) __extension__ ({ \
6795 (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
6796 (__v4sf)(__m128)(B), \
6797 (__v4sf)_mm_setzero_ps(), \
6798 (__mmask8)-1, (int)(imm), \
6801 #define _mm_roundscale_ss(A, B, imm) __extension__ ({ \
6802 (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
6803 (__v4sf)(__m128)(B), \
6804 (__v4sf)_mm_setzero_ps(), \
6805 (__mmask8)-1, (int)(imm), \
6806 _MM_FROUND_CUR_DIRECTION); })
6808 #define _mm_mask_roundscale_ss(W, U, A, B, I) __extension__ ({ \
6809 (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
6810 (__v4sf)(__m128)(B), \
6811 (__v4sf)(__m128)(W), \
6812 (__mmask8)(U), (int)(I), \
6813 _MM_FROUND_CUR_DIRECTION); })
6815 #define _mm_mask_roundscale_round_ss(W, U, A, B, I, R) __extension__ ({ \
6816 (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
6817 (__v4sf)(__m128)(B), \
6818 (__v4sf)(__m128)(W), \
6819 (__mmask8)(U), (int)(I), \
6822 #define _mm_maskz_roundscale_ss(U, A, B, I) __extension__ ({ \
6823 (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
6824 (__v4sf)(__m128)(B), \
6825 (__v4sf)_mm_setzero_ps(), \
6826 (__mmask8)(U), (int)(I), \
6827 _MM_FROUND_CUR_DIRECTION); })
6829 #define _mm_maskz_roundscale_round_ss(U, A, B, I, R) __extension__ ({ \
6830 (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
6831 (__v4sf)(__m128)(B), \
6832 (__v4sf)_mm_setzero_ps(), \
6833 (__mmask8)(U), (int)(I), \
6836 #define _mm512_scalef_round_pd(A, B, R) __extension__ ({ \
6837 (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
6838 (__v8df)(__m512d)(B), \
6839 (__v8df)_mm512_undefined_pd(), \
6840 (__mmask8)-1, (int)(R)); })
6842 #define _mm512_mask_scalef_round_pd(W, U, A, B, R) __extension__ ({ \
6843 (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
6844 (__v8df)(__m512d)(B), \
6845 (__v8df)(__m512d)(W), \
6846 (__mmask8)(U), (int)(R)); })
6848 #define _mm512_maskz_scalef_round_pd(U, A, B, R) __extension__ ({ \
6849 (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
6850 (__v8df)(__m512d)(B), \
6851 (__v8df)_mm512_setzero_pd(), \
6852 (__mmask8)(U), (int)(R)); })
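/* scalef computes A * 2^floor(B) element-wise.  The function forms below use
   the current rounding mode; the _round_ macros take it explicitly. */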
6857 return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
6868 return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
6878 return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
6886 #define _mm512_scalef_round_ps(A, B, R) __extension__ ({ \
6887 (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
6888 (__v16sf)(__m512)(B), \
6889 (__v16sf)_mm512_undefined_ps(), \
6890 (__mmask16)-1, (int)(R)); })
6892 #define _mm512_mask_scalef_round_ps(W, U, A, B, R) __extension__ ({ \
6893 (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
6894 (__v16sf)(__m512)(B), \
6895 (__v16sf)(__m512)(W), \
6896 (__mmask16)(U), (int)(R)); })
6898 #define _mm512_maskz_scalef_round_ps(U, A, B, R) __extension__ ({ \
6899 (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
6900 (__v16sf)(__m512)(B), \
6901 (__v16sf)_mm512_setzero_ps(), \
6902 (__mmask16)(U), (int)(R)); })
6907 return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
6918 return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
6928 return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
6936 #define _mm_scalef_round_sd(A, B, R) __extension__ ({ \
6937 (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
6938 (__v2df)(__m128d)(B), \
6939 (__v2df)_mm_setzero_pd(), \
6940 (__mmask8)-1, (int)(R)); })
6945 return (__m128d) __builtin_ia32_scalefsd_round_mask ((__v2df) __A,
6954 return (__m128d) __builtin_ia32_scalefsd_round_mask ( (__v2df) __A,
6961 #define _mm_mask_scalef_round_sd(W, U, A, B, R) __extension__ ({ \
6962 (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
6963 (__v2df)(__m128d)(B), \
6964 (__v2df)(__m128d)(W), \
6965 (__mmask8)(U), (int)(R)); })
6970 return (__m128d) __builtin_ia32_scalefsd_round_mask ( (__v2df) __A,
6977 #define _mm_maskz_scalef_round_sd(U, A, B, R) __extension__ ({ \
6978 (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
6979 (__v2df)(__m128d)(B), \
6980 (__v2df)_mm_setzero_pd(), \
6981 (__mmask8)(U), (int)(R)); })
6983 #define _mm_scalef_round_ss(A, B, R) __extension__ ({ \
6984 (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
6985 (__v4sf)(__m128)(B), \
6986 (__v4sf)_mm_setzero_ps(), \
6987 (__mmask8)-1, (int)(R)); })
6992 return (__m128) __builtin_ia32_scalefss_round_mask ((__v4sf) __A,
7001 return (__m128) __builtin_ia32_scalefss_round_mask ( (__v4sf) __A,
7008 #define _mm_mask_scalef_round_ss(W, U, A, B, R) __extension__ ({ \
7009 (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
7010 (__v4sf)(__m128)(B), \
7011 (__v4sf)(__m128)(W), \
7012 (__mmask8)(U), (int)(R)); })
7017 return (__m128) __builtin_ia32_scalefss_round_mask ( (__v4sf) __A,
7024 #define _mm_maskz_scalef_round_ss(U, A, B, R) __extension__ ({ \
7025 (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
7026 (__v4sf)(__m128)(B), \
7027 (__v4sf)_mm_setzero_ps(), \
7029 (__mmask8)(U), (int)(R)); })
7031 #define _mm512_srai_epi32(A, B) __extension__ ({ \
7032 (__m512i)__builtin_ia32_psradi512_mask((__v16si)(__m512i)(A), (int)(B), \
7033 (__v16si)_mm512_setzero_si512(), (__mmask16)-1); })
7036 #define _mm512_mask_srai_epi32(W, U, A, B) __extension__ ({ \
7037 (__m512i)__builtin_ia32_psradi512_mask((__v16si)(__m512i)(A), (int)(B), \
7038 (__v16si)(__m512i)(W), (__mmask16)(U)); })
7041 #define _mm512_maskz_srai_epi32(U, A, B) __extension__ ({ \
7042 (__m512i)__builtin_ia32_psradi512_mask((__v16si)(__m512i)(A), (int)(B), \
7043 (__v16si)_mm512_setzero_si512(), (__mmask16)(U)); })
7046 #define _mm512_srai_epi64(A, B) __extension__ ({ \
7047 (__m512i)__builtin_ia32_psraqi512_mask((__v8di)(__m512i)(A), (int)(B), \
7048 (__v8di)_mm512_setzero_si512(), (__mmask8)-1); })
7051 #define _mm512_mask_srai_epi64(W, U, A, B) __extension__ ({ \
7052 (__m512i)__builtin_ia32_psraqi512_mask((__v8di)(__m512i)(A), (int)(B), \
7053 (__v8di)(__m512i)(W), (__mmask8)(U)); })
7056 #define _mm512_maskz_srai_epi64(U, A, B) __extension__ ({ \
7057 (__m512i)__builtin_ia32_psraqi512_mask((__v8di)(__m512i)(A), (int)(B), \
7058 (__v8di)_mm512_setzero_si512(), (__mmask8)(U)); })
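/* Illustrative usage sketch (not part of the original header): srai shifts
 * each element right arithmetically by an immediate, replicating the sign bit:
 *
 *   __m512i v = _mm512_set1_epi32(-8);
 *   __m512i r = _mm512_srai_epi32(v, 2);   // every lane becomes -2
 */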
7061 #define _mm512_shuffle_f32x4(A, B, imm) __extension__ ({ \
7062 (__m512)__builtin_ia32_shuf_f32x4_mask((__v16sf)(__m512)(A), \
7063 (__v16sf)(__m512)(B), (int)(imm), \
7064 (__v16sf)_mm512_undefined_ps(), (__mmask16)-1); })
7067 #define _mm512_mask_shuffle_f32x4(W, U, A, B, imm) __extension__ ({ \
7068 (__m512)__builtin_ia32_shuf_f32x4_mask((__v16sf)(__m512)(A), \
7069 (__v16sf)(__m512)(B), (int)(imm), \
7070 (__v16sf)(__m512)(W), (__mmask16)(U)); })
7073 #define _mm512_maskz_shuffle_f32x4(U, A, B, imm) __extension__ ({ \
7074 (__m512)__builtin_ia32_shuf_f32x4_mask((__v16sf)(__m512)(A), \
7075 (__v16sf)(__m512)(B), (int)(imm), \
7076 (__v16sf)_mm512_setzero_ps(), (__mmask16)(U)); })
7079 #define _mm512_shuffle_f64x2(A, B, imm) __extension__ ({ \
7080 (__m512d)__builtin_ia32_shuf_f64x2_mask((__v8df)(__m512d)(A), \
7081 (__v8df)(__m512d)(B), (int)(imm), \
7082 (__v8df)_mm512_undefined_pd(), (__mmask8)-1); })
7085 #define _mm512_mask_shuffle_f64x2(W, U, A, B, imm) __extension__ ({ \
7086 (__m512d)__builtin_ia32_shuf_f64x2_mask((__v8df)(__m512d)(A), \
7087 (__v8df)(__m512d)(B), (int)(imm), \
7088 (__v8df)(__m512d)(W), (__mmask8)(U)); })
7091 #define _mm512_maskz_shuffle_f64x2(U, A, B, imm) __extension__ ({ \
7092 (__m512d)__builtin_ia32_shuf_f64x2_mask((__v8df)(__m512d)(A), \
7093 (__v8df)(__m512d)(B), (int)(imm), \
7094 (__v8df)_mm512_setzero_pd(), (__mmask8)(U)); })
7097 #define _mm512_shuffle_i32x4(A, B, imm) __extension__ ({ \
7098 (__m512i)__builtin_ia32_shuf_i32x4_mask((__v16si)(__m512i)(A), \
7099 (__v16si)(__m512i)(B), (int)(imm), \
7100 (__v16si)_mm512_setzero_si512(), (__mmask16)-1); })
7103 #define _mm512_mask_shuffle_i32x4(W, U, A, B, imm) __extension__ ({ \
7104 (__m512i)__builtin_ia32_shuf_i32x4_mask((__v16si)(__m512i)(A), \
7105 (__v16si)(__m512i)(B), (int)(imm), \
7106 (__v16si)(__m512i)(W), (__mmask16)(U)); })
7109 #define _mm512_maskz_shuffle_i32x4(U, A, B, imm) __extension__ ({ \
7110 (__m512i)__builtin_ia32_shuf_i32x4_mask((__v16si)(__m512i)(A), \
7111 (__v16si)(__m512i)(B), (int)(imm), \
7112 (__v16si)_mm512_setzero_si512(), (__mmask16)(U)); })
7115 #define _mm512_shuffle_i64x2(A, B, imm) __extension__ ({ \
7116 (__m512i)__builtin_ia32_shuf_i64x2_mask((__v8di)(__m512i)(A), \
7117 (__v8di)(__m512i)(B), (int)(imm), \
7118 (__v8di)_mm512_setzero_si512(), (__mmask8)-1); })
7121 #define _mm512_mask_shuffle_i64x2(W, U, A, B, imm) __extension__ ({ \
7122 (__m512i)__builtin_ia32_shuf_i64x2_mask((__v8di)(__m512i)(A), \
7123 (__v8di)(__m512i)(B), (int)(imm), \
7124 (__v8di)(__m512i)(W), (__mmask8)(U)); })
7127 #define _mm512_maskz_shuffle_i64x2(U, A, B, imm) __extension__ ({ \
7128 (__m512i)__builtin_ia32_shuf_i64x2_mask((__v8di)(__m512i)(A), \
7129 (__v8di)(__m512i)(B), (int)(imm), \
7130 (__v8di)_mm512_setzero_si512(), (__mmask8)(U)); })
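/* Illustrative usage sketch (not part of the original header): the
 * shuffle_*32x4 / *64x2 macros select whole 128-bit lanes, two immediate bits
 * per destination lane; the low two lanes come from A, the high two from B.
 * Assuming __m512i a and b declared by the caller:
 *
 *   __m512i r = _mm512_shuffle_i32x4(a, b, 0xE4);
 *   // 0xE4 == (3<<6)|(2<<4)|(1<<2)|0: lanes 0,1 of a then lanes 2,3 of b
 */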
7133 #define _mm512_shuffle_pd(A, B, M) __extension__ ({ \
7134 (__m512d)__builtin_shufflevector((__v8df)(__m512d)(A), \
7135 (__v8df)(__m512d)(B), \
7136 0 + (((M) >> 0) & 0x1), \
7137 8 + (((M) >> 1) & 0x1), \
7138 2 + (((M) >> 2) & 0x1), \
7139 10 + (((M) >> 3) & 0x1), \
7140 4 + (((M) >> 4) & 0x1), \
7141 12 + (((M) >> 5) & 0x1), \
7142 6 + (((M) >> 6) & 0x1), \
7143 14 + (((M) >> 7) & 0x1)); })
7145 #define _mm512_mask_shuffle_pd(W, U, A, B, M) __extension__ ({ \
7146 (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
7147 (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
7148 (__v8df)(__m512d)(W)); })
7150 #define _mm512_maskz_shuffle_pd(U, A, B, M) __extension__ ({ \
7151 (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
7152 (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
7153 (__v8df)_mm512_setzero_pd()); })
7155 #define _mm512_shuffle_ps(A, B, M) __extension__ ({ \
7156 (__m512)__builtin_shufflevector((__v16sf)(__m512)(A), \
7157 (__v16sf)(__m512)(B), \
7158 0 + (((M) >> 0) & 0x3), \
7159 0 + (((M) >> 2) & 0x3), \
7160 16 + (((M) >> 4) & 0x3), \
7161 16 + (((M) >> 6) & 0x3), \
7162 4 + (((M) >> 0) & 0x3), \
7163 4 + (((M) >> 2) & 0x3), \
7164 20 + (((M) >> 4) & 0x3), \
7165 20 + (((M) >> 6) & 0x3), \
7166 8 + (((M) >> 0) & 0x3), \
7167 8 + (((M) >> 2) & 0x3), \
7168 24 + (((M) >> 4) & 0x3), \
7169 24 + (((M) >> 6) & 0x3), \
7170 12 + (((M) >> 0) & 0x3), \
7171 12 + (((M) >> 2) & 0x3), \
7172 28 + (((M) >> 4) & 0x3), \
7173 28 + (((M) >> 6) & 0x3)); })
7175 #define _mm512_mask_shuffle_ps(W, U, A, B, M) __extension__ ({ \
7176 (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
7177 (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
7178 (__v16sf)(__m512)(W)); })
7180 #define _mm512_maskz_shuffle_ps(U, A, B, M) __extension__ ({ \
7181 (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
7182 (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
7183 (__v16sf)_mm512_setzero_ps()); })
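/* Illustrative usage sketch (not part of the original header):
 * _mm512_shuffle_ps applies the classic SHUFPS selector independently inside
 * each 128-bit lane.  Assuming __m512 a and b declared by the caller:
 *
 *   __m512 r = _mm512_shuffle_ps(a, b, 0x00);
 *   // within every 128-bit lane: a0, a0, b0, b0
 */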
7185 #define _mm_sqrt_round_sd(A, B, R) __extension__ ({ \
7186 (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
7187 (__v2df)(__m128d)(B), \
7188 (__v2df)_mm_setzero_pd(), \
7189 (__mmask8)-1, (int)(R)); })
7194 return (__m128d) __builtin_ia32_sqrtsd_round_mask ( (__v2df) __A,
7201 #define _mm_mask_sqrt_round_sd(W, U, A, B, R) __extension__ ({ \
7202 (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
7203 (__v2df)(__m128d)(B), \
7204 (__v2df)(__m128d)(W), \
7205 (__mmask8)(U), (int)(R)); })
7210 return (__m128d) __builtin_ia32_sqrtsd_round_mask ( (__v2df) __A,
7217 #define _mm_maskz_sqrt_round_sd(U, A, B, R) __extension__ ({ \
7218 (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
7219 (__v2df)(__m128d)(B), \
7220 (__v2df)_mm_setzero_pd(), \
7221 (__mmask8)(U), (int)(R)); })
7223 #define _mm_sqrt_round_ss(A, B, R) __extension__ ({ \
7224 (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
7225 (__v4sf)(__m128)(B), \
7226 (__v4sf)_mm_setzero_ps(), \
7227 (__mmask8)-1, (int)(R)); })
7232 return (__m128) __builtin_ia32_sqrtss_round_mask ( (__v4sf) __A,
7239 #define _mm_mask_sqrt_round_ss(W, U, A, B, R) __extension__ ({ \
7240 (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
7241 (__v4sf)(__m128)(B), \
7242 (__v4sf)(__m128)(W), (__mmask8)(U), \
7248 return (__m128) __builtin_ia32_sqrtss_round_mask ( (__v4sf) __A,
7255 #define _mm_maskz_sqrt_round_ss(U, A, B, R) __extension__ ({ \
7256 (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
7257 (__v4sf)(__m128)(B), \
7258 (__v4sf)_mm_setzero_ps(), \
7259 (__mmask8)(U), (int)(R)); })
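/* Illustrative usage sketch (not part of the original header): the scalar
 * sqrt_round forms take the square root of the low element of B, copy the
 * upper element(s) from A, and let the caller pick the rounding mode.
 * Assuming __m128d a and b declared by the caller:
 *
 *   __m128d r = _mm_sqrt_round_sd(a, b, _MM_FROUND_CUR_DIRECTION);
 */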
7264 return (__m512) __builtin_ia32_broadcastf32x4_512 ((__v4sf) __A,
7273 return (__m512) __builtin_ia32_broadcastf32x4_512 ((__v4sf) __A,
7281 return (__m512) __builtin_ia32_broadcastf32x4_512 ((__v4sf) __A,
7290 return (__m512d) __builtin_ia32_broadcastf64x4_512 ((__v4df) __A,
7299 return (__m512d) __builtin_ia32_broadcastf64x4_512 ((__v4df) __A,
7307 return (__m512d) __builtin_ia32_broadcastf64x4_512 ((__v4df) __A,
7316 return (__m512i) __builtin_ia32_broadcasti32x4_512 ((__v4si) __A,
7325 return (__m512i) __builtin_ia32_broadcasti32x4_512 ((__v4si) __A,
7333 return (__m512i) __builtin_ia32_broadcasti32x4_512 ((__v4si) __A,
7342 return (__m512i) __builtin_ia32_broadcasti64x4_512 ((__v4di) __A,
7351 return (__m512i) __builtin_ia32_broadcasti64x4_512 ((__v4di) __A,
7359 return (__m512i) __builtin_ia32_broadcasti64x4_512 ((__v4di) __A,
7368 return (__m512d)__builtin_ia32_selectpd_512(__M,
7376 return (__m512d)__builtin_ia32_selectpd_512(__M,
7384 return (__m512)__builtin_ia32_selectps_512(__M,
7392 return (__m512)__builtin_ia32_selectps_512(__M,
7400 return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
7408 return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
7409 (__v16qi) __O, __M);
7415 return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
7423 __builtin_ia32_pmovsdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
7429 return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
7437 return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
7438 (__v16hi) __O, __M);
7444 return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
7452 __builtin_ia32_pmovsdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M);
7458 return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
7466 return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
7467 (__v16qi) __O, __M);
7473 return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
7481 __builtin_ia32_pmovsqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
7487 return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
7495 return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
7502 return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
7510 __builtin_ia32_pmovsqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M);
7516 return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
7524 return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
7531 return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
7539 __builtin_ia32_pmovsqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M);
7545 return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
7553 return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
7561 return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
7569 __builtin_ia32_pmovusdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
7575 return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
7583 return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
7591 return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
7599 __builtin_ia32_pmovusdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M);
7605 return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
7613 return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
7621 return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
7629 __builtin_ia32_pmovusqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
7635 return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
7643 return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
7650 return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
7658 __builtin_ia32_pmovusqd512mem_mask ((__v8si*) __P, (__v8di) __A, __M);
7664 return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
7672 return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
7679 return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
7687 __builtin_ia32_pmovusqw512mem_mask ((__v8hi*) __P, (__v8di) __A, __M);
7693 return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
7701 return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
7702 (__v16qi) __O, __M);
7708 return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
7716 __builtin_ia32_pmovdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
7722 return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
7730 return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
7731 (__v16hi) __O, __M);
7737 return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
7745 __builtin_ia32_pmovdw512mem_mask ((__v16hi *) __P, (__v16si) __A, __M);
7751 return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
7759 return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
7760 (__v16qi) __O, __M);
7766 return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
7774 __builtin_ia32_pmovqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
7780 return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
7788 return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
7795 return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
7803 __builtin_ia32_pmovqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M);
7809 return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
7817 return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
7824 return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
7832 __builtin_ia32_pmovqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M);
7835 #define _mm512_extracti32x4_epi32(A, imm) __extension__ ({ \
7836 (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
7837 (__v4si)_mm_undefined_si128(), (__mmask8)-1); })
7840 #define _mm512_mask_extracti32x4_epi32(W, U, A, imm) __extension__ ({ \
7841 (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
7842 (__v4si)(__m128i)(W), (__mmask8)(U)); })
7845 #define _mm512_maskz_extracti32x4_epi32(U, A, imm) __extension__ ({ \
7846 (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
7847 (__v4si)_mm_setzero_si128(), (__mmask8)(U)); })
7850 #define _mm512_extracti64x4_epi64(A, imm) __extension__ ({ \
7851 (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
7852 (__v4di)_mm256_undefined_si256(), (__mmask8)-1); })
7855 #define _mm512_mask_extracti64x4_epi64(W, U, A, imm) __extension__ ({ \
7856 (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
7857 (__v4di)(__m256i)(W), (__mmask8)(U)); })
7860 #define _mm512_maskz_extracti64x4_epi64(U, A, imm) __extension__ ({ \
7861 (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
7862 (__v4di)_mm256_setzero_si256(), (__mmask8)(U)); })
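/* Illustrative usage sketch (not part of the original header): extracti32x4
 * pulls one of the four 128-bit lanes out of a 512-bit vector, extracti64x4
 * one of the two 256-bit halves.  Assuming __m512i v declared by the caller:
 *
 *   __m128i hi = _mm512_extracti32x4_epi32(v, 3);   // bits 511:384 of v
 */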
7865 #define _mm512_insertf64x4(A, B, imm) __extension__ ({ \
7866 (__m512d)__builtin_ia32_insertf64x4_mask((__v8df)(__m512d)(A), \
7867 (__v4df)(__m256d)(B), (int)(imm), \
7868 (__v8df)_mm512_undefined_pd(), \
7871 #define _mm512_mask_insertf64x4(W, U, A, B, imm) __extension__ ({ \
7872 (__m512d)__builtin_ia32_insertf64x4_mask((__v8df)(__m512d)(A), \
7873 (__v4df)(__m256d)(B), (int)(imm), \
7874 (__v8df)(__m512d)(W), \
7877 #define _mm512_maskz_insertf64x4(U, A, B, imm) __extension__ ({ \
7878 (__m512d)__builtin_ia32_insertf64x4_mask((__v8df)(__m512d)(A), \
7879 (__v4df)(__m256d)(B), (int)(imm), \
7880 (__v8df)_mm512_setzero_pd(), \
7883 #define _mm512_inserti64x4(A, B, imm) __extension__ ({ \
7884 (__m512i)__builtin_ia32_inserti64x4_mask((__v8di)(__m512i)(A), \
7885 (__v4di)(__m256i)(B), (int)(imm), \
7886 (__v8di)_mm512_setzero_si512(), \
7889 #define _mm512_mask_inserti64x4(W, U, A, B, imm) __extension__ ({ \
7890 (__m512i)__builtin_ia32_inserti64x4_mask((__v8di)(__m512i)(A), \
7891 (__v4di)(__m256i)(B), (int)(imm), \
7892 (__v8di)(__m512i)(W), \
7895 #define _mm512_maskz_inserti64x4(U, A, B, imm) __extension__ ({ \
7896 (__m512i)__builtin_ia32_inserti64x4_mask((__v8di)(__m512i)(A), \
7897 (__v4di)(__m256i)(B), (int)(imm), \
7898 (__v8di)_mm512_setzero_si512(), \
7901 #define _mm512_insertf32x4(A, B, imm) __extension__ ({ \
7902 (__m512)__builtin_ia32_insertf32x4_mask((__v16sf)(__m512)(A), \
7903 (__v4sf)(__m128)(B), (int)(imm), \
7904 (__v16sf)_mm512_undefined_ps(), \
7907 #define _mm512_mask_insertf32x4(W, U, A, B, imm) __extension__ ({ \
7908 (__m512)__builtin_ia32_insertf32x4_mask((__v16sf)(__m512)(A), \
7909 (__v4sf)(__m128)(B), (int)(imm), \
7910 (__v16sf)(__m512)(W), \
7913 #define _mm512_maskz_insertf32x4(U, A, B, imm) __extension__ ({ \
7914 (__m512)__builtin_ia32_insertf32x4_mask((__v16sf)(__m512)(A), \
7915 (__v4sf)(__m128)(B), (int)(imm), \
7916 (__v16sf)_mm512_setzero_ps(), \
7919 #define _mm512_inserti32x4(A, B, imm) __extension__ ({ \
7920 (__m512i)__builtin_ia32_inserti32x4_mask((__v16si)(__m512i)(A), \
7921 (__v4si)(__m128i)(B), (int)(imm), \
7922 (__v16si)_mm512_setzero_si512(), \
7925 #define _mm512_mask_inserti32x4(W, U, A, B, imm) __extension__ ({ \
7926 (__m512i)__builtin_ia32_inserti32x4_mask((__v16si)(__m512i)(A), \
7927 (__v4si)(__m128i)(B), (int)(imm), \
7928 (__v16si)(__m512i)(W), \
7931 #define _mm512_maskz_inserti32x4(U, A, B, imm) __extension__ ({ \
7932 (__m512i)__builtin_ia32_inserti32x4_mask((__v16si)(__m512i)(A), \
7933 (__v4si)(__m128i)(B), (int)(imm), \
7934 (__v16si)_mm512_setzero_si512(), \
7937 #define _mm512_getmant_round_pd(A, B, C, R) __extension__ ({ \
7938 (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
7939 (int)(((C)<<2) | (B)), \
7940 (__v8df)_mm512_undefined_pd(), \
7941 (__mmask8)-1, (int)(R)); })
7943 #define _mm512_mask_getmant_round_pd(W, U, A, B, C, R) __extension__ ({ \
7944 (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
7945 (int)(((C)<<2) | (B)), \
7946 (__v8df)(__m512d)(W), \
7947 (__mmask8)(U), (int)(R)); })
7949 #define _mm512_maskz_getmant_round_pd(U, A, B, C, R) __extension__ ({ \
7950 (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
7951 (int)(((C)<<2) | (B)), \
7952 (__v8df)_mm512_setzero_pd(), \
7953 (__mmask8)(U), (int)(R)); })
7955 #define _mm512_getmant_pd(A, B, C) __extension__ ({ \
7956 (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
7957 (int)(((C)<<2) | (B)), \
7958 (__v8df)_mm512_setzero_pd(), \
7960 _MM_FROUND_CUR_DIRECTION); })
7962 #define _mm512_mask_getmant_pd(W, U, A, B, C) __extension__ ({ \
7963 (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
7964 (int)(((C)<<2) | (B)), \
7965 (__v8df)(__m512d)(W), \
7967 _MM_FROUND_CUR_DIRECTION); })
7969 #define _mm512_maskz_getmant_pd(U, A, B, C) __extension__ ({ \
7970 (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
7971 (int)(((C)<<2) | (B)), \
7972 (__v8df)_mm512_setzero_pd(), \
7974 _MM_FROUND_CUR_DIRECTION); })
7976 #define _mm512_getmant_round_ps(A, B, C, R) __extension__ ({ \
7977 (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
7978 (int)(((C)<<2) | (B)), \
7979 (__v16sf)_mm512_undefined_ps(), \
7980 (__mmask16)-1, (int)(R)); })
7982 #define _mm512_mask_getmant_round_ps(W, U, A, B, C, R) __extension__ ({ \
7983 (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
7984 (int)(((C)<<2) | (B)), \
7985 (__v16sf)(__m512)(W), \
7986 (__mmask16)(U), (int)(R)); })
7988 #define _mm512_maskz_getmant_round_ps(U, A, B, C, R) __extension__ ({ \
7989 (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
7990 (int)(((C)<<2) | (B)), \
7991 (__v16sf)_mm512_setzero_ps(), \
7992 (__mmask16)(U), (int)(R)); })
7994 #define _mm512_getmant_ps(A, B, C) __extension__ ({ \
7995 (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
7996 (int)(((C)<<2)|(B)), \
7997 (__v16sf)_mm512_undefined_ps(), \
7999 _MM_FROUND_CUR_DIRECTION); })
8001 #define _mm512_mask_getmant_ps(W, U, A, B, C) __extension__ ({ \
8002 (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
8003 (int)(((C)<<2)|(B)), \
8004 (__v16sf)(__m512)(W), \
8006 _MM_FROUND_CUR_DIRECTION); })
8008 #define _mm512_maskz_getmant_ps(U, A, B, C) __extension__ ({ \
8009 (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
8010 (int)(((C)<<2)|(B)), \
8011 (__v16sf)_mm512_setzero_ps(), \
8013 _MM_FROUND_CUR_DIRECTION); })
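/* Illustrative usage sketch (not part of the original header): getmant
 * extracts the normalized mantissa of each element; the first immediate picks
 * the mantissa interval, the second the sign treatment.  Assuming __m512d v
 * declared by the caller:
 *
 *   __m512d m = _mm512_getmant_pd(v, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_src);
 *   // for a lane holding 48.0 (1.5 * 2^5) the result lane is 1.5
 */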
8015 #define _mm512_getexp_round_pd(A, R) __extension__ ({ \
8016 (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
8017 (__v8df)_mm512_undefined_pd(), \
8018 (__mmask8)-1, (int)(R)); })
8020 #define _mm512_mask_getexp_round_pd(W, U, A, R) __extension__ ({ \
8021 (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
8022 (__v8df)(__m512d)(W), \
8023 (__mmask8)(U), (int)(R)); })
8025 #define _mm512_maskz_getexp_round_pd(U, A, R) __extension__ ({ \
8026 (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
8027 (__v8df)_mm512_setzero_pd(), \
8028 (__mmask8)(U), (int)(R)); })
8033 return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
8042 return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
8051 return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
8057 #define _mm512_getexp_round_ps(A, R) __extension__ ({ \
8058 (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
8059 (__v16sf)_mm512_undefined_ps(), \
8060 (__mmask16)-1, (int)(R)); })
8062 #define _mm512_mask_getexp_round_ps(W, U, A, R) __extension__ ({ \
8063 (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
8064 (__v16sf)(__m512)(W), \
8065 (__mmask16)(U), (int)(R)); })
8067 #define _mm512_maskz_getexp_round_ps(U, A, R) __extension__ ({ \
8068 (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
8069 (__v16sf)_mm512_setzero_ps(), \
8070 (__mmask16)(U), (int)(R)); })
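/* Illustrative usage sketch (not part of the original header): getexp returns
 * floor(log2(|x|)) of each element as a floating-point value:
 *
 *   __m512 v = _mm512_set1_ps(48.0f);
 *   __m512 e = _mm512_getexp_round_ps(v, _MM_FROUND_CUR_DIRECTION);
 *   // every lane of e is 5.0f
 */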
8075 return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
8084 return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
8093 return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
8099 #define _mm512_i64gather_ps(index, addr, scale) __extension__ ({ \
8100 (__m256)__builtin_ia32_gatherdiv16sf((__v8sf)_mm256_undefined_ps(), \
8101 (float const *)(addr), \
8102 (__v8di)(__m512i)(index), (__mmask8)-1, \
8105 #define _mm512_mask_i64gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\
8106 (__m256)__builtin_ia32_gatherdiv16sf((__v8sf)(__m256)(v1_old), \
8107 (float const *)(addr), \
8108 (__v8di)(__m512i)(index), (__mmask8)(mask), (int)(scale)); })
8111 #define _mm512_i64gather_epi32(index, addr, scale) __extension__ ({\
8112 (__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_si256(), \
8113 (int const *)(addr), \
8114 (__v8di)(__m512i)(index), \
8115 (__mmask8)-1, (int)(scale)); })
8117 #define _mm512_mask_i64gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\
8118 (__m256i)__builtin_ia32_gatherdiv16si((__v8si)(__m256i)(v1_old), \
8119 (int const *)(addr), \
8120 (__v8di)(__m512i)(index), \
8121 (__mmask8)(mask), (int)(scale)); })
8123 #define _mm512_i64gather_pd(index, addr, scale) __extension__ ({\
8124 (__m512d)__builtin_ia32_gatherdiv8df((__v8df)_mm512_undefined_pd(), \
8125 (double const *)(addr), \
8126 (__v8di)(__m512i)(index), (__mmask8)-1, \
8129 #define _mm512_mask_i64gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\
8130 (__m512d)__builtin_ia32_gatherdiv8df((__v8df)(__m512d)(v1_old), \
8131 (double const *)(addr), \
8132 (__v8di)(__m512i)(index), \
8133 (__mmask8)(mask), (int)(scale)); })
8135 #define _mm512_i64gather_epi64(index, addr, scale) __extension__ ({\
8136 (__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_epi32(), \
8137 (long long const *)(addr), \
8138 (__v8di)(__m512i)(index), (__mmask8)-1, \
8141 #define _mm512_mask_i64gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\
8142 (__m512i)__builtin_ia32_gatherdiv8di((__v8di)(__m512i)(v1_old), \
8143 (long long const *)(addr), \
8144 (__v8di)(__m512i)(index), \
8145 (__mmask8)(mask), (int)(scale)); })
8147 #define _mm512_i32gather_ps(index, addr, scale) __extension__ ({\
8148 (__m512)__builtin_ia32_gathersiv16sf((__v16sf)_mm512_undefined_ps(), \
8149 (float const *)(addr), \
8150 (__v16sf)(__m512)(index), \
8151 (__mmask16)-1, (int)(scale)); })
8153 #define _mm512_mask_i32gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\
8154 (__m512)__builtin_ia32_gathersiv16sf((__v16sf)(__m512)(v1_old), \
8155 (float const *)(addr), \
8156 (__v16sf)(__m512)(index), \
8157 (__mmask16)(mask), (int)(scale)); })
8159 #define _mm512_i32gather_epi32(index, addr, scale) __extension__ ({\
8160 (__m512i)__builtin_ia32_gathersiv16si((__v16si)_mm512_undefined_epi32(), \
8161 (int const *)(addr), \
8162 (__v16si)(__m512i)(index), \
8163 (__mmask16)-1, (int)(scale)); })
8165 #define _mm512_mask_i32gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\
8166 (__m512i)__builtin_ia32_gathersiv16si((__v16si)(__m512i)(v1_old), \
8167 (int const *)(addr), \
8168 (__v16si)(__m512i)(index), \
8169 (__mmask16)(mask), (int)(scale)); })
8171 #define _mm512_i32gather_pd(index, addr, scale) __extension__ ({\
8172 (__m512d)__builtin_ia32_gathersiv8df((__v8df)_mm512_undefined_pd(), \
8173 (double const *)(addr), \
8174 (__v8si)(__m256i)(index), (__mmask8)-1, \
8177 #define _mm512_mask_i32gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\
8178 (__m512d)__builtin_ia32_gathersiv8df((__v8df)(__m512d)(v1_old), \
8179 (double const *)(addr), \
8180 (__v8si)(__m256i)(index), \
8181 (__mmask8)(mask), (int)(scale)); })
8183 #define _mm512_i32gather_epi64(index, addr, scale) __extension__ ({\
8184 (__m512i)__builtin_ia32_gathersiv8di((__v8di)_mm512_undefined_epi32(), \
8185 (long long const *)(addr), \
8186 (__v8si)(__m256i)(index), (__mmask8)-1, \
8189 #define _mm512_mask_i32gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\
8190 (__m512i)__builtin_ia32_gathersiv8di((__v8di)(__m512i)(v1_old), \
8191 (long long const *)(addr), \
8192 (__v8si)(__m256i)(index), \
8193 (__mmask8)(mask), (int)(scale)); })
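/* Illustrative usage sketch (not part of the original header): the gather
 * macros load elements addressed by a vector of indices scaled by a constant.
 * Assuming __m512i idx and const float *table declared by the caller:
 *
 *   __m512 g = _mm512_i32gather_ps(idx, table, 4);   // g[i] = table[idx[i]]
 */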
8195 #define _mm512_i64scatter_ps(addr, index, v1, scale) __extension__ ({\
8196 __builtin_ia32_scatterdiv16sf((float *)(addr), (__mmask8)-1, \
8197 (__v8di)(__m512i)(index), \
8198 (__v8sf)(__m256)(v1), (int)(scale)); })
8200 #define _mm512_mask_i64scatter_ps(addr, mask, index, v1, scale) __extension__ ({\
8201 __builtin_ia32_scatterdiv16sf((float *)(addr), (__mmask8)(mask), \
8202 (__v8di)(__m512i)(index), \
8203 (__v8sf)(__m256)(v1), (int)(scale)); })
8205 #define _mm512_i64scatter_epi32(addr, index, v1, scale) __extension__ ({\
8206 __builtin_ia32_scatterdiv16si((int *)(addr), (__mmask8)-1, \
8207 (__v8di)(__m512i)(index), \
8208 (__v8si)(__m256i)(v1), (int)(scale)); })
8210 #define _mm512_mask_i64scatter_epi32(addr, mask, index, v1, scale) __extension__ ({\
8211 __builtin_ia32_scatterdiv16si((int *)(addr), (__mmask8)(mask), \
8212 (__v8di)(__m512i)(index), \
8213 (__v8si)(__m256i)(v1), (int)(scale)); })
8215 #define _mm512_i64scatter_pd(addr, index, v1, scale) __extension__ ({\
8216 __builtin_ia32_scatterdiv8df((double *)(addr), (__mmask8)-1, \
8217 (__v8di)(__m512i)(index), \
8218 (__v8df)(__m512d)(v1), (int)(scale)); })
8220 #define _mm512_mask_i64scatter_pd(addr, mask, index, v1, scale) __extension__ ({\
8221 __builtin_ia32_scatterdiv8df((double *)(addr), (__mmask8)(mask), \
8222 (__v8di)(__m512i)(index), \
8223 (__v8df)(__m512d)(v1), (int)(scale)); })
8225 #define _mm512_i64scatter_epi64(addr, index, v1, scale) __extension__ ({\
8226 __builtin_ia32_scatterdiv8di((long long *)(addr), (__mmask8)-1, \
8227 (__v8di)(__m512i)(index), \
8228 (__v8di)(__m512i)(v1), (int)(scale)); })
8230 #define _mm512_mask_i64scatter_epi64(addr, mask, index, v1, scale) __extension__ ({\
8231 __builtin_ia32_scatterdiv8di((long long *)(addr), (__mmask8)(mask), \
8232 (__v8di)(__m512i)(index), \
8233 (__v8di)(__m512i)(v1), (int)(scale)); })
8235 #define _mm512_i32scatter_ps(addr, index, v1, scale) __extension__ ({\
8236 __builtin_ia32_scattersiv16sf((float *)(addr), (__mmask16)-1, \
8237 (__v16si)(__m512i)(index), \
8238 (__v16sf)(__m512)(v1), (int)(scale)); })
8240 #define _mm512_mask_i32scatter_ps(addr, mask, index, v1, scale) __extension__ ({\
8241 __builtin_ia32_scattersiv16sf((float *)(addr), (__mmask16)(mask), \
8242 (__v16si)(__m512i)(index), \
8243 (__v16sf)(__m512)(v1), (int)(scale)); })
8245 #define _mm512_i32scatter_epi32(addr, index, v1, scale) __extension__ ({\
8246 __builtin_ia32_scattersiv16si((int *)(addr), (__mmask16)-1, \
8247 (__v16si)(__m512i)(index), \
8248 (__v16si)(__m512i)(v1), (int)(scale)); })
8250 #define _mm512_mask_i32scatter_epi32(addr, mask, index, v1, scale) __extension__ ({\
8251 __builtin_ia32_scattersiv16si((int *)(addr), (__mmask16)(mask), \
8252 (__v16si)(__m512i)(index), \
8253 (__v16si)(__m512i)(v1), (int)(scale)); })
8255 #define _mm512_i32scatter_pd(addr, index, v1, scale) __extension__ ({\
8256 __builtin_ia32_scattersiv8df((double *)(addr), (__mmask8)-1, \
8257 (__v8si)(__m256i)(index), \
8258 (__v8df)(__m512d)(v1), (int)(scale)); })
8260 #define _mm512_mask_i32scatter_pd(addr, mask, index, v1, scale) __extension__ ({\
8261 __builtin_ia32_scattersiv8df((double *)(addr), (__mmask8)(mask), \
8262 (__v8si)(__m256i)(index), \
8263 (__v8df)(__m512d)(v1), (int)(scale)); })
8265 #define _mm512_i32scatter_epi64(addr, index, v1, scale) __extension__ ({\
8266 __builtin_ia32_scattersiv8di((long long *)(addr), (__mmask8)-1, \
8267 (__v8si)(__m256i)(index), \
8268 (__v8di)(__m512i)(v1), (int)(scale)); })
8270 #define _mm512_mask_i32scatter_epi64(addr, mask, index, v1, scale) __extension__ ({\
8271 __builtin_ia32_scattersiv8di((long long *)(addr), (__mmask8)(mask), \
8272 (__v8si)(__m256i)(index), \
8273 (__v8di)(__m512i)(v1), (int)(scale)); })
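/* Illustrative usage sketch (not part of the original header): the scatter
 * macros are the store-side mirror of the gathers.  Assuming int *table,
 * __m512i idx and __m512i vals declared by the caller:
 *
 *   _mm512_i32scatter_epi32(table, idx, vals, 4);    // table[idx[i]] = vals[i]
 */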
8278 return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __A,
8285 #define _mm_mask_fmadd_round_ss(W, U, A, B, R) __extension__({\
8286 (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
8287 (__v4sf)(__m128)(B), \
8288 (__v4sf)(__m128)(W), (__mmask8)(U), \
8294 return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __A,
8301 #define _mm_maskz_fmadd_round_ss(U, A, B, C, R) __extension__ ({\
8302 (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
8303 (__v4sf)(__m128)(B), \
8304 (__v4sf)(__m128)(C), (__mmask8)(U), \
8305 (int)(R)); })
8310 return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W,
8317 #define _mm_mask3_fmadd_round_ss(W, X, Y, U, R) __extension__ ({\
8318 (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
8319 (__v4sf)(__m128)(X), \
8320 (__v4sf)(__m128)(Y), (__mmask8)(U), \
8326 return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __A,
8333 #define _mm_mask_fmsub_round_ss(W, U, A, B, R) __extension__ ({\
8334 (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
8335 -(__v4sf)(__m128)(B), \
8336 (__v4sf)(__m128)(W), (__mmask8)(U), \
8342 return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __A,
8349 #define _mm_maskz_fmsub_round_ss(U, A, B, C, R) __extension__ ({\
8350 (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
8351 (__v4sf)(__m128)(B), \
8352 -(__v4sf)(__m128)(C), (__mmask8)(U), \
8358 return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W,
8365 #define _mm_mask3_fmsub_round_ss(W, X, Y, U, R) __extension__ ({\
8366 (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
8367 (__v4sf)(__m128)(X), \
8368 -(__v4sf)(__m128)(Y), (__mmask8)(U), \
8374 return (__m128) __builtin_ia32_vfmaddss3_mask (-(__v4sf) __A,
8381 #define _mm_mask_fnmadd_round_ss(W, U, A, B, R) __extension__ ({\
8382 (__m128)__builtin_ia32_vfmaddss3_mask(-(__v4sf)(__m128)(A), \
8383 (__v4sf)(__m128)(B), \
8384 (__v4sf)(__m128)(W), (__mmask8)(U), \
8390 return (__m128) __builtin_ia32_vfmaddss3_maskz (-(__v4sf) __A,
8397 #define _mm_maskz_fnmadd_round_ss(U, A, B, C, R) __extension__ ({\
8398 (__m128)__builtin_ia32_vfmaddss3_maskz(-(__v4sf)(__m128)(A), \
8399 (__v4sf)(__m128)(B), \
8400 (__v4sf)(__m128)(C), (__mmask8)(U), \
8406 return (__m128) __builtin_ia32_vfmaddss3_mask3 (-(__v4sf) __W,
8413 #define _mm_mask3_fnmadd_round_ss(W, X, Y, U, R) __extension__({\
8414 (__m128)__builtin_ia32_vfmaddss3_mask3(-(__v4sf)(__m128)(W), \
8415 (__v4sf)(__m128)(X), \
8416 (__v4sf)(__m128)(Y), (__mmask8)(U), \
8422 return (__m128) __builtin_ia32_vfmaddss3_mask (-(__v4sf) __A,
8429 #define _mm_mask_fnmsub_round_ss(W, U, A, B, R) __extension__ ({\
8430 (__m128)__builtin_ia32_vfmaddss3_mask(-(__v4sf)(__m128)(A), \
8431 -(__v4sf)(__m128)(B), \
8432 (__v4sf)(__m128)(W), (__mmask8)(U), \
8438 return (__m128) __builtin_ia32_vfmaddss3_maskz (-(__v4sf) __A,
8445 #define _mm_maskz_fnmsub_round_ss(U, A, B, C, R) __extension__ ({\
8446 (__m128)__builtin_ia32_vfmaddss3_maskz(-(__v4sf)(__m128)(A), \
8447 (__v4sf)(__m128)(B), \
8448 -(__v4sf)(__m128)(C), (__mmask8)(U), \
8449 (int)(R)); })
8454 return (__m128) __builtin_ia32_vfmaddss3_mask3 (-(__v4sf) __W,
8461 #define _mm_mask3_fnmsub_round_ss(W, X, Y, U, R) __extension__({\
8462 (__m128)__builtin_ia32_vfmaddss3_mask3(-(__v4sf)(__m128)(W), \
8463 (__v4sf)(__m128)(X), \
8464 -(__v4sf)(__m128)(Y), (__mmask8)(U), \
8470 return (__m128d) __builtin_ia32_vfmaddsd3_mask ( (__v2df) __A,
8477 #define _mm_mask_fmadd_round_sd(W, U, A, B, R) __extension__({\
8478 (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
8479 (__v2df)(__m128d)(B), \
8480 (__v2df)(__m128d)(W), (__mmask8)(U), \
8486 return (__m128d) __builtin_ia32_vfmaddsd3_maskz ( (__v2df) __A,
8493 #define _mm_maskz_fmadd_round_sd(U, A, B, C, R) __extension__ ({\
8494 (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
8495 (__v2df)(__m128d)(B), \
8496 (__v2df)(__m128d)(C), (__mmask8)(U), \
8497 (int)(R)); })
8502 return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W,
8509 #define _mm_mask3_fmadd_round_sd(W, X, Y, U, R) __extension__ ({\
8510 (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
8511 (__v2df)(__m128d)(X), \
8512 (__v2df)(__m128d)(Y), (__mmask8)(U), \
8518 return (__m128d) __builtin_ia32_vfmaddsd3_mask ( (__v2df) __A,
8525 #define _mm_mask_fmsub_round_sd(W, U, A, B, R) __extension__ ({\
8526 (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
8527 -(__v2df)(__m128d)(B), \
8528 (__v2df)(__m128d)(W), (__mmask8)(U), \
8534 return (__m128d) __builtin_ia32_vfmaddsd3_maskz ( (__v2df) __A,
8541 #define _mm_maskz_fmsub_round_sd(U, A, B, C, R) __extension__ ({\
8542 (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
8543 (__v2df)(__m128d)(B), \
8544 -(__v2df)(__m128d)(C), \
8545 (__mmask8)(U), (int)(R)); })
8550 return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W,
8557 #define _mm_mask3_fmsub_round_sd(W, X, Y, U, R) __extension__ ({\
8558 (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
8559 (__v2df)(__m128d)(X), \
8560 -(__v2df)(__m128d)(Y), \
8561 (__mmask8)(U), (int)(R)); })
8566 return (__m128d) __builtin_ia32_vfmaddsd3_mask ( -(__v2df) __A,
8573 #define _mm_mask_fnmadd_round_sd(W, U, A, B, R) __extension__ ({\
8574 (__m128d)__builtin_ia32_vfmaddsd3_mask(-(__v2df)(__m128d)(A), \
8575 (__v2df)(__m128d)(B), \
8576 (__v2df)(__m128d)(W), (__mmask8)(U), \
8582 return (__m128d) __builtin_ia32_vfmaddsd3_maskz ( -(__v2df) __A,
8589 #define _mm_maskz_fnmadd_round_sd(U, A, B, C, R) __extension__ ({\
8590 (__m128d)__builtin_ia32_vfmaddsd3_maskz(-(__v2df)(__m128d)(A), \
8591 (__v2df)(__m128d)(B), \
8592 (__v2df)(__m128d)(C), (__mmask8)(U), \
8598 return (__m128d) __builtin_ia32_vfmaddsd3_mask3 (-(__v2df) __W,
8605 #define _mm_mask3_fnmadd_round_sd(W, X, Y, U, R) __extension__({\
8606 (__m128d)__builtin_ia32_vfmaddsd3_mask3(-(__v2df)(__m128d)(W), \
8607 (__v2df)(__m128d)(X), \
8608 (__v2df)(__m128d)(Y), (__mmask8)(U), \
8614 return (__m128d) __builtin_ia32_vfmaddsd3_mask ( -(__v2df) __A,
8621 #define _mm_mask_fnmsub_round_sd(W, U, A, B, R) __extension__ ({\
8622 (__m128d)__builtin_ia32_vfmaddsd3_mask(-(__v2df)(__m128d)(A), \
8623 -(__v2df)(__m128d)(B), \
8624 (__v2df)(__m128d)(W), (__mmask8)(U), \
8630 return (__m128d) __builtin_ia32_vfmaddsd3_maskz ( -(__v2df) __A,
8637 #define _mm_maskz_fnmsub_round_sd(U, A, B, C, R) __extension__ ({\
8638 (__m128d)__builtin_ia32_vfmaddsd3_maskz(-(__v2df)(__m128d)(A), \
8639 (__v2df)(__m128d)(B), \
8640 -(__v2df)(__m128d)(C), \
8642 (__mmask8)(U), (int)(R)); })
8647 return (__m128d) __builtin_ia32_vfmaddsd3_mask3 (-(__v2df) (__W),
8654 #define _mm_mask3_fnmsub_round_sd(W, X, Y, U, R) __extension__({\
8655 (__m128d)__builtin_ia32_vfmaddsd3_mask3(-(__v2df)(__m128d)(W), \
8656 (__v2df)(__m128d)(X), \
8657 -(__v2df)(__m128d)(Y), \
8658 (__mmask8)(U), (int)(R)); })
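/* Illustrative usage sketch (not part of the original header): the masked
 * scalar FMA forms operate on the low element only and keep (mask variants)
 * or zero (maskz variants) it when the mask bit is clear; the upper elements
 * are copied through.  Assuming __m128 a, b, c and __mmask8 m:
 *
 *   __m128 r = _mm_mask_fmadd_ss(a, m, b, c);
 *   // r0 = (m & 1) ? a0*b0 + c0 : a0, upper elements taken from a
 */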
8660 #define _mm512_permutex_pd(X, C) __extension__ ({ \
8661 (__m512d)__builtin_shufflevector((__v8df)(__m512d)(X), \
8662 (__v8df)_mm512_undefined_pd(), \
8663 0 + (((C) >> 0) & 0x3), \
8664 0 + (((C) >> 2) & 0x3), \
8665 0 + (((C) >> 4) & 0x3), \
8666 0 + (((C) >> 6) & 0x3), \
8667 4 + (((C) >> 0) & 0x3), \
8668 4 + (((C) >> 2) & 0x3), \
8669 4 + (((C) >> 4) & 0x3), \
8670 4 + (((C) >> 6) & 0x3)); })
8672 #define _mm512_mask_permutex_pd(W, U, X, C) __extension__ ({ \
8673 (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
8674 (__v8df)_mm512_permutex_pd((X), (C)), \
8675 (__v8df)(__m512d)(W)); })
8677 #define _mm512_maskz_permutex_pd(U, X, C) __extension__ ({ \
8678 (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
8679 (__v8df)_mm512_permutex_pd((X), (C)), \
8680 (__v8df)_mm512_setzero_pd()); })
8682 #define _mm512_permutex_epi64(X, C) __extension__ ({ \
8683 (__m512i)__builtin_shufflevector((__v8di)(__m512i)(X), \
8684 (__v8di)_mm512_undefined_epi32(), \
8685 0 + (((C) >> 0) & 0x3), \
8686 0 + (((C) >> 2) & 0x3), \
8687 0 + (((C) >> 4) & 0x3), \
8688 0 + (((C) >> 6) & 0x3), \
8689 4 + (((C) >> 0) & 0x3), \
8690 4 + (((C) >> 2) & 0x3), \
8691 4 + (((C) >> 4) & 0x3), \
8692 4 + (((C) >> 6) & 0x3)); })
8694 #define _mm512_mask_permutex_epi64(W, U, X, C) __extension__ ({ \
8695 (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
8696 (__v8di)_mm512_permutex_epi64((X), (C)), \
8697 (__v8di)(__m512i)(W)); })
8699 #define _mm512_maskz_permutex_epi64(U, X, C) __extension__ ({ \
8700 (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
8701 (__v8di)_mm512_permutex_epi64((X), (C)), \
8702 (__v8di)_mm512_setzero_si512()); })
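/* Illustrative usage sketch (not part of the original header): permutex_pd /
 * permutex_epi64 reorder the four 64-bit elements inside each 256-bit half
 * using an immediate.  Assuming __m512d v declared by the caller:
 *
 *   __m512d r = _mm512_permutex_pd(v, 0x1B);
 *   // 0x1B == (0<<6)|(1<<4)|(2<<2)|3: each half is reversed
 */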
8707 return (__m512d) __builtin_ia32_permvardf512_mask ((__v8df) __Y,
8716 return (__m512d) __builtin_ia32_permvardf512_mask ((__v8df) __Y,
8725 return (__m512d) __builtin_ia32_permvardf512_mask ((__v8df) __Y,
8734 return (__m512i) __builtin_ia32_permvardi512_mask ((__v8di) __Y,
8743 return (__m512i) __builtin_ia32_permvardi512_mask ((__v8di) __Y,
8753 return (__m512i) __builtin_ia32_permvardi512_mask ((__v8di) __Y,
8762 return (__m512) __builtin_ia32_permvarsf512_mask ((__v16sf) __Y,
8771 return (__m512) __builtin_ia32_permvarsf512_mask ((__v16sf) __Y,
8780 return (__m512) __builtin_ia32_permvarsf512_mask ((__v16sf) __Y,
8789 return (__m512i) __builtin_ia32_permvarsi512_mask ((__v16si) __Y,
8798 return (__m512i) __builtin_ia32_permvarsi512_mask ((__v16si) __Y,
8808 return (__m512i) __builtin_ia32_permvarsi512_mask ((__v16si) __Y,
8817 return (__mmask16) __builtin_ia32_kandhi ((__mmask16) __A, (__mmask16) __B);
8823 return (__mmask16) __builtin_ia32_kandnhi ((__mmask16) __A, (__mmask16) __B);
8829 return (__mmask16) __builtin_ia32_korhi ((__mmask16) __A, (__mmask16) __B);
8835 return __builtin_ia32_kortestchi ((__mmask16) __A, (__mmask16) __B);
8841 return __builtin_ia32_kortestzhi ((__mmask16) __A, (__mmask16) __B);
8847 return (__mmask16) __builtin_ia32_kunpckhi ((__mmask16) __A, (__mmask16) __B);
8853 return (__mmask16) __builtin_ia32_kxnorhi ((__mmask16) __A, (__mmask16) __B);
8859 return (__mmask16) __builtin_ia32_kxorhi ((__mmask16) __A, (__mmask16) __B);
8865 __builtin_nontemporal_store((__v8di)__A, (__v8di*)__P);
8871 return __builtin_ia32_movntdqa512 ((__v8di *)__P);
8877 __builtin_nontemporal_store((__v8df)__A, (__v8df*)__P);
8883 __builtin_nontemporal_store((__v16sf)__A, (__v16sf*)__P);
8889 return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A,
8897 return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A,
8906 return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A,
8914 return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A,
8923 return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A,
8931 return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A,
8940 return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A,
8948 return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A,
8954 #define _mm_cmp_round_ss_mask(X, Y, P, R) __extension__ ({ \
8955 (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
8956 (__v4sf)(__m128)(Y), (int)(P), \
8957 (__mmask8)-1, (int)(R)); })
8959 #define _mm_mask_cmp_round_ss_mask(M, X, Y, P, R) __extension__ ({ \
8960 (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
8961 (__v4sf)(__m128)(Y), (int)(P), \
8962 (__mmask8)(M), (int)(R)); })
8964 #define _mm_cmp_ss_mask(X, Y, P) __extension__ ({ \
8965 (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
8966 (__v4sf)(__m128)(Y), (int)(P), \
8968 _MM_FROUND_CUR_DIRECTION); })
8970 #define _mm_mask_cmp_ss_mask(M, X, Y, P) __extension__ ({ \
8971 (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
8972 (__v4sf)(__m128)(Y), (int)(P), \
8974 _MM_FROUND_CUR_DIRECTION); })
8976 #define _mm_cmp_round_sd_mask(X, Y, P, R) __extension__ ({ \
8977 (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
8978 (__v2df)(__m128d)(Y), (int)(P), \
8979 (__mmask8)-1, (int)(R)); })
8981 #define _mm_mask_cmp_round_sd_mask(M, X, Y, P, R) __extension__ ({ \
8982 (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
8983 (__v2df)(__m128d)(Y), (int)(P), \
8984 (__mmask8)(M), (int)(R)); })
8986 #define _mm_cmp_sd_mask(X, Y, P) __extension__ ({ \
8987 (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
8988 (__v2df)(__m128d)(Y), (int)(P), \
8990 _MM_FROUND_CUR_DIRECTION); })
8992 #define _mm_mask_cmp_sd_mask(M, X, Y, P) __extension__ ({ \
8993 (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
8994 (__v2df)(__m128d)(Y), (int)(P), \
8996 _MM_FROUND_CUR_DIRECTION); })
9001 return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A,
9002 1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15);
9008 return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
9016 return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
9024 return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A,
9025 0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14);
9031 return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
9039 return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
9044 #define _mm512_shuffle_epi32(A, I) __extension__ ({ \
9045 (__m512i)__builtin_shufflevector((__v16si)(__m512i)(A), \
9046 (__v16si)_mm512_undefined_epi32(), \
9047 0 + (((I) >> 0) & 0x3), \
9048 0 + (((I) >> 2) & 0x3), \
9049 0 + (((I) >> 4) & 0x3), \
9050 0 + (((I) >> 6) & 0x3), \
9051 4 + (((I) >> 0) & 0x3), \
9052 4 + (((I) >> 2) & 0x3), \
9053 4 + (((I) >> 4) & 0x3), \
9054 4 + (((I) >> 6) & 0x3), \
9055 8 + (((I) >> 0) & 0x3), \
9056 8 + (((I) >> 2) & 0x3), \
9057 8 + (((I) >> 4) & 0x3), \
9058 8 + (((I) >> 6) & 0x3), \
9059 12 + (((I) >> 0) & 0x3), \
9060 12 + (((I) >> 2) & 0x3), \
9061 12 + (((I) >> 4) & 0x3), \
9062 12 + (((I) >> 6) & 0x3)); })
9064 #define _mm512_mask_shuffle_epi32(W, U, A, I) __extension__ ({ \
9065 (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
9066 (__v16si)_mm512_shuffle_epi32((A), (I)), \
9067 (__v16si)(__m512i)(W)); })
9069 #define _mm512_maskz_shuffle_epi32(U, A, I) __extension__ ({ \
9070 (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
9071 (__v16si)_mm512_shuffle_epi32((A), (I)), \
9072 (__v16si)_mm512_setzero_si512()); })
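/* Illustrative usage sketch (not part of the original header):
 * _mm512_shuffle_epi32 applies the PSHUFD selector within each 128-bit lane.
 * Assuming __m512i v declared by the caller:
 *
 *   __m512i r = _mm512_shuffle_epi32(v, 0x00);
 *   // broadcasts element 0 of every 128-bit lane across that lane
 */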
9077 return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A,
9085 return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A,
9093 return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A,
9101 return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A,
9109 return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *)__P,
9117 return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *)__P,
9125 return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P,
9133 return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P,
9141 return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *)__P,
9149 return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *)__P,
9157 return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P,
9165 return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P,
9173 return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A,
9181 return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A,
9189 return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A,
9197 return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A,
9202 #define _mm512_cvt_roundps_pd(A, R) __extension__ ({ \
9203 (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
9204 (__v8df)_mm512_undefined_pd(), \
9205 (__mmask8)-1, (int)(R)); })
9207 #define _mm512_mask_cvt_roundps_pd(W, U, A, R) __extension__ ({ \
9208 (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
9209 (__v8df)(__m512d)(W), \
9210 (__mmask8)(U), (int)(R)); })
9212 #define _mm512_maskz_cvt_roundps_pd(U, A, R) __extension__ ({ \
9213 (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
9214 (__v8df)_mm512_setzero_pd(), \
9215 (__mmask8)(U), (int)(R)); })
9220 return (__m512d) __builtin_ia32_cvtps2pd512_mask ((__v8sf) __A,
9230 return (__m512d) __builtin_ia32_cvtps2pd512_mask ((__v8sf) __A,
9239 return (__m512d) __builtin_ia32_cvtps2pd512_mask ((__v8sf) __A,
9249 return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
9257 return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
9265 return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
9273 return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
9281 __builtin_ia32_compressstoredf512_mask ((__v8df *) __P, (__v8df) __A,
9288 __builtin_ia32_compressstoredi512_mask ((__v8di *) __P, (__v8di) __A,
9295 __builtin_ia32_compressstoresf512_mask ((__v16sf *) __P, (__v16sf) __A,
9302 __builtin_ia32_compressstoresi512_mask ((__v16si *) __P, (__v16si) __A,
9306 #define _mm_cvt_roundsd_ss(A, B, R) __extension__ ({ \
9307 (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
9308 (__v2df)(__m128d)(B), \
9309 (__v4sf)_mm_undefined_ps(), \
9310 (__mmask8)-1, (int)(R)); })
9312 #define _mm_mask_cvt_roundsd_ss(W, U, A, B, R) __extension__ ({ \
9313 (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
9314 (__v2df)(__m128d)(B), \
9315 (__v4sf)(__m128)(W), \
9316 (__mmask8)(U), (int)(R)); })
9318 #define _mm_maskz_cvt_roundsd_ss(U, A, B, R) __extension__ ({ \
9319 (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
9320 (__v2df)(__m128d)(B), \
9321 (__v4sf)_mm_setzero_ps(), \
9322 (__mmask8)(U), (int)(R)); })
9327 return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)(__A),
9336 return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)(__A),
9342 #define _mm_cvtss_i32 _mm_cvtss_si32
9343 #define _mm_cvtss_i64 _mm_cvtss_si64
9344 #define _mm_cvtsd_i32 _mm_cvtsd_si32
9345 #define _mm_cvtsd_i64 _mm_cvtsd_si64
9346 #define _mm_cvti32_sd _mm_cvtsi32_sd
9347 #define _mm_cvti64_sd _mm_cvtsi64_sd
9348 #define _mm_cvti32_ss _mm_cvtsi32_ss
9349 #define _mm_cvti64_ss _mm_cvtsi64_ss
9351 #define _mm_cvt_roundi64_sd(A, B, R) __extension__ ({ \
9352 (__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
9355 #define _mm_cvt_roundsi64_sd(A, B, R) __extension__ ({ \
9356 (__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
9359 #define _mm_cvt_roundsi32_ss(A, B, R) __extension__ ({ \
9360 (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)); })
9362 #define _mm_cvt_roundi32_ss(A, B, R) __extension__ ({ \
9363 (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)); })
9365 #define _mm_cvt_roundsi64_ss(A, B, R) __extension__ ({ \
9366 (__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
9369 #define _mm_cvt_roundi64_ss(A, B, R) __extension__ ({ \
9370 (__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
9373 #define _mm_cvt_roundss_sd(A, B, R) __extension__ ({ \
9374 (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
9375 (__v4sf)(__m128)(B), \
9376 (__v2df)_mm_undefined_pd(), \
9377 (__mmask8)-1, (int)(R)); })
9379 #define _mm_mask_cvt_roundss_sd(W, U, A, B, R) __extension__ ({ \
9380 (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
9381 (__v4sf)(__m128)(B), \
9382 (__v2df)(__m128d)(W), \
9383 (__mmask8)(U), (int)(R)); })
9385 #define _mm_maskz_cvt_roundss_sd(U, A, B, R) __extension__ ({ \
9386 (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
9387 (__v4sf)(__m128)(B), \
9388 (__v2df)_mm_setzero_pd(), \
9389 (__mmask8)(U), (int)(R)); })
9394 return __builtin_ia32_cvtss2sd_round_mask((__v2df)(__A),
9403 return __builtin_ia32_cvtss2sd_round_mask((__v2df)(__A),
9412 return (__m128d) __builtin_ia32_cvtusi2sd32 ((__v2df) __A, __B);
9415 #define _mm_cvt_roundu64_sd(A, B, R) __extension__ ({ \
9416 (__m128d)__builtin_ia32_cvtusi2sd64((__v2df)(__m128d)(A), \
9417 (unsigned long long)(B), (int)(R)); })
9422 return (__m128d) __builtin_ia32_cvtusi2sd64 ((__v2df) __A, __B,
9426 #define _mm_cvt_roundu32_ss(A, B, R) __extension__ ({ \
9427 (__m128)__builtin_ia32_cvtusi2ss32((__v4sf)(__m128)(A), (unsigned int)(B), \
9433 return (__m128) __builtin_ia32_cvtusi2ss32 ((__v4sf) __A, __B,
9437 #define _mm_cvt_roundu64_ss(A, B, R) __extension__ ({ \
9438 (__m128)__builtin_ia32_cvtusi2ss64((__v4sf)(__m128)(A), \
9439 (unsigned long long)(B), (int)(R)); })
9444 return (__m128) __builtin_ia32_cvtusi2ss64 ((__v4sf) __A, __B,
9451 return (__m512i) __builtin_ia32_pbroadcastd512_gpr_mask (__A, (__v16si) __O,
9458 return (__m512i) __builtin_ia32_pbroadcastq512_gpr_mask (__A, (__v8di) __O,
9464 int __E, int __F, int __G, int __H,
9465 int __I, int __J, int __K, int __L,
9466 int __M, int __N, int __O, int __P)
9468 return __extension__ (__m512i)(__v16si)
9469 { __P, __O, __N, __M, __L, __K, __J, __I,
9470 __H, __G, __F, __E, __D, __C, __B, __A };
9473 #define _mm512_setr_epi32(e0,e1,e2,e3,e4,e5,e6,e7, \
9474 e8,e9,e10,e11,e12,e13,e14,e15) \
9475 _mm512_set_epi32((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6), \
9476 (e5),(e4),(e3),(e2),(e1),(e0))
9480 long long __D, long long __E, long long __F,
9481 long long __G, long long __H)
9483 return __extension__ (__m512i) (__v8di)
9484 { __H, __G, __F, __E, __D, __C, __B, __A };
9487 #define _mm512_setr_epi64(e0,e1,e2,e3,e4,e5,e6,e7) \
9488 _mm512_set_epi64((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0))
9492 double __E, double __F, double __G, double __H)
9494 return __extension__ (__m512d)
9495 { __H, __G, __F, __E, __D, __C, __B, __A };
9498 #define _mm512_setr_pd(e0,e1,e2,e3,e4,e5,e6,e7) \
9499 _mm512_set_pd((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0))
9503 float __E, float __F, float __G, float __H,
9504 float __I, float __J, float __K, float __L,
9505 float __M, float __N, float __O, float __P)
9507 return __extension__ (__m512)
9508 { __P, __O, __N, __M, __L, __K, __J, __I,
9509 __H, __G, __F, __E, __D, __C, __B, __A };
9512 #define _mm512_setr_ps(e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10,e11,e12,e13,e14,e15) \
9513 _mm512_set_ps((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6),(e5), \
9514 (e4),(e3),(e2),(e1),(e0))
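/* Illustrative usage sketch (not part of the original header): _mm512_set_*
 * take their arguments from the highest element down, while the _mm512_setr_*
 * aliases take them in memory (ascending) order:
 *
 *   __m512i ascending =
 *       _mm512_setr_epi32(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15);
 *   // element 0 is 0, element 15 is 15
 */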
9540 #undef __DEFAULT_FN_ATTRS
9542 #endif // __AVX512FINTRIN_H
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_castsi128_si512(__m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mask_sqrt_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_cvtph_ps(__m256i __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_cvtps_pd(__mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_cvtepi64_epi8(__m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvtepu8_epi64(__m128i __A)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_cmpeq_epi32_mask(__m512i __a, __m512i __b)
static __inline void __DEFAULT_FN_ATTRS _mm512_mask_store_pd(void *__P, __mmask8 __U, __m512d __A)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_mask_cmpgt_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_maskz_mullo_epi32(__mmask16 __M, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_permutexvar_epi32(__mmask16 __M, __m512i __X, __m512i __Y)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_cvtps_pd(__m512d __W, __mmask8 __U, __m256 __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_expand_epi32(__m512i __W, __mmask16 __U, __m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_xor_si512(__m512i __a, __m512i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_mask_cvtepi64_epi8(__m128i __O, __mmask8 __M, __m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_add_epi32(__mmask16 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_broadcast_i64x4(__m256i __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask3_fmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_unpackhi_ps(__m512 __a, __m512 __b)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_maskz_cvttpd_epi32(__mmask8 __U, __m512d __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_cvtepu32_ps(__m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_and_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_sub_epi32(__m512i __A, __m512i __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_cmpeq_epi64_mask(__m512i __a, __m512i __b)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_permutex2var_epi64(__m512i __A, __mmask8 __U, __m512i __I, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_mask_blend_epi64(__mmask8 __U, __m512i __A, __m512i __W)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_sll_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask3_fmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_kor(__mmask16 __A, __mmask16 __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mask_mul_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_broadcast_i32x4(__m128i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvtepi32_epi64(__m256i __X)
static __inline__ void __DEFAULT_FN_ATTRS _mm512_mask_cvtepi32_storeu_epi8(void *__P, __mmask16 __M, __m512i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_maskz_sub_ss(__mmask8 __U, __m128 __A, __m128 __B)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_mul_epu32(__m512i __X, __m512i __Y)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_sqrt_pd(__m512d __W, __mmask8 __U, __m512d __A)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_setzero_ps(void)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_sub_ps(__m512 __a, __m512 __b)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_fmaddsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_maskz_rsqrt14_sd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_expandloadu_ps(__m512 __W, __mmask16 __U, void const *__P)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_set_epi64(long long __A, long long __B, long long __C, long long __D, long long __E, long long __F, long long __G, long long __H)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_setzero_pd(void)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_rolv_epi64(__m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_add_epi64(__mmask8 __U, __m512i __A, __m512i __B)
static __inline__ unsigned long long __DEFAULT_FN_ATTRS _mm_cvtsd_u64(__m128d __A)
static __inline void __DEFAULT_FN_ATTRS _mm512_storeu_pd(void *__P, __m512d __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_cvtepu16_epi64(__m512i __W, __mmask8 __U, __m128i __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_broadcastsd_pd(__m128d __A)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_kunpackb(__mmask16 __A, __mmask16 __B)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_floor_pd(__m512d __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask3_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_scalef_ps(__mmask16 __U, __m512 __A, __m512 __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mask_fmadd_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_fnmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_maskz_div_ss(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_maskz_mul_ss(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_maskz_cvttpd_epu32(__mmask8 __U, __m512d __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_max_epu64(__mmask8 __M, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_compress_epi32(__m512i __W, __mmask16 __U, __m512i __A)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_min_epu64(__m512i __A, __m512i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_maskz_scalef_sd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_sllv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
static __inline __m128 __DEFAULT_FN_ATTRS _mm512_castps512_ps128(__m512 __a)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_cvtusepi64_epi32(__m512i __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_cvtepi32_ps(__mmask16 __U, __m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_srl_epi32(__m512i __A, __m128i __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask3_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
static __inline__ void __DEFAULT_FN_ATTRS _mm512_stream_si512(__m512i *__P, __m512i __A)
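/* Usage sketch (illustrative, not part of the original header):
 * _mm512_stream_si512 is a non-temporal store, so __dst should be 64-byte
 * aligned and a store fence is needed before other threads read the data.
 * Assumes <immintrin.h> is included; example_stream_fill is hypothetical. */
static __inline__ void __DEFAULT_FN_ATTRS
example_stream_fill(__m512i *__dst, __m512i __v, int __nvec)
{
  for (int __i = 0; __i < __nvec; ++__i)
    _mm512_stream_si512(__dst + __i, __v); /* bypasses the cache hierarchy */
}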
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_fmsubadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_broadcast_i32x4(__mmask16 __M, __m128i __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_unpacklo_pd(__mmask8 __U, __m512d __A, __m512d __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_broadcast_f32x4(__m512 __O, __mmask16 __M, __m128 __A)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_kxnor(__mmask16 __A, __mmask16 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_maskz_max_sd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_broadcast_f64x4(__m256d __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_getexp_pd(__m512d __W, __mmask8 __U, __m512d __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask2_permutex2var_epi64(__m512i __A, __m512i __I, __mmask8 __U, __m512i __B)
static __inline void __DEFAULT_FN_ATTRS _mm512_store_si512(void *__P, __m512i __A)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_cmpgt_epi32_mask(__m512i __a, __m512i __b)
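/* Usage sketch (illustrative, not part of the original header): integer
 * comparisons produce a mask register rather than a vector; the mask can
 * then zero out the lanes that failed the test. Assumes <immintrin.h> is
 * included; example_keep_where_gt is hypothetical. */
static __inline__ __m512i __DEFAULT_FN_ATTRS
example_keep_where_gt(__m512i __a, __m512i __b)
{
  __mmask16 __m = _mm512_cmpgt_epi32_mask(__a, __b); /* one bit per lane */
  return _mm512_maskz_mov_epi32(__m, __a);           /* zero the other lanes */
}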
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_broadcastsd_pd(__mmask8 __M, __m128d __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_sub_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_max_epu64(__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mask3_fmsub_ss(__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_maskz_min_sd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_cvtsepi64_epi32(__m512i __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_permutevar_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512i __C)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_floor_ps(__m512 __W, __mmask16 __U, __m512 __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_abs_pd(__m512d __W, __mmask8 __K, __m512d __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_mov_pd(__mmask8 __U, __m512d __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_rsqrt14_ps(__m512 __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_expandloadu_epi64(__mmask8 __U, void const *__P)
static __inline __m256 __DEFAULT_FN_ATTRS _mm512_castps512_ps256(__m512 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_maskz_scalef_ss(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mask3_fmsub_sd(__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_and_epi64(__mmask8 __k, __m512i __a, __m512i __b)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_set1_epi8(char __w)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_cmple_epu64_mask(__m512i __a, __m512i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_mask_cvtsepi64_epi8(__m128i __O, __mmask8 __M, __m512i __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_cvtph_ps(__m512 __W, __mmask16 __U, __m256i __A)
static __inline__ void __DEFAULT_FN_ATTRS _mm512_stream_ps(float *__P, __m512 __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mask_div_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_min_epi64(__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_xor_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_cvtusepi64_epi8(__m512i __A)
static __inline__ void __DEFAULT_FN_ATTRS _mm512_mask_cvtusepi64_storeu_epi8(void *__P, __mmask8 __M, __m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_or_epi64(__mmask8 __k, __m512i __a, __m512i __b)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_srl_epi64(__mmask8 __U, __m512i __A, __m128i __B)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_castps_si512(__m512 __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_scalef_pd(__mmask8 __U, __m512d __A, __m512d __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_broadcast_f64x4(__mmask8 __M, __m256d __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask2_permutex2var_pd(__m512d __A, __m512i __I, __mmask8 __U, __m512d __B)
static __inline __m256i __DEFAULT_FN_ATTRS _mm512_castsi512_si256(__m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_broadcastd_epi32(__mmask16 __M, __m128i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_cmpeq_epu64_mask(__m512i __a, __m512i __b)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_load_pd(double const *__p)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_rcp14_ps(__mmask16 __U, __m512 __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_cmpgt_epu64_mask(__m512i __a, __m512i __b)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_andnot_epi32(__mmask16 __U, __m512i __A, __m512i __B)
static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvttsd_i64(__m128d __A)
static __inline void __DEFAULT_FN_ATTRS _mm512_mask_store_ps(void *__P, __mmask16 __U, __m512 __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_xor_epi64(__mmask8 __k, __m512i __a, __m512i __b)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_maskz_max_ss(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_fmadd_pd(__m512d __A, __m512d __B, __m512d __C)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_fmaddsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_maskz_load_ps(__mmask16 __U, void const *__P)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_fmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_expandloadu_epi64(__m512i __W, __mmask8 __U, void const *__P)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_sra_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mask_fnmsub_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_maskz_loadu_epi64(__mmask8 __U, void const *__P)
static __inline__ void __DEFAULT_FN_ATTRS _mm512_mask_cvtsepi64_storeu_epi8(void *__P, __mmask8 __M, __m512i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mask_sqrt_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_and_epi32(__m512i __a, __m512i __b)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_setzero_ps(void)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_expand_ps(__mmask16 __U, __m512 __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_srl_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
static __inline__ unsigned long long __DEFAULT_FN_ATTRS _mm_cvttsd_u64(__m128d __A)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_unpacklo_ps(__m512 __a, __m512 __b)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_sllv_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_min_epu32(__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
static __inline__ int __DEFAULT_FN_ATTRS _mm512_kortestz(__mmask16 __A, __mmask16 __B)
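/* Usage sketch (illustrative, not part of the original header):
 * _mm512_kortestz returns 1 when the OR of its two mask arguments is all
 * zero, giving a cheap "did any lane match?" branch. Assumes <immintrin.h>
 * is included; example_any_lane_equal is hypothetical. */
static __inline__ int __DEFAULT_FN_ATTRS
example_any_lane_equal(__m512i __a, __m512i __b)
{
  __mmask16 __m = _mm512_cmpeq_epu32_mask(__a, __b);
  return !_mm512_kortestz(__m, __m); /* 1 if at least one lane compared equal */
}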
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_cvtsepi64_epi8(__m512i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_maskz_fmadd_ss(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
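/* Usage sketch (illustrative, not part of the original header): blend picks
 * lane i from the second source when bit i of the mask is set, otherwise
 * from the first; combined with a compare mask this gives a per-lane
 * select. Assumes <immintrin.h> is included; example_blend_max_epi32 is
 * hypothetical (AVX-512F also provides dedicated min/max intrinsics). */
static __inline__ __m512i __DEFAULT_FN_ATTRS
example_blend_max_epi32(__m512i __a, __m512i __b)
{
  __mmask16 __m = _mm512_cmpgt_epi32_mask(__b, __a); /* lanes where b > a */
  return _mm512_mask_blend_epi32(__m, __a, __b);     /* take b there, else a */
}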
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_min_epu32(__mmask16 __M, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_set1_epi32(int __s)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_min_ps(__m512 __A, __m512 __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_srlv_epi32(__m512i __X, __m512i __Y)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_mask_cmplt_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_cmpneq_epu32_mask(__m512i __a, __m512i __b)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_mask_cmpge_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_max_epi64(__m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_rolv_epi64(__mmask8 __U, __m512i __A, __m512i __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_mask_test_epi64_mask(__mmask8 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_min_epi32(__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_permutex2var_epi64(__m512i __A, __m512i __I, __m512i __B)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_cmpgt_epu32_mask(__m512i __a, __m512i __b)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_unpackhi_pd(__mmask8 __U, __m512d __A, __m512d __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_compress_pd(__mmask8 __U, __m512d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mask_max_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_undefined_ps(void)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_permutexvar_ps(__m512i __X, __m512 __Y)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
static __inline void __DEFAULT_FN_ATTRS _mm512_store_epi64(void *__P, __m512i __A)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_add_ps(__m512 __a, __m512 __b)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_set4_ps(float __A, float __B, float __C, float __D)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_castpd_ps(__m512d __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_sllv_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_movehdup_ps(__m512 __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_sll_epi64(__m512i __A, __m128i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_srav_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_sqrt_pd(__m512d __a)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_permutexvar_epi32(__m512i __X, __m512i __Y)
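/* Usage sketch (illustrative, not part of the original header):
 * _mm512_permutexvar_epi32 gathers the lanes of its second operand using
 * the indices in its first, so reversing all 16 lanes is a single shuffle.
 * Assumes <immintrin.h> is included; example_reverse_lanes_epi32 is
 * hypothetical. */
static __inline__ __m512i __DEFAULT_FN_ATTRS
example_reverse_lanes_epi32(__m512i __v)
{
  __m512i __idx = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7,
                                   8, 9, 10, 11, 12, 13, 14, 15);
  return _mm512_permutexvar_epi32(__idx, __v); /* result[i] = __v[idx[i]] */
}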
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_fnmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_maskz_set1_epi64(__mmask8 __M, long long __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvtepi16_epi32(__m256i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_or_epi64(__m512i __a, __m512i __b)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_srav_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_unpackhi_epi64(__mmask8 __U, __m512i __A, __m512i __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_movehdup_ps(__mmask16 __U, __m512 __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_srav_epi32(__m512i __X, __m512i __Y)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_cvtepu32_epi64(__mmask8 __U, __m256i __X)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mask_fnmsub_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_max_epu32(__mmask16 __M, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_sllv_epi32(__m512i __X, __m512i __Y)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_loadu_ps(float const *__p)
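/* Usage sketch (illustrative, not part of the original header): the
 * unaligned load/store pair processes 16 floats per iteration; the final
 * partial vector is omitted here (see the masked-tail sketch further down).
 * Assumes <immintrin.h> is included; example_add_arrays is hypothetical. */
static __inline__ void __DEFAULT_FN_ATTRS
example_add_arrays(float *__dst, const float *__x, const float *__y,
                   int __nvec)
{
  for (int __i = 0; __i < __nvec; ++__i) {
    __m512 __a = _mm512_loadu_ps(__x + 16 * __i);
    __m512 __b = _mm512_loadu_ps(__y + 16 * __i);
    _mm512_storeu_ps(__dst + 16 * __i, _mm512_add_ps(__a, __b));
  }
}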
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_fmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_mask_cmpge_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mask_rcp14_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_cvtps_pd(__m256 __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_max_ps(__mmask16 __U, __m512 __A, __m512 __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_set1_epi64(__m512i __O, __mmask8 __M, long long __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_cvtsepi32_epi16(__m512i __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_rcp14_pd(__m512d __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_sub_epi32(__mmask16 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_andnot_epi64(__m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_broadcastq_epi64(__m512i __O, __mmask8 __M, __m128i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_expand_epi64(__mmask8 __U, __m512i __A)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_mask_cmple_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b)
/// Create a 128-bit vector of [4 x float] with undefined values.
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_undefined_ps(void)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mask3_fnmadd_sd(__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_sra_epi32(__mmask16 __U, __m512i __A, __m128i __B)
static __inline __m128i __DEFAULT_FN_ATTRS _mm512_castsi512_si128(__m512i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mask_rsqrt14_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ void __DEFAULT_FN_ATTRS _mm512_mask_compressstoreu_ps(void *__P, __mmask16 __U, __m512 __A)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_min_epi64(__m512i __A, __m512i __B)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_cvtepu32_pd(__m256i __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_cvtepi32_ps(__m512 __W, __mmask16 __U, __m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_rorv_epi64(__m512i __A, __m512i __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_rcp14_ps(__m512 __W, __mmask16 __U, __m512 __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_unpackhi_epi32(__m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_abs_epi64(__mmask8 __U, __m512i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mask_fnmadd_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_cmpgt_epi64_mask(__m512i __a, __m512i __b)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_sllv_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_cmpneq_epi64_mask(__m512i __a, __m512i __b)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_mask_load_pd(__m512d __W, __mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_mask_cvttpd_epi32(__m256i __W, __mmask8 __U, __m512d __A)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_kand(__mmask16 __A, __mmask16 __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_sll_epi32(__m512i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_rcp14_sd(__m128d __A, __m128d __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_andnot_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mask_fmsub_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mask3_fnmsub_ss(__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_permutex2var_ps(__mmask16 __U, __m512 __A, __m512i __I, __m512 __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_rorv_epi64(__mmask8 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_permutexvar_epi32(__m512i __W, __mmask16 __M, __m512i __X, __m512i __Y)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_srav_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_srl_epi32(__mmask16 __U, __m512i __A, __m128i __B)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_set1_pd(double __w)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_mask_cvtpd_epu32(__m256i __W, __mmask8 __U, __m512d __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_sra_epi64(__m512i __A, __m128i __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_mul_pd(__mmask8 __U, __m512d __A, __m512d __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_fnmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_maskz_cvtepi32_epi8(__mmask16 __M, __m512i __A)
static __inline__ void __DEFAULT_FN_ATTRS _mm512_mask_compressstoreu_epi32(void *__P, __mmask16 __U, __m512i __A)
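/* Usage sketch (illustrative, not part of the original header): a compress
 * store writes only the mask-selected lanes, packed contiguously, which is
 * the core of a vectorized filter. Assumes <immintrin.h> is included;
 * example_filter_positive is hypothetical. */
static __inline__ int __DEFAULT_FN_ATTRS
example_filter_positive(int *__out, __m512i __v)
{
  __mmask16 __m = _mm512_cmpgt_epi32_mask(__v, _mm512_setzero_si512());
  _mm512_mask_compressstoreu_epi32(__out, __m, __v);
  return __builtin_popcount((unsigned int)__m); /* number of ints written */
}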
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_castsi256_si512(__m256i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_cvtps_epi32(__mmask16 __U, __m512 __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask3_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_maskz_loadu_pd(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_maskz_cvtepi64_epi32(__mmask8 __M, __m512i __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_fmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_ceil_pd(__m512d __W, __mmask8 __U, __m512d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_cvtusepi64_epi16(__m512i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_maskz_sqrt_ss(__mmask8 __U, __m128 __A, __m128 __B)
static __inline void __DEFAULT_FN_ATTRS _mm512_mask_storeu_epi64(void *__P, __mmask8 __U, __m512i __A)
static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvttss_i64(__m128 __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_fmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_maskz_loadu_epi32(__mmask16 __U, void const *__P)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_load_epi32(__mmask16 __U, void const *__P)
static __inline__ __m256 __DEFAULT_FN_ATTRS _mm512_maskz_cvtpd_ps(__mmask8 __U, __m512d __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_expandloadu_pd(__mmask8 __U, void const *__P)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_expand_pd(__mmask8 __U, __m512d __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_min_epu64(__mmask8 __M, __m512i __A, __m512i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mask_mul_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_andnot_epi32(__m512i __A, __m512i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_mask_cvtusepi64_epi8(__m128i __O, __mmask8 __M, __m512i __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_min_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_cvtepu32_pd(__mmask8 __U, __m256i __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_loadu_pd(double const *__p)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_unpacklo_epi32(__mmask16 __U, __m512i __A, __m512i __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_cvtph_ps(__mmask16 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_maskz_cvtepi64_epi8(__mmask8 __M, __m512i __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_rcp14_pd(__mmask8 __U, __m512d __A)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_min_epi32(__m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_add_epi64(__m512i __A, __m512i __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_add_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_srlv_epi64(__m512i __X, __m512i __Y)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_abs_epi32(__mmask16 __U, __m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_expand_epi64(__m512i __W, __mmask8 __U, __m512i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_cvtsepi64_epi16(__m512i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_maskz_cvtusepi32_epi8(__mmask16 __M, __m512i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_undefined_si256(void)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_mask_cmplt_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_div_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_max_pd(__m512d __A, __m512d __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_mask_cmpgt_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_castpd128_pd512(__m128d __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_sra_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_mov_epi64(__mmask8 __U, __m512i __A)
static __inline__ void __DEFAULT_FN_ATTRS _mm512_mask_cvtusepi64_storeu_epi32(void *__P, __mmask8 __M, __m512i __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_movedup_pd(__mmask8 __U, __m512d __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_unpackhi_epi32(__mmask16 __U, __m512i __A, __m512i __B)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_cmpeq_epu32_mask(__m512i __a, __m512i __b)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_set_epi32(int __A, int __B, int __C, int __D, int __E, int __F, int __G, int __H, int __I, int __J, int __K, int __L, int __M, int __N, int __O, int __P)
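/* Usage sketch (illustrative, not part of the original header): as with the
 * 128/256-bit variants, _mm512_set_epi32 lists its arguments from the
 * highest element down to element 0. Assumes <immintrin.h> is included;
 * example_iota_epi32 is hypothetical. */
static __inline__ __m512i __DEFAULT_FN_ATTRS
example_iota_epi32(void)
{
  /* element 0 holds 0, element 15 holds 15 */
  return _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8,
                          7, 6, 5, 4, 3, 2, 1, 0);
}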
static __inline__ void __DEFAULT_FN_ATTRS _mm512_mask_cvtusepi32_storeu_epi8(void *__P, __mmask16 __M, __m512i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mask_sub_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_rolv_epi32(__mmask16 __U, __m512i __A, __m512i __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_cvtepi32_pd(__mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_maskz_cvtepi32_epi16(__mmask16 __M, __m512i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_cvtusepi32_epi16(__m512i __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_fnmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_cvttps_epu32(__mmask16 __U, __m512 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mask_div_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_castsi512_pd(__m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_unpackhi_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_max_epi64(__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_abs_ps(__m512 __W, __mmask16 __K, __m512 __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_permutex2var_pd(__m512d __A, __mmask8 __U, __m512i __I, __m512d __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_rcp14_pd(__m512d __W, __mmask8 __U, __m512d __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_movehdup_ps(__m512 __W, __mmask16 __U, __m512 __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvtepu32_epi64(__m256i __X)
static __inline__ int __DEFAULT_FN_ATTRS _mm_cvttss_i32(__m128 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_getexp_sd(__m128d __A, __m128d __B)
static __inline __mmask8 __DEFAULT_FN_ATTRS _mm512_test_epi64_mask(__m512i __A, __m512i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mask_max_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_mask_testn_epi64_mask(__mmask8 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_xor_epi32(__mmask16 __k, __m512i __a, __m512i __b)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mask3_fnmsub_sd(__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_sll_epi32(__mmask16 __U, __m512i __A, __m128i __B)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_mask_cmpgt_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_permutevar_pd(__mmask8 __U, __m512d __A, __m512i __C)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_max_epi32(__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
static __inline__ void __DEFAULT_FN_ATTRS _mm512_mask_store_epi32(void *__P, __mmask16 __U, __m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvtepi16_epi64(__m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_maskz_cvtpd_epi32(__mmask8 __U, __m512d __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_or_si512(__m512i __a, __m512i __b)
static __inline__ void __DEFAULT_FN_ATTRS _mm512_mask_cvtepi64_storeu_epi32(void *__P, __mmask8 __M, __m512i __A)
typedef char __v64qi __attribute__((__vector_size__(64)));
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_broadcastss_ps(__mmask16 __M, __m128 __A)
static __inline __mmask16 __DEFAULT_FN_ATTRS _mm512_knot(__mmask16 __M)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_undefined_epi32(void)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_srlv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_cvtepi32_pd(__m512d __W, __mmask8 __U, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mask3_fmadd_sd(__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_maskz_set1_epi32(__mmask16 __M, int __A)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_mask_mul_epi32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_mask_loadu_epi32(__m512i __W, __mmask16 __U, void const *__P)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_mask_cmpge_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_rolv_epi32(__m512i __A, __m512i __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_unpacklo_ps(__mmask16 __U, __m512 __A, __m512 __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_unpackhi_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_scalef_ss(__m128 __A, __m128 __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_scalef_pd(__m512d __A, __m512d __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvtps_epu32(__m512 __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_fmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_unpackhi_pd(__m512d __a, __m512d __b)
static __inline__ void __DEFAULT_FN_ATTRS _mm512_mask_cvtepi64_storeu_epi8(void *__P, __mmask8 __M, __m512i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_maskz_rsqrt14_ss(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_scalef_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_unpackhi_epi64(__m512i __A, __m512i __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_scalef_ps(__m512 __A, __m512 __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_floor_pd(__m512d __W, __mmask8 __U, __m512d __A)
static __inline__ void __DEFAULT_FN_ATTRS _mm512_mask_cvtepi32_storeu_epi16(void *__P, __mmask16 __M, __m512i __A)
/// Constructs a 128-bit floating-point vector of [4 x float] initialized to zero.
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_setzero_ps(void)
static __inline__ __m256 __DEFAULT_FN_ATTRS _mm512_cvtpd_ps(__m512d __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_add_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_cvtepu32_epi64(__m512i __W, __mmask8 __U, __m256i __X)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_cvtps_epu32(__m512i __W, __mmask16 __U, __m512 __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_cvtps_epu32(__mmask16 __U, __m512 __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_abs_epi64(__m512i __W, __mmask8 __U, __m512i __A)
static __inline__ unsigned __DEFAULT_FN_ATTRS _mm_cvttss_u32(__m128 __A)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_mask_loadu_epi64(__m512i __W, __mmask8 __U, void const *__P)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_moveldup_ps(__m512 __A)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_cmpge_epi32_mask(__m512i __a, __m512i __b)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_mask_cmplt_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b)
static __inline void __DEFAULT_FN_ATTRS _mm512_storeu_ps(void *__P, __m512 __A)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_abs_epi64(__m512i __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask3_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_maskz_fmadd_sd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_min_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_broadcastsd_pd(__m512d __O, __mmask8 __M, __m128d __A)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_mask_loadu_ps(__m512 __W, __mmask16 __U, void const *__P)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setzero_pd(void)
static __inline__ __m256 __DEFAULT_FN_ATTRS _mm256_undefined_ps(void)
static __inline__ unsigned long long __DEFAULT_FN_ATTRS _mm_cvtss_u64(__m128 __A)
static __inline void __DEFAULT_FN_ATTRS _mm512_mask_storeu_ps(void *__P, __mmask16 __U, __m512 __A)
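/* Usage sketch (illustrative, not part of the original header): a tail of
 * fewer than 16 floats can be handled with a lane mask built from a bit
 * count; masked loads and stores touch only the selected lanes. Assumes
 * <immintrin.h> is included; example_add_tail is hypothetical. */
static __inline__ void __DEFAULT_FN_ATTRS
example_add_tail(float *__dst, const float *__x, const float *__y,
                 int __n /* 0 <= __n <= 15 */)
{
  __mmask16 __m = (__mmask16)((1U << __n) - 1U); /* select the low __n lanes */
  __m512 __a = _mm512_mask_loadu_ps(_mm512_setzero_ps(), __m, __x);
  __m512 __b = _mm512_mask_loadu_ps(_mm512_setzero_ps(), __m, __y);
  _mm512_mask_storeu_ps(__dst, __m, _mm512_add_ps(__a, __b));
}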
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_fnmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_set1_ps(float __w)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_permutexvar_ps(__mmask16 __U, __m512i __X, __m512 __Y)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_sqrt_ps(__m512 __a)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mask_fmadd_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_add_ps(__mmask16 __U, __m512 __A, __m512 __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mask_min_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_kandn(__mmask16 __A, __mmask16 __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_mov_ps(__m512 __W, __mmask16 __U, __m512 __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_mul_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_load_epi64(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_cvtepi64_epi16(__m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_cvtepu8_epi32(__m512i __W, __mmask16 __U, __m128i __A)
static __inline__ unsigned __DEFAULT_FN_ATTRS _mm_cvttsd_u32(__m128d __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask3_fmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_maskz_cvtsd_ss(__mmask8 __U, __m128 __A, __m128d __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_permutex2var_epi64(__mmask8 __U, __m512i __A, __m512i __I, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_unpacklo_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_or_epi32(__mmask16 __k, __m512i __a, __m512i __b)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_movedup_pd(__m512d __W, __mmask8 __U, __m512d __A)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_cvttps_epi32(__m512 __a)
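/* Usage sketch (illustrative, not part of the original header): the "tt"
 * variants truncate toward zero like a C cast, whereas _mm512_cvtps_epi32
 * rounds according to the current rounding mode. Assumes <immintrin.h> is
 * included; example_float_to_int_trunc is hypothetical. */
static __inline__ __m512i __DEFAULT_FN_ATTRS
example_float_to_int_trunc(__m512 __v)
{
  return _mm512_cvttps_epi32(__v); /* (int)x semantics in every lane */
}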
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_sll_epi64(__mmask8 __U, __m512i __A, __m128i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_broadcastq_epi64(__mmask8 __M, __m128i __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_moveldup_ps(__mmask16 __U, __m512 __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_cmplt_epu64_mask(__m512i __a, __m512i __b)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_sqrt_pd(__mmask8 __U, __m512d __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_fnmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mask_add_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtu64_ss(__m128 __A, unsigned long long __B)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_max_epu32(__m512i __A, __m512i __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_cvtepi32_epi64(__m512i __W, __mmask8 __U, __m256i __X)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_min_epu32(__m512i __A, __m512i __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_unpacklo_epi64(__m512i __A, __m512i __B)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_mask_cmplt_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_cvttps_epi32(__m512i __W, __mmask16 __U, __m512 __A)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_cmple_epi32_mask(__m512i __a, __m512i __b)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_or_epi32(__m512i __a, __m512i __b)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_mullo_epi32(__m512i __A, __m512i __B)
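/* Usage sketch (illustrative, not part of the original header): broadcast a
 * scalar with set1 and multiply lane-wise, keeping the low 32 bits of each
 * product. Assumes <immintrin.h> is included; example_scale_epi32 is
 * hypothetical. */
static __inline__ __m512i __DEFAULT_FN_ATTRS
example_scale_epi32(__m512i __v, int __s)
{
  return _mm512_mullo_epi32(__v, _mm512_set1_epi32(__s));
}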
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_testn_epi64_mask(__m512i __A, __m512i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m512i __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_min_pd(__m512d __A, __m512d __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mask_getexp_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_unpackhi_ps(__mmask16 __U, __m512 __A, __m512 __B)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_set1_epi16(short __w)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_mask_cmple_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_rsqrt14_pd(__m512d __W, __mmask8 __U, __m512d __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_add_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_mov_epi32(__m512i __W, __mmask16 __U, __m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_and_epi64(__m512i __a, __m512i __b)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_permutexvar_pd(__m512d __W, __mmask8 __U, __m512i __X, __m512d __Y)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_rorv_epi32(__mmask16 __U, __m512i __A, __m512i __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_permutexvar_pd(__mmask8 __U, __m512i __X, __m512d __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_maskz_cvtsepi32_epi8(__mmask16 __M, __m512i __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_permutex2var_pd(__mmask8 __U, __m512d __A, __m512i __I, __m512d __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_compress_ps(__mmask16 __U, __m512 __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_permutex2var_epi32(__mmask16 __U, __m512i __A, __m512i __I, __m512i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_cvtepi64_epi32(__m512i __A)
static __inline__ void __DEFAULT_FN_ATTRS _mm512_mask_compressstoreu_epi64(void *__P, __mmask8 __U, __m512i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS _mm512_mask_cvtpd_ps(__m256 __W, __mmask8 __U, __m512d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_maskz_cvtss_sd(__mmask8 __U, __m128d __A, __m128 __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mask_cvtss_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_cvttpd_epu32(__m512d __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_broadcastq_epi64(__m128i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_mask_cmpeq_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_mask_mul_epu32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_ceil_ps(__m512 __W, __mmask16 __U, __m512 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_rsqrt14_ss(__m128 __A, __m128 __B)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_load_si512(void const *__P)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_set1_epi32(__m512i __O, __mmask16 __M, int __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_permutevar_pd(__m512d __A, __m512i __C)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_unpackhi_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_max_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_min_epu64(__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_rsqrt14_ps(__m512 __W, __mmask16 __U, __m512 __A)
static __inline__ void __DEFAULT_FN_ATTRS _mm512_mask_compressstoreu_pd(void *__P, __mmask8 __U, __m512d __A)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_ceil_pd(__m512d __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvtepi8_epi32(__m128i __A)
static __inline void __DEFAULT_FN_ATTRS _mm512_storeu_si512(void *__P, __m512i __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_rsqrt14_pd(__m512d __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_sqrt_ps(__mmask16 __U, __m512 __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvtps_epi32(__m512 __A)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_cmple_epu32_mask(__m512i __a, __m512i __b)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_and_epi32(__mmask16 __k, __m512i __a, __m512i __b)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_broadcastd_epi32(__m512i __O, __mmask16 __M, __m128i __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_abs_ps(__m512 __A)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_maskz_load_pd(__mmask8 __U, void const *__P)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_kxor(__mmask16 __A, __mmask16 __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_compress_ps(__m512 __W, __mmask16 __U, __m512 __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_stream_load_si512(void *__P)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_getexp_ps(__m512 __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_expandloadu_epi32(__mmask16 __U, void const *__P)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_compress_pd(__m512d __W, __mmask8 __U, __m512d __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_cmpge_epu64_mask(__m512i __a, __m512i __b)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_broadcastss_ps(__m512 __O, __mmask16 __M, __m128 __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_sra_epi32(__m512i __A, __m128i __B)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_castsi512_ps(__m512i __A)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_mask_blend_pd(__mmask8 __U, __m512d __A, __m512d __W)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_rcp14_ss(__m128 __A, __m128 __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_maskz_fnmsub_ss(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_permutexvar_epi64(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_rorv_epi32(__m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_permutexvar_epi64(__m512i __X, __m512i __Y)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_cmplt_epi64_mask(__m512i __a, __m512i __b)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_compress_epi64(__mmask8 __U, __m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_andnot_epi64(__mmask8 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_broadcastd_epi32(__m128i __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_fmsub_pd(__m512d __A, __m512d __B, __m512d __C)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_maskz_mul_epi32(__mmask8 __M, __m512i __X, __m512i __Y)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_mov_epi64(__m512i __W, __mmask8 __U, __m512i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mask3_fnmadd_ss(__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_sub_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_testn_epi32_mask(__m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_set4_epi64(long long __A, long long __B, long long __C, long long __D)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_cvtusepi32_epi8(__m512i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_mask_cmpgt_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b)
static __inline__ unsigned long long __DEFAULT_FN_ATTRS _mm_cvttss_u64(__m128 __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_cmpneq_epu64_mask(__m512i __a, __m512i __b)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_mul_pd(__m512d __a, __m512d __b)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_unpackhi_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_cvtepi32_ps(__m512i __A)
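/* Usage sketch (illustrative, not part of the original header):
 * _mm512_cvtepi32_ps performs a numeric int-to-float conversion per lane;
 * contrast with _mm512_castsi512_ps, which only reinterprets the same 512
 * bits and emits no instruction. Assumes <immintrin.h> is included;
 * example_int_to_float is hypothetical. */
static __inline__ __m512 __DEFAULT_FN_ATTRS
example_int_to_float(__m512i __v)
{
  return _mm512_cvtepi32_ps(__v); /* one conversion per 32-bit lane */
}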
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_maskz_mul_sd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_sub_epi64(__mmask8 __U, __m512i __A, __m512i __B)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_castps256_ps512(__m256 __a)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_mov_ps(__mmask16 __U, __m512 __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_xor_epi64(__m512i __a, __m512i __b)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mask_add_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_unpacklo_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_mask_cvtsepi32_epi16(__m256i __O, __mmask16 __M, __m512i __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_scalef_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_cvtepu32_ps(__m512 __W, __mmask16 __U, __m512i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mask_min_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_srl_epi64(__m512i __A, __m128i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvtepu16_epi64(__m128i __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_rsqrt14_ps(__mmask16 __U, __m512 __A)
static __inline__ void __DEFAULT_FN_ATTRS _mm512_mask_cvtepi64_storeu_epi16(void *__P, __mmask8 __M, __m512i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mask_scalef_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_maskz_fnmsub_sd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline __m128d __DEFAULT_FN_ATTRS _mm512_castpd512_pd128(__m512d __a)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_castpd256_pd512(__m256d __a)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_add_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask2_permutex2var_epi32(__m512i __A, __m512i __I, __mmask16 __U, __m512i __B)
static __inline__ void __DEFAULT_FN_ATTRS _mm512_mask_cvtsepi32_storeu_epi16(void *__P, __mmask16 __M, __m512i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_mask_cmpeq_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_or_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_mask_cmpneq_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask2_permutex2var_ps(__m512 __A, __m512i __I, __mmask16 __U, __m512 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_maskz_sqrt_sd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_load_epi32(void const *__P)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_castpd_si512(__m512d __A)
static __inline__ void __DEFAULT_FN_ATTRS _mm512_mask_cvtsepi64_storeu_epi32(void *__P, __mmask8 __M, __m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_expand_epi32(__mmask16 __U, __m512i __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_broadcastss_ps(__m128 __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvtepu16_epi32(__m256i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_cvtepi16_epi64(__m512i __W, __mmask8 __U, __m128i __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_div_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_sub_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_max_epu32(__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
static __inline __m256i __DEFAULT_FN_ATTRS _mm512_cvttpd_epi32(__m512d __a)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_sll_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_cvtepi32_pd(__m256i __A)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_mask_loadu_pd(__m512d __W, __mmask8 __U, void const *__P)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_cvtepu8_epi64(__m512i __W, __mmask8 __U, __m128i __A)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_add_pd(__m512d __a, __m512d __b)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_rsqrt14_pd(__mmask8 __U, __m512d __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_cvtepu16_epi32(__m512i __W, __mmask16 __U, __m256i __A)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_div_pd(__m512d __a, __m512d __b)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_maskz_cvtsepi32_epi16(__mmask16 __M, __m512i __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_min_pd(__mmask8 __U, __m512d __A, __m512d __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvtepi8_epi64(__m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mask_fnmadd_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mask_fmsub_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_mask_cvtpd_epi32(__m256i __W, __mmask8 __U, __m512d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_mask_cvtepi32_epi16(__m256i __O, __mmask16 __M, __m512i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_maskz_cvtusepi64_epi32(__mmask8 __M, __m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_srav_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvtepu8_epi32(__m128i __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_movedup_pd(__m512d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setzero_si128(void)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_fmadd_ps(__m512 __A, __m512 __B, __m512 __C)
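/* Usage sketch (illustrative, not part of the original header): fused
 * multiply-add evaluates a*x + y per lane with a single rounding, which is
 * the inner step of SAXPY. Assumes <immintrin.h> is included; example_saxpy
 * is hypothetical and omits tail handling. */
static __inline__ void __DEFAULT_FN_ATTRS
example_saxpy(float *__y, const float *__x, float __a, int __nvec)
{
  __m512 __va = _mm512_set1_ps(__a);
  for (int __i = 0; __i < __nvec; ++__i) {
    __m512 __vx = _mm512_loadu_ps(__x + 16 * __i);
    __m512 __vy = _mm512_loadu_ps(__y + 16 * __i);
    _mm512_storeu_ps(__y + 16 * __i, _mm512_fmadd_ps(__va, __vx, __vy));
  }
}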
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_mask_cmpeq_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_mask_cvtsepi64_epi16(__m128i __O, __mmask8 __M, __m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_mov_epi32(__mmask16 __U, __m512i __A)
static __inline __m256d __DEFAULT_FN_ATTRS _mm512_castpd512_pd256(__m512d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtu64_sd(__m128d __A, unsigned long long __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_sub_pd(__mmask8 __U, __m512d __A, __m512d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_cvtpd_epi32(__m512d __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_maskz_rcp14_ss(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_mask_cvtsepi32_epi8(__m128i __O, __mmask16 __M, __m512i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_cmple_epi64_mask(__m512i __a, __m512i __b)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_fnmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
static __inline __mmask16 __DEFAULT_FN_ATTRS _mm512_test_epi32_mask(__m512i __A, __m512i __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_getexp_pd(__m512d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_mask_cvttpd_epu32(__m256i __W, __mmask8 __U, __m512d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_mask_cvtusepi64_epi32(__m256i __O, __mmask8 __M, __m512i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_mask_cvtusepi64_epi16(__m128i __O, __mmask8 __M, __m512i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtu32_ss(__m128 __A, unsigned __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_fmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_mask_cmple_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_cvttps_epi32(__mmask16 __U, __m512 __A)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_set4_pd(double __A, double __B, double __C, double __D)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_max_epi32(__mmask16 __M, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_srlv_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_mask_cmpneq_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_abs_pd(__m512d __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_srlv_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_kmov(__mmask16 __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_unpacklo_epi64(__mmask8 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_sllv_epi64(__m512i __X, __m512i __Y)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_permutex2var_pd(__m512d __A, __m512i __I, __m512d __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_scalef_sd(__m128d __A, __m128d __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_broadcast_i64x4(__m512i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_maskz_fnmadd_ss(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_mask_blend_ps(__mmask16 __U, __m512 __A, __m512 __W)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_cmpge_epi64_mask(__m512i __a, __m512i __b)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_broadcast_i32x4(__m512i __O, __mmask16 __M, __m128i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_permutexvar_epi64(__mmask8 __M, __m512i __X, __m512i __Y)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_load_epi64(void const *__P)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_srav_epi64(__m512i __X, __m512i __Y)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_expand_ps(__m512 __W, __mmask16 __U, __m512 __A)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_maskz_mul_epu32(__mmask8 __M, __m512i __X, __m512i __Y)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_div_pd(__mmask8 __U, __m512d __A, __m512d __B)
static __inline void __DEFAULT_FN_ATTRS _mm512_store_pd(void *__P, __m512d __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_sub_epi64(__m512i __A, __m512i __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_fmsubadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_fmsubadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_cvtepi16_epi32(__m512i __W, __mmask16 __U, __m256i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_cvtepu16_epi32(__mmask16 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_maskz_cvtsepi64_epi32(__mmask8 __M, __m512i __A)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_set1_epi64(long long __d)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_floor_ps(__m512 __A)
static __inline__ void __DEFAULT_FN_ATTRS _mm512_mask_cvtsepi32_storeu_epi8(void *__P, __mmask16 __M, __m512i __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_broadcast_f64x4(__m512d __O, __mmask8 __M, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_mask_cvtusepi32_epi16(__m256i __O, __mmask16 __M, __m512i __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask3_fmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm512_mask_cmpneq_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_loadu_si512(void const *__P)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_mask_testn_epi32_mask(__mmask16 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_min_epi64(__mmask8 __M, __m512i __A, __m512i __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_max_pd(__mmask8 __U, __m512d __A, __m512d __B)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_set4_epi32(int __A, int __B, int __C, int __D)
/// Generates a 128-bit vector of [4 x i32] with unspecified content.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_undefined_si128(void)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_mul_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask3_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_cvtsepi32_epi8(__m512i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_maskz_cvtusepi64_epi16(__mmask8 __M, __m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_cvtepi8_epi32(__m512i __W, __mmask16 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_maskz_cvtsepi64_epi8(__mmask8 __M, __m512i __A)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_castps_pd(__m512 __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_cvtepi8_epi64(__m512i __W, __mmask8 __U, __m128i __A)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_mask_mullo_epi32(__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_maskz_getexp_ss(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_mask_cvtepi32_epi8(__m128i __O, __mmask16 __M, __m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_and_si512(__m512i __a, __m512i __b)
static __inline__ void __DEFAULT_FN_ATTRS _mm512_mask_store_epi64(void *__P, __mmask8 __U, __m512i __A)
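/* Hypothetical usage sketch, not part of the header: a masked store writes only the
 * elements whose mask bit is set and leaves the rest of the destination memory
 * untouched; the 64-byte alignment requirement of the unmasked store still applies. */
static __inline__ void __DEFAULT_FN_ATTRS example_store_low_half_epi64(long long *__dst_aligned64, __m512i __v)
{
  /* 0x0F selects elements 0..3; elements 4..7 of the destination are not written. */
  _mm512_mask_store_epi64(__dst_aligned64, (__mmask8)0x0F, __v);
}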
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_maskz_rcp14_sd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_setzero_si512(void)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_rsqrt14_sd(__m128d __A, __m128d __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_cvtepu32_ps(__mmask16 __U, __m512i __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_getexp_ps(__m512 __W, __mmask16 __U, __m512 __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_srl_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_cvtepi32_epi16(__m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_and_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_sub_pd(__m512d __a, __m512d __b)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_maskz_fmsub_ss(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_expand_pd(__m512d __W, __mmask8 __U, __m512d __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_unpacklo_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_maskz_add_sd(__mmask8 __U, __m128d __A, __m128d __B)
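/* Hypothetical usage sketch, not part of the header: the scalar _maskz_ forms operate
 * only on element 0; when bit 0 of the mask is clear the low result element is zeroed,
 * and the upper element is passed through from the first operand. */
static __inline__ __m128d __DEFAULT_FN_ATTRS example_conditional_add_sd(int __do_add, __m128d __a, __m128d __b)
{
  /* Low lane is __a[0] + __b[0] when __do_add is nonzero, 0.0 otherwise. */
  return _mm_maskz_add_sd((__mmask8)(__do_add ? 1 : 0), __a, __b);
}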
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask3_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_cvtepi16_epi32(__mmask16 __U, __m256i __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_undefined_pd(void)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_min_ps(__mmask16 __U, __m512 __A, __m512 __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_andnot_si512(__m512i __A, __m512i __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_fmaddsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_permutex2var_epi32(__m512i __A, __mmask16 __U, __m512i __I, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_mul_epi32(__m512i __X, __m512i __Y)
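/* Hypothetical usage sketch, not part of the header: _mm512_mul_epi32 sign-extends the
 * low 32 bits of each 64-bit lane of both operands and produces eight full 64-bit
 * products, i.e. it is a widening multiply rather than an element-wise one. */
static __inline__ __m512i __DEFAULT_FN_ATTRS example_widening_square_epi32(__m512i __x)
{
  /* Each 64-bit result lane holds the square of that lane's low 32 bits, as int64. */
  return _mm512_mul_epi32(__x, __x);
}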
static __inline__ void __DEFAULT_FN_ATTRS _mm512_mask_cvtsepi64_storeu_epi16(void *__P, __mmask8 __M, __m512i __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_fmsubadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_set_ps(float __A, float __B, float __C, float __D, float __E, float __F, float __G, float __H, float __I, float __J, float __K, float __L, float __M, float __N, float __O, float __P)
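/* Hypothetical usage sketch, not part of the header: like the other _set_ intrinsics,
 * _mm512_set_ps takes its arguments from the highest element down, so the last
 * argument lands in element 0. */
static __inline__ __m512 __DEFAULT_FN_ATTRS example_ramp_ps(void)
{
  /* Produces {0, 1, 2, ..., 15} with element 0 holding 0.0f. */
  return _mm512_set_ps(15.0f, 14.0f, 13.0f, 12.0f, 11.0f, 10.0f, 9.0f, 8.0f,
                       7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f, 0.0f);
}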
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_abs_epi32(__m512i __W, __mmask16 __U, __m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_load_epi32(__m512i __W, __mmask16 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_maskz_cvtsepi64_epi16(__mmask8 __M, __m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_cvtepu8_epi32(__mmask16 __U, __m128i __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_castps128_ps512(__m128 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_maskz_getexp_sd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_xor_epi32(__m512i __a, __m512i __b)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_setzero_si256(void)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_cvtepi32_epi8(__m512i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_maskz_fmsub_sd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ void __DEFAULT_FN_ATTRS _mm512_stream_pd(double *__P, __m512d __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask3_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_ceil_ps(__m512 __A)
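/* Hypothetical usage sketch, not part of the header: _mm512_floor_ps and
 * _mm512_ceil_ps (declared above) round every lane toward -infinity and +infinity
 * respectively. */
static __inline__ __m512 __DEFAULT_FN_ATTRS example_fractional_lanes_ps(__m512 __a)
{
  /* ceil(x) - floor(x) is 0.0 for integral finite lanes and 1.0 for fractional ones. */
  return _mm512_sub_ps(_mm512_ceil_ps(__a), _mm512_floor_ps(__a));
}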
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_maskz_sub_sd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_fmsub_ps(__m512 __A, __m512 __B, __m512 __C)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_expandloadu_pd(__m512d __W, __mmask8 __U, void const *__P)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_fmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_rolv_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_cvttps_epu32(__m512 __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_mul_ps(__mmask16 __U, __m512 __A, __m512 __B)
static __inline void __DEFAULT_FN_ATTRS _mm512_store_epi32(void *__P, __m512i __A)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_load_ps(float const *__p)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_maskz_div_sd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_sra_epi64(__mmask8 __U, __m512i __A, __m128i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_or_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_permutexvar_ps(__m512 __W, __mmask16 __U, __m512i __X, __m512 __Y)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_getexp_ss(__m128 __A, __m128 __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mask_sub_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_expandloadu_ps(__mmask16 __U, void const *__P)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_cvttps_epu32(__m512i __W, __mmask16 __U, __m512 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mask_scalef_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ void __DEFAULT_FN_ATTRS _mm512_mask_cvtusepi32_storeu_epi16(void *__P, __mmask16 __M, __m512i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mask_cvtsd_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128d __B)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_mask_cmpneq_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_rcp14_ps(__m512 __A)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_mask_test_epi32_mask(__mmask16 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_cvtepi8_epi32(__mmask16 __U, __m128i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_rorv_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
static __inline void __DEFAULT_FN_ATTRS _mm512_mask_storeu_epi32(void *__P, __mmask16 __U, __m512i __A)
static __inline__ unsigned __DEFAULT_FN_ATTRS _mm_cvtsd_u32(__m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_maskz_cvtpd_epu32(__mmask8 __U, __m512d __A)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_permutex2var_ps(__m512 __A, __m512i __I, __m512 __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_fmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_sub_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
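/* Hypothetical usage sketch, not part of the header: the _mask_ form above merges,
 * i.e. lanes whose mask bit is clear are taken from __W, while the corresponding
 * _maskz_ forms zero those lanes instead. */
static __inline__ __m512i __DEFAULT_FN_ATTRS example_masked_sub_epi32(__m512i __acc, __m512i __a, __m512i __b)
{
  /* Only the even-numbered lanes (mask 0x5555) are updated with __a - __b;
   * odd-numbered lanes keep their previous value from __acc. */
  return _mm512_mask_sub_epi32(__acc, (__mmask16)0x5555, __a, __b);
}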
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_cmpge_epu32_mask(__m512i __a, __m512i __b)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_cmpneq_epi32_mask(__m512i __a, __m512i __b)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_rolv_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_max_epu64(__m512i __A, __m512i __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_permutex2var_ps(__m512 __A, __mmask16 __U, __m512i __I, __m512 __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_broadcast_f32x4(__mmask16 __M, __m128 __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_getexp_pd(__mmask8 __U, __m512d __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_broadcast_i64x4(__mmask8 __M, __m256i __A)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_mul_ps(__m512 __a, __m512 __b)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_maskz_add_ss(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_cvtps_epi32(__m512i __W, __mmask16 __U, __m512 __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_permutevar_ps(__m512 __A, __m512i __C)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_add_epi32(__m512i __A, __m512i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtu32_sd(__m128d __A, unsigned __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mask_rcp14_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_unpacklo_epi32(__m512i __A, __m512i __B)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_cmplt_epu32_mask(__m512i __a, __m512i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_maskz_cvtepi64_epi16(__mmask8 __M, __m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_andnot_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_max_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_set_pd(double __A, double __B, double __C, double __D, double __E, double __F, double __G, double __H)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_permutex2var_epi32(__m512i __A, __m512i __I, __m512i __B)
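/* Hypothetical usage sketch, not part of the header: permutex2var selects each result
 * lane from the concatenation of the two data operands; for 32-bit lanes the low four
 * index bits pick the element and bit 4 picks the source (0 = first operand,
 * 1 = second operand). */
static __inline__ __m512i __DEFAULT_FN_ATTRS example_interleave_low_epi32(__m512i __a, __m512i __b)
{
  /* Indices 0..7 take elements from __a, 16..23 from __b, alternating lane by lane. */
  const __m512i __idx = _mm512_set_epi32(23, 7, 22, 6, 21, 5, 20, 4,
                                         19, 3, 18, 2, 17, 1, 16, 0);
  return _mm512_permutex2var_epi32(__a, __idx, __b);
}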
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_rorv_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_mask_cvtepi64_epi32(__m256i __O, __mmask8 __M, __m512i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mask_getexp_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_mask_load_ps(__m512 __W, __mmask16 __U, void const *__P)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_fmaddsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_mask_cmpeq_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_max_ps(__m512 __A, __m512 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mask_rsqrt14_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_mov_pd(__m512d __W, __mmask8 __U, __m512d __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_sqrt_ps(__m512 __W, __mmask16 __U, __m512 __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_undefined(void)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_broadcast_f32x4(__m128 __A)
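/* Hypothetical usage sketch, not part of the header: broadcast_f32x4 replicates a
 * 4-float vector into all four 128-bit lanes of a 512-bit vector, which is a common
 * way to tile a small constant pattern. */
static __inline__ __m512 __DEFAULT_FN_ATTRS example_tile_weights_ps(void)
{
  /* The pattern {0.30, 0.59, 0.11, 0.0} is repeated four times across the result. */
  return _mm512_broadcast_f32x4(_mm_set_ps(0.0f, 0.11f, 0.59f, 0.30f));
}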
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_mask_cvtusepi32_epi8(__m128i __O, __mmask16 __M, __m512i __A)
static __inline__ unsigned __DEFAULT_FN_ATTRS _mm_cvtss_u32(__m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mask3_fmadd_ss(__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_fnmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_cvtepi32_epi64(__mmask8 __U, __m256i __X)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_getexp_ps(__mmask16 __U, __m512 __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask3_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_cvtepu32_pd(__m512d __W, __mmask8 __U, __m256i __A)
static __inline __m512d __DEFAULT_FN_ATTRS _mm512_unpacklo_pd(__m512d __a, __m512d __b)
static __inline__ int __DEFAULT_FN_ATTRS _mm_cvttsd_i32(__m128d __A)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_abs_epi32(__m512i __A)
static __inline void __DEFAULT_FN_ATTRS _mm512_mask_storeu_pd(void *__P, __mmask8 __U, __m512d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm512_maskz_cvtusepi64_epi8(__mmask8 __M, __m512i __A)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_maskz_loadu_ps(__mmask16 __U, void const *__P)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_permutevar_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512i __C)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_maskz_add_pd(__mmask8 __U, __m512d __A, __m512d __B)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_mask_unpacklo_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
static __inline__ void __DEFAULT_FN_ATTRS _mm512_mask_cvtusepi64_storeu_epi16(void *__P, __mmask8 __M, __m512i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_undefined_pd(void)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_srlv_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_load_epi64(__m512i __W, __mmask8 __U, void const *__P)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_min_epi32(__mmask16 __M, __m512i __A, __m512i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_maskz_min_ss(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_mask_moveldup_ps(__m512 __W, __mmask16 __U, __m512 __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_sub_ps(__mmask16 __U, __m512 __A, __m512 __B)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_mask_cmpge_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_xor_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_compress_epi64(__m512i __W, __mmask8 __U, __m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_expandloadu_epi32(__m512i __W, __mmask16 __U, void const *__P)
static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_permutevar_ps(__mmask16 __U, __m512 __A, __m512i __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_mask_cvtsepi64_epi32(__m256i __O, __mmask8 __M, __m512i __A)
static __inline __m512 __DEFAULT_FN_ATTRS _mm512_div_ps(__m512 __a, __m512 __b)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_max_epi64(__mmask8 __M, __m512i __A, __m512i __B)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_mask_cmple_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b)
static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_permutexvar_pd(__m512i __X, __m512d __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_cvtpd_epu32(__m512d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_maskz_fnmadd_sd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_maskz_cvtusepi32_epi16(__mmask16 __M, __m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_compress_epi32(__mmask16 __U, __m512i __A)
static __inline void __DEFAULT_FN_ATTRS _mm512_store_ps(void *__P, __m512 __A)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_cmplt_epi32_mask(__m512i __a, __m512i __b)
static __inline __m512i __DEFAULT_FN_ATTRS _mm512_max_epi32(__m512i __A, __m512i __B)
static __inline__ int __DEFAULT_FN_ATTRS _mm512_kortestc(__mmask16 __A, __mmask16 __B)
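/* Hypothetical usage sketch, not part of the header: _mm512_kortestc ORs the two
 * masks and reports whether the result is all ones, which is a cheap way to check
 * that every lane satisfied at least one of two predicates. */
static __inline__ int __DEFAULT_FN_ATTRS example_every_lane_covered(__m512i __a, __m512i __b, __m512i __threshold)
{
  __mmask16 __ka = _mm512_cmpge_epi32_mask(__a, __threshold);
  __mmask16 __kb = _mm512_cmpge_epi32_mask(__b, __threshold);
  /* Returns 1 exactly when (__ka | __kb) == 0xFFFF. */
  return _mm512_kortestc(__ka, __kb);
}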