25 #error "Never use <avxintrin.h> directly; include <immintrin.h> instead."
/* Internal 256-bit (32-byte) vector element types used by the AVX
   intrinsics below.  Each is a GCC/Clang extended vector of the named
   scalar type; the prefix encodes lane count and element kind. */
/* 4 x signed 64-bit integers. */
33 typedef long long __v4di
__attribute__ ((__vector_size__ (32)));
/* 4 x unsigned 64-bit integers. */
39 typedef unsigned long long __v4du
__attribute__ ((__vector_size__ (32)));
/* 8 x unsigned 32-bit integers. */
40 typedef unsigned int __v8su
__attribute__ ((__vector_size__ (32)));
/* 16 x unsigned 16-bit integers. */
41 typedef unsigned short __v16hu
__attribute__ ((__vector_size__ (32)));
/* 32 x unsigned 8-bit integers. */
42 typedef unsigned char __v32qu
__attribute__ ((__vector_size__ (32)));
/* 32 x explicitly signed 8-bit integers ("char" signedness is
   implementation-defined, so a signed variant is spelled out). */
46 typedef signed char __v32qs
__attribute__((__vector_size__(32)));
/* Public 256-bit integer vector type exposed to users of the header. */
50 typedef long long __m256i
__attribute__((__vector_size__(32)));
/* Attributes applied to every intrinsic in this header: always inline the
   wrapper, emit no debug info for it, and require the "avx" target feature.
   It is #undef'd at the end of the header so it does not leak to includers. */
53 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx")))
71 return (__m256d)((__v4df)__a+(__v4df)
__b);
89 return (__m256)((__v8sf)__a+(__v8sf)
__b);
107 return (__m256d)((__v4df)__a-(__v4df)
__b);
125 return (__m256)((__v8sf)__a-(__v8sf)
__b);
144 return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b);
163 return (__m256)__builtin_ia32_addsubps256((__v8sf)__a, (__v8sf)__b);
181 return (__m256d)((__v4df)__a/(__v4df)
__b);
199 return (__m256)((__v8sf)__a/(__v8sf)
__b);
218 return (__m256d)__builtin_ia32_maxpd256((__v4df)__a, (__v4df)__b);
237 return (__m256)__builtin_ia32_maxps256((__v8sf)__a, (__v8sf)__b);
256 return (__m256d)__builtin_ia32_minpd256((__v4df)__a, (__v4df)__b);
275 return (__m256)__builtin_ia32_minps256((__v8sf)__a, (__v8sf)__b);
293 return (__m256d)((__v4df)__a * (__v4df)
__b);
311 return (__m256)((__v8sf)__a * (__v8sf)
__b);
328 return (__m256d)__builtin_ia32_sqrtpd256((__v4df)__a);
345 return (__m256)__builtin_ia32_sqrtps256((__v8sf)__a);
362 return (__m256)__builtin_ia32_rsqrtps256((__v8sf)__a);
379 return (__m256)__builtin_ia32_rcpps256((__v8sf)__a);
/* Round each double in V according to the rounding-control immediate M
   (an _MM_FROUND_* value); M must be a compile-time constant. */
411 #define _mm256_round_pd(V, M) __extension__ ({ \
412 (__m256d)__builtin_ia32_roundpd256((__v4df)(__m256d)(V), (M)); })
/* Round each float in V according to the rounding-control immediate M. */
443 #define _mm256_round_ps(V, M) __extension__ ({ \
444 (__m256)__builtin_ia32_roundps256((__v8sf)(__m256)(V), (M)); })
/* Convenience wrappers: round toward +infinity / -infinity. */
461 #define _mm256_ceil_pd(V) _mm256_round_pd((V), _MM_FROUND_CEIL)
479 #define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR)
496 #define _mm256_ceil_ps(V) _mm256_round_ps((V), _MM_FROUND_CEIL)
513 #define _mm256_floor_ps(V) _mm256_round_ps((V), _MM_FROUND_FLOOR)
531 return (__m256d)((__v4du)__a & (__v4du)
__b);
549 return (__m256)((__v8su)__a & (__v8su)
__b);
570 return (__m256d)(~(__v4du)__a & (__v4du)
__b);
591 return (__m256)(~(__v8su)__a & (__v8su)
__b);
609 return (__m256d)((__v4du)__a | (__v4du)
__b);
627 return (__m256)((__v8su)__a | (__v8su)
__b);
645 return (__m256d)((__v4du)__a ^ (__v4du)
__b);
663 return (__m256)((__v8su)__a ^ (__v8su)
__b);
687 return (__m256d)__builtin_ia32_haddpd256((__v4df)__a, (__v4df)__b);
710 return (__m256)__builtin_ia32_haddps256((__v8sf)__a, (__v8sf)__b);
733 return (__m256d)__builtin_ia32_hsubpd256((__v4df)__a, (__v4df)__b);
756 return (__m256)__builtin_ia32_hsubps256((__v8sf)__a, (__v8sf)__b);
786 return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__a, (__v2di)__c);
825 return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)__a, (__v4di)__c);
880 return (__m128)__builtin_ia32_vpermilvarps((__v4sf)__a, (__v4si)__c);
971 return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, (__v8si)__c);
/* Shuffle the two doubles of A using the 2-bit immediate C: bit 0 selects
   the source element for result lane 0, bit 1 for lane 1. */
1000 #define _mm_permute_pd(A, C) __extension__ ({ \
1001 (__m128d)__builtin_shufflevector((__v2df)(__m128d)(A), \
1002 (__v2df)_mm_undefined_pd(), \
1003 ((C) >> 0) & 0x1, ((C) >> 1) & 0x1); })
/* Shuffle the four doubles of A independently within each 128-bit lane:
   bits 0-1 of C control the low lane, bits 2-3 the high lane (hence the
   0+ / 2+ base offsets in the shufflevector indices). */
1041 #define _mm256_permute_pd(A, C) __extension__ ({ \
1042 (__m256d)__builtin_shufflevector((__v4df)(__m256d)(A), \
1043 (__v4df)_mm256_undefined_pd(), \
1044 0 + (((C) >> 0) & 0x1), \
1045 0 + (((C) >> 1) & 0x1), \
1046 2 + (((C) >> 2) & 0x1), \
1047 2 + (((C) >> 3) & 0x1)); })
/* Shuffle the four floats of A: each 2-bit field of C picks the source
   element for the corresponding result lane. */
1101 #define _mm_permute_ps(A, C) __extension__ ({ \
1102 (__m128)__builtin_shufflevector((__v4sf)(__m128)(A), \
1103 (__v4sf)_mm_undefined_ps(), \
1104 ((C) >> 0) & 0x3, ((C) >> 2) & 0x3, \
1105 ((C) >> 4) & 0x3, ((C) >> 6) & 0x3); })
/* Shuffle the eight floats of A: the same 8-bit control C is applied to
   both 128-bit lanes (base offsets 0 and 4). */
1195 #define _mm256_permute_ps(A, C) __extension__ ({ \
1196 (__m256)__builtin_shufflevector((__v8sf)(__m256)(A), \
1197 (__v8sf)_mm256_undefined_ps(), \
1198 0 + (((C) >> 0) & 0x3), \
1199 0 + (((C) >> 2) & 0x3), \
1200 0 + (((C) >> 4) & 0x3), \
1201 0 + (((C) >> 6) & 0x3), \
1202 4 + (((C) >> 0) & 0x3), \
1203 4 + (((C) >> 2) & 0x3), \
1204 4 + (((C) >> 4) & 0x3), \
1205 4 + (((C) >> 6) & 0x3)); })
/* Select (or zero) each 128-bit half of the result from the four halves of
   V1/V2 according to the immediate M, via the vperm2f128 builtins. */
1244 #define _mm256_permute2f128_pd(V1, V2, M) __extension__ ({ \
1245 (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)(__m256d)(V1), \
1246 (__v4df)(__m256d)(V2), (M)); })
1285 #define _mm256_permute2f128_ps(V1, V2, M) __extension__ ({ \
1286 (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)(__m256)(V1), \
1287 (__v8sf)(__m256)(V2), (M)); })
1325 #define _mm256_permute2f128_si256(V1, V2, M) __extension__ ({ \
1326 (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)(__m256i)(V1), \
1327 (__v8si)(__m256i)(V2), (M)); })
/* Per-element select between V1 and V2: result lane i comes from V2 when
   bit i of the immediate M is set (indices >= 4 address V2), else from V1. */
1354 #define _mm256_blend_pd(V1, V2, M) __extension__ ({ \
1355 (__m256d)__builtin_shufflevector((__v4df)(__m256d)(V1), \
1356 (__v4df)(__m256d)(V2), \
1357 (((M) & 0x01) ? 4 : 0), \
1358 (((M) & 0x02) ? 5 : 1), \
1359 (((M) & 0x04) ? 6 : 2), \
1360 (((M) & 0x08) ? 7 : 3)); })
/* Same per-element select for eight floats (indices >= 8 address V2). */
1386 #define _mm256_blend_ps(V1, V2, M) __extension__ ({ \
1387 (__m256)__builtin_shufflevector((__v8sf)(__m256)(V1), \
1388 (__v8sf)(__m256)(V2), \
1389 (((M) & 0x01) ? 8 : 0), \
1390 (((M) & 0x02) ? 9 : 1), \
1391 (((M) & 0x04) ? 10 : 2), \
1392 (((M) & 0x08) ? 11 : 3), \
1393 (((M) & 0x10) ? 12 : 4), \
1394 (((M) & 0x20) ? 13 : 5), \
1395 (((M) & 0x40) ? 14 : 6), \
1396 (((M) & 0x80) ? 15 : 7)); })
1422 return (__m256d)__builtin_ia32_blendvpd256(
1423 (__v4df)__a, (__v4df)__b, (__v4df)__c);
1450 return (__m256)__builtin_ia32_blendvps256(
1451 (__v8sf)__a, (__v8sf)__b, (__v8sf)__c);
/* Dot product of the float vectors V1 and V2 controlled by the immediate
   mask M (which lanes participate and which receive the result), via the
   dpps builtin; M must be a compile-time constant. */
1491 #define _mm256_dp_ps(V1, V2, M) __extension__ ({ \
1492 (__m256)__builtin_ia32_dpps256((__v8sf)(__m256)(V1), \
1493 (__v8sf)(__m256)(V2), (M)); })
/* AVX shuffle of floats: within each 128-bit lane, the low two result
   elements are picked from `a` and the high two from `b`, each by a 2-bit
   field of `mask` (indices 0-7 address `a`, 8-15 address `b`; the 0/4 and
   8/12 bases select the lane). */
1543 #define _mm256_shuffle_ps(a, b, mask) __extension__ ({ \
1544 (__m256)__builtin_shufflevector((__v8sf)(__m256)(a), \
1545 (__v8sf)(__m256)(b), \
1546 0 + (((mask) >> 0) & 0x3), \
1547 0 + (((mask) >> 2) & 0x3), \
1548 8 + (((mask) >> 4) & 0x3), \
1549 8 + (((mask) >> 6) & 0x3), \
1550 4 + (((mask) >> 0) & 0x3), \
1551 4 + (((mask) >> 2) & 0x3), \
1552 12 + (((mask) >> 4) & 0x3), \
1553 12 + (((mask) >> 6) & 0x3)); })
/* AVX shuffle of doubles: per 128-bit lane, result element 0 is chosen from
   `a` and element 1 from `b` (indices 0-3 address `a`, 4-7 address `b`),
   one mask bit per result element. */
1596 #define _mm256_shuffle_pd(a, b, mask) __extension__ ({ \
1597 (__m256d)__builtin_shufflevector((__v4df)(__m256d)(a), \
1598 (__v4df)(__m256d)(b), \
1599 0 + (((mask) >> 0) & 0x1), \
1600 4 + (((mask) >> 1) & 0x1), \
1601 2 + (((mask) >> 2) & 0x1), \
1602 6 + (((mask) >> 3) & 0x1)); })
/* Comparison predicates for _mm_cmp_* / _mm256_cmp_*.
   Suffix key: O = ordered, U = unordered (how NaN operands compare),
   Q = quiet (non-signaling on QNaN), S = signaling. */
1605 #define _CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */
1606 #define _CMP_LT_OS 0x01 /* Less-than (ordered, signaling) */
1607 #define _CMP_LE_OS 0x02 /* Less-than-or-equal (ordered, signaling) */
1608 #define _CMP_UNORD_Q 0x03 /* Unordered (non-signaling) */
1609 #define _CMP_NEQ_UQ 0x04 /* Not-equal (unordered, non-signaling) */
1610 #define _CMP_NLT_US 0x05 /* Not-less-than (unordered, signaling) */
1611 #define _CMP_NLE_US 0x06 /* Not-less-than-or-equal (unordered, signaling) */
1612 #define _CMP_ORD_Q 0x07 /* Ordered (non-signaling) */
1613 #define _CMP_EQ_UQ 0x08 /* Equal (unordered, non-signaling) */
1614 #define _CMP_NGE_US 0x09 /* Not-greater-than-or-equal (unordered, signaling) */
1615 #define _CMP_NGT_US 0x0a /* Not-greater-than (unordered, signaling) */
1616 #define _CMP_FALSE_OQ 0x0b /* False (ordered, non-signaling) */
1617 #define _CMP_NEQ_OQ 0x0c /* Not-equal (ordered, non-signaling) */
1618 #define _CMP_GE_OS 0x0d /* Greater-than-or-equal (ordered, signaling) */
1619 #define _CMP_GT_OS 0x0e /* Greater-than (ordered, signaling) */
1620 #define _CMP_TRUE_UQ 0x0f /* True (unordered, non-signaling) */
1621 #define _CMP_EQ_OS 0x10 /* Equal (ordered, signaling) */
1622 #define _CMP_LT_OQ 0x11 /* Less-than (ordered, non-signaling) */
1623 #define _CMP_LE_OQ 0x12 /* Less-than-or-equal (ordered, non-signaling) */
1624 #define _CMP_UNORD_S 0x13 /* Unordered (signaling) */
1625 #define _CMP_NEQ_US 0x14 /* Not-equal (unordered, signaling) */
1626 #define _CMP_NLT_UQ 0x15 /* Not-less-than (unordered, non-signaling) */
1627 #define _CMP_NLE_UQ 0x16 /* Not-less-than-or-equal (unordered, non-signaling) */
1628 #define _CMP_ORD_S 0x17 /* Ordered (signaling) */
1629 #define _CMP_EQ_US 0x18 /* Equal (unordered, signaling) */
1630 #define _CMP_NGE_UQ 0x19 /* Not-greater-than-or-equal (unordered, non-signaling) */
1631 #define _CMP_NGT_UQ 0x1a /* Not-greater-than (unordered, non-signaling) */
1632 #define _CMP_FALSE_OS 0x1b /* False (ordered, signaling) */
1633 #define _CMP_NEQ_OS 0x1c /* Not-equal (ordered, signaling) */
1634 #define _CMP_GE_OQ 0x1d /* Greater-than-or-equal (ordered, non-signaling) */
1635 #define _CMP_GT_OQ 0x1e /* Greater-than (ordered, non-signaling) */
1636 #define _CMP_TRUE_US 0x1f /* True (unordered, signaling) */
/* Packed compares: element-wise compare of a and b using predicate c (one
   of the _CMP_* constants above, a compile-time immediate), producing an
   all-ones / all-zeros mask per element. */
1670 #define _mm_cmp_pd(a, b, c) __extension__ ({ \
1671 (__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), \
1672 (__v2df)(__m128d)(b), (c)); })
1706 #define _mm_cmp_ps(a, b, c) __extension__ ({ \
1707 (__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), \
1708 (__v4sf)(__m128)(b), (c)); })
1742 #define _mm256_cmp_pd(a, b, c) __extension__ ({ \
1743 (__m256d)__builtin_ia32_cmppd256((__v4df)(__m256d)(a), \
1744 (__v4df)(__m256d)(b), (c)); })
1778 #define _mm256_cmp_ps(a, b, c) __extension__ ({ \
1779 (__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \
1780 (__v8sf)(__m256)(b), (c)); })
/* Scalar compares: compare only the low element of a and b with predicate
   c; the upper bits of the result are passed through by the builtin. */
1813 #define _mm_cmp_sd(a, b, c) __extension__ ({ \
1814 (__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), \
1815 (__v2df)(__m128d)(b), (c)); })
1848 #define _mm_cmp_ss(a, b, c) __extension__ ({ \
1849 (__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), \
1850 (__v4sf)(__m128)(b), (c)); })
1870 __v8si
__b = (__v8si)__a;
1871 return __b[__imm & 7];
1892 __v16hi
__b = (__v16hi)__a;
1893 return (
unsigned short)__b[__imm & 15];
1914 __v32qi
__b = (__v32qi)__a;
1915 return (
unsigned char)__b[__imm & 31];
1935 _mm256_extract_epi64(__m256i __a,
const int __imm)
1937 __v4di
__b = (__v4di)__a;
1938 return __b[__imm & 3];
1963 __v8si
__c = (__v8si)__a;
1964 __c[__imm & 7] =
__b;
1965 return (__m256i)
__c;
1990 __v16hi
__c = (__v16hi)__a;
1991 __c[__imm & 15] =
__b;
1992 return (__m256i)
__c;
2016 __v32qi
__c = (__v32qi)__a;
2017 __c[__imm & 31] =
__b;
2018 return (__m256i)
__c;
2041 _mm256_insert_epi64(__m256i __a,
long long __b,
int const __imm)
2043 __v4di
__c = (__v4di)__a;
2044 __c[__imm & 3] =
__b;
2045 return (__m256i)
__c;
2062 return (__m256d)__builtin_convertvector((__v4si)__a, __v4df);
2077 return (__m256)__builtin_ia32_cvtdq2ps256((__v8si) __a);
2093 return (__m128)__builtin_ia32_cvtpd2ps256((__v4df) __a);
2108 return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf) __a);
2114 return (__m256d)__builtin_convertvector((__v4sf)__a, __v4df);
2120 return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) __a);
2126 return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) __a);
2132 return (__m256i)__builtin_ia32_cvttps2dq256((__v8sf) __a);
2144 __v8si __b = (__v8si)__a;
2158 return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 1, 1, 3, 3, 5, 5, 7, 7);
2164 return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 0, 2, 2, 4, 4, 6, 6);
2170 return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 0, 2, 2);
2177 return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 1, 5, 1+2, 5+2);
2183 return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 0, 4, 0+2, 4+2);
2189 return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1);
2195 return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1);
2202 return __builtin_ia32_vtestzpd((__v2df)__a, (__v2df)__b);
2208 return __builtin_ia32_vtestcpd((__v2df)__a, (__v2df)__b);
2214 return __builtin_ia32_vtestnzcpd((__v2df)__a, (__v2df)__b);
2220 return __builtin_ia32_vtestzps((__v4sf)__a, (__v4sf)__b);
2226 return __builtin_ia32_vtestcps((__v4sf)__a, (__v4sf)__b);
2232 return __builtin_ia32_vtestnzcps((__v4sf)__a, (__v4sf)__b);
2238 return __builtin_ia32_vtestzpd256((__v4df)__a, (__v4df)__b);
2244 return __builtin_ia32_vtestcpd256((__v4df)__a, (__v4df)__b);
2250 return __builtin_ia32_vtestnzcpd256((__v4df)__a, (__v4df)__b);
2256 return __builtin_ia32_vtestzps256((__v8sf)__a, (__v8sf)__b);
2262 return __builtin_ia32_vtestcps256((__v8sf)__a, (__v8sf)__b);
2268 return __builtin_ia32_vtestnzcps256((__v8sf)__a, (__v8sf)__b);
2274 return __builtin_ia32_ptestz256((__v4di)__a, (__v4di)__b);
2280 return __builtin_ia32_ptestc256((__v4di)__a, (__v4di)__b);
2286 return __builtin_ia32_ptestnzc256((__v4di)__a, (__v4di)__b);
2293 return __builtin_ia32_movmskpd256((__v4df)__a);
2299 return __builtin_ia32_movmskps256((__v8sf)__a);
2306 __builtin_ia32_vzeroall();
2312 __builtin_ia32_vzeroupper();
2320 return (__m128)(__v4sf){ __f, __f, __f, __f };
2327 return (__m256d)(__v4df){ __d, __d, __d, __d };
2334 return (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f };
2340 return (__m256d)__builtin_ia32_vbroadcastf128_pd256((__v2df
const *)__a);
2346 return (__m256)__builtin_ia32_vbroadcastf128_ps256((__v4sf
const *)__a);
2353 return *(__m256d *)__p;
2359 return *(__m256 *)__p;
2368 return ((
struct __loadu_pd*)__p)->__v;
2377 return ((
struct __loadu_ps*)__p)->__v;
2389 struct __loadu_si256 {
2392 return ((
struct __loadu_si256*)__p)->__v;
2398 return (__m256i)__builtin_ia32_lddqu256((
char const *)__p);
2405 *(__m256d *)__p = __a;
2411 *(__m256 *)__p = __a;
2417 struct __storeu_pd {
2420 ((
struct __storeu_pd*)__p)->__v = __a;
2426 struct __storeu_ps {
2429 ((
struct __storeu_ps*)__p)->__v = __a;
2441 struct __storeu_si256 {
2444 ((
struct __storeu_si256*)__p)->__v = __a;
2451 return (__m128d)__builtin_ia32_maskloadpd((
const __v2df *)__p, (__v2di)__m);
2457 return (__m256d)__builtin_ia32_maskloadpd256((
const __v4df *)__p,
2464 return (__m128)__builtin_ia32_maskloadps((
const __v4sf *)__p, (__v4si)__m);
2470 return (__m256)__builtin_ia32_maskloadps256((
const __v8sf *)__p, (__v8si)__m);
2477 __builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8si)__m, (__v8sf)__a);
2483 __builtin_ia32_maskstorepd((__v2df *)__p, (__v2di)__m, (__v2df)__a);
2489 __builtin_ia32_maskstorepd256((__v4df *)__p, (__v4di)__m, (__v4df)__a);
2495 __builtin_ia32_maskstoreps((__v4sf *)__p, (__v4si)__m, (__v4sf)__a);
2502 __builtin_nontemporal_store((__v4di)__b, (__v4di*)__a);
2508 __builtin_nontemporal_store((__v4df)__b, (__v4df*)__a);
2514 __builtin_nontemporal_store((__v8sf)__a, (__v8sf*)__p);
2521 return (__m256d)__builtin_ia32_undef256();
2527 return (__m256)__builtin_ia32_undef256();
2533 return (__m256i)__builtin_ia32_undef256();
2539 return (__m256d){ __d,
__c,
__b, __a };
2544 float __e,
float __f,
float __g,
float __h)
2546 return (__m256){ __h, __g, __f, __e, __d,
__c,
__b, __a };
2551 int __i4,
int __i5,
int __i6,
int __i7)
2553 return (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 };
2558 short __w11,
short __w10,
short __w09,
short __w08,
2559 short __w07,
short __w06,
short __w05,
short __w04,
2560 short __w03,
short __w02,
short __w01,
short __w00)
2562 return (__m256i)(__v16hi){ __w00, __w01, __w02, __w03, __w04, __w05, __w06,
2563 __w07, __w08, __w09, __w10, __w11, __w12, __w13, __w14, __w15 };
2568 char __b27,
char __b26,
char __b25,
char __b24,
2569 char __b23,
char __b22,
char __b21,
char __b20,
2570 char __b19,
char __b18,
char __b17,
char __b16,
2571 char __b15,
char __b14,
char __b13,
char __b12,
2572 char __b11,
char __b10,
char __b09,
char __b08,
2573 char __b07,
char __b06,
char __b05,
char __b04,
2574 char __b03,
char __b02,
char __b01,
char __b00)
2576 return (__m256i)(__v32qi){
2577 __b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07,
2578 __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15,
2579 __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23,
2580 __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31
2587 return (__m256i)(__v4di){ __d,
__c,
__b, __a };
2594 return (__m256d){ __a,
__b,
__c, __d };
2599 float __e,
float __f,
float __g,
float __h)
2601 return (__m256){ __a,
__b,
__c, __d, __e, __f, __g, __h };
2606 int __i4,
int __i5,
int __i6,
int __i7)
2608 return (__m256i)(__v8si){ __i0, __i1, __i2, __i3, __i4, __i5, __i6, __i7 };
2613 short __w11,
short __w10,
short __w09,
short __w08,
2614 short __w07,
short __w06,
short __w05,
short __w04,
2615 short __w03,
short __w02,
short __w01,
short __w00)
2617 return (__m256i)(__v16hi){ __w15, __w14, __w13, __w12, __w11, __w10, __w09,
2618 __w08, __w07, __w06, __w05, __w04, __w03, __w02, __w01, __w00 };
2623 char __b27,
char __b26,
char __b25,
char __b24,
2624 char __b23,
char __b22,
char __b21,
char __b20,
2625 char __b19,
char __b18,
char __b17,
char __b16,
2626 char __b15,
char __b14,
char __b13,
char __b12,
2627 char __b11,
char __b10,
char __b09,
char __b08,
2628 char __b07,
char __b06,
char __b05,
char __b04,
2629 char __b03,
char __b02,
char __b01,
char __b00)
2631 return (__m256i)(__v32qi){
2632 __b31, __b30, __b29, __b28, __b27, __b26, __b25, __b24,
2633 __b23, __b22, __b21, __b20, __b19, __b18, __b17, __b16,
2634 __b15, __b14, __b13, __b12, __b11, __b10, __b09, __b08,
2635 __b07, __b06, __b05, __b04, __b03, __b02, __b01, __b00 };
2641 return (__m256i)(__v4di){ __a,
__b,
__c, __d };
2648 return (__m256d){ __w, __w, __w, __w };
2654 return (__m256){ __w, __w, __w, __w, __w, __w, __w, __w };
2660 return (__m256i)(__v8si){ __i, __i, __i, __i, __i, __i, __i, __i };
2666 return (__m256i)(__v16hi){ __w, __w, __w, __w, __w, __w, __w, __w, __w, __w,
2667 __w, __w, __w, __w, __w, __w };
2673 return (__m256i)(__v32qi){
__b,
__b,
__b,
__b,
__b,
__b,
__b,
__b,
__b,
__b,
2674 __b,
__b,
__b,
__b,
__b,
__b,
__b,
__b,
__b,
__b,
__b,
__b,
__b,
__b,
__b,
2681 return (__m256i)(__v4di){ __q, __q, __q, __q };
2688 return (__m256d){ 0, 0, 0, 0 };
2694 return (__m256){ 0, 0, 0, 0, 0, 0, 0, 0 };
2700 return (__m256i){ 0LL, 0LL, 0LL, 0LL };
2713 return (__m256i)__a;
2719 return (__m256d)__a;
2725 return (__m256i)__a;
2737 return (__m256d)__a;
2743 return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 1);
2749 return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 1, 2, 3);
2755 return __builtin_shufflevector((__v4di)__a, (__v4di)__a, 0, 1);
2761 return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 1, -1, -1);
2767 return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1, 2, 3, -1, -1, -1, -1);
2773 return __builtin_shufflevector((__v2di)__a, (__v2di)__a, 0, 1, -1, -1);
/* Insert the 128-bit vector V2 into one half of V1, selected by bit 0 of
   the immediate M (M&1 == 0 -> low half, 1 -> high half); V2 is first
   widened so both shufflevector operands are 256-bit. */
2781 #define _mm256_insertf128_ps(V1, V2, M) __extension__ ({ \
2782 (__m256)__builtin_shufflevector( \
2783 (__v8sf)(__m256)(V1), \
2784 (__v8sf)_mm256_castps128_ps256((__m128)(V2)), \
2785 (((M) & 1) ? 0 : 8), \
2786 (((M) & 1) ? 1 : 9), \
2787 (((M) & 1) ? 2 : 10), \
2788 (((M) & 1) ? 3 : 11), \
2789 (((M) & 1) ? 8 : 4), \
2790 (((M) & 1) ? 9 : 5), \
2791 (((M) & 1) ? 10 : 6), \
2792 (((M) & 1) ? 11 : 7) );})
2794 #define _mm256_insertf128_pd(V1, V2, M) __extension__ ({ \
2795 (__m256d)__builtin_shufflevector( \
2796 (__v4df)(__m256d)(V1), \
2797 (__v4df)_mm256_castpd128_pd256((__m128d)(V2)), \
2798 (((M) & 1) ? 0 : 4), \
2799 (((M) & 1) ? 1 : 5), \
2800 (((M) & 1) ? 4 : 2), \
2801 (((M) & 1) ? 5 : 3) );})
2803 #define _mm256_insertf128_si256(V1, V2, M) __extension__ ({ \
2804 (__m256i)__builtin_shufflevector( \
2805 (__v4di)(__m256i)(V1), \
2806 (__v4di)_mm256_castsi128_si256((__m128i)(V2)), \
2807 (((M) & 1) ? 0 : 4), \
2808 (((M) & 1) ? 1 : 5), \
2809 (((M) & 1) ? 4 : 2), \
2810 (((M) & 1) ? 5 : 3) );})
/* Extract one 128-bit half of V, selected by bit 0 of the immediate M
   (M&1 == 0 -> low half, 1 -> high half). */
2817 #define _mm256_extractf128_ps(V, M) __extension__ ({ \
2818 (__m128)__builtin_shufflevector( \
2819 (__v8sf)(__m256)(V), \
2820 (__v8sf)(_mm256_undefined_ps()), \
2821 (((M) & 1) ? 4 : 0), \
2822 (((M) & 1) ? 5 : 1), \
2823 (((M) & 1) ? 6 : 2), \
2824 (((M) & 1) ? 7 : 3) );})
2826 #define _mm256_extractf128_pd(V, M) __extension__ ({ \
2827 (__m128d)__builtin_shufflevector( \
2828 (__v4df)(__m256d)(V), \
2829 (__v4df)(_mm256_undefined_pd()), \
2830 (((M) & 1) ? 2 : 0), \
2831 (((M) & 1) ? 3 : 1) );})
2833 #define _mm256_extractf128_si256(V, M) __extension__ ({ \
2834 (__m128i)__builtin_shufflevector( \
2835 (__v4di)(__m256i)(V), \
2836 (__v4di)(_mm256_undefined_si256()), \
2837 (((M) & 1) ? 2 : 0), \
2838 (((M) & 1) ? 3 : 1) );})
2898 return (__m256) __builtin_shufflevector((__v4sf)__lo, (__v4sf)__hi, 0, 1, 2, 3, 4, 5, 6, 7);
/* Remove the helper attribute macro so it does not leak to includers. */
2926 #undef __DEFAULT_FN_ATTRS
#define _mm256_extractf128_si256(V, M)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_hsub_ps(__m256 __a, __m256 __b)
Horizontally subtracts the adjacent pairs of values contained in two 256-bit vectors of [8 x float]...
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_rsqrt_ps(__m256 __a)
Calculates the reciprocal square roots of the values in a 256-bit vector of [8 x float].
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_loadu2_m128i(__m128i const *__addr_hi, __m128i const *__addr_lo)
static __inline int __DEFAULT_FN_ATTRS _mm256_extract_epi32(__m256i __a, const int __imm)
Takes a [8 x i32] vector and returns the vector element value indexed by the immediate constant opera...
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_hadd_ps(__m256 __a, __m256 __b)
Horizontally adds the adjacent pairs of values contained in two 256-bit vectors of [8 x float]...
static __inline__ __m256d __DEFAULT_FN_ATTRS _mm256_undefined_pd(void)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_permutevar_pd(__m256d __a, __m256i __c)
Copies the values in a 256-bit vector of [4 x double] as specified by the 256-bit integer vector oper...
static __inline void __DEFAULT_FN_ATTRS _mm256_maskstore_pd(double *__p, __m256i __m, __m256d __a)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_broadcast_sd(double const *__a)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_setzero_ps(void)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
static __inline float __DEFAULT_FN_ATTRS _mm256_cvtss_f32(__m256 __a)
static __inline int __DEFAULT_FN_ATTRS _mm_testnzc_ps(__m128 __a, __m128 __b)
static __inline __m128 __DEFAULT_FN_ATTRS _mm_permutevar_ps(__m128 __a, __m128i __c)
Copies the values stored in a 128-bit vector of [4 x float] as specified by the 128-bit integer vecto...
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_load_ps(float const *__p)
static __inline void __DEFAULT_FN_ATTRS _mm256_storeu2_m128(float *__addr_hi, float *__addr_lo, __m256 __a)
static __inline int __DEFAULT_FN_ATTRS _mm_testc_pd(__m128d __a, __m128d __b)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_moveldup_ps(__m256 __a)
static __inline void __DEFAULT_FN_ATTRS _mm256_stream_si256(__m256i *__a, __m256i __b)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set_m128i(__m128i __hi, __m128i __lo)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_or_ps(__m256 __a, __m256 __b)
Performs a bitwise OR of two 256-bit vectors of [8 x float].
#define _mm256_insertf128_ps(V1, V2, M)
static __inline void __DEFAULT_FN_ATTRS _mm256_zeroupper(void)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_rcp_ps(__m256 __a)
Calculates the reciprocals of the values in a 256-bit vector of [8 x float].
static __inline int __DEFAULT_FN_ATTRS _mm256_testnzc_pd(__m256d __a, __m256d __b)
static __inline int __DEFAULT_FN_ATTRS _mm256_movemask_ps(__m256 __a)
static __inline int __DEFAULT_FN_ATTRS _mm256_testz_pd(__m256d __a, __m256d __b)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set1_epi8(char __b)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_and_ps(__m256 __a, __m256 __b)
Performs a bitwise AND of two 256-bit vectors of [8 x float].
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_mul_ps(__m256 __a, __m256 __b)
Multiplies two 256-bit vectors of [8 x float].
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_load_si256(__m256i const *__p)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_unpackhi_pd(__m256d __a, __m256d __b)
static __inline void __DEFAULT_FN_ATTRS _mm256_store_si256(__m256i *__p, __m256i __a)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_unpackhi_ps(__m256 __a, __m256 __b)
#define _mm256_extractf128_ps(V, M)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28, char __b27, char __b26, char __b25, char __b24, char __b23, char __b22, char __b21, char __b20, char __b19, char __b18, char __b17, char __b16, char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b09, char __b08, char __b07, char __b06, char __b05, char __b04, char __b03, char __b02, char __b01, char __b00)
#define _mm256_insertf128_si256(V1, V2, M)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_setr_m128d(__m128d __lo, __m128d __hi)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12, short __w11, short __w10, short __w09, short __w08, short __w07, short __w06, short __w05, short __w04, short __w03, short __w02, short __w01, short __w00)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_and_pd(__m256d __a, __m256d __b)
Performs a bitwise AND of two 256-bit vectors of [4 x double].
static __inline __m128i __DEFAULT_FN_ATTRS _mm256_cvtpd_epi32(__m256d __a)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set1_epi64x(long long __q)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_hsub_pd(__m256d __a, __m256d __b)
Horizontally subtracts the adjacent pairs of values contained in two 256-bit vectors of [4 x double]...
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_sqrt_pd(__m256d __a)
Calculates the square roots of the values in a 256-bit vector of [4 x double].
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_insert_epi32(__m256i __a, int __b, int const __imm)
Takes a [8 x i32] vector and replaces the vector element value indexed by the immediate constant oper...
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_castsi256_pd(__m256i __a)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set_epi16(short __w15, short __w14, short __w13, short __w12, short __w11, short __w10, short __w09, short __w08, short __w07, short __w06, short __w05, short __w04, short __w03, short __w02, short __w01, short __w00)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_sub_pd(__m256d __a, __m256d __b)
Subtracts two 256-bit vectors of [4 x double].
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_hadd_pd(__m256d __a, __m256d __b)
Horizontally adds the adjacent pairs of values contained in two 256-bit vectors of [4 x double]...
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_sub_ps(__m256 __a, __m256 __b)
Subtracts two 256-bit vectors of [8 x float].
static __inline void __DEFAULT_FN_ATTRS _mm256_storeu2_m128i(__m128i *__addr_hi, __m128i *__addr_lo, __m256i __a)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadu_pd(double const *__dp)
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_undefined_si256(void)
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_ps(float *__p, __m128 __a)
Stores float values from a 128-bit vector of [4 x float] to an unaligned memory location.
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_set_m128d(__m128d __hi, __m128d __lo)
static __inline int __DEFAULT_FN_ATTRS _mm256_testnzc_ps(__m256 __a, __m256 __b)
#define _mm256_extractf128_pd(V, M)
static __inline int __DEFAULT_FN_ATTRS _mm256_testz_si256(__m256i __a, __m256i __b)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_set1_ps(float __w)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c)
Merges 64-bit double-precision data values stored in either of the two 256-bit vectors of [4 x double...
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_set_pd(double __a, double __b, double __c, double __d)
static __inline __m128i __DEFAULT_FN_ATTRS _mm256_cvttpd_epi32(__m256d __a)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_add_ps(__m256 __a, __m256 __b)
Adds two 256-bit vectors of [8 x float].
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_addsub_pd(__m256d __a, __m256d __b)
Adds the even-indexed values and subtracts the odd-indexed values of two 256-bit vectors of [4 x doub...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si128(__m128i const *__p)
Moves packed integer values from an unaligned 128-bit memory location to elements in a 128-bit intege...
static __inline __m128d __DEFAULT_FN_ATTRS _mm256_castpd256_pd128(__m256d __a)
static __inline void __DEFAULT_FN_ATTRS _mm256_store_pd(double *__p, __m256d __a)
static __inline unsigned char unsigned int unsigned int unsigned int * __p
static __inline double __DEFAULT_FN_ATTRS _mm256_cvtsd_f64(__m256d __a)
static __inline void __DEFAULT_FN_ATTRS _mm256_maskstore_ps(float *__p, __m256i __m, __m256 __a)
static __inline int __DEFAULT_FN_ATTRS _mm_testz_ps(__m128 __a, __m128 __b)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_castsi128_si256(__m128i __a)
static __inline int __DEFAULT_FN_ATTRS _mm256_cvtsi256_si32(__m256i __a)
static __inline __m128d __DEFAULT_FN_ATTRS _mm_maskload_pd(double const *__p, __m128i __m)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_broadcast_ps(__m128 const *__a)
static __inline __m128i __DEFAULT_FN_ATTRS _mm256_castsi256_si128(__m256i __a)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set_epi32(int __i0, int __i1, int __i2, int __i3, int __i4, int __i5, int __i6, int __i7)
static __inline int __DEFAULT_FN_ATTRS _mm256_testc_si256(__m256i __a, __m256i __b)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_min_ps(__m256 __a, __m256 __b)
Compares two 256-bit vectors of [8 x float] and returns the lesser of each pair of values...
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_set_ps(float __a, float __b, float __c, float __d, float __e, float __f, float __g, float __h)
static __inline int __DEFAULT_FN_ATTRS _mm256_testc_ps(__m256 __a, __m256 __b)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_movedup_pd(__m256d __a)
static __inline__ __m256 __DEFAULT_FN_ATTRS _mm256_undefined_ps(void)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_setr_pd(double __a, double __b, double __c, double __d)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_setr_m128i(__m128i __lo, __m128i __hi)
static __inline void __DEFAULT_FN_ATTRS _mm256_storeu_pd(double *__p, __m256d __a)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set1_epi16(short __w)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_loadu_si256(__m256i const *__p)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_broadcast_pd(__m128d const *__a)
static __inline __m128 __DEFAULT_FN_ATTRS _mm256_cvtpd_ps(__m256d __a)
Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 x float].
static __inline__ vector float vector float __b
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_load_pd(double const *__p)
static __inline void __DEFAULT_FN_ATTRS _mm256_storeu_ps(float *__p, __m256 __a)
static __inline void __DEFAULT_FN_ATTRS _mm_maskstore_pd(double *__p, __m128i __m, __m128d __a)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_castpd_si256(__m256d __a)
static __inline int __DEFAULT_FN_ATTRS _mm_testz_pd(__m128d __a, __m128d __b)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_castps_pd(__m256 __a)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_unpacklo_pd(__m256d __a, __m256d __b)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_div_ps(__m256 __a, __m256 __b)
Divides two 256-bit vectors of [8 x float].
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_broadcast_ss(float const *__a)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_max_ps(__m256 __a, __m256 __b)
Compares two 256-bit vectors of [8 x float] and returns the greater of each pair of values...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_pd(double *__dp, __m128d __a)
static __inline int __DEFAULT_FN_ATTRS _mm256_extract_epi8(__m256i __a, const int __imm)
Takes a [32 x i8] vector and returns the vector element value indexed by the immediate constant opera...
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_cvtepi32_ps(__m256i __a)
Converts a vector of [8 x i32] into a vector of [8 x float].
static __inline int __DEFAULT_FN_ATTRS _mm256_extract_epi16(__m256i __a, const int __imm)
Takes a [16 x i16] vector and returns the vector element value indexed by the immediate constant oper...
static __inline __m128 __DEFAULT_FN_ATTRS _mm_broadcast_ss(float const *__a)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_lddqu_si256(__m256i const *__p)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_andnot_pd(__m256d __a, __m256d __b)
Performs a bitwise AND of two 256-bit vectors of [4 x double], using the one's complement of the valu...
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_div_pd(__m256d __a, __m256d __b)
Divides two 256-bit vectors of [4 x double].
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_castpd128_pd256(__m128d __a)
static __inline __m128d __DEFAULT_FN_ATTRS _mm_permutevar_pd(__m128d __a, __m128i __c)
Copies the values in a 128-bit vector of [2 x double] as specified by the 128-bit integer vector oper...
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_loadu_pd(double const *__p)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_maskload_ps(float const *__p, __m256i __m)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_insert_epi8(__m256i __a, int __b, int const __imm)
Takes a [32 x i8] vector and replaces the vector element value indexed by the immediate constant operand with a new value.
static __inline int __DEFAULT_FN_ATTRS _mm_testnzc_pd(__m128d __a, __m128d __b)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_castsi256_ps(__m256i __a)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_setr_m128(__m128 __lo, __m128 __hi)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_xor_ps(__m256 __a, __m256 __b)
Performs a bitwise XOR of two 256-bit vectors of [8 x float].
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_castps128_ps256(__m128 __a)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_andnot_ps(__m256 __a, __m256 __b)
Performs a bitwise AND of two 256-bit vectors of [8 x float], using the one's complement of the values contained in the first source operand.
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_insert_epi16(__m256i __a, int __b, int const __imm)
Takes a [16 x i16] vector and replaces the vector element value indexed by the immediate constant operand with a new value.
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_mul_pd(__m256d __a, __m256d __b)
Multiplies two 256-bit vectors of [4 x double].
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_sqrt_ps(__m256 __a)
Calculates the square roots of the values in a 256-bit vector of [8 x float].
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_set_m128(__m128 __hi, __m128 __lo)
#define _mm256_insertf128_pd(V1, V2, M)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_add_pd(__m256d __a, __m256d __b)
Adds two 256-bit vectors of [4 x double].
static __inline void __DEFAULT_FN_ATTRS _mm256_storeu_si256(__m256i *__p, __m256i __a)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_loadu_ps(float const *__p)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set1_epi32(int __i)
static __inline int __DEFAULT_FN_ATTRS _mm256_testc_pd(__m256d __a, __m256d __b)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_permutevar_ps(__m256 __a, __m256i __c)
Copies the values stored in a 256-bit vector of [8 x float] as specified by the 256-bit integer vector operand.
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_cvttps_epi32(__m256 __a)
static __inline void __DEFAULT_FN_ATTRS _mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a)
static __inline void __DEFAULT_FN_ATTRS _mm256_stream_ps(float *__p, __m256 __a)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_castps_si256(__m256 __a)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_maskload_pd(double const *__p, __m256i __m)
typedef double __v4df __attribute__((__vector_size__(32)));
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_xor_pd(__m256d __a, __m256d __b)
Performs a bitwise XOR of two 256-bit vectors of [4 x double].
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_cvtepi32_pd(__m128i __a)
Converts a vector of [4 x i32] into a vector of [4 x double].
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_setr_ps(float __a, float __b, float __c, float __d, float __e, float __f, float __g, float __h)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_addsub_ps(__m256 __a, __m256 __b)
Adds the even-indexed values and subtracts the odd-indexed values of two 256-bit vectors of [8 x float].
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_unpacklo_ps(__m256 __a, __m256 __b)
static __inline int __DEFAULT_FN_ATTRS _mm256_movemask_pd(__m256d __a)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_cvtps_epi32(__m256 __a)
Converts a vector of [8 x float] into a vector of [8 x i32].
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
Merges 32-bit single-precision data values stored in either of the two 256-bit vectors of [8 x float], as specified by the 256-bit vector operand.
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_setzero_si256(void)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_movehdup_ps(__m256 __a)
static __inline void __DEFAULT_FN_ATTRS _mm256_stream_pd(double *__a, __m256d __b)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set_epi8(char __b31, char __b30, char __b29, char __b28, char __b27, char __b26, char __b25, char __b24, char __b23, char __b22, char __b21, char __b20, char __b19, char __b18, char __b17, char __b16, char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b09, char __b08, char __b07, char __b06, char __b05, char __b04, char __b03, char __b02, char __b01, char __b00)
static __inline void __DEFAULT_FN_ATTRS _mm256_zeroall(void)
static __inline int __DEFAULT_FN_ATTRS _mm256_testnzc_si256(__m256i __a, __m256i __b)
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si128(__m128i *__p, __m128i __b)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_castpd_ps(__m256d __a)
static __inline int __DEFAULT_FN_ATTRS _mm_testc_ps(__m128 __a, __m128 __b)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_set1_pd(double __w)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3, int __i4, int __i5, int __i6, int __i7)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d)
static __inline __m128 __DEFAULT_FN_ATTRS _mm_maskload_ps(float const *__p, __m128i __m)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_loadu_ps(const float *__p)
Loads a 128-bit floating-point vector of [4 x float] from an unaligned memory location.
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_setzero_pd(void)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_max_pd(__m256d __a, __m256d __b)
Compares two 256-bit vectors of [4 x double] and returns the greater of each pair of values.
#define __DEFAULT_FN_ATTRS
static __inline void __DEFAULT_FN_ATTRS _mm_maskstore_ps(float *__p, __m128i __m, __m128 __a)
static __inline__ vector float vec_madd(vector float __a, vector float __b, vector float __c)
static __inline void __DEFAULT_FN_ATTRS _mm256_store_ps(float *__p, __m256 __a)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_cvtps_pd(__m128 __a)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_or_pd(__m256d __a, __m256d __b)
Performs a bitwise OR of two 256-bit vectors of [4 x double].
static __inline int __DEFAULT_FN_ATTRS _mm256_testz_ps(__m256 __a, __m256 __b)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_min_pd(__m256d __a, __m256d __b)
Compares two 256-bit vectors of [4 x double] and returns the lesser of each pair of values.
static __inline __m128 __DEFAULT_FN_ATTRS _mm256_castps256_ps128(__m256 __a)