* the available xmm registers, this optimized version may not be faster than
* the pure C version anyway. Note that all x86-64 CPUs support at least SSE2.
*
- * This file is compiled using GCC 4.8+'s C++ front end to allow the use of
- * the target attribute, selecting the fastest code path based on runtime
- * detection of CPU capabilities.
+ * This file is compiled using GCC 5+'s or clang 6+'s C++ front end to allow
+ * the use of the target attribute, selecting the fastest code path based on
+ * dispatch priority (GCC 5) or runtime detection of CPU capabilities (GCC 6+).
+ * GCC 4.x is not supported, to keep the configure.ac logic simple.
*/
#ifdef __x86_64__
#include <immintrin.h>
-/* Compatibility functions to let our SSSE3 algorithm run on SSE2 */
-
-__attribute__ ((target("sse2"))) static inline __m128i sse_interleave_odd_epi16(__m128i a, __m128i b)
-{
- return _mm_packs_epi32(
- _mm_srai_epi32(a, 16),
- _mm_srai_epi32(b, 16)
- );
-}
-
-__attribute__ ((target("sse2"))) static inline __m128i sse_interleave_even_epi16(__m128i a, __m128i b)
-{
- return sse_interleave_odd_epi16(
- _mm_slli_si128(a, 2),
- _mm_slli_si128(b, 2)
- );
-}
-
-__attribute__ ((target("sse2"))) static inline __m128i sse_mulu_odd_epi8(__m128i a, __m128i b)
-{
- return _mm_mullo_epi16(
- _mm_srli_epi16(a, 8),
- _mm_srai_epi16(b, 8)
- );
-}
+/* Some clang versions don't like static on multi-versioned functions: it leads to linker errors */
+#ifdef __clang__
+#define MVSTATIC
+#else
+#define MVSTATIC static
+#endif
-__attribute__ ((target("sse2"))) static inline __m128i sse_mulu_even_epi8(__m128i a, __m128i b)
-{
- return _mm_mullo_epi16(
- _mm_and_si128(a, _mm_set1_epi16(0xFF)),
- _mm_srai_epi16(_mm_slli_si128(b, 1), 8)
- );
-}
+// Missing from the headers on GCC 6 and older, clang 8 and older
+typedef long long __m128i_u __attribute__((__vector_size__(16), __may_alias__, __aligned__(1)));
+typedef long long __m256i_u __attribute__((__vector_size__(32), __may_alias__, __aligned__(1)));
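+// (identical to __m128i/__m256i except that they may be aligned to only 1 byte)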
-__attribute__ ((target("sse2"))) static inline __m128i sse_hadds_epi16(__m128i a, __m128i b)
-{
- return _mm_adds_epi16(
- sse_interleave_even_epi16(a, b),
- sse_interleave_odd_epi16(a, b)
- );
-}
+/* Compatibility macros to let our SSSE3 algorithm run with only SSE2.
+   These used to be neat individual functions, with target attributes switching between SSE2 and SSSE3
+   implementations as needed. That works perfectly with GCC, but clang fails to inline the calls properly,
+   leading to a near 50% performance drop; combining them with static and inline modifiers produces linker
+   errors and even compiler crashes.
+*/
-__attribute__ ((target("ssse3"))) static inline __m128i sse_hadds_epi16(__m128i a, __m128i b)
-{
- return _mm_hadds_epi16(a, b);
-}
-
-__attribute__ ((target("sse2"))) static inline __m128i sse_maddubs_epi16(__m128i a, __m128i b)
-{
- return _mm_adds_epi16(
- sse_mulu_even_epi8(a, b),
- sse_mulu_odd_epi8(a, b)
- );
-}
+#define SSE2_INTERLEAVE_ODD_EPI16(a, b) _mm_packs_epi32(_mm_srai_epi32(a, 16), _mm_srai_epi32(b, 16))
+#define SSE2_INTERLEAVE_EVEN_EPI16(a, b) SSE2_INTERLEAVE_ODD_EPI16(_mm_slli_si128(a, 2), _mm_slli_si128(b, 2))
+#define SSE2_MULU_ODD_EPI8(a, b) _mm_mullo_epi16(_mm_srli_epi16(a, 8), _mm_srai_epi16(b, 8))
+#define SSE2_MULU_EVEN_EPI8(a, b) _mm_mullo_epi16(_mm_and_si128(a, _mm_set1_epi16(0xFF)), _mm_srai_epi16(_mm_slli_si128(b, 1), 8))
-__attribute__ ((target("ssse3"))) static inline __m128i sse_maddubs_epi16(__m128i a, __m128i b)
-{
- return _mm_maddubs_epi16(a, b);
-}
+#define SSE2_HADDS_EPI16(a, b) _mm_adds_epi16(SSE2_INTERLEAVE_EVEN_EPI16(a, b), SSE2_INTERLEAVE_ODD_EPI16(a, b))
+#define SSE2_MADDUBS_EPI16(a, b) _mm_adds_epi16(SSE2_MULU_EVEN_EPI8(a, b), SSE2_MULU_ODD_EPI8(a, b))
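+// Per 16-bit output lane j (a treated as unsigned bytes, b as signed bytes):
+//   SSE2_MADDUBS_EPI16(a, b): saturate16(a[2j]*b[2j] + a[2j+1]*b[2j+1]), matching _mm_maddubs_epi16
+//   SSE2_HADDS_EPI16(a, b):   saturating sums of adjacent int16 pairs,   matching _mm_hadds_epi16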
-/* These don't actually get called, but we need to define them. */
-__attribute__ ((target("default"))) static inline __m128i sse_interleave_odd_epi16(__m128i a, __m128i b) { return a; }
-__attribute__ ((target("default"))) static inline __m128i sse_interleave_even_epi16(__m128i a, __m128i b) { return a; }
-__attribute__ ((target("default"))) static inline __m128i sse_mulu_odd_epi8(__m128i a, __m128i b) { return a; }
-__attribute__ ((target("default"))) static inline __m128i sse_mulu_even_epi8(__m128i a, __m128i b) { return a; }
-__attribute__ ((target("default"))) static inline __m128i sse_hadds_epi16(__m128i a, __m128i b) { return a; }
-__attribute__ ((target("default"))) static inline __m128i sse_maddubs_epi16(__m128i a, __m128i b) { return a; }
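+/* Fallbacks selected by the dispatcher on CPUs that lack the corresponding instruction set; they
+   process nothing and return i unchanged so the caller moves on to the next-best routine. */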
+__attribute__ ((target("default"))) MVSTATIC int32 get_checksum1_avx2_64(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2) { return i; }
+__attribute__ ((target("default"))) MVSTATIC int32 get_checksum1_ssse3_32(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2) { return i; }
+__attribute__ ((target("default"))) MVSTATIC int32 get_checksum1_sse2_32(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2) { return i; }
/*
Original loop per 4 bytes:
s1 += (uint32)(t1[0] + t1[1] + t1[2] + t1[3] + t1[4] + t1[5] + t1[6] + t1[7]) +
32*CHAR_OFFSET;
*/
+__attribute__ ((target("ssse3"))) MVSTATIC int32 get_checksum1_ssse3_32(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
+{
+ if (len > 32) {
+ int aligned = ((uintptr_t)buf & 15) == 0;
+
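+ // Keep the running s1/s2 in the low 32-bit lane of ss1/ss2; the other lanes are don't-care
+ // (only x[0] is read back when the sums are stored out below)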
+ uint32 x[4] = {0};
+ x[0] = *ps1;
+ __m128i ss1 = _mm_loadu_si128((__m128i_u*)x);
+ x[0] = *ps2;
+ __m128i ss2 = _mm_loadu_si128((__m128i_u*)x);
+
+ const int16 mul_t1_buf[8] = {28, 24, 20, 16, 12, 8, 4, 0};
+ __m128i mul_t1 = _mm_loadu_si128((__m128i_u*)mul_t1_buf);
+
+ for (; i < (len-32); i+=32) {
+ // Load ... 2*[int8*16]
+ __m128i in8_1, in8_2;
+ if (!aligned) {
+ // Synonymous with _mm_loadu_si128 on all but a handful of old CPUs
+ in8_1 = _mm_lddqu_si128((__m128i_u*)&buf[i]);
+ in8_2 = _mm_lddqu_si128((__m128i_u*)&buf[i + 16]);
+ } else {
+ in8_1 = _mm_load_si128((__m128i_u*)&buf[i]);
+ in8_2 = _mm_load_si128((__m128i_u*)&buf[i + 16]);
+ }
+
+ // (1*buf[i] + 1*buf[i+1]), (1*buf[i+2] + 1*buf[i+3]), ... 2*[int16*8]
+ // Fastest, even though multiply by 1
+ __m128i mul_one = _mm_set1_epi8(1);
+ __m128i add16_1 = _mm_maddubs_epi16(mul_one, in8_1);
+ __m128i add16_2 = _mm_maddubs_epi16(mul_one, in8_2);
+
+ // (4*buf[i] + 3*buf[i+1]), (2*buf[i+2] + buf[i+3]), ... 2*[int16*8]
+ __m128i mul_const = _mm_set1_epi32(4 + (3 << 8) + (2 << 16) + (1 << 24));
+ __m128i mul_add16_1 = _mm_maddubs_epi16(mul_const, in8_1);
+ __m128i mul_add16_2 = _mm_maddubs_epi16(mul_const, in8_2);
+
+ // s2 += 32*s1
+ ss2 = _mm_add_epi32(ss2, _mm_slli_epi32(ss1, 5));
+
+ // [sum(t1[0]..t1[7]), X, X, X] [int32*4]; faster than multiple _mm_hadds_epi16
+ // Shifting left, then shifting right again and shuffling (rather than just
+ // shifting right as with mul32 below) to cheaply end up with the correct sign
+ // extension as we go from int16 to int32.
+ __m128i sum_add32 = _mm_add_epi16(add16_1, add16_2);
+ sum_add32 = _mm_add_epi16(sum_add32, _mm_slli_si128(sum_add32, 2));
+ sum_add32 = _mm_add_epi16(sum_add32, _mm_slli_si128(sum_add32, 4));
+ sum_add32 = _mm_add_epi16(sum_add32, _mm_slli_si128(sum_add32, 8));
+ sum_add32 = _mm_srai_epi32(sum_add32, 16);
+ sum_add32 = _mm_shuffle_epi32(sum_add32, 3);
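+ // (the shift-and-add ladder left the full sum in 32-bit lane 3; the shuffle moves it into lane 0)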
+
+ // [sum(t2[0]..t2[7]), X, X, X] [int32*4]; faster than multiple _mm_hadds_epi16
+ __m128i sum_mul_add32 = _mm_add_epi16(mul_add16_1, mul_add16_2);
+ sum_mul_add32 = _mm_add_epi16(sum_mul_add32, _mm_slli_si128(sum_mul_add32, 2));
+ sum_mul_add32 = _mm_add_epi16(sum_mul_add32, _mm_slli_si128(sum_mul_add32, 4));
+ sum_mul_add32 = _mm_add_epi16(sum_mul_add32, _mm_slli_si128(sum_mul_add32, 8));
+ sum_mul_add32 = _mm_srai_epi32(sum_mul_add32, 16);
+ sum_mul_add32 = _mm_shuffle_epi32(sum_mul_add32, 3);
+
+ // s1 += t1[0] + t1[1] + t1[2] + t1[3] + t1[4] + t1[5] + t1[6] + t1[7]
+ ss1 = _mm_add_epi32(ss1, sum_add32);
+
+ // s2 += t2[0] + t2[1] + t2[2] + t2[3] + t2[4] + t2[5] + t2[6] + t2[7]
+ ss2 = _mm_add_epi32(ss2, sum_mul_add32);
+
+ // [t1[0] + t1[1], t1[2] + t1[3] ...] [int16*8]
+ // We could've combined this with generating sum_add32 above and
+ // saved an instruction, but benchmarking shows that to be slower
+ __m128i add16 = _mm_hadds_epi16(add16_1, add16_2);
+
+ // [t1[0], t1[1], ...] -> [t1[0]*28 + t1[1]*24, ...] [int32*4]
+ __m128i mul32 = _mm_madd_epi16(add16, mul_t1);
+
+ // [sum(mul32), X, X, X] [int32*4]; faster than multiple _mm_hadd_epi32
+ mul32 = _mm_add_epi32(mul32, _mm_srli_si128(mul32, 4));
+ mul32 = _mm_add_epi32(mul32, _mm_srli_si128(mul32, 8));
+
+ // s2 += 28*t1[0] + 24*t1[1] + 20*t1[2] + 16*t1[3] + 12*t1[4] + 8*t1[5] + 4*t1[6]
+ ss2 = _mm_add_epi32(ss2, mul32);
+
+#if CHAR_OFFSET != 0
+ // s1 += 32*CHAR_OFFSET
+ __m128i char_offset_multiplier = _mm_set1_epi32(32 * CHAR_OFFSET);
+ ss1 = _mm_add_epi32(ss1, char_offset_multiplier);
+
+ // s2 += 528*CHAR_OFFSET
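+ // (528 = 1 + 2 + ... + 32, the weight the 32 new CHAR_OFFSET terms accumulate into s2 per block)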
+ char_offset_multiplier = _mm_set1_epi32(528 * CHAR_OFFSET);
+ ss2 = _mm_add_epi32(ss2, char_offset_multiplier);
+#endif
+ }
+
+ _mm_store_si128((__m128i_u*)x, ss1);
+ *ps1 = x[0];
+ _mm_store_si128((__m128i_u*)x, ss2);
+ *ps2 = x[0];
+ }
+ return i;
+}
+
/*
- Both sse2 and ssse3 targets must be specified here or we lose (a lot) of
- performance, possibly due to not unrolling+inlining the called targeted
- functions.
+  Same as the SSSE3 version, but using the macros defined above to emulate the SSSE3 calls that are not
+  available with SSE2. If only GCC had to be supported, the SSE2 and SSSE3 versions could be a single
+  function calling helpers with the right target attributes to emulate SSSE3 on SSE2 where needed, but
+  clang doesn't inline those properly, leading to a near 50% performance drop.
*/
-__attribute__ ((target("sse2", "ssse3"))) static int32 get_checksum1_sse2_32(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
+__attribute__ ((target("sse2"))) MVSTATIC int32 get_checksum1_sse2_32(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
{
if (len > 32) {
int aligned = ((uintptr_t)buf & 15) == 0;
for (; i < (len-32); i+=32) {
// Load ... 2*[int8*16]
- // SSSE3 has _mm_lqqdu_si128, but this requires another
- // target function for each SSE2 and SSSE3 loads. For reasons
- // unknown (to me) we lose about 10% performance on some CPUs if
- // we do that right here. We just use _mm_loadu_si128 as for all
- // but a handful of specific old CPUs they are synonymous, and
- // take the 1-5% hit on those specific CPUs where it isn't.
__m128i in8_1, in8_2;
if (!aligned) {
in8_1 = _mm_loadu_si128((__m128i_u*)&buf[i]);
// (1*buf[i] + 1*buf[i+1]), (1*buf[i+2], 1*buf[i+3]), ... 2*[int16*8]
// Fastest, even though multiply by 1
__m128i mul_one = _mm_set1_epi8(1);
- __m128i add16_1 = sse_maddubs_epi16(mul_one, in8_1);
- __m128i add16_2 = sse_maddubs_epi16(mul_one, in8_2);
+ __m128i add16_1 = SSE2_MADDUBS_EPI16(mul_one, in8_1);
+ __m128i add16_2 = SSE2_MADDUBS_EPI16(mul_one, in8_2);
// (4*buf[i] + 3*buf[i+1]), (2*buf[i+2], buf[i+3]), ... 2*[int16*8]
__m128i mul_const = _mm_set1_epi32(4 + (3 << 8) + (2 << 16) + (1 << 24));
- __m128i mul_add16_1 = sse_maddubs_epi16(mul_const, in8_1);
- __m128i mul_add16_2 = sse_maddubs_epi16(mul_const, in8_2);
+ __m128i mul_add16_1 = SSE2_MADDUBS_EPI16(mul_const, in8_1);
+ __m128i mul_add16_2 = SSE2_MADDUBS_EPI16(mul_const, in8_2);
// s2 += 32*s1
ss2 = _mm_add_epi32(ss2, _mm_slli_epi32(ss1, 5));
// [t1[0] + t1[1], t1[2] + t1[3] ...] [int16*8]
// We could've combined this with generating sum_add32 above and
// save an instruction but benchmarking shows that as being slower
- __m128i add16 = sse_hadds_epi16(add16_1, add16_2);
+ __m128i add16 = SSE2_HADDS_EPI16(add16_1, add16_2);
// [t1[0], t1[1], ...] -> [t1[0]*28 + t1[1]*24, ...] [int32*4]
__m128i mul32 = _mm_madd_epi16(add16, mul_t1);
s1 += (uint32)(t1[0] + t1[1] + t1[2] + t1[3] + t1[4] + t1[5] + t1[6] + t1[7] + t1[8] + t1[9] + t1[10] + t1[11] + t1[12] + t1[13] + t1[14] + t1[15]) +
64*CHAR_OFFSET;
*/
-__attribute__ ((target("avx2"))) static int32 get_checksum1_avx2_64(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
+__attribute__ ((target("avx2"))) MVSTATIC int32 get_checksum1_avx2_64(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
{
if (len > 64) {
// Instructions reshuffled compared to SSE2 for slightly better performance
return i;
}
-__attribute__ ((target("default"))) static int32 get_checksum1_avx2_64(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
-{
- return i;
-}
-
-__attribute__ ((target("default"))) static int32 get_checksum1_sse2_32(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
-{
- return i;
-}
-
-static inline int32 get_checksum1_default_1(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
+static int32 get_checksum1_default_1(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
{
uint32 s1 = *ps1;
uint32 s2 = *ps2;
return i;
}
-extern "C" {
-
-uint32 get_checksum1(char *buf1, int32 len)
+/* With GCC 10, putting this implementation inside 'extern "C"' causes an
+   assembler error. It worked fine on GCC 5-9 and clang 6-10...
+ */
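+/* Each helper below consumes as many whole blocks as its instruction set supports and returns the
+   new offset, so the next call (and finally the plain C tail loop) picks up where it left off. */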
+static inline uint32 get_checksum1_cpp(char *buf1, int32 len)
{
int32 i = 0;
uint32 s1 = 0;
// multiples of 64 bytes using AVX2 (if available)
i = get_checksum1_avx2_64((schar*)buf1, len, i, &s1, &s2);
- // multiples of 32 bytes using SSE2/SSSE3 (if available)
+ // multiples of 32 bytes using SSSE3 (if available)
+ i = get_checksum1_ssse3_32((schar*)buf1, len, i, &s1, &s2);
+
+ // multiples of 32 bytes using SSE2 (if available)
i = get_checksum1_sse2_32((schar*)buf1, len, i, &s1, &s2);
// whatever is left
return (s1 & 0xffff) + (s2 << 16);
}
-} // "C"
+extern "C" {
+
+uint32 get_checksum1(char *buf1, int32 len)
+{
+ return get_checksum1_cpp(buf1, len);
+}
+
+} // extern "C"
+
+#ifdef BENCHMARK_SIMD_CHECKSUM1
+#pragma clang optimize off
+#pragma GCC push_options
+#pragma GCC optimize ("O0")
+
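+// Each implementation is timed over ROUNDS passes of a BLOCK_LEN buffer (1 GiB of data in total)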
+#define ROUNDS 1024
+#define BLOCK_LEN 1024*1024
+
+#ifndef CLOCK_MONOTONIC_RAW
+#define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC
+#endif
+
+static void benchmark(const char* desc, int32 (*func)(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2), schar* buf, int32 len) {
+ struct timespec start, end;
+ uint64_t us;
+ uint32_t cs, s1, s2;
+ int i, next;
+
+ clock_gettime(CLOCK_MONOTONIC_RAW, &start);
+ for (i = 0; i < ROUNDS; i++) {
+ s1 = s2 = 0;
+ next = func((schar*)buf, len, 0, &s1, &s2);
+ get_checksum1_default_1((schar*)buf, len, next, &s1, &s2);
+ }
+ clock_gettime(CLOCK_MONOTONIC_RAW, &end);
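+ // next == 0 means the default stub ran (instruction set unavailable), so report zeros rather
+ // than timing the scalar fallback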
+ us = next == 0 ? 0 : (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
+ cs = next == 0 ? 0 : (s1 & 0xffff) + (s2 << 16);
+ printf("%-5s :: %5.0f MB/s :: %08x\n", desc, us ? (float)(len / (1024 * 1024) * ROUNDS) / ((float)us / 1000000.0f) : 0, cs);
+}
+
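+// Wraps the public entry point so the "Auto" row reports whichever code path the dispatcher picks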
+static int32 get_checksum1_auto(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2) {
+ uint32 cs = get_checksum1((char*)buf, len);
+ *ps1 = cs & 0xffff;
+ *ps2 = cs >> 16;
+ return len;
+}
+
+int main() {
+ int i;
+ unsigned char* buf = (unsigned char*)malloc(BLOCK_LEN);
+ for (i = 0; i < BLOCK_LEN; i++) buf[i] = (i + (i % 3) + (i % 11)) % 256;
+
+ benchmark("Auto", get_checksum1_auto, (schar*)buf, BLOCK_LEN);
+ benchmark("Raw-C", get_checksum1_default_1, (schar*)buf, BLOCK_LEN);
+ benchmark("SSE2", get_checksum1_sse2_32, (schar*)buf, BLOCK_LEN);
+ benchmark("SSSE3", get_checksum1_ssse3_32, (schar*)buf, BLOCK_LEN);
+ benchmark("AVX2", get_checksum1_avx2_64, (schar*)buf, BLOCK_LEN);
+
+ free(buf);
+ return 0;
+}
+
+#pragma GCC pop_options
+#pragma clang optimize on
+#endif /* BENCHMARK_SIMD_CHECKSUM1 */
#endif /* HAVE_SIMD */
#endif /* __cplusplus */