Quick lookup tables using AVX2

I am trying to speed up an algorithm that performs a series of table lookups. I would like to use SSE2 or AVX2. I tried using the _mm256_i32gather_epi32 intrinsic, but it is 31% slower. Does anyone have suggestions for improvements or a different approach?

Timings: C code = 234, gathers = 340

static const int32_t g_tables[2][64];  // values between 0 and 63

template <int8_t which, class T>
static void lookup_data(int16_t * dst, T * src)
{
    const int32_t * lut = g_tables[which];

    // Keep this code around for Broadwell or Skylake, since it is 31% slower than the C code here
    // (gather is 12 for Haswell, 7 for Broadwell and 5 for Skylake)

#if 0
    if (sizeof(T) == sizeof(int16_t)) {
        __m256i avx0, avx1, avx2, avx3, avx4, avx5, avx6, avx7;
        __m128i sse0, sse1, sse2, sse3, sse4, sse5, sse6, sse7;
        __m256i mask = _mm256_set1_epi32(0xffff);

        avx0 = _mm256_loadu_si256((__m256i *)(lut));
        avx1 = _mm256_loadu_si256((__m256i *)(lut + 8));
        avx2 = _mm256_loadu_si256((__m256i *)(lut + 16));
        avx3 = _mm256_loadu_si256((__m256i *)(lut + 24));
        avx4 = _mm256_loadu_si256((__m256i *)(lut + 32));
        avx5 = _mm256_loadu_si256((__m256i *)(lut + 40));
        avx6 = _mm256_loadu_si256((__m256i *)(lut + 48));
        avx7 = _mm256_loadu_si256((__m256i *)(lut + 56));
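        // Each gather does eight 32-bit loads from src (scale = 2 because src has
        // 16-bit elements); the wanted value lands in the low 16 bits, the upper
        // half is masked off below and the results packed back down to 16 bits.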
        avx0 = _mm256_i32gather_epi32((int32_t *)(src), avx0, 2);
        avx1 = _mm256_i32gather_epi32((int32_t *)(src), avx1, 2);
        avx2 = _mm256_i32gather_epi32((int32_t *)(src), avx2, 2);
        avx3 = _mm256_i32gather_epi32((int32_t *)(src), avx3, 2);
        avx4 = _mm256_i32gather_epi32((int32_t *)(src), avx4, 2);
        avx5 = _mm256_i32gather_epi32((int32_t *)(src), avx5, 2);
        avx6 = _mm256_i32gather_epi32((int32_t *)(src), avx6, 2);
        avx7 = _mm256_i32gather_epi32((int32_t *)(src), avx7, 2);
        avx0 = _mm256_and_si256(avx0, mask);
        avx1 = _mm256_and_si256(avx1, mask);
        avx2 = _mm256_and_si256(avx2, mask);
        avx3 = _mm256_and_si256(avx3, mask);
        avx4 = _mm256_and_si256(avx4, mask);
        avx5 = _mm256_and_si256(avx5, mask);
        avx6 = _mm256_and_si256(avx6, mask);
        avx7 = _mm256_and_si256(avx7, mask);
        sse0 = _mm_packus_epi32(_mm256_castsi256_si128(avx0), _mm256_extracti128_si256(avx0, 1));
        sse1 = _mm_packus_epi32(_mm256_castsi256_si128(avx1), _mm256_extracti128_si256(avx1, 1));
        sse2 = _mm_packus_epi32(_mm256_castsi256_si128(avx2), _mm256_extracti128_si256(avx2, 1));
        sse3 = _mm_packus_epi32(_mm256_castsi256_si128(avx3), _mm256_extracti128_si256(avx3, 1));
        sse4 = _mm_packus_epi32(_mm256_castsi256_si128(avx4), _mm256_extracti128_si256(avx4, 1));
        sse5 = _mm_packus_epi32(_mm256_castsi256_si128(avx5), _mm256_extracti128_si256(avx5, 1));
        sse6 = _mm_packus_epi32(_mm256_castsi256_si128(avx6), _mm256_extracti128_si256(avx6, 1));
        sse7 = _mm_packus_epi32(_mm256_castsi256_si128(avx7), _mm256_extracti128_si256(avx7, 1));
        _mm_storeu_si128((__m128i *)(dst),      sse0);
        _mm_storeu_si128((__m128i *)(dst + 8),  sse1);
        _mm_storeu_si128((__m128i *)(dst + 16), sse2);
        _mm_storeu_si128((__m128i *)(dst + 24), sse3);
        _mm_storeu_si128((__m128i *)(dst + 32), sse4);
        _mm_storeu_si128((__m128i *)(dst + 40), sse5);
        _mm_storeu_si128((__m128i *)(dst + 48), sse6);
        _mm_storeu_si128((__m128i *)(dst + 56), sse7);
    }
    else
#endif
    {
        for (int32_t i = 0; i < 64; i += 4)
        {
            *dst++ = src[*lut++];
            *dst++ = src[*lut++];
            *dst++ = src[*lut++];
            *dst++ = src[*lut++];
        }
    }
}
1 answer

Gather isn't a win here; on Haswell it is slow enough that building vectors with scalar loads and PINSRD beats it, and even on Broadwell/Skylake it is far from free. (See the x86 tag wiki for performance links, especially Agner Fog's insn tables and microarch pdf.)


The usual trick for small tables is to use pshufb as a parallel LUT with 4-bit indices. That gives you 8-bit table entries directly; by combining two lookups with punpcklbw you can produce 16-bit entries. (This only works when the LUT is small enough for 4-bit indices, i.e. at most 16 entries per pshufb.)

That's how SIMD implementations of GF16 (Galois-field) arithmetic do their table lookups, for example (split each input into nibbles, look each nibble up, combine). It doesn't map directly onto your case, though, since your indices go up to 63.
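For illustration, here is a minimal sketch of the 4-bit-index idea (mine, not from the question): one pshufb doing 16 parallel lookups into a hypothetical 16-entry byte table. The table contents are placeholders.

#include <immintrin.h>

// Each byte of idx must be 0..15; a byte with its high bit set would produce 0.
static inline __m128i lut16_lookup(__m128i idx)
{
    // Hypothetical 16-entry byte table (placeholder values).
    const __m128i table = _mm_setr_epi8(
        0, 3, 6, 5, 12, 15, 10, 9, 24, 27, 30, 29, 20, 23, 18, 17);
    // pshufb: output byte i = table[idx[i] & 0x0F]
    return _mm_shuffle_epi8(table, idx);
}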


AVX2 only gives you two in-lane 128b pshufbs glued into one 256b instruction, not a lane-crossing byte shuffle. The really general shuffles arrive with AVX512F: __m512i _mm512_permutex2var_epi32 (__m512i a, __m512i idx, __m512i b). There are versions for byte (vpermi2b in AVX512VBMI), word (vpermi2w in AVX512BW), dword (this one, vpermi2d in AVX512F) and qword (vpermi2q in AVX512F) element sizes. Each indexes across the concatenation of two source registers, so a single instruction can look up from a table spread over two zmm registers. (AMD XOP had vpperm, a two-source byte shuffle.)

The two-source forms (vpermt2d/vpermi2d) differ only in which input operand gets overwritten by the result: the index vector or one of the table vectors. Either way, one instruction does the work of a whole block of your gathers.
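To make the intrinsic quoted above concrete, here is a minimal sketch (my own, with placeholder table values, assuming AVX-512F): a 32-entry dword table held in two zmm registers, looked up with one _mm512_permutex2var_epi32.

#include <immintrin.h>

// Each dword of idx must be 0..31: bit 4 picks tbl_lo vs tbl_hi,
// bits [3:0] pick the element within that register.
static inline __m512i lut32_dword_lookup(__m512i idx)
{
    // Hypothetical 32-entry dword table; each entry equals its own index
    // here, so the result is easy to check by eye.
    const __m512i tbl_lo = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7,
                                             8, 9, 10, 11, 12, 13, 14, 15);
    const __m512i tbl_hi = _mm512_setr_epi32(16, 17, 18, 19, 20, 21, 22, 23,
                                             24, 25, 26, 27, 28, 29, 30, 31);
    return _mm512_permutex2var_epi32(tbl_lo, idx, tbl_hi);
}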


About your current code:

*dst++ = src[*lut++];

This indexes into src using values loaded from lut. Since lut is a compile-time constant, this isn't really a lookup table at all: what you have is a fixed shuffle/permutation of src.

Make g_tables an array of uint8_t. The values are only 0..63, so they fit, and the whole table then takes a quarter of the space in cache. If you still want to feed the indices to an AVX2 gather, you have to widen them first with vpmovzxbd. That's slightly annoying because there is no form of the intrinsic that takes a narrow memory source (e.g. an int64_t *); __m256i _mm256_cvtepu8_epi32 (__m128i a) only accepts an __m128i, so you need a separate load first. One of the design warts of intrinsics, IMO.
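A minimal sketch of that combination (my own; it assumes the index table has been changed to uint8_t, which is not the layout in the question): widen eight byte indices with vpmovzxbd, then gather 32-bit elements.

#include <immintrin.h>
#include <stdint.h>

// Gathers table[idx8[0..7]] into one __m256i. Assumes AVX2.
static inline __m256i gather8_from_u8_indices(const int32_t *table,
                                               const uint8_t *idx8)
{
    // The pmovzx intrinsic has no memory-source form, so load the 8 bytes first.
    __m128i idx_bytes = _mm_loadl_epi64((const __m128i *)idx8);
    __m256i idx = _mm256_cvtepu8_epi32(idx_bytes);   // vpmovzxbd
    return _mm256_i32gather_epi32(table, idx, 4);    // eight 32-bit loads
}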

A few style points, too. It only pays off to vectorize when one call produces all 64 int16_t outputs, which yours does, so that part is fine. Dispatching on the element size with if (sizeof...) inside a template works, since the branch is resolved at compile time, but it is ugly. :( And names like avx0... tell the reader nothing about the data; on x86 the gather loads 4B elements regardless of what you call the variables, which is why you need the mask and the pack back down to 16 bits at the end.

With AVX512, both the sizeof(T) == sizeof(int8_t) and the sizeof(T) == sizeof(int16_t) cases get easy: the whole 64-element src fits in one or two zmm registers, so the lookup turns into register-to-register shuffles.
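For the int16_t case that might look like the following sketch (my own, assuming AVX-512BW and that the 64 indices are stored as int16_t rather than the question's int32_t): two word-granularity two-source shuffles produce all 64 results.

#include <immintrin.h>
#include <stdint.h>

static void lookup64_epi16_avx512(int16_t *dst, const int16_t *src,
                                  const int16_t *lut /* 64 indices, 0..63 */)
{
    __m512i src_lo = _mm512_loadu_si512(src);        // src[0..31]
    __m512i src_hi = _mm512_loadu_si512(src + 32);   // src[32..63]
    __m512i idx_lo = _mm512_loadu_si512(lut);        // indices for dst[0..31]
    __m512i idx_hi = _mm512_loadu_si512(lut + 32);   // indices for dst[32..63]

    // Each index selects one of the 64 words across (src_lo, src_hi):
    // bits [4:0] pick the word, bit 5 picks which register.
    __m512i out_lo = _mm512_permutex2var_epi16(src_lo, idx_lo, src_hi);
    __m512i out_hi = _mm512_permutex2var_epi16(src_lo, idx_hi, src_hi);

    _mm512_storeu_si512(dst, out_lo);
    _mm512_storeu_si512(dst + 32, out_hi);
}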


If instead you keep treating g_tables as a byte LUT, AVX512VBMI gives you vpermi2b and the whole thing is a single shuffle. Without AVX512 you can still emulate a 64-entry byte lookup with four pshufbs: each (16B) pshufb covers one quarter of the index range, 0..15, 16..31 and so on, with pcmpgtb-style compares building the masks that zero the lanes each shuffle isn't responsible for, and the four partial results ORed together. More instructions, but everything stays in registers.
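Here is a minimal sketch of that emulation (my own; it builds the masks with pcmpeqb on the high index bits rather than pcmpgtb ranges, and assumes SSSE3 and byte indices 0..63): sixteen lookups into a 64-entry byte table using four pshufbs.

#include <immintrin.h>
#include <stdint.h>

static __m128i lut64_lookup(const uint8_t table[64], __m128i idx /* bytes 0..63 */)
{
    __m128i result = _mm_setzero_si128();
    // Bits [5:4] of each index say which 16-byte quarter of the table it hits.
    __m128i chunk_id = _mm_and_si128(idx, _mm_set1_epi8(0x30));

    for (int k = 0; k < 4; k++) {
        __m128i chunk = _mm_loadu_si128((const __m128i *)(table + 16 * k));
        // 0xFF in lanes whose index falls in [16k, 16k+15], else 0.
        __m128i in_range = _mm_cmpeq_epi8(chunk_id, _mm_set1_epi8((char)(16 * k)));
        // pshufb indexes with the low 4 bits, which is exactly idx - 16k for
        // in-range lanes; out-of-range lanes are masked off before the OR.
        __m128i looked_up = _mm_shuffle_epi8(chunk, idx);
        result = _mm_or_si128(result, _mm_and_si128(looked_up, in_range));
    }
    return result;
}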


Other suggestions:

Rearrange g_tables so that the permutation it describes is convenient for the shuffles you actually have. Load src into vector registers and reorder it with pshufb and pshufd rather than touching memory per element. (When you do need individual results in integer registers, there are pextrd and pextrq, and movq for the low element, as well as plain movdqu stores.)

When combining pieces of src from two registers, shufps works on integer data too; the bypass-delay penalty for mixing integer and FP shuffles only really matters on Nehalem (and possibly Core2, depending on the model). punpcklwd/dq/qdq (and punpckhwd etc.) are the other cheap two-input shuffles worth reaching for alongside shufps.
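A tiny sketch of those two-source combiners (mine, with arbitrary element choices): punpcklwd interleaving, and shufps used on integer data through casts. Assumes SSE2.

#include <immintrin.h>

// Interleave the low four 16-bit elements of a and b: a0,b0,a1,b1,...
static inline __m128i interleave_lo_words(__m128i a, __m128i b)
{
    return _mm_unpacklo_epi16(a, b);
}

// Pick two dwords from a and two from b with shufps (works on integer data,
// at worst paying a bypass-delay penalty on some older CPUs).
static inline __m128i pick_dwords(__m128i a, __m128i b)
{
    // Result: a[0], a[2], b[1], b[3]  (the element choice is arbitrary here)
    return _mm_castps_si128(
        _mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b),
                       _MM_SHUFFLE(3, 1, 2, 0)));
}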

Keep the buffers 16B-aligned so the vector loads and stores don't pay for crossing cache-line boundaries.

Since g_tables is known at build time (or could be handled by JIT-compiling), you can bake the permutation straight into the instruction sequence instead of reading indices from memory at run time: generate the exact shuffle sequence for each table once, and just execute it.
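The simplest form of that idea, as a sketch (my own; g_tables_c and its contents are placeholders): make the index table a constexpr so the compiler can fully unroll the loop into loads at fixed offsets, with no index loads left at run time.

#include <stdint.h>

// Placeholder constexpr copy of the table; the real values come from g_tables.
static constexpr uint8_t g_tables_c[2][64] = { /* ... */ };

template <int which, class T>
static void lookup_data_baked(int16_t *dst, const T *src)
{
    // `which` and g_tables_c are compile-time constants, so after unrolling
    // each iteration is just a load from src at a fixed offset plus a store.
    for (int i = 0; i < 64; i++)
        dst[i] = (int16_t)src[g_tables_c[which][i]];
}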
