#pragma once

#include <array>
#include <cstdint>

#include <immintrin.h> // SSE2/SSE4.1 intrinsics (_mm_loadu_si128, _mm_unpack*_epi*, _mm_cvtep*, _mm_srli_si128)

// Note: the simd concept, simd_traits and builtin_simd helpers used below come from the
// SeqAn3 simd headers; their exact include paths depend on the library version and are
// therefore not spelled out here.

namespace seqan3::detail
{

//!\brief Loads simd_t size bits of integral data from memory (unaligned load).
template <simd::simd_concept simd_t>
constexpr simd_t load_sse4(void const * mem_addr);

//!\brief Transposes the given 16x16 byte simd vector matrix in place.
template <simd::simd_concept simd_t>
inline void transpose_matrix_sse4(std::array<simd_t, simd_traits<simd_t>::length> & matrix);

//!\brief Upcasts the given vector into the target vector using sign extension of the packed values.
template <simd::simd_concept target_simd_t, simd::simd_concept source_simd_t>
constexpr target_simd_t upcast_signed_sse4(source_simd_t const & src);

//!\brief Upcasts the given vector into the target vector using zero extension of the packed values.
template <simd::simd_concept target_simd_t, simd::simd_concept source_simd_t>
constexpr target_simd_t upcast_unsigned_sse4(source_simd_t const & src);

//!\brief Extracts one half of the given simd vector and stores it in the lower half of the target vector.
template <uint8_t index, simd::simd_concept simd_t>
constexpr simd_t extract_half_sse4(simd_t const & src);

//!\brief Extracts one quarter of the given simd vector and stores it in the lower quarter of the target vector.
template <uint8_t index, simd::simd_concept simd_t>
constexpr simd_t extract_quarter_sse4(simd_t const & src);

//!\brief Extracts one eighth of the given simd vector and stores it in the lower eighth of the target vector.
template <uint8_t index, simd::simd_concept simd_t>
constexpr simd_t extract_eighth_sse4(simd_t const & src);

} // namespace seqan3::detail
namespace seqan3::detail
{

template <simd::simd_concept simd_t>
constexpr simd_t load_sse4(void const * mem_addr)
{
    // Unaligned 128 bit load; mem_addr does not need to be aligned to any particular boundary.
    return reinterpret_cast<simd_t>(_mm_loadu_si128(reinterpret_cast<__m128i const *>(mem_addr)));
}
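// Illustrative usage sketch (not part of this header; the simd_type_t alias is assumed from the
// SeqAn3 simd layer): load 16 packed int8_t values from an unaligned buffer.
//
//     using int8x16_t = seqan3::simd::simd_type_t<int8_t, 16>;
//     std::array<int8_t, 16> buffer{/* ... */};
//     int8x16_t vec = seqan3::detail::load_sse4<int8x16_t>(buffer.data());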
template <simd::simd_concept simd_t>
inline void transpose_matrix_sse4(std::array<simd_t, simd_traits<simd_t>::length> & matrix)
{
    static_assert(simd_traits<simd_t>::length == simd_traits<simd_t>::max_length, "Expects byte scalar type.");
    static_assert(is_native_builtin_simd_v<simd_t>, "The passed simd vector is not a native SSE4 simd vector type.");
    static_assert(is_builtin_simd_v<simd_t>, "The passed simd vector is not a builtin vector type.");

    // Look-up table with the bit-reversed 4-bit row indices; it maps the rows produced by the
    // interleaving passes below back to their final positions in the transposed matrix.
    constexpr std::array<char, 16> bit_reverse{0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15};
    // Pass 1: interleave adjacent rows byte-wise (8 bit lanes).
    __m128i tmp1[16];
    for (int i = 0; i < 8; ++i)
    {
        tmp1[i] = _mm_unpacklo_epi8(reinterpret_cast<__m128i &>(matrix[2 * i]),
                                    reinterpret_cast<__m128i &>(matrix[2 * i + 1]));
        tmp1[i + 8] = _mm_unpackhi_epi8(reinterpret_cast<__m128i &>(matrix[2 * i]),
                                        reinterpret_cast<__m128i &>(matrix[2 * i + 1]));
    }
    // Pass 2: interleave the intermediate rows 16 bit wise.
    __m128i tmp2[16];
    for (int i = 0; i < 8; ++i)
    {
        tmp2[i] = _mm_unpacklo_epi16(tmp1[2 * i], tmp1[2 * i + 1]);
        tmp2[i + 8] = _mm_unpackhi_epi16(tmp1[2 * i], tmp1[2 * i + 1]);
    }
    for (int i = 0; i < 8; ++i)
    {
        tmp1[i] = _mm_unpacklo_epi32(tmp2[2 * i], tmp2[2 * i + 1]);
        tmp1[i + 8] = _mm_unpackhi_epi32(tmp2[2 * i], tmp2[2 * i + 1]);
    }
    // Final pass: interleave 64 bit wise and write the rows back to their bit-reversed target positions.
    for (int i = 0; i < 8; ++i)
    {
        matrix[bit_reverse[i]] = reinterpret_cast<simd_t>(_mm_unpacklo_epi64(tmp1[2 * i], tmp1[2 * i + 1]));
        matrix[bit_reverse[i + 8]] = reinterpret_cast<simd_t>(_mm_unpackhi_epi64(tmp1[2 * i], tmp1[2 * i + 1]));
    }
}
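// Usage sketch (the simd_type_t alias is an assumption taken from the SeqAn3 simd layer):
// the 16x16 byte matrix is transposed in place, so afterwards row i holds the former column i.
//
//     using uint8x16_t = seqan3::simd::simd_type_t<uint8_t, 16>;
//     std::array<uint8x16_t, 16> m{}; // fill with 16 rows of 16 bytes each ...
//     seqan3::detail::transpose_matrix_sse4(m); // now m[i][j] == old m[j][i]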
template <simd::simd_concept target_simd_t, simd::simd_concept source_simd_t>
constexpr target_simd_t upcast_signed_sse4(source_simd_t const & src)
{
    if constexpr (simd_traits<source_simd_t>::length == 16) // cast from epi8 ...
    {
        if constexpr (simd_traits<target_simd_t>::length == 8) // ... to epi16
            return reinterpret_cast<target_simd_t>(_mm_cvtepi8_epi16(reinterpret_cast<__m128i const &>(src)));
        if constexpr (simd_traits<target_simd_t>::length == 4) // ... to epi32
            return reinterpret_cast<target_simd_t>(_mm_cvtepi8_epi32(reinterpret_cast<__m128i const &>(src)));
        if constexpr (simd_traits<target_simd_t>::length == 2) // ... to epi64
            return reinterpret_cast<target_simd_t>(_mm_cvtepi8_epi64(reinterpret_cast<__m128i const &>(src)));
    }
    else if constexpr (simd_traits<source_simd_t>::length == 8) // cast from epi16 ...
    {
        if constexpr (simd_traits<target_simd_t>::length == 4) // ... to epi32
            return reinterpret_cast<target_simd_t>(_mm_cvtepi16_epi32(reinterpret_cast<__m128i const &>(src)));
        if constexpr (simd_traits<target_simd_t>::length == 2) // ... to epi64
            return reinterpret_cast<target_simd_t>(_mm_cvtepi16_epi64(reinterpret_cast<__m128i const &>(src)));
    }
    else // cast from epi32 to epi64
    {
        static_assert(simd_traits<source_simd_t>::length == 4, "Expected 32 bit scalar type.");
        return reinterpret_cast<target_simd_t>(_mm_cvtepi32_epi64(reinterpret_cast<__m128i const &>(src)));
    }
}
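// Usage sketch (alias names assumed from the SeqAn3 simd layer): sign-extend 16 packed int8_t
// lanes to 8 packed int16_t lanes. Only the lower 8 source lanes fit into the wider target,
// and a lane holding -1 stays -1 after widening.
//
//     using int8x16_t = seqan3::simd::simd_type_t<int8_t, 16>;
//     using int16x8_t = seqan3::simd::simd_type_t<int16_t, 8>;
//     int16x8_t widened = seqan3::detail::upcast_signed_sse4<int16x8_t>(src);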
template <simd::simd_concept target_simd_t, simd::simd_concept source_simd_t>
constexpr target_simd_t upcast_unsigned_sse4(source_simd_t const & src)
{
    if constexpr (simd_traits<source_simd_t>::length == 16) // cast from epu8 ...
    {
        if constexpr (simd_traits<target_simd_t>::length == 8) // ... to epi16
            return reinterpret_cast<target_simd_t>(_mm_cvtepu8_epi16(reinterpret_cast<__m128i const &>(src)));
        if constexpr (simd_traits<target_simd_t>::length == 4) // ... to epi32
            return reinterpret_cast<target_simd_t>(_mm_cvtepu8_epi32(reinterpret_cast<__m128i const &>(src)));
        if constexpr (simd_traits<target_simd_t>::length == 2) // ... to epi64
            return reinterpret_cast<target_simd_t>(_mm_cvtepu8_epi64(reinterpret_cast<__m128i const &>(src)));
    }
    else if constexpr (simd_traits<source_simd_t>::length == 8) // cast from epu16 ...
    {
        if constexpr (simd_traits<target_simd_t>::length == 4) // ... to epi32
            return reinterpret_cast<target_simd_t>(_mm_cvtepu16_epi32(reinterpret_cast<__m128i const &>(src)));
        if constexpr (simd_traits<target_simd_t>::length == 2) // ... to epi64
            return reinterpret_cast<target_simd_t>(_mm_cvtepu16_epi64(reinterpret_cast<__m128i const &>(src)));
    }
    else // cast from epu32 to epi64
    {
        static_assert(simd_traits<source_simd_t>::length == 4, "Expected 32 bit scalar type.");
        return reinterpret_cast<target_simd_t>(_mm_cvtepu32_epi64(reinterpret_cast<__m128i const &>(src)));
    }
}
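// In contrast to upcast_signed_sse4, the unsigned variant zero-extends: a uint8_t lane holding
// 0xFF widens to 255, whereas the signed upcast interprets the same bit pattern as -1.
// Sketch (assumed alias names):
//
//     using uint8x16_t = seqan3::simd::simd_type_t<uint8_t, 16>;
//     using uint16x8_t = seqan3::simd::simd_type_t<uint16_t, 8>;
//     uint16x8_t widened = seqan3::detail::upcast_unsigned_sse4<uint16x8_t>(src);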
template <uint8_t index, simd::simd_concept simd_t>
constexpr simd_t extract_half_sse4(simd_t const & src)
{
    // Byte-wise right shift by index * 8 bytes, i.e. one half of the 16-byte register.
    return reinterpret_cast<simd_t>(_mm_srli_si128(reinterpret_cast<__m128i const &>(src), index << 3));
}
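// Usage sketch (assumed alias name): index 0 keeps the lower half, index 1 moves the upper half
// into the lower half; the vacated upper bytes are zero-filled by the shift.
//
//     using int16x8_t = seqan3::simd::simd_type_t<int16_t, 8>;
//     int16x8_t upper = seqan3::detail::extract_half_sse4<1>(vec); // lanes 4..7 of vec, rest zero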
template <uint8_t index, simd::simd_concept simd_t>
constexpr simd_t extract_quarter_sse4(simd_t const & src)
{
    // Byte-wise right shift by index * 4 bytes, i.e. one quarter of the 16-byte register.
    return reinterpret_cast<simd_t>(_mm_srli_si128(reinterpret_cast<__m128i const &>(src), index << 2));
}
template <uint8_t index, simd::simd_concept simd_t>
constexpr simd_t extract_eighth_sse4(simd_t const & src)
{
    // Byte-wise right shift by index * 2 bytes, i.e. one eighth of the 16-byte register.
    return reinterpret_cast<simd_t>(_mm_srli_si128(reinterpret_cast<__m128i const &>(src), index << 1));
}

} // namespace seqan3::detail
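// Usage sketch for the narrower extracts (assumed alias name): extract_quarter_sse4<i> shifts the
// register right so that the i-th 4-byte quarter lands in the lowest 4 bytes, extract_eighth_sse4<i>
// does the same for the i-th 2-byte eighth; the shift zero-fills from the most significant end.
//
//     using uint8x16_t = seqan3::simd::simd_type_t<uint8_t, 16>;
//     uint8x16_t q2 = seqan3::detail::extract_quarter_sse4<2>(vec); // bytes 8..11 of vec in the low quarter
//     uint8x16_t e7 = seqan3::detail::extract_eighth_sse4<7>(vec);  // bytes 14..15 of vec in the low eighth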