SeqAn3 3.1.0
The Modern C++ library for sequence analysis.
to_simd.hpp
Go to the documentation of this file.
1// -----------------------------------------------------------------------------------------------------
2// Copyright (c) 2006-2021, Knut Reinert & Freie Universität Berlin
3// Copyright (c) 2016-2021, Knut Reinert & MPI für molekulare Genetik
4// This file may be used, modified and/or redistributed under the terms of the 3-clause BSD-License
5// shipped with this file and also available at: https://github.com/seqan/seqan3/blob/master/LICENSE.md
6// -----------------------------------------------------------------------------------------------------
7
13#pragma once
14
15#include <seqan3/std/algorithm>
16#include <seqan3/std/iterator>
17#include <seqan3/std/ranges>
18
29
30namespace seqan3::detail
31{
32
58template <std::ranges::view urng_t, simd::simd_concept simd_t>
59class view_to_simd : public std::ranges::view_interface<view_to_simd<urng_t, simd_t>>
60{
61private:
62
// Constrain the underlying range: an outer forward_range whose value type (one
// sequence) is itself an input_range and default constructible.
63 static_assert(std::ranges::forward_range<urng_t>,
64 "The underlying range must model forward_range.");
65 static_assert(std::ranges::input_range<std::ranges::range_value_t<urng_t>>,
66 "Expects the value type of the underlying range to be an input_range.");
67 static_assert(std::default_initializable<std::ranges::range_value_t<urng_t>>,
68 "Expects the inner range to be default constructible.");
// NOTE(review): the condition of the following static_assert (original line 69,
// presumably a semialphabet requirement on the inner value type, judging from the
// message text) is missing from this extraction -- confirm against the full source.
70 "Expects semi-alphabet as value type of the inner range.");
71
// Convenience aliases used by the view and its iterator.
75 using inner_range_type = std::ranges::range_value_t<urng_t>; // One sequence of the batch.
77 using scalar_type = typename simd_traits<simd_t>::scalar_type; // Scalar element type of simd_t.
79 using max_simd_type = simd_type_t<uint8_t, simd_traits<simd_t>::max_length>; // Widest byte-packed simd vector.
81
// Bulk ("fast") loading is possible when the inner sequences are contiguous in
// memory, their iterator/sentinel pair is sized (so remaining length is cheap to
// query), and the alphabet rank occupies exactly one byte.
86 static constexpr bool fast_load = std::ranges::contiguous_range<inner_range_type> &&
87 std::sized_sentinel_for<std::ranges::iterator_t<inner_range_type>,
88 std::ranges::sentinel_t<inner_range_type>> &&
89 sizeof(alphabet_rank_t<std::ranges::range_value_t<inner_range_type>>) == 1;
90
// Number of sequences processed in parallel (= number of simd lanes of simd_t).
92 static constexpr uint8_t chunk_size = simd_traits<simd_t>::length;
// How many simd_t-sized chunks one max-width load yields.
94 static constexpr uint8_t chunks_per_load = simd_traits<simd_t>::max_length / chunk_size;
// Chunks cached per refill: quadratic in chunks_per_load for fast load, else one at a time.
96 static constexpr uint8_t total_chunks = fast_load ? (chunks_per_load * chunks_per_load) : 1;
// Size of the inner alphabet; also the default padding rank (see constructor below).
98 static constexpr auto alphabet_size = alphabet_size<std::ranges::range_value_t<inner_range_type>>;
100
101 // Forward declare class' iterator type. See definition below.
102 struct iterator_type;
103
104public:
105
// Rule-of-five defaults: the view is default constructible, copyable and movable.
109 constexpr view_to_simd() = default;
110 constexpr view_to_simd(view_to_simd const &) = default;
111 constexpr view_to_simd(view_to_simd &&) = default;
112 constexpr view_to_simd & operator=(view_to_simd const &) = default;
113 constexpr view_to_simd & operator=(view_to_simd &&) = default;
114 ~view_to_simd() = default;
115
// Construct from the underlying batch of sequences and an optional padding rank.
// Throws std::invalid_argument if the batch holds more than chunk_size sequences.
120 constexpr view_to_simd(urng_t urng, scalar_type const padding_value = alphabet_size) :
121 urng{std::move(urng)},
122 padding_simd_vector{simd::fill<simd_t>(padding_value)},
123 padding_value{padding_value}
124 {
125 // Check if the size is less or equal the simd size.
126 if (std::ranges::distance(urng) > chunk_size)
127 throw std::invalid_argument{"The size of the underlying range must be less than or equal to the size of "
128 "the given simd type!"};
129 }
130
// Converting constructor: type-erases any other viewable range via views::type_reduce
// and delegates to the main constructor above.
132 template <typename other_urng_t>
134 requires (!std::same_as<std::remove_cvref_t<other_urng_t>, view_to_simd>) &&
135 (!std::same_as<other_urng_t, urng_t>) &&
136 std::ranges::viewable_range<other_urng_t>
138 constexpr view_to_simd(other_urng_t && urng, scalar_type const padding_value = alphabet_size) :
139 view_to_simd{views::type_reduce(std::forward<other_urng_t>(urng)), padding_value}
140 {}
142
// Returns an iterator over the cached simd chunks. Non-const only: iteration
// consumes the underlying sequences and mutates the cached chunks.
147 constexpr iterator_type begin() noexcept
148 {
149 return {*this};
150 }
151
153 constexpr void begin() const noexcept = delete; // Deleted: iteration mutates cached state.
154
// The end is a default sentinel; the iterator itself knows when it is exhausted.
156 constexpr std::default_sentinel_t end() noexcept
157 {
158 return std::default_sentinel;
159 }
160
162 constexpr void end() const noexcept = delete; // Deleted: see begin() const.
164
// The view is empty iff every inner sequence is empty.
166 constexpr bool empty() const noexcept
168 requires std::ranges::forward_range<inner_range_type>
170 {
171 return std::ranges::all_of(urng, [] (auto & rng)
172 {
173 return std::ranges::empty(rng);
174 });
175 }
176
// Number of chunks produced: ceil(longest sequence length / chunk_size),
// or 0 for an empty batch.
183 constexpr size_t size() const noexcept
185 requires std::ranges::sized_range<inner_range_type>
187 {
188 auto it = std::ranges::max_element(urng, [] (auto & lhs, auto & rhs)
189 {
190 return std::ranges::size(lhs) < std::ranges::size(rhs);
191 });
192
193 return (it != std::ranges::end(urng)) ? (std::ranges::size(*it) + chunk_size - 1) / chunk_size : 0;
194 }
195
196private:
197
198 urng_t urng{}; // The underlying batch of sequences.
// NOTE(review): the declaration of `chunk_type` (an array-like collection of simd_t,
// judging from iterator_type::operator* which takes .begin() of one element and
// indexes it per lane) is missing from this extraction -- confirm against the full source.
199 std::array<chunk_type, total_chunks> cached_simd_chunks{}; // Chunks filled by the iterator's underflow().
200 simd_t padding_simd_vector{}; // padding_value broadcast to every simd lane.
201 scalar_type padding_value{}; // Rank used to pad exhausted sequences.
202};
203
211template <std::ranges::view urng_t, simd::simd_concept simd_t>
212class view_to_simd<urng_t, simd_t>::iterator_type
213{
214public:
// Standard input-iterator member types.
// NOTE(review): the `reference` alias used below (a std::span over the cached simd
// chunks, judging from operator*) is missing from this extraction -- confirm against
// the full source.
219 using value_type = reference;
220 using pointer = void;
221 using difference_type = ptrdiff_t;
222 using iterator_category = std::input_iterator_tag;
223 using iterator_concept = iterator_category;
225
// Rule-of-five defaults.
229 constexpr iterator_type() = default;
230 constexpr iterator_type(iterator_type const &) = default;
231 constexpr iterator_type(iterator_type &&) = default;
232 constexpr iterator_type & operator=(iterator_type const &) = default;
233 constexpr iterator_type & operator=(iterator_type &&) = default;
234 ~iterator_type() = default;
235
// Construct from the view: caches one iterator/sentinel pair per sequence, fills
// unused lanes with "empty" sequences, and performs the initial underflow so the
// first dereference is valid.
244 constexpr iterator_type(view_to_simd & this_view) : this_view{&this_view}, current_chunk_pos{0}
245 {
246 // Initialise the iterator of the sub ranges.
247 size_t seq_id = 0;
248 for (auto it = std::ranges::begin(this_view.urng); it != std::ranges::end(this_view.urng); ++it, ++seq_id)
249 {
250 cached_iter[seq_id] = std::ranges::begin(*it);
251 cached_sentinel[seq_id] = std::ranges::end(*it);
252 }
253
254 // The batch is empty and by default the constructed iterator is pointing to the end.
255 if (seq_id == 0)
256 return;
257
258 // The batch is not empty but might not be full either.
259 // If a slot is supposed to be empty, it will be initialised with the iterator of the first sequence set to the
260 // end emulating an empty sequence.
261 auto sentinel_it = std::ranges::next(cached_iter[0], cached_sentinel[0]);
262 for (; seq_id < chunk_size; ++seq_id)
263 {
264 cached_iter[seq_id] = sentinel_it;
265 cached_sentinel[seq_id] = cached_sentinel[0];
266 }
267
268 // Check if this is the final chunk already.
269 final_chunk = all_iterators_reached_sentinel();
270
271 // Fetch the next available input characters from the sequences and transform them into simd vectors.
272 underflow();
273 }
275
// Returns a span over the current cached chunk of simd vectors; the final chunk
// may be shorter than chunk_size (final_chunk_size vectors).
280 constexpr reference operator*() const noexcept
281 {
282 assert(this_view != nullptr);
283 return std::span{this_view->cached_simd_chunks[current_chunk_pos].begin(),
284 (current_chunk_pos == final_chunk_pos) ? final_chunk_size : chunk_size};
285 }
287
// Pre-increment: advance to the next cached chunk, refilling the cache via
// underflow() once all cached chunks are consumed.
292 constexpr iterator_type & operator++(/*pre-increment*/)
293 {
294 if constexpr (fast_load)
295 { // Check if cached chunks have been already consumed and we need to fetch the next chunks.
296 if (current_chunk_pos == final_chunk_pos)
297 {
298 underflow();
299 current_chunk_pos = 0;
300 }
301 else
302 {
303 ++current_chunk_pos;
304 }
305 }
306 else // In case fast load is not available only one chunk is filled at a time.
307 {
308 underflow();
309 }
310
311 return *this;
312 }
313
// Post-increment: input iterator semantics -- returns the current value (not a
// copy of the iterator) before advancing.
315 constexpr value_type operator++(int /*post-increment*/)
316 {
317 value_type tmp = this->operator*();
318 ++(*this);
319 return tmp;
320 }
322
// (In)equality against the default sentinel: the iterator compares equal to the
// sentinel once at_end is set (all input consumed and cached chunks delivered).
327 constexpr bool operator==(std::default_sentinel_t const &) const noexcept
328 {
329 return at_end;
330 }
331
333 friend constexpr bool operator==(std::default_sentinel_t const &, iterator_type const & rhs) noexcept
334 {
335 return rhs.at_end;
336 }
337
339 constexpr bool operator!=(std::default_sentinel_t const &) const noexcept
340 {
341 return !at_end;
342 }
343
345 friend constexpr bool operator!=(std::default_sentinel_t const &, iterator_type const & rhs) noexcept
346 {
347 return !rhs.at_end;
348 }
350
351private:
// Splits one max-width byte row into simd_t vectors: depending on how chunk_size
// relates to the maximal simd length, the row is cut into halves, quarters or
// eighths and each piece upcast to simd_t; otherwise the whole row is upcast as one.
362 auto unpack(max_simd_type const & row) const
363 {
364 if constexpr (chunk_size == simd_traits<max_simd_type>::length / 2) // upcast into 2 vectors.
365 {
366 return std::array{simd::upcast<simd_t>(extract_half<0>(row)), // 1. half
367 simd::upcast<simd_t>(extract_half<1>(row))}; // 2. half
368 }
369 else if constexpr (chunk_size == simd_traits<max_simd_type>::length / 4) // upcast into 4 vectors.
370 {
371 return std::array{simd::upcast<simd_t>(extract_quarter<0>(row)), // 1. quarter
372 simd::upcast<simd_t>(extract_quarter<1>(row)), // 2. quarter
373 simd::upcast<simd_t>(extract_quarter<2>(row)), // 3. quarter
374 simd::upcast<simd_t>(extract_quarter<3>(row))}; // 4. quarter
375 }
376 else if constexpr (chunk_size == simd_traits<max_simd_type>::length / 8) // upcast into 8 vectors.
377 {
378 return std::array{simd::upcast<simd_t>(extract_eighth<0>(row)), // 1. eighth
379 simd::upcast<simd_t>(extract_eighth<1>(row)), // 2. eighth
380 simd::upcast<simd_t>(extract_eighth<2>(row)), // 3. eighth
381 simd::upcast<simd_t>(extract_eighth<3>(row)), // 4. eighth
382 simd::upcast<simd_t>(extract_eighth<4>(row)), // 5. eighth
383 simd::upcast<simd_t>(extract_eighth<5>(row)), // 6. eighth
384 simd::upcast<simd_t>(extract_eighth<6>(row)), // 7. eighth
385 simd::upcast<simd_t>(extract_eighth<7>(row))}; // 8. eighth
386 }
387 else
388 {
389 return std::array{simd::upcast<simd_t>(row)};
390 }
391 }
392
// Distributes the transposed byte matrix into the view's cached simd chunks,
// replacing the ~0 sentinel bytes (written for positions past a sequence's end,
// see the fast-load underflow below) with the configured padding vector.
403 constexpr void split_into_sub_matrices(std::array<max_simd_type, simd_traits<max_simd_type>::length> matrix) const
404 {
405 auto apply_padding = [this] (simd_t const vec)
406 {
407 return (vec == simd::fill<simd_t>(static_cast<uint8_t>(~0))) ? this_view->padding_simd_vector : vec;
408 };
409
410 // Iterate over the rows of the matrix
411 for (uint8_t row = 0; row < static_cast<uint8_t>(matrix.size()); ++row)
412 {
413 // split a row into multiple chunks of size `chunk_size`
414 auto chunked_row = unpack(matrix[row]);
415
416 if constexpr (chunked_row.size() == 1)
417 {
418 this_view->cached_simd_chunks[0][row] = apply_padding(std::move(chunked_row[0]));
419 }
420 else // Parse the tuple elements and store them in the cached simd chunks.
421 {
422 static_assert(chunked_row.size() == chunks_per_load, "Expected chunks_per_load many simd vectors.");
423
424 for (uint8_t chunk = 0; chunk < chunks_per_load; ++chunk) // store chunks in respective cached entries.
425 {
426 size_t idx = chunk * chunks_per_load + row / chunk_size;
427 this_view->cached_simd_chunks[idx][row % chunk_size] = apply_padding(std::move(chunked_row[chunk]));
428 }
429 }
430 }
431 }
432
// True iff every cached iterator has reached its sentinel, i.e. all sequences
// are fully consumed.
436 constexpr bool all_iterators_reached_sentinel() const noexcept
437 {
438 using std::get;
439
440 return std::ranges::all_of(views::zip(cached_iter, cached_sentinel), [] (auto && iterator_sentinel_pair)
441 {
442 return get<0>(iterator_sentinel_pair) == get<1>(iterator_sentinel_pair);
443 });
444 }
445
// Scalar fallback path: gathers one rank per sequence into a single simd column,
// writing the padding value for lanes whose sequence is already exhausted, and
// advances the surviving iterators by one.
456 constexpr simd_t convert_single_column()
457 noexcept
458 {
459 simd_t simd_column{};
460 for (size_t idx = 0u; idx < chunk_size; ++idx)
461 {
462 if (cached_iter[idx] == cached_sentinel[idx])
463 {
464 simd_column[idx] = this_view->padding_value;
465 }
466 else
467 {
468 simd_column[idx] = static_cast<scalar_type>(seqan3::to_rank(*cached_iter[idx]));
469 ++cached_iter[idx];
470 }
471 };
472 return simd_column;
473 }
474
// Computes, from the iterator positions as they were BEFORE the last refill, the
// index of the final cached chunk and how many simd vectors that chunk holds.
485 template <typename array_t>
486 constexpr void update_final_chunk_position(array_t const & iterators_before_update) noexcept
487 {
488 size_t max_distance = 0;
489 for (auto && [it, sent] : views::zip(iterators_before_update, cached_sentinel))
490 max_distance = std::max<size_t>(std::ranges::distance(it, sent), max_distance);
491
492 assert(max_distance > 0);
493 assert(max_distance <= (total_chunks * chunk_size));
494
495 --max_distance;
496 final_chunk_pos = max_distance / chunk_size;
497 // The decrement above maps a multiple of chunk_size onto the previous chunk; the +1 restores a size in [1, chunk_size].
498 final_chunk_size = (max_distance % chunk_size) + 1;
499 }
500
// Fast-load refill: bulk-loads max-width byte rows from the contiguous sequences,
// transposes the resulting quadratic byte matrix with simd instructions, and
// splits it into the cached simd chunks.
502 constexpr void underflow()
504 requires fast_load
506 {
507 at_end = final_chunk;
508 if (at_end) // reached end of stream.
509 return;
510 // For the efficient load we assume at most one byte sized alphabets.
511 // Hence we can load `simd_traits<simd_t>::max_length` length many elements at once.
512 // Depending on the packing of `simd_t` we can prefetch blocks and store them in the `cached_simd_chunks`.
513 // E.g. assume `simd_t` with length 8 on SSE4 with max length 16.
514 // To fill the 16x16 matrix we need four 8x8 matrices.
515 // Thus, for the 8 sequences we need to load two times 16 consecutive bytes to fill the matrix, i.e. two loads
516 // see figure below.
517 //
518 // 0 1 ... 7 | 8 9 ... 15
519 // 0 [a00, a01, ..., a07]|[a08, a09, ..., a15] // first load of seq a reads 16 characters
520 // 1 [b00, b01, ..., b07]|[b08, b09, ..., b15] // first load of seq b reads 16 characters
521 // ... | ...
522 // 7 [g00, g01, ..., g07]|[g08, g09, ..., g15] // first load of seq g reads 16 characters
523 // ----------------------------------------
524 // 8 [a16, a17, ..., a23]|[a24, a25, ..., a31] // second load of seq a reads next 16 characters
525 // 9 [b16, b17, ..., b23]|[b24, b25, ..., b31] // second load of seq b reads next 16 characters
526 // ... | ...
527 // 15 [g16, g17, ..., g23]|[g24, g25, ..., g31] // second load of seq g reads next 16 characters
528 //
529 // This quadratic byte matrix can be transposed efficiently with simd instructions.
530 // If the target simd scalar type is bigger we can apply the same mechanism but have then 16 4x4 matrices
531 // (32 bit) or 256 2x2 matrices (64 bit).
532
533 constexpr int8_t max_size = simd_traits<simd_t>::max_length;
// NOTE(review): the declaration of the local `matrix` (original line 534, presumably
// `std::array<max_simd_type, simd_traits<max_simd_type>::length>` as taken by
// split_into_sub_matrices) is missing from this extraction -- confirm against the full source.
535 decltype(cached_iter) iterators_before_update{cached_iter}; // Keep track of iterators before the update.
536 // Iterate over each sequence.
537 for (uint8_t sequence_pos = 0; sequence_pos < chunk_size; ++sequence_pos)
538 { // Iterate over each block depending on the packing of the target simd vector.
539 for (uint8_t chunk_pos = 0; chunk_pos < chunks_per_load; ++chunk_pos)
540 {
541 uint8_t pos = chunk_pos * chunk_size + sequence_pos; // matrix entry to fill
542 if (cached_sentinel[sequence_pos] - cached_iter[sequence_pos] >= max_size)
543 { // Not in final block, thus load directly from memory.
544 matrix[pos] = simd::load<max_simd_type>(std::addressof(*cached_iter[sequence_pos]));
545 std::advance(cached_iter[sequence_pos], max_size);
546 }
547 else // Loads the final block byte wise in order to not load from uninitialised memory.
548 {
549 matrix[pos] = simd::fill<max_simd_type>(~0);
550 auto & sequence_it = cached_iter[sequence_pos];
551 for (int8_t idx = 0; sequence_it != cached_sentinel[sequence_pos]; ++sequence_it, ++idx)
552 matrix[pos][idx] = seqan3::to_rank(*sequence_it);
553 }
554 }
555 }
556
557 // Handle the final chunk, which might end at an offset that is not a multiple of `chunk_size`.
558 final_chunk = all_iterators_reached_sentinel();
559
560 if (final_chunk)
561 update_final_chunk_position(iterators_before_update);
562
563 simd::transpose(matrix);
564 split_into_sub_matrices(std::move(matrix));
565 }
566
// Scalar refill: fills exactly one cached chunk, one simd column at a time.
568 constexpr void underflow()
570 requires (!fast_load)
572 {
573 at_end = final_chunk;
574 if (at_end) // reached end of stream.
575 return;
576
577 decltype(cached_iter) iterators_before_update{cached_iter}; // Keep track of iterators before the update.
578 for (size_t i = 0; i < chunk_size; ++i)
579 this_view->cached_simd_chunks[0][i] = convert_single_column();
580
581 final_chunk = all_iterators_reached_sentinel();
582
583 if (final_chunk)
584 update_final_chunk_position(iterators_before_update);
585 }
586
// NOTE(review): the declaration of `cached_iter` (original lines 587-589, presumably
// `std::array<std::ranges::iterator_t<inner_range_type>, chunk_size>`) is missing
// from this extraction -- confirm against the full source.
590 std::array<std::ranges::sentinel_t<inner_range_type>, chunk_size> cached_sentinel{}; // One sentinel per lane.
592 view_to_simd * this_view{nullptr}; // The view whose chunks this iterator fills and reads.
594 uint8_t final_chunk_size{chunk_size}; // Number of simd vectors in the final chunk.
596 uint8_t final_chunk_pos{total_chunks - 1}; // Index of the final cached chunk.
598 uint8_t current_chunk_pos{0}; // Index of the chunk operator* currently exposes.
600 bool final_chunk{true}; // Set once all iterators reached their sentinels.
602 bool at_end{true}; // True once the final chunk has been consumed.
603};
604
605// ============================================================================
606// to_simd_fn (adaptor definition)
607// ============================================================================
608
// Adaptor functor behind views::to_simd: callable with or without a padding value,
// directly on a range or as a pipeable closure.
617template <simd::simd_concept simd_t>
618struct to_simd_fn
619{
// Scalar type of simd_t, used for the padding value.
621 using padding_t = typename simd_traits<simd_t>::scalar_type;
622
// Stores the padding value and returns a pipeable closure.
626 constexpr auto operator()(padding_t const padding_value) const noexcept
627 {
628 return detail::adaptor_from_functor{*this, padding_value};
629 }
630
// Returns a pipeable closure with the default padding.
632 constexpr auto operator()() const noexcept
633 {
634 return detail::adaptor_from_functor{*this};
635 }
636
// Direct invocation with an explicit padding value: checks the range requirements
// and constructs the view over the type-reduced range.
642 template <std::ranges::range urng_t>
643 constexpr auto operator()(urng_t && urange, padding_t const padding_value) const noexcept
644 {
645 static_assert(std::ranges::forward_range<urng_t>,
646 "The underlying range in views::to_simd must model std::ranges::forward_range.");
647 static_assert(std::ranges::viewable_range<urng_t>,
648 "The underlying range in views::to_simd must model std::ranges::viewable_range.");
649 static_assert(std::ranges::input_range<std::ranges::range_value_t<urng_t>>,
650 "The value type of the underlying range must model std::ranges::input_range.");
// NOTE(review): the condition of the following static_assert (original line 651,
// presumably a semialphabet requirement, judging from the message) is missing from
// this extraction -- confirm against the full source.
652 "The value type of the inner ranges must model seqan3::semialphabet.");
653
654 return view_to_simd<type_reduce_t<urng_t>, simd_t>{std::forward<urng_t>(urange), padding_value};
655 }
656
// Direct invocation with the default padding: same checks as above.
661 template <std::ranges::range urng_t>
662 constexpr auto operator()(urng_t && urange) const noexcept
663 {
664 static_assert(std::ranges::forward_range<urng_t>,
665 "The underlying range in views::to_simd must model std::ranges::forward_range.");
666 static_assert(std::ranges::viewable_range<urng_t>,
667 "The underlying range in views::to_simd must model std::ranges::viewable_range.");
668 static_assert(std::ranges::input_range<std::ranges::range_value_t<urng_t>>,
669 "The value type of the underlying range must model std::ranges::input_range.");
// NOTE(review): the condition of this static_assert (original line 670) is likewise
// missing from this extraction -- confirm against the full source.
671 "The value type of the inner ranges must model seqan3::semialphabet.");
672
673 return view_to_simd<type_reduce_t<urng_t>, simd_t>{std::forward<urng_t>(urange)};
674 }
675
// Pipe support: `urange | to_simd_fn{}` forwards to the call operator above.
677 template <std::ranges::range urng_t>
678 constexpr friend auto operator|(urng_t && urange, to_simd_fn const & me)
679 {
680 return me(std::forward<urng_t>(urange));
681 }
682};
683
684} // namespace seqan3::detail
685
686namespace seqan3::views
687{
688
// The user-facing views::to_simd adaptor object: transforms a range of sequences
// into chunks of seqan3::simd vectors (implemented by detail::to_simd_fn above).
790template <simd::simd_concept simd_t>
791inline constexpr auto to_simd = detail::to_simd_fn<simd_t>{};
792
793} // namespace seqan3::views
Provides seqan3::detail::adaptor_from_functor.
T addressof(T... args)
T advance(T... args)
Provides algorithms to modify seqan3::simd::simd_type.
The <algorithm> header from C++20's standard library.
Core alphabet concept and free function/type trait wrappers.
T begin(T... args)
Provides various transformation traits used by the range module.
T end(T... args)
T fill(T... args)
T forward(T... args)
constexpr auto alphabet_size
A type trait that holds the size of a (semi-)alphabet.
Definition: concept.hpp:861
constexpr auto to_rank
Return the rank representation of a (semi-)alphabet object.
Definition: concept.hpp:155
auto operator|(validator1_type &&vali1, validator2_type &&vali2)
Enables the chaining of validators.
Definition: validators.hpp:1120
constexpr size_t size
The size of a type pack.
Definition: traits.hpp:151
constexpr auto chunk
A chunk view.
Definition: chunk.hpp:29
constexpr auto zip
A zip view.
Definition: zip.hpp:29
constexpr auto type_reduce
A view adaptor that behaves like std::views::all, but type erases certain ranges.
Definition: type_reduce.hpp:153
The basis for seqan3::alphabet, but requires only rank interface (not char).
The <iterator> header from C++20's standard library.
The SeqAn namespace for views.
Definition: char_to.hpp:22
constexpr auto to_simd
A view that transforms a range of ranges into chunks of seqan3::simd vectors.
Definition: to_simd.hpp:791
constexpr auto const & get(configuration< configs_t... > const &config) noexcept
This is an overloaded member function, provided for convenience. It differs from the above function o...
Definition: configuration.hpp:429
SeqAn specific customisations in the standard namespace.
T operator!=(T... args)
The <ranges> header from C++20's standard library.
Provides seqan3::simd::simd_type.
Provides seqan3::simd::simd_traits.
Provides type traits for working with templates.
Provides seqan3::views::type_reduce.
Provides seqan3::simd::simd_concept.
Provides seqan3::views::zip.