aboutsummaryrefslogtreecommitdiff
path: root/libstdc++-v3/include/experimental/bits/simd_converter.h
blob: 44e476fa59831b029aad0203b08b1430c06696d6 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
// Generic simd conversions -*- C++ -*-

// Copyright (C) 2020-2025 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

#ifndef _GLIBCXX_EXPERIMENTAL_SIMD_CONVERTER_H_
#define _GLIBCXX_EXPERIMENTAL_SIMD_CONVERTER_H_

#if __cplusplus >= 201703L

_GLIBCXX_SIMD_BEGIN_NAMESPACE

// Sequential per-element conversion: reads each of the _Np elements of
// __a, casts it to _To, and writes it into a value-initialized _Ret.
// Used where a whole-register conversion is not done (e.g. the SVE paths
// below all dispatch to this function).
template <typename _Arg, typename _Ret, typename _To, size_t _Np>
  _Ret __converter_fallback(_Arg __a)
  {
    _Ret __result{};
    __execute_n_times<_Np>([&](auto __idx) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
      __result._M_set(__idx, static_cast<_To>(__a[__idx]));
    });
    return __result;
  }

// _SimdConverter scalar -> scalar {{{
// Converts a single element between two different scalar types.  Only
// enabled when _From and _To differ; the identity case needs no converter.
template <typename _From, typename _To>
  struct _SimdConverter<_From, simd_abi::scalar, _To, simd_abi::scalar,
			enable_if_t<!is_same_v<_From, _To>>>
  {
    _GLIBCXX_SIMD_INTRINSIC constexpr _To operator()(_From __a) const noexcept
    { return static_cast<_To>(__a); }
  };

// }}}
// _SimdConverter scalar -> "native" {{{
// Builds a native register of the target ABI from one scalar argument per
// vector lane.
template <typename _From, typename _To, typename _Abi>
  struct _SimdConverter<_From, simd_abi::scalar, _To, _Abi,
			enable_if_t<!is_same_v<_Abi, simd_abi::scalar>>>
  {
    using _Ret = typename _Abi::template __traits<_To>::_SimdMember;

    template <typename... _More>
      _GLIBCXX_SIMD_INTRINSIC constexpr _Ret
      operator()(_From __a, _More... __more) const noexcept
      {
	// Exactly one argument per element of the destination, all of the
	// same scalar type as the first.
	static_assert(sizeof...(_More) + 1 == _Abi::template _S_size<_To>);
	static_assert(conjunction_v<is_same<_From, _More>...>);
	return __make_vector<_To>(__a, __more...);
      }
  };

// }}}
// _SimdConverter "native non-sve 1" -> "native non-sve 2" {{{
// Whole-register conversion between two native ABIs (neither fixed_size,
// scalar, nor SVE).  Not selected for the identity case (same element type
// and same ABI on both sides).
template <typename _From, typename _To, typename _AFrom, typename _ATo>
  struct _SimdConverter<
    _From, _AFrom, _To, _ATo,
    enable_if_t<!disjunction_v<
      __is_fixed_size_abi<_AFrom>, __is_fixed_size_abi<_ATo>,
      is_same<_AFrom, simd_abi::scalar>, is_same<_ATo, simd_abi::scalar>,
      conjunction<is_same<_From, _To>, is_same<_AFrom, _ATo>>>
		  && !(__is_sve_abi<_AFrom>() || __is_sve_abi<_ATo>())>>
  {
    using _Arg = typename _AFrom::template __traits<_From>::_SimdMember;
    using _Ret = typename _ATo::template __traits<_To>::_SimdMember;
    // builtin vector type of the destination
    using _V = __vector_type_t<_To, simd_size_v<_To, _ATo>>;

    // Accepts one or more source registers and converts them into a single
    // destination register via __vector_convert.
    template <typename... _More>
      _GLIBCXX_SIMD_INTRINSIC constexpr _Ret
      operator()(_Arg __a, _More... __more) const noexcept
      { return __vector_convert<_V>(__a, __more...); }
  };

// }}}
// _SimdConverter "native 1" -> "native 2" {{{
// Same constraints as the specialization above, but selected when at least
// one side is an SVE ABI.  Converts element by element through
// __converter_fallback rather than __vector_convert; note that, unlike the
// non-SVE version, only a single source register is accepted.
template <typename _From, typename _To, typename _AFrom, typename _ATo>
  struct _SimdConverter<
	   _From, _AFrom, _To, _ATo,
	   enable_if_t<!disjunction_v<
			  __is_fixed_size_abi<_AFrom>, __is_fixed_size_abi<_ATo>,
			  is_same<_AFrom, simd_abi::scalar>, is_same<_ATo, simd_abi::scalar>,
			  conjunction<is_same<_From, _To>, is_same<_AFrom, _ATo>>>
			 && (__is_sve_abi<_AFrom>() || __is_sve_abi<_ATo>())
	 >>
  {
    using _Arg = typename _AFrom::template __traits<_From>::_SimdMember;
    using _Ret = typename _ATo::template __traits<_To>::_SimdMember;

    _GLIBCXX_SIMD_INTRINSIC constexpr _Ret
    operator()(_Arg __x) const noexcept
    { return __converter_fallback<_Arg, _Ret, _To, simd_size_v<_To, _ATo>>(__x); }
  };

// }}}
// _SimdConverter scalar -> fixed_size<1> {{{1
// Wraps a single converted element into the one-element _SimdTuple that
// implements fixed_size<1> storage.
template <typename _From, typename _To>
  struct _SimdConverter<_From, simd_abi::scalar, _To, simd_abi::fixed_size<1>,
			void>
  {
    _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple<_To, simd_abi::scalar>
    operator()(_From __v) const noexcept
    {
      const _To __converted = static_cast<_To>(__v);
      return {__converted};
    }
  };

// _SimdConverter fixed_size<1> -> scalar {{{1
// Extracts the single element of a one-element _SimdTuple and converts it
// to the destination scalar type.
template <typename _From, typename _To>
  struct _SimdConverter<_From, simd_abi::fixed_size<1>, _To, simd_abi::scalar,
			void>
  {
    _GLIBCXX_SIMD_INTRINSIC constexpr _To
    operator()(_SimdTuple<_From, simd_abi::scalar> __v) const noexcept
    {
      const _To __r = static_cast<_To>(__v.first);
      return __r;
    }
  };

// _SimdConverter fixed_size<_Np> -> fixed_size<_Np> {{{1
// Conversion between fixed_size storage of equal element count but
// different element types.  The underlying _SimdTuple may be built from a
// different number/size of native registers on each side, so the body
// dispatches on how the register chunks of _Arg and _Ret line up.
template <typename _From, typename _To, int _Np>
  struct _SimdConverter<_From, simd_abi::fixed_size<_Np>, _To,
			simd_abi::fixed_size<_Np>,
			enable_if_t<!is_same_v<_From, _To>>>
  {
    using _Ret = __fixed_size_storage_t<_To, _Np>;
    using _Arg = __fixed_size_storage_t<_From, _Np>;

    _GLIBCXX_SIMD_INTRINSIC constexpr _Ret
    operator()(const _Arg& __x) const noexcept
    {
      // NOTE(review): this branch is unreachable — the enable_if constraint
      // on the specialization already requires !is_same_v<_From, _To>.
      if constexpr (is_same_v<_From, _To>)
	return __x;

      // fallback to sequential when sve is available
      else if constexpr (__have_sve)
	return __converter_fallback<_Arg, _Ret, _To, _Np>(__x);

      // special case (optimize) int signedness casts: equal size and both
      // integral, so the bit pattern can be reinterpreted unchanged
      else if constexpr (sizeof(_From) == sizeof(_To)
			 && is_integral_v<_From> && is_integral_v<_To>)
	return __bit_cast<_Ret>(__x);

      // special case if all ABI tags in _Ret are scalar
      else if constexpr (__is_scalar_abi<typename _Ret::_FirstAbi>())
	{
	  return __call_with_subscripts(
	    __x, make_index_sequence<_Np>(),
	    [](auto... __values) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA -> _Ret {
	      return __make_simd_tuple<_To, decltype((void) __values,
						     simd_abi::scalar())...>(
		static_cast<_To>(__values)...);
	    });
	}

      // from one vector to one vector
      else if constexpr (_Arg::_S_first_size == _Ret::_S_first_size)
	{
	  _SimdConverter<_From, typename _Arg::_FirstAbi, _To,
			 typename _Ret::_FirstAbi>
	    __native_cvt;
	  if constexpr (_Arg::_S_tuple_size == 1)
	    return {__native_cvt(__x.first)};
	  else
	    {
	      // convert the leading register, then recurse into this same
	      // converter for the remainder of the tuple
	      constexpr size_t _NRemain = _Np - _Arg::_S_first_size;
	      _SimdConverter<_From, simd_abi::fixed_size<_NRemain>, _To,
			     simd_abi::fixed_size<_NRemain>>
		__remainder_cvt;
	      return {__native_cvt(__x.first), __remainder_cvt(__x.second)};
	    }
	}

      // from one vector to multiple vectors
      else if constexpr (_Arg::_S_first_size > _Ret::_S_first_size)
	{
	  const auto __multiple_return_chunks
	    = __convert_all<__vector_type_t<_To, _Ret::_S_first_size>>(
	      __x.first);
	  constexpr auto __converted = __multiple_return_chunks.size()
				       * _Ret::_FirstAbi::template _S_size<_To>;
	  constexpr auto __remaining = _Np - __converted;
	  if constexpr (_Arg::_S_tuple_size == 1 && __remaining == 0)
	    return __to_simd_tuple<_To, _Np>(__multiple_return_chunks);
	  else if constexpr (_Arg::_S_tuple_size == 1)
	    { // e.g. <int, 3> -> <double, 2, 1> or <short, 7> -> <double, 4, 2,
	      // 1>
	      // The destination tuple tail uses smaller registers; peel off
	      // the already-converted elements and convert the rest, up to
	      // three more chunk sizes deep.
	      using _RetRem
		= __remove_cvref_t<decltype(__simd_tuple_pop_front<__converted>(
		  _Ret()))>;
	      const auto __return_chunks2
		= __convert_all<__vector_type_t<_To, _RetRem::_S_first_size>, 0,
				__converted>(__x.first);
	      constexpr auto __converted2
		= __converted
		  + __return_chunks2.size() * _RetRem::_S_first_size;
	      if constexpr (__converted2 == _Np)
		return __to_simd_tuple<_To, _Np>(__multiple_return_chunks,
						 __return_chunks2);
	      else
		{
		  using _RetRem2 = __remove_cvref_t<
		    decltype(__simd_tuple_pop_front<__return_chunks2.size()
						    * _RetRem::_S_first_size>(
		      _RetRem()))>;
		  const auto __return_chunks3 = __convert_all<
		    __vector_type_t<_To, _RetRem2::_S_first_size>, 0,
		    __converted2>(__x.first);
		  constexpr auto __converted3
		    = __converted2
		      + __return_chunks3.size() * _RetRem2::_S_first_size;
		  if constexpr (__converted3 == _Np)
		    return __to_simd_tuple<_To, _Np>(__multiple_return_chunks,
						     __return_chunks2,
						     __return_chunks3);
		  else
		    {
		      using _RetRem3
			= __remove_cvref_t<decltype(__simd_tuple_pop_front<
						    __return_chunks3.size()
						    * _RetRem2::_S_first_size>(
			  _RetRem2()))>;
		      const auto __return_chunks4 = __convert_all<
			__vector_type_t<_To, _RetRem3::_S_first_size>, 0,
			__converted3>(__x.first);
		      constexpr auto __converted4
			= __converted3
			  + __return_chunks4.size() * _RetRem3::_S_first_size;
		      if constexpr (__converted4 == _Np)
			return __to_simd_tuple<_To, _Np>(
			  __multiple_return_chunks, __return_chunks2,
			  __return_chunks3, __return_chunks4);
		      else
			// more than four distinct chunk sizes are not
			// supported on this path
			__assert_unreachable<_To>();
		    }
		}
	    }
	  else
	    {
	      // leading register handled above; recurse for the remaining
	      // tuple members
	      constexpr size_t _NRemain = _Np - _Arg::_S_first_size;
	      _SimdConverter<_From, simd_abi::fixed_size<_NRemain>, _To,
			     simd_abi::fixed_size<_NRemain>>
		__remainder_cvt;
	      return __simd_tuple_concat(
		__to_simd_tuple<_To, _Arg::_S_first_size>(
		  __multiple_return_chunks),
		__remainder_cvt(__x.second));
	    }
	}

      // from multiple vectors to one vector
      // _Arg::_S_first_size < _Ret::_S_first_size
      // a) heterogeneous input at the end of the tuple (possible with partial
      //    native registers in _Ret)
      else if constexpr (_Ret::_S_tuple_size == 1
			 && _Np % _Arg::_S_first_size != 0)
	{
	  static_assert(_Ret::_FirstAbi::template _S_is_partial<_To>);
	  return _Ret{__generate_from_n_evaluations<
	    _Np, typename _VectorTraits<typename _Ret::_FirstType>::type>(
	    [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
	      return static_cast<_To>(__x[__i]);
	    })};
	}
      else
	{
	  static_assert(_Arg::_S_tuple_size > 1);
	  constexpr auto __n
	    = __div_roundup(_Ret::_S_first_size, _Arg::_S_first_size);
	  return __call_with_n_evaluations<__n>(
	    [&__x](auto... __uncvted) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
	      // assuming _Arg Abi tags for all __i are _Arg::_FirstAbi
	      _SimdConverter<_From, typename _Arg::_FirstAbi, _To,
			     typename _Ret::_FirstAbi>
		__native_cvt;
	      if constexpr (_Ret::_S_tuple_size == 1)
		return _Ret{__native_cvt(__uncvted...)};
	      else
		return _Ret{
		  __native_cvt(__uncvted...),
		  _SimdConverter<
		    _From, simd_abi::fixed_size<_Np - _Ret::_S_first_size>, _To,
		    simd_abi::fixed_size<_Np - _Ret::_S_first_size>>()(
		    __simd_tuple_pop_front<_Ret::_S_first_size>(__x))};
	    }, [&__x](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
	      return __get_tuple_at<__i>(__x);
	    });
	}
    }
  };

// _SimdConverter "native" -> fixed_size<_Np> {{{1
// i.e. 1 register to ? registers
template <typename _From, typename _Ap, typename _To, int _Np>
  struct _SimdConverter<_From, _Ap, _To, simd_abi::fixed_size<_Np>,
			enable_if_t<!__is_fixed_size_abi_v<_Ap>>>
  {
    static_assert(
      _Np == simd_size_v<_From, _Ap>,
      "_SimdConverter to fixed_size only works for equal element counts");

    using _Ret = __fixed_size_storage_t<_To, _Np>;
    using _Arg = typename _SimdTraits<_From, _Ap>::_SimdMember;

    _GLIBCXX_SIMD_INTRINSIC constexpr _Ret
    operator()(_Arg __x) const noexcept
    {
      // With SVE, fall back to sequential element-wise conversion.
      if constexpr (__have_sve)
	return __converter_fallback<_Arg, _Ret, _To, _Np>(__x);
      // destination fits a single register: one __vector_convert suffices
      else if constexpr (_Ret::_S_tuple_size == 1)
	return {__vector_convert<typename _Ret::_FirstType::_BuiltinType>(__x)};
      else
	{
	  // Otherwise repackage the source register into fixed_size storage
	  // and delegate to the fixed_size -> fixed_size converter.
	  using _FixedNp = simd_abi::fixed_size<_Np>;
	  _SimdConverter<_From, _FixedNp, _To, _FixedNp> __fixed_cvt;
	  using _FromFixedStorage = __fixed_size_storage_t<_From, _Np>;
	  if constexpr (_FromFixedStorage::_S_tuple_size == 1)
	    return __fixed_cvt(_FromFixedStorage{__x});
	  else if constexpr (_FromFixedStorage::_S_tuple_size == 2)
	    {
	      // Copy the two halves of __x into the two tuple members
	      // byte-wise; the static_assert guards the memcpy bounds.
	      _FromFixedStorage __tmp;
	      static_assert(sizeof(__tmp) <= sizeof(__x));
	      __builtin_memcpy(&__tmp.first, &__x, sizeof(__tmp.first));
	      __builtin_memcpy(&__tmp.second.first,
			       reinterpret_cast<const char*>(&__x)
				 + sizeof(__tmp.first),
			       sizeof(__tmp.second.first));
	      return __fixed_cvt(__tmp);
	    }
	  else
	    // source storage with more than two tuple members is not
	    // implemented on this path
	    __assert_unreachable<_From>();
	}
    }
  };

// _SimdConverter fixed_size<_Np> -> "native" {{{1
// i.e. ? registers to 1 register
template <typename _From, int _Np, typename _To, typename _Ap>
  struct _SimdConverter<_From, simd_abi::fixed_size<_Np>, _To, _Ap,
			enable_if_t<!__is_fixed_size_abi_v<_Ap>>>
  {
    // Fix of a copy-pasted diagnostic: this specialization converts *from*
    // fixed_size, not to it.
    static_assert(
      _Np == simd_size_v<_To, _Ap>,
      "_SimdConverter from fixed_size only works for equal element counts");

    using _Arg = __fixed_size_storage_t<_From, _Np>;
    using _Ret = typename _SimdTraits<_To, _Ap>::_SimdMember;

    _GLIBCXX_SIMD_INTRINSIC constexpr
    _Ret
    operator()(const _Arg& __x) const noexcept
    {
      // With SVE, fall back to sequential element-wise conversion.
      if constexpr(__have_sve)
	return __converter_fallback<_Arg, _Ret, _To, _Np>(__x);
      // source is a single register: one __vector_convert suffices
      else if constexpr (_Arg::_S_tuple_size == 1)
	return __vector_convert<__vector_type_t<_To, _Np>>(__x.first);
      // all source tuple members have the same type: convert them together
      else if constexpr (_Arg::_S_is_homogeneous)
	return __call_with_n_evaluations<_Arg::_S_tuple_size>(
	  [](auto... __members) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
	    if constexpr ((is_convertible_v<decltype(__members), _To> && ...))
	      return __vector_type_t<_To, _Np>{static_cast<_To>(__members)...};
	    else
	      return __vector_convert<__vector_type_t<_To, _Np>>(__members...);
	  }, [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
	    return __get_tuple_at<__i>(__x);
	  });
      // destination representable as a single-member fixed_size storage:
      // delegate to the fixed_size -> fixed_size converter and unwrap
      else if constexpr (__fixed_size_storage_t<_To, _Np>::_S_tuple_size == 1)
	{
	  _SimdConverter<_From, simd_abi::fixed_size<_Np>, _To,
			 simd_abi::fixed_size<_Np>>
	    __fixed_cvt;
	  return __fixed_cvt(__x).first;
	}
      else
	{
	  // Last resort: gather the elements into one source-typed register,
	  // then convert that register as a whole.
	  const _SimdWrapper<_From, _Np> __xv
	    = __generate_from_n_evaluations<_Np, __vector_type_t<_From, _Np>>(
		[&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { return __x[__i]; });
	  return __vector_convert<__vector_type_t<_To, _Np>>(__xv);
	}
    }
  };

// }}}1
_GLIBCXX_SIMD_END_NAMESPACE
#endif // __cplusplus >= 201703L
#endif // _GLIBCXX_EXPERIMENTAL_SIMD_CONVERTER_H_

// vim: foldmethod=marker sw=2 noet ts=8 sts=2 tw=80