#include "sleef_cl.h"
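
/* _cl_modf overloads (the modf implementation) built on SLEEF's
   Sleef_modf* kernels, provided for every vector width (scalar, 2, 3,
   4, 8, 16) and for each address space of the iptr output argument
   (global, local, private).  Scalars call the scalar SLEEF kernel
   directly; wide vectors call the matching SLEEF vector kernel when the
   corresponding SLEEF_VEC_*_AVAILABLE macro is defined and otherwise
   split the input into its .lo/.hi halves and recurse.  3-element
   vectors are widened to 4 elements and narrowed back afterwards.  The
   SLEEF kernels return a pair: as used here, .x is the fractional part
   (the return value) and .y is the integral part stored through iptr.
   Double-precision overloads are compiled only when cl_khr_fp64 is
   available. */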

_CL_OVERLOADABLE
float
_cl_modf (float x, global float *iptr)
{
  Sleef_float2 temp;
  temp = Sleef_modff (x);
  *iptr = temp.y;
  return temp.x;
}

_CL_OVERLOADABLE
float3
_cl_modf (float3 x, global float3 *iptr)
{
  float4 temp;
  float4 x_3to4;
  x_3to4.xyz = x;
  float4 r = _cl_modf (x_3to4, &temp);
  *iptr = temp.xyz;
  return r.xyz;
}

_CL_OVERLOADABLE
float2
_cl_modf (float2 x, global float2 *iptr)
{
  float plo, phi;
  float lo = _cl_modf (x.lo, &plo);
  float hi = _cl_modf (x.hi, &phi);

  *iptr = (float2) (plo, phi);
  return (float2) (lo, hi);
}

_CL_OVERLOADABLE
float4
_cl_modf (float4 x, global float4 *iptr)
{
#if defined(SLEEF_VEC_128_AVAILABLE)
  Sleef_float4_2 temp;
  temp = Sleef_modff4 (x);
  *iptr = temp.y;
  return temp.x;
#else

  float2 plo, phi;
  float2 lo = _cl_modf (x.lo, &plo);
  float2 hi = _cl_modf (x.hi, &phi);

  *iptr = (float4) (plo, phi);
  return (float4) (lo, hi);

#endif
}

_CL_OVERLOADABLE
float8
_cl_modf (float8 x, global float8 *iptr)
{
#if defined(SLEEF_VEC_256_AVAILABLE)
  Sleef_float8_2 temp;
  temp = Sleef_modff8 (x);
  *iptr = temp.y;
  return temp.x;
#else

  float4 plo, phi;
  float4 lo = _cl_modf (x.lo, &plo);
  float4 hi = _cl_modf (x.hi, &phi);

  *iptr = (float8) (plo, phi);
  return (float8) (lo, hi);

#endif
}

_CL_OVERLOADABLE
float16
_cl_modf (float16 x, global float16 *iptr)
{
#if defined(SLEEF_VEC_512_AVAILABLE)
  Sleef_float16_2 temp;
  temp = Sleef_modff16 (x);
  *iptr = temp.y;
  return temp.x;
#else

  float8 plo, phi;
  float8 lo = _cl_modf (x.lo, &plo);
  float8 hi = _cl_modf (x.hi, &phi);

  *iptr = (float16) (plo, phi);
  return (float16) (lo, hi);

#endif
}

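/* Double-precision overloads, compiled only when the device supports
   cl_khr_fp64.  The vector variants additionally require a SLEEF build
   with double-precision vector kernels (SLEEF_DOUBLE_VEC_AVAILABLE). */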
#ifdef cl_khr_fp64

_CL_OVERLOADABLE
double
_cl_modf (double x, global double *iptr)
{
  Sleef_double2 temp;
  temp = Sleef_modf (x);
  *iptr = temp.y;
  return temp.x;
}

_CL_OVERLOADABLE
double3
_cl_modf (double3 x, global double3 *iptr)
{
  double4 temp;
  double4 x_3to4;
  x_3to4.xyz = x;
  double4 r = _cl_modf (x_3to4, &temp);
  *iptr = temp.xyz;
  return r.xyz;
}

_CL_OVERLOADABLE
double16
_cl_modf (double16 x, global double16 *iptr)
{
  double8 plo, phi;
  double8 lo = _cl_modf (x.lo, &plo);
  double8 hi = _cl_modf (x.hi, &phi);

  *iptr = (double16) (plo, phi);
  return (double16) (lo, hi);
}

_CL_OVERLOADABLE
double2
_cl_modf (double2 x, global double2 *iptr)
{
#if defined(SLEEF_VEC_128_AVAILABLE) && defined(SLEEF_DOUBLE_VEC_AVAILABLE)
  Sleef_double2_2 temp;
  temp = Sleef_modfd2 (x);
  *iptr = temp.y;
  return temp.x;
#else

  double plo, phi;
  double lo = _cl_modf (x.lo, &plo);
  double hi = _cl_modf (x.hi, &phi);

  *iptr = (double2) (plo, phi);
  return (double2) (lo, hi);

#endif
}

_CL_OVERLOADABLE
double4
_cl_modf (double4 x, global double4 *iptr)
{
#if defined(SLEEF_VEC_256_AVAILABLE) && defined(SLEEF_DOUBLE_VEC_AVAILABLE)
  Sleef_double4_2 temp;
  temp = Sleef_modfd4 (x);
  *iptr = temp.y;
  return temp.x;
#else

  double2 plo, phi;
  double2 lo = _cl_modf (x.lo, &plo);
  double2 hi = _cl_modf (x.hi, &phi);

  *iptr = (double4) (plo, phi);
  return (double4) (lo, hi);

#endif
}

_CL_OVERLOADABLE
double8
_cl_modf (double8 x, global double8 *iptr)
{
#if defined(SLEEF_VEC_512_AVAILABLE) && defined(SLEEF_DOUBLE_VEC_AVAILABLE)
  Sleef_double8_2 temp;
  temp = Sleef_modfd8 (x);
  *iptr = temp.y;
  return temp.x;
#else

  double4 plo, phi;
  double4 lo = _cl_modf (x.lo, &plo);
  double4 hi = _cl_modf (x.hi, &phi);

  *iptr = (double8) (plo, phi);
  return (double8) (lo, hi);

#endif
}

#endif /* cl_khr_fp64 */

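/* The same overloads again, this time storing the integral part through
   a pointer to local memory. */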
_CL_OVERLOADABLE
float
_cl_modf (float x, local float *iptr)
{
  Sleef_float2 temp;
  temp = Sleef_modff (x);
  *iptr = temp.y;
  return temp.x;
}

_CL_OVERLOADABLE
float3
_cl_modf (float3 x, local float3 *iptr)
{
  float4 temp;
  float4 x_3to4;
  x_3to4.xyz = x;
  float4 r = _cl_modf (x_3to4, &temp);
  *iptr = temp.xyz;
  return r.xyz;
}

_CL_OVERLOADABLE
float2
_cl_modf (float2 x, local float2 *iptr)
{
  float plo, phi;
  float lo = _cl_modf (x.lo, &plo);
  float hi = _cl_modf (x.hi, &phi);

  *iptr = (float2) (plo, phi);
  return (float2) (lo, hi);
}

_CL_OVERLOADABLE
float4
_cl_modf (float4 x, local float4 *iptr)
{
#if defined(SLEEF_VEC_128_AVAILABLE)
  Sleef_float4_2 temp;
  temp = Sleef_modff4 (x);
  *iptr = temp.y;
  return temp.x;
#else

  float2 plo, phi;
  float2 lo = _cl_modf (x.lo, &plo);
  float2 hi = _cl_modf (x.hi, &phi);

  *iptr = (float4) (plo, phi);
  return (float4) (lo, hi);

#endif
}

_CL_OVERLOADABLE
float8
_cl_modf (float8 x, local float8 *iptr)
{
#if defined(SLEEF_VEC_256_AVAILABLE)
  Sleef_float8_2 temp;
  temp = Sleef_modff8 (x);
  *iptr = temp.y;
  return temp.x;
#else

  float4 plo, phi;
  float4 lo = _cl_modf (x.lo, &plo);
  float4 hi = _cl_modf (x.hi, &phi);

  *iptr = (float8) (plo, phi);
  return (float8) (lo, hi);

#endif
}

_CL_OVERLOADABLE
float16
_cl_modf (float16 x, local float16 *iptr)
{
#if defined(SLEEF_VEC_512_AVAILABLE)
  Sleef_float16_2 temp;
  temp = Sleef_modff16 (x);
  *iptr = temp.y;
  return temp.x;
#else

  float8 plo, phi;
  float8 lo = _cl_modf (x.lo, &plo);
  float8 hi = _cl_modf (x.hi, &phi);

  *iptr = (float16) (plo, phi);
  return (float16) (lo, hi);

#endif
}

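/* Double-precision overloads for a local iptr, guarded as above. */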
#ifdef cl_khr_fp64

_CL_OVERLOADABLE
double
_cl_modf (double x, local double *iptr)
{
  Sleef_double2 temp;
  temp = Sleef_modf (x);
  *iptr = temp.y;
  return temp.x;
}

_CL_OVERLOADABLE
double3
_cl_modf (double3 x, local double3 *iptr)
{
  double4 temp;
  double4 x_3to4;
  x_3to4.xyz = x;
  double4 r = _cl_modf (x_3to4, &temp);
  *iptr = temp.xyz;
  return r.xyz;
}

_CL_OVERLOADABLE
double16
_cl_modf (double16 x, local double16 *iptr)
{
  double8 plo, phi;
  double8 lo = _cl_modf (x.lo, &plo);
  double8 hi = _cl_modf (x.hi, &phi);

  *iptr = (double16) (plo, phi);
  return (double16) (lo, hi);
}

_CL_OVERLOADABLE
double2
_cl_modf (double2 x, local double2 *iptr)
{
#if defined(SLEEF_VEC_128_AVAILABLE) && defined(SLEEF_DOUBLE_VEC_AVAILABLE)
  Sleef_double2_2 temp;
  temp = Sleef_modfd2 (x);
  *iptr = temp.y;
  return temp.x;
#else

  double plo, phi;
  double lo = _cl_modf (x.lo, &plo);
  double hi = _cl_modf (x.hi, &phi);

  *iptr = (double2) (plo, phi);
  return (double2) (lo, hi);

#endif
}

_CL_OVERLOADABLE
double4
_cl_modf (double4 x, local double4 *iptr)
{
#if defined(SLEEF_VEC_256_AVAILABLE) && defined(SLEEF_DOUBLE_VEC_AVAILABLE)
  Sleef_double4_2 temp;
  temp = Sleef_modfd4 (x);
  *iptr = temp.y;
  return temp.x;
#else

  double2 plo, phi;
  double2 lo = _cl_modf (x.lo, &plo);
  double2 hi = _cl_modf (x.hi, &phi);

  *iptr = (double4) (plo, phi);
  return (double4) (lo, hi);

#endif
}

_CL_OVERLOADABLE
double8
_cl_modf (double8 x, local double8 *iptr)
{
#if defined(SLEEF_VEC_512_AVAILABLE) && defined(SLEEF_DOUBLE_VEC_AVAILABLE)
  Sleef_double8_2 temp;
  temp = Sleef_modfd8 (x);
  *iptr = temp.y;
  return temp.x;
#else

  double4 plo, phi;
  double4 lo = _cl_modf (x.lo, &plo);
  double4 hi = _cl_modf (x.hi, &phi);

  *iptr = (double8) (plo, phi);
  return (double8) (lo, hi);

#endif
}

#endif /* cl_khr_fp64 */

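/* Overloads storing the integral part through a pointer to private
   memory; the float3/double3 helpers above rely on these, since their
   temporary vec4 lives in the private address space. */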
_CL_OVERLOADABLE
float
_cl_modf (float x, private float *iptr)
{
  Sleef_float2 temp;
  temp = Sleef_modff (x);
  *iptr = temp.y;
  return temp.x;
}

_CL_OVERLOADABLE
float3
_cl_modf (float3 x, private float3 *iptr)
{
  float4 temp;
  float4 x_3to4;
  x_3to4.xyz = x;
  float4 r = _cl_modf (x_3to4, &temp);
  *iptr = temp.xyz;
  return r.xyz;
}

_CL_OVERLOADABLE
float2
_cl_modf (float2 x, private float2 *iptr)
{
  float plo, phi;
  float lo = _cl_modf (x.lo, &plo);
  float hi = _cl_modf (x.hi, &phi);

  *iptr = (float2) (plo, phi);
  return (float2) (lo, hi);
}

_CL_OVERLOADABLE
float4
_cl_modf (float4 x, private float4 *iptr)
{
#if defined(SLEEF_VEC_128_AVAILABLE)
  Sleef_float4_2 temp;
  temp = Sleef_modff4 (x);
  *iptr = temp.y;
  return temp.x;
#else

  float2 plo, phi;
  float2 lo = _cl_modf (x.lo, &plo);
  float2 hi = _cl_modf (x.hi, &phi);

  *iptr = (float4) (plo, phi);
  return (float4) (lo, hi);

#endif
}

_CL_OVERLOADABLE
float8
_cl_modf (float8 x, private float8 *iptr)
{
#if defined(SLEEF_VEC_256_AVAILABLE)
  Sleef_float8_2 temp;
  temp = Sleef_modff8 (x);
  *iptr = temp.y;
  return temp.x;
#else

  float4 plo, phi;
  float4 lo = _cl_modf (x.lo, &plo);
  float4 hi = _cl_modf (x.hi, &phi);

  *iptr = (float8) (plo, phi);
  return (float8) (lo, hi);

#endif
}

_CL_OVERLOADABLE
float16
_cl_modf (float16 x, private float16 *iptr)
{
#if defined(SLEEF_VEC_512_AVAILABLE)
  Sleef_float16_2 temp;
  temp = Sleef_modff16 (x);
  *iptr = temp.y;
  return temp.x;
#else

  float8 plo, phi;
  float8 lo = _cl_modf (x.lo, &plo);
  float8 hi = _cl_modf (x.hi, &phi);

  *iptr = (float16) (plo, phi);
  return (float16) (lo, hi);

#endif
}

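/* Double-precision overloads for a private iptr, guarded as above. */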
#ifdef cl_khr_fp64

_CL_OVERLOADABLE
double
_cl_modf (double x, private double *iptr)
{
  Sleef_double2 temp;
  temp = Sleef_modf (x);
  *iptr = temp.y;
  return temp.x;
}

_CL_OVERLOADABLE
double3
_cl_modf (double3 x, private double3 *iptr)
{
  double4 temp;
  double4 x_3to4;
  x_3to4.xyz = x;
  double4 r = _cl_modf (x_3to4, &temp);
  *iptr = temp.xyz;
  return r.xyz;
}

_CL_OVERLOADABLE
double16
_cl_modf (double16 x, private double16 *iptr)
{
  double8 plo, phi;
  double8 lo = _cl_modf (x.lo, &plo);
  double8 hi = _cl_modf (x.hi, &phi);

  *iptr = (double16) (plo, phi);
  return (double16) (lo, hi);
}

_CL_OVERLOADABLE
double2
_cl_modf (double2 x, private double2 *iptr)
{
#if defined(SLEEF_VEC_128_AVAILABLE) && defined(SLEEF_DOUBLE_VEC_AVAILABLE)
  Sleef_double2_2 temp;
  temp = Sleef_modfd2 (x);
  *iptr = temp.y;
  return temp.x;
#else

  double plo, phi;
  double lo = _cl_modf (x.lo, &plo);
  double hi = _cl_modf (x.hi, &phi);

  *iptr = (double2) (plo, phi);
  return (double2) (lo, hi);

#endif
}

_CL_OVERLOADABLE
double4
_cl_modf (double4 x, private double4 *iptr)
{
#if defined(SLEEF_VEC_256_AVAILABLE) && defined(SLEEF_DOUBLE_VEC_AVAILABLE)
  Sleef_double4_2 temp;
  temp = Sleef_modfd4 (x);
  *iptr = temp.y;
  return temp.x;
#else

  double2 plo, phi;
  double2 lo = _cl_modf (x.lo, &plo);
  double2 hi = _cl_modf (x.hi, &phi);

  *iptr = (double4) (plo, phi);
  return (double4) (lo, hi);

#endif
}

_CL_OVERLOADABLE
double8
_cl_modf (double8 x, private double8 *iptr)
{
#if defined(SLEEF_VEC_512_AVAILABLE) && defined(SLEEF_DOUBLE_VEC_AVAILABLE)
  Sleef_double8_2 temp;
  temp = Sleef_modfd8 (x);
  *iptr = temp.y;
  return temp.x;
#else

  double4 plo, phi;
  double4 lo = _cl_modf (x.lo, &plo);
  double4 hi = _cl_modf (x.hi, &phi);

  *iptr = (double8) (plo, phi);
  return (double8) (lo, hi);

#endif
}

#endif /* cl_khr_fp64 */