/* xref: /qemu/tests/tcg/xtensa/test_dfp0_arith.S (revision 6e0bc06e) */
#include "macros.inc"
#include "fpu.h"

test_suite fp0_arith

#if XCHAL_HAVE_DFP

/*
 * movfp: load the 64-bit immediate \v into double FP register \fr.
 * Clobbers a2 (high word) and a3 (low word); writes both halves with wfrd.
 */
.macro movfp fr, v
    movi    a2, ((\v) >> 32) & 0xffffffff
    movi    a3, ((\v) & 0xffffffff)
    wfrd    \fr, a2, a3
.endm

/*
 * check_res: assert that double FP register \fr holds the 64-bit value \r
 * and that FSR holds exactly the exception flags \sr.  Clobbers a2 and a3.
 */
.macro check_res fr, r, sr
    rfrd    a2, \fr                 /* a2 = high 32 bits of \fr */
    dump    a2
    movi    a3, ((\r) >> 32) & 0xffffffff
    assert  eq, a2, a3
    rfr     a2, \fr                 /* a2 = low 32 bits of \fr */
    dump    a2
    movi    a3, ((\r) & 0xffffffff)
    assert  eq, a2, a3
    rur     a2, fsr                 /* check accumulated FP exception flags */
    movi    a3, \sr
    assert  eq, a2, a3
.endm

test add_d
    /* enable the FP coprocessor before the first FP instruction */
    movi    a2, 1
    wsr     a2, cpenable

    /* MAX_FLOAT + MAX_FLOAT = +inf/MAX_FLOAT  */
    test_op2 add.d, f6, f7, f8, F64_MAX, F64_MAX, \
        F64_PINF, F64_MAX, F64_PINF, F64_MAX, \
          FSR_OI,  FSR_OI,   FSR_OI,  FSR_OI
test_end

test add_d_inf
    /* 1 + +inf = +inf  */
    test_op2 add.d, f6, f7, f8, F64_1, F64_PINF, \
        F64_PINF, F64_PINF, F64_PINF, F64_PINF, \
           FSR__,    FSR__,    FSR__,    FSR__

    /* +inf + -inf = default NaN */
    test_op2 add.d, f0, f1, f2, F64_PINF, F64_NINF, \
        F64_DNAN, F64_DNAN, F64_DNAN, F64_DNAN, \
           FSR_V,    FSR_V,    FSR_V,    FSR_V
test_end

test add_d_nan_dfpu
    /* 1 + QNaN = QNaN  */
    test_op2 add.d, f9, f10, f11, F64_1, F64_QNAN(1), \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR__,       FSR__,       FSR__,       FSR__
    /* 1 + SNaN = QNaN  */
    test_op2 add.d, f12, f13, f14, F64_1, F64_SNAN(1), \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR_V,       FSR_V,       FSR_V,       FSR_V

    /* DFPU propagates the first NaN operand: SNaN1 + SNaN2 = QNaN1 */
    test_op2 add.d, f15, f0, f1, F64_SNAN(1), F64_SNAN(2), \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR_V,       FSR_V,       FSR_V,       FSR_V
    /* QNaN1 + SNaN2 = QNaN1 */
    test_op2 add.d, f5, f6, f7, F64_QNAN(1), F64_SNAN(2), \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR_V,       FSR_V,       FSR_V,       FSR_V
    /* SNaN1 + QNaN2 = QNaN1 */
    test_op2 add.d, f8, f9, f10, F64_SNAN(1), F64_QNAN(2), \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR_V,       FSR_V,       FSR_V,       FSR_V
test_end

test sub_d
    /* norm - norm = denorm */
    test_op2 sub.d, f6, f7, f8, F64_MIN_NORM | 1, F64_MIN_NORM, \
        0x00000001, 0x00000001, 0x00000001, 0x00000001, \
             FSR__,      FSR__,      FSR__,      FSR__
test_end

test mul_d
    /* (1 + ulp) * (1 + ulp): inexact; result depends on the active
       rounding mode (one expected value per mode column) */
    test_op2 mul.d, f0, f1, f2, F64_1 | 1, F64_1 | 1, \
        F64_1 | 2, F64_1 | 2, F64_1 | 3, F64_1 | 2, \
            FSR_I,     FSR_I,     FSR_I,     FSR_I
    /* MAX_FLOAT/2 * MAX_FLOAT/2 = +inf/MAX_FLOAT  */
    test_op2 mul.d, f6, f7, f8, F64_MAX_2, F64_MAX_2, \
        F64_PINF, F64_MAX, F64_PINF, F64_MAX, \
          FSR_OI,  FSR_OI,   FSR_OI,  FSR_OI
    /* min norm * min norm = 0/denorm */
    test_op2 mul.d, f6, f7, f8, F64_MIN_NORM, F64_MIN_NORM, \
         F64_0,  F64_0, 0x00000001,  F64_0, \
        FSR_UI, FSR_UI,     FSR_UI, FSR_UI
    /* inf * 0 = default NaN */
    test_op2 mul.d, f6, f7, f8, F64_PINF, F64_0, \
        F64_DNAN, F64_DNAN, F64_DNAN, F64_DNAN, \
           FSR_V,    FSR_V,    FSR_V,    FSR_V
test_end

test madd_d
    /* 0 + (1 + ulp) * (1 + ulp): inexact; per-rounding-mode results
       match the plain mul.d case above */
    test_op3 madd.d, f0, f1, f2, f0, F64_0, F64_1 | 1, F64_1 | 1, \
        F64_1 | 2, F64_1 | 2, F64_1 | 3, F64_1 | 2, \
            FSR_I,     FSR_I,     FSR_I,     FSR_I
test_end

test madd_d_precision
    /* -(1 + 2ulp) + (1 + ulp) * (1 + ulp): the exact tiny residual
       survives only if the multiply-add is fused (no intermediate
       rounding of the product) — NOTE(review): expected value
       0x3970000000000000 should match ulp^2; confirm against the ISA */
    test_op3 madd.d, f0, f1, f2, f0, \
        F64_MINUS | F64_1 | 2, F64_1 | 1, F64_1 | 1, \
        0x3970000000000000, 0x3970000000000000, 0x3970000000000000, 0x3970000000000000, \
             FSR__,      FSR__,      FSR__,      FSR__
test_end

test madd_d_nan_dfpu
    /* DFPU madd/msub NaN1, NaN2, NaN3 priority: NaN1, NaN3, NaN2 */
    test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_1, F64_1, \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR__,       FSR__,       FSR__,       FSR__
    test_op3 madd.d, f0, f1, f2, f0, F64_1, F64_QNAN(2), F64_1, \
        F64_QNAN(2), F64_QNAN(2), F64_QNAN(2), F64_QNAN(2), \
              FSR__,       FSR__,       FSR__,       FSR__
    test_op3 madd.d, f0, f1, f2, f0, F64_1, F64_1, F64_QNAN(3), \
        F64_QNAN(3), F64_QNAN(3), F64_QNAN(3), F64_QNAN(3), \
              FSR__,       FSR__,       FSR__,       FSR__

    /* NaN1 wins over NaN2 */
    test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_QNAN(2), F64_1, \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR__,       FSR__,       FSR__,       FSR__
    /* NaN1 wins over NaN3 */
    test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_1, F64_QNAN(3), \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR__,       FSR__,       FSR__,       FSR__
    /* NaN3 wins over NaN2 */
    test_op3 madd.d, f0, f1, f2, f0, F64_1, F64_QNAN(2), F64_QNAN(3), \
        F64_QNAN(3), F64_QNAN(3), F64_QNAN(3), F64_QNAN(3), \
              FSR__,       FSR__,       FSR__,       FSR__

    test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_QNAN(2), F64_QNAN(3), \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR__,       FSR__,       FSR__,       FSR__

    /* inf * 0 = default NaN */
    test_op3 madd.d, f0, f1, f2, f0, F64_1, F64_PINF, F64_0, \
        F64_DNAN, F64_DNAN, F64_DNAN, F64_DNAN, \
           FSR_V,    FSR_V,    FSR_V,    FSR_V
    /* inf * 0 + SNaN1 = QNaN1 */
    test_op3 madd.d, f0, f1, f2, f0, F64_SNAN(1), F64_PINF, F64_0, \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR_V,       FSR_V,       FSR_V,       FSR_V
    /* inf * 0 + QNaN1 = QNaN1 */
    test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_PINF, F64_0, \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR_V,       FSR_V,       FSR_V,       FSR_V

    /* madd/msub SNaN turns to QNaN and sets Invalid flag */
    test_op3 madd.d, f0, f1, f2, f0, F64_SNAN(1), F64_1, F64_1, \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR_V,       FSR_V,       FSR_V,       FSR_V
    test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_SNAN(2), F64_1, \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR_V,       FSR_V,       FSR_V,       FSR_V
test_end

#endif

test_suite_end