/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright © 2023 Keith Packard
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define _M68K_EXCEPT_SHIFT 6

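/*
 * Background for the shift above: on the 68881/68882, the FPSR
 * accrued exception byte occupies bits 3-7 (INEX, DZ, UNFL, OVFL,
 * IOP) and the FPCR exception enable byte occupies bits 8-15.
 * Shifting an accrued exception mask left by 6 lines each flag up
 * with its matching trap enable (e.g. DZ at FPSR bit 4 maps to the
 * DZ enable at FPCR bit 10); that offset is what _M68K_EXCEPT_SHIFT
 * encodes. The code below assumes the FE_* exception macros use the
 * accrued-byte encoding, i.e. that FE_ALL_EXCEPT == 0xf8, matching
 * the "fpsr & 0xf8" masks used throughout.
 */
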
__declare_fenv_inline(int) feclearexcept(int excepts)
{
    /* Mask excepts to be sure only supported flag bits are set */

    excepts &= FE_ALL_EXCEPT;

    /* Clear the requested flags */

    fexcept_t fpsr;

    __asm__ volatile("fmove.l %%fpsr, %0" : "=d"(fpsr));
    fpsr &= ~excepts;
    __asm__ volatile("fmove.l %0, %%fpsr" : : "d"(fpsr));

    return 0;
}

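/*
 * fenv_t packs three fields into one word: the FPCR exception enable
 * byte stays in bits 8-15, the FPCR rounding mode (bits 5-4) moves
 * down to bits 1-0, and the FPSR accrued exception byte stays in
 * bits 3-7. fesetenv and feupdateenv below unpack the same layout.
 */
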
__declare_fenv_inline(int) fegetenv(fenv_t *envp)
{
    /* Get the current fpcr and fpsr */

    fenv_t fpcr;
    fexcept_t fpsr;

    __asm__ volatile("fmove.l %%fpcr, %0" : "=d"(fpcr));
    __asm__ volatile("fmove.l %%fpsr, %0" : "=d"(fpsr));

    /* Mix the exceptions and rounding mode together */

    *envp = (fpcr & 0xff00) | ((fpcr >> 4) & 3) | (fpsr & 0xf8);

    return 0;
}

__declare_fenv_inline(int) fegetexceptflag(fexcept_t *flagp, int excepts)
{
    /* Mask excepts to be sure only supported flag bits are set */

    excepts &= FE_ALL_EXCEPT;

    /* Read the current fpsr */

    fexcept_t fpsr;

    __asm__ volatile("fmove.l %%fpsr, %0" : "=d"(fpsr));

    *flagp = (fpsr & excepts);

    return 0;
}

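/* The rounding mode is the 2-bit RND field in FPCR bits 5-4 */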
__declare_fenv_inline(int) fegetround(void)
{
    fenv_t fpcr;

    __asm__ volatile("fmove.l %%fpcr, %0" : "=d"(fpcr));

    return (fpcr >> 4) & 3;
}

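/*
 * feholdexcept saves the current environment, then clears both the
 * accrued exception flags and the corresponding trap enables so that
 * subsequent arithmetic runs in non-stop mode until the caller
 * restores the environment with fesetenv or feupdateenv.
 */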
__declare_fenv_inline(int) feholdexcept(fenv_t *envp)
{
    fenv_t fpcr;
    fexcept_t fpsr;

    __asm__ volatile("fmove.l %%fpcr, %0" : "=d"(fpcr));
    __asm__ volatile("fmove.l %%fpsr, %0" : "=d"(fpsr));

    int excepts = fpsr & FE_ALL_EXCEPT;

    *envp = (fpcr & 0xff00) | ((fpcr >> 4) & 3) | (fpsr & 0xf8);

    /* Map exception flags to exception enables and clear both */

    fpcr &= ~(excepts << _M68K_EXCEPT_SHIFT);
    fpsr &= ~excepts;

    /* Save to registers */

    __asm__ volatile("fmove.l %0, %%fpcr" : : "d"(fpcr));
    __asm__ volatile("fmove.l %0, %%fpsr" : : "d"(fpsr));

    return 0;
}

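/*
 * Note that writing the FPSR only sets the accrued exception bits;
 * it does not execute a floating point operation, so no enabled trap
 * fires here even if the exception is unmasked in the FPCR.
 */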
__declare_fenv_inline(int) feraiseexcept(int excepts)
{
    /* Mask excepts to be sure only supported flag bits are set */

    excepts &= FE_ALL_EXCEPT;

    /* Set the requested exception flags */

    fexcept_t fpsr;

    __asm__ volatile("fmove.l %%fpsr, %0" : "=d"(fpsr));

    fpsr |= excepts;

    __asm__ volatile("fmove.l %0, %%fpsr" : : "d"(fpsr));

    return 0;
}

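/*
 * Because feraiseexcept above only sets flags without trapping, it
 * already provides the semantics fesetexcept requires.
 */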
__declare_fenv_inline(int) fesetexcept(int excepts)
{
    return feraiseexcept(excepts);
}

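/*
 * fesetenv unpacks the fields described above fegetenv: rounding
 * mode from env bits 1-0 back into FPCR bits 5-4, exception enables
 * from env bits 8-15 into the FPCR, and accrued exception flags from
 * env bits 3-7 into the FPSR.
 */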
__declare_fenv_inline(int) fesetenv(const fenv_t *envp)
{
    fenv_t env = *envp;

    /* Get current fpcr and fpsr */

    fenv_t fpcr;
    fexcept_t fpsr;

    __asm__ volatile("fmove.l %%fpcr, %0" : "=d"(fpcr));
    __asm__ volatile("fmove.l %%fpsr, %0" : "=d"(fpsr));

    /* Set the rounding mode, shifting it back up into the RND field */

    fpcr = (fpcr & ~(0x3 << 4)) | ((env & 3) << 4);

    /* Set the exception enables */

    fpcr = (fpcr & 0xff) | (env & 0xff00);

    /* Set the exceptions */

    fpsr = (fpsr & ~0xf8) | (env & 0xf8);

    /* Save to registers */

    __asm__ volatile("fmove.l %0, %%fpcr" : : "d"(fpcr));
    __asm__ volatile("fmove.l %0, %%fpsr" : : "d"(fpsr));

    return 0;
}

__declare_fenv_inline(int) fesetexceptflag(const fexcept_t *flagp, int excepts)
{
    /* Mask excepts to be sure only supported flag bits are set */

    excepts &= FE_ALL_EXCEPT;

    /* Replace the selected flags with their saved values */

    fexcept_t fpsr;

    __asm__ volatile("fmove.l %%fpsr, %0" : "=d"(fpsr));

    fpsr &= ~excepts;
    fpsr |= (*flagp & excepts);

    __asm__ volatile("fmove.l %0, %%fpsr" : : "d"(fpsr));

    return 0;
}

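/*
 * This relies on the FE_* rounding macros matching the hardware RND
 * encoding: 0 = to nearest, 1 = toward zero, 2 = toward minus
 * infinity, 3 = toward plus infinity.
 */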
__declare_fenv_inline(int) fesetround(int rounding_mode)
{
    fenv_t fpcr;

    if (rounding_mode & ~3)
        return 1;

    __asm__ volatile("fmove.l %%fpcr, %0" : "=d"(fpcr));

    fpcr = (fpcr & ~(3 << 4)) | (rounding_mode << 4);

    __asm__ volatile("fmove.l %0, %%fpcr" : : "d"(fpcr));

    return 0;
}

__declare_fenv_inline(int) fetestexcept(int excepts)
{
    /* Mask excepts to be sure only supported flag bits are set */

    excepts &= FE_ALL_EXCEPT;

    /* Read the current fpsr */

    fexcept_t fpsr;

    __asm__ volatile("fmove.l %%fpsr, %0" : "=d"(fpsr));

    return (fpsr & excepts);
}

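/*
 * feupdateenv differs from fesetenv in one respect: the accrued
 * exception flags recorded in *envp are ORed into the FPSR rather
 * than replacing it, so exceptions raised since the environment was
 * saved remain set.
 */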
__declare_fenv_inline(int) feupdateenv(const fenv_t *envp)
{
    fenv_t env = *envp;

    /* Get current fpcr and fpsr */

    fenv_t fpcr;
    fexcept_t fpsr;

    __asm__ volatile("fmove.l %%fpcr, %0" : "=d"(fpcr));
    __asm__ volatile("fmove.l %%fpsr, %0" : "=d"(fpsr));

    /* Set the rounding mode, shifting it back up into the RND field */

    fpcr = (fpcr & ~(0x3 << 4)) | ((env & 3) << 4);

    /* Set the exception enables */

    fpcr = (fpcr & 0xff) | (env & 0xff00);

    /* Merge in exceptions */

    fpsr |= (env & 0xf8);

    /* Save to registers */

    __asm__ volatile("fmove.l %0, %%fpcr" : : "d"(fpcr));
    __asm__ volatile("fmove.l %0, %%fpsr" : : "d"(fpsr));

    return 0;
}

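/*
 * feenableexcept, fedisableexcept and fegetexcept are GNU
 * extensions. The first two return the set of exceptions whose traps
 * were enabled before the call; fegetexcept reports the currently
 * enabled set.
 */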
__declare_fenv_inline(int) feenableexcept(int excepts)
{
    fenv_t old_fpcr, new_fpcr;

    __asm__ volatile("fmove.l %%fpcr, %0" : "=d"(old_fpcr));

    /* Enable exceptions */

    new_fpcr = old_fpcr | ((excepts & FE_ALL_EXCEPT) << _M68K_EXCEPT_SHIFT);

    __asm__ volatile("fmove.l %0, %%fpcr" : : "d"(new_fpcr));

    return (old_fpcr >> _M68K_EXCEPT_SHIFT) & FE_ALL_EXCEPT;
}

__declare_fenv_inline(int) fedisableexcept(int excepts)
{
    fenv_t old_fpcr, new_fpcr;

    __asm__ volatile("fmove.l %%fpcr, %0" : "=d"(old_fpcr));

    /* Disable exceptions */

    new_fpcr = old_fpcr & ~((excepts & FE_ALL_EXCEPT) << _M68K_EXCEPT_SHIFT);

    __asm__ volatile("fmove.l %0, %%fpcr" : : "d"(new_fpcr));

    return (old_fpcr >> _M68K_EXCEPT_SHIFT) & FE_ALL_EXCEPT;
}

__declare_fenv_inline(int) fegetexcept(void)
{
    fenv_t fpcr;

    __asm__ volatile("fmove.l %%fpcr, %0" : "=d"(fpcr));

    return (fpcr >> _M68K_EXCEPT_SHIFT) & FE_ALL_EXCEPT;
}
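
/*
 * A typical non-stop usage pattern for this API (an illustrative
 * sketch only, not part of this file's interface):
 *
 *     fenv_t env;
 *     feholdexcept(&env);        // save env, clear flags, mask traps
 *     double r = a / b;          // may set FE_DIVBYZERO silently
 *     if (fetestexcept(FE_DIVBYZERO))
 *         r = 0.0;               // handle the exception locally
 *     feupdateenv(&env);         // restore env, merge leftover flags
 */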