/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright © 2023 Keith Packard
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define _M68K_EXCEPT_SHIFT 6
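
/*
 * Register layout assumed throughout (MC68881/68882-style FPU): the
 * FPSR accrued-exception byte holds the FE_* flag bits in bits 3-7,
 * and the FPCR holds the rounding mode in bits 4-5 and the
 * exception-enable byte in bits 8-15. Shifting an accrued flag left
 * by _M68K_EXCEPT_SHIFT (6) therefore yields the matching enable
 * bit. An fenv_t value packs all three fields into one word: the
 * enable byte in bits 8-15, the accrued flags in bits 3-7 and the
 * rounding mode in bits 0-1.
 */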

__declare_fenv_inline(int) feclearexcept(int excepts)
{
    /* Mask excepts to be sure only supported flag bits are set */

    excepts &= FE_ALL_EXCEPT;

    /* Clear the requested flags */

    fexcept_t fpsr;

    __asm__ volatile("fmove.l %%fpsr, %0" : "=d"(fpsr));
    fpsr &= ~excepts;
    __asm__ volatile("fmove.l %0, %%fpsr" : : "d"(fpsr));

    return 0;
}

__declare_fenv_inline(int) fegetenv(fenv_t *envp)
{

    /* Get the current fpcr and fpsr */

    fenv_t fpcr;
    fexcept_t fpsr;

    __asm__ volatile("fmove.l %%fpcr, %0" : "=d"(fpcr));
    __asm__ volatile("fmove.l %%fpsr, %0" : "=d"(fpsr));

    /* Pack the exception enables, rounding mode and accrued
     * exception flags into one environment word */

    *envp = (fpcr & 0xff00) | ((fpcr >> 4) & 3) | (fpsr & 0xf8);

    return 0;
}

__declare_fenv_inline(int) fegetexceptflag(fexcept_t *flagp, int excepts)
{
    /* Mask excepts to be sure only supported flag bits are set */

    excepts &= FE_ALL_EXCEPT;

    /* Read the current fpsr */

    fexcept_t fpsr;

    __asm__ volatile("fmove.l %%fpsr, %0" : "=d"(fpsr));

    *flagp = (fpsr & excepts);

    return 0;
}

__declare_fenv_inline(int) fegetround(void)
{
    fenv_t fpcr;

    __asm__ volatile("fmove.l %%fpcr, %0" : "=d"(fpcr));

    return (fpcr >> 4) & 3;
}
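
/*
 * fegetround returns the raw hardware rounding-mode encoding
 * (0 = to nearest, 1 = toward zero, 2 = downward, 3 = upward); this
 * assumes the FE_* rounding macros in this port's <fenv.h> are
 * defined to match, as fesetround below relies on the same identity
 * mapping.
 */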

__declare_fenv_inline(int) feholdexcept(fenv_t *envp)
{
    fenv_t fpcr;
    fexcept_t fpsr;

    __asm__ volatile("fmove.l %%fpcr, %0" : "=d"(fpcr));
    __asm__ volatile("fmove.l %%fpsr, %0" : "=d"(fpsr));

    int excepts = fpsr & FE_ALL_EXCEPT;

    *envp = (fpcr & 0xff00) | ((fpcr >> 4) & 3) | (fpsr & 0xf8);

    /* Map the exception flags to their enable bits and clear both */

    fpcr &= ~(excepts << _M68K_EXCEPT_SHIFT);

    fpsr &= ~excepts;

    /* Save to registers */

    __asm__ volatile("fmove.l %0, %%fpcr" : : "d"(fpcr));
    __asm__ volatile("fmove.l %0, %%fpsr" : : "d"(fpsr));

    return 0;
}
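
/*
 * feholdexcept above implements the C99 "non-stop" switch: the
 * environment is saved, then both the accrued flags and the
 * corresponding trap enables are cleared, so subsequent arithmetic
 * records exceptions without trapping.
 */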

__declare_fenv_inline(int) feraiseexcept(int excepts)
{

    /* Mask excepts to be sure only supported flag bits are set */

    excepts &= FE_ALL_EXCEPT;

    /* Set the requested exception flags */

    fexcept_t fpsr;

    __asm__ volatile("fmove.l %%fpsr, %0" : "=d"(fpsr));

    fpsr |= excepts;

    __asm__ volatile("fmove.l %0, %%fpsr" : : "d"(fpsr));

    return 0;
}

__declare_fenv_inline(int) fesetexcept(int excepts)
{
    return feraiseexcept(excepts);
}
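
/*
 * fesetexcept and feraiseexcept coincide here: writing the FPSR
 * accrued-exception byte with fmove.l does not itself take a trap,
 * so "raising" an exception can only set its flag. (This is an
 * assumption about the FPU; an arithmetic instruction would be
 * needed to trigger an enabled trap.)
 */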

__declare_fenv_inline(int) fesetenv(const fenv_t *envp)
{
    fenv_t env = *envp;

    /* Get current fpcr and fpsr */

    fenv_t fpcr;
    fexcept_t fpsr;

    __asm__ volatile("fmove.l %%fpcr, %0" : "=d"(fpcr));
    __asm__ volatile("fmove.l %%fpsr, %0" : "=d"(fpsr));

    /* Set the rounding mode (env bits 0-1 -> fpcr bits 4-5) */

    fpcr = (fpcr & ~(0x3 << 4)) | ((env & 3) << 4);

    /* Set the exception enables */

    fpcr = (fpcr & 0xff) | (env & 0xff00);

    /* Set the exceptions */

    fpsr = (fpsr & ~0xf8) | (env & 0xf8);

    /* Save to registers */

    __asm__ volatile("fmove.l %0, %%fpcr" : : "d"(fpcr));
    __asm__ volatile("fmove.l %0, %%fpsr" : : "d"(fpsr));

    return 0;
}

__declare_fenv_inline(int) fesetexceptflag(const fexcept_t *flagp, int excepts)
{
    excepts &= FE_ALL_EXCEPT;

    fexcept_t fpsr;

    __asm__ volatile("fmove.l %%fpsr, %0" : "=d"(fpsr));

    fpsr &= ~excepts;
    fpsr |= (*flagp & excepts);

    __asm__ volatile("fmove.l %0, %%fpsr" : : "d"(fpsr));

    return 0;
}
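
/*
 * Unlike feraiseexcept, fesetexceptflag only copies previously saved
 * flag bits back into the status register; per C99 it need not (and
 * here does not) raise anything.
 */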

__declare_fenv_inline(int) fesetround(int rounding_mode)
{
    fenv_t fpcr;

    __asm__ volatile("fmove.l %%fpcr, %0" : "=d"(fpcr));

    fpcr = (fpcr & ~(3 << 4)) | ((rounding_mode & 3) << 4);

    __asm__ volatile("fmove.l %0, %%fpcr" : : "d"(fpcr));

    return 0;
}
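
/*
 * Usage sketch (hypothetical caller, not part of this file):
 *
 *     #pragma STDC FENV_ACCESS ON
 *     int save = fegetround();
 *     fesetround(FE_TOWARDZERO);   // e.g. a truncating multiply
 *     double t = x * y;
 *     fesetround(save);
 *
 * Note that fesetround masks its argument to two bits rather than
 * rejecting values that are not rounding-direction macros.
 */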

__declare_fenv_inline(int) fetestexcept(int excepts)
{
    /* Mask excepts to be sure only supported flag bits are set */

    excepts &= FE_ALL_EXCEPT;

    /* Read the current fpsr */

    fexcept_t fpsr;

    __asm__ volatile("fmove.l %%fpsr, %0" : "=d"(fpsr));

    return (fpsr & excepts);
}

__declare_fenv_inline(int) feupdateenv(const fenv_t *envp)
{
    fenv_t env = *envp;

    /* Get current fpcr and fpsr */

    fenv_t fpcr;
    fexcept_t fpsr;

    __asm__ volatile("fmove.l %%fpcr, %0" : "=d"(fpcr));
    __asm__ volatile("fmove.l %%fpsr, %0" : "=d"(fpsr));

    /* Set the rounding mode (env bits 0-1 -> fpcr bits 4-5) */

    fpcr = (fpcr & ~(0x3 << 4)) | ((env & 3) << 4);

    /* Set the exception enables */

    fpcr = (fpcr & 0xff) | (env & 0xff00);

    /* Merge in exceptions */

    fpsr |= (env & 0xf8);

    /* Save to registers */

    __asm__ volatile("fmove.l %0, %%fpcr" : : "d"(fpcr));
    __asm__ volatile("fmove.l %0, %%fpsr" : : "d"(fpsr));

    return 0;
}
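
/*
 * Typical non-stop pattern using the functions above (hypothetical
 * caller, not part of this file):
 *
 *     fenv_t env;
 *     feholdexcept(&env);            // save env, disable traps
 *     y = risky_computation(x);      // may set FE_INVALID etc.
 *     if (fetestexcept(FE_INVALID))
 *         y = fallback(x);
 *     feupdateenv(&env);             // restore env, re-assert flags
 */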

__declare_fenv_inline(int) feenableexcept(int excepts)
{
    fenv_t old_fpcr, new_fpcr;

    __asm__ volatile("fmove.l %%fpcr, %0" : "=d"(old_fpcr));

    /* Enable exceptions */

    new_fpcr = old_fpcr | ((excepts & FE_ALL_EXCEPT) << _M68K_EXCEPT_SHIFT);

    __asm__ volatile("fmove.l %0, %%fpcr" : : "d"(new_fpcr));
    return (old_fpcr >> _M68K_EXCEPT_SHIFT) & FE_ALL_EXCEPT;
}
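
/*
 * feenableexcept above and fedisableexcept/fegetexcept below follow
 * the glibc-style extension API: the first two return the set of
 * exceptions that was enabled before the call, and fegetexcept
 * reports the currently enabled set.
 */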

__declare_fenv_inline(int) fedisableexcept(int excepts)
{
    fenv_t old_fpcr, new_fpcr;

    __asm__ volatile("fmove.l %%fpcr, %0" : "=d"(old_fpcr));

    /* Disable exceptions */

    new_fpcr = old_fpcr & ~((excepts & FE_ALL_EXCEPT) << _M68K_EXCEPT_SHIFT);

    __asm__ volatile("fmove.l %0, %%fpcr" : : "d"(new_fpcr));
    return (old_fpcr >> _M68K_EXCEPT_SHIFT) & FE_ALL_EXCEPT;
}

__declare_fenv_inline(int) fegetexcept(void)
{
    fenv_t fpcr;

    __asm__ volatile("fmove.l %%fpcr, %0" : "=d"(fpcr));

    return (fpcr >> _M68K_EXCEPT_SHIFT) & FE_ALL_EXCEPT;
}