1 /*
2 (C) Copyright IBM Corp. 2007, 2008
3
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 * Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11 * Redistributions in binary form must reproduce the above copyright
12 notice, this list of conditions and the following disclaimer in the
13 documentation and/or other materials provided with the distribution.
14 * Neither the name of IBM nor the names of its contributors may be
15 used to endorse or promote products derived from this software without
16 specific prior written permission.
17
18 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 POSSIBILITY OF SUCH DAMAGE.
29
30 */
31
#ifndef __EA_INTERNAL_H
#define __EA_INTERNAL_H

#include <ea.h>

/* Signal code used to route POSIX.1 assisted calls to the PPE-side
   handler.  */
#define JSRE_POSIX1_SIGNALCODE 0x2101
/* Opcodes for the individual POSIX.1 assisted calls.  */
#define SPE_POSIX1_FTOK 0x05
#define SPE_POSIX1_MMAP 0x0b
#define SPE_POSIX1_MUNMAP 0x0e
/* implemented to here -- the opcodes below are defined but their
   library wrappers are not yet implemented.  */
#define SPE_POSIX1_MREMAP 0x0c
#define SPE_POSIX1_MSYNC 0x0d
#define SPE_POSIX1_SHMGET 0x14
#define SPE_POSIX1_SHMCTL 0x12
#define SPE_POSIX1_SHMAT 0x11
#define SPE_POSIX1_SHMDT 0x13
#define SPE_POSIX1_SHM_OPEN 0x15
#define SPE_POSIX1_SHM_UNLINK 0x16

/* Signal code used to route libea memory-management assisted calls
   (calloc/free/malloc/realloc/posix_memalign) to the PPE.  */
#define JSRE_LIBEA_SIGNALCODE 0x2105
#define SPE_LIBEA_CALLOC 0x01
#define SPE_LIBEA_FREE 0x02
#define SPE_LIBEA_MALLOC 0x03
#define SPE_LIBEA_REALLOC 0x04
#define SPE_LIBEA_POSIX_MEMALIGN 0x05
56
/* Padding element counts -- presumably the number of filler words
   needed to pad an int / a long out to a 16-byte (quadword) argument
   slot; in __EA64__ mode a long is 8 bytes so only 2 pad words are
   needed.  TODO(review): confirm against the syscall marshalling
   code.  */
#define PAD_INT 3
#ifdef __EA64__
#define PAD_LONG 2
#else /* 32 bit */
#define PAD_LONG 3
#endif

/* Round X up to the NEXT multiple of 128: an already 128-byte-aligned
   value still advances by a full 128 bytes (this is not the usual
   "+127 then mask" round-up).  */
#define ROUND_UP_NEXT_128(x) (((x) + 128) & (~127))
/* Round X down to a multiple of 128.  */
#define ROUND_DOWN_128(x) ((x) & (~127))

/* Macro that generates an __ea alias: exports NAME additionally as
   __NAME64 or __NAME32 depending on the pointer-size configuration.  */
#ifdef __EA64__
#define COMPAT_EA_ALIAS(name) __asm__(".global\t__" #name "64\n\t.set\t__" #name "64," #name)
#else
#define COMPAT_EA_ALIAS(name) __asm__(".global\t__" #name "32\n\t.set\t__" #name "32," #name)
#endif
73
round_down_128_ea(__ea void * x)74 static inline __ea void* round_down_128_ea(__ea void* x)
75 {
76 size_ea_t tmp = (size_ea_t) x;
77 tmp &= (~127);
78 return (__ea void*)tmp;
79 }
80
81 static
round_up_next_128_ea(__ea void * x)82 inline __ea void* round_up_next_128_ea(__ea void* x)
83 {
84 size_ea_t tmp = (size_ea_t) x;
85 tmp += 128;
86 tmp &= (~127);
87 return (__ea void*)tmp;
88 }
89
/* Fetch-and-dirty the entire 128-byte cached block containing X.
   X is evaluated exactly once (round_down_128_ea is a real function,
   not a macro).  */
#define __cache_fetch_dirty_all(x) \
	__cache_fetch_dirty(round_down_128_ea(x), 128)
92
/* Return the smallest of X, Y and Z.
   (Rewritten from the original nested-if pyramid, which the previous
   author had flagged with "please optimize, this hurts my eyes";
   behavior is unchanged.)  */
static inline size_t
three_way_min(size_t x, size_t y, size_t z)
{
  size_t min_xy = (x < y) ? x : y;
  return (min_xy < z) ? min_xy : z;
}
108
/* Reinterpret an __ea void* as an unsigned long WITHOUT a pointer
   conversion: the "0" constraint ties input and output to the same
   register, and the asm is just a comment, so the bits are handed to
   the compiler unchanged under a new type.  Uses GCC statement
   expressions.  */
#undef eavoid_to_ul
#define eavoid_to_ul(X) ({ \
	unsigned long _y; \
	__asm__ ("# %0 %1" : "=r" (_y) : "0" (X)); \
	_y; \
})

/* Same trick as eavoid_to_ul, but reinterpreting as a 64-bit
   unsigned long long.  */
#undef eavoid_to_ull
#define eavoid_to_ull(X) ({ \
	unsigned long long _y; \
	__asm__ ("# %0 %1" : "=r" (_y) : "0" (X)); \
	_y; \
})
122
/* Inverse of eavoid_to_ull: reinterpret an integer as an __ea void*
   via the same tie-input-to-output asm trick.  */
#ifdef __EA32__
#undef ull_to_eavoid
#define ull_to_eavoid(X) ({ \
	__ea void* _y; \
	unsigned long long X2; \
	(X2) = (X) << 32;\
	__asm__ ("# %0 %1" : "=r" (_y) : "0" (X2)); \
	_y; \
})
/* NOTE(review): the << 32 presumably moves the 32-bit EA into the
   word of the register where __ea pointers live in the 32-bit ABI --
   confirm; if X is ever a 32-bit type the shift is undefined
   behavior, so callers must pass a 64-bit value.  Also note the
   fixed identifier X2 would collide with a caller argument named
   X2.  */
#else /*__EA64__*/
#define ull_to_eavoid(X) ({ \
	__ea void* _y; \
	__asm__ ("# %0 %1" : "=r" (_y) : "0" (X)); \
	_y; \
})
#endif

/* Reinterpret an unsigned long as an __ea void*, no shifting in
   either configuration.  */
#undef ul_to_eavoid
#define ul_to_eavoid(X) ({ \
	__ea void* _y; \
	__asm__ ("# %0 %1" : "=r" (_y) : "0" (X)); \
	_y; \
})
146
147 #endif /*__EA_INTERNAL_H*/
148