/*
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_ARCH_X86_IA32_SEGMENTATION_H_
#define ZEPHYR_INCLUDE_ARCH_X86_IA32_SEGMENTATION_H_

#include <zephyr/types.h>

/* Host gen_idt uses this header as well, don't depend on toolchain.h */
#ifndef __packed
#define __packed __attribute__((packed))
#endif

#ifdef __cplusplus
extern "C" {
#endif

/* NOTE: There are currently no definitions for 16-bit segments; everything
 * here assumes 32-bit operation.
 */

#define SEG_TYPE_LDT		0x2
#define SEG_TYPE_TASK_GATE	0x5
#define SEG_TYPE_TSS		0x9
#define SEG_TYPE_TSS_BUSY	0xB
#define SEG_TYPE_CALL_GATE	0xC
#define SEG_TYPE_IRQ_GATE	0xE
#define SEG_TYPE_TRAP_GATE	0xF

#define DT_GRAN_BYTE	0
#define DT_GRAN_PAGE	1

#define DT_READABLE	1
#define DT_NON_READABLE	0

#define DT_WRITABLE	1
#define DT_NON_WRITABLE	0

#define DT_EXPAND_DOWN	1
#define DT_EXPAND_UP	0

#define DT_CONFORM	1
#define DT_NONCONFORM	0

#define DT_TYPE_SYSTEM		0
#define DT_TYPE_CODEDATA	1

#ifndef _ASMLANGUAGE

/* Section 7.2.1 of IA architecture SW developer manual, Vol 3. */
struct __packed task_state_segment {
	uint16_t backlink;
	uint16_t reserved_1;
	uint32_t esp0;
	uint16_t ss0;
	uint16_t reserved_2;
	uint32_t esp1;
	uint16_t ss1;
	uint16_t reserved_3;
	uint32_t esp2;
	uint16_t ss2;
	uint16_t reserved_4;
	uint32_t cr3;
	uint32_t eip;
	uint32_t eflags;
	uint32_t eax;
	uint32_t ecx;
	uint32_t edx;
	uint32_t ebx;
	uint32_t esp;
	uint32_t ebp;
	uint32_t esi;
	uint32_t edi;
	uint16_t es;
	uint16_t reserved_5;
	uint16_t cs;
	uint16_t reserved_6;
	uint16_t ss;
	uint16_t reserved_7;
	uint16_t ds;
	uint16_t reserved_8;
	uint16_t fs;
	uint16_t reserved_9;
	uint16_t gs;
	uint16_t reserved_10;
	uint16_t ldt_ss;
	uint16_t reserved_11;
	uint8_t t:1;		/* Trap bit */
	uint16_t reserved_12:15;
	uint16_t iomap;
};
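
/* Illustrative sanity check (not part of the original header): per the SDM
 * layout above, the 32-bit TSS occupies 26 dwords, i.e. 104 bytes. A sketch
 * of how this could be verified at build time with C11:
 *
 *   _Static_assert(sizeof(struct task_state_segment) == 104,
 *                  "32-bit TSS must be 104 bytes");
 */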

#define SEG_SELECTOR(index, table, dpl) (((index) << 3) | ((table) << 2) | (dpl))
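
/* Illustrative examples (assuming a conventional GDT layout where entry 1 is
 * kernel code and entry 2 is kernel data, which this header does not mandate):
 *
 *   SEG_SELECTOR(1, 0, 0)  ->  0x08  (GDT entry 1, RPL 0)
 *   SEG_SELECTOR(2, 0, 0)  ->  0x10  (GDT entry 2, RPL 0)
 *   SEG_SELECTOR(2, 0, 3)  ->  0x13  (same entry, RPL 3)
 *
 * Bit 2 is the table indicator: 0 selects the GDT, 1 selects the current LDT.
 */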

/* References
 *
 * Section 5.8.3 (Call gates)
 * Section 7.2.2 (TSS Descriptor)
 * Section 3.4.5 (Segment descriptors)
 * Section 6.11 (IDT Descriptors)
 *
 * IA architecture SW developer manual, Vol 3.
 */
struct __packed segment_descriptor {

	/* First DWORD: 0-15 */
	union {
		/* IRQ, call, trap gates */
		uint16_t limit_low;

		/* Task gates */
		uint16_t reserved_task_gate_0;

		/* Everything else */
		uint16_t offset_low;
	};

	/* First DWORD: 16-31 */
	union {
		/* Call/Task/Interrupt/Trap gates */
		uint16_t segment_selector;

		/* TSS/LDT/Segments */
		uint16_t base_low;	/* Bits 0-15 */
	};

	/* Second DWORD: 0-7 */
	union {
		/* TSS/LDT/Segments */
		uint8_t base_mid;	/* Bits 16-23 */

		/* Task gates */
		uint8_t reserved_task_gate_1;

		/* IRQ/Trap/Call Gates */
		struct {
			/* Reserved except in case of call gates */
			uint8_t reserved_or_param:5;

			/* Bits 5-7 0 0 0 per CPU manual */
			uint8_t always_0_0:3;
		};
	};

	/* Second DWORD: 8-15 */
	union {
		/* Code or data Segments */
		struct {
			/* Set by the processor, init to 0 */
			uint8_t accessed:1;

			/* executable ? readable : writable */
			uint8_t rw:1;
			/* executable ? conforming : direction */
			uint8_t cd:1;
			/* 1=code 0=data */
			uint8_t executable:1;

			/* Next 3 fields actually common to all */

			/* 1=code or data, 0=system type */
			uint8_t descriptor_type:1;

			uint8_t dpl:2;
			uint8_t present:1;
		};

		/* System types */
		struct {
			/* One of the SEG_TYPE_* macros above */
			uint8_t type:4;

			/* Alas, C doesn't let you do a union of the first
			 * 4 bits of a bitfield and put the rest outside of it,
			 * it ends up getting padded.
			 */
			uint8_t use_other_union:4;
		};
	};

	/* Second DWORD: 16-31 */
	union {
		/* Call/IRQ/trap gates */
		uint16_t offset_hi;

		/* Task Gates */
		uint16_t reserved_task_gate_2;

		/* segment/LDT/TSS */
		struct {
			uint8_t limit_hi:4;

			/* flags */
			uint8_t avl:1;		/* CPU ignores this */

			/* 1=Indicates 64-bit code segment in IA-32e mode */
			uint8_t flags_l:1; /* L field */

			uint8_t db:1; /* D/B field: 1=32-bit, 0=16-bit */
			uint8_t granularity:1;

			uint8_t base_hi;	/* Bits 24-31 */
		};
	};

};
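
/* Illustrative sanity check (not part of the original header): every variant
 * above must overlay the same 8-byte descriptor. A sketch of a build-time
 * check using C11:
 *
 *   _Static_assert(sizeof(struct segment_descriptor) == 8,
 *                  "descriptors are always 8 bytes on IA-32");
 */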


/* The address of this structure is what gets passed to lidt/lgdt.
 * The IA manual calls it a 'pseudo descriptor'.
 */
struct __packed pseudo_descriptor {
	uint16_t size;
	struct segment_descriptor *entries;
};


/*
 * Full far pointer (segment selector + offset), used for far jumps/calls
 */
struct __packed far_ptr {
	/** Far pointer offset, unused when invoking a task. */
	void *offset;
	/** Far pointer segment/gate selector. */
	uint16_t sel;
};
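
/* Illustrative sketch (assumptions throughout, not part of the original
 * header): a far_ptr can be handed to an indirect far jump, e.g. to dispatch
 * through a TSS or task-gate descriptor, in which case the offset is ignored.
 * Here GDT entry 5 is assumed to hold such a descriptor:
 *
 *   struct far_ptr tss_ptr = {
 *           .offset = NULL,
 *           .sel = SEG_SELECTOR(5, 0, 0),
 *   };
 *
 *   __asm__ volatile ("ljmp *%0" :: "m" (tss_ptr));
 */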


#define DT_ZERO_ENTRY { { 0 } }

/* NOTE: the below macros only work for fixed addresses provided at build time.
 * Base addresses or offsets cannot be &some_variable, as pointer values are not
 * known until link time and the compiler has to split the address into the
 * various fields of the segment descriptor well before that.
 *
 * If you really need to put &some_variable as the base address in some
 * segment descriptor, you will either need to do the assignment at runtime
 * or implement some tool to populate values post-link like gen_idt does.
 */
#define _LIMIT_AND_BASE(base_p, limit_p, granularity_p) \
	.base_low = (((uint32_t)base_p) & 0xFFFF), \
	.base_mid = (((base_p) >> 16) & 0xFF), \
	.base_hi = (((base_p) >> 24) & 0xFF), \
	.limit_low = ((limit_p) & 0xFFFF), \
	.limit_hi = (((limit_p) >> 16) & 0xF), \
	.granularity = (granularity_p), \
	.flags_l = 0, \
	.db = 1, \
	.avl = 0

#define _SEGMENT_AND_OFFSET(segment_p, offset_p) \
	.segment_selector = (segment_p), \
	.offset_low = ((offset_p) & 0xFFFF), \
	.offset_hi = ((offset_p) >> 16)

#define _DESC_COMMON(dpl_p) \
	.dpl = (dpl_p), \
	.present = 1

#define _SYS_DESC(type_p) \
	.type = type_p, \
	.descriptor_type = 0

#define DT_CODE_SEG_ENTRY(base_p, limit_p, granularity_p, dpl_p, readable_p, \
		       conforming_p) \
	{ \
		_DESC_COMMON(dpl_p), \
		_LIMIT_AND_BASE(base_p, limit_p, granularity_p), \
		.accessed = 0, \
		.rw = (readable_p), \
		.cd = (conforming_p), \
		.executable = 1, \
		.descriptor_type = 1 \
	}

#define DT_DATA_SEG_ENTRY(base_p, limit_p, granularity_p, dpl_p, writable_p, \
		       direction_p) \
	{ \
		_DESC_COMMON(dpl_p), \
		_LIMIT_AND_BASE(base_p, limit_p, granularity_p), \
		.accessed = 0, \
		.rw = (writable_p), \
		.cd = (direction_p), \
		.executable = 0, \
		.descriptor_type = 1 \
	}
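
/* Illustrative sketch (not part of the original header): flat 4 GiB ring-0
 * code and data descriptors built with the macros above. With base 0, a
 * 0xFFFFF limit and page granularity these correspond to the classic
 * 0x00CF9A000000FFFF / 0x00CF92000000FFFF raw encodings.
 *
 *   DT_CODE_SEG_ENTRY(0, 0xFFFFF, DT_GRAN_PAGE, 0, DT_READABLE, DT_NONCONFORM)
 *   DT_DATA_SEG_ENTRY(0, 0xFFFFF, DT_GRAN_PAGE, 0, DT_WRITABLE, DT_EXPAND_UP)
 */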

#define DT_LDT_ENTRY(base_p, limit_p, granularity_p, dpl_p) \
	{ \
		_DESC_COMMON(dpl_p), \
		_LIMIT_AND_BASE(base_p, limit_p, granularity_p), \
		_SYS_DESC(SEG_TYPE_LDT) \
	}

#define DT_TSS_ENTRY(base_p, limit_p, granularity_p, dpl_p) \
	{ \
		_DESC_COMMON(dpl_p), \
		_LIMIT_AND_BASE(base_p, limit_p, granularity_p), \
		_SYS_DESC(SEG_TYPE_TSS) \
	}

/* "standard" TSS segments that don't stuff extra data past the end of the
 * TSS struct
 */
#define DT_TSS_STD_ENTRY(base_p, dpl_p) \
	DT_TSS_ENTRY(base_p, sizeof(struct task_state_segment), DT_GRAN_BYTE, \
		     dpl_p)

#define DT_TASK_GATE_ENTRY(segment_p, dpl_p) \
	{ \
		_DESC_COMMON(dpl_p), \
		_SYS_DESC(SEG_TYPE_TASK_GATE), \
		.segment_selector = (segment_p) \
	}

#define DT_IRQ_GATE_ENTRY(segment_p, offset_p, dpl_p) \
	{ \
		_DESC_COMMON(dpl_p), \
		_SEGMENT_AND_OFFSET(segment_p, offset_p), \
		_SYS_DESC(SEG_TYPE_IRQ_GATE), \
		.always_0_0 = 0 \
	}

#define DT_TRAP_GATE_ENTRY(segment_p, offset_p, dpl_p) \
	{ \
		_DESC_COMMON(dpl_p), \
		_SEGMENT_AND_OFFSET(segment_p, offset_p), \
		_SYS_DESC(SEG_TYPE_TRAP_GATE), \
		.always_0_0 = 0 \
	}

#define DT_CALL_GATE_ENTRY(segment_p, offset_p, dpl_p, param_count_p) \
	{ \
		_DESC_COMMON(dpl_p), \
		_SEGMENT_AND_OFFSET(segment_p, offset_p), \
		_SYS_DESC(SEG_TYPE_CALL_GATE), \
		.reserved_or_param = (param_count_p), \
		.always_0_0 = 0 \
	}

#define DTE_BASE(dt_entry) ((dt_entry)->base_low | \
			    ((dt_entry)->base_mid << 16) | \
			    ((dt_entry)->base_hi << 24))

#define DTE_LIMIT(dt_entry) ((dt_entry)->limit_low | \
			     ((dt_entry)->limit_hi << 16))

#define DTE_OFFSET(dt_entry) ((dt_entry)->offset_low | \
			      ((dt_entry)->offset_hi << 16))

#define DT_INIT(entries) { sizeof(entries) - 1, &entries[0] }
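
/* Illustrative sketch (not part of the original header): a minimal static
 * GDT built from the entry macros and wrapped in a pseudo descriptor with
 * DT_INIT. The flat code/data parameters mirror the example above; the array
 * name is purely hypothetical.
 *
 *   static struct segment_descriptor my_gdt[] = {
 *           DT_ZERO_ENTRY,
 *           DT_CODE_SEG_ENTRY(0, 0xFFFFF, DT_GRAN_PAGE, 0, DT_READABLE,
 *                             DT_NONCONFORM),
 *           DT_DATA_SEG_ENTRY(0, 0xFFFFF, DT_GRAN_PAGE, 0, DT_WRITABLE,
 *                             DT_EXPAND_UP),
 *   };
 *
 *   static struct pseudo_descriptor my_gdt_ptr = DT_INIT(my_gdt);
 */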

#ifdef CONFIG_SET_GDT
/* This is either the ROM-based GDT in crt0.S or generated by gen_gdt.py,
 * depending on CONFIG_GDT_DYNAMIC
 */
extern struct pseudo_descriptor _gdt;
#endif

extern const struct pseudo_descriptor z_idt;

/**
 * Set the segment selector and offset fields of a segment descriptor
 *
 * Used for call/interrupt/trap gates
 *
 * @param sd Segment descriptor
 * @param segment_selector Segment selector
 * @param offset Offset within segment
 */
static inline void z_sd_set_seg_offset(struct segment_descriptor *sd,
				      uint16_t segment_selector,
				      uint32_t offset)
{
	sd->offset_low = offset & 0xFFFFU;
	sd->offset_hi = offset >> 16U;
	sd->segment_selector = segment_selector;
	sd->always_0_0 = 0U;
}


/**
 * Initialize a segment descriptor as a 32-bit IRQ gate
 *
 * @param sd Segment descriptor memory
 * @param seg_selector Segment selector of handler
 * @param offset Offset of handler
 * @param dpl Descriptor privilege level
 */
static inline void z_init_irq_gate(struct segment_descriptor *sd,
				  uint16_t seg_selector, uint32_t offset,
				  uint32_t dpl)
{
	z_sd_set_seg_offset(sd, seg_selector, offset);
	sd->dpl = dpl;
	sd->descriptor_type = DT_TYPE_SYSTEM;
	sd->present = 1U;
	sd->type = SEG_TYPE_IRQ_GATE;
}
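
/* Illustrative sketch (not part of the original header): populating an IDT
 * entry at runtime. The IDT array and handler symbol below are hypothetical;
 * real Zephyr IDT entries are normally generated at build time by gen_idt.
 *
 *   extern struct segment_descriptor my_idt[256];
 *   extern void my_handler(void);
 *
 *   z_init_irq_gate(&my_idt[32], _get_cs(), (uint32_t)my_handler, 0);
 */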

/**
 * Set the TSS for the current IA task
 *
 * @param sel Segment selector in GDT for desired TSS
 */
static inline void _set_tss(uint16_t sel)
{
	__asm__ __volatile__ ("ltr %0" :: "r" (sel));
}
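
/* Illustrative usage (an assumption, not from the original header): if GDT
 * entry 5 holds a TSS descriptor (e.g. built with DT_TSS_STD_ENTRY), the task
 * register can be loaded with:
 *
 *   _set_tss(SEG_SELECTOR(5, 0, 0));
 */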


/**
 * Get the TSS segment selector in the GDT for the current IA task
 *
 * @return Segment selector for current IA task
 */
static inline uint16_t _get_tss(void)
{
	uint16_t sel;

	__asm__ __volatile__ ("str %0" : "=r" (sel));
	return sel;
}


/**
 * Get the current global descriptor table
 *
 * @param gdt Pointer to memory to receive GDT pseudo descriptor information
 */
static inline void _get_gdt(struct pseudo_descriptor *gdt)
{
	__asm__ __volatile__ ("sgdt %0" : "=m" (*gdt));
}
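
/* Illustrative sketch (not part of the original header): reading back the
 * live GDT and deriving the number of entries. SGDT stores the limit, which
 * is the table size in bytes minus one.
 *
 *   struct pseudo_descriptor cur;
 *   unsigned int num_entries;
 *
 *   _get_gdt(&cur);
 *   num_entries = (cur.size + 1U) / sizeof(struct segment_descriptor);
 */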


/**
 * Get the current interrupt descriptor table
 *
 * @param idt Pointer to memory to receive IDT pseudo descriptor information
 */
static inline void _get_idt(struct pseudo_descriptor *idt)
{
	__asm__ __volatile__ ("sidt %0" : "=m" (*idt));
}


/**
 * Get the current local descriptor table (LDT)
 *
 * @return Segment selector in the GDT for the current LDT
 */
static inline uint16_t _get_ldt(void)
{
	uint16_t ret;

	__asm__ __volatile__ ("sldt %0" : "=m" (ret));
	return ret;
}


/**
 * Set the local descriptor table for the current IA Task
 *
 * @param ldt Segment selector in the GDT for an LDT
 */
static inline void _set_ldt(uint16_t ldt)
{
	__asm__ __volatile__ ("lldt %0" :: "m" (ldt));
}

/**
 * Set the global descriptor table
 *
 * After changing the GDT you will most likely need to reload the data segment
 * registers and perform a far jump or call so that CS is reloaded from the
 * new table.
 *
 * @param gdt Pointer to GDT pseudo descriptor.
 */
static inline void _set_gdt(const struct pseudo_descriptor *gdt)
{
	__asm__ __volatile__ ("lgdt %0" :: "m" (*gdt));
}
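
/* Illustrative sketch (assumptions throughout, not part of the original
 * header): switching to the GDT sketched earlier, where entry 1 is assumed to
 * be kernel code and entry 2 kernel data, then reloading the segment
 * registers.
 *
 *   _set_gdt(&my_gdt_ptr);
 *   __asm__ volatile ("ljmp %0, $1f\n\t"
 *                     "1:\n\t"
 *                     "movw %1, %%ds\n\t"
 *                     "movw %1, %%es\n\t"
 *                     "movw %1, %%ss\n\t"
 *                     :: "i" (SEG_SELECTOR(1, 0, 0)),
 *                        "r" ((uint16_t)SEG_SELECTOR(2, 0, 0))
 *                     : "memory");
 */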


/**
 * Set the interrupt descriptor table
 *
 * @param idt Pointer to IDT pseudo descriptor.
 */
static inline void z_set_idt(const struct pseudo_descriptor *idt)
{
	__asm__ __volatile__ ("lidt %0" :: "m" (*idt));
}


/**
 * Get the segment selector for the current code segment
 *
 * @return Segment selector
 */
static inline uint16_t _get_cs(void)
{
	uint16_t cs = 0U;

	__asm__ __volatile__ ("mov %%cs, %0" : "=r" (cs));
	return cs;
}


/**
 * Get the segment selector for the current data segment
 *
 * @return Segment selector
 */
static inline uint16_t _get_ds(void)
{
	uint16_t ds = 0U;

	__asm__ __volatile__ ("mov %%ds, %0" : "=r" (ds));
	return ds;
}


#endif /* _ASMLANGUAGE */

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_ARCH_X86_IA32_SEGMENTATION_H_ */