/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_VM86_H
#define _ASM_X86_VM86_H

#include <asm/ptrace.h>
#include <uapi/asm/vm86.h>

/*
 * This is the (kernel) stack layout when we have done a "SAVE_ALL" from vm86
 * mode - the main change is that the old segment registers aren't
 * useful any more and are forced to be zero by the kernel (and by the
 * hardware when a trap occurs), and the real segment registers are
 * at the end of the structure.  Look at ptrace.h to see the "normal"
 * setup.  For the user-space layout see 'struct vm86_regs' in
 * <uapi/asm/vm86.h>.
 */

struct kernel_vm86_regs {
	/*
	 * normal regs, with special meaning for the segment registers:
	 */
	struct pt_regs pt;
	/*
	 * these are specific to v86 mode:
	 */
	unsigned short es, __esh;
	unsigned short ds, __dsh;
	unsigned short fs, __fsh;
	unsigned short gs, __gsh;
};
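
/*
 * Illustrative example (not part of the kernel API; the helper name is
 * made up for this sketch): the real-mode CS:IP still lives in the
 * ordinary pt_regs slots above, so the linear address of the trapping
 * instruction can be recovered the real-mode way, (CS << 4) + IP:
 *
 *	static inline unsigned long vm86_trap_linear_ip(struct kernel_vm86_regs *regs)
 *	{
 *		return ((unsigned long)(regs->pt.cs & 0xffff) << 4) +
 *		       (regs->pt.ip & 0xffff);
 *	}
 */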

struct vm86 {
	struct vm86plus_struct __user *user_vm86;
	struct pt_regs regs32;
	unsigned long veflags;
	unsigned long veflags_mask;
	unsigned long saved_sp0;

	unsigned long flags;
	unsigned long screen_bitmap;
	unsigned long cpu_type;
	struct revectored_struct int_revectored;
	struct revectored_struct int21_revectored;
	struct vm86plus_info_struct vm86plus;
};
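
/*
 * Lifecycle sketch (an assumption for illustration, not the actual
 * syscall code): one of these is allocated per task the first time it
 * enters vm86 mode and is reached through thread_struct, which is what
 * free_vm86() below tears down again.  Roughly:
 *
 *	struct vm86 *vm86 = current->thread.vm86;
 *
 *	if (!vm86) {
 *		vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL);
 *		if (!vm86)
 *			return -ENOMEM;
 *		current->thread.vm86 = vm86;
 *	}
 */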

#ifdef CONFIG_VM86
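
/*
 * Entry points implemented in arch/x86/kernel/vm86_32.c:
 * handle_vm86_fault() emulates the instruction that raised a #GP while
 * in vm86 mode, handle_vm86_trap() deals with traps taken in vm86 mode,
 * and save_v86_state() copies the vm86 register state back to user
 * space before resuming normal 32-bit execution.
 */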
void handle_vm86_fault(struct kernel_vm86_regs *, long);
int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
void save_v86_state(struct kernel_vm86_regs *, int);

struct task_struct;
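
/*
 * Free the per-thread vm86 state, if any, and clear the pointer.  Kept
 * as a macro so that the struct thread_struct layout is only needed at
 * the expansion site, not in this header.
 */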
#define free_vm86(t) do {				\
	struct thread_struct *__t = (t);		\
	if (__t->vm86 != NULL) {			\
		kfree(__t->vm86);			\
		__t->vm86 = NULL;			\
	}						\
} while (0)

/*
 * Support for VM86 programs to request interrupts for
 * real mode hardware drivers:
 */
#define FIRST_VM86_IRQ		3
#define LAST_VM86_IRQ		15

static inline int invalid_vm86_irq(int irq)
{
	return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ;
}
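
/*
 * Usage sketch (hypothetical caller, not kernel code): a handler for a
 * VM86_REQUEST_IRQ-style request would reject out-of-range IRQ numbers
 * up front:
 *
 *	if (invalid_vm86_irq(irqnumber))
 *		return -EPERM;
 */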

void release_vm86_irqs(struct task_struct *);

#else

#define handle_vm86_fault(a, b)
#define release_vm86_irqs(a)

static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
{
	return 0;
}

static inline void save_v86_state(struct kernel_vm86_regs *a, int b) { }

#define free_vm86(t) do { } while (0)

#endif /* CONFIG_VM86 */

#endif /* _ASM_X86_VM86_H */