/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 */
#include <asm/inst.h>

struct pt_regs;

/*
 * We don't allow single-stepping an mtmsrd that would clear
 * MSR_RI, since that would make the exception unrecoverable.
 * Since we need to single-step to proceed from a breakpoint,
 * we don't allow putting a breakpoint on an mtmsrd instruction.
 * Similarly we don't allow breakpoints on rfid instructions.
 * These macros tell us whether an instruction is an mtmsrd or an rfid.
 * Note that they return true for both mtmsr/rfi (32-bit)
 * and mtmsrd/rfid (64-bit).
 */
#define IS_MTMSRD(instr)	((ppc_inst_val(instr) & 0xfc0007be) == 0x7c000124)
#define IS_RFID(instr)		((ppc_inst_val(instr) & 0xfc0007be) == 0x4c000024)
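
/*
 * Illustrative sketch (not part of this interface): a breakpoint or
 * probe setup path could use these macros to refuse instrumenting an
 * instruction it cannot safely single-step over, roughly:
 *
 *	ppc_inst_t insn = ppc_inst_read(addr);
 *
 *	if (IS_MTMSRD(insn) || IS_RFID(insn))
 *		return -EINVAL;
 *
 * The fetch helper (ppc_inst_read() here) is an assumption; the exact
 * call depends on the caller's context.
 */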

enum instruction_type {
	COMPUTE,		/* arith/logical/CR op, etc. */
	LOAD,			/* load and store types need to be contiguous */
	LOAD_MULTI,
	LOAD_FP,
	LOAD_VMX,
	LOAD_VSX,
	STORE,
	STORE_MULTI,
	STORE_FP,
	STORE_VMX,
	STORE_VSX,
	LARX,
	STCX,
	BRANCH,
	MFSPR,
	MTSPR,
	CACHEOP,
	BARRIER,
	SYSCALL,
	SYSCALL_VECTORED_0,
	MFMSR,
	MTMSR,
	RFI,
	INTERRUPT,
	UNKNOWN
};

#define INSTR_TYPE_MASK	0x1f

#define OP_IS_LOAD(type)	((LOAD <= (type) && (type) <= LOAD_VSX) || (type) == LARX)
#define OP_IS_STORE(type)	((STORE <= (type) && (type) <= STORE_VSX) || (type) == STCX)
#define OP_IS_LOAD_STORE(type)	(LOAD <= (type) && (type) <= STCX)
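
/*
 * Illustrative sketch: these helpers expect the bare type value, so a
 * caller typically masks the flag/size bits off op->type first (the
 * names below are hypothetical):
 *
 *	int type = op->type & INSTR_TYPE_MASK;
 *
 *	if (OP_IS_LOAD_STORE(type))
 *		handle_memory_access(&op);
 *
 * This relies on the LOAD..STCX enum values above being contiguous.
 */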

/* Compute flags, ORed in with type */
#define SETREG		0x20
#define SETCC		0x40
#define SETXER		0x80

/* Branch flags, ORed in with type */
#define SETLK		0x20
#define BRTAKEN		0x40
#define DECCTR		0x80

/* Load/store flags, ORed in with type */
#define SIGNEXT		0x20
#define UPDATE		0x40	/* matches bit in opcode 31 instructions */
#define BYTEREV		0x80
#define FPCONV		0x100

/* Barrier type field, ORed in with type */
#define BARRIER_MASK	0xe0
#define BARRIER_SYNC	0x00
#define BARRIER_ISYNC	0x20
#define BARRIER_EIEIO	0x40
#define BARRIER_LWSYNC	0x60
#define BARRIER_PTESYNC	0x80

/* Cacheop values, ORed in with type */
#define CACHEOP_MASK	0x700
#define DCBST		0
#define DCBF		0x100
#define DCBTST		0x200
#define DCBT		0x300
#define ICBI		0x400
#define DCBZ		0x500

/* VSX flags values */
#define VSX_FPCONV	1	/* do floating point SP/DP conversion */
#define VSX_SPLAT	2	/* store loaded value into all elements */
#define VSX_LDLEFT	4	/* load VSX register from left */
#define VSX_CHECK_VEC	8	/* check MSR_VEC not MSR_VSX for reg >= 32 */

/* Prefixed flag, ORed in with type */
#define PREFIXED       0x800

/* Size field in type word */
#define SIZE(n)		((n) << 12)
#define GETSIZE(w)	((w) >> 12)

#define GETTYPE(t)	((t) & INSTR_TYPE_MASK)
#define GETLENGTH(t)   (((t) & PREFIXED) ? 8 : 4)

#define MKOP(t, f, s)	((t) | (f) | SIZE(s))
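
/*
 * Illustrative sketch: the type word packs an instruction_type value,
 * the flag bits above and an access size.  For example, a 4-byte
 * sign-extending load with update could be described as
 *
 *	op->type = MKOP(LOAD, SIGNEXT | UPDATE, 4);
 *
 * and later unpacked with GETTYPE(op->type) and GETSIZE(op->type).
 * The encoding shown is only an example of how the macros compose,
 * not a statement about any particular opcode.
 */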

/* Prefix instruction operands */
#define GET_PREFIX_RA(i)	(((i) >> 16) & 0x1f)
#define GET_PREFIX_R(i)		((i) & (1ul << 20))
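
/*
 * Illustrative sketch: for a prefixed (8-byte) instruction consisting
 * of a prefix word and a suffix word, a decoder would typically take
 * the R bit from the prefix and the RA field from the suffix (the
 * variable names here are hypothetical):
 *
 *	bool r = GET_PREFIX_R(prefix);
 *	unsigned int ra = GET_PREFIX_RA(suffix);
 */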

extern s32 patch__exec_instr;

struct instruction_op {
	int type;
	int reg;
	unsigned long val;
	/* For LOAD/STORE/LARX/STCX */
	unsigned long ea;
	int update_reg;
	/* For MFSPR */
	int spr;
	u32 ccval;
	u32 xerval;
	u8 element_size;	/* for VSX/VMX loads/stores */
	u8 vsx_flags;
};

union vsx_reg {
	u8	b[16];
	u16	h[8];
	u32	w[4];
	unsigned long d[2];
	float	fp[4];
	double	dp[2];
	__vector128 v;
};

/*
 * Decode an instruction, and return information about it in *op
 * without changing *regs.
 *
 * Return value is 1 if the instruction can be emulated just by
 * updating *regs with the information in *op, -1 if we need the
 * GPRs but *regs doesn't contain the full register set, or 0
 * otherwise.
 */
extern int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
			 ppc_inst_t instr);

/*
 * Emulate an instruction that can be executed just by updating
 * fields in *regs.
 */
void emulate_update_regs(struct pt_regs *reg, struct instruction_op *op);
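
/*
 * Illustrative sketch of the intended calling pattern (the surrounding
 * error handling and caller context are assumptions):
 *
 *	struct instruction_op op;
 *	int r = analyse_instr(&op, regs, instr);
 *
 *	if (r == 1) {
 *		emulate_update_regs(regs, &op);
 *		return 0;
 *	}
 *
 * For r == 0 the caller acts on the decoded op itself (for instance
 * via emulate_loadstore() below); for r == -1 it must fall back, since
 * the full GPR set is not available in *regs.
 */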

/*
 * Emulate instructions that cause a transfer of control,
 * arithmetic/logical instructions, loads and stores,
 * cache operations and barriers.
 *
 * Returns 1 if the instruction was emulated successfully,
 * 0 if it could not be emulated, or -1 for an instruction that
 * should not be emulated (rfid, mtmsrd clearing MSR_RI, etc.).
 */
int emulate_step(struct pt_regs *regs, ppc_inst_t instr);
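
/*
 * Illustrative sketch: a single-step path can try emulation first and
 * fall back to executing the instruction for real only when
 * emulate_step() declines.  The surrounding logic here is an
 * assumption, not part of this API:
 *
 *	if (emulate_step(regs, instr) > 0)
 *		return;		// emulated; *regs (including NIP) updated
 *
 *	// otherwise arrange to single-step the real instruction
 */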

/*
 * Emulate a load or store instruction by reading/writing the
 * memory of the current process.  FP/VMX/VSX registers are assumed
 * to hold live values if the appropriate enable bit in regs->msr is
 * set; otherwise this will use the saved values in the thread struct
 * for user-mode accesses.
 */
extern int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op);

extern void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
			     const void *mem, bool cross_endian);
extern void emulate_vsx_store(struct instruction_op *op,
			      const union vsx_reg *reg, void *mem,
			      bool cross_endian);
extern int emulate_dcbz(unsigned long ea, struct pt_regs *regs);