1  /* SPDX-License-Identifier: GPL-2.0 */
2  #ifndef _ASM_POWERPC_MMU_8XX_H_
3  #define _ASM_POWERPC_MMU_8XX_H_
4  /*
5   * PPC8xx support
6   */
7  
8  /* Control/status registers for the MPC8xx.
9   * A write operation to these registers causes serialized access.
10   * During software tablewalk, the registers used perform mask/shift-add
11   * operations when written/read.  A TLB entry is created when the Mx_RPN
12   * is written, and the contents of several registers are used to
13   * create the entry.
14   */
15  #define SPRN_MI_CTR	784	/* Instruction TLB control register */
16  #define MI_GPM		0x80000000	/* Set domain manager mode */
17  #define MI_PPM		0x40000000	/* Set subpage protection */
18  #define MI_CIDEF	0x20000000	/* Set cache inhibit when MMU dis */
19  #define MI_RSV4I	0x08000000	/* Reserve 4 TLB entries */
20  #define MI_PPCS		0x02000000	/* Use MI_RPN prob/priv state */
21  #define MI_IDXMASK	0x00001f00	/* TLB index to be loaded */
22  #define MI_RESETVAL	0x00000000	/* Value of register at reset */
23  
24  /* These are the Ks and Kp from the PowerPC books.  For proper operation,
25   * Ks = 0, Kp = 1.
26   */
27  #define SPRN_MI_AP	786
28  #define MI_Ks		0x80000000	/* Should not be set */
29  #define MI_Kp		0x40000000	/* Should always be set */
30  
31  /*
32   * All pages' PP data bits are set to either 001 or 011 by copying _PAGE_EXEC
33   * into bit 21 in the ITLBmiss handler (bit 21 is the middle bit), which means
34   * respectively NA for All or X for Supervisor and no access for User.
35   * Then we use the APG to say whether accesses are according to Page rules or
36   * "all Supervisor" rules (Access to all)
37   * Therefore, we define 2 APG groups. lsb is _PMD_USER
38   * 0 => Kernel => 01 (all accesses performed according to page definition)
39   * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
40   * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
41   */
42  #define MI_APG_INIT	0x4fffffff
43  
44  /*
45   * 0 => Kernel => 01 (all accesses performed according to page definition)
 * 1 => User => 10 (all accesses performed according to swapped page definition)
47   * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
48   */
49  #define MI_APG_KUEP	0x6fffffff
50  
51  /* The effective page number register.  When read, contains the information
52   * about the last instruction TLB miss.  When MI_RPN is written, bits in
53   * this register are used to create the TLB entry.
54   */
55  #define SPRN_MI_EPN	787
56  #define MI_EPNMASK	0xfffff000	/* Effective page number for entry */
57  #define MI_EVALID	0x00000200	/* Entry is valid */
58  #define MI_ASIDMASK	0x0000000f	/* ASID match value */
59  					/* Reset value is undefined */
60  
61  /* A "level 1" or "segment" or whatever you want to call it register.
62   * For the instruction TLB, it contains bits that get loaded into the
63   * TLB entry when the MI_RPN is written.
64   */
65  #define SPRN_MI_TWC	789
66  #define MI_APG		0x000001e0	/* Access protection group (0) */
67  #define MI_GUARDED	0x00000010	/* Guarded storage */
68  #define MI_PSMASK	0x0000000c	/* Mask of page size bits */
69  #define MI_PS8MEG	0x0000000c	/* 8M page size */
70  #define MI_PS512K	0x00000004	/* 512K page size */
71  #define MI_PS4K_16K	0x00000000	/* 4K or 16K page size */
72  #define MI_SVALID	0x00000001	/* Segment entry is valid */
73  					/* Reset value is undefined */
74  
75  /* Real page number.  Defined by the pte.  Writing this register
76   * causes a TLB entry to be created for the instruction TLB, using
77   * additional information from the MI_EPN, and MI_TWC registers.
78   */
79  #define SPRN_MI_RPN	790
80  #define MI_SPS16K	0x00000008	/* Small page size (0 = 4k, 1 = 16k) */
81  
82  /* Define an RPN value for mapping kernel memory to large virtual
83   * pages for boot initialization.  This has real page number of 0,
84   * large page size, shared page, cache enabled, and valid.
85   * Also mark all subpages valid and write access.
86   */
87  #define MI_BOOTINIT	0x000001fd
88  
89  #define SPRN_MD_CTR	792	/* Data TLB control register */
90  #define MD_GPM		0x80000000	/* Set domain manager mode */
91  #define MD_PPM		0x40000000	/* Set subpage protection */
92  #define MD_CIDEF	0x20000000	/* Set cache inhibit when MMU dis */
93  #define MD_WTDEF	0x10000000	/* Set writethrough when MMU dis */
94  #define MD_RSV4I	0x08000000	/* Reserve 4 TLB entries */
95  #define MD_TWAM		0x04000000	/* Use 4K page hardware assist */
#define MD_PPCS		0x02000000	/* Use MD_RPN prob/priv state */
97  #define MD_IDXMASK	0x00001f00	/* TLB index to be loaded */
98  #define MD_RESETVAL	0x04000000	/* Value of register at reset */
99  
100  #define SPRN_M_CASID	793	/* Address space ID (context) to match */
101  #define MC_ASIDMASK	0x0000000f	/* Bits used for ASID value */
102  
103  
104  /* These are the Ks and Kp from the PowerPC books.  For proper operation,
105   * Ks = 0, Kp = 1.
106   */
107  #define SPRN_MD_AP	794
108  #define MD_Ks		0x80000000	/* Should not be set */
109  #define MD_Kp		0x40000000	/* Should always be set */
110  
111  /*
112   * All pages' PP data bits are set to either 000 or 011 or 001, which means
113   * respectively RW for Supervisor and no access for User, or RO for
114   * Supervisor and no access for user and NA for ALL.
115   * Then we use the APG to say whether accesses are according to Page rules or
116   * "all Supervisor" rules (Access to all)
117   * Therefore, we define 2 APG groups. lsb is _PMD_USER
118   * 0 => Kernel => 01 (all accesses performed according to page definition)
119   * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
120   * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
121   */
122  #define MD_APG_INIT	0x4fffffff
123  
124  /*
125   * 0 => No user => 01 (all accesses performed according to page definition)
 * 1 => User => 10 (all accesses performed according to swapped page definition)
127   * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
128   */
129  #define MD_APG_KUAP	0x6fffffff
130  
131  /* The effective page number register.  When read, contains the information
132   * about the last instruction TLB miss.  When MD_RPN is written, bits in
133   * this register are used to create the TLB entry.
134   */
135  #define SPRN_MD_EPN	795
136  #define MD_EPNMASK	0xfffff000	/* Effective page number for entry */
137  #define MD_EVALID	0x00000200	/* Entry is valid */
138  #define MD_ASIDMASK	0x0000000f	/* ASID match value */
139  					/* Reset value is undefined */
140  
141  /* The pointer to the base address of the first level page table.
142   * During a software tablewalk, reading this register provides the address
143   * of the entry associated with MD_EPN.
144   */
145  #define SPRN_M_TWB	796
146  #define	M_L1TB		0xfffff000	/* Level 1 table base address */
147  #define M_L1INDX	0x00000ffc	/* Level 1 index, when read */
148  					/* Reset value is undefined */
149  
150  /* A "level 1" or "segment" or whatever you want to call it register.
151   * For the data TLB, it contains bits that get loaded into the TLB entry
 * when the MD_RPN is written.  It also provides the hardware assist
153   * for finding the PTE address during software tablewalk.
154   */
155  #define SPRN_MD_TWC	797
156  #define MD_L2TB		0xfffff000	/* Level 2 table base address */
157  #define MD_L2INDX	0xfffffe00	/* Level 2 index (*pte), when read */
158  #define MD_APG		0x000001e0	/* Access protection group (0) */
159  #define MD_GUARDED	0x00000010	/* Guarded storage */
160  #define MD_PSMASK	0x0000000c	/* Mask of page size bits */
161  #define MD_PS8MEG	0x0000000c	/* 8M page size */
162  #define MD_PS512K	0x00000004	/* 512K page size */
163  #define MD_PS4K_16K	0x00000000	/* 4K or 16K page size */
164  #define MD_WT		0x00000002	/* Use writethrough page attribute */
165  #define MD_SVALID	0x00000001	/* Segment entry is valid */
166  					/* Reset value is undefined */
167  
168  
169  /* Real page number.  Defined by the pte.  Writing this register
170   * causes a TLB entry to be created for the data TLB, using
171   * additional information from the MD_EPN, and MD_TWC registers.
172   */
173  #define SPRN_MD_RPN	798
174  #define MD_SPS16K	0x00000008	/* Small page size (0 = 4k, 1 = 16k) */
175  
176  /* This is a temporary storage register that could be used to save
177   * a processor working register during a tablewalk.
178   */
179  #define SPRN_M_TW	799
180  
181  #ifdef CONFIG_PPC_MM_SLICES
182  #include <asm/nohash/32/slice.h>
183  #define SLICE_ARRAY_SIZE	(1 << (32 - SLICE_LOW_SHIFT - 1))
184  #define LOW_SLICE_ARRAY_SZ	SLICE_ARRAY_SIZE
185  #endif
186  
187  #if defined(CONFIG_PPC_4K_PAGES)
188  #define mmu_virtual_psize	MMU_PAGE_4K
189  #elif defined(CONFIG_PPC_16K_PAGES)
190  #define mmu_virtual_psize	MMU_PAGE_16K
191  #define PTE_FRAG_NR		4
192  #define PTE_FRAG_SIZE_SHIFT	12
193  #define PTE_FRAG_SIZE		(1UL << 12)
194  #else
195  #error "Unsupported PAGE_SIZE"
196  #endif
197  
198  #define mmu_linear_psize	MMU_PAGE_8M
199  
200  #ifndef __ASSEMBLY__
201  
202  #include <linux/mmdebug.h>
203  
/*
 * Bitmask of address-space slices, one bit per low slice.
 * The high-slice bitmap is declared with 0 bits: on this 32-bit MMU
 * there is nothing above the low slices (see SLICE_ARRAY_SIZE, which
 * covers the whole 32-bit range).  The field presumably exists only so
 * that the layout matches what the generic slice code expects — the
 * mm_ctx_high_slices() accessor below returns the matching psize array.
 */
struct slice_mask {
	u64 low_slices;
	DECLARE_BITMAP(high_slices, 0);
};
208  
/*
 * Per-mm MMU context for the 8xx.  The slice-related fields exist only
 * under CONFIG_PPC_MM_SLICES and mirror the interface consumed by the
 * mm_ctx_*() accessors below.
 */
typedef struct {
	unsigned int id;	/* context number; NOTE(review): presumably the ASID matched via SPRN_M_CASID — confirm */
	unsigned int active;
	unsigned long vdso_base;
#ifdef CONFIG_PPC_MM_SLICES
	u16 user_psize;		/* page size index */
	unsigned char low_slices_psize[SLICE_ARRAY_SIZE];	/* one psize per low slice */
	unsigned char high_slices_psize[0];	/* zero-sized: no high slices on 32-bit */
	unsigned long slb_addr_limit;
	struct slice_mask mask_base_psize; /* 4k or 16k */
	struct slice_mask mask_512k;
	struct slice_mask mask_8m;
#endif
	void *pte_frag;		/* PTE fragment cursor — see PTE_FRAG_* above */
} mm_context_t;
224  
225  #ifdef CONFIG_PPC_MM_SLICES
mm_ctx_user_psize(mm_context_t * ctx)226  static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
227  {
228  	return ctx->user_psize;
229  }
230  
/* Record a new user page-size index in the context. */
static inline void mm_ctx_set_user_psize(mm_context_t *ctx, u16 user_psize)
{
	(*ctx).user_psize = user_psize;
}
235  
/* Return the per-low-slice page-size array of this context. */
static inline unsigned char *mm_ctx_low_slices(mm_context_t *ctx)
{
	unsigned char *psizes = ctx->low_slices_psize;

	return psizes;
}
240  
/*
 * Return the high-slice page-size array.  The array is zero-sized on
 * this platform (see mm_context_t); callers get a valid but empty span.
 */
static inline unsigned char *mm_ctx_high_slices(mm_context_t *ctx)
{
	unsigned char *psizes = ctx->high_slices_psize;

	return psizes;
}
245  
/* Accessor for the context's slb_addr_limit field. */
static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx)
{
	unsigned long limit = ctx->slb_addr_limit;

	return limit;
}
250  
/* Store a new slb_addr_limit in the context. */
static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx, unsigned long limit)
{
	(*ctx).slb_addr_limit = limit;
}
255  
/*
 * Map a page-size index to the slice mask tracking that size.
 * Only 512K, 8M and the base (4k/16k) page size are valid here;
 * anything else trips BUG_ON, exactly as before.
 */
static inline struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize)
{
	switch (psize) {
	case MMU_PAGE_512K:
		return &ctx->mask_512k;
	case MMU_PAGE_8M:
		return &ctx->mask_8m;
	default:
		BUG_ON(psize != mmu_virtual_psize);
		return &ctx->mask_base_psize;
	}
}
#endif /* CONFIG_PPC_MM_SLICES */
268  
269  #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
270  #define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
271  
272  /* Page size definitions, common between 32 and 64-bit
273   *
274   *    shift : is the "PAGE_SHIFT" value for that page size
275   *    penc  : is the pte encoding mask
276   *
277   */
/*
 * Describes one supported MMU page size; mmu_psize_defs[] below holds
 * one entry per MMU_PAGE_* index.
 */
struct mmu_psize_def {
	unsigned int	shift;	/* number of bits */
	unsigned int	enc;	/* PTE encoding */
	unsigned int    ind;    /* Corresponding indirect page size shift */
	unsigned int	flags;	/* MMU_PAGE_SIZE_* capability flags below */
#define MMU_PAGE_SIZE_DIRECT	0x1	/* Supported as a direct size */
#define MMU_PAGE_SIZE_INDIRECT	0x2	/* Supported as an indirect size */
};
286  
287  extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
288  
shift_to_mmu_psize(unsigned int shift)289  static inline int shift_to_mmu_psize(unsigned int shift)
290  {
291  	int psize;
292  
293  	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
294  		if (mmu_psize_defs[psize].shift == shift)
295  			return psize;
296  	return -1;
297  }
298  
mmu_psize_to_shift(unsigned int mmu_psize)299  static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
300  {
301  	if (mmu_psize_defs[mmu_psize].shift)
302  		return mmu_psize_defs[mmu_psize].shift;
303  	BUG();
304  }
305  
306  /* patch sites */
307  extern s32 patch__itlbmiss_linmem_top, patch__itlbmiss_linmem_top8;
308  extern s32 patch__dtlbmiss_linmem_top, patch__dtlbmiss_immr_jmp;
309  extern s32 patch__fixupdar_linmem_top;
310  extern s32 patch__dtlbmiss_romem_top, patch__dtlbmiss_romem_top8;
311  
312  extern s32 patch__itlbmiss_exit_1, patch__itlbmiss_exit_2;
313  extern s32 patch__dtlbmiss_exit_1, patch__dtlbmiss_exit_2, patch__dtlbmiss_exit_3;
314  extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf;
315  
316  #endif /* !__ASSEMBLY__ */
317  
318  #endif /* _ASM_POWERPC_MMU_8XX_H_ */
319