
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This file contains the functions and defines necessary to modify and use
 * the IA-64 page table tree.
 *
 * This hopefully works with any (fixed) IA-64 page-size, as defined
 * in <asm/page.h>.
 *
 * Copyright (C) 1998-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * First, define the various bits in a PTE.  Note that the PTE format
 * matches the VHPT short format.
 */
#define _PAGE_P_BIT		0
#define _PAGE_MA_WB		(0x0 << 2)	/* write back memory attribute */
#define _PAGE_MA_UC		(0x4 << 2)	/* uncacheable memory attribute */
#define _PAGE_MA_UCE		(0x5 << 2)	/* UC exported attribute */
#define _PAGE_MA_WC		(0x6 << 2)	/* write coalescing memory attribute */
#define _PAGE_MA_NAT		(0x7 << 2)	/* not-a-thing attribute */
#define _PAGE_MA_MASK		(0x7 << 2)
#define _PAGE_PL_0		(0 << 7)	/* privilege level 0 (kernel) */
#define _PAGE_AR_R		(0 << 9)	/* read only */
#define _PAGE_AR_RX		(1 << 9)	/* read & execute */
#define _PAGE_AR_RW		(2 << 9)	/* read & write */
#define _PAGE_AR_RWX		(3 << 9)	/* read, write & execute */
#define _PAGE_AR_R_RW		(4 << 9)	/* read / read & write */
#define _PAGE_AR_RX_RWX		(5 << 9)	/* read & exec / read, write & exec */
#define _PAGE_AR_RWX_RW		(6 << 9)	/* read, write & exec / read & write */
#define _PAGE_AR_X_RX		(7 << 9)	/* exec & promote / read & exec */
#define _PAGE_AR_MASK		(7 << 9)
#define _PAGE_AR_SHIFT		9

#define _PAGE_PPN_MASK		(((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
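/*
 * Illustration only (not part of the original header): a cacheable,
 * kernel-only, read-write-execute protection value would combine the
 * fields above roughly as follows.  The real header builds PAGE_KERNEL
 * from a similar combination, plus the present/accessed/dirty bits.
 */
#define EXAMPLE_KERNEL_PROT	(_PAGE_MA_WB | _PAGE_PL_0 | _PAGE_AR_RWX)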
/*
 * How many pointers a page-table level can hold, expressed as a shift.
 */
#define PTRS_PER_PTD_SHIFT	(PAGE_SHIFT-3)
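/*
 * Worked example (assuming 16KB pages, i.e. PAGE_SHIFT == 14): a
 * page-table page then holds 1 << (14 - 3) == 2048 entries; the "- 3"
 * accounts for the 8-byte (2^3) size of each entry.
 */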
/*
 * PMD_SHIFT determines the size of the area a third-level page table
 * can map.
 */
#define PMD_MASK	(~(PMD_SIZE-1))
/*
 * PUD_SHIFT determines the size of the area a second-level page table
 * can map.
 */
#define PUD_MASK	(~(PUD_SIZE-1))
/*
 * PGDIR_SHIFT determines what a first-level page table entry can map.
 */
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
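/*
 * Worked example (assuming 16KB pages and the 4-level configuration,
 * neither of which is guaranteed by this excerpt): PTRS_PER_PTD_SHIFT
 * is 11, so PMD_SHIFT = 14 + 11 = 25 (each PMD entry maps 32MB),
 * PUD_SHIFT = 25 + 11 = 36 (64GB), and PGDIR_SHIFT = 36 + 11 = 47.
 */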
#define USER_PTRS_PER_PGD	(5*PTRS_PER_PGD/8)	/* regions 0-4 are user regions */
/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed.  They are cleared only by the
 * page-out routines.
 */
/*
 * The _P version gets used for a private shared memory segment, the _S
 * version gets used for a shared memory segment with MAP_SHARED on.
 * In a private shared memory segment, we do a copy-on-write if a task
 * attempts to write to the page.
 */
#define pgd_ERROR(e)	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
#define pud_ERROR(e)	printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#define pmd_ERROR(e)	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pte_ERROR(e)	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
/* Quick test to see if ADDR is a (potentially) valid physical address. */
static inline long ia64_phys_addr_valid (unsigned long addr)
{
	return (addr & (local_cpu_data->unimpl_pa_mask)) == 0;
}
/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error.  Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for IA-64.
 */
/*
 * Now come the defines and routines to manage and access the three-level
 * page table.
 */
#define VMALLOC_START	(RGN_BASE(RGN_GATE) + 0x200000000UL)
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
/* SPARSEMEM_VMEMMAP uses half of the vmalloc area for the virtual mem_map */
# define VMALLOC_END	(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 10)))
#else
# define VMALLOC_END	(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
#endif
#define kc_vaddr_to_offset(v)	((v) - RGN_BASE(RGN_GATE))

#define RGN_MAP_SHIFT	(PGDIR_SHIFT + PTRS_PER_PGD_SHIFT - 3)
#define RGN_MAP_LIMIT	((1UL << RGN_MAP_SHIFT) - PAGE_SIZE)	/* per region addr limit */
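/*
 * Worked example (again assuming 16KB pages and 4 levels): with
 * PGDIR_SHIFT == 47 and PTRS_PER_PGD_SHIFT == 11, RGN_MAP_SHIFT is 55.
 * The "- 3" reflects that the pgd is split across the 8 regions, so
 * each region gets only 1/8 of the first-level entries.
 */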
/*
 * Conversion functions: convert page frame number (pfn) and a protection
 * value to a page table entry (pte).
 */
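/*
 * A minimal sketch (not verbatim from this excerpt) of what such a
 * conversion looks like, assuming the usual pte_val()/pgprot_val()
 * accessors: the pfn is shifted into the physical-address field and
 * the protection bits are ORed in.
 */
#define EXAMPLE_pfn_pte(pfn, pgprot)						\
({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; })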
/* This takes a physical page address that is used by the remapping functions */
#define pte_clear(mm,addr,pte)	(pte_val(*(pte)) = 0UL)

#define pmd_present(pmd)	(pmd_val(pmd) != 0UL)
#define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0UL)

#define pud_present(pud)	(pud_val(pud) != 0UL)
#define pud_clear(pudp)		(pud_val(*(pudp)) = 0UL)

#define p4d_present(p4d)	(p4d_val(p4d) != 0UL)
#define p4d_clear(p4dp)		(p4d_val(*(p4dp)) = 0UL)
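/*
 * Note (added for clarity): pte_write() below exploits the access-rights
 * encoding above.  AR values 2..6 (_PAGE_AR_RW through _PAGE_AR_RWX_RW)
 * are exactly the ones that grant write permission at some privilege
 * level, hence the unsigned range check (ar - 2) <= 4.
 */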
#define pte_write(pte)	((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
#define pte_exec(pte)		((pte_val(pte) & _PAGE_AR_RX) != 0)
#define pte_dirty(pte)		((pte_val(pte) & _PAGE_D) != 0)
#define pte_young(pte)		((pte_val(pte) & _PAGE_A) != 0)
/*
 * Because ia64's Icache and Dcache are not coherent (on a cpu), we need to
 * sync the icache and dcache when we insert *new* executable pages.
 */
/*
 * Make page protection values cacheable, uncacheable, or write-
 * combining.  Note that "protection" is really a misnomer here as the
 * protection value contains the memory attribute bits, dirty bits, and
 * various other bits as well.
 */
static inline unsigned long pgd_index (unsigned long address)
{
	unsigned long region = address >> 61;	/* bits 61-63 select the region */
	unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);

	return (region << (PAGE_SHIFT - 6)) | l1index;
}
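/*
 * Note (added for clarity): the pgd is divided into eight equal chunks,
 * one per 2^61-byte region, so the region number selects a chunk of
 * PTRS_PER_PGD/8 == 1 << (PAGE_SHIFT - 6) entries and l1index indexes
 * within that chunk.
 */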
#define pgd_offset_k(addr) \
	(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
/* Look up a pgd entry in the gate area.  On IA-64, the gate-area
   resides in the kernel-mapped segment, hence we use pgd_offset_k()
   here. */
static inline int
ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;	/* non-SMP variant shown; SMP uses an atomic bit op */
	if (!pte_young(pte))
		return 0;
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return 1;
}

static inline pte_t
ptep_get_and_clear (struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	return __pte(xchg((long *) ptep, 0));	/* atomically fetch and clear */
}
static inline int
pte_same (pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}
#define update_mmu_cache(vma, address, ptep)	do { } while (0)
/*
 * Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
 *	 bits in the swap-type field of the swap pte.  It would be nice to
 *	 enforce that, but we can't easily include <linux/swap.h> here.
 *
 * Format of swap pte:
 *	bit   0   : present bit (must be zero)
 *	bits  1- 7: swap-type
 *	bits  8-62: swap offset
 *	bit  63   : _PAGE_PROTNONE bit
 */
#define __swp_type(entry)	(((entry).val >> 1) & 0x7f)
#define __swp_offset(entry)	(((entry).val << 1) >> 9)
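/*
 * Worked example (hypothetical values): a swap entry with type 3 and
 * offset 0x1000 has val == (3 << 1) | (0x1000UL << 8).  __swp_type()
 * recovers ((val >> 1) & 0x7f) == 3, and __swp_offset() recovers
 * ((val << 1) >> 9) == 0x1000, the left shift first discarding bit 63
 * before the offset is extracted.
 */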
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
#define HUGETLB_PGDIR_SHIFT	(HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define HUGETLB_PGDIR_MASK	(~(HUGETLB_PGDIR_SIZE-1))
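/*
 * Worked example (assuming 16KB base pages and, hypothetically,
 * HPAGE_SHIFT == 28, i.e. 256MB huge pages): HUGETLB_PGDIR_SHIFT is
 * then 28 + 2*(14 - 3) == 50, the two page-table levels below the pgd
 * each contributing their 11 index bits on top of the huge-page size.
 */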
/*
 * Update PTEP with ENTRY, which is guaranteed to be a less
 * restrictive PTE.  That is, ENTRY may have the ACCESSED and DIRTY
 * bits turned on when the value at PTEP did not.
 *
 * On ia64, we could implement this routine with a cmpxchg()-loop.
 * However, like on x86, we can get a more streamlined version by
 * observing that a lost update can at worst
 * result in an extra Access-bit fault, which would then turn on the
 * ACCESSED bit in the low-level fault handler (iaccess_bit or
 * daccess_bit in ivt.S).
 */
#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty)	\
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed)							\
		set_pte_at((__vma)->vm_mm, (__addr), __ptep, __entry);	\
	__changed;							\
})
/*
 * Identity-mapped regions use a large page size.  We'll call such large pages
 * "granules".  If you can think of a better name that's unambiguous, let me
 * know...
 */
/* These tell get_user_pages() that the first gate page is accessible from user-level. */
#if CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#endif
#include <asm-generic/pgtable-nop4d.h>
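/*
 * Note (added for clarity): pgtable-nop4d.h folds the p4d level into the
 * pgd, so even the "4-level" configuration really uses pgd/pud/pmd/pte;
 * with CONFIG_PGTABLE_LEVELS == 3, pgtable-nopud.h additionally folds
 * the pud level away, leaving pgd/pmd/pte.
 */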