Lines Matching +full:high +full:- +full:side
1 /* SPDX-License-Identifier: GPL-2.0 */
8 * Intel Physical Address Extension (PAE) Mode - three-level page
28 * value and then use set_pte to update it. -ben
32 ptep->pte_high = pte.pte_high; in native_set_pte()
34 ptep->pte_low = pte.pte_low; in native_set_pte()
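Only the two halves of the store matched the search; in the upstream helper they are separated by a write barrier so another CPU walking the page tables never pairs a freshly written low half (which carries the present bit) with a stale high half. A minimal sketch of the full helper, with the barrier line filled in from that ordering argument rather than from the matched output (an smp_wmb() is the natural fit here, but it is not shown above):

static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
        ptep->pte_high = pte.pte_high;
        smp_wmb();              /* high half must be visible before the low half */
        ptep->pte_low = pte.pte_low;
}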
39 * pte_offset_map_lock() on 32-bit PAE kernels was reading the pmd_t with
43 * vs page faults vs MADV_DONTNEED. On the page fault side
45 * pmd_t with a "*pmdp" on the mincore side, an SMP race can happen
46 * because GCC will not read the 64-bit value of the pmd atomically.
67 * In some cases the high and low part of the pmdval returned may not be
69 * mapped hugepage, while the high part may point to a more recently
83 * If the low part is null, we must not read the high part in pmd_read_atomic()
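Putting the comment's rules together: the low half is fetched first, and the high half is chased only once the low half is known to be non-null, with a read barrier in between. A sketch of pmd_read_atomic() along those lines, reconstructed around the matched comment; the exact casts and guards in the header may differ:

static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
        pmdval_t ret;
        u32 *tmp = (u32 *)pmdp;

        ret = (pmdval_t)(*tmp);         /* low half first: decides none vs. not-none */
        if (ret) {
                /* low half is non-null, so the high half may be read */
                smp_rmb();
                ret |= ((pmdval_t)*(tmp + 1)) << 32;
        }

        return (pmd_t) { ret };
}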
106 pud.p4d.pgd = pti_set_user_pgtbl(&pudp->p4d.pgd, pud.p4d.pgd); in native_set_pud()
112 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
119 ptep->pte_low = 0; in native_pte_clear()
121 ptep->pte_high = 0; in native_pte_clear()
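The clear path runs in the opposite order from native_set_pte(): the low half, which holds the P bit, is zapped first so the hardware stops using the entry, and only then is the high half cleared. As with the set path, the barrier between the two stores is not in the matched lines; a sketch assuming the same smp_wmb() ordering:

static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        ptep->pte_low = 0;      /* clear the half with the P bit first */
        smp_wmb();
        ptep->pte_high = 0;
}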
141 * According to Intel App note "TLBs, Paging-Structure Caches, in pud_clear()
142 * and Their Invalidation", April 2007, document 317080-001, in pud_clear()
144 * TLB via cr3 if the top-level pgd is changed... in pud_clear()
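The practical consequence of that note is that pud_clear() itself only zeroes the entry and leaves any CR3-based flush to its callers; the upstream comment goes on to argue that those callers already flush or do not need to. A sketch of that shape, hedged since the function body does not appear in the matched lines:

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
        /* no TLB flush here; callers take care of it when needed */
}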
178 /* xchg acts as a barrier before setting the high bits */ in native_pmdp_get_and_clear()
179 res.pmd_low = xchg(&orig->pmd_low, 0); in native_pmdp_get_and_clear()
180 res.pmd_high = orig->pmd_high; in native_pmdp_get_and_clear()
181 orig->pmd_high = 0; in native_pmdp_get_and_clear()
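These lines view the 64-bit pmd through a split low/high union. The xchg() on the low half does double duty: it atomically clears the half that carries the present bit and orders the accesses that follow, so the high half can then be read and zeroed with plain stores. A sketch with the union spelled out (the pud variant further down follows the same pattern); the union and field names come from the matched lines, the rest is reconstructed:

union split_pmd {
        struct {
                u32 pmd_low;
                u32 pmd_high;
        };
        pmd_t pmd;
};

static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
{
        union split_pmd res, *orig = (union split_pmd *)pmdp;

        /* xchg clears the low half and acts as the barrier before the high half */
        res.pmd_low = xchg(&orig->pmd_low, 0);
        res.pmd_high = orig->pmd_high;
        orig->pmd_high = 0;

        return res.pmd;
}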
198 * cmpxchg64: we can update pmdp half-by-half without racing with in pmdp_establish()
208 /* xchg acts as a barrier before setting the high bits */ in pmdp_establish()
209 old.pmd_low = xchg(&ptr->pmd_low, new.pmd_low); in pmdp_establish()
210 old.pmd_high = ptr->pmd_high; in pmdp_establish()
211 ptr->pmd_high = new.pmd_high; in pmdp_establish()
217 } while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd); in pmdp_establish()
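pmdp_establish() therefore has two paths: when the pmd being installed has the present bit clear, the update can be done half-by-half without racing with anybody (the comment at line 198), otherwise the whole 64-bit value is replaced with a cmpxchg64() retry loop. A sketch of that structure, reconstructed around the matched lines:

static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
        pmd_t old;

        if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
                union split_pmd old, new, *ptr = (union split_pmd *)pmdp;

                new.pmd = pmd;
                /* xchg on the low half orders the later high-half accesses */
                old.pmd_low = xchg(&ptr->pmd_low, new.pmd_low);
                old.pmd_high = ptr->pmd_high;
                ptr->pmd_high = new.pmd_high;
                return old.pmd;
        }

        do {
                old = *pmdp;
        } while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);

        return old;
}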
237 pti_set_user_pgtbl(&pudp->p4d.pgd, __pgd(0)); in native_pudp_get_and_clear()
240 /* xchg acts as a barrier before setting the high bits */ in native_pudp_get_and_clear()
241 res.pud_low = xchg(&orig->pud_low, 0); in native_pudp_get_and_clear()
242 res.pud_high = orig->pud_high; in native_pudp_get_and_clear()
243 orig->pud_high = 0; in native_pudp_get_and_clear()
251 /* Encode and decode a swap entry */
265 * Normally, __swp_entry() converts from arch-independent swp_entry_t to
266 * arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the result
268 * whole 64 bits. Thus, we shift the "real" arch-dependent conversion to
274 | ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })
279 * Analogously, __pte_to_swp_entry() doesn't just extract the arch-dependent
284 #define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
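Only the type extraction matched; read together with line 274, the swap type sits in the topmost SWP_TYPE_BITS of the 64-bit pte, and the offset is stored bit-inverted below it as part of the L1TF hardening of non-present PTEs. A sketch of the companion offset macro, mirroring the 64-bit variant; its exact form in this header is an assumption:

/* Type lives in the topmost SWP_TYPE_BITS of the pte (matched above). */
#define __pteval_swp_type(x)    ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))

/*
 * Offset (reconstructed): invert the pte, shift the type bits out the top,
 * then shift down to drop the low flag bits and recover the offset.
 */
#define __pteval_swp_offset(x)  ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT))

Building an entry with | ((pteval_t)(type) << (64 - SWP_TYPE_BITS)), as on line 274, is the inverse of this extraction.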
290 #include <asm/pgtable-invert.h>