Lines matching "half" in arch/s390/mm/pgalloc.c

1 // SPDX-License-Identifier: GPL-2.0
48 return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM; in page_table_register_sysctl()
74 if (current->active_mm == mm) { in __crst_table_upgrade()
75 S390_lowcore.user_asce = mm->context.asce; in __crst_table_upgrade()
84 unsigned long asce_limit = mm->context.asce_limit; in crst_table_upgrade()
105 spin_lock_bh(&mm->page_table_lock); in crst_table_upgrade()
112 VM_BUG_ON(asce_limit != mm->context.asce_limit); in crst_table_upgrade()
115 __pgd = (unsigned long *) mm->pgd; in crst_table_upgrade()
117 mm->pgd = (pgd_t *) p4d; in crst_table_upgrade()
118 mm->context.asce_limit = _REGION1_SIZE; in crst_table_upgrade()
119 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | in crst_table_upgrade()
124 __pgd = (unsigned long *) mm->pgd; in crst_table_upgrade()
126 mm->pgd = (pgd_t *) pgd; in crst_table_upgrade()
127 mm->context.asce_limit = TASK_SIZE_MAX; in crst_table_upgrade()
128 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | in crst_table_upgrade()
132 spin_unlock_bh(&mm->page_table_lock); in crst_table_upgrade()
141 return -ENOMEM; in crst_table_upgrade()
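
crst_table_upgrade() grows a process address space by stacking a new top-level CRST table on top of the old one, one level per step, and only then publishing the new asce_limit and ASCE. Below is a minimal userspace sketch of that loop; the model_* names are hypothetical stand-ins, and the real code additionally rebuilds the ASCE bits and runs under mm->page_table_lock.

#include <stdio.h>
#include <stdlib.h>

#define MODEL_REGION2_SIZE (1UL << 42)   /* 3-level table limit, 4 TiB */
#define MODEL_REGION1_SIZE (1UL << 53)   /* 4-level table limit, 8 PiB */
#define MODEL_TASK_SIZE_MAX (~0UL)       /* 5-level table limit */
#define MODEL_CRST_ENTRIES 2048          /* entries per region table */

struct model_mm {
	unsigned long *pgd;              /* top-level translation table */
	unsigned long asce_limit;        /* current address-space ceiling */
};

/* Add translation levels, one at a time, until 'end' fits. */
static int model_crst_upgrade(struct model_mm *mm, unsigned long end)
{
	while (mm->asce_limit < end) {
		unsigned long *table = calloc(MODEL_CRST_ENTRIES, sizeof(*table));

		if (!table)
			return -1;                      /* kernel returns -ENOMEM */
		table[0] = (unsigned long) mm->pgd;     /* new top covers old top */
		mm->pgd = table;
		mm->asce_limit = mm->asce_limit == MODEL_REGION2_SIZE ?
				 MODEL_REGION1_SIZE : MODEL_TASK_SIZE_MAX;
	}
	return 0;
}

int main(void)
{
	struct model_mm mm = {
		.pgd = calloc(MODEL_CRST_ENTRIES, sizeof(unsigned long)),
		.asce_limit = MODEL_REGION2_SIZE,
	};

	if (!mm.pgd || model_crst_upgrade(&mm, 1UL << 50))  /* one step: 2^42 -> 2^53 */
		return 1;
	printf("asce_limit is now %#lx\n", mm.asce_limit);
	return 0;
}
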
179 * A 2KB-pgtable is either the upper or the lower half of a normal page.
180 * The second half of the page may be unused or used as another
181 * 2KB-pgtable.
183 * Whenever possible the parent page for a new 2KB-pgtable is picked
188 * When a parent page gets fully allocated it contains 2KB-pgtables in both
191 * When a 2KB-pgtable is freed from a fully allocated parent page that
194 * If a 2KB-pgtable is freed from the partially allocated parent page that
201 * The upper byte (bits 24-31) of the parent page _refcount is used
202 * for tracking contained 2KB-pgtables and has the following format:
205 * 01234567 upper byte (bits 24-31) of struct page::_refcount
207 * || |+--- upper 2KB-pgtable is allocated
208 * || +---- lower 2KB-pgtable is allocated
209 * |+------- upper 2KB-pgtable is pending for removal
210 * +-------- lower 2KB-pgtable is pending for removal
215 * When a 2KB-pgtable is allocated the corresponding AA bit is set to 1.
217 * - added to mm_context_t::pgtable_list in case the second half of the
219 * - removed from mm_context_t::pgtable_list in case both halves of the
223 * When a 2KB-pgtable is deallocated the corresponding AA bit is set to 0
225 * Thus, PP and AA bits corresponding to the same 2KB-pgtable are mutually
228 * - added to mm_context_t::pgtable_list in case the second half of the
230 * - removed from mm_context_t::pgtable_list in case the second half of
240 * parent page does not contain any 2KB-pgtable fragment anymore, and it has
244 * PGSTE memory spaces use full 4KB-pgtables and do not need most of the
245 * logic described above. Both AA bits are set to 1 to denote a 4KB-pgtable
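
The PP/AA scheme can be exercised outside the kernel. The sketch below models _refcount bits 24-31 with a C11 atomic; atomic_xor_bits() mirrors the return-the-new-value behaviour the code relies on, while the alloc/free sequence itself is just an illustrative scenario, not kernel code.

#include <stdatomic.h>
#include <stdio.h>

/* Same contract as the kernel helper: xor the bits in, return the new value. */
static unsigned int atomic_xor_bits(atomic_uint *v, unsigned int bits)
{
	return atomic_fetch_xor(v, bits) ^ bits;
}

int main(void)
{
	atomic_uint refcount = 0;   /* upper byte plays _refcount bits 24-31 */
	unsigned int mask;

	mask = atomic_xor_bits(&refcount, 0x01U << 24); /* alloc first 2K fragment */
	printf("PPAA = %02x\n", mask >> 24);            /* 01 */
	mask = atomic_xor_bits(&refcount, 0x02U << 24); /* alloc second 2K fragment */
	printf("PPAA = %02x\n", mask >> 24);            /* 03 */
	mask = atomic_xor_bits(&refcount, 0x11U << 24); /* free first: AA off, PP on */
	printf("PPAA = %02x\n", mask >> 24);            /* 12 */
	mask = atomic_xor_bits(&refcount, 0x10U << 24); /* removal done: PP off */
	printf("PPAA = %02x\n", mask >> 24);            /* 02 */
	return 0;
}
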
258 spin_lock_bh(&mm->context.lock); in page_table_alloc()
259 if (!list_empty(&mm->context.pgtable_list)) { in page_table_alloc()
260 page = list_first_entry(&mm->context.pgtable_list, in page_table_alloc()
262 mask = atomic_read(&page->_refcount) >> 24; in page_table_alloc()
275 bit = mask & 1; /* =1 -> second 2K */ in page_table_alloc()
278 atomic_xor_bits(&page->_refcount, in page_table_alloc()
280 list_del(&page->lru); in page_table_alloc()
283 spin_unlock_bh(&mm->context.lock); in page_table_alloc()
300 atomic_xor_bits(&page->_refcount, 0x03U << 24); in page_table_alloc()
305 atomic_xor_bits(&page->_refcount, 0x01U << 24); in page_table_alloc()
307 spin_lock_bh(&mm->context.lock); in page_table_alloc()
308 list_add(&page->lru, &mm->context.pgtable_list); in page_table_alloc()
309 spin_unlock_bh(&mm->context.lock); in page_table_alloc()
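
page_table_alloc() turns the AA mask into a fragment address: bit = mask & 1 selects the free 2K half, and the table pointer is advanced by PTRS_PER_PTE entries to reach the second half. A small model of that arithmetic, with a static buffer as a hypothetical stand-in for page_to_virt():

#include <stdio.h>
#include <stdint.h>

#define PTRS_PER_PTE 256	/* 256 entries * 8 bytes = one 2KB fragment */

int main(void)
{
	static uint64_t page[2 * PTRS_PER_PTE];	/* stands in for one 4KB page */
	uint64_t *table = page;			/* page_to_virt() in the kernel */
	unsigned int mask = 0x01U;		/* AA bits: first half already taken */
	unsigned int bit = mask & 1;		/* =1 -> second 2K is the free one */

	if (bit)
		table += PTRS_PER_PTE;		/* step into the second 2KB half */
	printf("fragment starts %zu bytes into the page\n",
	       (size_t)((char *)table - (char *)page));	/* 2048 */
	return 0;
}
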
315 unsigned int half, unsigned int mask) in page_table_release_check() argument
322 "Invalid pgtable %p release half 0x%02x mask 0x%02x", in page_table_release_check()
323 table, half, mask); in page_table_release_check()
329 unsigned int mask, bit, half; in page_table_free() local
336 spin_lock_bh(&mm->context.lock); in page_table_free()
342 mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24)); in page_table_free()
345 list_add(&page->lru, &mm->context.pgtable_list); in page_table_free()
347 list_del(&page->lru); in page_table_free()
348 spin_unlock_bh(&mm->context.lock); in page_table_free()
349 mask = atomic_xor_bits(&page->_refcount, 0x10U << (bit + 24)); in page_table_free()
353 half = 0x01U << bit; in page_table_free()
355 half = 0x03U; in page_table_free()
356 mask = atomic_xor_bits(&page->_refcount, 0x03U << 24); in page_table_free()
360 page_table_release_check(page, table, half, mask); in page_table_free()
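
page_table_free() needs the reverse mapping: which 2K half does a table pointer belong to? The pointer's offset within its 4KB page, divided by the fragment size, yields bit, and half = 0x01U << bit is the AA bit handed to page_table_release_check(). A sketch of that computation with a made-up address:

#include <stdio.h>
#include <stdint.h>

#define MODEL_PAGE_MASK (~0xfffUL)	/* 4KB pages */
#define FRAG_BYTES (256 * 8)		/* PTRS_PER_PTE * sizeof(pte_t) */

int main(void)
{
	uintptr_t table = 0x84321800;	/* made-up fragment address */
	unsigned int bit = (table & ~MODEL_PAGE_MASK) / FRAG_BYTES; /* 0 or 1 */
	unsigned int half = 0x01U << bit;	/* AA bit owned by this fragment */

	printf("bit=%u half=0x%02x\n", bit, half);	/* bit=1 half=0x02 */
	return 0;
}
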
372 mm = tlb->mm; in page_table_free_rcu()
381 spin_lock_bh(&mm->context.lock); in page_table_free_rcu()
387 mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24)); in page_table_free_rcu()
390 list_add_tail(&page->lru, &mm->context.pgtable_list); in page_table_free_rcu()
392 list_del(&page->lru); in page_table_free_rcu()
393 spin_unlock_bh(&mm->context.lock); in page_table_free_rcu()
400 unsigned int mask = (unsigned long) _table & 0x03U, half = mask; in __tlb_remove_table() local
404 switch (half) { in __tlb_remove_table()
410 mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24)); in __tlb_remove_table()
416 mask = atomic_xor_bits(&page->_refcount, 0x03U << 24); in __tlb_remove_table()
421 page_table_release_check(page, table, half, mask); in __tlb_remove_table()
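
Because 2KB fragments are 2KB aligned, page_table_free_rcu() can tag the pending half into the two low pointer bits before passing the pointer on, and __tlb_remove_table() decodes the tag after the RCU grace period: 0x00 means a full CRST page, 0x01/0x02 one of the 2KB halves, 0x03 a 4KB PGSTE table. An encode/decode sketch with a made-up address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uintptr_t table = 0x84321800;	/* made-up 2KB-aligned fragment */
	unsigned int bit = 1;		/* second half pending removal */

	/* encode (page_table_free_rcu side): low two bits carry the half */
	void *tagged = (void *)(table | (0x01U << bit));

	/* decode (__tlb_remove_table side, after the grace period) */
	unsigned int half = (uintptr_t)tagged & 0x03U;
	void *real = (void *)((uintptr_t)tagged & ~(uintptr_t)0x03U);

	printf("half=0x%02x table=%p\n", half, real);	/* half=0x02 */
	return 0;
}
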
467 unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1); \
469 return (next - 1) < (end - 1) ? next : end; \
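
The BASE_ADDR_END_FUNC() body above rounds addr up to the next SIZE boundary and clamps it to end; subtracting 1 from both sides keeps the comparison correct when next or end wraps to 0 at the very top of the address space. The same computation as a plain function, with a 1MB size modelling _SEGMENT_SIZE:

#include <stdio.h>

/* same rounding as the generated base_*_addr_end() helpers */
static unsigned long addr_end(unsigned long addr, unsigned long end,
			      unsigned long size)
{
	unsigned long next = (addr + size) & ~(size - 1);

	/* the -1 on both sides keeps wrap-around (value 0) working */
	return (next - 1) < (end - 1) ? next : end;
}

int main(void)
{
	unsigned long seg = 1UL << 20;	/* 1MB, like _SEGMENT_SIZE on s390 */

	printf("%#lx\n", addr_end(0x12345UL, 0x300000UL, seg));  /* 0x100000 */
	printf("%#lx\n", addr_end(0x2f0000UL, 0x300000UL, seg)); /* 0x300000 */
	return 0;
}
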
519 return -ENOMEM; in base_segment_walk()
548 return -ENOMEM; in base_region3_walk()
576 return -ENOMEM; in base_region2_walk()
604 return -ENOMEM; in base_region1_walk()
618 * base_asce_free - free asce and tables returned from base_asce_alloc()
658 return base_pgt_cache ? 0 : -ENOMEM; in base_pgt_cache_init()
662 * base_asce_alloc - create kernel mapping without enhanced DAT features
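
Taken together, base_asce_alloc() builds a table hierarchy by walking each level and allocating the next-lower table on demand (the -ENOMEM returns above are that allocation failing), and base_asce_free() tears the hierarchy down again. A heavily simplified two-level userspace model: the model_* names are hypothetical, NULL stands in for the region-entry invalid bit, and no attempt is made to encode a real ASCE.

#include <stdio.h>
#include <stdlib.h>

#define MODEL_ENTRIES 2048	/* entries per CRST table */

/* One level of the base_*_walk() pattern: look up the top-table slot,
 * allocate and link a lower table on demand, then write a leaf entry. */
static int model_walk(unsigned long **top, unsigned int ti,
		      unsigned int li, unsigned long entry)
{
	if (!top[ti]) {
		top[ti] = calloc(MODEL_ENTRIES, sizeof(unsigned long));
		if (!top[ti])
			return -1;	/* kernel returns -ENOMEM */
	}
	top[ti][li] = entry;
	return 0;
}

int main(void)
{
	/* base_asce_alloc() analogue: the top table stands in for the ASCE */
	unsigned long **top = calloc(MODEL_ENTRIES, sizeof(*top));
	unsigned int i;

	if (!top || model_walk(top, 3, 7, 0x1000))
		return 1;
	printf("entry = %#lx\n", top[3][7]);

	/* base_asce_free() analogue: drop lower tables, then the top */
	for (i = 0; i < MODEL_ENTRIES; i++)
		free(top[i]);
	free(top);
	return 0;
}
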