Lines Matching +full:set +full:- +full:top

1 // SPDX-License-Identifier: GPL-2.0-or-later
7 * -- paulus
10 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
28 #include <asm/code-patching.h>
60 return bat_addrs[b].phys + (va - bat_addrs[b].start); in v_block_mapped()
72 && pa < (bat_addrs[b].limit-bat_addrs[b].start) in p_block_mapped()
74 return bat_addrs[b].start+(pa-bat_addrs[b].phys); in p_block_mapped()
89 return -1; in find_free_bat()
 95 * - max block size is 256 Mbytes on 6xx. in bat_block_size()
96 * - base address must be aligned to the block size. So the maximum block size
97 * is identified by the lowest bit set to 1 in the base address (for instance
99 * - block size has to be a power of two. This is calculated by finding the
100 * highest bit set to 1.
102 unsigned int bat_block_size(unsigned long base, unsigned long top) in bat_block_size() argument
105 unsigned int base_shift = (ffs(base) - 1) & 31; in bat_block_size()
106 unsigned int block_shift = (fls(top - base) - 1) & 31; in bat_block_size()
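The comment at lines 95-100 gives the three constraints on a BAT block: the hardware cap, the base alignment, and the power-of-two size. Below is a user-space sketch of that rule, with GCC builtins standing in for the kernel's ffs()/fls(); the 256 Mbyte cap comes from the comment above, and the two shift computations mirror lines 105-106. It is an illustration, not the kernel function verbatim.

#include <stdio.h>

static unsigned int bat_block_size(unsigned long base, unsigned long top)
{
	unsigned int max_size = 256U << 20;   /* hardware cap: 256 Mbytes on 6xx */
	/* alignment limit: lowest bit set in the base address */
	unsigned int base_shift = (__builtin_ffsll((unsigned long long)base) - 1) & 31;
	/* power-of-two limit: highest bit set in the span */
	unsigned int block_shift = (63 - __builtin_clzll((unsigned long long)(top - base))) & 31;
	unsigned int size = max_size;

	if ((1U << base_shift) < size)
		size = 1U << base_shift;
	if ((1U << block_shift) < size)
		size = 1U << block_shift;
	return size;
}

int main(void)
{
	/* base 0x20000000: alignment would allow 512 MB, the 256 MB cap wins */
	printf("%#x\n", bat_block_size(0x20000000, 0x40000000));
	/* 24 MB span on an 8 MB-aligned base: alignment limits the block to 8 MB */
	printf("%#x\n", bat_block_size(0x00800000, 0x02000000));
	return 0;
}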
112 * Set up one of the IBAT (block address translation) register pairs.
119 unsigned int bl = (size >> 17) - 1; in setibat()
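Line 119 encodes the mapped size into the BAT's BL (block length) field. BAT blocks range from 128 Kbytes (BL = 0) up to 256 Mbytes (BL = 0x7ff), and for power-of-two sizes the field is simply (size >> 17) - 1. A small stand-alone check of that encoding:

#include <stdio.h>

int main(void)
{
	unsigned long sizes[] = { 128UL << 10, 8UL << 20, 256UL << 20 };
	int i;

	for (i = 0; i < 3; i++) {
		unsigned int bl = (sizes[i] >> 17) - 1;
		/* 128 KB -> 0x0, 8 MB -> 0x3f, 256 MB -> 0x7ff */
		printf("size %#9lx -> BL %#x\n", sizes[i], bl);
	}
	return 0;
}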
142 static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long top) in __mmu_mapin_ram() argument
146 while ((idx = find_free_bat()) != -1 && base != top) { in __mmu_mapin_ram()
147 unsigned int size = bat_block_size(base, top); in __mmu_mapin_ram()
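Lines 146-147 show the shape of the BAT mapping loop: while a BAT register is still free and the range is not fully covered, carve off the largest block the alignment and size rules allow and advance. The simulation below follows that pattern; the simplified block_size() helper and the assumption of four usable BATs are illustrations, not the kernel code.

#include <stdio.h>

/* Largest naturally aligned power-of-two block at base, capped at 256 MB. */
static unsigned long block_size(unsigned long base, unsigned long top)
{
	unsigned long size = 256UL << 20;

	while (size > top - base || (base & (size - 1)))
		size >>= 1;   /* shrink until it both fits and is aligned */
	return size;
}

int main(void)
{
	unsigned long base = 0, top = 96UL << 20;   /* cover 96 MB of RAM */
	int idx, nbats = 4;                         /* assume 4 usable BATs */

	for (idx = 0; idx < nbats && base != top; idx++) {
		unsigned long size = block_size(base, top);
		printf("BAT%d: %#09lx..%#09lx (%lu MB)\n",
		       idx, base, base + size, size >> 20);
		base += size;
	}
	printf("covered up to %#lx\n", base);
	return 0;
}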
158 unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) in mmu_mapin_ram() argument
161 unsigned long border = (unsigned long)__srwx_boundary - PAGE_OFFSET; in mmu_mapin_ram()
164 size = roundup_pow_of_two((unsigned long)_einittext - PAGE_OFFSET); in mmu_mapin_ram()
168 pr_debug_once("Read-Write memory mapped without BATs\n"); in mmu_mapin_ram()
171 if (top >= border) in mmu_mapin_ram()
172 top = border; in mmu_mapin_ram()
175 if (!strict_kernel_rwx_enabled() || base >= border || top <= border) in mmu_mapin_ram()
176 return __mmu_mapin_ram(base, top); in mmu_mapin_ram()
182 return __mmu_mapin_ram(border, top); in mmu_mapin_ram()
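Lines 158-182 split the work around the read-only/read-write boundary: when strict kernel RWX is enabled and the boundary falls inside the range, the region below __srwx_boundary and the region above it are handed to __mmu_mapin_ram() separately so they can receive different protections. A sketch of that decision with stand-in names and hypothetical addresses:

#include <stdio.h>

static void map_range(const char *what, unsigned long base, unsigned long top)
{
	printf("%-8s %#09lx..%#09lx\n", what, base, top);
}

static void mapin_ram(unsigned long base, unsigned long top,
		      unsigned long border, int strict_rwx)
{
	/* Boundary outside the range, or no strict RWX: one pass is enough. */
	if (!strict_rwx || base >= border || top <= border) {
		map_range("all", base, top);
		return;
	}
	map_range("text/ro", base, border);   /* below the boundary */
	map_range("data/rw", border, top);    /* above the boundary */
}

int main(void)
{
	/* Hypothetical layout: 128 MB of RAM, RO/RW boundary at 16 MB. */
	mapin_ram(0, 128UL << 20, 16UL << 20, 1);
	mapin_ram(0, 128UL << 20, 16UL << 20, 0);
	return 0;
}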
191 if (addr > ALIGN(MODULES_END, SZ_256M) - 1) in is_module_segment()
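The module check at line 191 works at 256 Mbyte granularity (the span covered by one segment register) rather than on the exact module addresses, so the bounds are rounded out to 256 Mbytes. A stand-alone version of that check; the MODULES_VADDR/MODULES_END values are hypothetical, not the real book3s/32 layout.

#include <stdbool.h>
#include <stdio.h>

#define SZ_256M          0x10000000UL
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
#define ALIGN(x, a)      ALIGN_DOWN((x) + (a) - 1, (a))

/* Hypothetical module area, for the sake of the example only. */
#define MODULES_VADDR    0xb0000000UL
#define MODULES_END      0xc0000000UL

static bool is_module_segment(unsigned long addr)
{
	if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M))
		return false;
	if (addr > ALIGN(MODULES_END, SZ_256M) - 1)
		return false;
	return true;
}

int main(void)
{
	printf("%d %d %d\n",
	       is_module_segment(0xafffffffUL),   /* below: 0 */
	       is_module_segment(0xb8000000UL),   /* inside: 1 */
	       is_module_segment(0xc0000000UL));  /* above:  0 */
	return 0;
}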
200 unsigned long base = (unsigned long)_stext - PAGE_OFFSET; in mmu_mark_initmem_nx()
201 unsigned long top = ALIGN((unsigned long)_etext - PAGE_OFFSET, SZ_128K); in mmu_mark_initmem_nx() local
202 unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET; in mmu_mark_initmem_nx()
205 for (i = 0; i < nb - 1 && base < top;) { in mmu_mark_initmem_nx()
206 size = bat_block_size(base, top); in mmu_mark_initmem_nx()
210 if (base < top) { in mmu_mark_initmem_nx()
211 size = bat_block_size(base, top); in mmu_mark_initmem_nx()
212 if ((top - base) > size) { in mmu_mark_initmem_nx()
227 /* Do not set NX on VM space for modules */ in mmu_mark_initmem_nx()
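Lines 205-212 cover the kernel text with IBATs: all but the last slot take the natural block size, and if the remainder still does not fit in one naturally sized block the final block presumably has to be oversized, mapping slightly more than the text itself. A small simulation of that pass; the doubling of the final block and the three-slot limit are assumptions for illustration.

#include <stdio.h>

static unsigned long block_size(unsigned long base, unsigned long top)
{
	unsigned long size = 256UL << 20;

	while (size > top - base || (base & (size - 1)))
		size >>= 1;
	return size;
}

int main(void)
{
	unsigned long base = 0;
	unsigned long top = 0x780000;   /* 7.5 MB of text, 128 KB aligned */
	int i, nb = 3;                  /* assume 3 usable IBAT slots */

	for (i = 0; i < nb - 1 && base < top; i++) {
		unsigned long size = block_size(base, top);
		printf("IBAT%d: %#09lx + %lu KB\n", i, base, size >> 10);
		base += size;
	}
	if (base < top) {
		unsigned long size = block_size(base, top);
		if (top - base > size)
			size <<= 1;   /* oversize the last block to cover the tail */
		printf("IBAT%d: %#09lx + %lu KB (last)\n", i, base, size >> 10);
		base += size;
	}
	return 0;
}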
251 * Set up one of the D BAT (block address translation) register pairs.
263 if (index == -1) in setbat()
265 if (index == -1) { in setbat()
276 bl = (size >> 17) - 1; in setbat()
291 bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1; in setbat()
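Lines 276 and 291 are inverses of each other: the BL field is (size >> 17) - 1, and ((bl + 1) << 17) recovers the size, so .limit ends up being the last byte covered by the mapping. A quick round-trip check with a hypothetical 16 Mbyte DBAT:

#include <stdio.h>

int main(void)
{
	unsigned long virt = 0xc0000000UL;           /* hypothetical mapping base */
	unsigned int size = 16 << 20;                /* 16 MB DBAT */
	unsigned int bl = (size >> 17) - 1;          /* BL field: 0x7f for 16 MB */
	unsigned long limit = virt + ((bl + 1UL) << 17) - 1;

	printf("bl=%#x limit=%#lx\n", bl, limit);    /* limit = 0xc0ffffff */
	return 0;
}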
306 add_hash_page(mm->context.id, ea, pmd_val(*pmd)); in hash_preload()
322 * called with either mm->page_table_lock held or ptl lock held in __update_mmu_cache()
325 /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */ in __update_mmu_cache()
330 if (!current->thread.regs) in __update_mmu_cache()
334 if (TRAP(current->thread.regs) != 0x300 && TRAP(current->thread.regs) != 0x400) in __update_mmu_cache()
337 hash_preload(vma->vm_mm, address); in __update_mmu_cache()
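Lines 325-337 restrict the preload to real hash faults: there must be a saved register set, and the trap has to be 0x300 (data storage interrupt) or 0x400 (instruction storage interrupt); anything else is ignored. A trivial illustration of that trap filter outside the kernel context:

#include <stdbool.h>
#include <stdio.h>

static bool wants_hash_preload(unsigned int trap)
{
	/* 0x300 = data storage interrupt, 0x400 = instruction storage interrupt */
	return trap == 0x300 || trap == 0x400;
}

int main(void)
{
	printf("%d %d %d\n",
	       wants_hash_preload(0x300),   /* 1 */
	       wants_hash_preload(0x400),   /* 1 */
	       wants_hash_preload(0x500));  /* 0: external interrupt, skip */
	return 0;
}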
353 #define SDR1_LOW_BITS ((n_hpteg - 1) >> 10) in MMU_init_hw()
365 if (n_hpteg & (n_hpteg - 1)) { in MMU_init_hw()
385 Hash_mask = n_hpteg - 1; in MMU_init_hw()
386 hash_mb2 = hash_mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg; in MMU_init_hw()
388 hash_mb2 = 16 - LG_HPTEG_SIZE; in MMU_init_hw()
393 unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE); in MMU_init_hw_patch()
394 unsigned int hash = (unsigned int)Hash - PAGE_OFFSET; in MMU_init_hw_patch()
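Lines 353-394 are the hash-table sizing arithmetic. Worked numbers below, assuming LG_HPTEG_SIZE = 6 (a PTEG is eight 8-byte entries) and n_hpteg = 8192 groups; the actual group count depends on total memory and, per line 365, must be a power of two.

#include <stdio.h>

#define LG_HPTEG_SIZE 6

int main(void)
{
	unsigned int n_hpteg = 8192;      /* assumed group count (power of two) */
	unsigned int lg_n_hpteg = 13;     /* log2(8192) */

	unsigned int sdr1_low = (n_hpteg - 1) >> 10;             /* SDR1_LOW_BITS */
	unsigned int hash_mask = n_hpteg - 1;                    /* Hash_mask */
	unsigned int hash_mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;  /* hash_mb */
	unsigned int hmask = hash_mask >> (16 - LG_HPTEG_SIZE);  /* hmask in MMU_init_hw_patch */

	printf("hash table: %u KB\n", (n_hpteg << LG_HPTEG_SIZE) >> 10);   /* 512 KB */
	printf("SDR1 low bits=%#x Hash_mask=%#x hash_mb=%u hmask=%#x\n",
	       sdr1_low, hash_mask, hash_mb, hmask);
	return 0;
}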