Lines Matching full:addr

22 unsigned long addr; member
29 unsigned long addr; member
33 unsigned long addr; member
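The three addr members at lines 22, 29 and 33 belong to the per-operation union that this file queues up before talking to the host. A minimal sketch of that container, inferred from the accesses further down (op->u.mmap.addr, op->u.munmap.addr, op->u.mprotect.addr); the exact field set and ordering are assumptions, not the verbatim kernel definition:

/* Sketch of the batched-operation record implied by the excerpts below.
 * Member names mirror the accesses; extra fields are assumptions. */
struct host_vm_op_sketch {
	enum { NONE, MMAP, MUNMAP, MPROTECT } type;
	union {
		struct {
			unsigned long addr;		/* cf. line 22 */
			unsigned long len;
			unsigned int prot;
			int fd;
			unsigned long long offset;
		} mmap;
		struct {
			unsigned long addr;		/* cf. line 29 */
			unsigned long len;
		} munmap;
		struct {
			unsigned long addr;		/* cf. line 33 */
			unsigned long len;
			unsigned int prot;
		} mprotect;
	} u;
};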
73 ret = map(&hvc->mm->context.id, op->u.mmap.addr, in do_ops()
79 map_memory(op->u.mmap.addr, op->u.mmap.offset, in do_ops()
85 op->u.munmap.addr, in do_ops()
90 (void *) op->u.munmap.addr, in do_ops()
97 op->u.mprotect.addr, in do_ops()
103 (void *) op->u.mprotect.addr, in do_ops()
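do_ops() drains the queued array with one host call per entry, switching on the operation type; the excerpts show two call variants per type (for example map() at line 73 versus map_memory() at line 79), presumably the userspace and kernel paths, which the sketch below collapses into a single hypothetical helper each:

/* Hypothetical host-side helpers standing in for the real map/unmap/protect
 * calls made from do_ops(); the signatures are assumptions. */
int host_map(unsigned long addr, unsigned long len, unsigned int prot,
	     int fd, unsigned long long offset);
int host_unmap(unsigned long addr, unsigned long len);
int host_protect(unsigned long addr, unsigned long len, unsigned int prot);

/* Dispatch loop in the spirit of do_ops(): one host call per queued entry,
 * stopping at the first failure. Reuses the struct sketched above. */
static int do_ops_sketch(struct host_vm_op_sketch *ops, int count)
{
	int i, ret = 0;

	for (i = 0; i < count && !ret; i++) {
		struct host_vm_op_sketch *op = &ops[i];

		switch (op->type) {
		case MMAP:
			ret = host_map(op->u.mmap.addr, op->u.mmap.len,
				       op->u.mmap.prot, op->u.mmap.fd,
				       op->u.mmap.offset);
			break;
		case MUNMAP:
			ret = host_unmap(op->u.munmap.addr, op->u.munmap.len);
			break;
		case MPROTECT:
			ret = host_protect(op->u.mprotect.addr,
					   op->u.mprotect.len,
					   op->u.mprotect.prot);
			break;
		default:
			break;
		}
	}
	return ret;
}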
135 (last->u.mmap.addr + last->u.mmap.len == virt) && in add_mmap()
150 .u = { .mmap = { .addr = virt, in add_mmap()
159 static int add_munmap(unsigned long addr, unsigned long len, in add_munmap() argument
168 (last->u.munmap.addr + last->u.munmap.len == addr)) { in add_munmap()
181 .u = { .munmap = { .addr = addr, in add_munmap()
186 static int add_mprotect(unsigned long addr, unsigned long len, in add_mprotect() argument
195 (last->u.mprotect.addr + last->u.mprotect.len == addr) && in add_mprotect()
209 .u = { .mprotect = { .addr = addr, in add_mprotect()
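add_mmap(), add_munmap() and add_mprotect() all coalesce: if the previously queued op is of the same type and ends exactly where the new range starts (lines 135, 168, 195), they simply grow its len; otherwise they append a fresh entry with a designated initializer (lines 150, 181, 209). A sketch of that pattern for the munmap case, reusing the struct above; the index/capacity bookkeeping names are assumptions, and the full-queue flush through do_ops() is only hinted at:

/* Coalescing sketch for add_munmap(): merge with the previous op when the
 * new range is contiguous, otherwise queue a fresh entry. */
static int add_munmap_sketch(unsigned long addr, unsigned long len,
			     struct host_vm_op_sketch *ops, int *index,
			     int max)
{
	struct host_vm_op_sketch *last;

	if (*index > 0) {
		last = &ops[*index - 1];
		if (last->type == MUNMAP &&
		    last->u.munmap.addr + last->u.munmap.len == addr) {
			last->u.munmap.len += len;	/* contiguous: just extend */
			return 0;
		}
	}

	if (*index >= max)
		return -1;	/* queue full: the real code would flush via do_ops() first */

	ops[*index] = (struct host_vm_op_sketch) {
		.type = MUNMAP,
		.u = { .munmap = { .addr = addr, .len = len } },
	};
	(*index)++;
	return 0;
}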
217 static inline int update_pte_range(pmd_t *pmd, unsigned long addr, in update_pte_range() argument
224 pte = pte_offset_kernel(pmd, addr); in update_pte_range()
240 ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK, in update_pte_range()
243 ret = add_munmap(addr, PAGE_SIZE, hvc); in update_pte_range()
245 ret = add_mprotect(addr, PAGE_SIZE, prot, hvc); in update_pte_range()
247 } while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret)); in update_pte_range()
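All of the walkers above terminate their do/while loops with a comma expression: the pointer and address increments are side effects evaluated on every pass, and only the final ((addr < end) && !ret) is the actual loop test. A tiny standalone illustration, with 4096 standing in for PAGE_SIZE:

#include <stdio.h>

/* Only the last operand of the comma expression decides whether the
 * loop continues; the addr update runs unconditionally each pass. */
int main(void)
{
	unsigned long addr = 0, end = 4096 * 4;
	int ret = 0, pages = 0;

	do {
		pages++;			/* stand-in for per-PTE work */
	} while (addr += 4096, ((addr < end) && !ret));

	printf("visited %d pages\n", pages);	/* prints: visited 4 pages */
	return 0;
}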
251 static inline int update_pmd_range(pud_t *pud, unsigned long addr, in update_pmd_range() argument
259 pmd = pmd_offset(pud, addr); in update_pmd_range()
261 next = pmd_addr_end(addr, end); in update_pmd_range()
264 ret = add_munmap(addr, next - addr, hvc); in update_pmd_range()
268 else ret = update_pte_range(pmd, addr, next, hvc); in update_pmd_range()
269 } while (pmd++, addr = next, ((addr < end) && !ret)); in update_pmd_range()
273 static inline int update_pud_range(p4d_t *p4d, unsigned long addr, in update_pud_range() argument
281 pud = pud_offset(p4d, addr); in update_pud_range()
283 next = pud_addr_end(addr, end); in update_pud_range()
286 ret = add_munmap(addr, next - addr, hvc); in update_pud_range()
290 else ret = update_pmd_range(pud, addr, next, hvc); in update_pud_range()
291 } while (pud++, addr = next, ((addr < end) && !ret)); in update_pud_range()
295 static inline int update_p4d_range(pgd_t *pgd, unsigned long addr, in update_p4d_range() argument
303 p4d = p4d_offset(pgd, addr); in update_p4d_range()
305 next = p4d_addr_end(addr, end); in update_p4d_range()
308 ret = add_munmap(addr, next - addr, hvc); in update_p4d_range()
312 ret = update_pud_range(p4d, addr, next, hvc); in update_p4d_range()
313 } while (p4d++, addr = next, ((addr < end) && !ret)); in update_p4d_range()
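update_pmd_range(), update_pud_range() and update_p4d_range() all follow the same shape: clamp the current sub-range to the next table boundary via the corresponding *_addr_end() helper, queue a single munmap when the entry is absent (the presence checks do not match addr and so are not shown), and otherwise descend one level. A generic sketch of that one-level step, with hypothetical entry_present()/queue_munmap()/descend() helpers:

/* Hypothetical helpers: is the entry at this level populated, queue a
 * munmap for a hole, and walk the next lower level. */
int entry_present(unsigned long addr);
int queue_munmap(unsigned long addr, unsigned long len);
int descend(unsigned long addr, unsigned long end);

/* Round addr up to the next level-size boundary, clamped to end; the real
 * pmd/pud/p4d_addr_end() macros do the same and additionally guard against
 * wraparound at the top of the address space. */
static unsigned long addr_end_sketch(unsigned long addr, unsigned long end,
				     unsigned long level_size)
{
	unsigned long boundary = (addr + level_size) & ~(level_size - 1);

	return boundary < end ? boundary : end;
}

/* One level of the update_*_range() pattern: an absent entry becomes a
 * single munmap covering the whole sub-range, a present one is walked at
 * the next level down. */
static int update_level_sketch(unsigned long addr, unsigned long end,
			       unsigned long level_size)
{
	unsigned long next;
	int ret = 0;

	do {
		next = addr_end_sketch(addr, end, level_size);
		if (!entry_present(addr))
			ret = queue_munmap(addr, next - addr);
		else
			ret = descend(addr, next);
	} while (addr = next, ((addr < end) && !ret));

	return ret;
}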
322 unsigned long addr = start_addr, next; in fix_range_common() local
326 pgd = pgd_offset(mm, addr); in fix_range_common()
328 next = pgd_addr_end(addr, end_addr); in fix_range_common()
331 ret = add_munmap(addr, next - addr, &hvc); in fix_range_common()
335 ret = update_p4d_range(pgd, addr, next, &hvc); in fix_range_common()
336 } while (pgd++, addr = next, ((addr < end_addr) && !ret)); in fix_range_common()
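fix_range_common() drives the walk for a user address range: it starts at pgd_offset(mm, addr), chops the range into pgd-sized pieces with pgd_addr_end(), and hands each piece to update_p4d_range(), accumulating operations in the on-stack hvc. Whatever is still queued when the loop ends presumably gets flushed to the host in one final call; that call does not match addr and is not shown, so the outline below is an assumption, reusing the sketches above:

/* Assumed outline of fix_range_common(): batch while walking, then flush
 * the remainder in one go. walk_pgd_range() is a hypothetical stand-in for
 * the pgd loop and the update_p4d_range() chain shown above. */
int walk_pgd_range(unsigned long start, unsigned long end,
		   struct host_vm_op_sketch *ops, int *index, int max);

static int fix_range_sketch(unsigned long start, unsigned long end)
{
	struct host_vm_op_sketch ops[64];
	int index = 0, ret;

	ret = walk_pgd_range(start, end, ops, &index, 64);
	if (!ret && index > 0)
		ret = do_ops_sketch(ops, index);	/* flush whatever is left */
	return ret;
}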
359 unsigned long addr, last; in flush_tlb_kernel_range_common() local
365 for (addr = start; addr < end;) { in flush_tlb_kernel_range_common()
366 pgd = pgd_offset(mm, addr); in flush_tlb_kernel_range_common()
368 last = ADD_ROUND(addr, PGDIR_SIZE); in flush_tlb_kernel_range_common()
373 err = add_munmap(addr, last - addr, &hvc); in flush_tlb_kernel_range_common()
378 addr = last; in flush_tlb_kernel_range_common()
382 p4d = p4d_offset(pgd, addr); in flush_tlb_kernel_range_common()
384 last = ADD_ROUND(addr, P4D_SIZE); in flush_tlb_kernel_range_common()
389 err = add_munmap(addr, last - addr, &hvc); in flush_tlb_kernel_range_common()
394 addr = last; in flush_tlb_kernel_range_common()
398 pud = pud_offset(p4d, addr); in flush_tlb_kernel_range_common()
400 last = ADD_ROUND(addr, PUD_SIZE); in flush_tlb_kernel_range_common()
405 err = add_munmap(addr, last - addr, &hvc); in flush_tlb_kernel_range_common()
410 addr = last; in flush_tlb_kernel_range_common()
414 pmd = pmd_offset(pud, addr); in flush_tlb_kernel_range_common()
416 last = ADD_ROUND(addr, PMD_SIZE); in flush_tlb_kernel_range_common()
421 err = add_munmap(addr, last - addr, &hvc); in flush_tlb_kernel_range_common()
426 addr = last; in flush_tlb_kernel_range_common()
430 pte = pte_offset_kernel(pmd, addr); in flush_tlb_kernel_range_common()
433 err = add_munmap(addr, PAGE_SIZE, &hvc); in flush_tlb_kernel_range_common()
438 err = add_mmap(addr, pte_val(*pte) & PAGE_MASK, in flush_tlb_kernel_range_common()
443 err = add_mprotect(addr, PAGE_SIZE, 0, &hvc); in flush_tlb_kernel_range_common()
445 addr += PAGE_SIZE; in flush_tlb_kernel_range_common()
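flush_tlb_kernel_range_common() walks the kernel page tables by hand. At each level, when an entry is absent it jumps addr forward to the next boundary for that level with ADD_ROUND() and queues one munmap covering the whole skipped span (lines 368/373/378 and their p4d/pud/pmd counterparts). ADD_ROUND() itself is not among the matching lines; a plausible power-of-two rounding and a worked example:

#include <stdio.h>

/* Assumed rounding: move n up to the next inc-aligned boundary, so an
 * absent upper-level entry can be skipped with a single munmap of
 * last - addr bytes. Requires inc to be a power of two. */
#define ADD_ROUND_SKETCH(n, inc) (((n) + (inc)) & ~((unsigned long)(inc) - 1))

int main(void)
{
	unsigned long addr = 0x12345000UL;
	unsigned long pmd_size = 1UL << 21;	/* 2 MiB, a typical PMD_SIZE */
	unsigned long last = ADD_ROUND_SKETCH(addr, pmd_size);

	printf("absent entry: munmap %#lx bytes, resume at %#lx\n",
	       last - addr, last);
	return 0;
}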
552 void __flush_tlb_one(unsigned long addr) in __flush_tlb_one() argument
554 flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE); in __flush_tlb_one()