Lines Matching refs:page_addr

195 void *page_addr; in binder_update_page_range() local
214 for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { in binder_update_page_range()
215 page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE]; in binder_update_page_range()
237 for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { in binder_update_page_range()
242 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_update_page_range()
264 alloc->pid, page_addr); in binder_update_page_range()
270 ret = map_kernel_range_noflush((unsigned long)page_addr, in binder_update_page_range()
273 flush_cache_vmap((unsigned long)page_addr, in binder_update_page_range()
274 (unsigned long)page_addr + PAGE_SIZE); in binder_update_page_range()
277 alloc->pid, page_addr); in binder_update_page_range()
281 (uintptr_t)page_addr + alloc->user_buffer_offset; in binder_update_page_range()
302 for (page_addr = end - PAGE_SIZE; page_addr >= start; in binder_update_page_range()
303 page_addr -= PAGE_SIZE) { in binder_update_page_range()
307 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_update_page_range()
319 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); in binder_update_page_range()
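Lines 195-319 above are the references inside binder_update_page_range() in the binder allocator (binder_alloc.c): the function walks the requested range one page at a time, turns each kernel address into an index into alloc->pages by subtracting the buffer base and dividing by PAGE_SIZE, maps the page with map_kernel_range_noflush()/flush_cache_vmap(), and derives the matching userspace address by adding alloc->user_buffer_offset. Below is a minimal user-space sketch of that pointer arithmetic only; the struct and helper names (alloc_model, page_index, user_page_addr, MODEL_PAGE_SIZE) are illustrative stand-ins, not the kernel's own.

#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096UL

struct alloc_model {
	char *buffer;                 /* kernel VA where the buffer area starts */
	uintptr_t user_buffer_offset; /* delta between user and kernel VAs */
};

/* index = (page_addr - alloc->buffer) / PAGE_SIZE, as on lines 215 and 242 */
static size_t page_index(const struct alloc_model *m, const char *page_addr)
{
	return (size_t)(page_addr - m->buffer) / MODEL_PAGE_SIZE;
}

/* user VA = kernel VA + user_buffer_offset, as on line 281 */
static uintptr_t user_page_addr(const struct alloc_model *m, const char *page_addr)
{
	return (uintptr_t)page_addr + m->user_buffer_offset;
}

int main(void)
{
	static char area[4 * MODEL_PAGE_SIZE];
	struct alloc_model m = { .buffer = area, .user_buffer_offset = 0x10000 };

	/* walk start..end a page at a time, as the loop on line 214 does */
	for (char *p = area; p < area + sizeof(area); p += MODEL_PAGE_SIZE)
		printf("index %zu -> user VA %#jx\n",
		       page_index(&m, p), (uintmax_t)user_page_addr(&m, p));
	return 0;
}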
813 void *page_addr; in binder_alloc_deferred_release() local
821 page_addr = alloc->buffer + i * PAGE_SIZE; in binder_alloc_deferred_release()
824 __func__, alloc->pid, i, page_addr, in binder_alloc_deferred_release()
826 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); in binder_alloc_deferred_release()
952 uintptr_t page_addr; in binder_alloc_free_page() local
964 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; in binder_alloc_free_page()
981 page_addr + alloc->user_buffer_offset, in binder_alloc_free_page()
992 unmap_kernel_range(page_addr, PAGE_SIZE); in binder_alloc_free_page()
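The remaining references run the same arithmetic in the opposite direction: binder_alloc_deferred_release() (lines 813-826) rebuilds page_addr as alloc->buffer + i * PAGE_SIZE before calling unmap_kernel_range(), and the shrinker path binder_alloc_free_page() (lines 952-992) does the same from a single index, also forming the userspace address as page_addr + alloc->user_buffer_offset. A hedged sketch of the index-to-address direction follows, reusing the alloc_model type and MODEL_PAGE_SIZE from the sketch above; the function names are again illustrative, not the kernel's.

/* page_addr = alloc->buffer + index * PAGE_SIZE, as on lines 821 and 964 */
static char *kernel_page_addr(const struct alloc_model *m, size_t index)
{
	return m->buffer + index * MODEL_PAGE_SIZE;
}

static void free_one_page(const struct alloc_model *m, size_t index)
{
	char *page_addr = kernel_page_addr(m, index);

	/* userspace side, as built on line 981 */
	uintptr_t uaddr = (uintptr_t)page_addr + m->user_buffer_offset;

	/* the real code unmaps here; the model only reports the addresses */
	printf("index %zu: kernel VA %p, user VA %#jx\n",
	       index, (void *)page_addr, (uintmax_t)uaddr);
}

With the model instance from the first sketch, free_one_page(&m, 2) would report the kernel and user addresses of the third page of the buffer area.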