Lines matching full:hvc

Identifier search results for hvc: each entry gives the source line number, the matching code, and the enclosing function; "argument" and "local" mark the lines where an hvc variable is declared.

62 static int do_ops(struct host_vm_change *hvc, int end, in do_ops() argument
69 op = &hvc->ops[i]; in do_ops()
72 if (hvc->userspace) in do_ops()
73 ret = map(&hvc->mm->context.id, op->u.mmap.addr, in do_ops()
77 &hvc->data); in do_ops()
83 if (hvc->userspace) in do_ops()
84 ret = unmap(&hvc->mm->context.id, in do_ops()
87 &hvc->data); in do_ops()
95 if (hvc->userspace) in do_ops()
96 ret = protect(&hvc->mm->context.id, in do_ops()
100 finished, &hvc->data); in do_ops()
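
These fragments appear to come from the User-Mode Linux TLB-flush code (arch/um/kernel/tlb.c in kernels of this vintage). A struct host_vm_change accumulates pending host-side mmap/munmap/mprotect operations, and do_ops() drains ops[0..end), passing finished through so the backend knows whether this is the last batch; the hvc->userspace tests above choose between operating on the host process (map/unmap/protect against hvc->mm->context.id) and on the kernel's own mappings. Below is a minimal, self-contained C model of that dispatch, a sketch rather than the kernel's code: the struct layout, the 16-entry batch size, and the printf stubs standing in for the real backends are all assumptions. The later sketches reuse these definitions.

#include <stdio.h>

#define ARRAY_SIZE(a)   ((int)(sizeof(a) / sizeof((a)[0])))
#define PAGE_SIZE       4096UL

enum op_type { NONE, MMAP, MUNMAP, MPROTECT };

struct host_vm_op {
        enum op_type type;
        unsigned long addr, len;
        unsigned int prot;
};

struct host_vm_change {
        struct host_vm_op ops[16];      /* batch size is a model choice */
        int index;                      /* ops queued so far */
        int userspace;                  /* operate on a host process? */
        int force;                      /* redo mappings unconditionally */
};

/* Drain ops[0..end); "finished" mirrors do_ops()'s third argument and
 * marks the last batch of a flush.  The model prints instead of calling
 * the host-side map()/unmap()/protect() backends. */
static int do_ops(struct host_vm_change *hvc, int end, int finished)
{
        int i, ret = 0;

        for (i = 0; i < end && !ret; i++) {
                struct host_vm_op *op = &hvc->ops[i];

                switch (op->type) {
                case MMAP:
                        printf("map     %#lx..%#lx prot=%u%s\n", op->addr,
                               op->addr + op->len, op->prot,
                               finished ? " (last batch)" : "");
                        break;
                case MUNMAP:
                        printf("unmap   %#lx..%#lx\n",
                               op->addr, op->addr + op->len);
                        break;
                case MPROTECT:
                        printf("protect %#lx..%#lx prot=%u\n",
                               op->addr, op->addr + op->len, op->prot);
                        break;
                default:
                        break;
                }
        }
        return ret;
}
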
122 unsigned int prot, struct host_vm_change *hvc) in add_mmap() argument
128 if (hvc->userspace) in add_mmap()
132 if (hvc->index != 0) { in add_mmap()
133 last = &hvc->ops[hvc->index - 1]; in add_mmap()
143 if (hvc->index == ARRAY_SIZE(hvc->ops)) { in add_mmap()
144 ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0); in add_mmap()
145 hvc->index = 0; in add_mmap()
148 hvc->ops[hvc->index++] = ((struct host_vm_op) in add_mmap()
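
add_mmap() shows the queueing discipline around that batch: grow the previous op when the new range is contiguous and compatible, flush a full batch through do_ops() with finished == 0, and only then append a fresh entry (the compound-literal assignment at source line 148). A sketch continuing the model above; the merge test is an assumption, and the real function takes more arguments than the model keeps:

static int add_mmap(unsigned long virt, unsigned long len,
                    unsigned int prot, struct host_vm_change *hvc)
{
        struct host_vm_op *last;
        int ret = 0;

        /* try to extend the most recent op instead of queueing anew */
        if (hvc->index != 0) {
                last = &hvc->ops[hvc->index - 1];
                if (last->type == MMAP && last->prot == prot &&
                    last->addr + last->len == virt) {
                        last->len += len;
                        return 0;
                }
        }

        /* batch full: drain it now; more batches will follow */
        if (hvc->index == ARRAY_SIZE(hvc->ops)) {
                ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
                hvc->index = 0;
        }

        hvc->ops[hvc->index++] = (struct host_vm_op)
                { .type = MMAP, .addr = virt, .len = len, .prot = prot };
        return ret;
}
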
160 struct host_vm_change *hvc) in add_munmap() argument
165 if (hvc->index != 0) { in add_munmap()
166 last = &hvc->ops[hvc->index - 1]; in add_munmap()
174 if (hvc->index == ARRAY_SIZE(hvc->ops)) { in add_munmap()
175 ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0); in add_munmap()
176 hvc->index = 0; in add_munmap()
179 hvc->ops[hvc->index++] = ((struct host_vm_op) in add_munmap()

187 unsigned int prot, struct host_vm_change *hvc) in add_mprotect() argument
192 if (hvc->index != 0) { in add_mprotect()
193 last = &hvc->ops[hvc->index - 1]; in add_mprotect()
202 if (hvc->index == ARRAY_SIZE(hvc->ops)) { in add_mprotect()
203 ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0); in add_mprotect()
204 hvc->index = 0; in add_mprotect()
207 hvc->ops[hvc->index++] = ((struct host_vm_op) in add_mprotect()
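
The add_munmap() and add_mprotect() matches show the same queue-or-merge shape, differing only in the op type and the merge condition. Completing the model under the same assumptions:

static int add_munmap(unsigned long addr, unsigned long len,
                      struct host_vm_change *hvc)
{
        struct host_vm_op *last;
        int ret = 0;

        if (hvc->index != 0) {
                last = &hvc->ops[hvc->index - 1];
                if (last->type == MUNMAP &&
                    last->addr + last->len == addr) {
                        last->len += len;       /* extend pending unmap */
                        return 0;
                }
        }

        if (hvc->index == ARRAY_SIZE(hvc->ops)) {
                ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
                hvc->index = 0;
        }

        hvc->ops[hvc->index++] = (struct host_vm_op)
                { .type = MUNMAP, .addr = addr, .len = len };
        return ret;
}

static int add_mprotect(unsigned long addr, unsigned long len,
                        unsigned int prot, struct host_vm_change *hvc)
{
        struct host_vm_op *last;
        int ret = 0;

        if (hvc->index != 0) {
                last = &hvc->ops[hvc->index - 1];
                if (last->type == MPROTECT && last->prot == prot &&
                    last->addr + last->len == addr) {
                        last->len += len;       /* widen pending mprotect */
                        return 0;
                }
        }

        if (hvc->index == ARRAY_SIZE(hvc->ops)) {
                ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
                hvc->index = 0;
        }

        hvc->ops[hvc->index++] = (struct host_vm_op)
                { .type = MPROTECT, .addr = addr, .len = len, .prot = prot };
        return ret;
}
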
219 struct host_vm_change *hvc) in update_pte_range() argument
237 if (hvc->force || pte_newpage(*pte)) { in update_pte_range()
241 PAGE_SIZE, prot, hvc); in update_pte_range()
243 ret = add_munmap(addr, PAGE_SIZE, hvc); in update_pte_range()
245 ret = add_mprotect(addr, PAGE_SIZE, prot, hvc); in update_pte_range()
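
update_pte_range() makes the leaf-level decision visible in the matches: when the flush is forced or the page is newly changed, a present page is (re)mapped and an absent one unmapped; otherwise a protection-only change becomes an mprotect. In the model, a toy pte carries just the flags tested above, standing in for pte_newpage()/pte_newprot() and for the protection bits computed from the real pte:

struct model_pte {
        int present, newpage, newprot;
        unsigned int prot;
};

static int update_pte_range(struct model_pte *pte, unsigned long addr,
                            unsigned long end, struct host_vm_change *hvc)
{
        int ret = 0;

        for (; addr < end && !ret; addr += PAGE_SIZE, pte++) {
                if (hvc->force || pte->newpage) {
                        if (pte->present)       /* (re)map the new page */
                                ret = add_mmap(addr, PAGE_SIZE,
                                               pte->prot, hvc);
                        else                    /* page went away */
                                ret = add_munmap(addr, PAGE_SIZE, hvc);
                        pte->newpage = 0;
                } else if (pte->newprot) {      /* permissions only */
                        ret = add_mprotect(addr, PAGE_SIZE, pte->prot, hvc);
                        pte->newprot = 0;
                }
        }
        return ret;
}
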
253 struct host_vm_change *hvc) in update_pmd_range() argument
263 if (hvc->force || pmd_newpage(*pmd)) { in update_pmd_range()
264 ret = add_munmap(addr, next - addr, hvc); in update_pmd_range()
268 else ret = update_pte_range(pmd, addr, next, hvc); in update_pmd_range()

275 struct host_vm_change *hvc) in update_pud_range() argument
285 if (hvc->force || pud_newpage(*pud)) { in update_pud_range()
286 ret = add_munmap(addr, next - addr, hvc); in update_pud_range()
290 else ret = update_pmd_range(pud, addr, next, hvc); in update_pud_range()

297 struct host_vm_change *hvc) in update_p4d_range() argument
307 if (hvc->force || p4d_newpage(*p4d)) { in update_p4d_range()
308 ret = add_munmap(addr, next - addr, hvc); in update_p4d_range()
312 ret = update_pud_range(p4d, addr, next, hvc); in update_p4d_range()
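
The update_pmd_range(), update_pud_range(), and update_p4d_range() matches all share one walker shape: step to the next entry boundary (clamped to end), emit a single munmap for an entire absent-but-changed span instead of one op per page, and otherwise recurse one level down. A model of one such level; the boundary rounding stands in for pmd_addr_end(), the 2 MiB PMD_SIZE is an assumption, and the pud/p4d levels would repeat the same code with a larger span:

#define PMD_SIZE        (512 * PAGE_SIZE)       /* must be a power of two */

struct model_pmd {
        int present, newpage;
        struct model_pte *ptes;         /* 512 leaf entries when present */
};

static int update_pmd_range(struct model_pmd *pmd, unsigned long addr,
                            unsigned long end, struct host_vm_change *hvc)
{
        unsigned long next;
        int ret = 0;

        while (addr < end && !ret) {
                /* next pmd boundary, clamped to end */
                next = (addr + PMD_SIZE) & ~(PMD_SIZE - 1);
                if (next > end)
                        next = end;
                if (!pmd->present) {
                        /* one munmap for the whole span, not 512 ops */
                        if (hvc->force || pmd->newpage) {
                                ret = add_munmap(addr, next - addr, hvc);
                                pmd->newpage = 0;
                        }
                } else
                        ret = update_pte_range(pmd->ptes +
                                               (addr % PMD_SIZE) / PAGE_SIZE,
                                               addr, next, hvc);
                addr = next;
                pmd++;
        }
        return ret;
}
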
321 struct host_vm_change hvc; in fix_range_common() local
325 hvc = INIT_HVC(mm, force, userspace); in fix_range_common()
331 ret = add_munmap(addr, next - addr, &hvc); in fix_range_common()
335 ret = update_p4d_range(pgd, addr, next, &hvc); in fix_range_common()
339 ret = do_ops(&hvc, hvc.index, 1); in fix_range_common()
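
fix_range_common() ties the pieces together: INIT_HVC seeds an empty batch with the flush's mm, force, and userspace flags, the page-table walk queues operations, and the final do_ops(&hvc, hvc.index, 1) drains whatever is still queued with finished set. The model collapses the pgd-to-pmd chain into the single level sketched above and passes its toy tables explicitly:

static void fix_range_common(struct model_pmd *tables, unsigned long start,
                             unsigned long end, int force, int userspace)
{
        /* INIT_HVC stand-in: empty batch bound to this flush's flags */
        struct host_vm_change hvc = {
                .index = 0, .userspace = userspace, .force = force,
        };
        int ret;

        ret = update_pmd_range(tables, start, end, &hvc);
        if (!ret)       /* drain the remainder; finished = 1 */
                ret = do_ops(&hvc, hvc.index, 1);
        if (ret)
                fprintf(stderr, "fix_range_common: failed, ret = %d\n", ret);
}

A small usage example: pages 0 and 1 coalesce into one map op, page 3 becomes an mprotect, and untouched pages produce nothing.

int main(void)
{
        struct model_pte ptes[8] = {
                [0] = { .present = 1, .newpage = 1, .prot = 5 },
                [1] = { .present = 1, .newpage = 1, .prot = 5 },
                [3] = { .newprot = 1, .prot = 4 },
        };
        struct model_pmd pmd = { .present = 1, .ptes = ptes };

        fix_range_common(&pmd, 0, 8 * PAGE_SIZE, 0, 1);
        return 0;
}
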
361 struct host_vm_change hvc; in flush_tlb_kernel_range_common() local
364 hvc = INIT_HVC(mm, force, userspace); in flush_tlb_kernel_range_common()
373 err = add_munmap(addr, last - addr, &hvc); in flush_tlb_kernel_range_common()
389 err = add_munmap(addr, last - addr, &hvc); in flush_tlb_kernel_range_common()
405 err = add_munmap(addr, last - addr, &hvc); in flush_tlb_kernel_range_common()
421 err = add_munmap(addr, last - addr, &hvc); in flush_tlb_kernel_range_common()
433 err = add_munmap(addr, PAGE_SIZE, &hvc); in flush_tlb_kernel_range_common()
439 PAGE_SIZE, 0, &hvc); in flush_tlb_kernel_range_common()
443 err = add_mprotect(addr, PAGE_SIZE, 0, &hvc); in flush_tlb_kernel_range_common()
448 err = do_ops(&hvc, hvc.index, 1); in flush_tlb_kernel_range_common()
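
flush_tlb_kernel_range_common() repeats add_munmap(addr, last - addr, &hvc) once per page-table level because the kernel-range walk is written out level by level: an absent entry at any level unmaps its whole aligned span and skips straight past it, so only present leaf ptes reach the per-page munmap/mmap/mprotect handling at source lines 433-443, and the final do_ops(&hvc, hvc.index, 1) drains the queue exactly as in fix_range_common(). A model of that per-level skip, assuming level_size is a power of two (the function name is a stand-in):

static unsigned long skip_absent_span(unsigned long addr, unsigned long end,
                                      unsigned long level_size, int newpage,
                                      struct host_vm_change *hvc, int *err)
{
        unsigned long last = (addr + level_size) & ~(level_size - 1);

        if (last > end)
                last = end;
        if (newpage)    /* whole span changed: one munmap covers it */
                *err = add_munmap(addr, last - addr, hvc);
        return last;    /* caller resumes the scan at the boundary */
}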