Lines matching refs: hvc
63 static int do_ops(struct host_vm_change *hvc, int end, in do_ops() argument
70 op = &hvc->ops[i]; in do_ops()
73 if (hvc->userspace) in do_ops()
74 ret = map(&hvc->mm->context.id, op->u.mmap.addr, in do_ops()
78 &hvc->data); in do_ops()
84 if (hvc->userspace) in do_ops()
85 ret = unmap(&hvc->mm->context.id, in do_ops()
88 &hvc->data); in do_ops()
96 if (hvc->userspace) in do_ops()
97 ret = protect(&hvc->mm->context.id, in do_ops()
101 finished, &hvc->data); in do_ops()
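
The matches above are do_ops(), which drains the batched operation array and dispatches each entry as a host mmap/munmap/mprotect, choosing between the userspace and kernel paths via hvc->userspace. A minimal standalone model of that dispatch loop follows; the flattened struct, the 8-entry array size, and the printf stand-ins are simplifying assumptions, not the kernel's actual definitions (the real ops carry a union u with per-type fields, and the real calls pass "finished" through to the host stub so it knows when the batch is complete):

    #include <stdio.h>

    enum hvc_op_type { MMAP, MUNMAP, MPROTECT };

    struct host_vm_op {
            enum hvc_op_type type;
            unsigned long addr, len;
    };

    struct host_vm_change {
            struct host_vm_op ops[8];
            int index;
            int userspace;          /* host process vs. kernel address space */
    };

    static int do_ops(struct host_vm_change *hvc, int end, int finished)
    {
            int i, ret = 0;

            for (i = 0; i < end && !ret; i++) {
                    struct host_vm_op *op = &hvc->ops[i];

                    switch (op->type) {
                    case MMAP:
                            printf("map     %#lx +%#lx\n", op->addr, op->len);
                            break;
                    case MUNMAP:
                            printf("unmap   %#lx +%#lx\n", op->addr, op->len);
                            break;
                    case MPROTECT:
                            printf("protect %#lx +%#lx\n", op->addr, op->len);
                            break;
                    }
            }
            (void)finished;         /* see note above */
            return ret;
    }
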
123 unsigned int prot, struct host_vm_change *hvc) in add_mmap() argument
129 if (hvc->userspace) in add_mmap()
133 if (hvc->index != 0) { in add_mmap()
134 last = &hvc->ops[hvc->index - 1]; in add_mmap()
144 if (hvc->index == ARRAY_SIZE(hvc->ops)) { in add_mmap()
145 ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0); in add_mmap()
146 hvc->index = 0; in add_mmap()
149 hvc->ops[hvc->index++] = ((struct host_vm_op) in add_mmap()
161 struct host_vm_change *hvc) in add_munmap() argument
169 if (hvc->index != 0) { in add_munmap()
170 last = &hvc->ops[hvc->index - 1]; in add_munmap()
178 if (hvc->index == ARRAY_SIZE(hvc->ops)) { in add_munmap()
179 ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0); in add_munmap()
180 hvc->index = 0; in add_munmap()
183 hvc->ops[hvc->index++] = ((struct host_vm_op) in add_munmap()
191 unsigned int prot, struct host_vm_change *hvc) in add_mprotect() argument
196 if (hvc->index != 0) { in add_mprotect()
197 last = &hvc->ops[hvc->index - 1]; in add_mprotect()
206 if (hvc->index == ARRAY_SIZE(hvc->ops)) { in add_mprotect()
207 ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0); in add_mprotect()
208 hvc->index = 0; in add_mprotect()
211 hvc->ops[hvc->index++] = ((struct host_vm_op) in add_mprotect()
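
add_mmap(), add_munmap(), and add_mprotect() (matched above) all share one queue-or-merge shape, which is why their hvc-touching lines look identical. Here is a sketch of that shared pattern, continuing the simplified types from the do_ops() model above; the merge condition is an assumption distilled from the matched lines, whereas the real helpers compare the op-specific union fields (and, for mmap, fd/offset continuity):

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static int add_op(struct host_vm_change *hvc, struct host_vm_op op)
    {
            int ret = 0;

            /* 1. If the previously queued op is the same kind and ends
             *    exactly where this one starts, extend it in place. */
            if (hvc->index != 0) {
                    struct host_vm_op *last = &hvc->ops[hvc->index - 1];

                    if (last->type == op.type &&
                        last->addr + last->len == op.addr) {
                            last->len += op.len;
                            return 0;
                    }
            }

            /* 2. Array full: flush with finished == 0 (more ops follow)
             *    and restart filling from slot 0. */
            if (hvc->index == ARRAY_SIZE(hvc->ops)) {
                    ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
                    hvc->index = 0;
            }

            /* 3. Queue the new op. */
            hvc->ops[hvc->index++] = op;
            return ret;
    }

    int main(void)
    {
            struct host_vm_change hvc = { .index = 0 };
            struct host_vm_op a = { MMAP, 0x1000, 0x1000 };
            struct host_vm_op b = { MMAP, 0x2000, 0x1000 };

            add_op(&hvc, a);
            add_op(&hvc, b);        /* adjacent, same type: merged into a */
            return do_ops(&hvc, hvc.index, 1);  /* one map of 0x2000 bytes */
    }

Merging adjacent same-typed ops is the point of the batching: a walk over many contiguous pages collapses into a handful of host syscalls instead of one per page.
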
223 struct host_vm_change *hvc) in update_pte_range() argument
244 if (hvc->force || pte_newpage(*pte)) { in update_pte_range()
248 PAGE_SIZE, prot, hvc); in update_pte_range()
250 ret = add_munmap(addr, PAGE_SIZE, hvc); in update_pte_range()
252 ret = add_mprotect(addr, PAGE_SIZE, prot, hvc); in update_pte_range()
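
update_pte_range() is where each PTE's state is translated into one of the three batched ops. Paraphrased decision logic, consistent with the matched lines but abbreviated (pte_present()/pte_newpage()/pte_newprot() are UML's page-table predicates; this is a fragment, not standalone code):

    if (hvc->force || pte_newpage(*pte)) {
            if (pte_present(*pte))
                    /* a (new) page is present: (re)map it on the host */
                    ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
                                   PAGE_SIZE, prot, hvc);
            else
                    /* page went away: drop the host mapping */
                    ret = add_munmap(addr, PAGE_SIZE, hvc);
    } else if (pte_newprot(*pte))
            /* same page, new permissions */
            ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
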
260 struct host_vm_change *hvc) in update_pmd_range() argument
270 if (hvc->force || pmd_newpage(*pmd)) { in update_pmd_range()
271 ret = add_munmap(addr, next - addr, hvc); in update_pmd_range()
275 else ret = update_pte_range(pmd, addr, next, hvc); in update_pmd_range()
282 struct host_vm_change *hvc) in update_pud_range() argument
292 if (hvc->force || pud_newpage(*pud)) { in update_pud_range()
293 ret = add_munmap(addr, next - addr, hvc); in update_pud_range()
297 else ret = update_pmd_range(pud, addr, next, hvc); in update_pud_range()
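
update_pmd_range() and update_pud_range() share one walk shape: an absent entry flagged as new is turned into a single span-sized munmap, anything else recurses one level down. Abbreviated fragment at the pmd level (pmd_addr_end() is the usual kernel walk helper; the pud level is symmetrical, with update_pmd_range() as the recursion target):

    do {
            next = pmd_addr_end(addr, end);
            if (!pmd_present(*pmd)) {
                    /* whole span gone: one big munmap, not one per page */
                    if (hvc->force || pmd_newpage(*pmd))
                            ret = add_munmap(addr, next - addr, hvc);
            } else
                    ret = update_pte_range(pmd, addr, next, hvc);
    } while (pmd++, addr = next, ((addr < end) && !ret));
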
306 struct host_vm_change hvc; in fix_range_common() local
310 hvc = INIT_HVC(mm, force, userspace); in fix_range_common()
316 ret = add_munmap(addr, next - addr, &hvc); in fix_range_common()
320 else ret = update_pud_range(pgd, addr, next, &hvc); in fix_range_common()
324 ret = do_ops(&hvc, hvc.index, 1); in fix_range_common()
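
fix_range_common() is the driver for userspace ranges: it initializes the on-stack hvc with INIT_HVC(), walks the pgd entries (recursing down through the update_*_range() chain), and finishes with do_ops(..., 1) so the host side knows the batch is complete. Abbreviated shape, error handling elided:

    struct host_vm_change hvc;

    hvc = INIT_HVC(mm, force, userspace);
    pgd = pgd_offset(mm, addr);
    do {
            next = pgd_addr_end(addr, end_addr);
            if (!pgd_present(*pgd)) {
                    if (force || pgd_newpage(*pgd))
                            ret = add_munmap(addr, next - addr, &hvc);
            } else
                    ret = update_pud_range(pgd, addr, next, &hvc);
    } while (pgd++, addr = next, ((addr < end_addr) && !ret));

    if (!ret)
            ret = do_ops(&hvc, hvc.index, 1);
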
346 struct host_vm_change hvc; in flush_tlb_kernel_range_common() local
349 hvc = INIT_HVC(mm, force, userspace); in flush_tlb_kernel_range_common()
358 err = add_munmap(addr, last - addr, &hvc); in flush_tlb_kernel_range_common()
374 err = add_munmap(addr, last - addr, &hvc); in flush_tlb_kernel_range_common()
390 err = add_munmap(addr, last - addr, &hvc); in flush_tlb_kernel_range_common()
402 err = add_munmap(addr, PAGE_SIZE, &hvc); in flush_tlb_kernel_range_common()
408 PAGE_SIZE, 0, &hvc); in flush_tlb_kernel_range_common()
412 err = add_mprotect(addr, PAGE_SIZE, 0, &hvc); in flush_tlb_kernel_range_common()
417 err = do_ops(&hvc, hvc.index, 1); in flush_tlb_kernel_range_common()
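
flush_tlb_kernel_range_common() applies the same machinery to init_mm's kernel mappings: the three span-sized add_munmap() calls above handle absent pgd/pud/pmd levels that are flagged new, and each present PTE is either unmapped and remapped or just re-protected (prot is passed as the literal 0 here, as the matched lines show). Per-PTE step, paraphrased as a fragment with error handling and the enclosing loop elided:

    pte = pte_offset_kernel(pmd, addr);
    if (!pte_present(*pte) || pte_newpage(*pte)) {
            /* stale or replaced page: drop the old host mapping,
             * then remap if a page is actually present */
            err = add_munmap(addr, PAGE_SIZE, &hvc);
            if (pte_present(*pte))
                    err = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
                                   PAGE_SIZE, 0, &hvc);
    } else if (pte_newprot(*pte))
            err = add_mprotect(addr, PAGE_SIZE, 0, &hvc);

    /* after the walk, the final do_ops(&hvc, hvc.index, 1) marks the
     * batch finished */
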