Lines Matching +full:start +full:- +full:up
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
31 * VMW_BO_DIRTY_PAGETABLE - Scan the pagetable for hardware dirty bits
32 * VMW_BO_DIRTY_MKWRITE - Write-protect page table entries and record write-
52 * struct vmw_bo_dirty - Dirty information for buffer objects
53 * @start: First currently dirty bit
64 unsigned long start; member
74 * vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits
79 * dirty-tracking method.
83 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_scan_pagetable()
84 pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node); in vmw_bo_dirty_scan_pagetable()
85 struct address_space *mapping = vbo->base.bdev->dev_mapping; in vmw_bo_dirty_scan_pagetable()
90 offset, dirty->bitmap_size, in vmw_bo_dirty_scan_pagetable()
91 offset, &dirty->bitmap[0], in vmw_bo_dirty_scan_pagetable()
92 &dirty->start, &dirty->end); in vmw_bo_dirty_scan_pagetable()
94 dirty->change_count++; in vmw_bo_dirty_scan_pagetable()
96 dirty->change_count = 0; in vmw_bo_dirty_scan_pagetable()
98 if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) { in vmw_bo_dirty_scan_pagetable()
99 dirty->change_count = 0; in vmw_bo_dirty_scan_pagetable()
100 dirty->method = VMW_BO_DIRTY_MKWRITE; in vmw_bo_dirty_scan_pagetable()
102 offset, dirty->bitmap_size); in vmw_bo_dirty_scan_pagetable()
104 offset, dirty->bitmap_size, in vmw_bo_dirty_scan_pagetable()
105 offset, &dirty->bitmap[0], in vmw_bo_dirty_scan_pagetable()
106 &dirty->start, &dirty->end); in vmw_bo_dirty_scan_pagetable()
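
The pagetable scan above only changes the tracking method after its per-scan condition has held for more than VMW_DIRTY_NUM_CHANGE_TRIGGERS consecutive scans; a single scan that breaks the streak resets change_count. A minimal userspace sketch of that hysteresis, assuming nothing beyond what the listing shows: the per-scan condition (derived from how many pages the scan marked dirty) is reduced to a boolean, and the trigger value and enum names are illustrative stand-ins.

/* Sketch: method-switch hysteresis, modeled on the change_count logic above. */
#include <stdio.h>

enum dirty_method { DIRTY_PAGETABLE, DIRTY_MKWRITE };

#define NUM_CHANGE_TRIGGERS 2	/* stand-in for VMW_DIRTY_NUM_CHANGE_TRIGGERS */

struct tracker {
	enum dirty_method method;
	unsigned int change_count;
};

/*
 * Called once per scan. "switch_wanted" is the per-scan condition the driver
 * derives from how many pages the scan marked dirty (for mkwrite mode the
 * listing shows a percentage test). Only after it has held for more than
 * NUM_CHANGE_TRIGGERS consecutive scans does the method change; a single
 * scan that breaks the streak resets the counter.
 */
static void scan_tick(struct tracker *t, int switch_wanted)
{
	if (switch_wanted)
		t->change_count++;
	else
		t->change_count = 0;

	if (t->change_count > NUM_CHANGE_TRIGGERS) {
		t->change_count = 0;
		t->method = (t->method == DIRTY_PAGETABLE) ?
			DIRTY_MKWRITE : DIRTY_PAGETABLE;
	}
}

int main(void)
{
	struct tracker t = { DIRTY_PAGETABLE, 0 };
	int i;

	/* The condition holds for three consecutive scans -> method flips. */
	for (i = 0; i < 3; i++)
		scan_tick(&t, 1);
	printf("method after 3 triggering scans: %s\n",
	       t.method == DIRTY_MKWRITE ? "mkwrite" : "pagetable");
	return 0;
}
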
111 * vmw_bo_dirty_scan_mkwrite - Reset the mkwrite dirty-tracking method
114 * Write-protect pages written to so that consecutive write accesses will
117 * This function may change the dirty-tracking method.
121 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_scan_mkwrite()
122 unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node); in vmw_bo_dirty_scan_mkwrite()
123 struct address_space *mapping = vbo->base.bdev->dev_mapping; in vmw_bo_dirty_scan_mkwrite()
126 if (dirty->end <= dirty->start) in vmw_bo_dirty_scan_mkwrite()
129 num_marked = wp_shared_mapping_range(vbo->base.bdev->dev_mapping, in vmw_bo_dirty_scan_mkwrite()
130 dirty->start + offset, in vmw_bo_dirty_scan_mkwrite()
131 dirty->end - dirty->start); in vmw_bo_dirty_scan_mkwrite()
133 if (100UL * num_marked / dirty->bitmap_size > in vmw_bo_dirty_scan_mkwrite()
135 dirty->change_count++; in vmw_bo_dirty_scan_mkwrite()
137 dirty->change_count = 0; in vmw_bo_dirty_scan_mkwrite()
140 if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) { in vmw_bo_dirty_scan_mkwrite()
141 pgoff_t start = 0; in vmw_bo_dirty_scan_mkwrite() local
142 pgoff_t end = dirty->bitmap_size; in vmw_bo_dirty_scan_mkwrite()
144 dirty->method = VMW_BO_DIRTY_PAGETABLE; in vmw_bo_dirty_scan_mkwrite()
146 &dirty->bitmap[0], in vmw_bo_dirty_scan_mkwrite()
147 &start, &end); in vmw_bo_dirty_scan_mkwrite()
148 bitmap_clear(&dirty->bitmap[0], 0, dirty->bitmap_size); in vmw_bo_dirty_scan_mkwrite()
149 if (dirty->start < dirty->end) in vmw_bo_dirty_scan_mkwrite()
150 bitmap_set(&dirty->bitmap[0], dirty->start, in vmw_bo_dirty_scan_mkwrite()
151 dirty->end - dirty->start); in vmw_bo_dirty_scan_mkwrite()
152 dirty->change_count = 0; in vmw_bo_dirty_scan_mkwrite()
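
When the mkwrite scan switches back to pagetable tracking it drops the per-page bits but not the information they carried: the bitmap is cleared and the whole known-dirty span [start, end) is re-marked in one go. A small userspace sketch of that re-seed step, using a byte-per-page array in place of the kernel's bitmap_clear()/bitmap_set() helpers:

/* Sketch: drop per-page detail but keep the coalesced dirty span. */
#include <stdio.h>
#include <string.h>

#define NPAGES 16

struct dirty_state {
	unsigned char bitmap[NPAGES];	/* one byte per page for simplicity */
	unsigned long start, end;	/* coalesced dirty range [start, end) */
};

static void reseed_bitmap(struct dirty_state *d)
{
	/* Equivalent of bitmap_clear(bitmap, 0, bitmap_size). */
	memset(d->bitmap, 0, sizeof(d->bitmap));

	/*
	 * Equivalent of bitmap_set(bitmap, start, end - start), guarded by
	 * start < end so an empty range marks nothing.
	 */
	if (d->start < d->end)
		memset(&d->bitmap[d->start], 1, d->end - d->start);
}

int main(void)
{
	struct dirty_state d = { .start = 3, .end = 7 };
	unsigned long i;

	d.bitmap[5] = 1;	/* stale per-page detail, about to be dropped */
	reseed_bitmap(&d);

	for (i = 0; i < NPAGES; i++)
		putchar(d.bitmap[i] ? '1' : '0');
	putchar('\n');		/* prints 0001111000000000 */
	return 0;
}
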
157 * vmw_bo_dirty_scan - Scan for dirty pages and add them to the dirty
165 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_scan()
167 if (dirty->method == VMW_BO_DIRTY_PAGETABLE) in vmw_bo_dirty_scan()
174 * vmw_bo_dirty_pre_unmap - write-protect and pick up dirty pages before
177 * @start: First page of the range within the buffer object.
182 * up all dirty pages.
185 pgoff_t start, pgoff_t end) in vmw_bo_dirty_pre_unmap() argument
187 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_pre_unmap()
188 unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node); in vmw_bo_dirty_pre_unmap()
189 struct address_space *mapping = vbo->base.bdev->dev_mapping; in vmw_bo_dirty_pre_unmap()
191 if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end) in vmw_bo_dirty_pre_unmap()
194 wp_shared_mapping_range(mapping, start + offset, end - start); in vmw_bo_dirty_pre_unmap()
195 clean_record_shared_mapping_range(mapping, start + offset, in vmw_bo_dirty_pre_unmap()
196 end - start, offset, in vmw_bo_dirty_pre_unmap()
197 &dirty->bitmap[0], &dirty->start, in vmw_bo_dirty_pre_unmap()
198 &dirty->end); in vmw_bo_dirty_pre_unmap()
202 * vmw_bo_dirty_unmap - Clear all ptes pointing to a range within a bo
204 * @start: First page of the range within the buffer object.
210 pgoff_t start, pgoff_t end) in vmw_bo_dirty_unmap() argument
212 unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node); in vmw_bo_dirty_unmap()
213 struct address_space *mapping = vbo->base.bdev->dev_mapping; in vmw_bo_dirty_unmap()
215 vmw_bo_dirty_pre_unmap(vbo, start, end); in vmw_bo_dirty_unmap()
216 unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT, in vmw_bo_dirty_unmap()
217 (loff_t) (end - start) << PAGE_SHIFT); in vmw_bo_dirty_unmap()
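
The byte length handed to unmap_shared_mapping_range() above is computed as (loff_t)(end - start) << PAGE_SHIFT; the cast widens the page count before the shift, which guards against overflow on builds where pgoff_t is only 32 bits wide. A standalone illustration of the difference, assuming 4 KiB pages:

/* Sketch: widen the page count before shifting it into a byte length. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define PAGE_SHIFT 12	/* 4 KiB pages assumed for the example */

int main(void)
{
	uint32_t npages = 0x00200000;	/* 2^21 pages = 8 GiB of data */

	/* Shifting the 32-bit count first wraps the result to zero. */
	uint64_t wrong = (uint64_t)(npages << PAGE_SHIFT);
	/* Widening first, as the driver does with the loff_t cast, is safe. */
	uint64_t right = (uint64_t)npages << PAGE_SHIFT;

	printf("shift then widen: 0x%" PRIx64 "\n", wrong);	/* 0x0 */
	printf("widen then shift: 0x%" PRIx64 "\n", right);	/* 0x200000000 */
	return 0;
}
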
221 * vmw_bo_dirty_add - Add a dirty-tracking user to a buffer object
224  * This function registers a dirty-tracking user with a buffer object. in vmw_bo_dirty_add()
225  * A user can, for example, be a resource or a vma in a special user-space in vmw_bo_dirty_add()
228 * Return: Zero on success, -ENOMEM on memory allocation failure.
232 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_add()
233 pgoff_t num_pages = vbo->base.resource->num_pages; in vmw_bo_dirty_add()
238 dirty->ref_count++; in vmw_bo_dirty_add()
245 ret = -ENOMEM; in vmw_bo_dirty_add()
249 dirty->bitmap_size = num_pages; in vmw_bo_dirty_add()
250 dirty->start = dirty->bitmap_size; in vmw_bo_dirty_add()
251 dirty->end = 0; in vmw_bo_dirty_add()
252 dirty->ref_count = 1; in vmw_bo_dirty_add()
254 dirty->method = VMW_BO_DIRTY_PAGETABLE; in vmw_bo_dirty_add()
256 struct address_space *mapping = vbo->base.bdev->dev_mapping; in vmw_bo_dirty_add()
257 pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node); in vmw_bo_dirty_add()
259 dirty->method = VMW_BO_DIRTY_MKWRITE; in vmw_bo_dirty_add()
261 /* Write-protect and then pick up already dirty bits */ in vmw_bo_dirty_add()
265 &dirty->bitmap[0], in vmw_bo_dirty_add()
266 &dirty->start, &dirty->end); in vmw_bo_dirty_add()
269 vbo->dirty = dirty; in vmw_bo_dirty_add()
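
vmw_bo_dirty_add() allocates the tracker only for the first user and initializes the range to "empty" with start = bitmap_size and end = 0, so that later min()/max() updates can grow it from nothing. A userspace sketch of that lazy, reference-counted setup; the flexible-array byte-per-page bitmap, calloc() and the -1 error return are simplifications standing in for the driver's allocation and -ENOMEM path, not its exact layout:

/* Sketch: refcounted dirty tracker with an "empty range" sentinel. */
#include <stdio.h>
#include <stdlib.h>

struct bo_dirty {
	unsigned long start;		/* first dirty page, bitmap_size if none */
	unsigned long end;		/* one past last dirty page, 0 if none */
	unsigned long bitmap_size;	/* number of tracked pages */
	unsigned int ref_count;
	unsigned char bitmap[];		/* one byte per page in this sketch */
};

/* Hypothetical analogue of vmw_bo_dirty_add(): allocate on first use only. */
static int dirty_add(struct bo_dirty **slot, unsigned long num_pages)
{
	struct bo_dirty *d = *slot;

	if (d) {
		d->ref_count++;		/* already tracking: just take a ref */
		return 0;
	}

	d = calloc(1, sizeof(*d) + num_pages);
	if (!d)
		return -1;		/* the driver returns -ENOMEM here */

	d->bitmap_size = num_pages;
	d->start = num_pages;		/* empty range: start > end */
	d->end = 0;
	d->ref_count = 1;
	*slot = d;
	return 0;
}

int main(void)
{
	struct bo_dirty *tracker = NULL;

	dirty_add(&tracker, 64);
	dirty_add(&tracker, 64);	/* second user only bumps the refcount */
	printf("refs=%u start=%lu end=%lu\n",
	       tracker->ref_count, tracker->start, tracker->end);
	free(tracker);
	return 0;
}
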
278 * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object
281 * This function releases a dirty-tracking user from a buffer object.
282 * If the reference count reaches zero, then the dirty-tracking object is
285  * Return: None. This function returns void and cannot fail. in vmw_bo_dirty_release()
289 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_release()
291 if (dirty && --dirty->ref_count == 0) { in vmw_bo_dirty_release()
293 vbo->dirty = NULL; in vmw_bo_dirty_release()
298 * vmw_bo_dirty_transfer_to_res - Pick up a resource's dirty region from
302 * This function will pick up all dirty ranges affecting the resource from
309 struct vmw_buffer_object *vbo = res->backup; in vmw_bo_dirty_transfer_to_res()
310 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_transfer_to_res()
311 pgoff_t start, cur, end; in vmw_bo_dirty_transfer_to_res() local
312 unsigned long res_start = res->backup_offset; in vmw_bo_dirty_transfer_to_res()
313 unsigned long res_end = res->backup_offset + res->backup_size; in vmw_bo_dirty_transfer_to_res()
319 if (res_start >= dirty->end || res_end <= dirty->start) in vmw_bo_dirty_transfer_to_res()
322 cur = max(res_start, dirty->start); in vmw_bo_dirty_transfer_to_res()
323 res_end = max(res_end, dirty->end); in vmw_bo_dirty_transfer_to_res()
327 start = find_next_bit(&dirty->bitmap[0], res_end, cur); in vmw_bo_dirty_transfer_to_res()
328 if (start >= res_end) in vmw_bo_dirty_transfer_to_res()
331 end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1); in vmw_bo_dirty_transfer_to_res()
333 num = end - start; in vmw_bo_dirty_transfer_to_res()
334 bitmap_clear(&dirty->bitmap[0], start, num); in vmw_bo_dirty_transfer_to_res()
335 vmw_resource_dirty_update(res, start, end); in vmw_bo_dirty_transfer_to_res()
338 if (res_start <= dirty->start && res_end > dirty->start) in vmw_bo_dirty_transfer_to_res()
339 dirty->start = res_end; in vmw_bo_dirty_transfer_to_res()
340 if (res_start < dirty->end && res_end >= dirty->end) in vmw_bo_dirty_transfer_to_res()
341 dirty->end = res_start; in vmw_bo_dirty_transfer_to_res()
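
The transfer loop walks the bitmap one contiguous dirty run at a time: find the next set bit, find the next clear bit after it, report the run [start, end) to the resource and clear it. A self-contained sketch of the same run-walking pattern, with simple byte-array stand-ins for find_next_bit()/find_next_zero_bit():

/* Sketch: extract contiguous dirty runs from a bitmap, as the loop above does. */
#include <stdio.h>

#define NPAGES 16

/* Minimal stand-ins for find_next_bit()/find_next_zero_bit(). */
static unsigned long next_set(const unsigned char *bm, unsigned long size,
			      unsigned long from)
{
	while (from < size && !bm[from])
		from++;
	return from;
}

static unsigned long next_zero(const unsigned char *bm, unsigned long size,
			       unsigned long from)
{
	while (from < size && bm[from])
		from++;
	return from;
}

int main(void)
{
	unsigned char bitmap[NPAGES] = { 0 };
	unsigned long cur = 0, start, end, i;

	for (i = 2; i < 5; i++) bitmap[i] = 1;	/* dirty run [2, 5) */
	for (i = 9; i < 11; i++) bitmap[i] = 1;	/* dirty run [9, 11) */

	while (cur < NPAGES) {
		start = next_set(bitmap, NPAGES, cur);
		if (start >= NPAGES)
			break;
		end = next_zero(bitmap, NPAGES, start + 1);
		cur = end + 1;

		/*
		 * The driver clears the run and reports it through
		 * vmw_resource_dirty_update(); here we just print it.
		 */
		for (i = start; i < end; i++)
			bitmap[i] = 0;
		printf("dirty run: [%lu, %lu)\n", start, end);
	}
	return 0;
}
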
345 * vmw_bo_dirty_clear_res - Clear a resource's dirty region from
354 unsigned long res_start = res->backup_offset; in vmw_bo_dirty_clear_res()
355 unsigned long res_end = res->backup_offset + res->backup_size; in vmw_bo_dirty_clear_res()
356 struct vmw_buffer_object *vbo = res->backup; in vmw_bo_dirty_clear_res()
357 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_clear_res()
362 if (res_start >= dirty->end || res_end <= dirty->start) in vmw_bo_dirty_clear_res()
365 res_start = max(res_start, dirty->start); in vmw_bo_dirty_clear_res()
366 res_end = min(res_end, dirty->end); in vmw_bo_dirty_clear_res()
367 bitmap_clear(&dirty->bitmap[0], res_start, res_end - res_start); in vmw_bo_dirty_clear_res()
369 if (res_start <= dirty->start && res_end > dirty->start) in vmw_bo_dirty_clear_res()
370 dirty->start = res_end; in vmw_bo_dirty_clear_res()
371 if (res_start < dirty->end && res_end >= dirty->end) in vmw_bo_dirty_clear_res()
372 dirty->end = res_start; in vmw_bo_dirty_clear_res()
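
Both the transfer and the clear paths end by trimming the tracked [start, end) span, and they only shrink it when the cleared resource range covers one of its edges, since a hole punched in the middle cannot be expressed as a single interval. A small sketch of exactly those two edge conditions:

/* Sketch: shrink a tracked [start, end) interval after clearing [r_start, r_end). */
#include <stdio.h>

struct range { unsigned long start, end; };

static void trim(struct range *d, unsigned long r_start, unsigned long r_end)
{
	/* Cleared range covers the left edge: move start up past it. */
	if (r_start <= d->start && r_end > d->start)
		d->start = r_end;
	/* Cleared range covers the right edge: pull end back before it. */
	if (r_start < d->end && r_end >= d->end)
		d->end = r_start;
}

int main(void)
{
	struct range d = { 4, 20 };

	trim(&d, 0, 8);		/* clears the left edge -> [8, 20) */
	trim(&d, 10, 12);	/* hole in the middle -> interval unchanged */
	trim(&d, 16, 24);	/* clears the right edge -> [8, 16) */
	printf("tracked range: [%lu, %lu)\n", d.start, d.end);
	return 0;
}
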
377 struct vm_area_struct *vma = vmf->vma; in vmw_bo_vm_mkwrite()
379 vma->vm_private_data; in vmw_bo_vm_mkwrite()
390 save_flags = vmf->flags; in vmw_bo_vm_mkwrite()
391 vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY; in vmw_bo_vm_mkwrite()
393 vmf->flags = save_flags; in vmw_bo_vm_mkwrite()
397 page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node); in vmw_bo_vm_mkwrite()
398 if (unlikely(page_offset >= bo->resource->num_pages)) { in vmw_bo_vm_mkwrite()
403 if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE && in vmw_bo_vm_mkwrite()
404 !test_bit(page_offset, &vbo->dirty->bitmap[0])) { in vmw_bo_vm_mkwrite()
405 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_vm_mkwrite()
407 __set_bit(page_offset, &dirty->bitmap[0]); in vmw_bo_vm_mkwrite()
408 dirty->start = min(dirty->start, page_offset); in vmw_bo_vm_mkwrite()
409 dirty->end = max(dirty->end, page_offset + 1); in vmw_bo_vm_mkwrite()
413 dma_resv_unlock(bo->base.resv); in vmw_bo_vm_mkwrite()
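
In mkwrite mode the fault handler above records a write by setting the page's bit and widening the coalesced range with min()/max(); starting from the empty sentinel (start == bitmap_size, end == 0), the first write collapses the range onto exactly that page. A userspace sketch of that bookkeeping (the byte-per-page bitmap is an illustration, not the kernel's bit layout):

/* Sketch: record a written page, mirroring the mkwrite bookkeeping above. */
#include <stdio.h>

#define NPAGES 32UL

static unsigned char bitmap[NPAGES];
static unsigned long dirty_start = NPAGES;	/* empty-range sentinel */
static unsigned long dirty_end;			/* 0: nothing dirty yet */

static void mark_page_dirty(unsigned long page)
{
	if (bitmap[page])
		return;				/* already recorded */

	bitmap[page] = 1;			/* __set_bit() equivalent */
	if (page < dirty_start)
		dirty_start = page;		/* min(start, page) */
	if (page + 1 > dirty_end)
		dirty_end = page + 1;		/* max(end, page + 1) */
}

int main(void)
{
	mark_page_dirty(7);	/* first write: range becomes [7, 8) */
	mark_page_dirty(3);
	mark_page_dirty(12);
	printf("dirty range: [%lu, %lu)\n", dirty_start, dirty_end);
	return 0;
}
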
419 struct vm_area_struct *vma = vmf->vma; in vmw_bo_vm_fault()
421 vma->vm_private_data; in vmw_bo_vm_fault()
432 num_prefault = (vma->vm_flags & VM_RAND_READ) ? 1 : in vmw_bo_vm_fault()
435 if (vbo->dirty) { in vmw_bo_vm_fault()
439 page_offset = vmf->pgoff - in vmw_bo_vm_fault()
440 drm_vma_node_start(&bo->base.vma_node); in vmw_bo_vm_fault()
441 if (page_offset >= bo->resource->num_pages || in vmw_bo_vm_fault()
454 * sure the page protection is write-enabled so we don't get in vmw_bo_vm_fault()
457 if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE) in vmw_bo_vm_fault()
458 prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED); in vmw_bo_vm_fault()
460 prot = vm_get_page_prot(vma->vm_flags); in vmw_bo_vm_fault()
463 if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) in vmw_bo_vm_fault()
467 dma_resv_unlock(bo->base.resv); in vmw_bo_vm_fault()
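
When mkwrite tracking is active, the fault handler above inserts pages with the write-protected protection (vm_flags with VM_SHARED masked off), so the next write traps again and can be recorded. The same idea can be demonstrated entirely in userspace by write-protecting a mapping with mprotect() and noting the faulting page in a SIGSEGV handler before re-enabling writes. This is an analogue of the technique, not driver code, and calling mprotect() from a signal handler is a known practical shortcut rather than something POSIX guarantees:

/* Sketch: userspace dirty tracking via write protection, the mkwrite idea. */
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define NPAGES 4

static char *region;
static long page_size;
static volatile sig_atomic_t dirty[NPAGES];

static void segv_handler(int sig, siginfo_t *info, void *ctx)
{
	char *addr = (char *)info->si_addr;
	long page = (addr - region) / page_size;

	(void)sig; (void)ctx;
	if (page < 0 || page >= NPAGES)
		_exit(1);		/* unrelated fault */

	dirty[page] = 1;		/* record the write ... */
	/* ... then unprotect the page so the faulting store can retry.
	 * Note: mprotect() is not formally async-signal-safe. */
	mprotect(region + page * page_size, page_size, PROT_READ | PROT_WRITE);
}

int main(void)
{
	struct sigaction sa = { 0 };
	int i;

	page_size = sysconf(_SC_PAGESIZE);
	region = mmap(NULL, NPAGES * page_size, PROT_READ | PROT_WRITE,
		      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (region == MAP_FAILED)
		return 1;

	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_SIGINFO;
	sa.sa_sigaction = segv_handler;
	sigaction(SIGSEGV, &sa, NULL);

	/* "Scan" step: write-protect everything, like wp_shared_mapping_range(). */
	mprotect(region, NPAGES * page_size, PROT_READ);

	region[0 * page_size] = 'x';	/* traps, marks page 0 dirty */
	region[2 * page_size] = 'y';	/* traps, marks page 2 dirty */

	for (i = 0; i < NPAGES; i++)
		printf("page %d: %s\n", i, dirty[i] ? "dirty" : "clean");
	return 0;
}
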