/Linux-v6.1/drivers/gpu/drm/msm/ |
D | msm_gem_shrinker.c |
    184  unsigned idx, unmapped = 0;  in msm_gem_shrinker_vmap() local
    186  for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {  in msm_gem_shrinker_vmap()
    187  unmapped += drm_gem_lru_scan(lrus[idx],  in msm_gem_shrinker_vmap()
    188  vmap_shrink_limit - unmapped,  in msm_gem_shrinker_vmap()
    192  *(unsigned long *)ptr += unmapped;  in msm_gem_shrinker_vmap()
    194  if (unmapped > 0)  in msm_gem_shrinker_vmap()
    195  trace_msm_gem_purge_vmaps(unmapped);  in msm_gem_shrinker_vmap()
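
The msm_gem_shrinker.c hits above show the vmap shrinker walking a NULL-terminated array of GEM LRU lists and accumulating reclaimed vmaps until a budget is met. A self-contained sketch of that accumulate-until-budget loop, with scan_one_lru() and struct fake_lru as hypothetical stand-ins for drm_gem_lru_scan() and the driver's LRUs:

	#include <stdio.h>

	/* Hypothetical stand-in for drm_gem_lru_scan(): pretend each list can
	 * release up to 'avail' vmaps, but never more than the remaining budget. */
	struct fake_lru { unsigned avail; };

	static unsigned scan_one_lru(struct fake_lru *lru, unsigned budget)
	{
		unsigned freed = lru->avail < budget ? lru->avail : budget;

		lru->avail -= freed;
		return freed;
	}

	int main(void)
	{
		struct fake_lru a = { .avail = 3 }, b = { .avail = 10 };
		struct fake_lru *lrus[] = { &a, &b, NULL };    /* NULL-terminated, like the driver's array */
		const unsigned vmap_shrink_limit = 8;          /* reclaim budget */
		unsigned idx, unmapped = 0;

		for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++)
			unmapped += scan_one_lru(lrus[idx], vmap_shrink_limit - unmapped);

		printf("unmapped %u vmaps\n", unmapped);       /* the driver would trace this instead */
		return 0;
	}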
|
D | msm_gpu_trace.h |
    143  TP_PROTO(u32 unmapped),
    144  TP_ARGS(unmapped),
    146  __field(u32, unmapped)
    149  __entry->unmapped = unmapped;
    151  TP_printk("Purging %u vmaps", __entry->unmapped)
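
The msm_gpu_trace.h fragments piece together into a conventional TRACE_EVENT() with a single u32 count. A rough reconstruction from the visible lines (the surrounding trace-header boilerplate is omitted, and the TP_STRUCT__entry/TP_fast_assign wrappers are the standard macros rather than text taken from the hits):

	/* Sketch only: reassembled from the fragments above; the trace header
	 * boilerplate (#define TRACE_SYSTEM, include guards, etc.) is omitted. */
	TRACE_EVENT(msm_gem_purge_vmaps,
		TP_PROTO(u32 unmapped),
		TP_ARGS(unmapped),
		TP_STRUCT__entry(
			__field(u32, unmapped)
		),
		TP_fast_assign(
			__entry->unmapped = unmapped;
		),
		TP_printk("Purging %u vmaps", __entry->unmapped)
	);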
|
D | msm_iommu.c |
    96   size_t unmapped, pgsize, count;  in msm_iommu_pagetable_unmap() local
    100  unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);  in msm_iommu_pagetable_unmap()
    101  if (!unmapped)  in msm_iommu_pagetable_unmap()
    104  iova += unmapped;  in msm_iommu_pagetable_unmap()
    105  size -= unmapped;  in msm_iommu_pagetable_unmap()
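
The msm_iommu.c hits, and most of the IOMMU entries further down, share one loop shape: unmap the range in steps, advance by however many bytes the lower layer actually tore down, and stop as soon as a step makes no progress, returning the partial total. A self-contained sketch of that pattern, where unmap_step() is a hypothetical stand-in for ops->unmap_pages()/iommu_unmap():

	#include <stddef.h>
	#include <stdio.h>

	/* Hypothetical page-table op: unmap at most one 4 KiB step per call,
	 * and fail once a fake fault address is reached. */
	static size_t unmap_step(unsigned long iova, size_t size)
	{
		const unsigned long fault_at = 0x3000;

		if (iova >= fault_at || size < 0x1000)
			return 0;               /* nothing unmapped: caller must stop */
		return 0x1000;
	}

	/* The shared pattern: accumulate progress, stop on the first failure,
	 * and report how many bytes were actually unmapped. */
	static size_t unmap_range(unsigned long iova, size_t size)
	{
		size_t unmapped = 0;

		while (unmapped < size) {
			size_t step = unmap_step(iova + unmapped, size - unmapped);

			if (!step)
				break;
			unmapped += step;
		}
		return unmapped;
	}

	int main(void)
	{
		/* Ask for 16 KiB starting at 0; only 12 KiB precede the fake fault. */
		printf("unmapped %zu of %d bytes\n", unmap_range(0, 0x4000), 0x4000);
		return 0;
	}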
|
/Linux-v6.1/include/trace/events/ |
D | huge_memory.h |
    56   int referenced, int none_or_zero, int status, int unmapped),
    58   TP_ARGS(mm, page, writable, referenced, none_or_zero, status, unmapped),
    67   __field(int, unmapped)
    77   __entry->unmapped = unmapped;
    87   __entry->unmapped)
|
/Linux-v6.1/drivers/iommu/amd/ |
D | io_pgtable.c |
    444  unsigned long long unmapped;  in iommu_v1_unmap_pages() local
    451  unmapped = 0;  in iommu_v1_unmap_pages()
    453  while (unmapped < size) {  in iommu_v1_unmap_pages()
    462  return unmapped;  in iommu_v1_unmap_pages()
    466  unmapped += unmap_size;  in iommu_v1_unmap_pages()
    469  return unmapped;  in iommu_v1_unmap_pages()
|
D | io_pgtable_v2.c |
    301  unsigned long unmapped = 0;  in iommu_v2_unmap_pages() local
    308  while (unmapped < size) {  in iommu_v2_unmap_pages()
    311  return unmapped;  in iommu_v2_unmap_pages()
    316  unmapped += unmap_size;  in iommu_v2_unmap_pages()
    319  return unmapped;  in iommu_v2_unmap_pages()
|
/Linux-v6.1/mm/ |
D | migrate_device.c |
    65   unsigned long addr = start, unmapped = 0;  in migrate_vma_collect_pmd() local
    265  unmapped++;  in migrate_vma_collect_pmd()
    277  if (unmapped)  in migrate_vma_collect_pmd()
    369  unsigned long unmapped = 0;  in migrate_device_unmap() local
    379  unmapped++;  in migrate_device_unmap()
    417  unmapped++;  in migrate_device_unmap()
    436  return unmapped;  in migrate_device_unmap()
|
D | khugepaged.c |
    966   int referenced, int unmapped,  in collapse_huge_page() argument
    1006  if (unmapped) {  in collapse_huge_page()
    1132  int node = NUMA_NO_NODE, unmapped = 0;  in hpage_collapse_scan_pmd() local
    1148  ++unmapped;  in hpage_collapse_scan_pmd()
    1150  unmapped <= khugepaged_max_ptes_swap) {  in hpage_collapse_scan_pmd()
    1274  (unmapped && referenced < HPAGE_PMD_NR / 2))) {  in hpage_collapse_scan_pmd()
    1283  unmapped, cc);  in hpage_collapse_scan_pmd()
    1289  none_or_zero, result, unmapped);  in hpage_collapse_scan_pmd()
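
The khugepaged.c hits show the two thresholds applied to the count of unmapped (swapped-out) PTEs during the PMD scan: at most khugepaged_max_ptes_swap such entries are tolerated, and when any are present at least half of the HPAGE_PMD_NR PTEs must have been referenced before a collapse is attempted. A small sketch of that verdict, with the tunable hard-coded as an assumed default rather than read from the real sysfs knob:

	#include <stdbool.h>
	#include <stdio.h>

	#define HPAGE_PMD_NR		512	/* PTEs covered by one PMD-sized huge page */
	#define MAX_PTES_SWAP		64	/* stand-in for khugepaged_max_ptes_swap */

	/* Sketch of the scan's verdict: too many swapped-out PTEs disqualify the
	 * range outright; otherwise, if anything is swapped out, demand that at
	 * least half of the PTEs were recently referenced before collapsing. */
	static bool worth_collapsing(int unmapped, int referenced)
	{
		if (unmapped > MAX_PTES_SWAP)
			return false;
		if (unmapped && referenced < HPAGE_PMD_NR / 2)
			return false;
		return true;
	}

	int main(void)
	{
		printf("%d\n", worth_collapsing(0, 10));    /* 1: nothing swapped out */
		printf("%d\n", worth_collapsing(8, 100));   /* 0: swap present, too cold */
		printf("%d\n", worth_collapsing(8, 300));   /* 1: swap present but hot enough */
		return 0;
	}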
|
/Linux-v6.1/drivers/vfio/ |
D | vfio_iommu_type1.c |
    1036  size_t unmapped = 0;  in unmap_unpin_fast() local
    1040  unmapped = iommu_unmap_fast(domain->domain, *iova, len,  in unmap_unpin_fast()
    1043  if (!unmapped) {  in unmap_unpin_fast()
    1048  entry->len = unmapped;  in unmap_unpin_fast()
    1051  *iova += unmapped;  in unmap_unpin_fast()
    1060  if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) {  in unmap_unpin_fast()
    1066  return unmapped;  in unmap_unpin_fast()
    1074  size_t unmapped = iommu_unmap(domain->domain, *iova, len);  in unmap_unpin_slow() local
    1076  if (unmapped) {  in unmap_unpin_slow()
    1079  unmapped >> PAGE_SHIFT,  in unmap_unpin_slow()
    [all …]
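
The vfio_iommu_type1.c fast path records each successful iommu_unmap_fast() and defers the IOTLB sync until VFIO_IOMMU_TLB_SYNC_MAX unmaps have piled up (or the fast path fails and the slow, sync-per-call variant takes over). A generic sketch of the batch-then-sync idea only, with tlb_sync()/unmap_one() as hypothetical hardware hooks rather than the real IOMMU API:

	#include <stdio.h>

	#define TLB_SYNC_MAX 4		/* stand-in for VFIO_IOMMU_TLB_SYNC_MAX */

	/* Hypothetical hardware hooks. */
	static void tlb_sync(void)                { puts("  TLB sync"); }
	static void unmap_one(unsigned long iova) { printf("  unmap %#lx\n", iova); }

	int main(void)
	{
		unsigned long iova;
		unsigned int pending = 0;   /* unmaps accumulated since the last sync */

		for (iova = 0x1000; iova <= 0xa000; iova += 0x1000) {
			unmap_one(iova);
			if (++pending >= TLB_SYNC_MAX) {   /* flush once enough work has queued up */
				tlb_sync();
				pending = 0;
			}
		}
		if (pending)                /* final sync for the stragglers */
			tlb_sync();
		return 0;
	}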
|
/Linux-v6.1/drivers/staging/media/ipu3/ |
D | ipu3-mmu.c |
    383  size_t unmapped_page, unmapped = 0;  in imgu_mmu_unmap() local
    402  while (unmapped < size) {  in imgu_mmu_unmap()
    411  unmapped += unmapped_page;  in imgu_mmu_unmap()
    416  return unmapped;  in imgu_mmu_unmap()
|
/Linux-v6.1/drivers/iommu/ |
D | virtio-iommu.c |
    347  size_t unmapped = 0;  in viommu_del_mappings() local
    367  unmapped += mapping->iova.last - mapping->iova.start + 1;  in viommu_del_mappings()
    374  return unmapped;  in viommu_del_mappings()
    840  size_t unmapped;  in viommu_unmap_pages() local
    845  unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);  in viommu_unmap_pages()
    846  if (unmapped < size)  in viommu_unmap_pages()
    851  return unmapped;  in viommu_unmap_pages()
    857  .virt_end = cpu_to_le64(iova + unmapped - 1),  in viommu_unmap_pages()
    861  return ret ? 0 : unmapped;  in viommu_unmap_pages()
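
virtio-iommu differs from the page-table drivers: the byte count comes from the driver's own bookkeeping, by summing the sizes of the mappings removed from the requested range, and an UNMAP request is only issued for what was actually removed. A toy sketch of that bookkeeping, using a flat array of records in place of the driver's interval tree:

	#include <stdio.h>

	/* Hypothetical mapping records standing in for the driver's interval tree. */
	struct mapping { unsigned long start, last; int live; };

	/* Drop every live mapping contained in [start, last] and report how many
	 * bytes of IOVA space that covered (last - start + 1 per mapping). */
	static unsigned long del_mappings(struct mapping *m, int n,
					  unsigned long start, unsigned long last)
	{
		unsigned long unmapped = 0;
		int i;

		for (i = 0; i < n; i++) {
			if (!m[i].live || m[i].start < start || m[i].last > last)
				continue;
			m[i].live = 0;
			unmapped += m[i].last - m[i].start + 1;
		}
		return unmapped;
	}

	int main(void)
	{
		struct mapping maps[] = {
			{ 0x0000, 0x0fff, 1 },
			{ 0x1000, 0x2fff, 1 },
			{ 0x3000, 0x3fff, 1 },
		};

		/* Remove everything in [0x1000, 0x3fff]; only then would the driver
		 * bother sending an UNMAP request covering the bytes removed. */
		printf("unmapped %#lx bytes\n", del_mappings(maps, 3, 0x1000, 0x3fff));
		return 0;
	}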
|
D | io-pgtable-arm-v7s.c |
    750  size_t unmapped = 0, ret;  in arm_v7s_unmap_pages() local
    760  unmapped += pgsize;  in arm_v7s_unmap_pages()
    764  return unmapped;  in arm_v7s_unmap_pages()
|
D | iommu.c |
    2342  size_t unmapped_page, unmapped = 0;  in __iommu_unmap() local
    2373  while (unmapped < size) {  in __iommu_unmap()
    2375  size - unmapped,  in __iommu_unmap()
    2384  unmapped += unmapped_page;  in __iommu_unmap()
    2387  trace_unmap(orig_iova, size, unmapped);  in __iommu_unmap()
    2388  return unmapped;  in __iommu_unmap()
|
/Linux-v6.1/drivers/media/platform/qcom/venus/ |
D | firmware.c |
    174  size_t unmapped;  in venus_shutdown_no_tz() local
    197  unmapped = iommu_unmap(iommu, VENUS_FW_START_ADDR, mapped);  in venus_shutdown_no_tz()
    199  if (unmapped != mapped)  in venus_shutdown_no_tz()
|
/Linux-v6.1/Documentation/mm/damon/ |
D | design.rst |
    50   mapped to the physical memory and accessed. Thus, tracking the unmapped
    54   cases. That said, too huge unmapped areas inside the monitoring target should
    59   gaps between the three regions are the two biggest unmapped areas in the given
    60   address space. The two biggest unmapped areas would be the gap between the
    169  virtual memory could be dynamically mapped and unmapped. Physical memory could
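
The design.rst lines describe how the virtual-address monitor keeps three regions and excludes the two biggest unmapped gaps between them. A toy sketch of picking those two gaps from a sorted list of mapped areas (the addresses are made up; DAMON itself works from the task's real memory map):

	#include <stdio.h>

	/* Hypothetical mapped areas (sorted, non-overlapping): [start, end) pairs. */
	struct area { unsigned long long start, end; };

	int main(void)
	{
		struct area map[] = {
			{ 0x0000400000ULL,   0x0000600000ULL },   /* text/data/heap */
			{ 0x7f0000000000ULL, 0x7f0000800000ULL }, /* mmap region */
			{ 0x7ffffffde000ULL, 0x7ffffffff000ULL }, /* stack */
		};
		int n = sizeof(map) / sizeof(map[0]);
		unsigned long long big1 = 0, big2 = 0;   /* two largest gaps */
		int i;

		for (i = 1; i < n; i++) {
			unsigned long long gap = map[i].start - map[i - 1].end;

			if (gap > big1) {
				big2 = big1;
				big1 = gap;
			} else if (gap > big2) {
				big2 = gap;
			}
		}
		/* DAMON would exclude these two gaps and monitor the three regions left. */
		printf("largest gaps: %#llx and %#llx\n", big1, big2);
		return 0;
	}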
|
/Linux-v6.1/Documentation/features/vm/TLB/ |
D | arch-support.txt | 4 # description: arch supports deferral of TLB flush until multiple pages are unmapped
|
/Linux-v6.1/Documentation/x86/x86_64/ |
D | 5level-paging.rst |
    49  to look for unmapped area by specified address. If it's already
    50  occupied, we look for unmapped area in *full* address space, rather than
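
The 5level-paging.rst lines describe the opt-in: mappings stay below the 47-bit boundary unless the caller passes a hint address above it, in which case the kernel may place the mapping in the extended address space. A small userspace illustration of passing such a hint (purely illustrative; on kernels or hardware without 5-level paging the mapping simply lands below the boundary as usual):

	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		/* Hint above the legacy 47-bit user address space (x86-64). */
		void *hint = (void *)(1UL << 47);
		void *p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 1;

		printf("mapped at %p\n", p);   /* may be above or below the boundary */
		munmap(p, 4096);
		return 0;
	}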
|
/Linux-v6.1/Documentation/networking/device_drivers/ethernet/marvell/ |
D | octeontx2.rst |
    171  - Error due to operation of unmapped PF.
    185  - Error due to unmapped slot.
    235  - Receive packet on an unmapped PF.
    247  - Error due to unmapped slot.
    289  Rx on unmapped PF_FUNC
|
/Linux-v6.1/drivers/gpu/drm/etnaviv/ |
D | etnaviv_mmu.c |
    19  size_t unmapped_page, unmapped = 0;  in etnaviv_context_unmap() local
    28  while (unmapped < size) {  in etnaviv_context_unmap()
    35  unmapped += unmapped_page;  in etnaviv_context_unmap()
|
/Linux-v6.1/Documentation/userspace-api/media/v4l/ |
D | vidioc-reqbufs.rst |
    67   orphaned and will be freed when they are unmapped or when the exported DMABUF
    153  when they are unmapped or when the exported DMABUF fds are closed.
|
/Linux-v6.1/Documentation/ABI/testing/ |
D | sysfs-class-rnbd-server | 32 When the device is unmapped by that client, the directory will be removed.
|
D | sysfs-block-rnbd | 9 is using the device. When "force" is used, the device is also unmapped
|
/Linux-v6.1/Documentation/mm/ |
D | zsmalloc.rst | 34 unmapped using zs_unmap_object().
|
/Linux-v6.1/Documentation/admin-guide/device-mapper/ |
D | dm-zoned.rst |
    180  of unmapped (ie free) random zones, <nr_rnd> the total number of zones,
    181  <nr_unmap_seq> the number of unmapped sequential zones, and <nr_seq> the
|
/Linux-v6.1/arch/arm64/kvm/hyp/ |
D | pgtable.c |
    455  u64 unmapped;  member
    486  data->unmapped += granule;  in hyp_unmap_walker()
    514  return unmap_data.unmapped;  in kvm_pgtable_hyp_unmap()
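
The hyp pgtable.c hits show the walker-with-accumulator pattern: the caller packs a counter into a context struct, the per-entry callback adds the granule it tore down, and the caller reads the total back out. A self-contained sketch of that shape (the names and the fixed 4 KiB granule are illustrative, not the kvm_pgtable API):

	#include <stdio.h>

	/* Context struct carrying the accumulator across callback invocations. */
	struct unmap_data { unsigned long unmapped; };

	static void unmap_walker(struct unmap_data *data, unsigned long granule)
	{
		/* ...a real walker would clear the page-table entry here... */
		data->unmapped += granule;
	}

	static unsigned long walk_and_unmap(unsigned long nr_entries)
	{
		struct unmap_data data = { 0 };
		unsigned long i;

		for (i = 0; i < nr_entries; i++)
			unmap_walker(&data, 0x1000);    /* one 4 KiB granule per entry */

		return data.unmapped;
	}

	int main(void)
	{
		printf("unmapped %#lx bytes\n", walk_and_unmap(8));
		return 0;
	}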
|