/Linux-v6.1/mm/ |
D | truncate.c |
	32  static inline void __clear_shadow_entry(struct address_space *mapping, in __clear_shadow_entry() argument
	35  XA_STATE(xas, &mapping->i_pages, index); in __clear_shadow_entry()
	43  static void clear_shadow_entry(struct address_space *mapping, pgoff_t index, in clear_shadow_entry() argument
	46  spin_lock(&mapping->host->i_lock); in clear_shadow_entry()
	47  xa_lock_irq(&mapping->i_pages); in clear_shadow_entry()
	48  __clear_shadow_entry(mapping, index, entry); in clear_shadow_entry()
	49  xa_unlock_irq(&mapping->i_pages); in clear_shadow_entry()
	50  if (mapping_shrinkable(mapping)) in clear_shadow_entry()
	51  inode_add_lru(mapping->host); in clear_shadow_entry()
	52  spin_unlock(&mapping->host->i_lock); in clear_shadow_entry()
	[all …]
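The truncate.c hits show the lock ordering used when a shadow (workingset) entry is dropped: the inode's i_lock is taken around the i_pages xarray lock so the inode can be put back on the LRU once the mapping becomes shrinkable. A minimal sketch of that pattern (illustrative helper name, not the verbatim v6.1 code)::

	/* Sketch only: mirrors the locking order seen in mm/truncate.c. */
	static void drop_shadow_entry(struct address_space *mapping, pgoff_t index,
				      void *entry)
	{
		XA_STATE(xas, &mapping->i_pages, index);

		spin_lock(&mapping->host->i_lock);	/* protects inode LRU state */
		xa_lock_irq(&mapping->i_pages);		/* protects the page-cache xarray */

		if (xas_load(&xas) == entry)		/* still the same shadow entry? */
			xas_store(&xas, NULL);

		xa_unlock_irq(&mapping->i_pages);
		if (mapping_shrinkable(mapping))	/* mostly empty: make inode reclaimable */
			inode_add_lru(mapping->host);
		spin_unlock(&mapping->host->i_lock);
	}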
|
D | filemap.c |
	124  static void page_cache_delete(struct address_space *mapping, in page_cache_delete() argument
	127  XA_STATE(xas, &mapping->i_pages, folio->index); in page_cache_delete()
	130  mapping_set_update(&xas, mapping); in page_cache_delete()
	143  folio->mapping = NULL; in page_cache_delete()
	145  mapping->nrpages -= nr; in page_cache_delete()
	148  static void filemap_unaccount_folio(struct address_space *mapping, in filemap_unaccount_folio() argument
	161  if (mapping_exiting(mapping) && !folio_test_large(folio)) { in filemap_unaccount_folio()
	190  filemap_nr_thps_dec(mapping); in filemap_unaccount_folio()
	208  mapping_can_writeback(mapping))) in filemap_unaccount_folio()
	209  folio_account_cleaned(folio, inode_to_wb(mapping->host)); in filemap_unaccount_folio()
	[all …]
|
/Linux-v6.1/include/linux/ |
D | pagemap.h |
	21  unsigned long invalidate_mapping_pages(struct address_space *mapping,
	30  int invalidate_inode_pages2(struct address_space *mapping);
	31  int invalidate_inode_pages2_range(struct address_space *mapping,
	36  int filemap_fdatawait_keep_errors(struct address_space *mapping);
	38  int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
	41  static inline int filemap_fdatawait(struct address_space *mapping) in filemap_fdatawait() argument
	43  return filemap_fdatawait_range(mapping, 0, LLONG_MAX); in filemap_fdatawait()
	47  int filemap_write_and_wait_range(struct address_space *mapping,
	49  int __filemap_fdatawrite_range(struct address_space *mapping,
	51  int filemap_fdatawrite_range(struct address_space *mapping,
	[all …]
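pagemap.h declares the writeback/wait helpers that filesystems call from paths such as fsync. A hedged sketch of how a couple of them compose (hypothetical helper, not taken from any particular filesystem)::

	#include <linux/fs.h>
	#include <linux/pagemap.h>

	/* Sketch: flush and wait on a byte range of an inode's page cache. */
	static int example_flush_range(struct inode *inode, loff_t start, loff_t end)
	{
		struct address_space *mapping = inode->i_mapping;
		int err;

		/* Start writeback and wait for it; also reports earlier async errors. */
		err = filemap_write_and_wait_range(mapping, start, end);
		if (err)
			return err;

		/* Best-effort: drop clean, unmapped pages in the range from the cache. */
		invalidate_mapping_pages(mapping, start >> PAGE_SHIFT, end >> PAGE_SHIFT);
		return 0;
	}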
|
D | io-mapping.h |
	17  * The io_mapping mechanism provides an abstraction for mapping
	20  * See Documentation/driver-api/io-mapping.rst
	35  * For small address space machines, mapping large objects
	58  io_mapping_fini(struct io_mapping *mapping) in io_mapping_fini() argument
	60  iomap_free(mapping->base, mapping->size); in io_mapping_fini()
	65  io_mapping_map_atomic_wc(struct io_mapping *mapping, in io_mapping_map_atomic_wc() argument
	70  BUG_ON(offset >= mapping->size); in io_mapping_map_atomic_wc()
	71  phys_addr = mapping->base + offset; in io_mapping_map_atomic_wc()
	74  return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot); in io_mapping_map_atomic_wc()
	86  io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset) in io_mapping_map_local_wc() argument
	[all …]
|
D | tpm_eventlog.h |
	166  void *mapping = NULL; in __calc_tpm2_event_size() local
	186  mapping = TPM_MEMREMAP((unsigned long)marker_start, in __calc_tpm2_event_size()
	188  if (!mapping) { in __calc_tpm2_event_size()
	193  mapping = marker_start; in __calc_tpm2_event_size()
	196  event = (struct tcg_pcr_event2_head *)mapping; in __calc_tpm2_event_size()
	233  TPM_MEMUNMAP(mapping, mapping_size); in __calc_tpm2_event_size()
	235  mapping = TPM_MEMREMAP((unsigned long)marker, in __calc_tpm2_event_size()
	237  if (!mapping) { in __calc_tpm2_event_size()
	242  mapping = marker; in __calc_tpm2_event_size()
	245  memcpy(&halg, mapping, halg_size); in __calc_tpm2_event_size()
	[all …]
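The __calc_tpm2_event_size() hits revolve around temporarily mapping the event log so its headers can be parsed, then unmapping it again. Outside early boot the same idea is normally expressed with memremap()/memunmap(); a hedged sketch with a hypothetical helper::

	#include <linux/io.h>
	#include <linux/string.h>

	/* Sketch: map a physical range write-back, copy a header out, unmap again. */
	static int example_read_header(phys_addr_t phys, void *hdr, size_t hdr_size)
	{
		void *mapping = memremap(phys, hdr_size, MEMREMAP_WB);

		if (!mapping)
			return -ENOMEM;

		memcpy(hdr, mapping, hdr_size);	/* copy out before the mapping goes away */
		memunmap(mapping);
		return 0;
	}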
|
D | dax.h |
	70  * Check if given mapping is supported by the file / underlying device.
	157  int dax_writeback_mapping_range(struct address_space *mapping,
	160  struct page *dax_layout_busy_page(struct address_space *mapping);
	161  struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end);
	164  dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
	166  void dax_unlock_mapping_entry(struct address_space *mapping,
	169  static inline struct page *dax_layout_busy_page(struct address_space *mapping) in dax_layout_busy_page() argument
	174  static inline struct page *dax_layout_busy_page_range(struct address_space *mapping, pgoff_t start,… in dax_layout_busy_page_range() argument
	179  static inline int dax_writeback_mapping_range(struct address_space *mapping, in dax_writeback_mapping_range() argument
	187  if (IS_DAX(page->mapping->host)) in dax_lock_page()
	[all …]
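dax_writeback_mapping_range() is the entry point a filesystem's ->writepages path typically uses for DAX inodes, flushing dirty DAX entries through the dax_device instead of the page cache. A rough, hedged sketch (the dax_device is passed in explicitly here for brevity; a real filesystem would fetch it from its superblock info)::

	/* Sketch: route writeback of a DAX inode through the DAX path. */
	static int example_writepages(struct address_space *mapping,
				      struct writeback_control *wbc,
				      struct dax_device *dax_dev)
	{
		if (IS_DAX(mapping->host))
			return dax_writeback_mapping_range(mapping, dax_dev, wbc);

		return generic_writepages(mapping, wbc);	/* non-DAX fallback in v6.1 */
	}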
|
/Linux-v6.1/drivers/gpu/drm/panfrost/ |
D | panfrost_gem.c |
	7  #include <linux/dma-mapping.h>
	59  struct panfrost_gem_mapping *iter, *mapping = NULL; in panfrost_gem_mapping_get() local
	65  mapping = iter; in panfrost_gem_mapping_get()
	71  return mapping; in panfrost_gem_mapping_get()
	75  panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping) in panfrost_gem_teardown_mapping() argument
	77  if (mapping->active) in panfrost_gem_teardown_mapping()
	78  panfrost_mmu_unmap(mapping); in panfrost_gem_teardown_mapping()
	80  spin_lock(&mapping->mmu->mm_lock); in panfrost_gem_teardown_mapping()
	81  if (drm_mm_node_allocated(&mapping->mmnode)) in panfrost_gem_teardown_mapping()
	82  drm_mm_remove_node(&mapping->mmnode); in panfrost_gem_teardown_mapping()
	[all …]
|
/Linux-v6.1/drivers/gpu/drm/tegra/ |
D | uapi.c |
	17  struct tegra_drm_mapping *mapping = in tegra_drm_mapping_release() local
	20  host1x_bo_unpin(mapping->map); in tegra_drm_mapping_release()
	21  host1x_bo_put(mapping->bo); in tegra_drm_mapping_release()
	23  kfree(mapping); in tegra_drm_mapping_release()
	26  void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping) in tegra_drm_mapping_put() argument
	28  kref_put(&mapping->ref, tegra_drm_mapping_release); in tegra_drm_mapping_put()
	33  struct tegra_drm_mapping *mapping; in tegra_drm_channel_context_close() local
	39  xa_for_each(&context->mappings, id, mapping) in tegra_drm_channel_context_close()
	40  tegra_drm_mapping_put(mapping); in tegra_drm_channel_context_close()
	189  struct tegra_drm_mapping *mapping; in tegra_drm_ioctl_channel_map() local
	[all …]
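tegra_drm_mapping_put() is a standard kref pattern: the final put runs a release callback that unpins buffers and frees the mapping. A generic, hedged sketch of that lifetime scheme (names are illustrative)::

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct example_mapping {
		struct kref ref;
		/* ... iova, buffer object pointer, etc. ... */
	};

	static void example_mapping_release(struct kref *ref)
	{
		struct example_mapping *mapping =
			container_of(ref, struct example_mapping, ref);

		/* undo pinning / MMU setup here, then free the bookkeeping */
		kfree(mapping);
	}

	static void example_mapping_put(struct example_mapping *mapping)
	{
		kref_put(&mapping->ref, example_mapping_release);
	}

Creation would pair this with kref_init(&mapping->ref), and each additional user would take a reference with kref_get().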
|
/Linux-v6.1/Documentation/admin-guide/mm/ |
D | nommu-mmap.rst |
	2  No-MMU memory mapping support
	5  The kernel has limited support for memory mapping under no-MMU conditions, such
	7  mapping is made use of in conjunction with the mmap() system call, the shmat()
	9  mapping is actually performed by the binfmt drivers, which call back into the
	12  Memory mapping behaviour also involves the way fork(), vfork(), clone() and
	19  (#) Anonymous mapping, MAP_PRIVATE
	27  (#) Anonymous mapping, MAP_SHARED
	37  the underlying file are reflected in the mapping; copied across fork.
	41  - If one exists, the kernel will re-use an existing mapping to the
	45  - If possible, the file mapping will be directly on the backing device
	[all …]
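The nommu-mmap.rst excerpt enumerates how each mmap() flavour behaves without an MMU. The userspace call itself is unchanged; only the guarantees behind it differ (an anonymous MAP_PRIVATE region, for example, comes from a contiguous chunk of RAM on no-MMU). A minimal userspace example of that case::

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 4096;
		/* Anonymous, private mapping: on no-MMU this is contiguous RAM. */
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		memset(p, 0, len);
		munmap(p, len);
		return 0;
	}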
|
/Linux-v6.1/arch/arm/mm/ |
D | dma-mapping.c |
	3  * linux/arch/arm/mm/dma-mapping.c
	7  * DMA uncached mapping support.
	288  * Clear previous low-memory mapping to ensure that the in dma_contiguous_remap()
	609  * Free a buffer as defined by the above mapping.
	683  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
	758  static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
	760  static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, in __alloc_iova() argument
	766  size_t mapping_size = mapping->bits << PAGE_SHIFT; in __alloc_iova()
	777  spin_lock_irqsave(&mapping->lock, flags); in __alloc_iova()
	778  for (i = 0; i < mapping->nr_bitmaps; i++) { in __alloc_iova()
	[all …]
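__alloc_iova() hands out I/O virtual addresses from per-mapping bitmaps under a spinlock. A simplified, hedged sketch of a single-bitmap allocator in the same spirit (structure and names are invented for illustration)::

	#include <linux/bitmap.h>
	#include <linux/dma-mapping.h>
	#include <linux/spinlock.h>

	struct example_iommu_mapping {
		spinlock_t lock;
		unsigned long *bitmap;	/* one bit per IOVA page */
		size_t bits;		/* number of pages covered */
		dma_addr_t base;	/* first IOVA of the region */
	};

	static dma_addr_t example_alloc_iova(struct example_iommu_mapping *mapping,
					     size_t nr_pages)
	{
		unsigned long flags, start;

		spin_lock_irqsave(&mapping->lock, flags);
		start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits,
						   0, nr_pages, 0);
		if (start >= mapping->bits) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_MAPPING_ERROR;	/* no contiguous run of pages left */
		}
		bitmap_set(mapping->bitmap, start, nr_pages);
		spin_unlock_irqrestore(&mapping->lock, flags);

		return mapping->base + ((dma_addr_t)start << PAGE_SHIFT);
	}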
|
D | flush.c |
	199  void __flush_dcache_page(struct address_space *mapping, struct page *page) in __flush_dcache_page() argument
	202  * Writeback any data associated with the kernel mapping of this in __flush_dcache_page()
	204  * coherent with the kernels mapping. in __flush_dcache_page()
	232  if (mapping && cache_is_vipt_aliasing()) in __flush_dcache_page()
	237  static void __flush_dcache_aliases(struct address_space *mapping, struct page *page) in __flush_dcache_aliases() argument
	247  * - aliasing VIPT: we only need to find one mapping of this page. in __flush_dcache_aliases()
	251  flush_dcache_mmap_lock(mapping); in __flush_dcache_aliases()
	252  vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { in __flush_dcache_aliases()
	265  flush_dcache_mmap_unlock(mapping); in __flush_dcache_aliases()
	273  struct address_space *mapping; in __sync_icache_dcache() local
	[all …]
|
/Linux-v6.1/drivers/gpu/drm/exynos/ |
D | exynos_drm_dma.c |
	34  * drm_iommu_attach_device- attach device to iommu mapping
	40  * mapping.
	57  * Keep the original DMA mapping of the sub-device and in drm_iommu_attach_device()
	66  ret = arm_iommu_attach_device(subdrv_dev, priv->mapping); in drm_iommu_attach_device()
	68  ret = iommu_attach_device(priv->mapping, subdrv_dev); in drm_iommu_attach_device()
	75  * drm_iommu_detach_device -detach device address space mapping from device
	81  * mapping
	92  iommu_detach_device(priv->mapping, subdrv_dev); in drm_iommu_detach_device()
	102  DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n", in exynos_drm_register_dma()
	109  if (!priv->mapping) { in exynos_drm_register_dma()
	[all …]
|
/Linux-v6.1/drivers/media/usb/uvc/ |
D | uvc_ctrl.c |
	380  static s32 uvc_ctrl_get_zoom(struct uvc_control_mapping *mapping, in uvc_ctrl_get_zoom() argument
	398  static void uvc_ctrl_set_zoom(struct uvc_control_mapping *mapping, in uvc_ctrl_set_zoom() argument
	405  static s32 uvc_ctrl_get_rel_speed(struct uvc_control_mapping *mapping, in uvc_ctrl_get_rel_speed() argument
	408  unsigned int first = mapping->offset / 8; in uvc_ctrl_get_rel_speed()
	425  static void uvc_ctrl_set_rel_speed(struct uvc_control_mapping *mapping, in uvc_ctrl_set_rel_speed() argument
	428  unsigned int first = mapping->offset / 8; in uvc_ctrl_set_rel_speed()
	772  * Extract the bit string specified by mapping->offset and mapping->size
	774  * a signed 32bit integer. Sign extension will be performed if the mapping
	777  static s32 uvc_get_le_value(struct uvc_control_mapping *mapping, in uvc_get_le_value() argument
	780  int bits = mapping->size; in uvc_get_le_value()
	[all …]
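uvc_get_le_value() extracts a little-endian bit string at mapping->offset / mapping->size from a control report and sign-extends it. A standalone, hedged sketch of the same operation, assuming 1 <= bits <= 32::

	#include <stdint.h>

	/* Sketch: pull `bits` bits starting at bit `offset` out of little-endian
	 * data and sign-extend the result to 32 bits. */
	static int32_t get_le_value(const uint8_t *data, unsigned int offset,
				    unsigned int bits)
	{
		uint64_t value = 0;
		unsigned int first = offset / 8;		/* first byte holding the field */
		unsigned int shift = offset % 8;
		unsigned int i, nbytes = (shift + bits + 7) / 8;

		for (i = 0; i < nbytes; i++)
			value |= (uint64_t)data[first + i] << (8 * i);

		value = (value >> shift) & ((1ULL << bits) - 1);

		if (value & (1ULL << (bits - 1)))		/* sign bit set? extend it */
			value |= ~((1ULL << bits) - 1);

		return (int32_t)value;
	}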
|
/Linux-v6.1/Documentation/driver-api/ |
D | io-mapping.rst |
	8  The io_mapping functions in linux/io-mapping.h provide an abstraction for
	9  efficiently mapping small regions of an I/O device to the CPU. The initial
	14  A mapping object is created during driver initialization using::
	20  mappable, while 'size' indicates how large a mapping region to
	23  This _wc variant provides a mapping which may only be used with
	27  With this mapping object, individual pages can be mapped either temporarily
	31  void *io_mapping_map_local_wc(struct io_mapping *mapping,
	34  void *io_mapping_map_atomic_wc(struct io_mapping *mapping,
	37  'offset' is the offset within the defined mapping region. Accessing
	46  Temporary mappings are only valid in the context of the caller. The mapping
	[all …]
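Putting the io-mapping.rst pieces together, a driver creates the mapping object once at init time and then maps individual pages for short CPU accesses. A hedged sketch (BAR base, size and offset are made up)::

	#include <linux/io.h>
	#include <linux/io-mapping.h>

	/* Sketch: one-off setup plus a short-lived, per-CPU page access. */
	static int example_use_io_mapping(resource_size_t bar_base,
					  unsigned long bar_size,
					  unsigned long offset)
	{
		struct io_mapping *mapping;
		void __iomem *vaddr;

		mapping = io_mapping_create_wc(bar_base, bar_size);
		if (!mapping)
			return -ENOMEM;

		vaddr = io_mapping_map_local_wc(mapping, offset);	/* temporary mapping */
		writel(0x1, vaddr);					/* touch the mapped page */
		io_mapping_unmap_local(vaddr);

		io_mapping_free(mapping);
		return 0;
	}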
|
/Linux-v6.1/tools/testing/selftests/vm/ |
D | mremap_dontunmap.c |
	62  "unable to unmap destination mapping"); in kernel_support_for_mremap_dontunmap()
	66  "unable to unmap source mapping"); in kernel_support_for_mremap_dontunmap()
	70  // This helper will just validate that an entire mapping contains the expected
	97  // the source mapping mapped.
	109  // Try to just move the whole mapping anywhere (not fixed). in mremap_dontunmap_simple()
	125  "unable to unmap destination mapping"); in mremap_dontunmap_simple()
	127  "unable to unmap source mapping"); in mremap_dontunmap_simple()
	130  // This test validates that MREMAP_DONTUNMAP on a shared mapping works as expected.
	150  // Try to just move the whole mapping anywhere (not fixed). in mremap_dontunmap_simple_shmem()
	157  "unable to unmap source mapping"); in mremap_dontunmap_simple_shmem()
	[all …]
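The selftest is built around mremap(2) with MREMAP_DONTUNMAP, which moves the pages to a new address while leaving the old range mapped but empty. A minimal userspace illustration of that call (the flag needs Linux 5.7 or newer)::

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/mman.h>

	#ifndef MREMAP_DONTUNMAP
	#define MREMAP_DONTUNMAP 4	/* fallback for older libc headers */
	#endif

	int main(void)
	{
		size_t len = 4096;
		void *src = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		void *dst;

		if (src == MAP_FAILED)
			return 1;

		/* DONTUNMAP requires MAYMOVE; src stays mapped but loses its pages. */
		dst = mremap(src, len, len, MREMAP_MAYMOVE | MREMAP_DONTUNMAP, NULL);
		if (dst == MAP_FAILED) {
			perror("mremap");
			return 1;
		}

		munmap(dst, len);
		munmap(src, len);
		return 0;
	}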
|
/Linux-v6.1/tools/testing/selftests/arm64/mte/ |
D | check_mmap_options.c |
	60  static int check_anonymous_memory_mapping(int mem_type, int mode, int mapping, int tag_check) in check_anonymous_memory_mapping() argument
	70  map_ptr = (char *)mte_allocate_memory(map_size, mem_type, mapping, false); in check_anonymous_memory_mapping()
	92  static int check_file_memory_mapping(int mem_type, int mode, int mapping, int tag_check) in check_file_memory_mapping() argument
	106  map_ptr = (char *)mte_allocate_file_memory(map_size, mem_type, mapping, false, fd); in check_file_memory_mapping()
	131  static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping) in check_clear_prot_mte_flag() argument
	141  ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping, in check_clear_prot_mte_flag()
	162  ptr = (char *)mte_allocate_file_memory_tag_range(sizes[run], mem_type, mapping, in check_clear_prot_mte_flag()
	214  "Check anonymous memory with private mapping, sync error mode, mmap memory and tag check off\n"); in main()
	216  …"Check file memory with private mapping, sync error mode, mmap/mprotect memory and tag check off\n… in main()
	220  "Check anonymous memory with private mapping, no error mode, mmap memory and tag check off\n"); in main()
	[all …]
|
D | check_child_memory.c |
	84  static int check_child_memory_mapping(int mem_type, int mode, int mapping) in check_child_memory_mapping() argument
	93  ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping, in check_child_memory_mapping()
	106  static int check_child_file_mapping(int mem_type, int mode, int mapping) in check_child_file_mapping() argument
	119  map_ptr = (char *)mte_allocate_file_memory(map_size, mem_type, mapping, false, fd); in check_child_file_mapping()
	170  "Check child anonymous memory with private mapping, precise mode and mmap memory\n"); in main()
	172  "Check child anonymous memory with shared mapping, precise mode and mmap memory\n"); in main()
	174  "Check child anonymous memory with private mapping, imprecise mode and mmap memory\n"); in main()
	176  "Check child anonymous memory with shared mapping, imprecise mode and mmap memory\n"); in main()
	178  "Check child anonymous memory with private mapping, precise mode and mmap/mprotect memory\n"); in main()
	180  "Check child anonymous memory with shared mapping, precise mode and mmap/mprotect memory\n"); in main()
	[all …]
|
/Linux-v6.1/drivers/gpu/drm/etnaviv/ |
D | etnaviv_mmu.c |
	6  #include <linux/dma-mapping.h>
	65  /* unroll mapping in case something went wrong */ in etnaviv_context_map()
	127  struct etnaviv_vram_mapping *mapping) in etnaviv_iommu_remove_mapping() argument
	129  struct etnaviv_gem_object *etnaviv_obj = mapping->object; in etnaviv_iommu_remove_mapping()
	133  etnaviv_iommu_unmap(context, mapping->vram_node.start, in etnaviv_iommu_remove_mapping()
	135  drm_mm_remove_node(&mapping->vram_node); in etnaviv_iommu_remove_mapping()
	170  * so we must keep its mapping. in etnaviv_iommu_find_iova()
	202  * this mapping. in etnaviv_iommu_find_iova()
	239  * When we can't insert the node, due to a existing mapping blocking in etnaviv_iommu_insert_exact()
	246  * here to make space for the new mapping. in etnaviv_iommu_insert_exact()
	[all …]
|
D | etnaviv_gem.c |
	7  #include <linux/dma-mapping.h>
	218  struct etnaviv_vram_mapping *mapping; in etnaviv_gem_get_vram_mapping() local
	220  list_for_each_entry(mapping, &obj->vram_list, obj_node) { in etnaviv_gem_get_vram_mapping()
	221  if (mapping->context == context) in etnaviv_gem_get_vram_mapping()
	222  return mapping; in etnaviv_gem_get_vram_mapping()
	228  void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping) in etnaviv_gem_mapping_unreference() argument
	230  struct etnaviv_gem_object *etnaviv_obj = mapping->object; in etnaviv_gem_mapping_unreference()
	233  WARN_ON(mapping->use == 0); in etnaviv_gem_mapping_unreference()
	234  mapping->use -= 1; in etnaviv_gem_mapping_unreference()
	245  struct etnaviv_vram_mapping *mapping; in etnaviv_gem_mapping_get() local
	[all …]
|
/Linux-v6.1/fs/gfs2/ |
D | aops.c |
	95  struct inode * const inode = page->mapping->host; in gfs2_write_jdata_page()
	128  struct inode *inode = page->mapping->host; in __gfs2_jdata_writepage()
	154  struct inode *inode = page->mapping->host; in gfs2_jdata_writepage()
	173  * @mapping: The mapping to write
	178  static int gfs2_writepages(struct address_space *mapping, in gfs2_writepages() argument
	181  struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); in gfs2_writepages()
	191  ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops); in gfs2_writepages()
	199  * @mapping: The mapping
	208  static int gfs2_write_jdata_pagevec(struct address_space *mapping, in gfs2_write_jdata_pagevec() argument
	214  struct inode *inode = mapping->host; in gfs2_write_jdata_pagevec()
	[all …]
|
/Linux-v6.1/drivers/sh/clk/ |
D | core.c |
	340  struct clk_mapping *mapping = clk->mapping; in clk_establish_mapping() local
	345  if (!mapping) { in clk_establish_mapping()
	349  * dummy mapping for root clocks with no specified ranges in clk_establish_mapping()
	352  clk->mapping = &dummy_mapping; in clk_establish_mapping()
	357  * If we're on a child clock and it provides no mapping of its in clk_establish_mapping()
	358  * own, inherit the mapping from its root clock. in clk_establish_mapping()
	361  mapping = clkp->mapping; in clk_establish_mapping()
	362  BUG_ON(!mapping); in clk_establish_mapping()
	366  * Establish initial mapping. in clk_establish_mapping()
	368  if (!mapping->base && mapping->phys) { in clk_establish_mapping()
	[all …]
|
/Linux-v6.1/fs/ |
D | dax.c |
	191  * @entry may no longer be the entry at the index in the mapping.
	337  static inline bool dax_mapping_is_cow(struct address_space *mapping) in dax_mapping_is_cow() argument
	339  return (unsigned long)mapping == PAGE_MAPPING_DAX_COW; in dax_mapping_is_cow()
	343  * Set the page->mapping with FS_DAX_MAPPING_COW flag, increase the refcount.
	347  if ((uintptr_t)page->mapping != PAGE_MAPPING_DAX_COW) { in dax_mapping_set_cow()
	352  if (page->mapping) in dax_mapping_set_cow()
	354  page->mapping = (void *)PAGE_MAPPING_DAX_COW; in dax_mapping_set_cow()
	361  * whether this entry is shared by multiple files. If so, set the page->mapping
	364  static void dax_associate_entry(void *entry, struct address_space *mapping, in dax_associate_entry() argument
	380  WARN_ON_ONCE(page->mapping); in dax_associate_entry()
	[all …]
|
/Linux-v6.1/arch/nios2/mm/ |
D | cacheflush.c |
	74  static void flush_aliases(struct address_space *mapping, struct page *page) in flush_aliases() argument
	82  flush_dcache_mmap_lock(mapping); in flush_aliases()
	83  vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { in flush_aliases()
	95  flush_dcache_mmap_unlock(mapping); in flush_aliases()
	161  void __flush_dcache_page(struct address_space *mapping, struct page *page) in __flush_dcache_page() argument
	164  * Writeback any data associated with the kernel mapping of this in __flush_dcache_page()
	166  * coherent with the kernels mapping. in __flush_dcache_page()
	175  struct address_space *mapping; in flush_dcache_page() local
	184  mapping = page_mapping_file(page); in flush_dcache_page()
	187  if (mapping && !mapping_mapped(mapping)) { in flush_dcache_page()
	[all …]
|
/Linux-v6.1/drivers/net/ethernet/broadcom/bnxt/ |
D | bnxt_xdp.c |
	27  dma_addr_t mapping, u32 len, in bnxt_xmit_bd() argument
	56  txbd->tx_bd_haddr = cpu_to_le64(mapping); in bnxt_xmit_bd()
	82  dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping); in bnxt_xmit_bd()
	103  dma_addr_t mapping, u32 len, u16 rx_prod, in __bnxt_xmit_xdp() argument
	108  tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp); in __bnxt_xmit_xdp()
	116  dma_addr_t mapping, u32 len, in __bnxt_xmit_xdp_redirect() argument
	121  tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL); in __bnxt_xmit_xdp_redirect()
	124  dma_unmap_addr_set(tx_buf, mapping, mapping); in __bnxt_xmit_xdp_redirect()
	145  dma_unmap_addr(tx_buf, mapping), in bnxt_tx_int_xdp()
	186  dma_addr_t mapping; in bnxt_xdp_buff_init() local
	[all …]
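bnxt stores each TX buffer's DMA address with dma_unmap_addr_set() so the completion path can unmap it later. The generic pattern, sketched here with hypothetical names::

	#include <linux/dma-mapping.h>

	struct example_tx_buf {
		void *data;
		DEFINE_DMA_UNMAP_ADDR(mapping);	/* compiles away if unmap needs no address */
		DEFINE_DMA_UNMAP_LEN(len);
	};

	static int example_map_tx(struct device *dev, struct example_tx_buf *buf,
				  size_t len)
	{
		dma_addr_t mapping = dma_map_single(dev, buf->data, len, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, mapping))
			return -ENOMEM;

		dma_unmap_addr_set(buf, mapping, mapping);	/* remember for unmap */
		dma_unmap_len_set(buf, len, len);
		return 0;
	}

	static void example_unmap_tx(struct device *dev, struct example_tx_buf *buf)
	{
		dma_unmap_single(dev, dma_unmap_addr(buf, mapping),
				 dma_unmap_len(buf, len), DMA_TO_DEVICE);
	}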
|
/Linux-v6.1/Documentation/mm/ |
D | highmem.rst |
	20  The part of (physical) memory not covered by a permanent mapping is what we
	62  These mappings are thread-local and CPU-local, meaning that the mapping
	64  CPU while the mapping is active. Although preemption is never disabled by
	66  CPU-hotplug until the mapping is disposed.
	69  in which the local mapping is acquired does not allow it for other reasons.
	80  virtual address of the direct mapping. Only real highmem pages are
	93  therefore try to design their code to avoid the use of kmap() by mapping
	103  * kmap_atomic(). This permits a very short duration mapping of a single
	104  page. Since the mapping is restricted to the CPU that issued it, it
	118  * kmap(). This should be used to make short duration mapping of a single
	[all …]
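highmem.rst steers new code toward kmap_local_page() instead of kmap() or kmap_atomic(). A short, hedged sketch of the recommended pattern::

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Sketch: zero one page through a short-lived local mapping. */
	static void example_zero_page(struct page *page)
	{
		void *vaddr = kmap_local_page(page);	/* valid only in this thread/CPU context */

		memset(vaddr, 0, PAGE_SIZE);
		kunmap_local(vaddr);			/* release the temporary mapping */
	}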
|