
Searched refs:mapping (Results 1 – 25 of 1217) sorted by relevance


/Linux-v6.1/mm/
truncate.c
32 static inline void __clear_shadow_entry(struct address_space *mapping, in __clear_shadow_entry() argument
35 XA_STATE(xas, &mapping->i_pages, index); in __clear_shadow_entry()
43 static void clear_shadow_entry(struct address_space *mapping, pgoff_t index, in clear_shadow_entry() argument
46 spin_lock(&mapping->host->i_lock); in clear_shadow_entry()
47 xa_lock_irq(&mapping->i_pages); in clear_shadow_entry()
48 __clear_shadow_entry(mapping, index, entry); in clear_shadow_entry()
49 xa_unlock_irq(&mapping->i_pages); in clear_shadow_entry()
50 if (mapping_shrinkable(mapping)) in clear_shadow_entry()
51 inode_add_lru(mapping->host); in clear_shadow_entry()
52 spin_unlock(&mapping->host->i_lock); in clear_shadow_entry()
[all …]
filemap.c
124 static void page_cache_delete(struct address_space *mapping, in page_cache_delete() argument
127 XA_STATE(xas, &mapping->i_pages, folio->index); in page_cache_delete()
130 mapping_set_update(&xas, mapping); in page_cache_delete()
143 folio->mapping = NULL; in page_cache_delete()
145 mapping->nrpages -= nr; in page_cache_delete()
148 static void filemap_unaccount_folio(struct address_space *mapping, in filemap_unaccount_folio() argument
161 if (mapping_exiting(mapping) && !folio_test_large(folio)) { in filemap_unaccount_folio()
190 filemap_nr_thps_dec(mapping); in filemap_unaccount_folio()
208 mapping_can_writeback(mapping))) in filemap_unaccount_folio()
209 folio_account_cleaned(folio, inode_to_wb(mapping->host)); in filemap_unaccount_folio()
[all …]
readahead.c
140 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping) in file_ra_state_init() argument
142 ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages; in file_ra_state_init()
149 const struct address_space_operations *aops = rac->mapping->a_ops; in read_pages()
209 struct address_space *mapping = ractl->mapping; in page_cache_ra_unbounded() local
211 gfp_t gfp_mask = readahead_gfp_mask(mapping); in page_cache_ra_unbounded()
226 filemap_invalidate_lock_shared(mapping); in page_cache_ra_unbounded()
231 struct folio *folio = xa_load(&mapping->i_pages, index + i); in page_cache_ra_unbounded()
251 if (filemap_add_folio(mapping, folio, index + i, in page_cache_ra_unbounded()
271 filemap_invalidate_unlock_shared(mapping); in page_cache_ra_unbounded()
285 struct inode *inode = ractl->mapping->host; in do_page_cache_ra()
[all …]
page-writeback.c
1880 int balance_dirty_pages_ratelimited_flags(struct address_space *mapping, in balance_dirty_pages_ratelimited_flags() argument
1883 struct inode *inode = mapping->host; in balance_dirty_pages_ratelimited_flags()
1950 void balance_dirty_pages_ratelimited(struct address_space *mapping) in balance_dirty_pages_ratelimited() argument
1952 balance_dirty_pages_ratelimited_flags(mapping, 0); in balance_dirty_pages_ratelimited()
2229 void tag_pages_for_writeback(struct address_space *mapping, in tag_pages_for_writeback() argument
2232 XA_STATE(xas, &mapping->i_pages, start); in tag_pages_for_writeback()
2282 int write_cache_pages(struct address_space *mapping, in write_cache_pages() argument
2299 index = mapping->writeback_index; /* prev offset */ in write_cache_pages()
2308 tag_pages_for_writeback(mapping, index, end); in write_cache_pages()
2317 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, in write_cache_pages()
[all …]
/Linux-v6.1/include/linux/
pagemap.h
21 unsigned long invalidate_mapping_pages(struct address_space *mapping,
30 int invalidate_inode_pages2(struct address_space *mapping);
31 int invalidate_inode_pages2_range(struct address_space *mapping,
36 int filemap_fdatawait_keep_errors(struct address_space *mapping);
38 int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
41 static inline int filemap_fdatawait(struct address_space *mapping) in filemap_fdatawait() argument
43 return filemap_fdatawait_range(mapping, 0, LLONG_MAX); in filemap_fdatawait()
47 int filemap_write_and_wait_range(struct address_space *mapping,
49 int __filemap_fdatawrite_range(struct address_space *mapping,
51 int filemap_fdatawrite_range(struct address_space *mapping,
[all …]
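
The pagemap.h declarations above form the write-and-wait half of the pagecache API. As a rough illustration of how a filesystem's fsync path combines them, here is a minimal sketch; my_fsync and the elided metadata step are hypothetical, while filemap_write_and_wait_range() is the real v6.1 call shown above.

#include <linux/fs.h>
#include <linux/pagemap.h>

/*
 * Sketch of an fsync implementation: write back the dirty pagecache
 * for the byte range, wait for completion, and surface any I/O error
 * before the filesystem would flush its own metadata.
 */
static int my_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct address_space *mapping = file->f_mapping;
        int ret;

        /* Kicks off writeback for [start, end] and waits for it. */
        ret = filemap_write_and_wait_range(mapping, start, end);
        if (ret)
                return ret;

        /* ... a real filesystem would flush metadata/journal here ... */
        return 0;
}

vfs_fsync_range() reaches a callback of this shape through file->f_op->fsync.
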
io-mapping.h
58 io_mapping_fini(struct io_mapping *mapping) in io_mapping_fini() argument
60 iomap_free(mapping->base, mapping->size); in io_mapping_fini()
65 io_mapping_map_atomic_wc(struct io_mapping *mapping, in io_mapping_map_atomic_wc() argument
70 BUG_ON(offset >= mapping->size); in io_mapping_map_atomic_wc()
71 phys_addr = mapping->base + offset; in io_mapping_map_atomic_wc()
74 return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot); in io_mapping_map_atomic_wc()
86 io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset) in io_mapping_map_local_wc() argument
90 BUG_ON(offset >= mapping->size); in io_mapping_map_local_wc()
91 phys_addr = mapping->base + offset; in io_mapping_map_local_wc()
92 return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot); in io_mapping_map_local_wc()
[all …]
tpm_eventlog.h
166 void *mapping = NULL; in __calc_tpm2_event_size() local
186 mapping = TPM_MEMREMAP((unsigned long)marker_start, in __calc_tpm2_event_size()
188 if (!mapping) { in __calc_tpm2_event_size()
193 mapping = marker_start; in __calc_tpm2_event_size()
196 event = (struct tcg_pcr_event2_head *)mapping; in __calc_tpm2_event_size()
233 TPM_MEMUNMAP(mapping, mapping_size); in __calc_tpm2_event_size()
235 mapping = TPM_MEMREMAP((unsigned long)marker, in __calc_tpm2_event_size()
237 if (!mapping) { in __calc_tpm2_event_size()
242 mapping = marker; in __calc_tpm2_event_size()
245 memcpy(&halg, mapping, halg_size); in __calc_tpm2_event_size()
[all …]
secretmem.h
11 struct address_space *mapping; in page_is_secretmem() local
23 mapping = (struct address_space *) in page_is_secretmem()
24 ((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS); in page_is_secretmem()
26 if (!mapping || mapping != page->mapping) in page_is_secretmem()
29 return mapping->a_ops == &secretmem_aops; in page_is_secretmem()
dax.h
157 int dax_writeback_mapping_range(struct address_space *mapping,
160 struct page *dax_layout_busy_page(struct address_space *mapping);
161 struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end);
164 dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
166 void dax_unlock_mapping_entry(struct address_space *mapping,
169 static inline struct page *dax_layout_busy_page(struct address_space *mapping) in dax_layout_busy_page() argument
174 static inline struct page *dax_layout_busy_page_range(struct address_space *mapping, pgoff_t start,… in dax_layout_busy_page_range() argument
179 static inline int dax_writeback_mapping_range(struct address_space *mapping, in dax_writeback_mapping_range() argument
187 if (IS_DAX(page->mapping->host)) in dax_lock_page()
196 static inline dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, in dax_lock_mapping_entry() argument
[all …]
/Linux-v6.1/drivers/gpu/drm/panfrost/
panfrost_gem.c
59 struct panfrost_gem_mapping *iter, *mapping = NULL; in panfrost_gem_mapping_get() local
65 mapping = iter; in panfrost_gem_mapping_get()
71 return mapping; in panfrost_gem_mapping_get()
75 panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping) in panfrost_gem_teardown_mapping() argument
77 if (mapping->active) in panfrost_gem_teardown_mapping()
78 panfrost_mmu_unmap(mapping); in panfrost_gem_teardown_mapping()
80 spin_lock(&mapping->mmu->mm_lock); in panfrost_gem_teardown_mapping()
81 if (drm_mm_node_allocated(&mapping->mmnode)) in panfrost_gem_teardown_mapping()
82 drm_mm_remove_node(&mapping->mmnode); in panfrost_gem_teardown_mapping()
83 spin_unlock(&mapping->mmu->mm_lock); in panfrost_gem_teardown_mapping()
[all …]
/Linux-v6.1/drivers/gpu/drm/tegra/
uapi.c
17 struct tegra_drm_mapping *mapping = in tegra_drm_mapping_release() local
20 host1x_bo_unpin(mapping->map); in tegra_drm_mapping_release()
21 host1x_bo_put(mapping->bo); in tegra_drm_mapping_release()
23 kfree(mapping); in tegra_drm_mapping_release()
26 void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping) in tegra_drm_mapping_put() argument
28 kref_put(&mapping->ref, tegra_drm_mapping_release); in tegra_drm_mapping_put()
33 struct tegra_drm_mapping *mapping; in tegra_drm_channel_context_close() local
39 xa_for_each(&context->mappings, id, mapping) in tegra_drm_channel_context_close()
40 tegra_drm_mapping_put(mapping); in tegra_drm_channel_context_close()
189 struct tegra_drm_mapping *mapping; in tegra_drm_ioctl_channel_map() local
[all …]
/Linux-v6.1/drivers/media/usb/uvc/
uvc_ctrl.c
380 static s32 uvc_ctrl_get_zoom(struct uvc_control_mapping *mapping, in uvc_ctrl_get_zoom() argument
398 static void uvc_ctrl_set_zoom(struct uvc_control_mapping *mapping, in uvc_ctrl_set_zoom() argument
405 static s32 uvc_ctrl_get_rel_speed(struct uvc_control_mapping *mapping, in uvc_ctrl_get_rel_speed() argument
408 unsigned int first = mapping->offset / 8; in uvc_ctrl_get_rel_speed()
425 static void uvc_ctrl_set_rel_speed(struct uvc_control_mapping *mapping, in uvc_ctrl_set_rel_speed() argument
428 unsigned int first = mapping->offset / 8; in uvc_ctrl_set_rel_speed()
777 static s32 uvc_get_le_value(struct uvc_control_mapping *mapping, in uvc_get_le_value() argument
780 int bits = mapping->size; in uvc_get_le_value()
781 int offset = mapping->offset; in uvc_get_le_value()
802 if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED) in uvc_get_le_value()
[all …]
/Linux-v6.1/arch/arm/mm/
dma-mapping.c
758 static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
760 static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, in __alloc_iova() argument
766 size_t mapping_size = mapping->bits << PAGE_SHIFT; in __alloc_iova()
777 spin_lock_irqsave(&mapping->lock, flags); in __alloc_iova()
778 for (i = 0; i < mapping->nr_bitmaps; i++) { in __alloc_iova()
779 start = bitmap_find_next_zero_area(mapping->bitmaps[i], in __alloc_iova()
780 mapping->bits, 0, count, align); in __alloc_iova()
782 if (start > mapping->bits) in __alloc_iova()
785 bitmap_set(mapping->bitmaps[i], start, count); in __alloc_iova()
794 if (i == mapping->nr_bitmaps) { in __alloc_iova()
[all …]
flush.c
199 void __flush_dcache_page(struct address_space *mapping, struct page *page) in __flush_dcache_page() argument
232 if (mapping && cache_is_vipt_aliasing()) in __flush_dcache_page()
237 static void __flush_dcache_aliases(struct address_space *mapping, struct page *page) in __flush_dcache_aliases() argument
251 flush_dcache_mmap_lock(mapping); in __flush_dcache_aliases()
252 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { in __flush_dcache_aliases()
265 flush_dcache_mmap_unlock(mapping); in __flush_dcache_aliases()
273 struct address_space *mapping; in __sync_icache_dcache() local
284 mapping = page_mapping_file(page); in __sync_icache_dcache()
286 mapping = NULL; in __sync_icache_dcache()
289 __flush_dcache_page(mapping, page); in __sync_icache_dcache()
[all …]
/Linux-v6.1/drivers/gpu/drm/exynos/
exynos_drm_dma.c
66 ret = arm_iommu_attach_device(subdrv_dev, priv->mapping); in drm_iommu_attach_device()
68 ret = iommu_attach_device(priv->mapping, subdrv_dev); in drm_iommu_attach_device()
92 iommu_detach_device(priv->mapping, subdrv_dev); in drm_iommu_detach_device()
109 if (!priv->mapping) { in exynos_drm_register_dma()
110 void *mapping; in exynos_drm_register_dma() local
113 mapping = arm_iommu_create_mapping(&platform_bus_type, in exynos_drm_register_dma()
116 mapping = iommu_get_domain_for_dev(priv->dma_dev); in exynos_drm_register_dma()
118 mapping = ERR_PTR(-ENODEV); in exynos_drm_register_dma()
120 if (IS_ERR(mapping)) in exynos_drm_register_dma()
121 return PTR_ERR(mapping); in exynos_drm_register_dma()
[all …]
/Linux-v6.1/drivers/gpu/drm/etnaviv/
etnaviv_gem.c
218 struct etnaviv_vram_mapping *mapping; in etnaviv_gem_get_vram_mapping() local
220 list_for_each_entry(mapping, &obj->vram_list, obj_node) { in etnaviv_gem_get_vram_mapping()
221 if (mapping->context == context) in etnaviv_gem_get_vram_mapping()
222 return mapping; in etnaviv_gem_get_vram_mapping()
228 void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping) in etnaviv_gem_mapping_unreference() argument
230 struct etnaviv_gem_object *etnaviv_obj = mapping->object; in etnaviv_gem_mapping_unreference()
233 WARN_ON(mapping->use == 0); in etnaviv_gem_mapping_unreference()
234 mapping->use -= 1; in etnaviv_gem_mapping_unreference()
245 struct etnaviv_vram_mapping *mapping; in etnaviv_gem_mapping_get() local
250 mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context); in etnaviv_gem_mapping_get()
[all …]
etnaviv_mmu.c
127 struct etnaviv_vram_mapping *mapping) in etnaviv_iommu_remove_mapping() argument
129 struct etnaviv_gem_object *etnaviv_obj = mapping->object; in etnaviv_iommu_remove_mapping()
133 etnaviv_iommu_unmap(context, mapping->vram_node.start, in etnaviv_iommu_remove_mapping()
135 drm_mm_remove_node(&mapping->vram_node); in etnaviv_iommu_remove_mapping()
273 struct etnaviv_vram_mapping *mapping, u64 va) in etnaviv_iommu_map_gem() argument
290 mapping->iova = iova; in etnaviv_iommu_map_gem()
291 mapping->context = etnaviv_iommu_context_get(context); in etnaviv_iommu_map_gem()
292 list_add_tail(&mapping->mmu_node, &context->mappings); in etnaviv_iommu_map_gem()
298 node = &mapping->vram_node; in etnaviv_iommu_map_gem()
309 mapping->iova = node->start; in etnaviv_iommu_map_gem()
[all …]
/Linux-v6.1/fs/gfs2/
aops.c
95 struct inode * const inode = page->mapping->host; in gfs2_write_jdata_page()
128 struct inode *inode = page->mapping->host; in __gfs2_jdata_writepage()
154 struct inode *inode = page->mapping->host; in gfs2_jdata_writepage()
178 static int gfs2_writepages(struct address_space *mapping, in gfs2_writepages() argument
181 struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); in gfs2_writepages()
191 ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops); in gfs2_writepages()
208 static int gfs2_write_jdata_pagevec(struct address_space *mapping, in gfs2_write_jdata_pagevec() argument
214 struct inode *inode = mapping->host; in gfs2_write_jdata_pagevec()
231 if (unlikely(page->mapping != mapping)) { in gfs2_write_jdata_pagevec()
303 static int gfs2_write_cache_jdata(struct address_space *mapping, in gfs2_write_cache_jdata() argument
[all …]
/Linux-v6.1/fs/
dax.c
337 static inline bool dax_mapping_is_cow(struct address_space *mapping) in dax_mapping_is_cow() argument
339 return (unsigned long)mapping == PAGE_MAPPING_DAX_COW; in dax_mapping_is_cow()
347 if ((uintptr_t)page->mapping != PAGE_MAPPING_DAX_COW) { in dax_mapping_set_cow()
352 if (page->mapping) in dax_mapping_set_cow()
354 page->mapping = (void *)PAGE_MAPPING_DAX_COW; in dax_mapping_set_cow()
364 static void dax_associate_entry(void *entry, struct address_space *mapping, in dax_associate_entry() argument
380 WARN_ON_ONCE(page->mapping); in dax_associate_entry()
381 page->mapping = mapping; in dax_associate_entry()
387 static void dax_disassociate_entry(void *entry, struct address_space *mapping, in dax_disassociate_entry() argument
399 if (dax_mapping_is_cow(page->mapping)) { in dax_disassociate_entry()
[all …]
/Linux-v6.1/arch/nios2/mm/
cacheflush.c
74 static void flush_aliases(struct address_space *mapping, struct page *page) in flush_aliases() argument
82 flush_dcache_mmap_lock(mapping); in flush_aliases()
83 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { in flush_aliases()
95 flush_dcache_mmap_unlock(mapping); in flush_aliases()
161 void __flush_dcache_page(struct address_space *mapping, struct page *page) in __flush_dcache_page() argument
175 struct address_space *mapping; in flush_dcache_page() local
184 mapping = page_mapping_file(page); in flush_dcache_page()
187 if (mapping && !mapping_mapped(mapping)) { in flush_dcache_page()
190 __flush_dcache_page(mapping, page); in flush_dcache_page()
191 if (mapping) { in flush_dcache_page()
[all …]
/Linux-v6.1/Documentation/translations/zh_CN/mm/
page_migration.rst
143 2. ``int (*migratepage) (struct address_space *mapping,``
168 void __SetPageMovable(struct page *page, struct address_space *mapping)
171 PG_movable is not a real flag in struct page. Instead, the VM reuses the low
175 page->mapping = page->mapping | PAGE_MAPPING_MOVABLE;
177 so drivers should not access page->mapping directly. Instead, they should use page_mapping(), which
178 masks off the low two bits of page->mapping under the page lock, yielding the correct struct address_space.
181 non-LRU movable pages, because the page->mapping field is in a union with other fields of struct page. If
182 the driver frees a page after the VM has isolated it, then even though page->mapping has PAGE_MAPPING_MOVABLE set,
185 page->mapping can never have PAGE_MAPPING_MOVABLE set. With the lock_page() used in pfn scanning
189 by contrast, PageMovable() validates page->mapping under lock_page()
[all …]
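
To make the excerpted bit trick concrete, here is a small self-contained sketch (plain C, not kernel code) of how the low bits of page->mapping encode movability. The flag values match v6.1's include/linux/page-flags.h; the struct page and helper names below are simplified stand-ins for the real __SetPageMovable(), page_mapping(), and __PageMovable().

#include <stdbool.h>

/* Flag values as defined in v6.1 include/linux/page-flags.h. */
#define PAGE_MAPPING_ANON       0x1UL
#define PAGE_MAPPING_MOVABLE    0x2UL
#define PAGE_MAPPING_FLAGS      (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

struct address_space;

/* Simplified stand-in: the real field is struct address_space *mapping,
 * whose low two bits are always clear because of pointer alignment. */
struct page {
        unsigned long mapping;
};

/* What __SetPageMovable() boils down to: tag the pointer's low bits. */
static void set_page_movable(struct page *page, struct address_space *mapping)
{
        page->mapping = (unsigned long)mapping | PAGE_MAPPING_MOVABLE;
}

/* What page_mapping() does for its caller: mask the low two bits off
 * so the real struct address_space pointer is recovered. */
static struct address_space *page_real_mapping(const struct page *page)
{
        return (struct address_space *)(page->mapping & ~PAGE_MAPPING_FLAGS);
}

/* Movable means the MOVABLE bit is set and the ANON bit is not. */
static bool page_is_movable(const struct page *page)
{
        return (page->mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_MOVABLE;
}

The secretmem.h hit earlier in these results applies exactly this masking to recover the real address_space before comparing its a_ops.
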
/Linux-v6.1/drivers/net/wireless/marvell/mwifiex/
util.h
57 struct mwifiex_dma_mapping *mapping) in mwifiex_store_mapping() argument
61 memcpy(&cb->dma_mapping, mapping, sizeof(*mapping)); in mwifiex_store_mapping()
65 struct mwifiex_dma_mapping *mapping) in mwifiex_get_mapping() argument
69 memcpy(mapping, &cb->dma_mapping, sizeof(*mapping)); in mwifiex_get_mapping()
74 struct mwifiex_dma_mapping mapping; in MWIFIEX_SKB_DMA_ADDR() local
76 mwifiex_get_mapping(skb, &mapping); in MWIFIEX_SKB_DMA_ADDR()
78 return mapping.addr; in MWIFIEX_SKB_DMA_ADDR()
/Linux-v6.1/drivers/sh/clk/
core.c
340 struct clk_mapping *mapping = clk->mapping; in clk_establish_mapping() local
345 if (!mapping) { in clk_establish_mapping()
352 clk->mapping = &dummy_mapping; in clk_establish_mapping()
361 mapping = clkp->mapping; in clk_establish_mapping()
362 BUG_ON(!mapping); in clk_establish_mapping()
368 if (!mapping->base && mapping->phys) { in clk_establish_mapping()
369 kref_init(&mapping->ref); in clk_establish_mapping()
371 mapping->base = ioremap(mapping->phys, mapping->len); in clk_establish_mapping()
372 if (unlikely(!mapping->base)) in clk_establish_mapping()
374 } else if (mapping->base) { in clk_establish_mapping()
[all …]
/Linux-v6.1/Documentation/driver-api/
io-mapping.rst
8 The io_mapping functions in linux/io-mapping.h provide an abstraction for
9 efficiently mapping small regions of an I/O device to the CPU. The initial
14 A mapping object is created during driver initialization using::
20 mappable, while 'size' indicates how large a mapping region to
23 This _wc variant provides a mapping which may only be used with
27 With this mapping object, individual pages can be mapped either temporarily
31 void *io_mapping_map_local_wc(struct io_mapping *mapping,
34 void *io_mapping_map_atomic_wc(struct io_mapping *mapping,
37 'offset' is the offset within the defined mapping region. Accessing
46 Temporary mappings are only valid in the context of the caller. The mapping
[all …]
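
Putting the documented calls together, a driver's use of an io_mapping typically looks like the sketch below. The io_mapping_create_wc(), io_mapping_map_local_wc(), io_mapping_unmap_local(), and io_mapping_free() calls are the ones linux/io-mapping.h declares; struct my_dev and the helper names are hypothetical.

#include <linux/io.h>
#include <linux/io-mapping.h>

/* Hypothetical driver state; my_dev and the BAR parameters are
 * illustrative, not taken from any real driver. */
struct my_dev {
        struct io_mapping *wc_map;
};

static int my_dev_init(struct my_dev *dev, resource_size_t bar_base,
                       unsigned long bar_size)
{
        /* Create the mapping object once, at driver initialization. */
        dev->wc_map = io_mapping_create_wc(bar_base, bar_size);
        if (!dev->wc_map)
                return -ENOMEM;
        return 0;
}

static void my_dev_write32(struct my_dev *dev, unsigned long offset, u32 val)
{
        /* Temporarily map the page containing 'offset'; the pointer is
         * valid only in this context. */
        void __iomem *vaddr = io_mapping_map_local_wc(dev->wc_map,
                                                      offset & PAGE_MASK);

        writel(val, vaddr + (offset & ~PAGE_MASK));
        io_mapping_unmap_local(vaddr);
}

static void my_dev_fini(struct my_dev *dev)
{
        io_mapping_free(dev->wc_map);
}

The _local variant keeps the temporary mapping cheap: it is valid only in the calling context, and nested mappings must be undone in reverse order.
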
/Linux-v6.1/drivers/gpu/drm/amd/amdgpu/
amdgpu_vm.c
973 struct amdgpu_bo_va_mapping *mapping; in amdgpu_vm_bo_update() local
1033 list_for_each_entry(mapping, &bo_va->invalids, list) { in amdgpu_vm_bo_update()
1039 if (!(mapping->flags & AMDGPU_PTE_READABLE)) in amdgpu_vm_bo_update()
1041 if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) in amdgpu_vm_bo_update()
1045 amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags); in amdgpu_vm_bo_update()
1047 trace_amdgpu_vm_bo_update(mapping); in amdgpu_vm_bo_update()
1050 resv, mapping->start, mapping->last, in amdgpu_vm_bo_update()
1051 update_flags, mapping->offset, in amdgpu_vm_bo_update()
1079 list_for_each_entry(mapping, &bo_va->valids, list) in amdgpu_vm_bo_update()
1080 trace_amdgpu_vm_bo_mapping(mapping); in amdgpu_vm_bo_update()
[all …]
