
Searched refs:mapping (Results 1 – 25 of 1086) sorted by relevance


/Linux-v4.19/drivers/staging/gasket/
gasket_sysfs.c
77 static void put_mapping(struct gasket_sysfs_mapping *mapping) in put_mapping() argument
84 if (!mapping) { in put_mapping()
89 mutex_lock(&mapping->mutex); in put_mapping()
90 if (kref_put(&mapping->refcount, release_entry)) { in put_mapping()
91 dev_dbg(mapping->device, "Removing Gasket sysfs mapping\n"); in put_mapping()
99 device = mapping->device; in put_mapping()
100 num_files_to_remove = mapping->attribute_count; in put_mapping()
107 mapping->attributes[i].attr; in put_mapping()
111 kfree(mapping->attributes); in put_mapping()
112 mapping->attributes = NULL; in put_mapping()
[all …]
/Linux-v4.19/mm/
truncate.c
33 static inline void __clear_shadow_entry(struct address_space *mapping, in __clear_shadow_entry() argument
39 if (!__radix_tree_lookup(&mapping->i_pages, index, &node, &slot)) in __clear_shadow_entry()
43 __radix_tree_replace(&mapping->i_pages, node, slot, NULL, in __clear_shadow_entry()
45 mapping->nrexceptional--; in __clear_shadow_entry()
48 static void clear_shadow_entry(struct address_space *mapping, pgoff_t index, in clear_shadow_entry() argument
51 xa_lock_irq(&mapping->i_pages); in clear_shadow_entry()
52 __clear_shadow_entry(mapping, index, entry); in clear_shadow_entry()
53 xa_unlock_irq(&mapping->i_pages); in clear_shadow_entry()
61 static void truncate_exceptional_pvec_entries(struct address_space *mapping, in truncate_exceptional_pvec_entries() argument
69 if (shmem_mapping(mapping)) in truncate_exceptional_pvec_entries()
[all …]
filemap.c
114 static int page_cache_tree_insert(struct address_space *mapping, in page_cache_tree_insert() argument
121 error = __radix_tree_create(&mapping->i_pages, page->index, 0, in page_cache_tree_insert()
129 &mapping->i_pages.xa_lock); in page_cache_tree_insert()
133 mapping->nrexceptional--; in page_cache_tree_insert()
137 __radix_tree_replace(&mapping->i_pages, node, slot, page, in page_cache_tree_insert()
138 workingset_lookup_update(mapping)); in page_cache_tree_insert()
139 mapping->nrpages++; in page_cache_tree_insert()
143 static void page_cache_tree_delete(struct address_space *mapping, in page_cache_tree_delete() argument
159 __radix_tree_lookup(&mapping->i_pages, page->index + i, in page_cache_tree_delete()
164 radix_tree_clear_tags(&mapping->i_pages, node, slot); in page_cache_tree_delete()
[all …]
readahead.c
32 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping) in file_ra_state_init() argument
34 ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages; in file_ra_state_init()
46 static void read_cache_pages_invalidate_page(struct address_space *mapping, in read_cache_pages_invalidate_page() argument
52 page->mapping = mapping; in read_cache_pages_invalidate_page()
54 page->mapping = NULL; in read_cache_pages_invalidate_page()
63 static void read_cache_pages_invalidate_pages(struct address_space *mapping, in read_cache_pages_invalidate_pages() argument
71 read_cache_pages_invalidate_page(mapping, victim); in read_cache_pages_invalidate_pages()
85 int read_cache_pages(struct address_space *mapping, struct list_head *pages, in read_cache_pages() argument
94 if (add_to_page_cache_lru(page, mapping, page->index, in read_cache_pages()
95 readahead_gfp_mask(mapping))) { in read_cache_pages()
[all …]
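For context, read_cache_pages() shown above is the generic helper a ->readpages() implementation can use to add each page to the page cache and feed it through its own filler callback. A minimal sketch of that pattern, assuming the Linux v4.19 API; the myfs_* names are hypothetical:

```c
#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical filler: fill one locked page, then unlock it. */
static int myfs_readpage(struct file *file, struct page *page)
{
	/* ... read the page contents from backing store ... */
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

/* Hypothetical ->readpages(): let read_cache_pages() add each page to
 * the page cache LRU and invoke the filler, as the snippet above does. */
static int myfs_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	return read_cache_pages(mapping, pages, (void *)myfs_readpage, file);
}
```

The (void *) cast of the filler mirrors how fs/9p wires v9fs_vfs_readpage into the same helper further down in these results.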
page-writeback.c
1861 void balance_dirty_pages_ratelimited(struct address_space *mapping) in balance_dirty_pages_ratelimited() argument
1863 struct inode *inode = mapping->host; in balance_dirty_pages_ratelimited()
2104 void tag_pages_for_writeback(struct address_space *mapping, in tag_pages_for_writeback() argument
2112 xa_lock_irq(&mapping->i_pages); in tag_pages_for_writeback()
2113 radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, start, in tag_pages_for_writeback()
2117 radix_tree_iter_tag_set(&mapping->i_pages, &iter, in tag_pages_for_writeback()
2123 xa_unlock_irq(&mapping->i_pages); in tag_pages_for_writeback()
2125 xa_lock_irq(&mapping->i_pages); in tag_pages_for_writeback()
2127 xa_unlock_irq(&mapping->i_pages); in tag_pages_for_writeback()
2153 int write_cache_pages(struct address_space *mapping, in write_cache_pages() argument
[all …]
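write_cache_pages() shown above is the generic loop most ->writepages() implementations delegate to: it walks the mapping's dirty pages, locks each one, and hands it to a per-page callback. A hedged sketch of that wiring, assuming the v4.19 signatures; the myfs_* names are hypothetical:

```c
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>

/* Hypothetical per-page callback invoked by write_cache_pages() with the
 * page already locked; it must start I/O (or redirty) and unlock. */
static int myfs_writepage_cb(struct page *page,
			     struct writeback_control *wbc, void *data)
{
	/* ... queue the page for writeback here ... */
	unlock_page(page);
	return 0;
}

/* Hypothetical ->writepages() built on the generic walker. */
static int myfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	return write_cache_pages(mapping, wbc, myfs_writepage_cb, NULL);
}
```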
/Linux-v4.19/include/linux/
pagemap.h
48 static inline void mapping_set_error(struct address_space *mapping, int error) in mapping_set_error() argument
54 filemap_set_wb_err(mapping, error); in mapping_set_error()
58 set_bit(AS_ENOSPC, &mapping->flags); in mapping_set_error()
60 set_bit(AS_EIO, &mapping->flags); in mapping_set_error()
63 static inline void mapping_set_unevictable(struct address_space *mapping) in mapping_set_unevictable() argument
65 set_bit(AS_UNEVICTABLE, &mapping->flags); in mapping_set_unevictable()
68 static inline void mapping_clear_unevictable(struct address_space *mapping) in mapping_clear_unevictable() argument
70 clear_bit(AS_UNEVICTABLE, &mapping->flags); in mapping_clear_unevictable()
73 static inline int mapping_unevictable(struct address_space *mapping) in mapping_unevictable() argument
75 if (mapping) in mapping_unevictable()
[all …]
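These pagemap.h helpers record state on an address_space: mapping_set_error() remembers a writeback error (in the errseq_t and as AS_EIO/AS_ENOSPC) so a later fsync() can report it, and the *_unevictable helpers control LRU eviction. A minimal, hypothetical write-completion sketch using the v4.19 API:

```c
#include <linux/pagemap.h>

/* Hypothetical I/O completion path: stash the error on the mapping so
 * filemap_check_errors()/fsync() can surface it to user space later. */
static void myfs_end_page_write(struct page *page, int err)
{
	if (err)
		mapping_set_error(page->mapping, err);
	end_page_writeback(page);
}
```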
io-mapping.h
68 io_mapping_fini(struct io_mapping *mapping) in io_mapping_fini() argument
70 iomap_free(mapping->base, mapping->size); in io_mapping_fini()
75 io_mapping_map_atomic_wc(struct io_mapping *mapping, in io_mapping_map_atomic_wc() argument
81 BUG_ON(offset >= mapping->size); in io_mapping_map_atomic_wc()
82 phys_addr = mapping->base + offset; in io_mapping_map_atomic_wc()
84 return iomap_atomic_prot_pfn(pfn, mapping->prot); in io_mapping_map_atomic_wc()
94 io_mapping_map_wc(struct io_mapping *mapping, in io_mapping_map_wc() argument
100 BUG_ON(offset >= mapping->size); in io_mapping_map_wc()
101 phys_addr = mapping->base + offset; in io_mapping_map_wc()
138 io_mapping_fini(struct io_mapping *mapping) in io_mapping_fini() argument
[all …]
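io_mapping_map_atomic_wc() above hands back a short-lived, write-combining kernel mapping of the page containing a given offset inside a pre-created io_mapping (typically a PCI BAR). A hedged driver-style sketch, assuming the v4.19 API; mydrv_write32 is hypothetical:

```c
#include <linux/io.h>
#include <linux/io-mapping.h>

/* Hypothetical helper: write one 32-bit value at an offset into a
 * write-combining mapped aperture, one page at a time. */
static void mydrv_write32(struct io_mapping *mapping,
			  unsigned long offset, u32 val)
{
	void __iomem *vaddr;

	/* Map the page containing the offset; must not run past ->size. */
	vaddr = io_mapping_map_atomic_wc(mapping, offset & PAGE_MASK);
	writel(val, vaddr + (offset & ~PAGE_MASK));
	io_mapping_unmap_atomic(vaddr);
}
```

No sleeping is allowed between the atomic map and unmap; code that needs to sleep uses io_mapping_map_wc()/io_mapping_unmap() instead.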
cleancache.h
52 static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping) in cleancache_fs_enabled_mapping() argument
54 return mapping->host->i_sb->cleancache_poolid >= 0; in cleancache_fs_enabled_mapping()
58 return cleancache_fs_enabled_mapping(page->mapping); in cleancache_fs_enabled()
104 static inline void cleancache_invalidate_page(struct address_space *mapping, in cleancache_invalidate_page() argument
108 if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping)) in cleancache_invalidate_page()
109 __cleancache_invalidate_page(mapping, page); in cleancache_invalidate_page()
112 static inline void cleancache_invalidate_inode(struct address_space *mapping) in cleancache_invalidate_inode() argument
114 if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping)) in cleancache_invalidate_inode()
115 __cleancache_invalidate_inode(mapping); in cleancache_invalidate_inode()
pagevec.h
27 struct address_space *mapping,
32 struct address_space *mapping,
35 struct address_space *mapping, in pagevec_lookup() argument
38 return pagevec_lookup_range(pvec, mapping, start, (pgoff_t)-1); in pagevec_lookup()
42 struct address_space *mapping, pgoff_t *index, pgoff_t end,
45 struct address_space *mapping, pgoff_t *index, pgoff_t end,
48 struct address_space *mapping, pgoff_t *index, int tag) in pagevec_lookup_tag() argument
50 return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag); in pagevec_lookup_tag()
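pagevec_lookup() is almost always driven in a batching loop that walks a mapping one pagevec at a time. A minimal sketch of that idiom, assuming the v4.19 pagevec API; myfs_walk_pages is hypothetical:

```c
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>

/* Hypothetical walk over every page currently cached in a mapping. */
static void myfs_walk_pages(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;
	unsigned int i, nr;

	pagevec_init(&pvec);
	while ((nr = pagevec_lookup(&pvec, mapping, &index))) {
		for (i = 0; i < nr; i++) {
			/* pvec.pages[i] holds a reference that
			 * pagevec_release() drops below. */
			lock_page(pvec.pages[i]);
			/* ... inspect the page under its lock ... */
			unlock_page(pvec.pages[i]);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}
```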
/Linux-v4.19/fs/
dax.c
118 struct address_space *mapping; member
127 static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping, in dax_entry_waitqueue() argument
140 key->mapping = mapping; in dax_entry_waitqueue()
143 hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS); in dax_entry_waitqueue()
154 if (key->mapping != ewait->key.mapping || in wake_exceptional_entry_func()
165 static void dax_wake_mapping_entry_waiter(struct address_space *mapping, in dax_wake_mapping_entry_waiter() argument
171 wq = dax_entry_waitqueue(mapping, index, entry, &key); in dax_wake_mapping_entry_waiter()
187 static inline int slot_locked(struct address_space *mapping, void **slot) in slot_locked() argument
190 radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock); in slot_locked()
197 static inline void *lock_slot(struct address_space *mapping, void **slot) in lock_slot() argument
[all …]
buffer.c
514 int sync_mapping_buffers(struct address_space *mapping) in sync_mapping_buffers() argument
516 struct address_space *buffer_mapping = mapping->private_data; in sync_mapping_buffers()
518 if (buffer_mapping == NULL || list_empty(&mapping->private_list)) in sync_mapping_buffers()
522 &mapping->private_list); in sync_mapping_buffers()
545 struct address_space *mapping = inode->i_mapping; in mark_buffer_dirty_inode() local
546 struct address_space *buffer_mapping = bh->b_page->mapping; in mark_buffer_dirty_inode()
549 if (!mapping->private_data) { in mark_buffer_dirty_inode()
550 mapping->private_data = buffer_mapping; in mark_buffer_dirty_inode()
552 BUG_ON(mapping->private_data != buffer_mapping); in mark_buffer_dirty_inode()
557 &mapping->private_list); in mark_buffer_dirty_inode()
[all …]
/Linux-v4.19/drivers/media/usb/uvc/
uvc_ctrl.c
370 static s32 uvc_ctrl_get_zoom(struct uvc_control_mapping *mapping, in uvc_ctrl_get_zoom() argument
388 static void uvc_ctrl_set_zoom(struct uvc_control_mapping *mapping, in uvc_ctrl_set_zoom() argument
395 static s32 uvc_ctrl_get_rel_speed(struct uvc_control_mapping *mapping, in uvc_ctrl_get_rel_speed() argument
398 unsigned int first = mapping->offset / 8; in uvc_ctrl_get_rel_speed()
415 static void uvc_ctrl_set_rel_speed(struct uvc_control_mapping *mapping, in uvc_ctrl_set_rel_speed() argument
418 unsigned int first = mapping->offset / 8; in uvc_ctrl_set_rel_speed()
769 static s32 uvc_get_le_value(struct uvc_control_mapping *mapping, in uvc_get_le_value() argument
772 int bits = mapping->size; in uvc_get_le_value()
773 int offset = mapping->offset; in uvc_get_le_value()
790 if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED) in uvc_get_le_value()
[all …]
/Linux-v4.19/drivers/gpu/drm/etnaviv/
etnaviv_gem.c
227 struct etnaviv_vram_mapping *mapping; in etnaviv_gem_get_vram_mapping() local
229 list_for_each_entry(mapping, &obj->vram_list, obj_node) { in etnaviv_gem_get_vram_mapping()
230 if (mapping->mmu == mmu) in etnaviv_gem_get_vram_mapping()
231 return mapping; in etnaviv_gem_get_vram_mapping()
237 void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping) in etnaviv_gem_mapping_reference() argument
239 struct etnaviv_gem_object *etnaviv_obj = mapping->object; in etnaviv_gem_mapping_reference()
244 WARN_ON(mapping->use == 0); in etnaviv_gem_mapping_reference()
245 mapping->use += 1; in etnaviv_gem_mapping_reference()
249 void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping) in etnaviv_gem_mapping_unreference() argument
251 struct etnaviv_gem_object *etnaviv_obj = mapping->object; in etnaviv_gem_mapping_unreference()
[all …]
/Linux-v4.19/arch/arm/mm/
flush.c
202 void __flush_dcache_page(struct address_space *mapping, struct page *page) in __flush_dcache_page() argument
236 if (mapping && cache_is_vipt_aliasing()) in __flush_dcache_page()
241 static void __flush_dcache_aliases(struct address_space *mapping, struct page *page) in __flush_dcache_aliases() argument
255 flush_dcache_mmap_lock(mapping); in __flush_dcache_aliases()
256 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { in __flush_dcache_aliases()
269 flush_dcache_mmap_unlock(mapping); in __flush_dcache_aliases()
277 struct address_space *mapping; in __sync_icache_dcache() local
288 mapping = page_mapping_file(page); in __sync_icache_dcache()
290 mapping = NULL; in __sync_icache_dcache()
293 __flush_dcache_page(mapping, page); in __sync_icache_dcache()
[all …]
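On aliasing-cache architectures like the ARM code above, the kernel must flush D-cache aliases after the CPU writes to a page that may also be mapped into user space; drivers and filesystems see this only through flush_dcache_page(). A small hedged sketch (example_fill_page is hypothetical):

```c
#include <linux/string.h>
#include <linux/highmem.h>

/* Hypothetical path: the CPU fills a page-cache page, then flushes any
 * D-cache aliases so user-space mappings observe the new contents. */
static void example_fill_page(struct page *page, const void *src, size_t len)
{
	void *dst = kmap_atomic(page);

	memcpy(dst, src, len);
	kunmap_atomic(dst);
	flush_dcache_page(page);
}
```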
dma-mapping.c
1182 static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
1184 static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, in __alloc_iova() argument
1190 size_t mapping_size = mapping->bits << PAGE_SHIFT; in __alloc_iova()
1201 spin_lock_irqsave(&mapping->lock, flags); in __alloc_iova()
1202 for (i = 0; i < mapping->nr_bitmaps; i++) { in __alloc_iova()
1203 start = bitmap_find_next_zero_area(mapping->bitmaps[i], in __alloc_iova()
1204 mapping->bits, 0, count, align); in __alloc_iova()
1206 if (start > mapping->bits) in __alloc_iova()
1209 bitmap_set(mapping->bitmaps[i], start, count); in __alloc_iova()
1218 if (i == mapping->nr_bitmaps) { in __alloc_iova()
[all …]
/Linux-v4.19/drivers/gpu/drm/exynos/
exynos_drm_iommu.h
26 priv->mapping = arm_iommu_create_mapping(&platform_bus_type, start, in __exynos_iommu_create_mapping()
28 return IS_ERR(priv->mapping); in __exynos_iommu_create_mapping()
34 arm_iommu_release_mapping(priv->mapping); in __exynos_iommu_release_mapping()
40 if (dev->archdata.mapping) in __exynos_iommu_attach()
43 return arm_iommu_attach_device(dev, priv->mapping); in __exynos_iommu_attach()
58 priv->mapping = iommu_get_domain_for_dev(priv->dma_dev); in __exynos_iommu_create_mapping()
64 priv->mapping = NULL; in __exynos_iommu_release_mapping()
70 struct iommu_domain *domain = priv->mapping; in __exynos_iommu_attach()
80 struct iommu_domain *domain = priv->mapping; in __exynos_iommu_detach()
86 #error Unsupported architecture and IOMMU/DMA-mapping glue code
[all …]
/Linux-v4.19/fs/gfs2/
aops.c
98 struct inode *inode = page->mapping->host; in gfs2_writepage_common()
112 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); in gfs2_writepage_common()
147 struct inode * const inode = page->mapping->host; in gfs2_write_full_page()
180 struct inode *inode = page->mapping->host; in __gfs2_jdata_writepage()
206 struct inode *inode = page->mapping->host; in gfs2_jdata_writepage()
232 static int gfs2_writepages(struct address_space *mapping, in gfs2_writepages() argument
235 struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); in gfs2_writepages()
236 int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc); in gfs2_writepages()
261 static int gfs2_write_jdata_pagevec(struct address_space *mapping, in gfs2_write_jdata_pagevec() argument
267 struct inode *inode = mapping->host; in gfs2_write_jdata_pagevec()
[all …]
/Linux-v4.19/arch/nios2/mm/
cacheflush.c
73 static void flush_aliases(struct address_space *mapping, struct page *page) in flush_aliases() argument
81 flush_dcache_mmap_lock(mapping); in flush_aliases()
82 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { in flush_aliases()
94 flush_dcache_mmap_unlock(mapping); in flush_aliases()
160 void __flush_dcache_page(struct address_space *mapping, struct page *page) in __flush_dcache_page() argument
174 struct address_space *mapping; in flush_dcache_page() local
183 mapping = page_mapping_file(page); in flush_dcache_page()
186 if (mapping && !mapping_mapped(mapping)) { in flush_dcache_page()
189 __flush_dcache_page(mapping, page); in flush_dcache_page()
190 if (mapping) { in flush_dcache_page()
[all …]
/Linux-v4.19/drivers/net/wireless/marvell/mwifiex/
util.h
69 struct mwifiex_dma_mapping *mapping) in mwifiex_store_mapping() argument
73 memcpy(&cb->dma_mapping, mapping, sizeof(*mapping)); in mwifiex_store_mapping()
77 struct mwifiex_dma_mapping *mapping) in mwifiex_get_mapping() argument
81 memcpy(mapping, &cb->dma_mapping, sizeof(*mapping)); in mwifiex_get_mapping()
86 struct mwifiex_dma_mapping mapping; in MWIFIEX_SKB_DMA_ADDR() local
88 mwifiex_get_mapping(skb, &mapping); in MWIFIEX_SKB_DMA_ADDR()
90 return mapping.addr; in MWIFIEX_SKB_DMA_ADDR()
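mwifiex keeps the DMA mapping of each skb in the skb control buffer through the store/get helpers above, pairing the map and unmap sides of a transfer. A hedged sketch of that pairing, assuming the helpers and the struct mwifiex_dma_mapping fields shown here; the example_* names are hypothetical:

```c
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include "util.h"	/* mwifiex_store_mapping() / mwifiex_get_mapping() */

/* Hypothetical TX side: map the skb and stash the mapping in skb->cb. */
static int example_map_tx_skb(struct device *dev, struct sk_buff *skb)
{
	struct mwifiex_dma_mapping mapping;

	mapping.addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, mapping.addr))
		return -ENOMEM;
	mapping.len = skb->len;
	mwifiex_store_mapping(skb, &mapping);
	return 0;
}

/* Hypothetical completion side: retrieve the mapping and unmap it. */
static void example_unmap_tx_skb(struct device *dev, struct sk_buff *skb)
{
	struct mwifiex_dma_mapping mapping;

	mwifiex_get_mapping(skb, &mapping);
	dma_unmap_single(dev, mapping.addr, mapping.len, DMA_TO_DEVICE);
}
```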
/Linux-v4.19/fs/afs/
write.c
71 int afs_write_begin(struct file *file, struct address_space *mapping, in afs_write_begin() argument
92 page = grab_cache_page_write_begin(mapping, index, flags); in afs_write_begin()
175 int afs_write_end(struct file *file, struct address_space *mapping, in afs_write_end() argument
226 static void afs_kill_pages(struct address_space *mapping, in afs_kill_pages() argument
229 struct afs_vnode *vnode = AFS_FS_I(mapping->host); in afs_kill_pages()
244 pv.nr = find_get_pages_contig(mapping, first, count, pv.pages); in afs_kill_pages()
255 generic_error_remove_page(mapping, page); in afs_kill_pages()
268 struct address_space *mapping, in afs_redirty_pages() argument
271 struct afs_vnode *vnode = AFS_FS_I(mapping->host); in afs_redirty_pages()
286 pv.nr = find_get_pages_contig(mapping, first, count, pv.pages); in afs_redirty_pages()
[all …]
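afs_write_begin() above follows the common ->write_begin() shape: find or create the page at the write position with grab_cache_page_write_begin() and return it locked to the caller. A minimal hedged sketch of that shape, assuming the v4.19 signatures; myfs_write_begin is hypothetical and skips the read-modify-write handling a real filesystem needs:

```c
#include <linux/fs.h>
#include <linux/pagemap.h>

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	*pagep = page;	/* returned locked; ->write_end() unlocks it */
	return 0;
}
```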
/Linux-v4.19/drivers/sh/clk/
core.c
340 struct clk_mapping *mapping = clk->mapping; in clk_establish_mapping() local
345 if (!mapping) { in clk_establish_mapping()
352 clk->mapping = &dummy_mapping; in clk_establish_mapping()
361 mapping = clkp->mapping; in clk_establish_mapping()
362 BUG_ON(!mapping); in clk_establish_mapping()
368 if (!mapping->base && mapping->phys) { in clk_establish_mapping()
369 kref_init(&mapping->ref); in clk_establish_mapping()
371 mapping->base = ioremap_nocache(mapping->phys, mapping->len); in clk_establish_mapping()
372 if (unlikely(!mapping->base)) in clk_establish_mapping()
374 } else if (mapping->base) { in clk_establish_mapping()
[all …]
/Linux-v4.19/fs/hpfs/
file.c
128 static int hpfs_readpages(struct file *file, struct address_space *mapping, in hpfs_readpages() argument
131 return mpage_readpages(mapping, pages, nr_pages, hpfs_get_block); in hpfs_readpages()
134 static int hpfs_writepages(struct address_space *mapping, in hpfs_writepages() argument
137 return mpage_writepages(mapping, wbc, hpfs_get_block); in hpfs_writepages()
140 static void hpfs_write_failed(struct address_space *mapping, loff_t to) in hpfs_write_failed() argument
142 struct inode *inode = mapping->host; in hpfs_write_failed()
154 static int hpfs_write_begin(struct file *file, struct address_space *mapping, in hpfs_write_begin() argument
161 ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, in hpfs_write_begin()
163 &hpfs_i(mapping->host)->mmu_private); in hpfs_write_begin()
165 hpfs_write_failed(mapping, pos + len); in hpfs_write_begin()
[all …]
/Linux-v4.19/fs/9p/
vfs_addr.c
55 struct inode *inode = page->mapping->host; in v9fs_fid_readpage()
112 static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping, in v9fs_vfs_readpages() argument
118 inode = mapping->host; in v9fs_vfs_readpages()
121 ret = v9fs_readpages_from_fscache(inode, mapping, pages, &nr_pages); in v9fs_vfs_readpages()
125 ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp); in v9fs_vfs_readpages()
163 struct inode *inode = page->mapping->host; in v9fs_vfs_writepage_locked()
204 mapping_set_error(page->mapping, retval); in v9fs_vfs_writepage()
221 struct inode *inode = page->mapping->host; in v9fs_launder_page()
268 static int v9fs_write_begin(struct file *filp, struct address_space *mapping, in v9fs_write_begin() argument
276 struct inode *inode = mapping->host; in v9fs_write_begin()
[all …]
/Linux-v4.19/arch/unicore32/mm/
flush.c
61 void __flush_dcache_page(struct address_space *mapping, struct page *page) in __flush_dcache_page() argument
77 struct address_space *mapping; in flush_dcache_page() local
86 mapping = page_mapping_file(page); in flush_dcache_page()
88 if (mapping && !mapping_mapped(mapping)) in flush_dcache_page()
91 __flush_dcache_page(mapping, page); in flush_dcache_page()
92 if (mapping) in flush_dcache_page()
/Linux-v4.19/include/trace/events/
filemap.h
31 __entry->i_ino = page->mapping->host->i_ino;
33 if (page->mapping->host->i_sb)
34 __entry->s_dev = page->mapping->host->i_sb->s_dev;
36 __entry->s_dev = page->mapping->host->i_rdev;
58 TP_PROTO(struct address_space *mapping, errseq_t eseq),
60 TP_ARGS(mapping, eseq),
69 __entry->i_ino = mapping->host->i_ino;
71 if (mapping->host->i_sb)
72 __entry->s_dev = mapping->host->i_sb->s_dev;
74 __entry->s_dev = mapping->host->i_rdev;
