
Searched refs:mm (Results 1 – 25 of 1352) sorted by relevance


/Linux-v6.1/include/linux/
mmap_lock.h
20 void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
21 void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
23 void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);
25 static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm, in __mmap_lock_trace_start_locking() argument
29 __mmap_lock_do_trace_start_locking(mm, write); in __mmap_lock_trace_start_locking()
32 static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm, in __mmap_lock_trace_acquire_returned() argument
36 __mmap_lock_do_trace_acquire_returned(mm, write, success); in __mmap_lock_trace_acquire_returned()
39 static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write) in __mmap_lock_trace_released() argument
42 __mmap_lock_do_trace_released(mm, write); in __mmap_lock_trace_released()
47 static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm, in __mmap_lock_trace_start_locking() argument
[all …]
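These trace hooks bracket the mmap lock fast paths. As a rough sketch of the pattern used elsewhere in this header (not shown in the excerpt above), the public lock helpers call them around the rwsem operation:

    static inline void mmap_read_lock(struct mm_struct *mm)
    {
            __mmap_lock_trace_start_locking(mm, false);          /* write = false */
            down_read(&mm->mmap_lock);
            __mmap_lock_trace_acquire_returned(mm, false, true); /* acquired */
    }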
mmu_notifier.h
89 struct mm_struct *mm);
101 struct mm_struct *mm,
111 struct mm_struct *mm,
122 struct mm_struct *mm,
130 struct mm_struct *mm,
208 struct mm_struct *mm,
222 struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
240 struct mm_struct *mm; member
260 struct mm_struct *mm; member
273 struct mm_struct *mm; member
[all …]
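The ops table above is what a subscriber fills in to hear about address-space events on an mm. A minimal sketch, where my_notifier_release and the surrounding names are hypothetical:

    #include <linux/mmu_notifier.h>

    /* Hypothetical subscriber: drop cached translations when the mm dies. */
    static void my_notifier_release(struct mmu_notifier *subscription,
                                    struct mm_struct *mm)
    {
            /* invalidate any private state derived from this mm */
    }

    static const struct mmu_notifier_ops my_notifier_ops = {
            .release = my_notifier_release,
    };

    static struct mmu_notifier my_notifier = { .ops = &my_notifier_ops };

    /* registration: mmu_notifier_register(&my_notifier, current->mm); */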
page_table_check.h
17 void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
19 void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
21 void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
23 void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
25 void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
27 void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
29 void __page_table_check_pte_clear_range(struct mm_struct *mm,
49 static inline void page_table_check_pte_clear(struct mm_struct *mm, in page_table_check_pte_clear() argument
55 __page_table_check_pte_clear(mm, addr, pte); in page_table_check_pte_clear()
58 static inline void page_table_check_pmd_clear(struct mm_struct *mm, in page_table_check_pmd_clear() argument
[all …]
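The static inline wrappers shown here let arch code call the checker unconditionally; the expensive __page_table_check_* functions only run when the check is enabled. A sketch of a call site, with my_ptep_get_and_clear a hypothetical (non-atomic) arch helper:

    /* Hypothetical arch helper: report the PTE being cleared. */
    static inline pte_t my_ptep_get_and_clear(struct mm_struct *mm,
                                              unsigned long addr, pte_t *ptep)
    {
            pte_t pte = ptep_get(ptep);

            pte_clear(mm, addr, ptep);
            page_table_check_pte_clear(mm, addr, pte);
            return pte;
    }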
/Linux-v6.1/arch/powerpc/include/asm/
mmu_context.h
18 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
20 extern void destroy_context(struct mm_struct *mm);
24 extern bool mm_iommu_preregistered(struct mm_struct *mm);
25 extern long mm_iommu_new(struct mm_struct *mm,
28 extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
31 extern long mm_iommu_put(struct mm_struct *mm,
33 extern void mm_iommu_init(struct mm_struct *mm);
34 extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
36 extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
40 extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
[all …]
/Linux-v6.1/drivers/gpu/drm/
drm_buddy.c
14 static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm, in drm_block_alloc() argument
35 static void drm_block_free(struct drm_buddy *mm, in drm_block_free() argument
49 static void mark_free(struct drm_buddy *mm, in mark_free() argument
56 &mm->free_list[drm_buddy_block_order(block)]); in mark_free()
79 int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) in drm_buddy_init() argument
95 mm->size = size; in drm_buddy_init()
96 mm->avail = size; in drm_buddy_init()
97 mm->chunk_size = chunk_size; in drm_buddy_init()
98 mm->max_order = ilog2(size) - ilog2(chunk_size); in drm_buddy_init()
100 BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER); in drm_buddy_init()
[all …]
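drm_buddy_init() derives max_order from the size/chunk_size ratio, as line 98 shows. A hedged sketch of setting one up (my_vram_init is hypothetical; SZ_4G and SZ_4K come from <linux/sizes.h>):

    #include <linux/sizes.h>
    #include <drm/drm_buddy.h>

    /* Hypothetical: manage 4 GiB in 4 KiB chunks, so
     * max_order = ilog2(SZ_4G) - ilog2(SZ_4K) = 32 - 12 = 20. */
    static int my_vram_init(struct drm_buddy *mm)
    {
            int err = drm_buddy_init(mm, SZ_4G, SZ_4K);

            if (err)
                    return err;
            /* allocate with drm_buddy_alloc_blocks(),
             * tear down with drm_buddy_fini(mm) */
            return 0;
    }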
drm_mm.c
118 static void show_leaks(struct drm_mm *mm) in show_leaks() argument
127 list_for_each_entry(node, drm_mm_nodes(mm), node_list) { in show_leaks()
146 static void show_leaks(struct drm_mm *mm) { } in show_leaks() argument
157 __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last) in INTERVAL_TREE_DEFINE()
159 return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree, in INTERVAL_TREE_DEFINE()
160 start, last) ?: (struct drm_mm_node *)&mm->head_node; in INTERVAL_TREE_DEFINE()
167 struct drm_mm *mm = hole_node->mm; in drm_mm_interval_tree_add_node() local
190 link = &mm->interval_tree.rb_root.rb_node; in drm_mm_interval_tree_add_node()
208 rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost, in drm_mm_interval_tree_add_node()
268 struct drm_mm *mm = node->mm; in add_hole() local
[all …]
/Linux-v6.1/arch/s390/include/asm/
pgalloc.h
26 struct page *page_table_alloc_pgste(struct mm_struct *mm);
37 int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
39 static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long addr, in check_asce_limit() argument
44 if (addr + len > mm->context.asce_limit && in check_asce_limit()
46 rc = crst_table_upgrade(mm, addr + len); in check_asce_limit()
53 static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address) in p4d_alloc_one() argument
55 unsigned long *table = crst_table_alloc(mm); in p4d_alloc_one()
62 static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) in p4d_free() argument
64 if (!mm_p4d_folded(mm)) in p4d_free()
65 crst_table_free(mm, (unsigned long *) p4d); in p4d_free()
[all …]
mmu_context.h
20 struct mm_struct *mm) in init_new_context() argument
24 spin_lock_init(&mm->context.lock); in init_new_context()
25 INIT_LIST_HEAD(&mm->context.pgtable_list); in init_new_context()
26 INIT_LIST_HEAD(&mm->context.gmap_list); in init_new_context()
27 cpumask_clear(&mm->context.cpu_attach_mask); in init_new_context()
28 atomic_set(&mm->context.flush_count, 0); in init_new_context()
29 atomic_set(&mm->context.protected_count, 0); in init_new_context()
30 mm->context.gmap_asce = 0; in init_new_context()
31 mm->context.flush_mm = 0; in init_new_context()
33 mm->context.alloc_pgste = page_table_allocate_pgste || in init_new_context()
[all …]
/Linux-v6.1/arch/x86/include/asm/
mmu_context.h
61 static inline void init_new_context_ldt(struct mm_struct *mm) in init_new_context_ldt() argument
63 mm->context.ldt = NULL; in init_new_context_ldt()
64 init_rwsem(&mm->context.ldt_usr_sem); in init_new_context_ldt()
66 int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
67 void destroy_context_ldt(struct mm_struct *mm);
68 void ldt_arch_exit_mmap(struct mm_struct *mm);
70 static inline void init_new_context_ldt(struct mm_struct *mm) { } in init_new_context_ldt() argument
72 struct mm_struct *mm) in ldt_dup_context() argument
76 static inline void destroy_context_ldt(struct mm_struct *mm) { } in destroy_context_ldt() argument
77 static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { } in ldt_arch_exit_mmap() argument
[all …]
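Lines 70–77 show the usual kernel idiom for optional features (here guarded by CONFIG_MODIFY_LDT_SYSCALL in the full header): when the option is off, the helpers become empty static inline stubs so callers need no #ifdefs. The generic shape of the idiom, with CONFIG_MY_FEATURE and my_feature_init as placeholders:

    #ifdef CONFIG_MY_FEATURE
    void my_feature_init(struct mm_struct *mm);                   /* real code in a .c file */
    #else
    static inline void my_feature_init(struct mm_struct *mm) { }  /* compiles away */
    #endif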
pgalloc.h
13 static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; } in __paravirt_pgd_alloc() argument
18 #define paravirt_pgd_alloc(mm) __paravirt_pgd_alloc(mm) argument
19 static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {} in paravirt_pgd_free() argument
20 static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {} in paravirt_alloc_pte() argument
21 static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {} in paravirt_alloc_pmd() argument
24 static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {} in paravirt_alloc_pud() argument
25 static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn) {} in paravirt_alloc_p4d() argument
52 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
64 static inline void pmd_populate_kernel(struct mm_struct *mm, in pmd_populate_kernel() argument
67 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); in pmd_populate_kernel()
[all …]
/Linux-v6.1/mm/
debug.c
154 void dump_mm(const struct mm_struct *mm) in dump_mm() argument
184 mm, mm->task_size, in dump_mm()
186 mm->get_unmapped_area, in dump_mm()
188 mm->mmap_base, mm->mmap_legacy_base, in dump_mm()
189 mm->pgd, atomic_read(&mm->mm_users), in dump_mm()
190 atomic_read(&mm->mm_count), in dump_mm()
191 mm_pgtables_bytes(mm), in dump_mm()
192 mm->map_count, in dump_mm()
193 mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, in dump_mm()
194 (u64)atomic64_read(&mm->pinned_vm), in dump_mm()
[all …]
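dump_mm() prints the fields listed above (refcounts, mapping bases, RSS watermarks) to the kernel log. It is typically reached via VM_BUG_ON_MM() from <linux/mmdebug.h>, which dumps the mm before BUG() when CONFIG_DEBUG_VM is set; a sketch with a hypothetical check:

    #include <linux/mm.h>
    #include <linux/mmdebug.h>

    /* Hypothetical sanity check: dump the whole mm_struct on failure. */
    static void my_check_mm(struct mm_struct *mm)
    {
            VM_BUG_ON_MM(mm->map_count < 0, mm);
    }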
mmu_notifier.c
189 interval_sub->mm->notifier_subscriptions; in mmu_interval_read_begin()
262 struct mm_struct *mm) in mn_itree_release() argument
267 .mm = mm, in mn_itree_release()
300 struct mm_struct *mm) in mn_hlist_release() argument
319 subscription->ops->release(subscription, mm); in mn_hlist_release()
348 void __mmu_notifier_release(struct mm_struct *mm) in __mmu_notifier_release() argument
351 mm->notifier_subscriptions; in __mmu_notifier_release()
354 mn_itree_release(subscriptions, mm); in __mmu_notifier_release()
357 mn_hlist_release(subscriptions, mm); in __mmu_notifier_release()
365 int __mmu_notifier_clear_flush_young(struct mm_struct *mm, in __mmu_notifier_clear_flush_young() argument
[all …]
/Linux-v6.1/arch/m68k/include/asm/
mmu_context.h
28 static inline void get_mmu_context(struct mm_struct *mm) in get_mmu_context() argument
32 if (mm->context != NO_CONTEXT) in get_mmu_context()
45 mm->context = ctx; in get_mmu_context()
46 context_mm[ctx] = mm; in get_mmu_context()
52 #define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0) argument
58 static inline void destroy_context(struct mm_struct *mm) in destroy_context() argument
60 if (mm->context != NO_CONTEXT) { in destroy_context()
61 clear_bit(mm->context, context_map); in destroy_context()
62 mm->context = NO_CONTEXT; in destroy_context()
75 get_mmu_context(tsk->mm); in switch_mm()
[all …]
/Linux-v6.1/drivers/gpu/drm/tests/
drm_buddy_test.c
46 static void __dump_block(struct kunit *test, struct drm_buddy *mm, in __dump_block() argument
52 drm_buddy_block_size(mm, block), !block->parent, buddy); in __dump_block()
55 static void dump_block(struct kunit *test, struct drm_buddy *mm, in dump_block() argument
60 __dump_block(test, mm, block, false); in dump_block()
64 __dump_block(test, mm, buddy, true); in dump_block()
67 static int check_block(struct kunit *test, struct drm_buddy *mm, in check_block() argument
84 block_size = drm_buddy_block_size(mm, block); in check_block()
87 if (block_size < mm->chunk_size) { in check_block()
97 if (!IS_ALIGNED(block_size, mm->chunk_size)) { in check_block()
102 if (!IS_ALIGNED(offset, mm->chunk_size)) { in check_block()
[all …]
/Linux-v6.1/arch/powerpc/mm/book3s64/
mmu_context.c
95 static int hash__init_new_context(struct mm_struct *mm) in hash__init_new_context() argument
99 mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context), in hash__init_new_context()
101 if (!mm->context.hash_context) in hash__init_new_context()
118 if (mm->context.id == 0) { in hash__init_new_context()
119 memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context)); in hash__init_new_context()
120 slice_init_new_context_exec(mm); in hash__init_new_context()
123 …memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context)… in hash__init_new_context()
126 if (current->mm->context.hash_context->spt) { in hash__init_new_context()
127 mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table), in hash__init_new_context()
129 if (!mm->context.hash_context->spt) { in hash__init_new_context()
[all …]
slice.c
86 static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, in slice_area_is_free() argument
91 if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr) in slice_area_is_free()
93 vma = find_vma(mm, addr); in slice_area_is_free()
97 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) in slice_low_has_vma() argument
99 return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT, in slice_low_has_vma()
103 static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice) in slice_high_has_vma() argument
114 return !slice_area_is_free(mm, start, end - start); in slice_high_has_vma()
117 static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret, in slice_mask_for_free() argument
127 if (!slice_low_has_vma(mm, i)) in slice_mask_for_free()
134 if (!slice_high_has_vma(mm, i)) in slice_mask_for_free()
[all …]
/Linux-v6.1/arch/s390/mm/
pgtable.c
47 static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr, in ptep_ipte_local() argument
54 asce = READ_ONCE(mm->context.gmap_asce); in ptep_ipte_local()
58 asce = asce ? : mm->context.asce; in ptep_ipte_local()
67 static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr, in ptep_ipte_global() argument
74 asce = READ_ONCE(mm->context.gmap_asce); in ptep_ipte_global()
78 asce = asce ? : mm->context.asce; in ptep_ipte_global()
87 static inline pte_t ptep_flush_direct(struct mm_struct *mm, in ptep_flush_direct() argument
96 atomic_inc(&mm->context.flush_count); in ptep_flush_direct()
98 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) in ptep_flush_direct()
99 ptep_ipte_local(mm, addr, ptep, nodat); in ptep_flush_direct()
[all …]
/Linux-v6.1/include/linux/sched/
mm.h
35 static inline void mmgrab(struct mm_struct *mm) in mmgrab() argument
37 atomic_inc(&mm->mm_count); in mmgrab()
40 extern void __mmdrop(struct mm_struct *mm);
42 static inline void mmdrop(struct mm_struct *mm) in mmdrop() argument
49 if (unlikely(atomic_dec_and_test(&mm->mm_count))) in mmdrop()
50 __mmdrop(mm); in mmdrop()
60 struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop); in __mmdrop_delayed() local
62 __mmdrop(mm); in __mmdrop_delayed()
69 static inline void mmdrop_sched(struct mm_struct *mm) in mmdrop_sched() argument
72 if (atomic_dec_and_test(&mm->mm_count)) in mmdrop_sched()
[all …]
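mmgrab()/mmdrop() pin only the mm_struct itself via mm_count; they do not keep the address space (page tables, VMAs) alive, which is what mmget()/mmput() and mm_users are for. A sketch of the usual pattern, with struct my_ctx as a hypothetical holder:

    #include <linux/sched/mm.h>

    struct my_ctx { struct mm_struct *mm; };

    static void my_ctx_attach(struct my_ctx *ctx, struct mm_struct *mm)
    {
            mmgrab(mm);           /* mm_struct stays allocated for us */
            ctx->mm = mm;
    }

    static void my_ctx_detach(struct my_ctx *ctx)
    {
            mmdrop(ctx->mm);      /* last reference frees it via __mmdrop() */
            ctx->mm = NULL;
    }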
/Linux-v6.1/arch/arm/include/asm/
mmu_context.h
24 void __check_vmalloc_seq(struct mm_struct *mm);
27 static inline void check_vmalloc_seq(struct mm_struct *mm) in check_vmalloc_seq() argument
30 unlikely(atomic_read(&mm->context.vmalloc_seq) != in check_vmalloc_seq()
32 __check_vmalloc_seq(mm); in check_vmalloc_seq()
38 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
42 init_new_context(struct task_struct *tsk, struct mm_struct *mm) in init_new_context() argument
44 atomic64_set(&mm->context.id, 0); in init_new_context()
49 void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
52 static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, in a15_erratum_get_cpumask() argument
62 static inline void check_and_switch_context(struct mm_struct *mm, in check_and_switch_context() argument
[all …]
/Linux-v6.1/arch/sparc/include/asm/
mmu_context_64.h
24 void get_new_mmu_context(struct mm_struct *mm);
27 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
29 void destroy_context(struct mm_struct *mm);
37 static inline void tsb_context_switch_ctx(struct mm_struct *mm, in tsb_context_switch_ctx() argument
40 __tsb_context_switch(__pa(mm->pgd), in tsb_context_switch_ctx()
41 &mm->context.tsb_block[MM_TSB_BASE], in tsb_context_switch_ctx()
43 (mm->context.tsb_block[MM_TSB_HUGE].tsb ? in tsb_context_switch_ctx()
44 &mm->context.tsb_block[MM_TSB_HUGE] : in tsb_context_switch_ctx()
49 , __pa(&mm->context.tsb_descr[MM_TSB_BASE]), in tsb_context_switch_ctx()
55 void tsb_grow(struct mm_struct *mm,
[all …]
/Linux-v6.1/arch/x86/kernel/
ldt.c
42 void load_mm_ldt(struct mm_struct *mm) in load_mm_ldt() argument
47 ldt = READ_ONCE(mm->context.ldt); in load_mm_ldt()
138 struct mm_struct *mm = __mm; in flush_ldt() local
140 if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm) in flush_ldt()
143 load_mm_ldt(mm); in flush_ldt()
189 static void do_sanity_check(struct mm_struct *mm, in do_sanity_check() argument
193 if (mm->context.ldt) { in do_sanity_check()
234 static void map_ldt_struct_to_user(struct mm_struct *mm) in map_ldt_struct_to_user() argument
236 pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR); in map_ldt_struct_to_user()
243 if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt) in map_ldt_struct_to_user()
[all …]
/Linux-v6.1/fs/proc/
task_nommu.c
21 void task_mem(struct seq_file *m, struct mm_struct *mm) in task_mem() argument
23 VMA_ITERATOR(vmi, mm, 0); in task_mem()
28 mmap_read_lock(mm); in task_mem()
40 if (atomic_read(&mm->mm_count) > 1 || in task_mem()
50 if (atomic_read(&mm->mm_count) > 1) in task_mem()
51 sbytes += kobjsize(mm); in task_mem()
53 bytes += kobjsize(mm); in task_mem()
78 mmap_read_unlock(mm); in task_mem()
81 unsigned long task_vsize(struct mm_struct *mm) in task_vsize() argument
83 VMA_ITERATOR(vmi, mm, 0); in task_vsize()
[all …]
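task_mem() walks every VMA with the maple-tree iterator new in v6.1 (VMA_ITERATOR at line 23 above). A minimal sketch of the same walk, with my_count_vmas a hypothetical helper and the caller assumed to hold mmap_read_lock(mm):

    #include <linux/mm.h>

    /* Hypothetical helper: count VMAs; caller holds mmap_read_lock(mm). */
    static unsigned long my_count_vmas(struct mm_struct *mm)
    {
            VMA_ITERATOR(vmi, mm, 0);
            struct vm_area_struct *vma;
            unsigned long n = 0;

            for_each_vma(vmi, vma)
                    n++;
            return n;
    }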
/Linux-v6.1/arch/sparc/mm/
tlb.c
26 struct mm_struct *mm = tb->mm; in flush_tlb_pending() local
33 if (CTX_VALID(mm->context)) { in flush_tlb_pending()
35 global_flush_tlb_page(mm, tb->vaddrs[0]); in flush_tlb_pending()
38 smp_flush_tlb_pending(tb->mm, tb->tlb_nr, in flush_tlb_pending()
41 __flush_tlb_pending(CTX_HWBITS(tb->mm->context), in flush_tlb_pending()
69 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, in tlb_batch_add_one() argument
81 if (unlikely(nr != 0 && mm != tb->mm)) { in tlb_batch_add_one()
87 flush_tsb_user_page(mm, vaddr, hugepage_shift); in tlb_batch_add_one()
88 global_flush_tlb_page(mm, vaddr); in tlb_batch_add_one()
93 tb->mm = mm; in tlb_batch_add_one()
[all …]
tsb.c
121 struct mm_struct *mm = tb->mm; in flush_tsb_user() local
124 spin_lock_irqsave(&mm->context.lock, flags); in flush_tsb_user()
127 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; in flush_tsb_user()
128 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; in flush_tsb_user()
140 else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { in flush_tsb_user()
141 base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; in flush_tsb_user()
142 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; in flush_tsb_user()
149 spin_unlock_irqrestore(&mm->context.lock, flags); in flush_tsb_user()
152 void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, in flush_tsb_user_page() argument
157 spin_lock_irqsave(&mm->context.lock, flags); in flush_tsb_user_page()
[all …]
/Linux-v6.1/drivers/gpu/drm/i915/gem/
i915_gem_shrinker.c
36 return swap_available() || obj->mm.madv == I915_MADV_DONTNEED; in can_release_pages()
112 { &i915->mm.purge_list, ~0u }, in i915_gem_shrink()
114 &i915->mm.shrink_list, in i915_gem_shrink()
190 spin_lock_irqsave(&i915->mm.obj_lock, flags); in i915_gem_shrink()
194 mm.link))) { in i915_gem_shrink()
195 list_move_tail(&obj->mm.link, &still_in_list); in i915_gem_shrink()
198 !is_vmalloc_addr(obj->mm.mapping)) in i915_gem_shrink()
211 spin_unlock_irqrestore(&i915->mm.obj_lock, flags); in i915_gem_shrink()
235 spin_lock_irqsave(&i915->mm.obj_lock, flags); in i915_gem_shrink()
240 spin_unlock_irqrestore(&i915->mm.obj_lock, flags); in i915_gem_shrink()
[all …]
