
Searched for refs:mm in Linux-v5.4 (results 1 – 25 of 1195), sorted by relevance


/Linux-v5.4/arch/powerpc/include/asm/
mmu_context.h
17 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
18 extern void destroy_context(struct mm_struct *mm);
23 extern bool mm_iommu_preregistered(struct mm_struct *mm);
24 extern long mm_iommu_new(struct mm_struct *mm,
27 extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
30 extern long mm_iommu_put(struct mm_struct *mm,
32 extern void mm_iommu_init(struct mm_struct *mm);
33 extern void mm_iommu_cleanup(struct mm_struct *mm);
34 extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
37 struct mm_struct *mm, unsigned long ua, unsigned long size);
[all …]
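
The mm_iommu_* declarations above are the powerpc interface for memory preregistered for VFIO SPAPR TCE DMA. A hedged sketch of the pin/lookup/unpin flow follows; the prototypes are truncated above, so the argument lists (ua = userspace address, entries = page count) and the mm_iommu_ua_to_hpa() translation helper are assumptions based on the 5.4 tree:

    struct mm_iommu_table_group_mem_t *mem;
    unsigned long hpa;
    long ret;

    ret = mm_iommu_new(mm, ua, entries, &mem);      /* pin ua .. ua + entries pages */
    if (ret)
        return ret;

    mem = mm_iommu_lookup(mm, ua, entries << PAGE_SHIFT);
    if (mem && !mm_iommu_ua_to_hpa(mem, ua, PAGE_SHIFT, &hpa))
        pr_debug("ua 0x%lx -> hpa 0x%lx\n", ua, hpa);

    return mm_iommu_put(mm, mem);                   /* drop the pin */
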
/Linux-v5.4/arch/m68k/include/asm/
mmu_context.h
8 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) in enter_lazy_tlb() argument
32 static inline void get_mmu_context(struct mm_struct *mm) in get_mmu_context() argument
36 if (mm->context != NO_CONTEXT) in get_mmu_context()
49 mm->context = ctx; in get_mmu_context()
50 context_mm[ctx] = mm; in get_mmu_context()
56 #define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0) argument
61 static inline void destroy_context(struct mm_struct *mm) in destroy_context() argument
63 if (mm->context != NO_CONTEXT) { in destroy_context()
64 clear_bit(mm->context, context_map); in destroy_context()
65 mm->context = NO_CONTEXT; in destroy_context()
[all …]
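
The m68k hits above show the classic bitmap context (ASID) allocator: get_mmu_context() claims a free bit for an mm, destroy_context() returns it via clear_bit(). A self-contained userspace model of the same pattern (NR_CONTEXTS and the uint64_t map are hypothetical stand-ins for the hardware-sized context_map):

    #include <stdint.h>

    #define NR_CONTEXTS 64            /* hypothetical; really hardware-dependent */
    #define NO_CONTEXT  UINT32_MAX

    static uint64_t context_map;      /* one bit per hardware context */

    static uint32_t get_mmu_context(uint32_t *ctx)
    {
        uint32_t i;

        if (*ctx != NO_CONTEXT)       /* this mm already has a context */
            return *ctx;
        for (i = 0; i < NR_CONTEXTS; i++) {
            if (!(context_map & (1ULL << i))) {
                context_map |= 1ULL << i;   /* kernel: find_first_zero_bit + set_bit */
                *ctx = i;
                return i;
            }
        }
        return NO_CONTEXT;            /* the real code would reclaim a context here */
    }

    static void destroy_context(uint32_t *ctx)
    {
        if (*ctx != NO_CONTEXT) {
            context_map &= ~(1ULL << *ctx);   /* kernel: clear_bit() */
            *ctx = NO_CONTEXT;
        }
    }
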
/Linux-v5.4/arch/x86/include/asm/
mmu_context.h
31 static inline void load_mm_cr4_irqsoff(struct mm_struct *mm) in load_mm_cr4_irqsoff() argument
34 atomic_read(&mm->context.perf_rdpmc_allowed)) in load_mm_cr4_irqsoff()
40 static inline void load_mm_cr4_irqsoff(struct mm_struct *mm) {} in load_mm_cr4_irqsoff() argument
81 static inline void init_new_context_ldt(struct mm_struct *mm) in init_new_context_ldt() argument
83 mm->context.ldt = NULL; in init_new_context_ldt()
84 init_rwsem(&mm->context.ldt_usr_sem); in init_new_context_ldt()
86 int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
87 void destroy_context_ldt(struct mm_struct *mm);
88 void ldt_arch_exit_mmap(struct mm_struct *mm);
90 static inline void init_new_context_ldt(struct mm_struct *mm) { } in init_new_context_ldt() argument
[all …]
pgalloc.h
12 static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; } in __paravirt_pgd_alloc() argument
17 #define paravirt_pgd_alloc(mm) __paravirt_pgd_alloc(mm) argument
18 static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {} in paravirt_pgd_free() argument
19 static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {} in paravirt_alloc_pte() argument
20 static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {} in paravirt_alloc_pmd() argument
23 static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {} in paravirt_alloc_pud() argument
24 static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn) {} in paravirt_alloc_p4d() argument
51 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
63 static inline void pmd_populate_kernel(struct mm_struct *mm, in pmd_populate_kernel() argument
66 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); in pmd_populate_kernel()
[all …]
/Linux-v5.4/mm/
mmu_notifier.c
42 void __mmu_notifier_release(struct mm_struct *mm) in __mmu_notifier_release() argument
52 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) in __mmu_notifier_release()
60 mn->ops->release(mn, mm); in __mmu_notifier_release()
62 spin_lock(&mm->mmu_notifier_mm->lock); in __mmu_notifier_release()
63 while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) { in __mmu_notifier_release()
64 mn = hlist_entry(mm->mmu_notifier_mm->list.first, in __mmu_notifier_release()
75 spin_unlock(&mm->mmu_notifier_mm->lock); in __mmu_notifier_release()
95 int __mmu_notifier_clear_flush_young(struct mm_struct *mm, in __mmu_notifier_clear_flush_young() argument
103 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) { in __mmu_notifier_clear_flush_young()
105 young |= mn->ops->clear_flush_young(mn, mm, start, end); in __mmu_notifier_clear_flush_young()
[all …]
debug.c
134 void dump_mm(const struct mm_struct *mm) in dump_mm() argument
164 mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size, in dump_mm()
166 mm->get_unmapped_area, in dump_mm()
168 mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end, in dump_mm()
169 mm->pgd, atomic_read(&mm->mm_users), in dump_mm()
170 atomic_read(&mm->mm_count), in dump_mm()
171 mm_pgtables_bytes(mm), in dump_mm()
172 mm->map_count, in dump_mm()
173 mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, in dump_mm()
174 (u64)atomic64_read(&mm->pinned_vm), in dump_mm()
[all …]
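
dump_mm() above prints the entire mm_struct (mmap base, map_count, RSS high-water marks, counters) to the kernel log. It is the backend of the VM_BUG_ON_MM() assertion under CONFIG_DEBUG_VM, which is the usual way to reach it:

    /* Dump the whole mm_struct if the invariant fails, then BUG(). */
    VM_BUG_ON_MM(atomic_read(&mm->mm_users) <= 0, mm);

    /* Or open-coded in an ad-hoc debug path: */
    if (atomic_read(&mm->mm_users) <= 0)
        dump_mm(mm);
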
/Linux-v5.4/arch/s390/include/asm/
mmu_context.h
19 struct mm_struct *mm) in init_new_context() argument
21 spin_lock_init(&mm->context.lock); in init_new_context()
22 INIT_LIST_HEAD(&mm->context.pgtable_list); in init_new_context()
23 INIT_LIST_HEAD(&mm->context.gmap_list); in init_new_context()
24 cpumask_clear(&mm->context.cpu_attach_mask); in init_new_context()
25 atomic_set(&mm->context.flush_count, 0); in init_new_context()
26 mm->context.gmap_asce = 0; in init_new_context()
27 mm->context.flush_mm = 0; in init_new_context()
28 mm->context.compat_mm = test_thread_flag(TIF_31BIT); in init_new_context()
30 mm->context.alloc_pgste = page_table_allocate_pgste || in init_new_context()
[all …]
pgalloc.h
26 struct page *page_table_alloc_pgste(struct mm_struct *mm);
37 static inline unsigned long pgd_entry_type(struct mm_struct *mm) in pgd_entry_type() argument
39 if (mm_pmd_folded(mm)) in pgd_entry_type()
41 if (mm_pud_folded(mm)) in pgd_entry_type()
43 if (mm_p4d_folded(mm)) in pgd_entry_type()
48 int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
51 static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address) in p4d_alloc_one() argument
53 unsigned long *table = crst_table_alloc(mm); in p4d_alloc_one()
59 #define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d) argument
61 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) in pud_alloc_one() argument
[all …]
/Linux-v5.4/include/linux/
mmu_notifier.h
66 struct mm_struct *mm; member
98 struct mm_struct *mm);
110 struct mm_struct *mm,
120 struct mm_struct *mm,
131 struct mm_struct *mm,
139 struct mm_struct *mm,
216 void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
229 struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
247 struct mm_struct *mm; member
252 static inline int mm_has_notifiers(struct mm_struct *mm) in mm_has_notifiers() argument
[all …]
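
Together with the mm/mmu_notifier.c hits further up, these declarations make up the MMU-notifier interface: a driver that mirrors CPU page tables (KVM, GPU drivers, RDMA) fills in an mmu_notifier_ops table and registers it against an mm. A minimal sketch against the 5.4 API, with placeholder callback bodies:

    #include <linux/mmu_notifier.h>

    static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
    {
        /* The address space is being torn down: stop using its pages. */
    }

    static int my_clear_flush_young(struct mmu_notifier *mn,
                                    struct mm_struct *mm,
                                    unsigned long start, unsigned long end)
    {
        return 0;   /* return 1 if the secondary TLB saw the range as young */
    }

    static const struct mmu_notifier_ops my_ops = {
        .release           = my_release,
        .clear_flush_young = my_clear_flush_young,
    };

    static struct mmu_notifier my_mn = { .ops = &my_ops };

    static int my_attach(struct mm_struct *mm)
    {
        return mmu_notifier_register(&my_mn, mm);   /* takes mmap_sem internally */
    }
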
/Linux-v5.4/drivers/gpu/drm/i915/selftests/
i915_buddy.c
13 static void __igt_dump_block(struct i915_buddy_mm *mm, in __igt_dump_block() argument
22 i915_buddy_block_size(mm, block), in __igt_dump_block()
27 static void igt_dump_block(struct i915_buddy_mm *mm, in igt_dump_block() argument
32 __igt_dump_block(mm, block, false); in igt_dump_block()
36 __igt_dump_block(mm, buddy, true); in igt_dump_block()
39 static int igt_check_block(struct i915_buddy_mm *mm, in igt_check_block() argument
57 block_size = i915_buddy_block_size(mm, block); in igt_check_block()
60 if (block_size < mm->chunk_size) { in igt_check_block()
70 if (!IS_ALIGNED(block_size, mm->chunk_size)) { in igt_check_block()
75 if (!IS_ALIGNED(offset, mm->chunk_size)) { in igt_check_block()
[all …]
/Linux-v5.4/arch/x86/mm/
mpx.c
26 static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm) in mpx_bd_size_bytes() argument
28 if (is_64bit_mm(mm)) in mpx_bd_size_bytes()
34 static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm) in mpx_bt_size_bytes() argument
36 if (is_64bit_mm(mm)) in mpx_bt_size_bytes()
48 struct mm_struct *mm = current->mm; in mpx_mmap() local
52 if (len != mpx_bt_size_bytes(mm)) in mpx_mmap()
55 down_write(&mm->mmap_sem); in mpx_mmap()
58 up_write(&mm->mmap_sem); in mpx_mmap()
215 struct mm_struct *mm = current->mm; in mpx_enable_management() local
230 down_write(&mm->mmap_sem); in mpx_enable_management()
[all …]
/Linux-v5.4/arch/powerpc/mm/book3s64/
mmu_context.c
91 static int hash__init_new_context(struct mm_struct *mm) in hash__init_new_context() argument
95 mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context), in hash__init_new_context()
97 if (!mm->context.hash_context) in hash__init_new_context()
114 if (mm->context.id == 0) { in hash__init_new_context()
115 memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context)); in hash__init_new_context()
116 slice_init_new_context_exec(mm); in hash__init_new_context()
119 …memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context)… in hash__init_new_context()
122 if (current->mm->context.hash_context->spt) { in hash__init_new_context()
123 mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table), in hash__init_new_context()
125 if (!mm->context.hash_context->spt) { in hash__init_new_context()
[all …]
/Linux-v5.4/arch/sparc/include/asm/
mmu_context_64.h
19 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) in enter_lazy_tlb() argument
28 void get_new_mmu_context(struct mm_struct *mm);
29 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
30 void destroy_context(struct mm_struct *mm);
38 static inline void tsb_context_switch_ctx(struct mm_struct *mm, in tsb_context_switch_ctx() argument
41 __tsb_context_switch(__pa(mm->pgd), in tsb_context_switch_ctx()
42 &mm->context.tsb_block[MM_TSB_BASE], in tsb_context_switch_ctx()
44 (mm->context.tsb_block[MM_TSB_HUGE].tsb ? in tsb_context_switch_ctx()
45 &mm->context.tsb_block[MM_TSB_HUGE] : in tsb_context_switch_ctx()
50 , __pa(&mm->context.tsb_descr[MM_TSB_BASE]), in tsb_context_switch_ctx()
[all …]
/Linux-v5.4/drivers/gpu/drm/i915/gem/
i915_gem_userptr.c
21 struct mm_struct *mm; member
37 struct i915_mm_struct *mm; member
133 unlock = &mn->mm->i915->drm.struct_mutex; in userptr_mn_invalidate_range_start()
184 i915_mmu_notifier_create(struct i915_mm_struct *mm) in i915_mmu_notifier_create() argument
195 mn->mm = mm; in i915_mmu_notifier_create()
216 i915_mmu_notifier_find(struct i915_mm_struct *mm) in i915_mmu_notifier_find() argument
221 mn = mm->mn; in i915_mmu_notifier_find()
225 mn = i915_mmu_notifier_create(mm); in i915_mmu_notifier_find()
229 down_write(&mm->mm->mmap_sem); in i915_mmu_notifier_find()
230 mutex_lock(&mm->i915->mm_lock); in i915_mmu_notifier_find()
[all …]
/Linux-v5.4/arch/arm/include/asm/
mmu_context.h
24 void __check_vmalloc_seq(struct mm_struct *mm);
28 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
30 init_new_context(struct task_struct *tsk, struct mm_struct *mm) in init_new_context() argument
32 atomic64_set(&mm->context.id, 0); in init_new_context()
37 void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
40 static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, in a15_erratum_get_cpumask() argument
50 static inline void check_and_switch_context(struct mm_struct *mm, in check_and_switch_context() argument
53 if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) in check_and_switch_context()
54 __check_vmalloc_seq(mm); in check_and_switch_context()
64 mm->context.switch_pending = 1; in check_and_switch_context()
[all …]
/Linux-v5.4/arch/s390/mm/
pgtable.c
29 static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr, in ptep_ipte_local() argument
36 asce = READ_ONCE(mm->context.gmap_asce); in ptep_ipte_local()
40 asce = asce ? : mm->context.asce; in ptep_ipte_local()
49 static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr, in ptep_ipte_global() argument
56 asce = READ_ONCE(mm->context.gmap_asce); in ptep_ipte_global()
60 asce = asce ? : mm->context.asce; in ptep_ipte_global()
69 static inline pte_t ptep_flush_direct(struct mm_struct *mm, in ptep_flush_direct() argument
78 atomic_inc(&mm->context.flush_count); in ptep_flush_direct()
80 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) in ptep_flush_direct()
81 ptep_ipte_local(mm, addr, ptep, nodat); in ptep_flush_direct()
[all …]
/Linux-v5.4/drivers/gpu/drm/i915/
i915_buddy.c
74 static void mark_free(struct i915_buddy_mm *mm, in mark_free() argument
81 &mm->free_list[i915_buddy_block_order(block)]); in mark_free()
92 int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size) in i915_buddy_init() argument
108 mm->size = size; in i915_buddy_init()
109 mm->chunk_size = chunk_size; in i915_buddy_init()
110 mm->max_order = ilog2(size) - ilog2(chunk_size); in i915_buddy_init()
112 GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER); in i915_buddy_init()
114 mm->free_list = kmalloc_array(mm->max_order + 1, in i915_buddy_init()
117 if (!mm->free_list) in i915_buddy_init()
120 for (i = 0; i <= mm->max_order; ++i) in i915_buddy_init()
[all …]
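
i915_buddy_init() above builds a power-of-two buddy allocator over a [0, size) range: max_order is derived as ilog2(size) - ilog2(chunk_size), with one free list kept per order. A hedged round-trip, assuming the i915_buddy_alloc()/i915_buddy_free()/i915_buddy_fini() entry points from the same file in 5.4:

    struct i915_buddy_mm mm;
    struct i915_buddy_block *block;
    int err;

    err = i915_buddy_init(&mm, SZ_4G, SZ_4K);   /* max_order = 32 - 12 = 20 */
    if (err)
        return err;

    block = i915_buddy_alloc(&mm, 4);           /* one order-4 (64K) block */
    if (!IS_ERR(block))
        i915_buddy_free(&mm, block);

    i915_buddy_fini(&mm);
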
/Linux-v5.4/arch/x86/kernel/
ldt.c
54 struct mm_struct *mm = __mm; in flush_ldt() local
56 if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm) in flush_ldt()
59 load_mm_ldt(mm); in flush_ldt()
105 static void do_sanity_check(struct mm_struct *mm, in do_sanity_check() argument
109 if (mm->context.ldt) { in do_sanity_check()
150 static void map_ldt_struct_to_user(struct mm_struct *mm) in map_ldt_struct_to_user() argument
152 pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR); in map_ldt_struct_to_user()
159 if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt) in map_ldt_struct_to_user()
163 static void sanity_check_ldt_mapping(struct mm_struct *mm) in sanity_check_ldt_mapping() argument
165 pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR); in sanity_check_ldt_mapping()
[all …]
/Linux-v5.4/arch/powerpc/mm/
slice.c
86 static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, in slice_area_is_free() argument
91 if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr) in slice_area_is_free()
93 vma = find_vma(mm, addr); in slice_area_is_free()
97 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) in slice_low_has_vma() argument
99 return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT, in slice_low_has_vma()
103 static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice) in slice_high_has_vma() argument
114 return !slice_area_is_free(mm, start, end - start); in slice_high_has_vma()
117 static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret, in slice_mask_for_free() argument
127 if (!slice_low_has_vma(mm, i)) in slice_mask_for_free()
134 if (!slice_high_has_vma(mm, i)) in slice_mask_for_free()
[all …]
/Linux-v5.4/arch/mips/include/asm/
mmu_context.h
106 static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm) in cpu_context() argument
109 return atomic64_read(&mm->context.mmid); in cpu_context()
111 return mm->context.asid[cpu]; in cpu_context()
115 struct mm_struct *mm, u64 ctx) in set_cpu_context() argument
118 atomic64_set(&mm->context.mmid, ctx); in set_cpu_context()
120 mm->context.asid[cpu] = ctx; in set_cpu_context()
124 #define cpu_asid(cpu, mm) \ argument
125 (cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
127 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) in enter_lazy_tlb() argument
131 extern void get_new_mmu_context(struct mm_struct *mm);
[all …]
/Linux-v5.4/kernel/sched/
membarrier.c
35 struct mm_struct *mm = (struct mm_struct *) info; in ipi_sync_rq_state() local
37 if (current->mm != mm) in ipi_sync_rq_state()
40 atomic_read(&mm->membarrier_state)); in ipi_sync_rq_state()
50 void membarrier_exec_mmap(struct mm_struct *mm) in membarrier_exec_mmap() argument
58 atomic_set(&mm->membarrier_state, 0); in membarrier_exec_mmap()
136 struct mm_struct *mm = current->mm; in membarrier_private_expedited() local
141 if (!(atomic_read(&mm->membarrier_state) & in membarrier_private_expedited()
145 if (!(atomic_read(&mm->membarrier_state) & in membarrier_private_expedited()
150 if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1) in membarrier_private_expedited()
178 if (p && p->mm == mm) in membarrier_private_expedited()
[all …]
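
membarrier_private_expedited() above only IPIs CPUs whose runqueue is running the caller's mm (the p->mm == mm check), which is what keeps the expedited command cheap. From userspace the contract is register once, then issue barriers; a runnable sketch (most libcs expose no wrapper, hence raw syscall(2)):

    #include <linux/membarrier.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdio.h>

    int main(void)
    {
        /* One-time registration for this process's mm. */
        if (syscall(__NR_membarrier,
                    MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0)) {
            perror("membarrier register");
            return 1;
        }

        /* Acts as a full memory barrier on every thread of this process. */
        if (syscall(__NR_membarrier, MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0)) {
            perror("membarrier");
            return 1;
        }

        puts("expedited membarrier completed");
        return 0;
    }
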
/Linux-v5.4/arch/sparc/mm/
tlb.c
27 struct mm_struct *mm = tb->mm; in flush_tlb_pending() local
34 if (CTX_VALID(mm->context)) { in flush_tlb_pending()
36 global_flush_tlb_page(mm, tb->vaddrs[0]); in flush_tlb_pending()
39 smp_flush_tlb_pending(tb->mm, tb->tlb_nr, in flush_tlb_pending()
42 __flush_tlb_pending(CTX_HWBITS(tb->mm->context), in flush_tlb_pending()
70 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, in tlb_batch_add_one() argument
82 if (unlikely(nr != 0 && mm != tb->mm)) { in tlb_batch_add_one()
88 flush_tsb_user_page(mm, vaddr, hugepage_shift); in tlb_batch_add_one()
89 global_flush_tlb_page(mm, vaddr); in tlb_batch_add_one()
94 tb->mm = mm; in tlb_batch_add_one()
[all …]
tsb.c
121 struct mm_struct *mm = tb->mm; in flush_tsb_user() local
124 spin_lock_irqsave(&mm->context.lock, flags); in flush_tsb_user()
127 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; in flush_tsb_user()
128 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; in flush_tsb_user()
140 else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { in flush_tsb_user()
141 base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; in flush_tsb_user()
142 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; in flush_tsb_user()
149 spin_unlock_irqrestore(&mm->context.lock, flags); in flush_tsb_user()
152 void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, in flush_tsb_user_page() argument
157 spin_lock_irqsave(&mm->context.lock, flags); in flush_tsb_user_page()
[all …]
/Linux-v5.4/drivers/gpu/drm/
drm_mm.c
118 static void show_leaks(struct drm_mm *mm) in show_leaks() argument
129 list_for_each_entry(node, drm_mm_nodes(mm), node_list) { in show_leaks()
149 static void show_leaks(struct drm_mm *mm) { } in show_leaks() argument
160 __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last) in INTERVAL_TREE_DEFINE()
162 return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree, in INTERVAL_TREE_DEFINE()
163 start, last) ?: (struct drm_mm_node *)&mm->head_node; in INTERVAL_TREE_DEFINE()
170 struct drm_mm *mm = hole_node->mm; in drm_mm_interval_tree_add_node() local
193 link = &mm->interval_tree.rb_root.rb_node; in drm_mm_interval_tree_add_node()
211 rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost, in drm_mm_interval_tree_add_node()
260 struct drm_mm *mm = node->mm; in add_hole() local
[all …]
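
drm_mm is the core DRM range allocator: nodes are kept in an augmented interval tree (the INTERVAL_TREE_DEFINE() hit above) so address lookups stay O(log n), and drm_mm_takedown() reports leftover nodes through show_leaks(). A minimal allocation round-trip against the 5.4 API:

    struct drm_mm mm;
    struct drm_mm_node node = {};
    int err;

    drm_mm_init(&mm, 0, SZ_1G);             /* manage offsets [0, 1G) */

    err = drm_mm_insert_node(&mm, &node, SZ_64K);
    if (!err) {
        /* node.start now holds the assigned offset */
        drm_mm_remove_node(&node);
    }

    drm_mm_takedown(&mm);
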
/Linux-v5.4/arch/um/kernel/skas/
mmu.c
18 static int init_stub_pte(struct mm_struct *mm, unsigned long proc, in init_stub_pte() argument
26 pgd = pgd_offset(mm, proc); in init_stub_pte()
27 pud = pud_alloc(mm, pgd, proc); in init_stub_pte()
31 pmd = pmd_alloc(mm, pud, proc); in init_stub_pte()
35 pte = pte_alloc_map(mm, pmd, proc); in init_stub_pte()
44 pmd_free(mm, pmd); in init_stub_pte()
46 pud_free(mm, pud); in init_stub_pte()
51 int init_new_context(struct task_struct *task, struct mm_struct *mm) in init_new_context() argument
54 struct mm_context *to_mm = &mm->context; in init_new_context()
63 if (current->mm != NULL && current->mm != &init_mm) in init_new_context()
[all …]
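
init_stub_pte() above is the canonical walk-with-allocate over the page-table levels: pgd_offset() finds the top-level slot, each *_alloc() fills in the next level on demand, and the error path frees what was built. The same pattern, sketched with the helpers the snippet uses (UML folds the p4d level; five-level architectures insert a p4d_alloc() between pgd and pud):

    static int walk_alloc(struct mm_struct *mm, unsigned long addr)
    {
        pgd_t *pgd = pgd_offset(mm, addr);   /* top level always exists */
        pud_t *pud = pud_alloc(mm, pgd, addr);
        pmd_t *pmd;
        pte_t *pte;

        if (!pud)
            return -ENOMEM;
        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd) {
            pud_free(mm, pud);
            return -ENOMEM;
        }
        pte = pte_alloc_map(mm, pmd, addr);
        if (!pte) {
            pmd_free(mm, pmd);
            pud_free(mm, pud);
            return -ENOMEM;
        }
        /* set the PTE here, then pte_unmap(pte) */
        return 0;
    }
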
