/Linux-v5.15/include/linux/
mmap_lock.h:

    void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
    void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
                                               bool success);
    void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);
    …
    static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
                                                       bool write)
    {
        …
            __mmap_lock_do_trace_start_locking(mm, write);
    }

    static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
                                                          bool write, bool success)
    {
        …
            __mmap_lock_do_trace_acquire_returned(mm, write, success);
    }

    static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
    {
        …
            __mmap_lock_do_trace_released(mm, write);
    }
    …
    static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm, …
    [all …]
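These wrappers follow the usual tracepoint fast-path pattern: the static inline checks whether the tracepoint is enabled (the elided lines) and only then calls the out-of-line __mmap_lock_do_trace_*() helper. As a hedged sketch of how a lock helper brackets the real lock operation with them (this mirrors what v5.15's mmap_read_lock() does in the same header, but verify against the full file):

    static inline void mmap_read_lock(struct mm_struct *mm)
    {
            __mmap_lock_trace_start_locking(mm, false);          /* about to block */
            down_read(&mm->mmap_lock);
            __mmap_lock_trace_acquire_returned(mm, false, true); /* acquired */
    }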
mmu_notifier.h:

    * that the mm refcount is zero and the range is no longer accessible.
    …
    * Called either by mmu_notifier_unregister or when the mm is
    …
    * methods (the ones invoked outside the mm context) and it
    …
    * tsk->mm == mm exits.
    …
    * last thread of this mm quits, you've also to be sure that
    …
                    struct mm_struct *mm);
    …
                    struct mm_struct *mm,
    …
                    struct mm_struct *mm,
    …
                    struct mm_struct *mm,
    …
                    struct mm_struct *mm,
    [all …]
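The comment fragments above belong to the kernel-doc of struct mmu_notifier_ops, and the signatures ending in struct mm_struct * are its callbacks (release plus the invalidation hooks). A minimal registration sketch, assuming the v5.15 API; the my_* names are hypothetical:

    #include <linux/mmu_notifier.h>

    /* Sketch only: a notifier whose .release callback runs when the mm
     * is torn down (last mmput or mmu_notifier_unregister). */
    static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
    {
            /* stop all secondary-MMU use of this mm's pages */
    }

    static const struct mmu_notifier_ops my_ops = {
            .release = my_release,
    };

    static struct mmu_notifier my_notifier = { .ops = &my_ops };

    /* int err = mmu_notifier_register(&my_notifier, current->mm); */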
/Linux-v5.15/drivers/gpu/drm/i915/
i915_buddy.c:

    static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_mm *mm, …
    …
    static void i915_block_free(struct i915_buddy_mm *mm, …
    …
    static void mark_free(struct i915_buddy_mm *mm, …
    …
             &mm->free_list[i915_buddy_block_order(block)]);
    …
    int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
    {
        …
        mm->size = size;
        mm->chunk_size = chunk_size;
        mm->max_order = ilog2(size) - ilog2(chunk_size);
        …
        GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER);
        …
        mm->free_list = kmalloc_array(mm->max_order + 1, …
    [all …]
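The order arithmetic in i915_buddy_init() sizes all of the allocator's bookkeeping. As a worked example (values hypothetical): size = 4 GiB and chunk_size = 4 KiB give max_order = ilog2(2^32) - ilog2(2^12) = 32 - 12 = 20, so the kmalloc_array() call allocates max_order + 1 = 21 list heads, one free list per block order, and mark_free() indexes into them with i915_buddy_block_order(block).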
/Linux-v5.15/drivers/gpu/drm/i915/selftests/
i915_buddy.c:

    static void __igt_dump_block(struct i915_buddy_mm *mm, …
    …
           i915_buddy_block_size(mm, block),
    …
    static void igt_dump_block(struct i915_buddy_mm *mm, …
    {
        …
        __igt_dump_block(mm, block, false);
        …
            __igt_dump_block(mm, buddy, true);
    }

    static int igt_check_block(struct i915_buddy_mm *mm, …
    {
        …
        block_size = i915_buddy_block_size(mm, block);
        …
        if (block_size < mm->chunk_size) {
        …
        if (!IS_ALIGNED(block_size, mm->chunk_size)) {
        …
        if (!IS_ALIGNED(offset, mm->chunk_size)) {
    [all …]
/Linux-v5.15/arch/powerpc/include/asm/
mmu_context.h:

    #include <linux/mm.h>
    …
    extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
    …
    extern void destroy_context(struct mm_struct *mm);
    …
    extern bool mm_iommu_preregistered(struct mm_struct *mm);
    extern long mm_iommu_new(struct mm_struct *mm, …
    …
    extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua, …
    …
    extern long mm_iommu_put(struct mm_struct *mm, …
    …
    extern void mm_iommu_init(struct mm_struct *mm);
    extern void mm_iommu_cleanup(struct mm_struct *mm);
    extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm, …
    [all …]
/Linux-v5.15/arch/s390/include/asm/
pgalloc.h:

    #include <linux/mm.h>
    …
    struct page *page_table_alloc_pgste(struct mm_struct *mm);
    …
    int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
    …
    static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long addr, …
    {
        …
        if (addr + len > mm->context.asce_limit && …
            rc = crst_table_upgrade(mm, addr + len);
        …
    }

    static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
    {
        unsigned long *table = crst_table_alloc(mm);
        …
    }

    static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
    {
        if (!mm_p4d_folded(mm))
    [all …]
mmu_context.h:

    static inline int init_new_context(struct task_struct *tsk,
                                       struct mm_struct *mm)
    {
        …
        spin_lock_init(&mm->context.lock);
        INIT_LIST_HEAD(&mm->context.pgtable_list);
        INIT_LIST_HEAD(&mm->context.gmap_list);
        cpumask_clear(&mm->context.cpu_attach_mask);
        atomic_set(&mm->context.flush_count, 0);
        atomic_set(&mm->context.is_protected, 0);
        mm->context.gmap_asce = 0;
        mm->context.flush_mm = 0;
        …
        mm->context.alloc_pgste = page_table_allocate_pgste || …
    [all …]
tlbflush.h:

    #include <linux/mm.h>
    …
        /* Global TLB flush for the mm */        /* in __tlb_flush_idte() */
    …
    /*
     * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
     * …
     */
    static inline void __tlb_flush_mm(struct mm_struct *mm)
    {
        …
        /*
         * If the machine has IDTE we prefer to do a per mm flush
         * on all cpus instead of doing a local flush if the mm
         * …
         */
        atomic_inc(&mm->context.flush_count);
        …
        cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
        …
        gmap_asce = READ_ONCE(mm->context.gmap_asce);
        …
            __tlb_flush_idte(mm->context.asce);
    [all …]
/Linux-v5.15/arch/x86/include/asm/
mmu_context.h:

    static inline void init_new_context_ldt(struct mm_struct *mm)
    {
        mm->context.ldt = NULL;
        init_rwsem(&mm->context.ldt_usr_sem);
    }
    int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
    void destroy_context_ldt(struct mm_struct *mm);
    void ldt_arch_exit_mmap(struct mm_struct *mm);
    …
    static inline void init_new_context_ldt(struct mm_struct *mm) { }
    static inline int ldt_dup_context(struct mm_struct *oldmm,
                                      struct mm_struct *mm)
    …
    static inline void destroy_context_ldt(struct mm_struct *mm) { }
    static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
    [all …]
pgalloc.h:

    #include <linux/mm.h>        /* for struct page */
    …
    static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }
    …
    #define paravirt_pgd_alloc(mm)    __paravirt_pgd_alloc(mm)
    static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
    static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}
    static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}
    …
    static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {}
    static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn) {}
    …
    extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
    …
    static inline void pmd_populate_kernel(struct mm_struct *mm, …
    [all …]
/Linux-v5.15/arch/s390/mm/
pgtable.c:

    #include <linux/mm.h>
    …
    static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr, …
    {
        …
        asce = READ_ONCE(mm->context.gmap_asce);
        …
        asce = asce ? : mm->context.asce;
        …
    }

    static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr, …
    {
        …
        asce = READ_ONCE(mm->context.gmap_asce);
        …
        asce = asce ? : mm->context.asce;
        …
    }

    static inline pte_t ptep_flush_direct(struct mm_struct *mm, …
    {
        …
        atomic_inc(&mm->context.flush_count);
        …
            cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
    [all …]
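ptep_flush_direct() picks between the local and global IPTE variants: the excerpted cpumask_equal() test permits a CPU-local flush only when the mm is attached to the current CPU and no other. A hedged sketch of that predicate as a standalone helper (the helper name is hypothetical):

    /* Sketch: true when a CPU-local TLB flush suffices, i.e. the mm's
     * cpumask contains exactly the executing CPU. */
    static inline bool mm_is_local_to_this_cpu(struct mm_struct *mm)
    {
            return cpumask_equal(mm_cpumask(mm),
                                 cpumask_of(smp_processor_id()));
    }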
/Linux-v5.15/arch/m68k/include/asm/
mmu_context.h:

    static inline void get_mmu_context(struct mm_struct *mm)
    {
        …
        if (mm->context != NO_CONTEXT)
            …
        mm->context = ctx;
        context_mm[ctx] = mm;
    }
    …
    #define init_new_context(tsk, mm)    (((mm)->context = NO_CONTEXT), 0)
    …
    static inline void destroy_context(struct mm_struct *mm)
    {
        if (mm->context != NO_CONTEXT) {
            clear_bit(mm->context, context_map);
            mm->context = NO_CONTEXT;
        }
    }
    …
        get_mmu_context(tsk->mm);        /* in switch_mm() */
    [all …]
/Linux-v5.15/drivers/iommu/
iommu-sva-lib.c:

    #include <linux/sched/mm.h>
    …
    /**
     * iommu_sva_alloc_pasid - Allocate a PASID for the mm
     * @mm: the mm
     * …
     * Try to allocate a PASID for this mm, or take a reference to the existing one
     * … available in mm->pasid, and must be released with iommu_sva_free_pasid().
     * @min must be greater than 0, because 0 indicates an unused mm->pasid.
     * …
     */
    int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
    {
        …
        if (mm->pasid) {
            if (mm->pasid >= min && mm->pasid <= max)
                ioasid_get(mm->pasid);
    [all …]
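Per the kernel-doc above, a driver allocates (or takes a reference to) the mm's PASID within the range its IOMMU supports, and later drops it with iommu_sva_free_pasid(). A hedged usage sketch; the helper name and PASID limit are hypothetical:

    /* Sketch: take a PASID for an mm before binding it to a device.
     * MY_PASID_MAX stands in for the device's real PASID-width limit. */
    #define MY_PASID_MAX    ((ioasid_t)0xfffff)

    static int my_sva_bind(struct mm_struct *mm)
    {
            /* min is 1, never 0: pasid 0 means "none allocated yet" */
            int ret = iommu_sva_alloc_pasid(mm, 1, MY_PASID_MAX);

            if (ret)
                    return ret;
            /* ... program mm->pasid into the device here ... */
            return 0;
    }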
/Linux-v5.15/mm/
mmu_notifier.c:

    /*
     * linux/mm/mmu_notifier.c
     * …
     */
    #include <linux/mm.h>
    …
    #include <linux/sched/mm.h>
    …
     * mm->notifier_subscriptions inside the mm_take_all_locks() protected
    …
    /* all mmu notifiers registered in this mm are queued in this list */
    …
     * this mm, such that PTES cannot be read into SPTEs (shadow PTEs) while any
    …
     * Note that the core mm creates nested invalidate_range_start()/end() regions
     * …
     * progress on the mm side.
    …
     * - mm->active_invalidate_ranges != 0
    …
     * - mm->active_invalidate_ranges != 0
    [all …]
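The invalidate_range_start()/end() regions described above always bracket the actual PTE teardown. A hedged sketch of the calling side, assuming the v5.15 range API (the function name is hypothetical):

    #include <linux/mmu_notifier.h>

    /* Sketch: notify secondary MMUs around zapping [start, end) of mm. */
    static void sketch_zap_range(struct vm_area_struct *vma,
                                 struct mm_struct *mm,
                                 unsigned long start, unsigned long end)
    {
            struct mmu_notifier_range range;

            mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm,
                                    start, end);
            mmu_notifier_invalidate_range_start(&range);
            /* ... clear the page table entries for [start, end) ... */
            mmu_notifier_invalidate_range_end(&range);
    }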
debug.c:

    /*
     * mm/debug.c
     *
     * mm/ specific debug routines.
     * …
     */
    #include <linux/mm.h>
    …
        "next %px prev %px mm %px\n"        /* in dump_vma() */
    …
    void dump_mm(const struct mm_struct *mm)
    {
        pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
            …
            mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
            …
            mm->get_unmapped_area,
            …
            mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
            mm->pgd, atomic_read(&mm->mm_users),
    [all …]
/Linux-v5.15/fs/proc/
task_nommu.c:

    #include <linux/mm.h>
    …
    #include <linux/sched/mm.h>
    …
    void task_mem(struct seq_file *m, struct mm_struct *mm)
    {
        …
        mmap_read_lock(mm);
        for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
        …
        if (atomic_read(&mm->mm_count) > 1 || …
        …
        if (atomic_read(&mm->mm_count) > 1)
            sbytes += kobjsize(mm);
        else
            bytes += kobjsize(mm);
        …
        mmap_read_unlock(mm);
    [all …]
/Linux-v5.15/arch/sparc/mm/
tlb.c:

    /* arch/sparc64/mm/tlb.c
     * …
     */
    #include <linux/mm.h>
    …
        struct mm_struct *mm = tb->mm;        /* in flush_tlb_pending() */
    …
        if (CTX_VALID(mm->context)) {
            …
            global_flush_tlb_page(mm, tb->vaddrs[0]);
            …
            smp_flush_tlb_pending(tb->mm, tb->tlb_nr, …
            …
            __flush_tlb_pending(CTX_HWBITS(tb->mm->context), …
    …
    static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, …
    {
        …
        if (unlikely(nr != 0 && mm != tb->mm)) {
        …
        flush_tsb_user_page(mm, vaddr, hugepage_shift);
    [all …]
/Linux-v5.15/arch/powerpc/mm/
slice.c:

    #include <linux/mm.h>
    …
    #include <linux/sched/mm.h>
    …
    static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, …
    {
        …
        if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr)
            …
        vma = find_vma(mm, addr);
        …
    }

    static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
    {
        return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT, …
    }

    static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
    {
        …
        return !slice_area_is_free(mm, start, end - start);
    }

    static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret, …
    [all …]
/Linux-v5.15/arch/arm/include/asm/
mmu_context.h:

    void __check_vmalloc_seq(struct mm_struct *mm);
    …
    void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
    …
    static inline int
    init_new_context(struct task_struct *tsk, struct mm_struct *mm)
    {
        atomic64_set(&mm->context.id, 0);
        …
    }
    …
    void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, …
    …
    static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, …
    …
    static inline void check_and_switch_context(struct mm_struct *mm, …
    {
        if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
            __check_vmalloc_seq(mm);
        …
         * running with the old mm. Since we only support UP systems
        …
    [all …]
/Linux-v5.15/include/trace/events/
mmap_lock.h:

    TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write),
    TP_ARGS(mm, memcg_path, write),
    …
        __field(struct mm_struct *, mm)
    …
        __entry->mm = mm;
    …
        "mm=%p memcg_path=%s write=%s\n",
        __entry->mm,
    …
    TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write,
             bool success),
    TP_ARGS(mm, memcg_path, write, success),
    …
        __field(struct mm_struct *, mm)
    …
        __entry->mm = mm;
    [all …]
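Each TRACE_EVENT declared from these TP_PROTO/TP_ARGS pairs generates a trace_<event>() stub that callers fire directly. A hedged sketch of a call site, assuming an event named mmap_lock_start_locking is instantiated from the first prototype (as the v5.15 mmap-lock tracing code does):

    /* Sketch: how the out-of-line helper in mm/mmap_lock.c might emit
     * the event; the memcg path lookup is omitted and "" used instead. */
    void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write)
    {
            trace_mmap_lock_start_locking(mm, "", write);
    }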
/Linux-v5.15/include/asm-generic/
mmu_context.h:

    /**
     * enter_lazy_tlb - …
     * @mm: the currently active mm context which is becoming lazy
     * …
     * tsk->mm will be NULL
     */
    static inline void enter_lazy_tlb(struct mm_struct *mm, …
    …
    /**
     * init_new_context - …
     * @tsk: task struct for the mm
     * @mm: the new mm struct
     */
    static inline int init_new_context(struct task_struct *tsk,
                                       struct mm_struct *mm)
    …
    /**
     * destroy_context - Undo init_new_context when the mm is going away
     * @mm: old mm struct
     */
    static inline void destroy_context(struct mm_struct *mm)
    …
    /*
     * activate_mm - called after exec switches the current task to a new mm, to switch to it
     * …
    [all …]
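These are no-op fallbacks: each definition in asm-generic/mmu_context.h sits inside an #ifndef guard, so an architecture defines only the hooks it actually needs before including the generic header. A hedged sketch of that override pattern (the context field is hypothetical):

    /* Sketch: an arch's <asm/mmu_context.h> overriding one hook and
     * inheriting the generic no-ops for everything else. */
    #define init_new_context init_new_context
    static inline int init_new_context(struct task_struct *tsk,
                                       struct mm_struct *mm)
    {
            mm->context.id = 0;    /* arch-specific field, hypothetical */
            return 0;
    }

    #include <asm-generic/mmu_context.h>    /* supplies the rest */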
/Linux-v5.15/arch/powerpc/mm/book3s64/
mmu_context.c:

    #include <linux/mm.h>
    …
    static int hash__init_new_context(struct mm_struct *mm)
    {
        …
        mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context), …
        if (!mm->context.hash_context)
            …
        /*
         * initialize context slice details for newly allocated mm's (which will
         * …
         */
        if (mm->context.id == 0) {
            memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context));
            slice_init_new_context_exec(mm);
        } else {
            /* This is fork. Copy hash_context details from current->mm */
            memcpy(mm->context.hash_context, current->mm->context.hash_context,
                   sizeof(struct hash_mm_context)…
    [all …]
/Linux-v5.15/arch/x86/kernel/
ldt.c:

    #include <linux/mm.h>
    …
    void load_mm_ldt(struct mm_struct *mm)
    {
        …
        ldt = READ_ONCE(mm->context.ldt);
        /*
         * Any change to mm->context.ldt is followed by an IPI to all
         * CPUs with the mm active. The LDT will not be freed until
         * …
         */
    …
    /*
     * Load the LDT if either the old or new mm had an LDT.
     * …
     * An mm will never go from having an LDT to not having an LDT. Two
     * …
     */        /* in switch_ldt() */
    …
    static void flush_ldt(void *__mm)
    {
        struct mm_struct *mm = __mm;

        if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
            …
        load_mm_ldt(mm);
    }
    [all …]
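The comment in load_mm_ldt() describes the update protocol: a writer publishes the new LDT pointer and then IPIs every CPU that has the mm loaded, so each one re-runs load_mm_ldt(). A hedged sketch of that broadcast step (this mirrors the usual on_each_cpu_mask() pattern; it is not a verbatim copy of the file's install path):

    /* Sketch: publish a new LDT and make all CPUs running this mm
     * reload it via flush_ldt() -> load_mm_ldt(). */
    static void publish_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
    {
            smp_store_release(&mm->context.ldt, ldt);  /* order vs readers */
            on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
    }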
/Linux-v5.15/drivers/gpu/drm/selftests/
test-drm_mm.c:

    static bool assert_no_holes(const struct drm_mm *mm)
    {
        …
        drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
            …
        drm_mm_for_each_node(hole, mm) {
        …
    }

    static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)
    {
        …
        drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
            …
            pr_err("empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n", …
        …
    }

    static bool assert_continuous(const struct drm_mm *mm, u64 size)
    {
        …
        if (!assert_no_holes(mm))
            …
        drm_mm_for_each_node(node, mm) {
        …
        drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
    [all …]
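These assertions are built from drm_mm's standard iteration macros. A hedged sketch of the same idiom outside the selftest (the function name and log text are illustrative):

    #include <drm/drm_mm.h>

    /* Sketch: walk every allocated node, then every hole, in a drm_mm. */
    static void dump_drm_mm_state(const struct drm_mm *mm)
    {
            struct drm_mm_node *node;
            u64 hole_start, hole_end;

            drm_mm_for_each_node(node, mm)
                    pr_info("node: start=%llx size=%llx\n",
                            node->start, node->size);

            drm_mm_for_each_hole(node, mm, hole_start, hole_end)
                    pr_info("hole: [%llx, %llx)\n", hole_start, hole_end);
    }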
/Linux-v5.15/drivers/misc/cxl/
fault.c:

    #include <linux/sched/mm.h>
    …
    #include <linux/mm.h>
    …
    static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm, …
    {
        …
        if (!(rc = copro_calculate_slb(mm, ea, &slb))) {
        …
    }
    …
                    struct mm_struct *mm, u64 ea)        /* in cxl_handle_segment_miss() */
    …
        if ((rc = cxl_fault_segment(ctx, mm, ea)))
    …
    int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar)
    {
        …
        /*
         * Add the fault handling cpu to task mm cpumask so that we
         * …
         * valid mm for user space addresses. Hence using the if (mm)
         * …
         */
        if (mm && !cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
    [all …]