Cross-reference results for the identifier "mm" in the Linux v4.19 source tree,
grouped by file. Each match is shown with its source line number; a trailing
"in foo()" names the enclosing function, "argument"/"local"/"member" flag how
the identifier is used there, and "[all …]" marks a match list truncated by
the search tool.

/Linux-v4.19/mm/debug.c

    115  void dump_mm(const struct mm_struct *mm)  in dump_mm() argument
    145  mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,  in dump_mm()
    147  mm->get_unmapped_area,  in dump_mm()
    149  mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,  in dump_mm()
    150  mm->pgd, atomic_read(&mm->mm_users),  in dump_mm()
    151  atomic_read(&mm->mm_count),  in dump_mm()
    152  mm_pgtables_bytes(mm),  in dump_mm()
    153  mm->map_count,  in dump_mm()
    154  mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,  in dump_mm()
    155  mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm,  in dump_mm()
    [all …]
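
dump_mm() is the printer behind the CONFIG_DEBUG_VM assertion macros. A
minimal sketch of one way it gets triggered, assuming VM_BUG_ON_MM() from
include/linux/mmdebug.h (the helper name is illustrative):

    #include <linux/mmdebug.h>
    #include <linux/mm_types.h>

    /* With CONFIG_DEBUG_VM=y, a failing VM_BUG_ON_MM() calls
     * dump_mm(mm) before BUG()ing. */
    static void check_mm_sane(struct mm_struct *mm)
    {
            VM_BUG_ON_MM(mm->map_count < 0, mm);
    }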

/Linux-v4.19/mm/mmu_notifier.c

     57  void __mmu_notifier_release(struct mm_struct *mm)  in __mmu_notifier_release() argument
     67  hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)  in __mmu_notifier_release()
     75  mn->ops->release(mn, mm);  in __mmu_notifier_release()
     77  spin_lock(&mm->mmu_notifier_mm->lock);  in __mmu_notifier_release()
     78  while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {  in __mmu_notifier_release()
     79  mn = hlist_entry(mm->mmu_notifier_mm->list.first,  in __mmu_notifier_release()
     90  spin_unlock(&mm->mmu_notifier_mm->lock);  in __mmu_notifier_release()
    110  int __mmu_notifier_clear_flush_young(struct mm_struct *mm,  in __mmu_notifier_clear_flush_young() argument
    118  hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {  in __mmu_notifier_clear_flush_young()
    120  young |= mn->ops->clear_flush_young(mn, mm, start, end);  in __mmu_notifier_clear_flush_young()
    [all …]

/Linux-v4.19/include/linux/mmu_notifier.h

     66  struct mm_struct *mm);
     78  struct mm_struct *mm,
     88  struct mm_struct *mm,
     99  struct mm_struct *mm,
    107  struct mm_struct *mm,
    160  struct mm_struct *mm,
    164  struct mm_struct *mm,
    189  void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
    209  static inline int mm_has_notifiers(struct mm_struct *mm)  in mm_has_notifiers() argument
    211  return unlikely(mm->mmu_notifier_mm);  in mm_has_notifiers()
    [all …]
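
This header declares the mmu_notifier_ops callbacks that the dispatch loops in
mm/mmu_notifier.c above invoke. A minimal client sketch, assuming the v4.19
signatures (the my_* names are illustrative; mmu_notifier_register() takes
mmap_sem itself, so the caller must not hold it):

    #include <linux/mmu_notifier.h>

    static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
    {
            /* the mm is being torn down: drop all secondary mappings */
    }

    static int my_invalidate_range_start(struct mmu_notifier *mn,
                                         struct mm_struct *mm,
                                         unsigned long start,
                                         unsigned long end, bool blockable)
    {
            /* shoot down [start, end) in the secondary MMU */
            return 0;
    }

    static const struct mmu_notifier_ops my_ops = {
            .release                = my_release,
            .invalidate_range_start = my_invalidate_range_start,
    };

    static struct mmu_notifier my_mn = { .ops = &my_ops };

    static int my_attach(struct mm_struct *mm)
    {
            return mmu_notifier_register(&my_mn, mm);
    }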

/Linux-v4.19/arch/s390/include/asm/mmu_context.h

     19  struct mm_struct *mm)  in init_new_context() argument
     21  spin_lock_init(&mm->context.lock);  in init_new_context()
     22  INIT_LIST_HEAD(&mm->context.pgtable_list);  in init_new_context()
     23  INIT_LIST_HEAD(&mm->context.gmap_list);  in init_new_context()
     24  cpumask_clear(&mm->context.cpu_attach_mask);  in init_new_context()
     25  atomic_set(&mm->context.flush_count, 0);  in init_new_context()
     26  mm->context.gmap_asce = 0;  in init_new_context()
     27  mm->context.flush_mm = 0;  in init_new_context()
     29  mm->context.alloc_pgste = page_table_allocate_pgste ||  in init_new_context()
     31  (current->mm && current->mm->context.alloc_pgste);  in init_new_context()
    [all …]

/Linux-v4.19/arch/s390/include/asm/pgalloc.h

     26  struct page *page_table_alloc_pgste(struct mm_struct *mm);
     37  static inline unsigned long pgd_entry_type(struct mm_struct *mm)  in pgd_entry_type() argument
     39  if (mm->context.asce_limit <= _REGION3_SIZE)  in pgd_entry_type()
     41  if (mm->context.asce_limit <= _REGION2_SIZE)  in pgd_entry_type()
     43  if (mm->context.asce_limit <= _REGION1_SIZE)  in pgd_entry_type()
     48  int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
     51  static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)  in p4d_alloc_one() argument
     53  unsigned long *table = crst_table_alloc(mm);  in p4d_alloc_one()
     59  #define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d)  argument
     61  static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)  in pud_alloc_one() argument
    [all …]

/Linux-v4.19/arch/m68k/include/asm/mmu_context.h

      8  static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)  in enter_lazy_tlb() argument
     32  static inline void get_mmu_context(struct mm_struct *mm)  in get_mmu_context() argument
     36  if (mm->context != NO_CONTEXT)  in get_mmu_context()
     49  mm->context = ctx;  in get_mmu_context()
     50  context_mm[ctx] = mm;  in get_mmu_context()
     56  #define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)  argument
     61  static inline void destroy_context(struct mm_struct *mm)  in destroy_context() argument
     63  if (mm->context != NO_CONTEXT) {  in destroy_context()
     64  clear_bit(mm->context, context_map);  in destroy_context()
     65  mm->context = NO_CONTEXT;  in destroy_context()
    [all …]
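
The context_map bitmap and context_mm[] back-pointer array used above are a
classic software-managed context (ASID) allocator. A generic sketch of the
allocation step, with illustrative names and an arbitrary LAST_CONTEXT:

    #include <linux/bitops.h>

    #define LAST_CONTEXT    255     /* illustrative; hardware-dependent */

    static unsigned long context_map[BITS_TO_LONGS(LAST_CONTEXT + 1)];

    /* Returns a free context number, or -1 when every context is taken
     * and the caller has to steal one from another mm. */
    static int alloc_context(void)
    {
            int ctx = find_first_zero_bit(context_map, LAST_CONTEXT + 1);

            if (ctx > LAST_CONTEXT)
                    return -1;
            set_bit(ctx, context_map);
            return ctx;
    }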

/Linux-v4.19/arch/powerpc/include/asm/mmu_context.h

     17  extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
     18  extern void destroy_context(struct mm_struct *mm);
     23  extern bool mm_iommu_preregistered(struct mm_struct *mm);
     24  extern long mm_iommu_get(struct mm_struct *mm,
     27  extern long mm_iommu_put(struct mm_struct *mm,
     29  extern void mm_iommu_init(struct mm_struct *mm);
     30  extern void mm_iommu_cleanup(struct mm_struct *mm);
     31  extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
     34  struct mm_struct *mm, unsigned long ua, unsigned long size);
     35  extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
    [all …]

/Linux-v4.19/arch/powerpc/include/asm/tlb.h

     37  extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
     45  flush_hash_entry(tlb->mm, ptep, address);  in __tlb_remove_tlb_entry()
     66  static inline int mm_is_core_local(struct mm_struct *mm)  in mm_is_core_local() argument
     68  return cpumask_subset(mm_cpumask(mm),  in mm_is_core_local()
     73  static inline int mm_is_thread_local(struct mm_struct *mm)  in mm_is_thread_local() argument
     75  if (atomic_read(&mm->context.active_cpus) > 1)  in mm_is_thread_local()
     77  return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));  in mm_is_thread_local()
     79  static inline void mm_reset_thread_local(struct mm_struct *mm)  in mm_reset_thread_local() argument
     81  WARN_ON(atomic_read(&mm->context.copros) > 0);  in mm_reset_thread_local()
     87  WARN_ON(current->mm != mm);  in mm_reset_thread_local()
    [all …]

/Linux-v4.19/arch/x86/include/asm/mmu_context.h

     30  static inline void load_mm_cr4(struct mm_struct *mm)  in load_mm_cr4() argument
     33  atomic_read(&mm->context.perf_rdpmc_allowed))  in load_mm_cr4()
     39  static inline void load_mm_cr4(struct mm_struct *mm) {}  in load_mm_cr4() argument
     80  static inline void init_new_context_ldt(struct mm_struct *mm)  in init_new_context_ldt() argument
     82  mm->context.ldt = NULL;  in init_new_context_ldt()
     83  init_rwsem(&mm->context.ldt_usr_sem);  in init_new_context_ldt()
     85  int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
     86  void destroy_context_ldt(struct mm_struct *mm);
     87  void ldt_arch_exit_mmap(struct mm_struct *mm);
     89  static inline void init_new_context_ldt(struct mm_struct *mm) { }  in init_new_context_ldt() argument
    [all …]

/Linux-v4.19/arch/x86/include/asm/pgalloc.h

      9  static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }  in __paravirt_pgd_alloc() argument
     14  #define paravirt_pgd_alloc(mm) __paravirt_pgd_alloc(mm)  argument
     15  static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}  in paravirt_pgd_free() argument
     16  static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}  in paravirt_alloc_pte() argument
     17  static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}  in paravirt_alloc_pmd() argument
     20  static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {}  in paravirt_alloc_pud() argument
     21  static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn) {}  in paravirt_alloc_p4d() argument
     48  extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
     56  static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)  in pte_free_kernel() argument
     62  static inline void pte_free(struct mm_struct *mm, struct page *pte)  in pte_free() argument
    [all …]

/Linux-v4.19/arch/x86/include/asm/pkeys.h

     21  extern int __execute_only_pkey(struct mm_struct *mm);
     22  static inline int execute_only_pkey(struct mm_struct *mm)  in execute_only_pkey() argument
     27  return __execute_only_pkey(mm);  in execute_only_pkey()
     46  #define mm_pkey_allocation_map(mm) (mm->context.pkey_allocation_map)  argument
     47  #define mm_set_pkey_allocated(mm, pkey) do { \  argument
     48  mm_pkey_allocation_map(mm) |= (1U << pkey); \
     50  #define mm_set_pkey_free(mm, pkey) do { \  argument
     51  mm_pkey_allocation_map(mm) &= ~(1U << pkey); \
     55  bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)  in mm_pkey_is_allocated() argument
     71  if (pkey == mm->context.execute_only_pkey)  in mm_pkey_is_allocated()
    [all …]
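
mm_pkey_allocation_map() above is a plain bitmask over the protection keys, so
"allocate the lowest free key" reduces to ffz(). A simplified sketch of the
allocation side that pairs with the mm_set_pkey_allocated()/mm_set_pkey_free()
macros (assuming arch_max_pkey() and the macros above; error handling trimmed):

    /* Pick the lowest clear bit in the per-mm allocation map. */
    static inline int demo_pkey_alloc(struct mm_struct *mm)
    {
            u16 all_pkeys_mask = (1U << arch_max_pkey()) - 1;
            int pkey;

            if (mm_pkey_allocation_map(mm) == all_pkeys_mask)
                    return -1;      /* every key is in use */

            pkey = ffz(mm_pkey_allocation_map(mm));
            mm_set_pkey_allocated(mm, pkey);
            return pkey;
    }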

/Linux-v4.19/arch/x86/mm/mpx.c

     26  static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)  in mpx_bd_size_bytes() argument
     28  if (is_64bit_mm(mm))  in mpx_bd_size_bytes()
     34  static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)  in mpx_bt_size_bytes() argument
     36  if (is_64bit_mm(mm))  in mpx_bt_size_bytes()
     48  struct mm_struct *mm = current->mm;  in mpx_mmap() local
     52  if (len != mpx_bt_size_bytes(mm))  in mpx_mmap()
     55  down_write(&mm->mmap_sem);  in mpx_mmap()
     58  up_write(&mm->mmap_sem);  in mpx_mmap()
    227  struct mm_struct *mm = current->mm;  in mpx_enable_management() local
    242  down_write(&mm->mmap_sem);  in mpx_enable_management()
    [all …]

/Linux-v4.19/arch/sparc/include/asm/mmu_context_64.h

     19  static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)  in enter_lazy_tlb() argument
     28  void get_new_mmu_context(struct mm_struct *mm);
     29  int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
     30  void destroy_context(struct mm_struct *mm);
     38  static inline void tsb_context_switch_ctx(struct mm_struct *mm,  in tsb_context_switch_ctx() argument
     41  __tsb_context_switch(__pa(mm->pgd),  in tsb_context_switch_ctx()
     42  &mm->context.tsb_block[MM_TSB_BASE],  in tsb_context_switch_ctx()
     44  (mm->context.tsb_block[MM_TSB_HUGE].tsb ?  in tsb_context_switch_ctx()
     45  &mm->context.tsb_block[MM_TSB_HUGE] :  in tsb_context_switch_ctx()
     50  , __pa(&mm->context.tsb_descr[MM_TSB_BASE]),  in tsb_context_switch_ctx()
    [all …]

/Linux-v4.19/arch/s390/mm/pgtable.c

     29  static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,  in ptep_ipte_local() argument
     36  asce = READ_ONCE(mm->context.gmap_asce);  in ptep_ipte_local()
     40  asce = asce ? : mm->context.asce;  in ptep_ipte_local()
     49  static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,  in ptep_ipte_global() argument
     56  asce = READ_ONCE(mm->context.gmap_asce);  in ptep_ipte_global()
     60  asce = asce ? : mm->context.asce;  in ptep_ipte_global()
     69  static inline pte_t ptep_flush_direct(struct mm_struct *mm,  in ptep_flush_direct() argument
     78  atomic_inc(&mm->context.flush_count);  in ptep_flush_direct()
     80  cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))  in ptep_flush_direct()
     81  ptep_ipte_local(mm, addr, ptep, nodat);  in ptep_flush_direct()
    [all …]

/Linux-v4.19/arch/arm/include/asm/mmu_context.h

     27  void __check_vmalloc_seq(struct mm_struct *mm);
     31  void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
     33  init_new_context(struct task_struct *tsk, struct mm_struct *mm)  in init_new_context() argument
     35  atomic64_set(&mm->context.id, 0);  in init_new_context()
     40  void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
     43  static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,  in a15_erratum_get_cpumask() argument
     53  static inline void check_and_switch_context(struct mm_struct *mm,  in check_and_switch_context() argument
     56  if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))  in check_and_switch_context()
     57  __check_vmalloc_seq(mm);  in check_and_switch_context()
     67  mm->context.switch_pending = 1;  in check_and_switch_context()
    [all …]

/Linux-v4.19/arch/powerpc/mm/slice.c

     90  static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,  in slice_area_is_free() argument
     95  if ((mm->context.slb_addr_limit - len) < addr)  in slice_area_is_free()
     97  vma = find_vma(mm, addr);  in slice_area_is_free()
    101  static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)  in slice_low_has_vma() argument
    103  return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,  in slice_low_has_vma()
    107  static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)  in slice_high_has_vma() argument
    120  return !slice_area_is_free(mm, start, end - start);  in slice_high_has_vma()
    123  static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,  in slice_mask_for_free() argument
    133  if (!slice_low_has_vma(mm, i))  in slice_mask_for_free()
    140  if (!slice_high_has_vma(mm, i))  in slice_mask_for_free()
    [all …]
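
slice_area_is_free() above is built on the standard find_vma() contract:
find_vma(mm, addr) returns the first VMA whose vm_end lies above addr, or NULL
if there is none. The underlying idiom, as a generic sketch (the helper name
is illustrative; the caller must hold mm->mmap_sem):

    #include <linux/mm.h>

    /* True when [addr, addr + len) contains no mapping at all. */
    static bool range_is_free(struct mm_struct *mm,
                              unsigned long addr, unsigned long len)
    {
            struct vm_area_struct *vma = find_vma(mm, addr);

            return !vma || addr + len <= vma->vm_start;
    }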

/Linux-v4.19/arch/powerpc/mm/mmu_context_book3s64.c

     56  static int hash__init_new_context(struct mm_struct *mm)  in hash__init_new_context() argument
     78  if (mm->context.id == 0)  in hash__init_new_context()
     79  slice_init_new_context_exec(mm);  in hash__init_new_context()
     81  subpage_prot_init_new_context(mm);  in hash__init_new_context()
     83  pkey_mm_init(mm);  in hash__init_new_context()
     87  static int radix__init_new_context(struct mm_struct *mm)  in radix__init_new_context() argument
    101  process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);  in radix__init_new_context()
    111  mm->context.npu_context = NULL;  in radix__init_new_context()
    116  int init_new_context(struct task_struct *tsk, struct mm_struct *mm)  in init_new_context() argument
    121  index = radix__init_new_context(mm);  in init_new_context()
    [all …]

/Linux-v4.19/drivers/gpu/drm/i915/i915_gem_userptr.c

     37  struct mm_struct *mm;  member
     72  mutex_lock(&obj->mm.lock);  in cancel_userptr()
     74  mutex_unlock(&obj->mm.lock);  in cancel_userptr()
     88  atomic_read(&obj->mm.pages_pin_count),  in cancel_userptr()
    116  struct mm_struct *mm,  in i915_gem_userptr_mn_invalidate_range_start() argument
    171  i915_mmu_notifier_create(struct mm_struct *mm)  in i915_mmu_notifier_create() argument
    211  i915_mmu_notifier_find(struct i915_mm_struct *mm)  in i915_mmu_notifier_find() argument
    216  mn = mm->mn;  in i915_mmu_notifier_find()
    220  mn = i915_mmu_notifier_create(mm->mm);  in i915_mmu_notifier_find()
    224  down_write(&mm->mm->mmap_sem);  in i915_mmu_notifier_find()
    [all …]

/Linux-v4.19/arch/sparc/mm/tlb.c

     27  struct mm_struct *mm = tb->mm;  in flush_tlb_pending() local
     34  if (CTX_VALID(mm->context)) {  in flush_tlb_pending()
     36  global_flush_tlb_page(mm, tb->vaddrs[0]);  in flush_tlb_pending()
     39  smp_flush_tlb_pending(tb->mm, tb->tlb_nr,  in flush_tlb_pending()
     42  __flush_tlb_pending(CTX_HWBITS(tb->mm->context),  in flush_tlb_pending()
     70  static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,  in tlb_batch_add_one() argument
     82  if (unlikely(nr != 0 && mm != tb->mm)) {  in tlb_batch_add_one()
     88  flush_tsb_user_page(mm, vaddr, hugepage_shift);  in tlb_batch_add_one()
     89  global_flush_tlb_page(mm, vaddr);  in tlb_batch_add_one()
     94  tb->mm = mm;  in tlb_batch_add_one()
    [all …]

/Linux-v4.19/arch/sparc/mm/tsb.c

    121  struct mm_struct *mm = tb->mm;  in flush_tsb_user() local
    124  spin_lock_irqsave(&mm->context.lock, flags);  in flush_tsb_user()
    127  base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;  in flush_tsb_user()
    128  nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;  in flush_tsb_user()
    140  else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {  in flush_tsb_user()
    141  base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;  in flush_tsb_user()
    142  nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;  in flush_tsb_user()
    149  spin_unlock_irqrestore(&mm->context.lock, flags);  in flush_tsb_user()
    152  void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,  in flush_tsb_user_page() argument
    157  spin_lock_irqsave(&mm->context.lock, flags);  in flush_tsb_user_page()
    [all …]

/Linux-v4.19/drivers/gpu/drm/drm_mm.c

    124  static void show_leaks(struct drm_mm *mm)  in show_leaks() argument
    134  list_for_each_entry(node, drm_mm_nodes(mm), node_list) {  in show_leaks()
    159  static void show_leaks(struct drm_mm *mm) { }  in show_leaks() argument
    170  __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)  in INTERVAL_TREE_DEFINE()
    172  return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,  in INTERVAL_TREE_DEFINE()
    173  start, last) ?: (struct drm_mm_node *)&mm->head_node;  in INTERVAL_TREE_DEFINE()
    180  struct drm_mm *mm = hole_node->mm;  in drm_mm_interval_tree_add_node() local
    203  link = &mm->interval_tree.rb_root.rb_node;  in drm_mm_interval_tree_add_node()
    221  rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,  in drm_mm_interval_tree_add_node()
    270  struct drm_mm *mm = node->mm;  in add_hole() local
    [all …]
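
For orientation, the range allocator implemented above is driven through a
small API. A minimal lifecycle sketch, assuming the v4.19 entry points
drm_mm_init(), drm_mm_insert_node(), drm_mm_remove_node() and
drm_mm_takedown():

    #include <drm/drm_mm.h>
    #include <linux/sizes.h>

    static int demo_drm_mm(void)
    {
            struct drm_mm mm;
            struct drm_mm_node node = {};
            int ret;

            drm_mm_init(&mm, 0, SZ_1M); /* manage a 1 MiB range from offset 0 */

            ret = drm_mm_insert_node(&mm, &node, SZ_4K);
            if (!ret) {
                    /* node.start now holds the offset the allocator chose */
                    drm_mm_remove_node(&node);
            }

            drm_mm_takedown(&mm);       /* every node must be removed first */
            return ret;
    }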

/Linux-v4.19/arch/um/kernel/skas/mmu.c

     18  static int init_stub_pte(struct mm_struct *mm, unsigned long proc,  in init_stub_pte() argument
     26  pgd = pgd_offset(mm, proc);  in init_stub_pte()
     27  pud = pud_alloc(mm, pgd, proc);  in init_stub_pte()
     31  pmd = pmd_alloc(mm, pud, proc);  in init_stub_pte()
     35  pte = pte_alloc_map(mm, pmd, proc);  in init_stub_pte()
     44  pmd_free(mm, pmd);  in init_stub_pte()
     46  pud_free(mm, pud);  in init_stub_pte()
     51  int init_new_context(struct task_struct *task, struct mm_struct *mm)  in init_new_context() argument
     54  struct mm_context *to_mm = &mm->context;  in init_new_context()
     63  if (current->mm != NULL && current->mm != &init_mm)  in init_new_context()
    [all …]
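
init_stub_pte() above performs the canonical top-down page-table descent.
Condensed into one helper, as a sketch mirroring exactly the calls shown,
without the error unwinding (um's three-level layout lets pud_alloc() take the
pgd directly; other configurations insert a p4d level):

    static pte_t *walk_alloc_pte(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd = pgd_offset(mm, addr);
            pud_t *pud = pud_alloc(mm, pgd, addr);
            pmd_t *pmd;

            if (!pud)
                    return NULL;
            pmd = pmd_alloc(mm, pud, addr);
            if (!pmd)
                    return NULL;
            return pte_alloc_map(mm, pmd, addr); /* also maps the PTE page */
    }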

/Linux-v4.19/arch/x86/kernel/ldt.c

     54  struct mm_struct *mm = __mm;  in flush_ldt() local
     56  if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)  in flush_ldt()
     59  load_mm_ldt(mm);  in flush_ldt()
    105  static void do_sanity_check(struct mm_struct *mm,  in do_sanity_check() argument
    109  if (mm->context.ldt) {  in do_sanity_check()
    150  static void map_ldt_struct_to_user(struct mm_struct *mm)  in map_ldt_struct_to_user() argument
    152  pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);  in map_ldt_struct_to_user()
    159  if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)  in map_ldt_struct_to_user()
    163  static void sanity_check_ldt_mapping(struct mm_struct *mm)  in sanity_check_ldt_mapping() argument
    165  pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);  in sanity_check_ldt_mapping()
    [all …]

/Linux-v4.19/fs/proc/task_nommu.c

     21  void task_mem(struct seq_file *m, struct mm_struct *mm)  in task_mem() argument
     28  down_read(&mm->mmap_sem);  in task_mem()
     29  for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {  in task_mem()
     42  if (atomic_read(&mm->mm_count) > 1 ||  in task_mem()
     52  if (atomic_read(&mm->mm_count) > 1)  in task_mem()
     53  sbytes += kobjsize(mm);  in task_mem()
     55  bytes += kobjsize(mm);  in task_mem()
     80  up_read(&mm->mmap_sem);  in task_mem()
     83  unsigned long task_vsize(struct mm_struct *mm)  in task_vsize() argument
     89  down_read(&mm->mmap_sem);  in task_vsize()
    [all …]

/Linux-v4.19/drivers/gpu/drm/selftests/test-drm_mm.c

     52  static bool assert_no_holes(const struct drm_mm *mm)  in assert_no_holes() argument
     59  drm_mm_for_each_hole(hole, mm, hole_start, hole_end)  in assert_no_holes()
     66  drm_mm_for_each_node(hole, mm) {  in assert_no_holes()
     76  static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)  in assert_one_hole() argument
     87  drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {  in assert_one_hole()
    105  static bool assert_continuous(const struct drm_mm *mm, u64 size)  in assert_continuous() argument
    111  if (!assert_no_holes(mm))  in assert_continuous()
    116  drm_mm_for_each_node(node, mm) {  in assert_continuous()
    135  drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {  in assert_continuous()
    167  static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm,  in assert_node()
    [all …]
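
The assertions above lean on the drm_mm_for_each_node()/drm_mm_for_each_hole()
iterators. A small sketch in the same spirit (the helper is illustrative, not
part of the selftest):

    #include <drm/drm_mm.h>

    /* Count the nodes currently allocated in a drm_mm. */
    static unsigned int count_nodes(const struct drm_mm *mm)
    {
            struct drm_mm_node *node;
            unsigned int n = 0;

            drm_mm_for_each_node(node, mm)
                    n++;
            return n;
    }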