Lines Matching refs:vm_area_struct
158 struct vm_area_struct *vm_area_alloc(struct mm_struct *);
159 struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
160 void vm_area_free(struct vm_area_struct *);
344 struct vm_area_struct *vma; /* Target VMA */
394 void (*open)(struct vm_area_struct * area);
395 void (*close)(struct vm_area_struct * area);
396 int (*split)(struct vm_area_struct * area, unsigned long addr);
397 int (*mremap)(struct vm_area_struct * area);
403 unsigned long (*pagesize)(struct vm_area_struct * area);
415 int (*access)(struct vm_area_struct *vma, unsigned long addr,
421 const char *(*name)(struct vm_area_struct *vma);
431 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
443 struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
451 struct page *(*find_special_page)(struct vm_area_struct *vma,
455 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) in vma_init()
465 static inline void vma_set_anonymous(struct vm_area_struct *vma) in vma_set_anonymous()
724 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) in maybe_mkwrite()
1323 struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1327 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
1330 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1332 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
1334 void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
1376 struct vm_area_struct *vma;
1382 int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
1386 struct vm_area_struct *vma);
1390 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
1392 int follow_phys(struct vm_area_struct *vma, unsigned long address,
1394 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
1406 extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
1416 static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma, in handle_mm_fault()
1453 struct vm_area_struct **vmas, int *locked);
1456 struct vm_area_struct **vmas);
1464 struct vm_area_struct **vmas);
1468 struct page **pages, struct vm_area_struct **vmas) in get_user_pages_longterm()
1550 static inline bool vma_is_anonymous(struct vm_area_struct *vma) in vma_is_anonymous()
1560 bool vma_is_shmem(struct vm_area_struct *vma);
1562 static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; } in vma_is_shmem()
1565 int vma_is_stack_for_current(struct vm_area_struct *vma);
1567 extern unsigned long move_page_tables(struct vm_area_struct *vma,
1568 unsigned long old_addr, struct vm_area_struct *new_vma,
1571 extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
1574 extern int mprotect_fixup(struct vm_area_struct *vma,
1575 struct vm_area_struct **pprev, unsigned long start,
1691 int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
2184 void vma_interval_tree_insert(struct vm_area_struct *node,
2186 void vma_interval_tree_insert_after(struct vm_area_struct *node,
2187 struct vm_area_struct *prev,
2189 void vma_interval_tree_remove(struct vm_area_struct *node,
2191 struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
2193 struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
2219 extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
2220 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
2221 struct vm_area_struct *expand);
2222 static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start, in vma_adjust()
2223 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert) in vma_adjust()
2227 extern struct vm_area_struct *vma_merge(struct mm_struct *,
2228 struct vm_area_struct *prev, unsigned long addr, unsigned long end,
2231 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
2232 extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
2234 extern int split_vma(struct mm_struct *, struct vm_area_struct *,
2236 extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
2237 extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
2239 extern void unlink_file_vma(struct vm_area_struct *);
2240 extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
2269 extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
2271 extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
2390 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
2393 extern int expand_downwards(struct vm_area_struct *vma,
2396 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
2402 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
2403 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
2404 struct vm_area_struct **pprev);
2408 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long st… in find_vma_intersection()
2410 struct vm_area_struct * vma = find_vma(mm,start_addr); in find_vma_intersection()
2417 static inline unsigned long vm_start_gap(struct vm_area_struct *vma) in vm_start_gap()
2429 static inline unsigned long vm_end_gap(struct vm_area_struct *vma) in vm_end_gap()
2441 static inline unsigned long vma_pages(struct vm_area_struct *vma) in vma_pages()
2447 static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm, in find_exact_vma()
2450 struct vm_area_struct *vma = find_vma(mm, vm_start); in find_exact_vma()
2458 static inline bool range_in_vma(struct vm_area_struct *vma, in range_in_vma()
2466 void vma_set_page_prot(struct vm_area_struct *vma);
2472 static inline void vma_set_page_prot(struct vm_area_struct *vma) in vma_set_page_prot()
2479 unsigned long change_prot_numa(struct vm_area_struct *vma,
2483 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
2484 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
2486 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
2487 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2489 int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2491 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2493 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2495 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
2497 static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma, in vmf_insert_page()
2510 static inline vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, in vmf_insert_mixed()
2523 static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, in vmf_insert_pfn()
2543 struct page *follow_page_mask(struct vm_area_struct *vma,
2547 static inline struct page *follow_page(struct vm_area_struct *vma, in follow_page()
2631 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
2635 static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm) in get_gate_vma()
2663 const char * arch_vma_name(struct vm_area_struct *vma);
2750 struct vm_area_struct *vma,
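
For orientation, a minimal, hedged sketch of how a character driver's mmap path typically exercises a few of the interfaces listed above: the vm_operations_struct open/close hooks (lines 394-395), vma_pages() (line 2441), find_vma() (line 2402) and remap_pfn_range() (line 2484). All demo_* names are hypothetical; the code assumes the ~v4.19-era API reflected in this listing (mm->mmap_sem rather than the later mmap_lock), and page-reservation, refcounting and device registration details are omitted. For kernel-allocated pages, vm_insert_page() (line 2486) is often the more idiomatic choice than remap_pfn_range(); the latter is shown here because it appears in the listing and works for the simple contiguous buffer below.

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/io.h>		/* virt_to_phys() */

#define DEMO_ORDER 2		/* back the mapping with four pages */

static unsigned long demo_buf;	/* kernel buffer exported to user space */

/* Hooks matching the vm_operations_struct callbacks listed above. */
static void demo_vma_open(struct vm_area_struct *vma)
{
	pr_info("demo: vma %lx-%lx opened\n", vma->vm_start, vma->vm_end);
}

static void demo_vma_close(struct vm_area_struct *vma)
{
	pr_info("demo: vma %lx-%lx closed\n", vma->vm_start, vma->vm_end);
}

static const struct vm_operations_struct demo_vm_ops = {
	.open  = demo_vma_open,
	.close = demo_vma_close,
};

static int demo_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* vma_pages() reports the requested mapping length in pages. */
	if (vma_pages(vma) > (1UL << DEMO_ORDER))
		return -EINVAL;

	/* remap_pfn_range() installs the page tables for the whole range. */
	if (remap_pfn_range(vma, vma->vm_start,
			    virt_to_phys((void *)demo_buf) >> PAGE_SHIFT,
			    size, vma->vm_page_prot))
		return -EAGAIN;

	vma->vm_ops = &demo_vm_ops;
	demo_vma_open(vma);	/* .open is not called for the initial mmap */
	return 0;
}

/*
 * find_vma() looks up the VMA covering (or following) addr; on this kernel
 * generation the caller holds mmap_sem for reading across the lookup and any
 * use of the returned VMA.  Shown only for the locking pattern, not wired up.
 */
static unsigned long __maybe_unused demo_vma_len(struct mm_struct *mm,
						 unsigned long addr)
{
	struct vm_area_struct *vma;
	unsigned long pages = 0;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	if (vma && vma->vm_start <= addr)	/* may return the next VMA */
		pages = vma_pages(vma);
	up_read(&mm->mmap_sem);
	return pages;
}

/* Registration of demo_fops (misc device, cdev, ...) is omitted. */
static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.mmap  = demo_mmap,
};

static int __init demo_init(void)
{
	demo_buf = __get_free_pages(GFP_KERNEL, DEMO_ORDER);
	return demo_buf ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	free_pages(demo_buf, DEMO_ORDER);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");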
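
The prototypes around lines 1453-1468 belong to the get_user_pages() family. Below is a second hedged sketch of the calling convention they imply: pin the pages backing a user address range, access them through a temporary kernel mapping, mark them dirty, and drop the references. demo_touch_user_range() is a hypothetical helper; the gup_flags-based signature and the caller-held mmap_sem are assumptions tied to the ~v4.19 API shown in this listing, not a definitive implementation.

#include <linux/mm.h>
#include <linux/highmem.h>	/* kmap()/kunmap() */
#include <linux/sched.h>	/* current */
#include <linux/slab.h>

static long demo_touch_user_range(unsigned long uaddr, unsigned long nr_pages)
{
	struct page **pages;
	long pinned, i;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);	/* required around GUP here */
	pinned = get_user_pages(uaddr & PAGE_MASK, nr_pages,
				FOLL_WRITE,	/* we intend to dirty the pages */
				pages, NULL);	/* per-page VMAs not needed */
	up_read(&current->mm->mmap_sem);

	/* If pinning failed (pinned < 0) the loop simply does not run. */
	for (i = 0; i < pinned; i++) {
		void *kaddr = kmap(pages[i]);	/* temporary kernel mapping */

		memset(kaddr, 0, PAGE_SIZE);
		kunmap(pages[i]);
		set_page_dirty_lock(pages[i]);	/* we wrote to the page */
		put_page(pages[i]);		/* drop the GUP reference */
	}

	kfree(pages);
	return pinned < 0 ? pinned : 0;
}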