/* SPDX-License-Identifier: GPL-2.0 */
#include "highmem-internal.h"
 * kmap - Map a page for long term usage
 * @page: Pointer to the page to be mapped

static inline void *kmap(struct page *page);

 * kunmap - Unmap the virtual address mapped by kmap()
 * @page: Pointer to the page which was mapped by kmap()

static inline void kunmap(struct page *page);
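/*
 * Illustrative usage sketch, not part of this header: kmap() returns a
 * long-lived, globally visible mapping that must be released with kunmap()
 * on the same page. The helper name 'example_read_byte' is hypothetical.
 */
static inline u8 example_read_byte(struct page *page, size_t offset)
{
	u8 *vaddr = kmap(page);		/* may sleep; task context only */
	u8 val = vaddr[offset];		/* assumes offset < PAGE_SIZE */

	kunmap(page);			/* kunmap() takes the page, not the address */
	return val;
}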
 * kmap_to_page - Get the page for a kmap'ed address
 * Returns: The page which is mapped to @addr.

static inline struct page *kmap_to_page(void *addr);
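/*
 * Illustrative usage sketch, not part of this header: kmap_to_page() can
 * recover the struct page when only the kmap()'ed address was handed
 * around. The helper name is hypothetical.
 */
static inline void example_kunmap_by_address(void *vaddr)
{
	kunmap(kmap_to_page(vaddr));
}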
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 *		       remove stray mappings
 * kmap_local_page - Map a page for temporary usage
 * @page: Pointer to the page to be mapped
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of

static inline void *kmap_local_page(struct page *page);
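/*
 * Illustrative usage sketch, not part of this header: kmap_local_page()
 * mappings are CPU-local and stack-like, so nested mappings must be
 * released with kunmap_local() in reverse order. The helper name is
 * hypothetical.
 */
static inline void example_copy_page_contents(struct page *dst, struct page *src)
{
	void *dst_addr = kmap_local_page(dst);
	void *src_addr = kmap_local_page(src);

	memcpy(dst_addr, src_addr, PAGE_SIZE);
	kunmap_local(src_addr);		/* reverse order of the kmap_local calls */
	kunmap_local(dst_addr);
}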
 * kmap_local_folio - Map a page in this folio for temporary usage
 * @folio: The folio containing the page.
 * @offset: The byte offset within the folio which identifies the page.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 *
 * Return: The virtual address of @offset.

static inline void *kmap_local_folio(struct folio *folio, size_t offset);
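/*
 * Illustrative usage sketch, not part of this header: kmap_local_folio()
 * returns the address of the byte at @offset, so a (possibly multi-page)
 * folio can be processed one page at a time. The helper name is
 * hypothetical.
 */
static inline void example_zero_folio(struct folio *folio)
{
	size_t offset;

	for (offset = 0; offset < folio_size(folio); offset += PAGE_SIZE) {
		void *kaddr = kmap_local_folio(folio, offset);

		memset(kaddr, 0, PAGE_SIZE);
		kunmap_local(kaddr);
	}
}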
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page: Pointer to the page to be mapped
 * It is used in atomic context when code wants to access the contents of a
 * page that might be allocated from high memory (see __GFP_HIGHMEM), for
 * example a page in the pagecache. The API has two functions, and they
 * can be used in a manner similar to the following:
 *
 *   // Find the page of interest.
 *   struct page *page = find_get_page(mapping, offset);
 *
 *   // Gain access to the contents of that page.
 *   void *vaddr = kmap_atomic(page);
 *
 *   // Do something to the contents of that page.
 *   memset(vaddr, 0, PAGE_SIZE);
 *
 *   // Unmap that page.
 *   kunmap_atomic(vaddr);
 *
 * If you need to map two pages because you want to copy from one page to
 * another, the kmap_atomic() calls have to be strictly nested and the
 * pages unmapped in the reverse order.

static inline void *kmap_atomic(struct page *page);
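/*
 * Illustrative usage sketch, not part of this header: since kmap_atomic()
 * is deprecated, the same pattern in new code uses kmap_local_page() and
 * kunmap_local(). The helper name is hypothetical.
 */
static inline void example_zero_head_of_page(struct page *page, size_t len)
{
	void *vaddr = kmap_local_page(page);	/* was: kmap_atomic(page) */

	memset(vaddr, 0, len);			/* assumes len <= PAGE_SIZE */
	kunmap_local(vaddr);			/* was: kunmap_atomic(vaddr) */
}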
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_local_page(page);
	clear_user_page(addr, vaddr, page);
	kunmap_local(addr);
}
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a
 *					VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * Returns: The allocated and zeroed HIGHMEM page
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future (e.g. via move_pages()) or be reclaimed.
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);
	return page;
}
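/*
 * Illustrative usage sketch, not part of this header: a typical caller is
 * an anonymous-fault path that needs a zeroed, migratable page for a user
 * VMA. 'example_anon_fault' is hypothetical and omits locking, accounting
 * and the actual page-table insertion.
 */
static inline vm_fault_t example_anon_fault(struct vm_fault *vmf)
{
	struct page *page;

	page = alloc_zeroed_user_highpage_movable(vmf->vma, vmf->address);
	if (!page)
		return VM_FAULT_OOM;
	/* ... insert 'page' into the page tables here ... */
	return 0;
}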
static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_local_page(page);
	clear_page(kaddr);
	kunmap_local(kaddr);
}

static inline void clear_highpage_kasan_tagged(struct page *page)
{
	u8 tag = page_kasan_tag(page);

	page_kasan_tag_reset(page);
	clear_highpage(page);
	page_kasan_tag_set(page, tag);
}
static inline void tag_clear_highpage(struct page *page)
/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#ifdef CONFIG_HIGHMEM
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2);
#else
static inline void zero_user_segments(struct page *page,
		unsigned start1, unsigned end1, unsigned start2, unsigned end2)
{
	void *kaddr = kmap_local_page(page);
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));
	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);
	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);
	kunmap_local(kaddr);
	for (i = 0; i < compound_nr(page); i++)
		flush_dcache_page(page + i);
}
#endif
static inline void zero_user_segment(struct page *page, unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page, unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
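/*
 * Illustrative usage sketch, not part of this header: zeroing everything
 * around a sub-page write at [from, from + len), as filesystems commonly
 * do for partially written pages. The helper name is hypothetical.
 */
static inline void example_zero_around(struct page *page,
				       unsigned from, unsigned len)
{
	/* Zero [0, from) and [from + len, PAGE_SIZE) in a single mapping. */
	zero_user_segments(page, 0, from, from + len, PAGE_SIZE);
}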
static inline void copy_user_highpage(struct page *to, struct page *from,
				      unsigned long vaddr, struct vm_area_struct *vma)

static inline void copy_highpage(struct page *to, struct page *from)
static inline void memcpy_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
static inline void memset_page(struct page *page, size_t offset, int val,
			       size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, val, len);
	kunmap_local(addr);
}
static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to, from + offset, len);
	kunmap_local(from);
}
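/*
 * Illustrative usage sketch, not part of this header: copying a small,
 * page-contained record out of a pagecache page into a kernel buffer.
 * The helper name is hypothetical; the caller must hold a page reference.
 */
static inline void example_read_record(struct page *page, size_t offset,
				       void *buf, size_t len)
{
	/* 'offset + len' must not cross the page boundary. */
	memcpy_from_page(buf, page, offset, len);
}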
static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to + offset, from, len);
	flush_dcache_page(page);
	kunmap_local(to);
}
static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, 0, len);
	flush_dcache_page(page);
	kunmap_local(addr);
}
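/*
 * Illustrative usage sketch, not part of this header: filling a page from
 * a caller-supplied buffer and zeroing whatever is left, split at the page
 * boundary as these helpers require. The helper name is hypothetical;
 * assumes 0 < buf_len <= PAGE_SIZE.
 */
static inline void example_fill_page(struct page *page,
				     const char *buf, size_t buf_len)
{
	size_t offset;

	for (offset = 0; offset + buf_len <= PAGE_SIZE; offset += buf_len)
		memcpy_to_page(page, offset, buf, buf_len);
	memzero_page(page, offset, PAGE_SIZE - offset);	/* zero the tail */
}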
 * folio_zero_segments() - Zero two byte ranges in a folio.
	zero_user_segments(&folio->page, start1, xend1, start2, xend2);

 * folio_zero_segment() - Zero a byte range in a folio.
	zero_user_segments(&folio->page, start, xend, 0, 0);

 * folio_zero_range() - Zero a byte range in a folio.
	zero_user_segments(&folio->page, start, start + length, 0, 0);
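/*
 * Illustrative usage sketch, not part of this header: a write that covers
 * [pos, pos + len) of a folio and wants the rest of the folio zeroed, e.g.
 * when a filesystem instantiates a folio beyond the written range. The
 * helper name is hypothetical.
 */
static inline void example_zero_outside_write(struct folio *folio,
					      size_t pos, size_t len)
{
	folio_zero_segments(folio, 0, pos, pos + len, folio_size(folio));
}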