Lines matching "page" and "offset" in include/linux/scatterlist.h (Linux kernel)
/* SPDX-License-Identifier: GPL-2.0 */
	unsigned int	offset;		/* member of struct scatterlist */
#define sg_dma_address(sg)	((sg)->dma_address)

#ifdef CONFIG_NEED_SG_DMA_LENGTH
#define sg_dma_len(sg)		((sg)->dma_length)
#else
#define sg_dma_len(sg)		((sg)->length)
#endif
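
A hedged usage sketch, not part of the header: after dma_map_sg() succeeds, a driver reads each mapped segment through the accessors above. The device, scatterlist and the function name below are assumptions made for illustration.

#include <linux/dma-mapping.h>
#include <linux/printk.h>
#include <linux/scatterlist.h>

static int walk_mapped_segments(struct device *dev, struct scatterlist *sgl,
				int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	for_each_sg(sgl, sg, mapped, i)		/* walk only the mapped entries */
		pr_debug("seg %d: dma %pad, len %u\n", i,
			 &sg_dma_address(sg), sg_dma_len(sg));

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);	/* unmap with the original nents */
	return 0;
}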
/*
 * We use the unsigned long page_link field in the scatterlist struct to place
 * the page pointer AND encode information about the sg table as well. The two
 * lower bits are reserved for this information.
 *
 * We overload the LSB of the page pointer to indicate whether it's
 * a valid sg entry, or whether it points to the start of a new scatterlist.
 * Those low bits are there for everyone! (thanks mason :-)
 */
	return sg->page_link & SG_PAGE_LINK_MASK;				/* in __sg_flags() */
	return (struct scatterlist *)(sg->page_link & ~SG_PAGE_LINK_MASK);	/* in sg_chain_ptr() */
/**
 * sg_assign_page - Assign a given page to an SG entry
 * @sg:   SG entry
 * @page: The page
 *
 * Assign page to sg entry. Also see sg_set_page(), the most commonly used
 * variant.
 */
static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
{
	unsigned long page_link = sg->page_link & (SG_CHAIN | SG_END);

	/* Low-bit stealing only works if pages are aligned to at least a 32-bit boundary. */
	BUG_ON((unsigned long)page & SG_PAGE_LINK_MASK);
	sg->page_link = page_link | (unsigned long) page;
}
/**
 * sg_set_page - Set sg entry to point at given page
 * @sg:     SG entry
 * @page:   The page
 * @len:    Length of data
 * @offset: Offset into page
 *
 * Use this function to set an sg entry pointing at a page, never assign
 * the page directly. We encode sg table information in the lower bits
 * of the page pointer. See sg_page() for looking up the page belonging
 * to an sg entry.
 */
static inline void sg_set_page(struct scatterlist *sg, struct page *page,
			       unsigned int len, unsigned int offset)
{
	sg_assign_page(sg, page);
	sg->offset = offset;
	sg->length = len;
}
static inline struct page *sg_page(struct scatterlist *sg)
{
	return (struct page *)((sg)->page_link & ~SG_PAGE_LINK_MASK);
}
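
A minimal sketch, not from the header, of filling a single entry and reading the page back; the helper name is invented for the example.

static void fill_single_entry(struct scatterlist *sg, struct page *page,
			      unsigned int len, unsigned int offset)
{
	sg_init_table(sg, 1);			/* zero the entry and mark it as the end */
	sg_set_page(sg, page, len, offset);	/* encode page, length and offset */
	WARN_ON(sg_page(sg) != page);		/* sg_page() masks the low flag bits off again */
}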
/* sg_set_buf - Set sg entry to point at given data */
#define for_each_sgtable_sg(sgt, sg, i)		\
	for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i)
#define for_each_sgtable_dma_sg(sgt, sg, i)	\
	for_each_sg((sgt)->sgl, sg, (sgt)->nents, i)
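
For illustration only, assuming a populated table (and, for the second helper, one already DMA-mapped): the plain iterator walks the orig_nents CPU-side entries, the _dma_ variant the possibly smaller nents. Both helper names are made up.

static size_t sgtable_total_len(struct sg_table *sgt)
{
	struct scatterlist *sg;
	size_t total = 0;
	int i;

	for_each_sgtable_sg(sgt, sg, i)		/* CPU view: sgt->orig_nents entries */
		total += sg->length;
	return total;
}

static size_t sgtable_dma_len(struct sg_table *sgt)
{
	struct scatterlist *sg;
	size_t total = 0;
	int i;

	for_each_sgtable_dma_sg(sgt, sg, i)	/* DMA view: sgt->nents entries */
		total += sg_dma_len(sg);
	return total;
}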
static inline void __sg_chain(struct scatterlist *chain_sg,
			      struct scatterlist *sgl)
{
	/* offset and length are unused for a chain entry. Clear them. */
	chain_sg->offset = 0;
	chain_sg->length = 0;
	/* Set the link bit and clear any stale termination bit. */
	chain_sg->page_link = ((unsigned long) sgl | SG_CHAIN) & ~SG_END;
}
/* sg_chain - Chain two sglists together */
	__sg_chain(&prv[prv_nents - 1], sgl);				/* in sg_chain() */
/* sg_mark_end - Mark the end of the scatterlist */
static inline void sg_mark_end(struct scatterlist *sg)
{
	/* Set the termination bit, clear a potential chain bit. */
	sg->page_link |= SG_END;
	sg->page_link &= ~SG_CHAIN;
}

/* sg_unmark_end - Undo setting the end of the scatterlist */
static inline void sg_unmark_end(struct scatterlist *sg)
{
	sg->page_link &= ~SG_END;
}
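
A hedged sketch of stitching two entry arrays together; it assumes the caller has reserved the last entry of the first array for the chain link, and the function name is made up.

static void chain_and_terminate(struct scatterlist *first, unsigned int first_nents,
				struct scatterlist *second, unsigned int second_nents)
{
	/* first[first_nents - 1] becomes the link entry and carries no data itself */
	sg_chain(first, first_nents, second);
	/* terminate the combined list at the last entry of the second array */
	sg_mark_end(&second[second_nents - 1]);
}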
/* sg_is_dma_bus_address - Return whether a given segment was marked as a bus address */
	return sg->dma_flags & SG_DMA_BUS_ADDRESS;		/* in sg_is_dma_bus_address() */

/* sg_dma_mark_bus_address - Mark the scatterlist entry as a bus address */
	sg->dma_flags |= SG_DMA_BUS_ADDRESS;			/* in sg_dma_mark_bus_address() */

/* sg_dma_unmark_bus_address - Unmark the scatterlist entry as a bus address */
	sg->dma_flags &= ~SG_DMA_BUS_ADDRESS;			/* in sg_dma_unmark_bus_address() */
/*
 * sg_phys - Return physical address of an sg entry
 * This calls page_to_phys() on the page in this sg entry, and adds the
 * sg offset. The caller must know that it is legal to call page_to_phys()
 * on the sg page.
 */
static inline dma_addr_t sg_phys(struct scatterlist *sg)
{
	return page_to_phys(sg_page(sg)) + sg->offset;
}

/*
 * sg_virt - Return virtual address of an sg entry
 * This calls page_address() on the page in this sg entry, and adds the
 * sg offset. The caller must know that the sg page has a valid virtual
 * mapping.
 */
static inline void *sg_virt(struct scatterlist *sg)
{
	return page_address(sg_page(sg)) + sg->offset;
}
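
As a small illustration, assuming the entry's page is in lowmem so that page_address() is valid; the helper name is invented.

static void zero_sg_entry(struct scatterlist *sg)
{
	memset(sg_virt(sg), 0, sg->length);	/* kernel virtual address of page + offset */
}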
/* sg_init_marker - Initialize markers in sg table */
	sg_mark_end(&sgl[nents - 1]);				/* in sg_init_marker() */
int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
		struct page **pages, unsigned int n_pages, unsigned int offset,
		unsigned long size, unsigned int max_segment,
		unsigned int left_pages, gfp_t gfp_mask);
int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
		unsigned int n_pages, unsigned int offset, unsigned long size,
		unsigned int max_segment, gfp_t gfp_mask);
/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:      The sg table header to use
 * @pages:    Pointer to an array of page pointers
 * @n_pages:  Number of pages in the pages array
 * @offset:   Offset from start of the first page to the start of a buffer
 * @size:     Number of valid bytes in the buffer (after offset)
 * @gfp_mask: GFP allocation mask
 *
 * Contiguous ranges of the pages are squashed into a single scatterlist
 * node. A user may provide an offset at a start and a size of valid data
 * in a buffer specified by the page array. The returned sg table is
 * released by sg_free_table().
 */
static inline int sg_alloc_table_from_pages(struct sg_table *sgt,
		struct page **pages, unsigned int n_pages,
		unsigned int offset, unsigned long size, gfp_t gfp_mask)
{
	return sg_alloc_table_from_pages_segment(sgt, pages, n_pages, offset,
						 size, UINT_MAX, gfp_mask);
}
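
A sketch of the common build-then-map pattern, not taken from the header; 'dev', 'pages', 'n_pages' and 'len' are assumed to be supplied by the caller, the function name is made up, and error handling is minimal.

static int map_page_array(struct device *dev, struct page **pages,
			  unsigned int n_pages, unsigned long len,
			  struct sg_table *sgt)
{
	int ret;

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 0, len, GFP_KERNEL);
	if (ret)
		return ret;

	/* dma_map_sgtable() comes from <linux/dma-mapping.h> */
	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		sg_free_table(sgt);
	return ret;
}

Because contiguous pages are squashed at allocation time and the DMA layer may merge segments further, sgt->orig_nents can be smaller than n_pages, and sgt->nents after mapping can be smaller still.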
/* From the comment above SG_CHUNK_SIZE: the segment count should ideally fit
 * inside a single page, to avoid a higher order allocation. We could define
 * this to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. */
/*
 * sg page iterator
 *
 * Iterates over sg entries page-by-page. On each successful iteration, you
 * can call sg_page_iter_page(@piter) to get the current page.
 * @piter->sg will point to the sg holding this page and @piter->sg_pgoffset to
 * the page's page offset within the sg. The iteration will stop either when a
 * maximum number of sg entries was reached or a terminating sg was reached.
 */
struct sg_page_iter {
	struct scatterlist	*sg;		/* sg holding the page */
	unsigned int		sg_pgoffset;	/* page offset within the sg */
	/* internal iteration state omitted here */
};
/* sg page iterator for DMA addresses (struct sg_dma_page_iter): the same as
 * sg_page_iter, but call sg_page_iter_dma_address(@dma_iter) to get the page's
 * DMA address; sg_page_iter_page() cannot be called on this iterator. */
/*
 * sg_page_iter_page - get the current page held by the page iterator
 * @piter: page iterator holding the page
 */
static inline struct page *sg_page_iter_page(struct sg_page_iter *piter)
{
	return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
}
/*
 * sg_page_iter_dma_address - get the dma address of the current page held by
 *			      the page iterator.
 * @dma_iter: page iterator holding the page
 */
static inline dma_addr_t sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
{
	return sg_dma_address(dma_iter->base.sg) +
	       (dma_iter->base.sg_pgoffset << PAGE_SHIFT);
}
/*
 * for_each_sg_page - iterate over the pages of the given sg list
 * @sglist:   sglist to iterate over
 * @piter:    page iterator to hold current page, sg, sg_pgoffset
 * @nents:    maximum number of sg entries to iterate over
 * @pgoffset: starting page offset (in pages)
 *
 * Callers may use sg_page_iter_page() to get each page pointer.
 */
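
A hedged sketch that simply counts PAGE_SIZE units in a list; real callers would use sg_page_iter_page() inside the loop body. The helper name is made up.

static unsigned int count_list_pages(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_page_iter piter;
	unsigned int npages = 0;

	for_each_sg_page(sgl, &piter, nents, 0)
		npages++;		/* sg_page_iter_page(&piter) would yield each page */
	return npages;
}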
/*
 * for_each_sg_dma_page - iterate over the pages of the given sg list
 * @sglist:    sglist to iterate over
 * @dma_iter:  DMA page iterator to hold current page
 * @dma_nents: maximum number of sg entries to iterate over
 * @pgoffset:  starting page offset (in pages)
 *
 * Callers may use sg_page_iter_dma_address() to get each page's DMA address.
 */
#define for_each_sg_dma_page(sglist, dma_iter, dma_nents, pgoffset)	\
	for (__sg_page_iter_start(&(dma_iter)->base, sglist, dma_nents,	\
				  pgoffset);				\
	     __sg_page_iter_dma_next(dma_iter);)
/*
 * for_each_sgtable_page - iterate over all pages in the sg_table object
 * @sgt:      sg_table object to iterate over
 * @piter:    page iterator to hold current page
 * @pgoffset: starting page offset (in pages)
 */
#define for_each_sgtable_page(sgt, piter, pgoffset)	\
	for_each_sg_page((sgt)->sgl, piter, (sgt)->orig_nents, pgoffset)
/*
 * for_each_sgtable_dma_page - iterate over the DMA mapped sg_table object
 * @sgt:      sg_table object to iterate over
 * @dma_iter: DMA page iterator to hold current page
 * @pgoffset: starting page offset (in pages)
 */
#define for_each_sgtable_dma_page(sgt, dma_iter, pgoffset)	\
	for_each_sg_dma_page((sgt)->sgl, dma_iter, (sgt)->nents, pgoffset)
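
Another sketch, assuming 'sgt' has already been mapped with dma_map_sgtable(); the function name is made up.

static void log_dma_pages(struct sg_table *sgt)
{
	struct sg_dma_page_iter dma_iter;
	dma_addr_t addr;

	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
		addr = sg_page_iter_dma_address(&dma_iter);
		pr_debug("DMA page at %pad\n", &addr);
	}
}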
/*
 * Mapping sg iterator: iterates over sg entries mapping page-by-page. On each
 * successful iteration, @miter->page points to the mapped page and
 * @miter->length bytes of data can be accessed at @miter->addr, as long as
 * the iteration is enclosed by sg_miter_start() and sg_miter_stop().
 * @miter->consumed is set to @miter->length on each iteration.
 */
struct sg_mapping_iter {
	/* the following fields can be accessed directly */
	struct page		*page;		/* currently mapped page */
	void			*addr;		/* pointer to the mapped area */
	size_t			length;		/* length of the mapped area */
	size_t			consumed;	/* number of consumed bytes */
	struct sg_page_iter	piter;		/* page iterator */
	/* internal state, keep away */
	unsigned int		__offset;	/* offset within page */
	unsigned int		__remaining;	/* remaining bytes on page */
	unsigned int		__flags;
};

bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset);
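
Finally, a hedged sketch of the mapping iterator copying a kernel buffer into a scatterlist; the helper name is made up, and in real code sg_copy_from_buffer() already provides this pattern.

static size_t copy_to_sgl(struct scatterlist *sgl, unsigned int nents,
			  const void *buf, size_t len)
{
	struct sg_mapping_iter miter;
	size_t copied = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
	while (copied < len && sg_miter_next(&miter)) {
		size_t n = min(miter.length, len - copied);

		memcpy(miter.addr, (const char *)buf + copied, n);
		copied += n;
	}
	sg_miter_stop(&miter);	/* unmaps and, for SG_MITER_TO_SG, dirties the last page */
	return copied;
}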