Lines matching "pre-filled"
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Network filesystem high-level buffered read support.
20 pgoff_t start_page = rreq->start / PAGE_SIZE; in netfs_rreq_unlock_folios()
21 pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1; in netfs_rreq_unlock_folios()
25 XA_STATE(xas, &rreq->mapping->i_pages, start_page); in netfs_rreq_unlock_folios()
27 if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) { in netfs_rreq_unlock_folios()
28 __clear_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags); in netfs_rreq_unlock_folios()
29 list_for_each_entry(subreq, &rreq->subrequests, rreq_link) { in netfs_rreq_unlock_folios()
30 __clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags); in netfs_rreq_unlock_folios()
40 subreq = list_first_entry(&rreq->subrequests, in netfs_rreq_unlock_folios()
42 subreq_failed = (subreq->error < 0); in netfs_rreq_unlock_folios()
54 pg_end = folio_pos(folio) + folio_size(folio) - 1; in netfs_rreq_unlock_folios()
63 if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) in netfs_rreq_unlock_folios()
66 sreq_end = subreq->start + subreq->len - 1; in netfs_rreq_unlock_folios()
70 account += subreq->transferred; in netfs_rreq_unlock_folios()
71 if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) { in netfs_rreq_unlock_folios()
73 subreq_failed = (subreq->error < 0); in netfs_rreq_unlock_folios()
88 if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) { in netfs_rreq_unlock_folios()
89 if (folio_index(folio) == rreq->no_unlock_folio && in netfs_rreq_unlock_folios()
90 test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags)) in netfs_rreq_unlock_folios()
99 if (rreq->netfs_ops->done) in netfs_rreq_unlock_folios()
100 rreq->netfs_ops->done(rreq); in netfs_rreq_unlock_folios()
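These matches come from netfs_rreq_unlock_folios(), which walks every folio spanned by the read request in lockstep with the ordered list of subrequests: a subrequest is accounted once the walk has passed its end, and a folio is only marked uptodate and unlocked once every subrequest overlapping it has been checked for failure. A minimal userspace model of that two-cursor walk follows; the struct, the byte ranges, and the assumption that a successful subrequest transferred its full length are all illustrative, not kernel API.

#include <stdio.h>

/* Folios and subrequests modelled as sorted, contiguous byte ranges. */
struct range { long long start, len; int error; };

int main(void)
{
        struct range folios[]  = { { 0, 4096, 0 }, { 4096, 4096, 0 } };
        struct range subreqs[] = { { 0, 2048, 0 }, { 2048, 6144, 0 } };
        size_t nf = sizeof(folios) / sizeof(folios[0]);
        size_t ns = sizeof(subreqs) / sizeof(subreqs[0]);
        size_t si = 0;
        int subreq_failed = subreqs[0].error < 0;
        long long account = 0;

        for (size_t fi = 0; fi < nf; fi++) {
                long long pg_end = folios[fi].start + folios[fi].len - 1;
                int pg_failed = 0;

                for (;;) {
                        long long sreq_end;

                        if (si >= ns) {         /* ran out of subrequests */
                                pg_failed = 1;
                                break;
                        }
                        pg_failed |= subreq_failed;
                        sreq_end = subreqs[si].start + subreqs[si].len - 1;
                        if (pg_end < sreq_end)  /* subreq spills into the next folio */
                                break;

                        account += subreqs[si].len;     /* fully consumed */
                        si++;
                        if (si < ns)
                                subreq_failed = subreqs[si].error < 0;
                        if (pg_end == sreq_end)
                                break;
                }
                printf("folio %zu: %s\n", fi,
                       pg_failed ? "leave !uptodate" : "mark uptodate and unlock");
        }
        printf("accounted %lld bytes\n", account);
        return 0;
}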
106 struct netfs_cache_resources *cres = &rreq->cache_resources; in netfs_cache_expand_readahead()
108 if (cres->ops && cres->ops->expand_readahead) in netfs_cache_expand_readahead()
109 cres->ops->expand_readahead(cres, _start, _len, i_size); in netfs_cache_expand_readahead()
118 netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size); in netfs_rreq_expand()
123 if (rreq->netfs_ops->expand_readahead) in netfs_rreq_expand()
124 rreq->netfs_ops->expand_readahead(rreq); in netfs_rreq_expand()
135 if (rreq->start != readahead_pos(ractl) || in netfs_rreq_expand()
136 rreq->len != readahead_length(ractl)) { in netfs_rreq_expand()
137 readahead_expand(ractl, rreq->start, rreq->len); in netfs_rreq_expand()
138 rreq->start = readahead_pos(ractl); in netfs_rreq_expand()
139 rreq->len = readahead_length(ractl); in netfs_rreq_expand()
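netfs_rreq_expand() widens the request in two stages before asking the page cache for the extra folios: first the cache gets a say (a cache typically wants reads rounded out to its block granularity), then the netfs itself (for instance to its preferred I/O size), and finally readahead_expand() is called, with rreq->start and rreq->len re-read afterwards because the page cache may not manage the full expansion. The sketch below shows the kind of rounding a cache's ->expand_readahead() hook might apply; the 256KiB granule and the clamp-to-i_size policy are assumptions for illustration, as the real policy belongs to the cache backend.

#include <stdio.h>

#define CACHE_GRANULE (256 * 1024LL)    /* assumed granule, illustrative */

/* Round the request out to whole cache granules, clamped to i_size. */
static void expand_readahead(long long *start, long long *len, long long i_size)
{
        long long end = *start + *len;

        *start = *start / CACHE_GRANULE * CACHE_GRANULE;        /* round down */
        end = (end + CACHE_GRANULE - 1) / CACHE_GRANULE * CACHE_GRANULE;
        if (end > i_size)
                end = i_size;
        *len = end - *start;
}

int main(void)
{
        long long start = 300 * 1024, len = 16 * 1024;

        expand_readahead(&start, &len, 10 * 1024 * 1024);
        printf("start=%lld len=%lld\n", start, len);    /* 256KiB..512KiB */
        return 0;
}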
147 * netfs_readahead - Helper to manage a read request
151 * the netfs if not. Space beyond the EOF is zero-filled. Multiple I/O
164 struct netfs_inode *ctx = netfs_inode(ractl->mapping->host); in netfs_readahead()
172 rreq = netfs_alloc_request(ractl->mapping, ractl->file, in netfs_readahead()
179 if (ctx->ops->begin_cache_operation) { in netfs_readahead()
180 ret = ctx->ops->begin_cache_operation(rreq); in netfs_readahead()
181 if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) in netfs_readahead()
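The match above is cut off at the error test; in the full helper the three errors shown are fatal and abandon the request, while any other return from the hook just means no cache is attached and the read falls through to the network. A condensed paraphrase of that shared step (the name begin_cache_if_present is made up here; the real helpers open-code this inline):

#include <linux/netfs.h>

/* Give the cache a chance to attach to the request. Only -ENOMEM,
 * -EINTR and -ERESTARTSYS are fatal; anything else means "no cache"
 * and the caller reads from the network filesystem instead.
 */
static int begin_cache_if_present(struct netfs_inode *ctx,
                                  struct netfs_io_request *rreq)
{
        int ret;

        if (!ctx->ops->begin_cache_operation)
                return 0;                       /* no cache configured */
        ret = ctx->ops->begin_cache_operation(rreq);
        if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
                return ret;                     /* fatal: caller cleans up */
        return 0;                               /* cacheless fallback */
}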
207 * netfs_read_folio - Helper to manage a read_folio request
212 * possible, or the netfs if not. Space beyond the EOF is zero-filled.
224 struct netfs_inode *ctx = netfs_inode(mapping->host); in netfs_read_folio()
237 if (ctx->ops->begin_cache_operation) { in netfs_read_folio()
238 ret = ctx->ops->begin_cache_operation(rreq); in netfs_read_folio()
239 if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) in netfs_read_folio()
244 trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage); in netfs_read_folio()
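Both netfs_readahead() and netfs_read_folio() are written so their signatures match the corresponding address_space_operations slots, and the in-tree netfs clients install them directly. A sketch for a hypothetical client ("myfs" is illustrative; real clients set further methods such as write_begin and release_folio as well):

#include <linux/fs.h>
#include <linux/netfs.h>

/* Delegate the buffered read paths to the netfs helpers. */
static const struct address_space_operations myfs_aops = {
        .read_folio     = netfs_read_folio,
        .readahead      = netfs_readahead,
};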
260 * @always_fill: T if the folio should always be completely filled/cleared
263 * - full folio write
264 * - write that lies in a folio that is completely beyond EOF
265 * - write that covers the folio from start to EOF or beyond it
279 if (pos - offset + len <= i_size) in netfs_skip_folio_read()
281 zero_user_segment(&folio->page, 0, plen); in netfs_skip_folio_read()
291 if (pos - offset >= i_size) in netfs_skip_folio_read()
300 zero_user_segments(&folio->page, 0, offset, offset + len, plen); in netfs_skip_folio_read()
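The three bullet points above are exactly what these fragments test: a write covering the whole folio needs no pre-read at all, and in the two EOF cases the helper merely zeroes the parts of the folio the write won't touch, which is what the two zero_user_segment{,s}() matches do. A condensed userspace model of the decision, assuming 4KiB folios and ignoring the always_fill path (which the real helper also handles):

#include <stdbool.h>
#include <stdio.h>

#define PLEN 4096LL     /* assumed folio size */

static bool can_skip_read(long long folio_pos, long long pos,
                          long long len, long long i_size)
{
        long long offset = pos - folio_pos;     /* write's offset in folio */

        if (offset == 0 && len >= PLEN)
                return true;    /* full folio write */
        if (folio_pos >= i_size)
                return true;    /* folio entirely beyond EOF */
        if (offset == 0 && pos + len >= i_size)
                return true;    /* covers folio start to EOF or beyond */
        return false;           /* partial write: folio must be pre-read */
}

int main(void)
{
        /* Folio at 8192, file size 9000 bytes. */
        printf("%d\n", can_skip_read(8192, 8192, 4096, 9000));  /* 1 */
        printf("%d\n", can_skip_read(12288, 12300, 100, 9000)); /* 1 */
        printf("%d\n", can_skip_read(8192, 8192, 900, 9000));   /* 1 */
        printf("%d\n", can_skip_read(8192, 8300, 100, 9000));   /* 0 */
        return 0;
}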
305 * netfs_write_begin - Helper to prepare for writing
314 * Pre-read data for a write-begin request by drawing data from the cache if
315 * possible, or the netfs if not. Space beyond the EOF is zero-filled.
330 * will cause the folio to be re-got and the process to be retried.
354 return -ENOMEM; in netfs_write_begin()
356 if (ctx->ops->check_write_begin) { in netfs_write_begin()
358 ret = ctx->ops->check_write_begin(file, pos, len, &folio, _fsdata); in netfs_write_begin()
370 /* If the page is beyond the EOF, we want to clear it - unless it's in netfs_write_begin()
387 rreq->no_unlock_folio = folio_index(folio); in netfs_write_begin()
388 __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags); in netfs_write_begin()
390 if (ctx->ops->begin_cache_operation) { in netfs_write_begin()
391 ret = ctx->ops->begin_cache_operation(rreq); in netfs_write_begin()
392 if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) in netfs_write_begin()
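Unlike the two read helpers, netfs_write_begin() is meant to be called from the filesystem's own ->write_begin() method, since the netfs usually has validation of its own to do around it. A minimal wrapper for the same hypothetical client follows; the helper's argument list here matches the kernel version these fragments come from, where the caller passes in the netfs_inode context and receives a locked, uptodate folio on success.

#include <linux/fs.h>
#include <linux/netfs.h>
#include <linux/pagemap.h>

/* Sketch: delegate the pre-read (or skip-and-zero) work for a buffered
 * write to netfs_write_begin(), then hand the folio's page back to the
 * generic write path.
 */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned int len,
                            struct page **pagep, void **fsdata)
{
        struct folio *folio;
        int ret;

        ret = netfs_write_begin(netfs_inode(mapping->host), file, mapping,
                                pos, len, &folio, fsdata);
        if (ret < 0)
                return ret;
        *pagep = &folio->page;
        return 0;
}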