// SPDX-License-Identifier: GPL-2.0
/*
 * Generic block device helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}
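
/*
 * Illustrative sketch (not part of this file): the pattern all helpers
 * below follow.  Each next_bio() call chains and submits the previous
 * bio, so the caller ends up holding only the tail of the chain;
 * bio_chain() propagates completion, so waiting on that one tail bio
 * waits for the whole chain.  The loop count here is hypothetical.
 *
 *	struct bio *bio = NULL;
 *	int i, ret;
 *
 *	for (i = 0; i < 4; i++) {
 *		bio = next_bio(bio, 0, GFP_KERNEL);
 *		// set bi_sector, bi_size, op and target device here
 *	}
 *	ret = submit_bio_wait(bio);	// returns when all 4 are done
 *	bio_put(bio);
 */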

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	while (nr_sects) {
		/*
		 * req_sects must stay sector_t: nr_sects can exceed 32
		 * bits, and truncating it to unsigned int would wrap
		 * for discards of 2 TiB and larger.
		 */
		sector_t req_sects = nr_sects;
		sector_t end_sect;

		if (!req_sects)
			goto fail;
		/* Keep bi_size (unsigned int, in bytes) from overflowing */
		if (req_sects > UINT_MAX >> 9)
			req_sects = UINT_MAX >> 9;

		end_sect = sector + req_sects;

		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;

fail:
	if (bio) {
		submit_bio_wait(bio);
		bio_put(bio);
	}
	*biop = NULL;
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
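
/*
 * Illustrative sketch of the *biop chaining contract (hypothetical
 * caller, not part of this file): several ranges can be queued into a
 * single bio chain and waited on once.  Error handling is elided.
 *
 *	struct bio *bio = NULL;
 *	int err;
 *
 *	__blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, 0, &bio);
 *	__blkdev_issue_discard(bdev, 4096, 2048, GFP_KERNEL, 0, &bio);
 *	if (bio) {
 *		err = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 */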

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
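
/*
 * Illustrative sketch (hypothetical caller): discard an entire device,
 * roughly what the BLKDISCARD ioctl path boils down to.  The size
 * computation assumes bd_inode is populated, as it is for an opened
 * block device.
 *
 *	sector_t nr_sects = i_size_read(bdev->bd_inode) >> 9;
 *	int err = blkdev_issue_discard(bdev, 0, nr_sects, GFP_KERNEL, 0);
 */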

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *  Generate and issue a number of REQ_OP_WRITE_SAME bios that all carry
 *  the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
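
/*
 * Illustrative sketch (hypothetical caller): replicate one page of
 * pattern data across 1 MiB using the WRITE SAME offload.  The
 * pattern_page variable is hypothetical; the call fails with
 * -EOPNOTSUPP if the device lacks support.
 *
 *	int err = blkdev_issue_write_same(bdev, 0, 2048, GFP_KERNEL,
 *					  pattern_page);
 */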

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}
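
/*
 * Worked example, assuming PAGE_SIZE == 4096 (8 sectors per page) and
 * BIO_MAX_PAGES == 256:
 *
 *	nr_sects = 7    -> DIV_ROUND_UP(7, 8)    = 1 page
 *	nr_sects = 4096 -> DIV_ROUND_UP(4096, 8) = 512, clamped to 256
 *
 * so one bio covers at most 256 * 8 = 2048 sectors (1 MiB) of zeroing
 * per iteration of the caller's loop.
 */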

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
			       gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
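
/*
 * Illustrative sketch (hypothetical caller): zero a range as part of a
 * caller-managed bio chain, preferring the WRITE ZEROES offload but
 * allowing the zero-page fallback, then wait once for the whole chain.
 *
 *	struct bio *bio = NULL;
 *	int err;
 *
 *	err = __blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL, &bio, 0);
 *	if (!err && bio) {
 *		err = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 */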

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
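
/*
 * Illustrative sketch (hypothetical caller): the common one-shot use,
 * close to what the BLKZEROOUT ioctl path boils down to.  Zero 1 MiB
 * at the start of the device and wait for completion:
 *
 *	int err = blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL, 0);
 *
 * Passing BLKDEV_ZERO_NOFALLBACK instead of 0 makes this fail with
 * -EOPNOTSUPP rather than fall back to writing ZERO_PAGE data.
 */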