// SPDX-License-Identifier: GPL-2.0
/*
 * Generic block device helper functions.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask, part_offset = 0;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	/* In case the discard granularity isn't set by a buggy device driver */
	if (WARN_ON_ONCE(!q->limits.discard_granularity)) {
		char dev_name[BDEVNAME_SIZE];

		bdevname(bdev, dev_name);
		pr_err_ratelimited("%s: Error: discard_granularity is 0.\n", dev_name);
		return -EOPNOTSUPP;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!nr_sects)
		return -EINVAL;

	/* In case the discard request is in a partition */
	if (bdev_is_partition(bdev))
		part_offset = bdev->bd_start_sect;

	while (nr_sects) {
		sector_t granularity_aligned_lba, req_sects;
		sector_t sector_mapped = sector + part_offset;

		granularity_aligned_lba = round_up(sector_mapped,
				q->limits.discard_granularity >> SECTOR_SHIFT);

		/*
		 * Check whether the discard bio starts at a discard_granularity
		 * aligned LBA:
		 * - If no: set (granularity_aligned_lba - sector_mapped) as the
		 *   bi_size of the first split bio, so that the second bio
		 *   starts at a discard_granularity aligned LBA on the device.
		 * - If yes: use bio_aligned_discard_max_sectors() as the max
		 *   possible bi_size of the first split bio. Then when this bio
		 *   is split by the device driver, the resulting bios are very
		 *   likely to be aligned to the discard_granularity of the
		 *   device's queue.
		 */
		if (granularity_aligned_lba == sector_mapped)
			req_sects = min_t(sector_t, nr_sects,
					  bio_aligned_discard_max_sectors(q));
		else
			req_sects = min_t(sector_t, nr_sects,
					  granularity_aligned_lba - sector_mapped);

		WARN_ON_ONCE((req_sects << 9) > UINT_MAX);

		bio = blk_next_bio(bio, bdev, 0, op, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here if someone does a
		 * full device discard (like mkfs). Be nice and allow
		 * us to schedule out to avoid a soft lockup if preemption
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
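
/*
 * Example (illustrative sketch, not part of this file): callers that need
 * to discard several ranges can chain the bios through the *biop anchor
 * under a single plug and submit once. "example_discard_two_ranges" is a
 * hypothetical helper.
 *
 *	static int example_discard_two_ranges(struct block_device *bdev)
 *	{
 *		struct bio *bio = NULL;
 *		struct blk_plug plug;
 *		int ret;
 *
 *		blk_start_plug(&plug);
 *		ret = __blkdev_issue_discard(bdev, 0, 8, GFP_KERNEL, 0, &bio);
 *		if (!ret)
 *			ret = __blkdev_issue_discard(bdev, 1024, 8, GFP_KERNEL,
 *						     0, &bio);
 *		if (!ret && bio) {
 *			ret = submit_bio_wait(bio);
 *			bio_put(bio);
 *		}
 *		blk_finish_plug(&plug);
 *		return ret;
 *	}
 */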

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
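
/*
 * Example (illustrative sketch): discard a 1 MiB range starting at sector
 * 2048 on a device with 512-byte sectors. "example_discard" is a
 * hypothetical caller, not part of this file.
 *
 *	static int example_discard(struct block_device *bdev)
 *	{
 *		return blkdev_issue_discard(bdev, 2048, 2048, GFP_KERNEL, 0);
 *	}
 *
 * 2048 sectors of 512 bytes each, i.e. 1 MiB, are discarded; on failure
 * the usual negative errno is returned.
 */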

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_VECS);
}
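
/*
 * Worked example for the helper above, assuming 4 KiB pages
 * (PAGE_SIZE / 512 == 8) and BIO_MAX_VECS == 256:
 *	nr_sects == 1    -> DIV_ROUND_UP(1, 8)    == 1 page
 *	nr_sects == 17   -> DIV_ROUND_UP(17, 8)   == 3 pages
 *	nr_sects == 4096 -> DIV_ROUND_UP(4096, 8) == 512, clamped to 256
 */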

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, bdev, __blkdev_sectors_to_bio_pages(nr_sects),
				   REQ_OP_WRITE, gfp_mask);
		bio->bi_iter.bi_sector = sector;

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue zero writes for
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *    Zero-fill a block range, either using hardware offload or by explicitly
 *    writing zeroes to the device.
 *
 *    If a device is using logical block provisioning, the underlying space
 *    will not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *    If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *    -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
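
/*
 * Example (illustrative sketch): the anchor-bio pattern for the async
 * variant. "example_zeroout_async" is hypothetical, not part of this file.
 *
 *	static int example_zeroout_async(struct block_device *bdev)
 *	{
 *		struct bio *bio = NULL;
 *		int ret;
 *
 *		ret = __blkdev_issue_zeroout(bdev, 0, 16, GFP_KERNEL, &bio,
 *					     BLKDEV_ZERO_NOUNMAP);
 *		if (!ret && bio) {
 *			ret = submit_bio_wait(bio);
 *			bio_put(bio);
 *		}
 *		return ret;
 *	}
 */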

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *    Zero-fill a block range, either using hardware offload or by explicitly
 *    writing zeroes to the device. See __blkdev_issue_zeroout() for the
 *    valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
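
/*
 * Example (illustrative): require the hardware offload and fail instead of
 * falling back to writing zero pages:
 *
 *	ret = blkdev_issue_zeroout(bdev, 0, 16, GFP_KERNEL,
 *				   BLKDEV_ZERO_NOFALLBACK);
 *
 * Here ret is -EOPNOTSUPP when the device lacks a working WRITE ZEROES
 * (or equivalent) offload.
 */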