Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Functions related to setting various queue properties from drivers
4 : */
5 : #include <linux/kernel.h>
6 : #include <linux/module.h>
7 : #include <linux/init.h>
8 : #include <linux/bio.h>
9 : #include <linux/blkdev.h>
10 : #include <linux/pagemap.h>
11 : #include <linux/backing-dev-defs.h>
12 : #include <linux/gcd.h>
13 : #include <linux/lcm.h>
14 : #include <linux/jiffies.h>
15 : #include <linux/gfp.h>
16 : #include <linux/dma-mapping.h>
17 :
18 : #include "blk.h"
19 : #include "blk-wbt.h"
20 :
21 0 : void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
22 : {
23 0 : q->rq_timeout = timeout;
24 0 : }
25 : EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
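/*
 * Illustrative sketch (not part of the original file): a hypothetical driver
 * giving the hardware 30 seconds to complete a request before the block
 * layer's timeout handling kicks in. The helper name is an assumption.
 */
static inline void example_set_request_timeout(struct request_queue *q)
{
	blk_queue_rq_timeout(q, 30 * HZ);	/* timeout is in jiffies */
}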
26 :
27 : /**
28 : * blk_set_default_limits - reset limits to default values
29 : * @lim: the queue_limits structure to reset
30 : *
31 : * Description:
32 : * Returns a queue_limit struct to its default state.
33 : */
34 0 : void blk_set_default_limits(struct queue_limits *lim)
35 : {
36 0 : lim->max_segments = BLK_MAX_SEGMENTS;
37 0 : lim->max_discard_segments = 1;
38 0 : lim->max_integrity_segments = 0;
39 0 : lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
40 0 : lim->virt_boundary_mask = 0;
41 0 : lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
42 0 : lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
43 0 : lim->max_dev_sectors = 0;
44 0 : lim->chunk_sectors = 0;
45 0 : lim->max_write_zeroes_sectors = 0;
46 0 : lim->max_zone_append_sectors = 0;
47 0 : lim->max_discard_sectors = 0;
48 0 : lim->max_hw_discard_sectors = 0;
49 0 : lim->discard_granularity = 0;
50 0 : lim->discard_alignment = 0;
51 0 : lim->discard_misaligned = 0;
52 0 : lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
53 0 : lim->bounce = BLK_BOUNCE_NONE;
54 0 : lim->alignment_offset = 0;
55 0 : lim->io_opt = 0;
56 0 : lim->misaligned = 0;
57 0 : lim->zoned = BLK_ZONED_NONE;
58 0 : lim->zone_write_granularity = 0;
59 0 : }
60 : EXPORT_SYMBOL(blk_set_default_limits);
61 :
62 : /**
63 : * blk_set_stacking_limits - set default limits for stacking devices
64 : * @lim: the queue_limits structure to reset
65 : *
66 : * Description:
67 : * Returns a queue_limit struct to its default state. Should be used
68 : * by stacking drivers like DM that have no internal limits.
69 : */
70 0 : void blk_set_stacking_limits(struct queue_limits *lim)
71 : {
72 0 : blk_set_default_limits(lim);
73 :
74 : /* Inherit limits from component devices */
75 0 : lim->max_segments = USHRT_MAX;
76 0 : lim->max_discard_segments = USHRT_MAX;
77 0 : lim->max_hw_sectors = UINT_MAX;
78 0 : lim->max_segment_size = UINT_MAX;
79 0 : lim->max_sectors = UINT_MAX;
80 0 : lim->max_dev_sectors = UINT_MAX;
81 0 : lim->max_write_zeroes_sectors = UINT_MAX;
82 0 : lim->max_zone_append_sectors = UINT_MAX;
83 0 : }
84 : EXPORT_SYMBOL(blk_set_stacking_limits);
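/*
 * Illustrative sketch (not part of the original file): how a bio-based
 * stacking driver might start from wide-open limits before folding in its
 * component devices with blk_stack_limits()/disk_stack_limits().
 */
static inline void example_init_stacking_limits(struct request_queue *q)
{
	/* Start permissive; stacking narrows these per component device. */
	blk_set_stacking_limits(&q->limits);
}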
85 :
86 : /**
87 : * blk_queue_bounce_limit - set bounce buffer limit for queue
88 : * @q: the request queue for the device
89 : * @bounce: bounce limit to enforce
90 : *
91 : * Description:
92 : * Force bouncing for ISA DMA ranges or highmem.
93 : *
94 : * DEPRECATED, don't use in new code.
95 : **/
96 0 : void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
97 : {
98 0 : q->limits.bounce = bounce;
99 0 : }
100 : EXPORT_SYMBOL(blk_queue_bounce_limit);
101 :
102 : /**
103 : * blk_queue_max_hw_sectors - set max sectors for a request for this queue
104 : * @q: the request queue for the device
105 : * @max_hw_sectors: max hardware sectors in the usual 512b unit
106 : *
107 : * Description:
108 : * Enables a low level driver to set a hard upper limit,
109 : * max_hw_sectors, on the size of requests. max_hw_sectors is set by
110 : * the device driver based upon the capabilities of the I/O
111 : * controller.
112 : *
113 : * max_dev_sectors is a hard limit imposed by the storage device for
114 : * READ/WRITE requests. It is set by the disk driver.
115 : *
116 : * max_sectors is a soft limit imposed by the block layer for
117 : * filesystem type requests. This value can be overridden on a
118 : * per-device basis in /sys/block/<device>/queue/max_sectors_kb.
119 : * The soft limit cannot exceed max_hw_sectors.
120 : **/
121 0 : void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
122 : {
123 0 : struct queue_limits *limits = &q->limits;
124 : unsigned int max_sectors;
125 :
126 0 : if ((max_hw_sectors << 9) < PAGE_SIZE) {
127 0 : max_hw_sectors = 1 << (PAGE_SHIFT - 9);
128 0 : printk(KERN_INFO "%s: set to minimum %u\n",
129 : __func__, max_hw_sectors);
130 : }
131 :
132 0 : max_hw_sectors = round_down(max_hw_sectors,
133 : limits->logical_block_size >> SECTOR_SHIFT);
134 0 : limits->max_hw_sectors = max_hw_sectors;
135 :
136 0 : max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
137 0 : max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
138 0 : max_sectors = round_down(max_sectors,
139 : limits->logical_block_size >> SECTOR_SHIFT);
140 0 : limits->max_sectors = max_sectors;
141 :
142 0 : if (!q->disk)
143 : return;
144 0 : q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
145 : }
146 : EXPORT_SYMBOL(blk_queue_max_hw_sectors);
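/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * controller that can transfer at most 1 MiB per command. Sectors are the
 * usual 512 B units, so 2048 sectors = 1 MiB.
 */
static inline void example_limit_transfer_size(struct request_queue *q)
{
	blk_queue_max_hw_sectors(q, 2048);	/* 2048 * 512 B = 1 MiB */
}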
147 :
148 : /**
149 : * blk_queue_chunk_sectors - set size of the chunk for this queue
150 : * @q: the request queue for the device
151 : * @chunk_sectors: chunk sectors in the usual 512b unit
152 : *
153 : * Description:
154 : * If a driver doesn't want IOs to cross a given chunk size, it can set
155 : * this limit and prevent merging across chunks. Note that the block layer
156 : * must accept a page worth of data at any offset. So if the crossing of
157 : * chunks is a hard limitation in the driver, it must still be prepared
158 : * to split single page bios.
159 : **/
160 0 : void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
161 : {
162 0 : q->limits.chunk_sectors = chunk_sectors;
163 0 : }
164 : EXPORT_SYMBOL(blk_queue_chunk_sectors);
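/*
 * Illustrative sketch (not part of the original file): a hypothetical zoned
 * device with 256 MiB zones that must not be crossed by a single request.
 */
static inline void example_set_chunk_boundary(struct request_queue *q)
{
	blk_queue_chunk_sectors(q, 256 * 2048);	/* 256 MiB in 512 B sectors */
}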
165 :
166 : /**
167 : * blk_queue_max_discard_sectors - set max sectors for a single discard
168 : * @q: the request queue for the device
169 : * @max_discard_sectors: maximum number of sectors to discard
170 : **/
171 0 : void blk_queue_max_discard_sectors(struct request_queue *q,
172 : unsigned int max_discard_sectors)
173 : {
174 0 : q->limits.max_hw_discard_sectors = max_discard_sectors;
175 0 : q->limits.max_discard_sectors = max_discard_sectors;
176 0 : }
177 : EXPORT_SYMBOL(blk_queue_max_discard_sectors);
178 :
179 : /**
180 : * blk_queue_max_write_zeroes_sectors - set max sectors for a single
181 : * write zeroes
182 : * @q: the request queue for the device
183 : * @max_write_zeroes_sectors: maximum number of sectors to write per command
184 : **/
185 0 : void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
186 : unsigned int max_write_zeroes_sectors)
187 : {
188 0 : q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
189 0 : }
190 : EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
191 :
192 : /**
193 : * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
194 : * @q: the request queue for the device
195 : * @max_zone_append_sectors: maximum number of sectors to write per command
196 : **/
197 0 : void blk_queue_max_zone_append_sectors(struct request_queue *q,
198 : unsigned int max_zone_append_sectors)
199 : {
200 : unsigned int max_sectors;
201 :
202 0 : if (WARN_ON(!blk_queue_is_zoned(q)))
203 : return;
204 :
205 : max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
206 : max_sectors = min(q->limits.chunk_sectors, max_sectors);
207 :
208 : /*
209 : * Warn about potential driver bugs that leave the max_zone_append_sectors
210 : * limit at 0: a 0 argument, an unset chunk_sectors limit (zone size), or an
211 : * unset max_hw_sectors limit.
212 : */
213 : WARN_ON(!max_sectors);
214 :
215 : q->limits.max_zone_append_sectors = max_sectors;
216 : }
217 : EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
218 :
219 : /**
220 : * blk_queue_max_segments - set max hw segments for a request for this queue
221 : * @q: the request queue for the device
222 : * @max_segments: max number of segments
223 : *
224 : * Description:
225 : * Enables a low level driver to set an upper limit on the number of
226 : * hw data segments in a request.
227 : **/
228 0 : void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
229 : {
230 0 : if (!max_segments) {
231 0 : max_segments = 1;
232 0 : printk(KERN_INFO "%s: set to minimum %d\n",
233 : __func__, max_segments);
234 : }
235 :
236 0 : q->limits.max_segments = max_segments;
237 0 : }
238 : EXPORT_SYMBOL(blk_queue_max_segments);
239 :
240 : /**
241 : * blk_queue_max_discard_segments - set max segments for discard requests
242 : * @q: the request queue for the device
243 : * @max_segments: max number of segments
244 : *
245 : * Description:
246 : * Enables a low level driver to set an upper limit on the number of
247 : * segments in a discard request.
248 : **/
249 0 : void blk_queue_max_discard_segments(struct request_queue *q,
250 : unsigned short max_segments)
251 : {
252 0 : q->limits.max_discard_segments = max_segments;
253 0 : }
254 : EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
255 :
256 : /**
257 : * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
258 : * @q: the request queue for the device
259 : * @max_size: max size of segment in bytes
260 : *
261 : * Description:
262 : * Enables a low level driver to set an upper limit on the size of a
263 : * coalesced segment
264 : **/
265 0 : void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
266 : {
267 0 : if (max_size < PAGE_SIZE) {
268 0 : max_size = PAGE_SIZE;
269 0 : printk(KERN_INFO "%s: set to minimum %u\n",
270 : __func__, max_size);
271 : }
272 :
273 : /* see blk_queue_virt_boundary() for the explanation */
274 0 : WARN_ON_ONCE(q->limits.virt_boundary_mask);
275 :
276 0 : q->limits.max_segment_size = max_size;
277 0 : }
278 : EXPORT_SYMBOL(blk_queue_max_segment_size);
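/*
 * Illustrative sketch (not part of the original file): a hypothetical HBA
 * whose scatter/gather table has 128 entries of at most 64 KiB each.
 */
static inline void example_set_sg_limits(struct request_queue *q)
{
	blk_queue_max_segments(q, 128);
	blk_queue_max_segment_size(q, 64 * 1024);
}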
279 :
280 : /**
281 : * blk_queue_logical_block_size - set logical block size for the queue
282 : * @q: the request queue for the device
283 : * @size: the logical block size, in bytes
284 : *
285 : * Description:
286 : * This should be set to the lowest possible block size that the
287 : * storage device can address. The default of 512 covers most
288 : * hardware.
289 : **/
290 0 : void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
291 : {
292 0 : struct queue_limits *limits = &q->limits;
293 :
294 0 : limits->logical_block_size = size;
295 :
296 0 : if (limits->physical_block_size < size)
297 0 : limits->physical_block_size = size;
298 :
299 0 : if (limits->io_min < limits->physical_block_size)
300 0 : limits->io_min = limits->physical_block_size;
301 :
302 0 : limits->max_hw_sectors =
303 0 : round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
304 0 : limits->max_sectors =
305 0 : round_down(limits->max_sectors, size >> SECTOR_SHIFT);
306 0 : }
307 : EXPORT_SYMBOL(blk_queue_logical_block_size);
308 :
309 : /**
310 : * blk_queue_physical_block_size - set physical block size for the queue
311 : * @q: the request queue for the device
312 : * @size: the physical block size, in bytes
313 : *
314 : * Description:
315 : * This should be set to the lowest possible sector size that the
316 : * hardware can operate on without reverting to read-modify-write
317 : * operations.
318 : */
319 0 : void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
320 : {
321 0 : q->limits.physical_block_size = size;
322 :
323 0 : if (q->limits.physical_block_size < q->limits.logical_block_size)
324 0 : q->limits.physical_block_size = q->limits.logical_block_size;
325 :
326 0 : if (q->limits.io_min < q->limits.physical_block_size)
327 0 : q->limits.io_min = q->limits.physical_block_size;
328 0 : }
329 : EXPORT_SYMBOL(blk_queue_physical_block_size);
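/*
 * Illustrative sketch (not part of the original file): a hypothetical 4Kn
 * drive that both addresses and writes in 4096-byte sectors.
 */
static inline void example_set_block_sizes(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 4096);
	blk_queue_physical_block_size(q, 4096);
}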
330 :
331 : /**
332 : * blk_queue_zone_write_granularity - set zone write granularity for the queue
333 : * @q: the request queue for the zoned device
334 : * @size: the zone write granularity size, in bytes
335 : *
336 : * Description:
337 : * This should be set to the lowest possible size allowing writes in the
338 : * sequential zones of a zoned block device.
339 : */
340 0 : void blk_queue_zone_write_granularity(struct request_queue *q,
341 : unsigned int size)
342 : {
343 0 : if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
344 : return;
345 :
346 : q->limits.zone_write_granularity = size;
347 :
348 : if (q->limits.zone_write_granularity < q->limits.logical_block_size)
349 : q->limits.zone_write_granularity = q->limits.logical_block_size;
350 : }
351 : EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);
352 :
353 : /**
354 : * blk_queue_alignment_offset - set physical block alignment offset
355 : * @q: the request queue for the device
356 : * @offset: alignment offset in bytes
357 : *
358 : * Description:
359 : * Some devices are naturally misaligned to compensate for things like
360 : * the legacy DOS partition table 63-sector offset. Low-level drivers
361 : * should call this function for devices whose first sector is not
362 : * naturally aligned.
363 : */
364 0 : void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
365 : {
366 0 : q->limits.alignment_offset =
367 0 : offset & (q->limits.physical_block_size - 1);
368 0 : q->limits.misaligned = 0;
369 0 : }
370 : EXPORT_SYMBOL(blk_queue_alignment_offset);
371 :
372 0 : void disk_update_readahead(struct gendisk *disk)
373 : {
374 0 : struct request_queue *q = disk->queue;
375 :
376 : /*
377 : * For read-ahead of large files to be effective, we need to read ahead
378 : * at least twice the optimal I/O size.
379 : */
380 0 : disk->bdi->ra_pages =
381 0 : max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
382 0 : disk->bdi->io_pages = queue_max_sectors(q) >> (PAGE_SHIFT - 9);
383 0 : }
384 : EXPORT_SYMBOL_GPL(disk_update_readahead);
385 :
386 : /**
387 : * blk_limits_io_min - set minimum request size for a device
388 : * @limits: the queue limits
389 : * @min: smallest I/O size in bytes
390 : *
391 : * Description:
392 : * Some devices have an internal block size bigger than the reported
393 : * hardware sector size. This function can be used to signal the
394 : * smallest I/O the device can perform without incurring a performance
395 : * penalty.
396 : */
397 0 : void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
398 : {
399 0 : limits->io_min = min;
400 :
401 0 : if (limits->io_min < limits->logical_block_size)
402 0 : limits->io_min = limits->logical_block_size;
403 :
404 0 : if (limits->io_min < limits->physical_block_size)
405 0 : limits->io_min = limits->physical_block_size;
406 0 : }
407 : EXPORT_SYMBOL(blk_limits_io_min);
408 :
409 : /**
410 : * blk_queue_io_min - set minimum request size for the queue
411 : * @q: the request queue for the device
412 : * @min: smallest I/O size in bytes
413 : *
414 : * Description:
415 : * Storage devices may report a granularity or preferred minimum I/O
416 : * size which is the smallest request the device can perform without
417 : * incurring a performance penalty. For disk drives this is often the
418 : * physical block size. For RAID arrays it is often the stripe chunk
419 : * size. A properly aligned multiple of minimum_io_size is the
420 : * preferred request size for workloads where a high number of I/O
421 : * operations is desired.
422 : */
423 0 : void blk_queue_io_min(struct request_queue *q, unsigned int min)
424 : {
425 0 : blk_limits_io_min(&q->limits, min);
426 0 : }
427 : EXPORT_SYMBOL(blk_queue_io_min);
428 :
429 : /**
430 : * blk_limits_io_opt - set optimal request size for a device
431 : * @limits: the queue limits
432 : * @opt: optimal I/O size in bytes
433 : *
434 : * Description:
435 : * Storage devices may report an optimal I/O size, which is the
436 : * device's preferred unit for sustained I/O. This is rarely reported
437 : * for disk drives. For RAID arrays it is usually the stripe width or
438 : * the internal track size. A properly aligned multiple of
439 : * optimal_io_size is the preferred request size for workloads where
440 : * sustained throughput is desired.
441 : */
442 0 : void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
443 : {
444 0 : limits->io_opt = opt;
445 0 : }
446 : EXPORT_SYMBOL(blk_limits_io_opt);
447 :
448 : /**
449 : * blk_queue_io_opt - set optimal request size for the queue
450 : * @q: the request queue for the device
451 : * @opt: optimal request size in bytes
452 : *
453 : * Description:
454 : * Storage devices may report an optimal I/O size, which is the
455 : * device's preferred unit for sustained I/O. This is rarely reported
456 : * for disk drives. For RAID arrays it is usually the stripe width or
457 : * the internal track size. A properly aligned multiple of
458 : * optimal_io_size is the preferred request size for workloads where
459 : * sustained throughput is desired.
460 : */
461 0 : void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
462 : {
463 0 : blk_limits_io_opt(&q->limits, opt);
464 0 : if (!q->disk)
465 : return;
466 0 : q->disk->bdi->ra_pages =
467 0 : max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
468 : }
469 : EXPORT_SYMBOL(blk_queue_io_opt);
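/*
 * Illustrative sketch (not part of the original file): I/O hints for a
 * hypothetical striped array with a 64 KiB chunk and four data disks, so the
 * minimum efficient I/O is one chunk and the optimal I/O is one full stripe.
 */
static inline void example_set_raid_io_hints(struct request_queue *q)
{
	blk_queue_io_min(q, 64 * 1024);		/* chunk size */
	blk_queue_io_opt(q, 4 * 64 * 1024);	/* stripe width */
}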
470 :
471 : static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
472 : {
473 0 : sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
474 0 : if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
475 0 : sectors = PAGE_SIZE >> SECTOR_SHIFT;
476 : return sectors;
477 : }
478 :
479 : /**
480 : * blk_stack_limits - adjust queue_limits for stacked devices
481 : * @t: the stacking driver limits (top device)
482 : * @b: the underlying queue limits (bottom, component device)
483 : * @start: first data sector within component device
484 : *
485 : * Description:
486 : * This function is used by stacking drivers like MD and DM to ensure
487 : * that all component devices have compatible block sizes and
488 : * alignments. The stacking driver must provide a queue_limits
489 : * struct (top) and then iteratively call the stacking function for
490 : * all component (bottom) devices. The stacking function will
491 : * attempt to combine the values and ensure proper alignment.
492 : *
493 : * Returns 0 if the top and bottom queue_limits are compatible. The
494 : * top device's block sizes and alignment offsets may be adjusted to
495 : * ensure alignment with the bottom device. If no compatible sizes
496 : * and alignments exist, -1 is returned and the resulting top
497 : * queue_limits will have the misaligned flag set to indicate that
498 : * the alignment_offset is undefined.
499 : */
500 0 : int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
501 : sector_t start)
502 : {
503 0 : unsigned int top, bottom, alignment, ret = 0;
504 :
505 0 : t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
506 0 : t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
507 0 : t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
508 0 : t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
509 : b->max_write_zeroes_sectors);
510 0 : t->max_zone_append_sectors = min(t->max_zone_append_sectors,
511 : b->max_zone_append_sectors);
512 0 : t->bounce = max(t->bounce, b->bounce);
513 :
514 0 : t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
515 : b->seg_boundary_mask);
516 0 : t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
517 : b->virt_boundary_mask);
518 :
519 0 : t->max_segments = min_not_zero(t->max_segments, b->max_segments);
520 0 : t->max_discard_segments = min_not_zero(t->max_discard_segments,
521 : b->max_discard_segments);
522 0 : t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
523 : b->max_integrity_segments);
524 :
525 0 : t->max_segment_size = min_not_zero(t->max_segment_size,
526 : b->max_segment_size);
527 :
528 0 : t->misaligned |= b->misaligned;
529 :
530 0 : alignment = queue_limit_alignment_offset(b, start);
531 :
532 : /* Bottom device has different alignment. Check that it is
533 : * compatible with the current top alignment.
534 : */
535 0 : if (t->alignment_offset != alignment) {
536 :
537 0 : top = max(t->physical_block_size, t->io_min)
538 : + t->alignment_offset;
539 0 : bottom = max(b->physical_block_size, b->io_min) + alignment;
540 :
541 : /* Verify that top and bottom intervals line up */
542 0 : if (max(top, bottom) % min(top, bottom)) {
543 0 : t->misaligned = 1;
544 0 : ret = -1;
545 : }
546 : }
547 :
548 0 : t->logical_block_size = max(t->logical_block_size,
549 : b->logical_block_size);
550 :
551 0 : t->physical_block_size = max(t->physical_block_size,
552 : b->physical_block_size);
553 :
554 0 : t->io_min = max(t->io_min, b->io_min);
555 0 : t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
556 :
557 : /* Set non-power-of-2 compatible chunk_sectors boundary */
558 0 : if (b->chunk_sectors)
559 0 : t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);
560 :
561 : /* Physical block size a multiple of the logical block size? */
562 0 : if (t->physical_block_size & (t->logical_block_size - 1)) {
563 0 : t->physical_block_size = t->logical_block_size;
564 0 : t->misaligned = 1;
565 0 : ret = -1;
566 : }
567 :
568 : /* Minimum I/O a multiple of the physical block size? */
569 0 : if (t->io_min & (t->physical_block_size - 1)) {
570 0 : t->io_min = t->physical_block_size;
571 0 : t->misaligned = 1;
572 0 : ret = -1;
573 : }
574 :
575 : /* Optimal I/O a multiple of the physical block size? */
576 0 : if (t->io_opt & (t->physical_block_size - 1)) {
577 0 : t->io_opt = 0;
578 0 : t->misaligned = 1;
579 0 : ret = -1;
580 : }
581 :
582 : /* chunk_sectors a multiple of the physical block size? */
583 0 : if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
584 0 : t->chunk_sectors = 0;
585 0 : t->misaligned = 1;
586 0 : ret = -1;
587 : }
588 :
589 0 : t->raid_partial_stripes_expensive =
590 0 : max(t->raid_partial_stripes_expensive,
591 : b->raid_partial_stripes_expensive);
592 :
593 : /* Find lowest common alignment_offset */
594 0 : t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
595 0 : % max(t->physical_block_size, t->io_min);
596 :
597 : /* Verify that new alignment_offset is on a logical block boundary */
598 0 : if (t->alignment_offset & (t->logical_block_size - 1)) {
599 0 : t->misaligned = 1;
600 0 : ret = -1;
601 : }
602 :
603 0 : t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
604 0 : t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
605 0 : t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
606 :
607 : /* Discard alignment and granularity */
608 0 : if (b->discard_granularity) {
609 0 : alignment = queue_limit_discard_alignment(b, start);
610 :
611 0 : if (t->discard_granularity != 0 &&
612 0 : t->discard_alignment != alignment) {
613 0 : top = t->discard_granularity + t->discard_alignment;
614 0 : bottom = b->discard_granularity + alignment;
615 :
616 : /* Verify that top and bottom intervals line up */
617 0 : if ((max(top, bottom) % min(top, bottom)) != 0)
618 0 : t->discard_misaligned = 1;
619 : }
620 :
621 0 : t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
622 : b->max_discard_sectors);
623 0 : t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
624 : b->max_hw_discard_sectors);
625 0 : t->discard_granularity = max(t->discard_granularity,
626 : b->discard_granularity);
627 0 : t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
628 0 : t->discard_granularity;
629 : }
630 :
631 0 : t->zone_write_granularity = max(t->zone_write_granularity,
632 : b->zone_write_granularity);
633 0 : t->zoned = max(t->zoned, b->zoned);
634 0 : return ret;
635 : }
636 : EXPORT_SYMBOL(blk_stack_limits);
637 :
638 : /**
639 : * disk_stack_limits - adjust queue limits for stacked drivers
640 : * @disk: MD/DM gendisk (top)
641 : * @bdev: the underlying block device (bottom)
642 : * @offset: offset to beginning of data within component device
643 : *
644 : * Description:
645 : * Merges the limits for a top level gendisk and a bottom level
646 : * block_device.
647 : */
648 0 : void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
649 : sector_t offset)
650 : {
651 0 : struct request_queue *t = disk->queue;
652 :
653 0 : if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
654 0 : get_start_sect(bdev) + (offset >> 9)) < 0)
655 0 : pr_notice("%s: Warning: Device %pg is misaligned\n",
656 : disk->disk_name, bdev);
657 :
658 0 : disk_update_readahead(disk);
659 0 : }
660 : EXPORT_SYMBOL(disk_stack_limits);
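/*
 * Illustrative sketch (not part of the original file): the per-component call
 * a stacking driver might make while building its table, assuming the
 * component's data starts at byte offset @offset within @component.
 */
static inline void example_stack_component(struct gendisk *disk,
					   struct block_device *component,
					   sector_t offset)
{
	disk_stack_limits(disk, component, offset);
}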
661 :
662 : /**
663 : * blk_queue_update_dma_pad - update pad mask
664 : * @q: the request queue for the device
665 : * @mask: pad mask
666 : *
667 : * Update dma pad mask.
668 : *
669 : * Appending pad buffer to a request modifies the last entry of a
670 : * scatter list such that it includes the pad buffer.
671 : **/
672 0 : void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
673 : {
674 0 : if (mask > q->dma_pad_mask)
675 0 : q->dma_pad_mask = mask;
676 0 : }
677 : EXPORT_SYMBOL(blk_queue_update_dma_pad);
678 :
679 : /**
680 : * blk_queue_segment_boundary - set boundary rules for segment merging
681 : * @q: the request queue for the device
682 : * @mask: the memory boundary mask
683 : **/
684 0 : void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
685 : {
686 0 : if (mask < PAGE_SIZE - 1) {
687 0 : mask = PAGE_SIZE - 1;
688 0 : printk(KERN_INFO "%s: set to minimum %lx\n",
689 : __func__, mask);
690 : }
691 :
692 0 : q->limits.seg_boundary_mask = mask;
693 0 : }
694 : EXPORT_SYMBOL(blk_queue_segment_boundary);
695 :
696 : /**
697 : * blk_queue_virt_boundary - set boundary rules for bio merging
698 : * @q: the request queue for the device
699 : * @mask: the memory boundary mask
700 : **/
701 0 : void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
702 : {
703 0 : q->limits.virt_boundary_mask = mask;
704 :
705 : /*
706 : * Devices that require a virtual boundary do not support scatter/gather
707 : * I/O natively, but instead require a descriptor list entry for each
708 : * page (which might not be identical to the Linux PAGE_SIZE). Because
709 : * of that they are not limited by our notion of "segment size".
710 : */
711 0 : if (mask)
712 0 : q->limits.max_segment_size = UINT_MAX;
713 0 : }
714 : EXPORT_SYMBOL(blk_queue_virt_boundary);
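/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * NVMe-style controller whose descriptor-list entries may not cross a 4 KiB
 * device page boundary.
 */
static inline void example_set_device_page_boundary(struct request_queue *q)
{
	blk_queue_virt_boundary(q, 4096 - 1);
}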
715 :
716 : /**
717 : * blk_queue_dma_alignment - set dma length and memory alignment
718 : * @q: the request queue for the device
719 : * @mask: alignment mask
720 : *
721 : * Description:
722 : * Set the required memory and length alignment for direct DMA transactions.
723 : * This is used when building direct I/O requests for the queue.
724 : *
725 : **/
726 0 : void blk_queue_dma_alignment(struct request_queue *q, int mask)
727 : {
728 0 : q->dma_alignment = mask;
729 0 : }
730 : EXPORT_SYMBOL(blk_queue_dma_alignment);
731 :
732 : /**
733 : * blk_queue_update_dma_alignment - update dma length and memory alignment
734 : * @q: the request queue for the device
735 : * @mask: alignment mask
736 : *
737 : * Description:
738 : * Update the required memory and length alignment for direct DMA transactions.
739 : * If the requested alignment is larger than the current alignment, then
740 : * the current queue alignment is updated to the new value, otherwise it
741 : * is left alone. The design of this is to allow multiple objects
742 : * (driver, device, transport etc) to set their respective
743 : * alignments without having them interfere.
744 : *
745 : **/
746 0 : void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
747 : {
748 0 : BUG_ON(mask > PAGE_SIZE);
749 :
750 0 : if (mask > q->dma_alignment)
751 0 : q->dma_alignment = mask;
752 0 : }
753 : EXPORT_SYMBOL(blk_queue_update_dma_alignment);
754 :
755 : /**
756 : * blk_set_queue_depth - tell the block layer about the device queue depth
757 : * @q: the request queue for the device
758 : * @depth: queue depth
759 : *
760 : */
761 0 : void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
762 : {
763 0 : q->queue_depth = depth;
764 0 : rq_qos_queue_depth_changed(q);
765 0 : }
766 : EXPORT_SYMBOL(blk_set_queue_depth);
767 :
768 : /**
769 : * blk_queue_write_cache - configure queue's write cache
770 : * @q: the request queue for the device
771 : * @wc: write back cache on or off
772 : * @fua: device supports FUA writes, if true
773 : *
774 : * Tell the block layer about the write cache of @q.
775 : */
776 0 : void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
777 : {
778 0 : if (wc)
779 0 : blk_queue_flag_set(QUEUE_FLAG_WC, q);
780 : else
781 0 : blk_queue_flag_clear(QUEUE_FLAG_WC, q);
782 0 : if (fua)
783 0 : blk_queue_flag_set(QUEUE_FLAG_FUA, q);
784 : else
785 0 : blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
786 :
787 0 : wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
788 0 : }
789 : EXPORT_SYMBOL_GPL(blk_queue_write_cache);
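/*
 * Illustrative sketch (not part of the original file): a hypothetical device
 * that reports a volatile write cache and support for FUA writes.
 */
static inline void example_report_write_cache(struct request_queue *q)
{
	blk_queue_write_cache(q, true, true);
}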
790 :
791 : /**
792 : * blk_queue_required_elevator_features - Set a queue's required elevator features
793 : * @q: the request queue for the target device
794 : * @features: Required elevator features OR'ed together
795 : *
796 : * Tell the block layer that for the device controlled through @q, the only
797 : * elevators that can be used are those that implement at least the set of
798 : * features specified by @features.
799 : */
800 0 : void blk_queue_required_elevator_features(struct request_queue *q,
801 : unsigned int features)
802 : {
803 0 : q->required_elevator_features = features;
804 0 : }
805 : EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);
806 :
807 : /**
808 : * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
809 : * @q: the request queue for the device
810 : * @dev: the device pointer for dma
811 : *
812 : * Tell the block layer that the segments of @q can be merged using the DMA map merge boundary of @dev.
813 : */
814 0 : bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
815 : struct device *dev)
816 : {
817 0 : unsigned long boundary = dma_get_merge_boundary(dev);
818 :
819 0 : if (!boundary)
820 : return false;
821 :
822 : /* No need to update max_segment_size. see blk_queue_virt_boundary() */
823 0 : blk_queue_virt_boundary(q, boundary);
824 :
825 0 : return true;
826 : }
827 : EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
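/*
 * Illustrative sketch (not part of the original file): letting the IOMMU
 * merge segments when @dev (assumed to be the DMA-capable parent device)
 * exposes a merge boundary; nothing changes when it does not.
 */
static inline void example_enable_dma_map_merging(struct request_queue *q,
						  struct device *dev)
{
	if (!blk_queue_can_use_dma_map_merging(q, dev))
		pr_debug("%s: no DMA map merge boundary\n", dev_name(dev));
}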
828 :
829 : static bool disk_has_partitions(struct gendisk *disk)
830 : {
831 : unsigned long idx;
832 : struct block_device *part;
833 : bool ret = false;
834 :
835 : rcu_read_lock();
836 : xa_for_each(&disk->part_tbl, idx, part) {
837 : if (bdev_is_partition(part)) {
838 : ret = true;
839 : break;
840 : }
841 : }
842 : rcu_read_unlock();
843 :
844 : return ret;
845 : }
846 :
847 : /**
848 : * blk_queue_set_zoned - configure a disk queue zoned model.
849 : * @disk: the gendisk of the queue to configure
850 : * @model: the zoned model to set
851 : *
852 : * Set the zoned model of the request queue of @disk according to @model.
853 : * When @model is BLK_ZONED_HM (host managed), this should be called only
854 : * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
855 : * If @model specifies BLK_ZONED_HA (host aware), the effective model used
856 : * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
857 : * on the disk.
858 : */
859 0 : void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
860 : {
861 0 : struct request_queue *q = disk->queue;
862 :
863 0 : switch (model) {
864 : case BLK_ZONED_HM:
865 : /*
866 : * Host managed devices are supported only if
867 : * CONFIG_BLK_DEV_ZONED is enabled.
868 : */
869 0 : WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
870 : break;
871 : case BLK_ZONED_HA:
872 : /*
873 : * Host aware devices can be treated either as regular block
874 : * devices (similar to drive managed devices) or as zoned block
875 : * devices to take advantage of the zone command set, similarly
876 : * to host managed devices. We try the latter if there are no
877 : * partitions and zoned block device support is enabled, else
878 : * we do nothing special as far as the block layer is concerned.
879 : */
880 : if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
881 : disk_has_partitions(disk))
882 : model = BLK_ZONED_NONE;
883 : break;
884 : case BLK_ZONED_NONE:
885 : default:
886 0 : if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
887 0 : model = BLK_ZONED_NONE;
888 : break;
889 : }
890 :
891 0 : q->limits.zoned = model;
892 0 : if (model != BLK_ZONED_NONE) {
893 : /*
894 : * Set the zone write granularity to the device logical block
895 : * size by default. The driver can change this value if needed.
896 : */
897 0 : blk_queue_zone_write_granularity(q,
898 : queue_logical_block_size(q));
899 : } else {
900 : blk_queue_clear_zone_settings(q);
901 : }
902 0 : }
903 : EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
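/*
 * Illustrative sketch (not part of the original file): a hypothetical driver
 * for a host-managed SMR disk declaring its zoned model; this only has full
 * effect when CONFIG_BLK_DEV_ZONED is enabled.
 */
static inline void example_mark_host_managed(struct gendisk *disk)
{
	blk_queue_set_zoned(disk, BLK_ZONED_HM);
}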