// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

/**
 * blk_queue_rq_timeout - set request timeout
 * @q: the request queue for the device
 * @timeout: timeout value in jiffies
 *
 * Sets the timeout for a request to @timeout.
 **/
void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim: the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	memset(lim, 0, sizeof(*lim));
	lim->logical_block_size = SECTOR_SIZE;
	lim->physical_block_size = SECTOR_SIZE;
	lim->io_min = SECTOR_SIZE;
	lim->discard_granularity = SECTOR_SIZE;
	lim->dma_alignment = SECTOR_SIZE - 1;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_zone_append_sectors = UINT_MAX;
	lim->max_user_discard_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
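
/*
 * Illustrative sketch (not part of this file): how a stacking driver might
 * combine the limits of two component devices on top of the permissive
 * defaults from blk_set_stacking_limits(). The function name and the use of
 * exactly two components are hypothetical.
 */
static int __maybe_unused example_stack_two_limits(struct queue_limits *t,
		struct block_device *b0, struct block_device *b1)
{
	int ret = 0;

	blk_set_stacking_limits(t);
	/* Each call tightens *t to stay compatible with the component. */
	ret |= blk_stack_limits(t, &bdev_get_queue(b0)->limits,
				get_start_sect(b0));
	ret |= blk_stack_limits(t, &bdev_get_queue(b1)->limits,
				get_start_sect(b1));
	return ret ? -1 : 0;	/* -1: no compatible alignment exists */
}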

static void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim)
{
	/*
	 * For read-ahead of large files to be effective, we need to read ahead
	 * at least twice the optimal I/O size.
	 */
	bdi->ra_pages = max(lim->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}

static int blk_validate_zoned_limits(struct queue_limits *lim)
{
	if (!lim->zoned) {
		if (WARN_ON_ONCE(lim->max_open_zones) ||
		    WARN_ON_ONCE(lim->max_active_zones) ||
		    WARN_ON_ONCE(lim->zone_write_granularity) ||
		    WARN_ON_ONCE(lim->max_zone_append_sectors))
			return -EINVAL;
		return 0;
	}

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
		return -EINVAL;

	if (lim->zone_write_granularity < lim->logical_block_size)
		lim->zone_write_granularity = lim->logical_block_size;

	if (lim->max_zone_append_sectors) {
		/*
		 * The Zone Append size is limited by the maximum I/O size
		 * and the zone size given that it can't span zones.
		 */
		lim->max_zone_append_sectors =
			min3(lim->max_hw_sectors,
			     lim->max_zone_append_sectors,
			     lim->chunk_sectors);
	}

	return 0;
}

/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
static int blk_validate_limits(struct queue_limits *lim)
{
	unsigned int max_hw_sectors;
	unsigned int logical_block_sectors;

	/*
	 * Unless otherwise specified, default to 512 byte logical blocks and a
	 * physical block size equal to the logical block size.
	 */
	if (!lim->logical_block_size)
		lim->logical_block_size = SECTOR_SIZE;
	if (lim->physical_block_size < lim->logical_block_size)
		lim->physical_block_size = lim->logical_block_size;

	/*
	 * The minimum I/O size defaults to the physical block size unless
	 * explicitly overridden.
	 */
	if (lim->io_min < lim->physical_block_size)
		lim->io_min = lim->physical_block_size;

	/*
	 * max_hw_sectors has a somewhat weird default for historical reasons,
	 * but drivers really should set their own instead of relying on this
	 * value.
	 *
	 * The block layer relies on the fact that every driver can
	 * handle at least a page worth of data per I/O, and needs the value
	 * aligned to the logical block size.
	 */
	if (!lim->max_hw_sectors)
		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
		return -EINVAL;
	logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
	if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
		return -EINVAL;
	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
			logical_block_sectors);

	/*
	 * The actual max_sectors value is a complex beast and also takes the
	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
	 * value into account. The ->max_sectors value is always calculated
	 * from these, so directly setting it won't have any effect.
	 */
	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
				lim->max_dev_sectors);
	if (lim->max_user_sectors) {
		if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
			return -EINVAL;
		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
	} else {
		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
	}
	lim->max_sectors = round_down(lim->max_sectors,
			logical_block_sectors);

	/*
	 * Random default for the maximum number of segments. Drivers should
	 * not rely on this and should set their own.
	 */
	if (!lim->max_segments)
		lim->max_segments = BLK_MAX_SEGMENTS;

	lim->max_discard_sectors =
		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

	if (!lim->max_discard_segments)
		lim->max_discard_segments = 1;

	if (lim->discard_granularity < lim->physical_block_size)
		lim->discard_granularity = lim->physical_block_size;

	/*
	 * By default there is no limit on the segment boundary alignment,
	 * but if there is one it can't be smaller than the page size as
	 * that would break all the normal I/O patterns.
	 */
	if (!lim->seg_boundary_mask)
		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
		return -EINVAL;

	/*
	 * A stacking device may have both a virtual boundary and a max
	 * segment size limit, so allow both to be set for now. Long term
	 * the two might need to move out of the stacking limits, since
	 * immutable bvecs and lower-layer bio splitting are supposed to
	 * handle both correctly.
	 */
	if (lim->virt_boundary_mask) {
		if (!lim->max_segment_size)
			lim->max_segment_size = UINT_MAX;
	} else {
		/*
		 * The maximum segment size has an odd historic 64k default that
		 * drivers probably should override. Just like the I/O size we
		 * require drivers to at least handle a full page per segment.
		 */
		if (!lim->max_segment_size)
			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
		if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
			return -EINVAL;
	}

	/*
	 * We require drivers to at least do logical block aligned I/O, but
	 * historically could not check for that due to the separate calls
	 * to set the limits. Once the transition is finished the check
	 * below should be narrowed down to check the logical block size.
	 */
	if (!lim->dma_alignment)
		lim->dma_alignment = SECTOR_SIZE - 1;
	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
		return -EINVAL;

	if (lim->alignment_offset) {
		lim->alignment_offset &= (lim->physical_block_size - 1);
		lim->misaligned = 0;
	}

	return blk_validate_zoned_limits(lim);
}

/*
 * Set the default limits for a newly allocated queue. @lim contains the
 * initial limits set by the driver, which could be no limit in which case
 * all fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
	/*
	 * Most defaults are set by capping the bounds in blk_validate_limits,
	 * but max_user_discard_sectors is special and needs an explicit
	 * initialization to the max value here.
	 */
	lim->max_user_discard_sectors = UINT_MAX;
	return blk_validate_limits(lim);
}

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim)
	__releases(q->limits_lock)
{
	int error = blk_validate_limits(lim);

	if (!error) {
		q->limits = *lim;
		if (q->disk)
			blk_apply_bdi_limits(q->disk->bdi, lim);
	}
	mutex_unlock(&q->limits_lock);
	return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);
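
/*
 * Illustrative sketch (not part of this file): the intended update pattern.
 * queue_limits_start_update() takes limits_lock and returns a copy of the
 * current limits; the commit validates and applies them. The discard cap
 * value is hypothetical.
 */
static int __maybe_unused example_update_discard_limit(struct request_queue *q)
{
	struct queue_limits lim = queue_limits_start_update(q);

	lim.max_hw_discard_sectors = 2048;	/* 1 MiB in 512b sectors */
	return queue_limits_commit_update(q, &lim);
}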

/**
 * queue_limits_set - apply queue limits to queue
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
	mutex_lock(&q->limits_lock);
	return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);
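
/*
 * Illustrative sketch (not part of this file): applying freshly initialized
 * limits to a queue, e.g. during driver probe. The 4096 byte block size and
 * 1 MiB transfer cap are hypothetical.
 */
static int __maybe_unused example_apply_fresh_limits(struct request_queue *q)
{
	struct queue_limits lim = { };

	lim.logical_block_size = 4096;
	lim.max_hw_sectors = 2048;	/* 1 MiB in 512b sectors */
	return queue_limits_set(q, &lim);
}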

/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q: the request queue for the device
 * @chunk_sectors: chunk sectors in the usual 512b unit
 *
 * Description:
 * If a driver doesn't want IOs to cross a given chunk size, it can set
 * this limit and prevent merging across chunks. Note that the block layer
 * must accept a page worth of data at any offset. So if the crossing of
 * chunks is a hard limitation in the driver, it must still be prepared
 * to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
	q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q: the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	struct queue_limits *lim = &q->limits;

	lim->max_hw_discard_sectors = max_discard_sectors;
	lim->max_discard_sectors =
		min(max_discard_sectors, lim->max_user_discard_sectors);
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_secure_erase_sectors - set max sectors for a secure erase
 * @q: the request queue for the device
 * @max_sectors: maximum number of sectors to secure_erase
 **/
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
		unsigned int max_sectors)
{
	q->limits.max_secure_erase_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_secure_erase_sectors);

/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes
 * @q: the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors)
{
	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);

/**
 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
 * @q: the request queue for the device
 * @max_zone_append_sectors: maximum number of sectors to write per command
 *
 * Sets the maximum number of sectors allowed for zone append commands.
 * Specifying 0 for @max_zone_append_sectors indicates that the queue does
 * not natively support zone append operations and that the block layer must
 * emulate these operations using regular writes.
 **/
void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors)
{
	unsigned int max_sectors = 0;

	if (WARN_ON(!blk_queue_is_zoned(q)))
		return;

	if (max_zone_append_sectors) {
		max_sectors = min(q->limits.max_hw_sectors,
				  max_zone_append_sectors);
		max_sectors = min(q->limits.chunk_sectors, max_sectors);

		/*
		 * Signal eventual driver bugs resulting in the max_zone_append
		 * sectors limit being 0 due to the chunk_sectors limit (zone
		 * size) not set or the max_hw_sectors limit not set.
		 */
		WARN_ON_ONCE(!max_sectors);
	}

	q->limits.max_zone_append_sectors = max_sectors;
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q: the request queue for the device
 * @size: the logical block size, in bytes
 *
 * Description:
 * This should be set to the lowest possible block size that the
 * storage device can address. The default of 512 covers most
 * hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
	struct queue_limits *limits = &q->limits;

	limits->logical_block_size = size;

	if (limits->discard_granularity < limits->logical_block_size)
		limits->discard_granularity = limits->logical_block_size;

	if (limits->physical_block_size < size)
		limits->physical_block_size = size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;

	limits->max_hw_sectors =
		round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
	limits->max_sectors =
		round_down(limits->max_sectors, size >> SECTOR_SHIFT);
}
EXPORT_SYMBOL(blk_queue_logical_block_size);
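
/*
 * Worked example (illustrative): starting from 512 byte defaults, a call to
 * blk_queue_logical_block_size(q, 4096) also raises physical_block_size,
 * io_min and discard_granularity to 4096, and rounds max_hw_sectors and
 * max_sectors down to a multiple of 8 sectors.
 */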

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q: the request queue for the device
 * @size: the physical block size, in bytes
 *
 * Description:
 * This should be set to the lowest possible sector size that the
 * hardware can operate on without reverting to read-modify-write
 * operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.discard_granularity < q->limits.physical_block_size)
		q->limits.discard_granularity = q->limits.physical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_zone_write_granularity - set zone write granularity for the queue
 * @q: the request queue for the zoned device
 * @size: the zone write granularity size, in bytes
 *
 * Description:
 * This should be set to the lowest possible size allowing to write in
 * sequential zones of a zoned block device.
 */
void blk_queue_zone_write_granularity(struct request_queue *q,
		unsigned int size)
{
	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return;

	q->limits.zone_write_granularity = size;

	if (q->limits.zone_write_granularity < q->limits.logical_block_size)
		q->limits.zone_write_granularity = q->limits.logical_block_size;
}
EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q: the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 * Some devices are naturally misaligned to compensate for things like
 * the legacy DOS partition table 63-sector offset. Low-level drivers
 * should call this function for devices whose first sector is not
 * naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

void disk_update_readahead(struct gendisk *disk)
{
	blk_apply_bdi_limits(disk->bdi, &disk->queue->limits);
}
EXPORT_SYMBOL_GPL(disk_update_readahead);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min: smallest I/O size in bytes
 *
 * Description:
 * Some devices have an internal block size bigger than the reported
 * hardware sector size. This function can be used to signal the
 * smallest I/O the device can perform without incurring a performance
 * penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q: the request queue for the device
 * @min: smallest I/O size in bytes
 *
 * Description:
 * Storage devices may report a granularity or preferred minimum I/O
 * size which is the smallest request the device can perform without
 * incurring a performance penalty. For disk drives this is often the
 * physical block size. For RAID arrays it is often the stripe chunk
 * size. A properly aligned multiple of minimum_io_size is the
 * preferred request size for workloads where a high number of I/O
 * operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt: optimal I/O size in bytes
 *
 * Description:
 * Storage devices may report an optimal I/O size, which is the
 * device's preferred unit for sustained I/O. This is rarely reported
 * for disk drives. For RAID arrays it is usually the stripe width or
 * the internal track size. A properly aligned multiple of
 * optimal_io_size is the preferred request size for workloads where
 * sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

static int queue_limit_alignment_offset(const struct queue_limits *lim,
		sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}
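
/*
 * Worked example (illustrative): with physical_block_size = io_min = 4096
 * (granularity of 8 sectors), alignment_offset = 0 and sector = 63,
 * sector_div() leaves a remainder of 7 sectors (3584 bytes), so the result
 * is (4096 + 0 - 3584) % 4096 = 512: the first aligned boundary lies 512
 * bytes past the partition start at byte 63 * 512 = 32256, i.e. byte 32768.
 */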

static unsigned int queue_limit_discard_alignment(
		const struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << SECTOR_SHIFT;
}
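
/*
 * Worked example (illustrative): with discard_granularity = 1 MiB (2048
 * sectors), discard_alignment = 0 and a partition starting at sector 34,
 * the remainder is 34 sectors, so the result is (2048 + 0 - 34) % 2048 =
 * 2014 sectors (returned as bytes): the first discard-aligned boundary sits
 * 2014 sectors into the partition, at device sector 2048.
 */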

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @b: the underlying queue limits (bottom, component device)
 * @start: first data sector within component device
 *
 * Description:
 * This function is used by stacking drivers like MD and DM to ensure
 * that all component devices have compatible block sizes and
 * alignments. The stacking driver must provide a queue_limits
 * struct (top) and then iteratively call the stacking function for
 * all component (bottom) devices. The stacking function will
 * attempt to combine the values and ensure proper alignment.
 *
 * Returns 0 if the top and bottom queue_limits are compatible. The
 * top device's block sizes and alignment offsets may be adjusted to
 * ensure alignment with the bottom device. If no compatible sizes
 * and alignments exist, -1 is returned and the resulting top
 * queue_limits will have the misaligned flag set to indicate that
 * the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_user_sectors = min_not_zero(t->max_user_sectors,
			b->max_user_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					  b->max_write_zeroes_sectors);
	t->max_zone_append_sectors = min(queue_limits_max_zone_append_sectors(t),
					 queue_limits_max_zone_append_sectors(b));
	t->bounce = max(t->bounce, b->bounce);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					     b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/*
	 * Bottom device has different alignment. Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
		t->chunk_sectors = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}
	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
						   b->max_secure_erase_sectors);
	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	t->zoned = max(t->zoned, b->zoned);
	if (!t->zoned) {
		t->zone_write_granularity = 0;
		t->max_zone_append_sectors = 0;
	}
	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @bdev: the underlying block device (bottom)
 * @offset: offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 * This function is used by stacking drivers like MD and DM to ensure
 * that all component devices have compatible block sizes and
 * alignments. The stacking driver must provide a queue_limits
 * struct (top) and then iteratively call the stacking function for
 * all component (bottom) devices. The stacking function will
 * attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx)
{
	if (blk_stack_limits(t, &bdev_get_queue(bdev)->limits,
			get_start_sect(bdev) + offset))
		pr_notice("%s: Warning: Device %pg is misaligned\n",
			pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);
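
/*
 * Illustrative sketch (not part of this file): stacking every component of
 * an array through the bdev wrapper. The helper name, the zero data offsets
 * and the "example" warning prefix are hypothetical.
 */
static void __maybe_unused example_stack_bdevs(struct queue_limits *t,
		struct block_device **bdevs, int nr)
{
	int i;

	blk_set_stacking_limits(t);
	for (i = 0; i < nr; i++)
		queue_limits_stack_bdev(t, bdevs[i], 0, "example");
}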

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q: the request queue for the device
 * @mask: pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q: the request queue for the device
 * @depth: queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

/**
 * blk_queue_write_cache - configure queue's write cache
 * @q: the request queue for the device
 * @wc: write back cache on or off
 * @fua: device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
	if (wc) {
		blk_queue_flag_set(QUEUE_FLAG_HW_WC, q);
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	} else {
		blk_queue_flag_clear(QUEUE_FLAG_HW_WC, q);
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	}
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
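
/*
 * Illustrative sketch (not part of this file): a driver for hardware with a
 * volatile writeback cache but no FUA support would typically call this
 * once during probe.
 */
static void __maybe_unused example_enable_write_cache(struct request_queue *q)
{
	blk_queue_write_cache(q, true, false);	/* writeback cache, no FUA */
}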

/**
 * disk_set_zoned - indicate a zoned device
 * @disk: gendisk to configure
 */
void disk_set_zoned(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));

	/*
	 * Set the zone write granularity to the device logical block
	 * size by default. The driver can change this value if needed.
	 */
	q->limits.zoned = true;
	blk_queue_zone_write_granularity(q, queue_logical_block_size(q));
}
EXPORT_SYMBOL_GPL(disk_set_zoned);
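
/*
 * Illustrative sketch (not part of this file): minimal zoned setup for a
 * driver with 256 MiB zones that natively supports zone append. The zone
 * size is hypothetical; chunk_sectors must be set before the zone append
 * limit so the capping against the zone size works.
 */
static void __maybe_unused example_setup_zoned(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	disk_set_zoned(disk);
	blk_queue_chunk_sectors(q, 256 * 2048);	/* zone size in 512b sectors */
	blk_queue_max_zone_append_sectors(q, 256 * 2048);
}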

int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;
	if (bdev_is_partition(bdev))
		return queue_limit_alignment_offset(&q->limits,
				bdev->bd_start_sect);
	return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev_is_partition(bdev))
		return queue_limit_discard_alignment(&q->limits,
				bdev->bd_start_sect);
	return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);