// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"

/*
 * Mark a hardware queue as needing a restart.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	/*
	 * Order clearing SCHED_RESTART against the list_empty_careful() check
	 * of hctx->dispatch in blk_mq_run_hw_queue(). Its pair is the barrier
	 * in blk_mq_dispatch_rq_list(). Without it, the dispatch code might
	 * miss SCHED_RESTART while, at the same time, a new request added to
	 * hctx->dispatch is missed by the check in blk_mq_run_hw_queue().
	 */
	smp_mb();

	blk_mq_run_hw_queue(hctx, true);
}

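/*
 * list_sort() comparison callback: order requests by their hardware queue
 * pointer so that requests mapped to the same hctx end up adjacent in the
 * list and can be dispatched as one batch.
 */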
static int sched_rq_cmp(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return rqa->mq_hctx > rqb->mq_hctx;
}

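/*
 * Cut the leading requests that map to the same hctx as the first entry off
 * @rq_list and dispatch them as one batch via blk_mq_dispatch_rq_list();
 * requests mapping to a different hctx are left on @rq_list for later rounds.
 */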
static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
	struct blk_mq_hw_ctx *hctx =
		list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
	struct request *rq;
	LIST_HEAD(hctx_list);
	unsigned int count = 0;

	list_for_each_entry(rq, rq_list, queuelist) {
		if (rq->mq_hctx != hctx) {
			list_cut_before(&hctx_list, rq_list, &rq->queuelist);
			goto dispatch;
		}
		count++;
	}
	list_splice_tail_init(rq_list, &hctx_list);

dispatch:
	return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}

#define BLK_MQ_BUDGET_DELAY	3		/* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart the queue if .get_budget() fails to get the budget.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	bool multi_hctxs = false, run_queue = false;
	bool dispatched = false, busy = false;
	unsigned int max_dispatch;
	LIST_HEAD(rq_list);
	int count = 0;

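	/*
	 * Pull at most one request per round while the hardware queue is
	 * flagged as dispatch-busy; otherwise batch up to nr_requests.
	 */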
	if (hctx->dispatch_busy)
		max_dispatch = 1;
	else
		max_dispatch = hctx->queue->nr_requests;

	do {
		struct request *rq;
		int budget_token;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!list_empty_careful(&hctx->dispatch)) {
			busy = true;
			break;
		}

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			run_queue = true;
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add_tail(&rq->queuelist, &rq_list);
		count++;
		if (rq->mq_hctx != hctx)
			multi_hctxs = true;

		/*
		 * If we cannot get a tag for the request, stop dequeueing
		 * requests from the IO scheduler. We are unlikely to be able
		 * to submit them anyway and it creates a false impression for
		 * scheduling heuristics that the device can take more IO.
		 */
		if (!blk_mq_get_driver_tag(rq))
			break;
	} while (count < max_dispatch);

	if (!count) {
		if (run_queue)
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
	} else if (multi_hctxs) {
		/*
		 * Requests from different hctxs may be dequeued from some
		 * schedulers, such as bfq and deadline.
		 *
		 * Sort the requests in the list according to their hctx so
		 * that requests belonging to the same hctx can be dispatched
		 * as one batch at a time.
		 */
		list_sort(NULL, &rq_list, sched_rq_cmp);
		do {
			dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
		} while (!list_empty(&rq_list));
	} else {
		dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
	}

	if (busy)
		return -EAGAIN;
	return !!dispatched;
}

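/*
 * Keep running __blk_mq_do_dispatch_sched() for as long as it dispatches
 * work, but stop after roughly one second or when rescheduling is needed;
 * in that case punt further dispatch to an asynchronous queue run so one
 * hardware queue cannot monopolize the CPU.
 */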
static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	unsigned long end = jiffies + HZ;
	int ret;

	do {
		ret = __blk_mq_do_dispatch_sched(hctx);
		if (ret != 1)
			break;
		if (need_resched() || time_is_before_jiffies(end)) {
			blk_mq_delay_run_hw_queue(hctx, 0);
			break;
		}
	} while (1);

	return ret;
}

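/*
 * Return the software queue that follows @ctx in @hctx's ctx array, wrapping
 * around to the first one; used for round-robin dispatch across sw queues.
 */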
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart the queue if .get_budget() fails to get the budget.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
	int ret = 0;
	struct request *rq;

	do {
		int budget_token;

		if (!list_empty_careful(&hctx->dispatch)) {
			ret = -EAGAIN;
			break;
		}

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

	WRITE_ONCE(hctx->dispatch_from, ctx);
	return ret;
}

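/*
 * Dispatch requests for one hardware queue: flush anything left on
 * hctx->dispatch first, then pull new work from the elevator or from the
 * software queues. Returns -EAGAIN if hctx->dispatch was found non-empty,
 * in which case the caller has to run the queue again.
 */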
static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	bool need_dispatch = false;
	LIST_HEAD(rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them first
	 * for more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (!blk_mq_dispatch_rq_list(hctx, &rq_list, 0))
			return 0;
		need_dispatch = true;
	} else {
		need_dispatch = hctx->dispatch_busy;
	}

	if (hctx->queue->elevator)
		return blk_mq_do_dispatch_sched(hctx);

	/* dequeue requests one by one from the sw queue if the queue is busy */
	if (need_dispatch)
		return blk_mq_do_dispatch_ctx(hctx);
	blk_mq_flush_busy_ctxs(hctx, &rq_list);
	blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
	return 0;
}

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	/*
	 * A return of -EAGAIN is an indication that hctx->dispatch is not
	 * empty and we must run again in order to avoid starving flushes.
	 */
	if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
		if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
			blk_mq_run_hw_queue(hctx, true);
	}
}

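/*
 * Try to merge @bio into an already-queued request, either through the
 * elevator's ->bio_merge() hook or, when no elevator is attached, by a
 * bounded reverse scan of the per-ctx software queue. Returns true if the
 * bio was merged and no new request is needed.
 */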
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge) {
		ret = e->type->ops.bio_merge(q, bio, nr_segs);
		goto out_put;
	}

	ctx = blk_mq_get_ctx(q);
	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	type = hctx->type;
	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
	    list_empty_careful(&ctx->rq_lists[type]))
		goto out_put;

	/* default per sw-queue merge */
	spin_lock(&ctx->lock);
	/*
	 * Reverse check our software queue for entries that we could
	 * potentially merge with. Currently includes a hand-wavy stop
	 * count of 8, to not spend too much time checking for merges.
	 */
	if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))
		ret = true;

	spin_unlock(&ctx->lock);
out_put:
	return ret;
}

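/*
 * Try to merge @rq with a request already queued in the elevator; requests
 * freed as a side effect of merging are collected on @free for the caller
 * to release.
 */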
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
				   struct list_head *free)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

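/*
 * Allocate the scheduler tags (and the requests backing them) for one
 * hardware queue, or point it at the queue-wide shared tags when the tag set
 * uses shared tags.
 */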
static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
					  struct blk_mq_hw_ctx *hctx,
					  unsigned int hctx_idx)
{
	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		hctx->sched_tags = q->sched_shared_tags;
		return 0;
	}

	hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
						    q->nr_requests);

	if (!hctx->sched_tags)
		return -ENOMEM;
	return 0;
}

static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
{
	blk_mq_free_rq_map(queue->sched_shared_tags);
	queue->sched_shared_tags = NULL;
}

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags) {
			if (!blk_mq_is_shared_tags(flags))
				blk_mq_free_rq_map(hctx->sched_tags);
			hctx->sched_tags = NULL;
		}
	}

	if (blk_mq_is_shared_tags(flags))
		blk_mq_exit_sched_shared_tags(q);
}

static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
{
	struct blk_mq_tag_set *set = queue->tag_set;

	/*
	 * Set initial depth at max so that we don't need to reallocate for
	 * updating nr_requests.
	 */
	queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set,
						BLK_MQ_NO_HCTX_IDX,
						MAX_SCHED_RQ);
	if (!queue->sched_shared_tags)
		return -ENOMEM;

	blk_mq_tag_update_sched_shared_tags(queue);

	return 0;
}

/* caller must have a reference to @e, will grab another one if successful */
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	unsigned int flags = q->tag_set->flags;
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned long i;
	int ret;

	/*
	 * Default to double the smaller of the hardware queue depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw-queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_DEFAULT_RQ);

	if (blk_mq_is_shared_tags(flags)) {
		ret = blk_mq_init_sched_shared_tags(q);
		if (ret)
			return ret;
	}

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
		if (ret)
			goto err_free_map_and_rqs;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err_free_map_and_rqs;

	mutex_lock(&q->debugfs_mutex);
	blk_mq_debugfs_register_sched(q);
	mutex_unlock(&q->debugfs_mutex);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_sched_free_rqs(q);
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		mutex_lock(&q->debugfs_mutex);
		blk_mq_debugfs_register_sched_hctx(q, hctx);
		mutex_unlock(&q->debugfs_mutex);
	}

	return 0;

err_free_map_and_rqs:
	blk_mq_sched_free_rqs(q);
	blk_mq_sched_tags_teardown(q, flags);

	q->elevator = NULL;
	return ret;
}

/*
 * called in either blk_queue_cleanup or elevator_switch, tagset
 * is required for freeing requests
 */
void blk_mq_sched_free_rqs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
				BLK_MQ_NO_HCTX_IDX);
	} else {
		queue_for_each_hw_ctx(q, hctx, i) {
			if (hctx->sched_tags)
				blk_mq_free_rqs(q->tag_set,
						hctx->sched_tags, i);
		}
	}
}

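/*
 * Tear down the elevator for @q: unregister debugfs entries, call the
 * elevator's per-hctx and queue-level exit hooks, and free the scheduler
 * tags.
 */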
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;
	unsigned int flags = 0;

	queue_for_each_hw_ctx(q, hctx, i) {
		mutex_lock(&q->debugfs_mutex);
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		mutex_unlock(&q->debugfs_mutex);

		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
		flags = hctx->flags;
	}

	mutex_lock(&q->debugfs_mutex);
	blk_mq_debugfs_unregister_sched(q);
	mutex_unlock(&q->debugfs_mutex);

	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q, flags);
	q->elevator = NULL;
}