xref: /linux/block/blk-mq-cpumap.c (revision 90110e04)
// SPDX-License-Identifier: GPL-2.0
/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/group_cpus.h>

#include "blk.h"
#include "blk-mq.h"

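/*
 * Spread the possible CPUs evenly across the map's hardware queues.
 */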
void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{
	const struct cpumask *masks;
	unsigned int queue, cpu;

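	/*
	 * group_cpus_evenly() partitions the possible CPUs into
	 * nr_queues groups, keeping CPUs that share a NUMA node in
	 * the same group where it can.
	 */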
	masks = group_cpus_evenly(qmap->nr_queues);
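	/* Allocation failed: fall back to mapping every CPU to the first queue. */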
	if (!masks) {
		for_each_possible_cpu(cpu)
			qmap->mq_map[cpu] = qmap->queue_offset;
		return;
	}

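	/* Assign each CPU in group @queue to hardware queue @queue. */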
	for (queue = 0; queue < qmap->nr_queues; queue++) {
		for_each_cpu(cpu, &masks[queue])
			qmap->mq_map[cpu] = qmap->queue_offset + queue;
	}
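	/* group_cpus_evenly() hands ownership of the mask array to the caller. */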
	kfree(masks);
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);

/**
 * blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index
 * @qmap: CPU to hardware queue map.
 * @index: hardware queue index.
 *
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 */
int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
{
	int i;

	for_each_possible_cpu(i) {
		if (index == qmap->mq_map[i])
			return cpu_to_node(i);
	}

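	/* No possible CPU maps to this hardware queue. */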
	return NUMA_NO_NODE;
}