1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
4 * Shaohua Li <shli@fb.com>
5 */
6 #include <linux/module.h>
7
8 #include <linux/moduleparam.h>
9 #include <linux/sched.h>
10 #include <linux/fs.h>
11 #include <linux/init.h>
12 #include "null_blk.h"
13
14 #define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
15 #define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT)
16 #define SECTOR_MASK (PAGE_SECTORS - 1)
17
18 #define FREE_BATCH 16
19
20 #define TICKS_PER_SEC 50ULL
21 #define TIMER_INTERVAL (NSEC_PER_SEC / TICKS_PER_SEC)
22
23 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
24 static DECLARE_FAULT_ATTR(null_timeout_attr);
25 static DECLARE_FAULT_ATTR(null_requeue_attr);
26 static DECLARE_FAULT_ATTR(null_init_hctx_attr);
27 #endif
28
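/*
 * Number of bytes a throttled device may transfer per bandwidth-timer tick
 * (TIMER_INTERVAL) for a given MB/s limit.
 */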
29 static inline u64 mb_per_tick(int mbps)
30 {
31 return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
32 }
33
34 /*
35 * Status flags for nullb_device.
36 *
37 * CONFIGURED: Device has been configured and turned on. Cannot reconfigure.
38 * UP: Device is currently on and visible in userspace.
39 * THROTTLED: Device is being throttled.
40 * CACHE: Device is using a write-back cache.
41 */
42 enum nullb_device_flags {
43 NULLB_DEV_FL_CONFIGURED = 0,
44 NULLB_DEV_FL_UP = 1,
45 NULLB_DEV_FL_THROTTLED = 2,
46 NULLB_DEV_FL_CACHE = 3,
47 };
48
49 #define MAP_SZ ((PAGE_SIZE >> SECTOR_SHIFT) + 2)
50 /*
51 * nullb_page is a page in memory for nullb devices.
52 *
53 * @page: The page holding the data.
54 * @bitmap: The bitmap represents which sector in the page has data.
55 * Each bit represents one block size. For example, sector 8
56 * will use the 7th bit.
57 * The highest 2 bits of the bitmap are for special purposes. LOCK means the
58 * cache page is being flushed to storage. FREE means the cache page is freed
59 * and should be skipped when flushing to storage. Please see
60 * null_make_cache_space().
61 */
62 struct nullb_page {
63 struct page *page;
64 DECLARE_BITMAP(bitmap, MAP_SZ);
65 };
66 #define NULLB_PAGE_LOCK (MAP_SZ - 1)
67 #define NULLB_PAGE_FREE (MAP_SZ - 2)
68
69 static LIST_HEAD(nullb_list);
70 static struct mutex lock;
71 static int null_major;
72 static DEFINE_IDA(nullb_indexes);
73 static struct blk_mq_tag_set tag_set;
74
75 enum {
76 NULL_IRQ_NONE = 0,
77 NULL_IRQ_SOFTIRQ = 1,
78 NULL_IRQ_TIMER = 2,
79 };
80
81 enum {
82 NULL_Q_BIO = 0,
83 NULL_Q_RQ = 1,
84 NULL_Q_MQ = 2,
85 };
86
87 static bool g_virt_boundary = false;
88 module_param_named(virt_boundary, g_virt_boundary, bool, 0444);
89 MODULE_PARM_DESC(virt_boundary, "Require a virtual boundary for the device. Default: False");
90
91 static int g_no_sched;
92 module_param_named(no_sched, g_no_sched, int, 0444);
93 MODULE_PARM_DESC(no_sched, "No io scheduler");
94
95 static int g_submit_queues = 1;
96 module_param_named(submit_queues, g_submit_queues, int, 0444);
97 MODULE_PARM_DESC(submit_queues, "Number of submission queues");
98
99 static int g_home_node = NUMA_NO_NODE;
100 module_param_named(home_node, g_home_node, int, 0444);
101 MODULE_PARM_DESC(home_node, "Home node for the device");
102
103 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
104 /*
105 * For more details about fault injection, please refer to
106 * Documentation/fault-injection/fault-injection.rst.
107 */
108 static char g_timeout_str[80];
109 module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);
110 MODULE_PARM_DESC(timeout, "Fault injection. timeout=<interval>,<probability>,<space>,<times>");
111
112 static char g_requeue_str[80];
113 module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
114 MODULE_PARM_DESC(requeue, "Fault injection. requeue=<interval>,<probability>,<space>,<times>");
115
116 static char g_init_hctx_str[80];
117 module_param_string(init_hctx, g_init_hctx_str, sizeof(g_init_hctx_str), 0444);
118 MODULE_PARM_DESC(init_hctx, "Fault injection to fail hctx init. init_hctx=<interval>,<probability>,<space>,<times>");
119 #endif
120
121 static int g_queue_mode = NULL_Q_MQ;
122
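/*
 * Parse a decimal value from a module parameter string and store it only if
 * it falls within [min, max]; used by the queue_mode and irqmode setters.
 */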
123 static int null_param_store_val(const char *str, int *val, int min, int max)
124 {
125 int ret, new_val;
126
127 ret = kstrtoint(str, 10, &new_val);
128 if (ret)
129 return -EINVAL;
130
131 if (new_val < min || new_val > max)
132 return -EINVAL;
133
134 *val = new_val;
135 return 0;
136 }
137
138 static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
139 {
140 return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
141 }
142
143 static const struct kernel_param_ops null_queue_mode_param_ops = {
144 .set = null_set_queue_mode,
145 .get = param_get_int,
146 };
147
148 device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
149 MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
150
151 static int g_gb = 250;
152 module_param_named(gb, g_gb, int, 0444);
153 MODULE_PARM_DESC(gb, "Size in GB");
154
155 static int g_bs = 512;
156 module_param_named(bs, g_bs, int, 0444);
157 MODULE_PARM_DESC(bs, "Block size (in bytes)");
158
159 static int g_max_sectors;
160 module_param_named(max_sectors, g_max_sectors, int, 0444);
161 MODULE_PARM_DESC(max_sectors, "Maximum size of a command (in 512B sectors)");
162
163 static unsigned int nr_devices = 1;
164 module_param(nr_devices, uint, 0444);
165 MODULE_PARM_DESC(nr_devices, "Number of devices to register");
166
167 static bool g_blocking;
168 module_param_named(blocking, g_blocking, bool, 0444);
169 MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
170
171 static bool shared_tags;
172 module_param(shared_tags, bool, 0444);
173 MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
174
175 static bool g_shared_tag_bitmap;
176 module_param_named(shared_tag_bitmap, g_shared_tag_bitmap, bool, 0444);
177 MODULE_PARM_DESC(shared_tag_bitmap, "Use shared tag bitmap for all submission queues for blk-mq");
178
179 static int g_irqmode = NULL_IRQ_SOFTIRQ;
180
181 static int null_set_irqmode(const char *str, const struct kernel_param *kp)
182 {
183 return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
184 NULL_IRQ_TIMER);
185 }
186
187 static const struct kernel_param_ops null_irqmode_param_ops = {
188 .set = null_set_irqmode,
189 .get = param_get_int,
190 };
191
192 device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
193 MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
194
195 static unsigned long g_completion_nsec = 10000;
196 module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
197 MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
198
199 static int g_hw_queue_depth = 64;
200 module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444);
201 MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
202
203 static bool g_use_per_node_hctx;
204 module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
205 MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
206
207 static bool g_zoned;
208 module_param_named(zoned, g_zoned, bool, S_IRUGO);
209 MODULE_PARM_DESC(zoned, "Make the device a host-managed zoned block device. Default: false");
210
211 static unsigned long g_zone_size = 256;
212 module_param_named(zone_size, g_zone_size, ulong, S_IRUGO);
213 MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be power-of-two. Default: 256");
214
215 static unsigned long g_zone_capacity;
216 module_param_named(zone_capacity, g_zone_capacity, ulong, 0444);
217 MODULE_PARM_DESC(zone_capacity, "Zone capacity in MB when block device is zoned. Can be less than or equal to zone size. Default: Zone size");
218
219 static unsigned int g_zone_nr_conv;
220 module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
221 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0");
222
223 static unsigned int g_zone_max_open;
224 module_param_named(zone_max_open, g_zone_max_open, uint, 0444);
225 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones when block device is zoned. Default: 0 (no limit)");
226
227 static unsigned int g_zone_max_active;
228 module_param_named(zone_max_active, g_zone_max_active, uint, 0444);
229 MODULE_PARM_DESC(zone_max_active, "Maximum number of active zones when block device is zoned. Default: 0 (no limit)");
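
/*
 * Illustrative module load using the parameters defined above (values are
 * only an example):
 *   modprobe null_blk nr_devices=2 gb=4 bs=4096 queue_mode=2
 */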
230
231 static struct nullb_device *null_alloc_dev(void);
232 static void null_free_dev(struct nullb_device *dev);
233 static void null_del_dev(struct nullb *nullb);
234 static int null_add_dev(struct nullb_device *dev);
235 static void null_free_device_storage(struct nullb_device *dev, bool is_cache);
236
237 static inline struct nullb_device *to_nullb_device(struct config_item *item)
238 {
239 return item ? container_of(item, struct nullb_device, item) : NULL;
240 }
241
242 static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
243 {
244 return snprintf(page, PAGE_SIZE, "%u\n", val);
245 }
246
247 static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
248 char *page)
249 {
250 return snprintf(page, PAGE_SIZE, "%lu\n", val);
251 }
252
253 static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
254 {
255 return snprintf(page, PAGE_SIZE, "%u\n", val);
256 }
257
258 static ssize_t nullb_device_uint_attr_store(unsigned int *val,
259 const char *page, size_t count)
260 {
261 unsigned int tmp;
262 int result;
263
264 result = kstrtouint(page, 0, &tmp);
265 if (result < 0)
266 return result;
267
268 *val = tmp;
269 return count;
270 }
271
272 static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
273 const char *page, size_t count)
274 {
275 int result;
276 unsigned long tmp;
277
278 result = kstrtoul(page, 0, &tmp);
279 if (result < 0)
280 return result;
281
282 *val = tmp;
283 return count;
284 }
285
286 static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
287 size_t count)
288 {
289 bool tmp;
290 int result;
291
292 result = kstrtobool(page, &tmp);
293 if (result < 0)
294 return result;
295
296 *val = tmp;
297 return count;
298 }
299
300 /* The following macro should only be used with TYPE = {uint, ulong, bool}. */
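/*
 * Attributes generated with a non-NULL APPLY callback delegate validation to
 * that callback and may change while the device is up; attributes without
 * one return -EBUSY once NULLB_DEV_FL_CONFIGURED is set.
 */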
301 #define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY) \
302 static ssize_t \
303 nullb_device_##NAME##_show(struct config_item *item, char *page) \
304 { \
305 return nullb_device_##TYPE##_attr_show( \
306 to_nullb_device(item)->NAME, page); \
307 } \
308 static ssize_t \
309 nullb_device_##NAME##_store(struct config_item *item, const char *page, \
310 size_t count) \
311 { \
312 int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY;\
313 struct nullb_device *dev = to_nullb_device(item); \
314 TYPE new_value = 0; \
315 int ret; \
316 \
317 ret = nullb_device_##TYPE##_attr_store(&new_value, page, count);\
318 if (ret < 0) \
319 return ret; \
320 if (apply_fn) \
321 ret = apply_fn(dev, new_value); \
322 else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags)) \
323 ret = -EBUSY; \
324 if (ret < 0) \
325 return ret; \
326 dev->NAME = new_value; \
327 return count; \
328 } \
329 CONFIGFS_ATTR(nullb_device_, NAME);
330
331 static int nullb_apply_submit_queues(struct nullb_device *dev,
332 unsigned int submit_queues)
333 {
334 struct nullb *nullb = dev->nullb;
335 struct blk_mq_tag_set *set;
336
337 if (!nullb)
338 return 0;
339
340 /*
341 * Make sure that null_init_hctx() does not access nullb->queues[] past
342 * the end of that array.
343 */
344 if (submit_queues > nr_cpu_ids)
345 return -EINVAL;
346 set = nullb->tag_set;
347 blk_mq_update_nr_hw_queues(set, submit_queues);
348 return set->nr_hw_queues == submit_queues ? 0 : -ENOMEM;
349 }
350
351 NULLB_DEVICE_ATTR(size, ulong, NULL);
352 NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL);
353 NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues);
354 NULLB_DEVICE_ATTR(home_node, uint, NULL);
355 NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
356 NULLB_DEVICE_ATTR(blocksize, uint, NULL);
357 NULLB_DEVICE_ATTR(max_sectors, uint, NULL);
358 NULLB_DEVICE_ATTR(irqmode, uint, NULL);
359 NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL);
360 NULLB_DEVICE_ATTR(index, uint, NULL);
361 NULLB_DEVICE_ATTR(blocking, bool, NULL);
362 NULLB_DEVICE_ATTR(use_per_node_hctx, bool, NULL);
363 NULLB_DEVICE_ATTR(memory_backed, bool, NULL);
364 NULLB_DEVICE_ATTR(discard, bool, NULL);
365 NULLB_DEVICE_ATTR(mbps, uint, NULL);
366 NULLB_DEVICE_ATTR(cache_size, ulong, NULL);
367 NULLB_DEVICE_ATTR(zoned, bool, NULL);
368 NULLB_DEVICE_ATTR(zone_size, ulong, NULL);
369 NULLB_DEVICE_ATTR(zone_capacity, ulong, NULL);
370 NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
371 NULLB_DEVICE_ATTR(zone_max_open, uint, NULL);
372 NULLB_DEVICE_ATTR(zone_max_active, uint, NULL);
373 NULLB_DEVICE_ATTR(virt_boundary, bool, NULL);
374
375 static ssize_t nullb_device_power_show(struct config_item *item, char *page)
376 {
377 return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
378 }
379
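/*
 * Writing 1 to the "power" attribute creates and starts the disk via
 * null_add_dev(); writing 0 tears it down via null_del_dev(). Illustrative
 * usage, assuming configfs is mounted at /sys/kernel/config:
 *   mkdir /sys/kernel/config/nullb/mydev
 *   echo 1 > /sys/kernel/config/nullb/mydev/power
 */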
380 static ssize_t nullb_device_power_store(struct config_item *item,
381 const char *page, size_t count)
382 {
383 struct nullb_device *dev = to_nullb_device(item);
384 bool newp = false;
385 ssize_t ret;
386
387 ret = nullb_device_bool_attr_store(&newp, page, count);
388 if (ret < 0)
389 return ret;
390
391 if (!dev->power && newp) {
392 if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
393 return count;
394 if (null_add_dev(dev)) {
395 clear_bit(NULLB_DEV_FL_UP, &dev->flags);
396 return -ENOMEM;
397 }
398
399 set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
400 dev->power = newp;
401 } else if (dev->power && !newp) {
402 if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
403 mutex_lock(&lock);
404 dev->power = newp;
405 null_del_dev(dev->nullb);
406 mutex_unlock(&lock);
407 }
408 clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
409 }
410
411 return count;
412 }
413
414 CONFIGFS_ATTR(nullb_device_, power);
415
416 static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
417 {
418 struct nullb_device *t_dev = to_nullb_device(item);
419
420 return badblocks_show(&t_dev->badblocks, page, 0);
421 }
422
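/*
 * The badblocks attribute accepts "+<start>-<end>" to mark an inclusive
 * sector range bad and "-<start>-<end>" to clear it, e.g. writing "+0-7"
 * marks sectors 0 through 7 bad.
 */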
423 static ssize_t nullb_device_badblocks_store(struct config_item *item,
424 const char *page, size_t count)
425 {
426 struct nullb_device *t_dev = to_nullb_device(item);
427 char *orig, *buf, *tmp;
428 u64 start, end;
429 int ret;
430
431 orig = kstrndup(page, count, GFP_KERNEL);
432 if (!orig)
433 return -ENOMEM;
434
435 buf = strstrip(orig);
436
437 ret = -EINVAL;
438 if (buf[0] != '+' && buf[0] != '-')
439 goto out;
440 tmp = strchr(&buf[1], '-');
441 if (!tmp)
442 goto out;
443 *tmp = '\0';
444 ret = kstrtoull(buf + 1, 0, &start);
445 if (ret)
446 goto out;
447 ret = kstrtoull(tmp + 1, 0, &end);
448 if (ret)
449 goto out;
450 ret = -EINVAL;
451 if (start > end)
452 goto out;
453 /* enable badblocks */
454 cmpxchg(&t_dev->badblocks.shift, -1, 0);
455 if (buf[0] == '+')
456 ret = badblocks_set(&t_dev->badblocks, start,
457 end - start + 1, 1);
458 else
459 ret = badblocks_clear(&t_dev->badblocks, start,
460 end - start + 1);
461 if (ret == 0)
462 ret = count;
463 out:
464 kfree(orig);
465 return ret;
466 }
467 CONFIGFS_ATTR(nullb_device_, badblocks);
468
469 static struct configfs_attribute *nullb_device_attrs[] = {
470 &nullb_device_attr_size,
471 &nullb_device_attr_completion_nsec,
472 &nullb_device_attr_submit_queues,
473 &nullb_device_attr_home_node,
474 &nullb_device_attr_queue_mode,
475 &nullb_device_attr_blocksize,
476 &nullb_device_attr_max_sectors,
477 &nullb_device_attr_irqmode,
478 &nullb_device_attr_hw_queue_depth,
479 &nullb_device_attr_index,
480 &nullb_device_attr_blocking,
481 &nullb_device_attr_use_per_node_hctx,
482 &nullb_device_attr_power,
483 &nullb_device_attr_memory_backed,
484 &nullb_device_attr_discard,
485 &nullb_device_attr_mbps,
486 &nullb_device_attr_cache_size,
487 &nullb_device_attr_badblocks,
488 &nullb_device_attr_zoned,
489 &nullb_device_attr_zone_size,
490 &nullb_device_attr_zone_capacity,
491 &nullb_device_attr_zone_nr_conv,
492 &nullb_device_attr_zone_max_open,
493 &nullb_device_attr_zone_max_active,
494 &nullb_device_attr_virt_boundary,
495 NULL,
496 };
497
498 static void nullb_device_release(struct config_item *item)
499 {
500 struct nullb_device *dev = to_nullb_device(item);
501
502 null_free_device_storage(dev, false);
503 null_free_dev(dev);
504 }
505
506 static struct configfs_item_operations nullb_device_ops = {
507 .release = nullb_device_release,
508 };
509
510 static const struct config_item_type nullb_device_type = {
511 .ct_item_ops = &nullb_device_ops,
512 .ct_attrs = nullb_device_attrs,
513 .ct_owner = THIS_MODULE,
514 };
515
516 static struct
517 config_item *nullb_group_make_item(struct config_group *group, const char *name)
518 {
519 struct nullb_device *dev;
520
521 dev = null_alloc_dev();
522 if (!dev)
523 return ERR_PTR(-ENOMEM);
524
525 config_item_init_type_name(&dev->item, name, &nullb_device_type);
526
527 return &dev->item;
528 }
529
530 static void
531 nullb_group_drop_item(struct config_group *group, struct config_item *item)
532 {
533 struct nullb_device *dev = to_nullb_device(item);
534
535 if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
536 mutex_lock(&lock);
537 dev->power = false;
538 null_del_dev(dev->nullb);
539 mutex_unlock(&lock);
540 }
541
542 config_item_put(item);
543 }
544
545 static ssize_t memb_group_features_show(struct config_item *item, char *page)
546 {
547 return snprintf(page, PAGE_SIZE,
548 "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_capacity,zone_nr_conv,zone_max_open,zone_max_active,blocksize,max_sectors,virt_boundary\n");
549 }
550
551 CONFIGFS_ATTR_RO(memb_group_, features);
552
553 static struct configfs_attribute *nullb_group_attrs[] = {
554 &memb_group_attr_features,
555 NULL,
556 };
557
558 static struct configfs_group_operations nullb_group_ops = {
559 .make_item = nullb_group_make_item,
560 .drop_item = nullb_group_drop_item,
561 };
562
563 static const struct config_item_type nullb_group_type = {
564 .ct_group_ops = &nullb_group_ops,
565 .ct_attrs = nullb_group_attrs,
566 .ct_owner = THIS_MODULE,
567 };
568
569 static struct configfs_subsystem nullb_subsys = {
570 .su_group = {
571 .cg_item = {
572 .ci_namebuf = "nullb",
573 .ci_type = &nullb_group_type,
574 },
575 },
576 };
577
578 static inline int null_cache_active(struct nullb *nullb)
579 {
580 return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
581 }
582
583 static struct nullb_device *null_alloc_dev(void)
584 {
585 struct nullb_device *dev;
586
587 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
588 if (!dev)
589 return NULL;
590 INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
591 INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
592 if (badblocks_init(&dev->badblocks, 0)) {
593 kfree(dev);
594 return NULL;
595 }
596
597 dev->size = g_gb * 1024;
598 dev->completion_nsec = g_completion_nsec;
599 dev->submit_queues = g_submit_queues;
600 dev->home_node = g_home_node;
601 dev->queue_mode = g_queue_mode;
602 dev->blocksize = g_bs;
603 dev->max_sectors = g_max_sectors;
604 dev->irqmode = g_irqmode;
605 dev->hw_queue_depth = g_hw_queue_depth;
606 dev->blocking = g_blocking;
607 dev->use_per_node_hctx = g_use_per_node_hctx;
608 dev->zoned = g_zoned;
609 dev->zone_size = g_zone_size;
610 dev->zone_capacity = g_zone_capacity;
611 dev->zone_nr_conv = g_zone_nr_conv;
612 dev->zone_max_open = g_zone_max_open;
613 dev->zone_max_active = g_zone_max_active;
614 dev->virt_boundary = g_virt_boundary;
615 return dev;
616 }
617
618 static void null_free_dev(struct nullb_device *dev)
619 {
620 if (!dev)
621 return;
622
623 null_free_zoned_dev(dev);
624 badblocks_exit(&dev->badblocks);
625 kfree(dev);
626 }
627
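/*
 * Minimal bitmap tag allocator used in BIO queue mode: get_tag() scans
 * nq->tag_map for a free bit and returns -1U when the queue is full;
 * put_tag() releases the bit and wakes any submitter sleeping in alloc_cmd().
 */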
628 static void put_tag(struct nullb_queue *nq, unsigned int tag)
629 {
630 clear_bit_unlock(tag, nq->tag_map);
631
632 if (waitqueue_active(&nq->wait))
633 wake_up(&nq->wait);
634 }
635
636 static unsigned int get_tag(struct nullb_queue *nq)
637 {
638 unsigned int tag;
639
640 do {
641 tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
642 if (tag >= nq->queue_depth)
643 return -1U;
644 } while (test_and_set_bit_lock(tag, nq->tag_map));
645
646 return tag;
647 }
648
649 static void free_cmd(struct nullb_cmd *cmd)
650 {
651 put_tag(cmd->nq, cmd->tag);
652 }
653
654 static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);
655
656 static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
657 {
658 struct nullb_cmd *cmd;
659 unsigned int tag;
660
661 tag = get_tag(nq);
662 if (tag != -1U) {
663 cmd = &nq->cmds[tag];
664 cmd->tag = tag;
665 cmd->error = BLK_STS_OK;
666 cmd->nq = nq;
667 if (nq->dev->irqmode == NULL_IRQ_TIMER) {
668 hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
669 HRTIMER_MODE_REL);
670 cmd->timer.function = null_cmd_timer_expired;
671 }
672 return cmd;
673 }
674
675 return NULL;
676 }
677
678 static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
679 {
680 struct nullb_cmd *cmd;
681 DEFINE_WAIT(wait);
682
683 cmd = __alloc_cmd(nq);
684 if (cmd || !can_wait)
685 return cmd;
686
687 do {
688 prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
689 cmd = __alloc_cmd(nq);
690 if (cmd)
691 break;
692
693 io_schedule();
694 } while (1);
695
696 finish_wait(&nq->wait, &wait);
697 return cmd;
698 }
699
700 static void end_cmd(struct nullb_cmd *cmd)
701 {
702 int queue_mode = cmd->nq->dev->queue_mode;
703
704 switch (queue_mode) {
705 case NULL_Q_MQ:
706 blk_mq_end_request(cmd->rq, cmd->error);
707 return;
708 case NULL_Q_BIO:
709 cmd->bio->bi_status = cmd->error;
710 bio_endio(cmd->bio);
711 break;
712 }
713
714 free_cmd(cmd);
715 }
716
717 static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
718 {
719 end_cmd(container_of(timer, struct nullb_cmd, timer));
720
721 return HRTIMER_NORESTART;
722 }
723
724 static void null_cmd_end_timer(struct nullb_cmd *cmd)
725 {
726 ktime_t kt = cmd->nq->dev->completion_nsec;
727
728 hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
729 }
730
731 static void null_complete_rq(struct request *rq)
732 {
733 end_cmd(blk_mq_rq_to_pdu(rq));
734 }
735
736 static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
737 {
738 struct nullb_page *t_page;
739
740 t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
741 if (!t_page)
742 goto out;
743
744 t_page->page = alloc_pages(gfp_flags, 0);
745 if (!t_page->page)
746 goto out_freepage;
747
748 memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
749 return t_page;
750 out_freepage:
751 kfree(t_page);
752 out:
753 return NULL;
754 }
755
756 static void null_free_page(struct nullb_page *t_page)
757 {
758 __set_bit(NULLB_PAGE_FREE, t_page->bitmap);
759 if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
760 return;
761 __free_page(t_page->page);
762 kfree(t_page);
763 }
764
765 static bool null_page_empty(struct nullb_page *page)
766 {
767 int size = MAP_SZ - 2;
768
769 return find_first_bit(page->bitmap, size) == size;
770 }
771
772 static void null_free_sector(struct nullb *nullb, sector_t sector,
773 bool is_cache)
774 {
775 unsigned int sector_bit;
776 u64 idx;
777 struct nullb_page *t_page, *ret;
778 struct radix_tree_root *root;
779
780 root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
781 idx = sector >> PAGE_SECTORS_SHIFT;
782 sector_bit = (sector & SECTOR_MASK);
783
784 t_page = radix_tree_lookup(root, idx);
785 if (t_page) {
786 __clear_bit(sector_bit, t_page->bitmap);
787
788 if (null_page_empty(t_page)) {
789 ret = radix_tree_delete_item(root, idx, t_page);
790 WARN_ON(ret != t_page);
791 null_free_page(ret);
792 if (is_cache)
793 nullb->dev->curr_cache -= PAGE_SIZE;
794 }
795 }
796 }
797
798 static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
799 struct nullb_page *t_page, bool is_cache)
800 {
801 struct radix_tree_root *root;
802
803 root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
804
805 if (radix_tree_insert(root, idx, t_page)) {
806 null_free_page(t_page);
807 t_page = radix_tree_lookup(root, idx);
808 WARN_ON(!t_page || t_page->page->index != idx);
809 } else if (is_cache)
810 nullb->dev->curr_cache += PAGE_SIZE;
811
812 return t_page;
813 }
814
815 static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
816 {
817 unsigned long pos = 0;
818 int nr_pages;
819 struct nullb_page *ret, *t_pages[FREE_BATCH];
820 struct radix_tree_root *root;
821
822 root = is_cache ? &dev->cache : &dev->data;
823
824 do {
825 int i;
826
827 nr_pages = radix_tree_gang_lookup(root,
828 (void **)t_pages, pos, FREE_BATCH);
829
830 for (i = 0; i < nr_pages; i++) {
831 pos = t_pages[i]->page->index;
832 ret = radix_tree_delete_item(root, pos, t_pages[i]);
833 WARN_ON(ret != t_pages[i]);
834 null_free_page(ret);
835 }
836
837 pos++;
838 } while (nr_pages == FREE_BATCH);
839
840 if (is_cache)
841 dev->curr_cache = 0;
842 }
843
844 static struct nullb_page *__null_lookup_page(struct nullb *nullb,
845 sector_t sector, bool for_write, bool is_cache)
846 {
847 unsigned int sector_bit;
848 u64 idx;
849 struct nullb_page *t_page;
850 struct radix_tree_root *root;
851
852 idx = sector >> PAGE_SECTORS_SHIFT;
853 sector_bit = (sector & SECTOR_MASK);
854
855 root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
856 t_page = radix_tree_lookup(root, idx);
857 WARN_ON(t_page && t_page->page->index != idx);
858
859 if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
860 return t_page;
861
862 return NULL;
863 }
864
865 static struct nullb_page *null_lookup_page(struct nullb *nullb,
866 sector_t sector, bool for_write, bool ignore_cache)
867 {
868 struct nullb_page *page = NULL;
869
870 if (!ignore_cache)
871 page = __null_lookup_page(nullb, sector, for_write, true);
872 if (page)
873 return page;
874 return __null_lookup_page(nullb, sector, for_write, false);
875 }
876
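/*
 * Look up the page backing @sector, allocating and inserting one if needed.
 * nullb->lock is dropped while allocating (hence the __releases/__acquires
 * annotations) and retaken before returning.
 */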
877 static struct nullb_page *null_insert_page(struct nullb *nullb,
878 sector_t sector, bool ignore_cache)
879 __releases(&nullb->lock)
880 __acquires(&nullb->lock)
881 {
882 u64 idx;
883 struct nullb_page *t_page;
884
885 t_page = null_lookup_page(nullb, sector, true, ignore_cache);
886 if (t_page)
887 return t_page;
888
889 spin_unlock_irq(&nullb->lock);
890
891 t_page = null_alloc_page(GFP_NOIO);
892 if (!t_page)
893 goto out_lock;
894
895 if (radix_tree_preload(GFP_NOIO))
896 goto out_freepage;
897
898 spin_lock_irq(&nullb->lock);
899 idx = sector >> PAGE_SECTORS_SHIFT;
900 t_page->page->index = idx;
901 t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
902 radix_tree_preload_end();
903
904 return t_page;
905 out_freepage:
906 null_free_page(t_page);
907 out_lock:
908 spin_lock_irq(&nullb->lock);
909 return null_lookup_page(nullb, sector, true, ignore_cache);
910 }
911
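/*
 * Write back one cache page: copy the sectors marked dirty in the cache
 * page's bitmap into the corresponding data page, then drop the cache page.
 * A page marked NULLB_PAGE_FREE while locked for flushing is simply freed.
 */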
912 static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
913 {
914 int i;
915 unsigned int offset;
916 u64 idx;
917 struct nullb_page *t_page, *ret;
918 void *dst, *src;
919
920 idx = c_page->page->index;
921
922 t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
923
924 __clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
925 if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
926 null_free_page(c_page);
927 if (t_page && null_page_empty(t_page)) {
928 ret = radix_tree_delete_item(&nullb->dev->data,
929 idx, t_page);
930 null_free_page(t_page);
931 }
932 return 0;
933 }
934
935 if (!t_page)
936 return -ENOMEM;
937
938 src = kmap_atomic(c_page->page);
939 dst = kmap_atomic(t_page->page);
940
941 for (i = 0; i < PAGE_SECTORS;
942 i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
943 if (test_bit(i, c_page->bitmap)) {
944 offset = (i << SECTOR_SHIFT);
945 memcpy(dst + offset, src + offset,
946 nullb->dev->blocksize);
947 __set_bit(i, t_page->bitmap);
948 }
949 }
950
951 kunmap_atomic(dst);
952 kunmap_atomic(src);
953
954 ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
955 null_free_page(ret);
956 nullb->dev->curr_cache -= PAGE_SIZE;
957
958 return 0;
959 }
960
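/*
 * Ensure the write-back cache has room for @n more bytes, flushing cache
 * pages in FREE_BATCH chunks until enough space has been reclaimed.
 */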
961 static int null_make_cache_space(struct nullb *nullb, unsigned long n)
962 {
963 int i, err, nr_pages;
964 struct nullb_page *c_pages[FREE_BATCH];
965 unsigned long flushed = 0, one_round;
966
967 again:
968 if ((nullb->dev->cache_size * 1024 * 1024) >
969 nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
970 return 0;
971
972 nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
973 (void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
974 /*
975 * null_flush_cache_page() could unlock before using the c_pages. To
976 * avoid a race, we don't allow the pages to be freed here.
977 */
978 for (i = 0; i < nr_pages; i++) {
979 nullb->cache_flush_pos = c_pages[i]->page->index;
980 /*
981 * We found a page that is being flushed to disk by another
982 * thread.
983 */
984 if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
985 c_pages[i] = NULL;
986 else
987 __set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
988 }
989
990 one_round = 0;
991 for (i = 0; i < nr_pages; i++) {
992 if (c_pages[i] == NULL)
993 continue;
994 err = null_flush_cache_page(nullb, c_pages[i]);
995 if (err)
996 return err;
997 one_round++;
998 }
999 flushed += one_round << PAGE_SHIFT;
1000
1001 if (n > flushed) {
1002 if (nr_pages == 0)
1003 nullb->cache_flush_pos = 0;
1004 if (one_round == 0) {
1005 /* give other threads a chance */
1006 spin_unlock_irq(&nullb->lock);
1007 spin_lock_irq(&nullb->lock);
1008 }
1009 goto again;
1010 }
1011 return 0;
1012 }
1013
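/*
 * Copy @n bytes from @source into the device's backing store one block at a
 * time, going through the cache unless it is disabled or the write is FUA
 * (in which case any cached copy of the sector is dropped).
 */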
1014 static int copy_to_nullb(struct nullb *nullb, struct page *source,
1015 unsigned int off, sector_t sector, size_t n, bool is_fua)
1016 {
1017 size_t temp, count = 0;
1018 unsigned int offset;
1019 struct nullb_page *t_page;
1020 void *dst, *src;
1021
1022 while (count < n) {
1023 temp = min_t(size_t, nullb->dev->blocksize, n - count);
1024
1025 if (null_cache_active(nullb) && !is_fua)
1026 null_make_cache_space(nullb, PAGE_SIZE);
1027
1028 offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
1029 t_page = null_insert_page(nullb, sector,
1030 !null_cache_active(nullb) || is_fua);
1031 if (!t_page)
1032 return -ENOSPC;
1033
1034 src = kmap_atomic(source);
1035 dst = kmap_atomic(t_page->page);
1036 memcpy(dst + offset, src + off + count, temp);
1037 kunmap_atomic(dst);
1038 kunmap_atomic(src);
1039
1040 __set_bit(sector & SECTOR_MASK, t_page->bitmap);
1041
1042 if (is_fua)
1043 null_free_sector(nullb, sector, true);
1044
1045 count += temp;
1046 sector += temp >> SECTOR_SHIFT;
1047 }
1048 return 0;
1049 }
1050
1051 static int copy_from_nullb(struct nullb *nullb, struct page *dest,
1052 unsigned int off, sector_t sector, size_t n)
1053 {
1054 size_t temp, count = 0;
1055 unsigned int offset;
1056 struct nullb_page *t_page;
1057 void *dst, *src;
1058
1059 while (count < n) {
1060 temp = min_t(size_t, nullb->dev->blocksize, n - count);
1061
1062 offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
1063 t_page = null_lookup_page(nullb, sector, false,
1064 !null_cache_active(nullb));
1065
1066 dst = kmap_atomic(dest);
1067 if (!t_page) {
1068 memset(dst + off + count, 0, temp);
1069 goto next;
1070 }
1071 src = kmap_atomic(t_page->page);
1072 memcpy(dst + off + count, src + offset, temp);
1073 kunmap_atomic(src);
1074 next:
1075 kunmap_atomic(dst);
1076
1077 count += temp;
1078 sector += temp >> SECTOR_SHIFT;
1079 }
1080 return 0;
1081 }
1082
1083 static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
1084 unsigned int len, unsigned int off)
1085 {
1086 void *dst;
1087
1088 dst = kmap_atomic(page);
1089 memset(dst + off, 0xFF, len);
1090 kunmap_atomic(dst);
1091 }
1092
1093 blk_status_t null_handle_discard(struct nullb_device *dev,
1094 sector_t sector, sector_t nr_sectors)
1095 {
1096 struct nullb *nullb = dev->nullb;
1097 size_t n = nr_sectors << SECTOR_SHIFT;
1098 size_t temp;
1099
1100 spin_lock_irq(&nullb->lock);
1101 while (n > 0) {
1102 temp = min_t(size_t, n, dev->blocksize);
1103 null_free_sector(nullb, sector, false);
1104 if (null_cache_active(nullb))
1105 null_free_sector(nullb, sector, true);
1106 sector += temp >> SECTOR_SHIFT;
1107 n -= temp;
1108 }
1109 spin_unlock_irq(&nullb->lock);
1110
1111 return BLK_STS_OK;
1112 }
1113
1114 static int null_handle_flush(struct nullb *nullb)
1115 {
1116 int err;
1117
1118 if (!null_cache_active(nullb))
1119 return 0;
1120
1121 spin_lock_irq(&nullb->lock);
1122 while (true) {
1123 err = null_make_cache_space(nullb,
1124 nullb->dev->cache_size * 1024 * 1024);
1125 if (err || nullb->dev->curr_cache == 0)
1126 break;
1127 }
1128
1129 WARN_ON(!radix_tree_empty(&nullb->dev->cache));
1130 spin_unlock_irq(&nullb->lock);
1131 return err;
1132 }
1133
1134 static int null_transfer(struct nullb *nullb, struct page *page,
1135 unsigned int len, unsigned int off, bool is_write, sector_t sector,
1136 bool is_fua)
1137 {
1138 struct nullb_device *dev = nullb->dev;
1139 unsigned int valid_len = len;
1140 int err = 0;
1141
1142 if (!is_write) {
1143 if (dev->zoned)
1144 valid_len = null_zone_valid_read_len(nullb,
1145 sector, len);
1146
1147 if (valid_len) {
1148 err = copy_from_nullb(nullb, page, off,
1149 sector, valid_len);
1150 off += valid_len;
1151 len -= valid_len;
1152 }
1153
1154 if (len)
1155 nullb_fill_pattern(nullb, page, len, off);
1156 flush_dcache_page(page);
1157 } else {
1158 flush_dcache_page(page);
1159 err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
1160 }
1161
1162 return err;
1163 }
1164
1165 static int null_handle_rq(struct nullb_cmd *cmd)
1166 {
1167 struct request *rq = cmd->rq;
1168 struct nullb *nullb = cmd->nq->dev->nullb;
1169 int err;
1170 unsigned int len;
1171 sector_t sector = blk_rq_pos(rq);
1172 struct req_iterator iter;
1173 struct bio_vec bvec;
1174
1175 spin_lock_irq(&nullb->lock);
1176 rq_for_each_segment(bvec, rq, iter) {
1177 len = bvec.bv_len;
1178 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
1179 op_is_write(req_op(rq)), sector,
1180 rq->cmd_flags & REQ_FUA);
1181 if (err) {
1182 spin_unlock_irq(&nullb->lock);
1183 return err;
1184 }
1185 sector += len >> SECTOR_SHIFT;
1186 }
1187 spin_unlock_irq(&nullb->lock);
1188
1189 return 0;
1190 }
1191
1192 static int null_handle_bio(struct nullb_cmd *cmd)
1193 {
1194 struct bio *bio = cmd->bio;
1195 struct nullb *nullb = cmd->nq->dev->nullb;
1196 int err;
1197 unsigned int len;
1198 sector_t sector = bio->bi_iter.bi_sector;
1199 struct bio_vec bvec;
1200 struct bvec_iter iter;
1201
1202 spin_lock_irq(&nullb->lock);
1203 bio_for_each_segment(bvec, bio, iter) {
1204 len = bvec.bv_len;
1205 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
1206 op_is_write(bio_op(bio)), sector,
1207 bio->bi_opf & REQ_FUA);
1208 if (err) {
1209 spin_unlock_irq(&nullb->lock);
1210 return err;
1211 }
1212 sector += len >> SECTOR_SHIFT;
1213 }
1214 spin_unlock_irq(&nullb->lock);
1215 return 0;
1216 }
1217
1218 static void null_stop_queue(struct nullb *nullb)
1219 {
1220 struct request_queue *q = nullb->q;
1221
1222 if (nullb->dev->queue_mode == NULL_Q_MQ)
1223 blk_mq_stop_hw_queues(q);
1224 }
1225
1226 static void null_restart_queue_async(struct nullb *nullb)
1227 {
1228 struct request_queue *q = nullb->q;
1229
1230 if (nullb->dev->queue_mode == NULL_Q_MQ)
1231 blk_mq_start_stopped_hw_queues(q, true);
1232 }
1233
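/*
 * Charge the request against the per-tick byte budget. When the budget is
 * exhausted, the hardware queues are stopped and BLK_STS_DEV_RESOURCE is
 * returned so the block layer requeues the request; nullb_bwtimer_fn()
 * later refills the budget and restarts the queues.
 */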
1234 static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
1235 {
1236 struct nullb_device *dev = cmd->nq->dev;
1237 struct nullb *nullb = dev->nullb;
1238 blk_status_t sts = BLK_STS_OK;
1239 struct request *rq = cmd->rq;
1240
1241 if (!hrtimer_active(&nullb->bw_timer))
1242 hrtimer_restart(&nullb->bw_timer);
1243
1244 if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) {
1245 null_stop_queue(nullb);
1246 /* race with timer */
1247 if (atomic_long_read(&nullb->cur_bytes) > 0)
1248 null_restart_queue_async(nullb);
1249 /* requeue request */
1250 sts = BLK_STS_DEV_RESOURCE;
1251 }
1252 return sts;
1253 }
1254
1255 static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
1256 sector_t sector,
1257 sector_t nr_sectors)
1258 {
1259 struct badblocks *bb = &cmd->nq->dev->badblocks;
1260 sector_t first_bad;
1261 int bad_sectors;
1262
1263 if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors))
1264 return BLK_STS_IOERR;
1265
1266 return BLK_STS_OK;
1267 }
1268
1269 static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
1270 enum req_opf op,
1271 sector_t sector,
1272 sector_t nr_sectors)
1273 {
1274 struct nullb_device *dev = cmd->nq->dev;
1275 int err;
1276
1277 if (op == REQ_OP_DISCARD)
1278 return null_handle_discard(dev, sector, nr_sectors);
1279
1280 if (dev->queue_mode == NULL_Q_BIO)
1281 err = null_handle_bio(cmd);
1282 else
1283 err = null_handle_rq(cmd);
1284
1285 return errno_to_blk_status(err);
1286 }
1287
1288 static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd)
1289 {
1290 struct nullb_device *dev = cmd->nq->dev;
1291 struct bio *bio;
1292
1293 if (dev->memory_backed)
1294 return;
1295
1296 if (dev->queue_mode == NULL_Q_BIO && bio_op(cmd->bio) == REQ_OP_READ) {
1297 zero_fill_bio(cmd->bio);
1298 } else if (req_op(cmd->rq) == REQ_OP_READ) {
1299 __rq_for_each_bio(bio, cmd->rq)
1300 zero_fill_bio(bio);
1301 }
1302 }
1303
1304 static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
1305 {
1306 /*
1307 * Since root privileges are required to configure the null_blk
1308 * driver, it is fine that this driver does not initialize the
1309 * data buffers of read commands. Zero-initialize these buffers
1310 * anyway if KMSAN is enabled to prevent KMSAN from complaining
1311 * about null_blk not initializing read data buffers.
1312 */
1313 if (IS_ENABLED(CONFIG_KMSAN))
1314 nullb_zero_read_cmd_buffer(cmd);
1315
1316 /* Complete IO by inline, softirq or timer */
1317 switch (cmd->nq->dev->irqmode) {
1318 case NULL_IRQ_SOFTIRQ:
1319 switch (cmd->nq->dev->queue_mode) {
1320 case NULL_Q_MQ:
1321 if (likely(!blk_should_fake_timeout(cmd->rq->q)))
1322 blk_mq_complete_request(cmd->rq);
1323 break;
1324 case NULL_Q_BIO:
1325 /*
1326 * XXX: no proper submitting cpu information available.
1327 */
1328 end_cmd(cmd);
1329 break;
1330 }
1331 break;
1332 case NULL_IRQ_NONE:
1333 end_cmd(cmd);
1334 break;
1335 case NULL_IRQ_TIMER:
1336 null_cmd_end_timer(cmd);
1337 break;
1338 }
1339 }
1340
1341 blk_status_t null_process_cmd(struct nullb_cmd *cmd,
1342 enum req_opf op, sector_t sector,
1343 unsigned int nr_sectors)
1344 {
1345 struct nullb_device *dev = cmd->nq->dev;
1346 blk_status_t ret;
1347
1348 if (dev->badblocks.shift != -1) {
1349 ret = null_handle_badblocks(cmd, sector, nr_sectors);
1350 if (ret != BLK_STS_OK)
1351 return ret;
1352 }
1353
1354 if (dev->memory_backed)
1355 return null_handle_memory_backed(cmd, op, sector, nr_sectors);
1356
1357 return BLK_STS_OK;
1358 }
1359
1360 static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
1361 sector_t nr_sectors, enum req_opf op)
1362 {
1363 struct nullb_device *dev = cmd->nq->dev;
1364 struct nullb *nullb = dev->nullb;
1365 blk_status_t sts;
1366
1367 if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
1368 sts = null_handle_throttled(cmd);
1369 if (sts != BLK_STS_OK)
1370 return sts;
1371 }
1372
1373 if (op == REQ_OP_FLUSH) {
1374 cmd->error = errno_to_blk_status(null_handle_flush(nullb));
1375 goto out;
1376 }
1377
1378 if (dev->zoned)
1379 sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors);
1380 else
1381 sts = null_process_cmd(cmd, op, sector, nr_sectors);
1382
1383 /* Do not overwrite errors (e.g. timeout errors) */
1384 if (cmd->error == BLK_STS_OK)
1385 cmd->error = sts;
1386
1387 out:
1388 nullb_complete_cmd(cmd);
1389 return BLK_STS_OK;
1390 }
1391
1392 static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
1393 {
1394 struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
1395 ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
1396 unsigned int mbps = nullb->dev->mbps;
1397
1398 if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
1399 return HRTIMER_NORESTART;
1400
1401 atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
1402 null_restart_queue_async(nullb);
1403
1404 hrtimer_forward_now(&nullb->bw_timer, timer_interval);
1405
1406 return HRTIMER_RESTART;
1407 }
1408
1409 static void nullb_setup_bwtimer(struct nullb *nullb)
1410 {
1411 ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
1412
1413 hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1414 nullb->bw_timer.function = nullb_bwtimer_fn;
1415 atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
1416 hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
1417 }
1418
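/* Map the submitting CPU to one of the per-device queues (BIO mode only). */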
1419 static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
1420 {
1421 int index = 0;
1422
1423 if (nullb->nr_queues != 1)
1424 index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);
1425
1426 return &nullb->queues[index];
1427 }
1428
1429 static blk_qc_t null_submit_bio(struct bio *bio)
1430 {
1431 sector_t sector = bio->bi_iter.bi_sector;
1432 sector_t nr_sectors = bio_sectors(bio);
1433 struct nullb *nullb = bio->bi_bdev->bd_disk->private_data;
1434 struct nullb_queue *nq = nullb_to_queue(nullb);
1435 struct nullb_cmd *cmd;
1436
1437 cmd = alloc_cmd(nq, 1);
1438 cmd->bio = bio;
1439
1440 null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio));
1441 return BLK_QC_T_NONE;
1442 }
1443
1444 static bool should_timeout_request(struct request *rq)
1445 {
1446 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
1447 if (g_timeout_str[0])
1448 return should_fail(&null_timeout_attr, 1);
1449 #endif
1450 return false;
1451 }
1452
1453 static bool should_requeue_request(struct request *rq)
1454 {
1455 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
1456 if (g_requeue_str[0])
1457 return should_fail(&null_requeue_attr, 1);
1458 #endif
1459 return false;
1460 }
1461
1462 static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
1463 {
1464 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
1465
1466 pr_info("rq %p timed out\n", rq);
1467
1468 /*
1469 * If the device is marked as blocking (i.e. memory backed or zoned
1470 * device), the submission path may be blocked waiting for resources
1471 * and cause real timeouts. For these real timeouts, the submission
1472 * path will complete the request using blk_mq_complete_request().
1473 * Only fake timeouts need to execute blk_mq_complete_request() here.
1474 */
1475 cmd->error = BLK_STS_TIMEOUT;
1476 if (cmd->fake_timeout)
1477 blk_mq_complete_request(rq);
1478 return BLK_EH_DONE;
1479 }
1480
1481 static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
1482 const struct blk_mq_queue_data *bd)
1483 {
1484 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
1485 struct nullb_queue *nq = hctx->driver_data;
1486 sector_t nr_sectors = blk_rq_sectors(bd->rq);
1487 sector_t sector = blk_rq_pos(bd->rq);
1488
1489 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1490
1491 if (nq->dev->irqmode == NULL_IRQ_TIMER) {
1492 hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1493 cmd->timer.function = null_cmd_timer_expired;
1494 }
1495 cmd->rq = bd->rq;
1496 cmd->error = BLK_STS_OK;
1497 cmd->nq = nq;
1498 cmd->fake_timeout = should_timeout_request(bd->rq);
1499
1500 blk_mq_start_request(bd->rq);
1501
1502 if (should_requeue_request(bd->rq)) {
1503 /*
1504 * Alternate between hitting the core BUSY path and the
1505 * driver-driven requeue path.
1506 */
1507 nq->requeue_selection++;
1508 if (nq->requeue_selection & 1)
1509 return BLK_STS_RESOURCE;
1510 else {
1511 blk_mq_requeue_request(bd->rq, true);
1512 return BLK_STS_OK;
1513 }
1514 }
1515 if (cmd->fake_timeout)
1516 return BLK_STS_OK;
1517
1518 return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
1519 }
1520
1521 static void cleanup_queue(struct nullb_queue *nq)
1522 {
1523 kfree(nq->tag_map);
1524 kfree(nq->cmds);
1525 }
1526
1527 static void cleanup_queues(struct nullb *nullb)
1528 {
1529 int i;
1530
1531 for (i = 0; i < nullb->nr_queues; i++)
1532 cleanup_queue(&nullb->queues[i]);
1533
1534 kfree(nullb->queues);
1535 }
1536
1537 static void null_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1538 {
1539 struct nullb_queue *nq = hctx->driver_data;
1540 struct nullb *nullb = nq->dev->nullb;
1541
1542 nullb->nr_queues--;
1543 }
1544
1545 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
1546 {
1547 init_waitqueue_head(&nq->wait);
1548 nq->queue_depth = nullb->queue_depth;
1549 nq->dev = nullb->dev;
1550 }
1551
1552 static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
1553 unsigned int hctx_idx)
1554 {
1555 struct nullb *nullb = hctx->queue->queuedata;
1556 struct nullb_queue *nq;
1557
1558 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
1559 if (g_init_hctx_str[0] && should_fail(&null_init_hctx_attr, 1))
1560 return -EFAULT;
1561 #endif
1562
1563 nq = &nullb->queues[hctx_idx];
1564 hctx->driver_data = nq;
1565 null_init_queue(nullb, nq);
1566 nullb->nr_queues++;
1567
1568 return 0;
1569 }
1570
1571 static const struct blk_mq_ops null_mq_ops = {
1572 .queue_rq = null_queue_rq,
1573 .complete = null_complete_rq,
1574 .timeout = null_timeout_rq,
1575 .init_hctx = null_init_hctx,
1576 .exit_hctx = null_exit_hctx,
1577 };
1578
1579 static void null_del_dev(struct nullb *nullb)
1580 {
1581 struct nullb_device *dev;
1582
1583 if (!nullb)
1584 return;
1585
1586 dev = nullb->dev;
1587
1588 ida_simple_remove(&nullb_indexes, nullb->index);
1589
1590 list_del_init(&nullb->list);
1591
1592 del_gendisk(nullb->disk);
1593
1594 if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
1595 hrtimer_cancel(&nullb->bw_timer);
1596 atomic_long_set(&nullb->cur_bytes, LONG_MAX);
1597 null_restart_queue_async(nullb);
1598 }
1599
1600 blk_cleanup_queue(nullb->q);
1601 if (dev->queue_mode == NULL_Q_MQ &&
1602 nullb->tag_set == &nullb->__tag_set)
1603 blk_mq_free_tag_set(nullb->tag_set);
1604 put_disk(nullb->disk);
1605 cleanup_queues(nullb);
1606 if (null_cache_active(nullb))
1607 null_free_device_storage(nullb->dev, true);
1608 kfree(nullb);
1609 dev->nullb = NULL;
1610 }
1611
1612 static void null_config_discard(struct nullb *nullb)
1613 {
1614 if (nullb->dev->discard == false)
1615 return;
1616
1617 if (!nullb->dev->memory_backed) {
1618 nullb->dev->discard = false;
1619 pr_info("discard option is ignored without memory backing\n");
1620 return;
1621 }
1622
1623 if (nullb->dev->zoned) {
1624 nullb->dev->discard = false;
1625 pr_info("discard option is ignored in zoned mode\n");
1626 return;
1627 }
1628
1629 nullb->q->limits.discard_granularity = nullb->dev->blocksize;
1630 nullb->q->limits.discard_alignment = nullb->dev->blocksize;
1631 blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
1632 blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
1633 }
1634
1635 static const struct block_device_operations null_bio_ops = {
1636 .owner = THIS_MODULE,
1637 .submit_bio = null_submit_bio,
1638 .report_zones = null_report_zones,
1639 };
1640
1641 static const struct block_device_operations null_rq_ops = {
1642 .owner = THIS_MODULE,
1643 .report_zones = null_report_zones,
1644 };
1645
1646 static int setup_commands(struct nullb_queue *nq)
1647 {
1648 struct nullb_cmd *cmd;
1649 int i, tag_size;
1650
1651 nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
1652 if (!nq->cmds)
1653 return -ENOMEM;
1654
1655 tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
1656 nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL);
1657 if (!nq->tag_map) {
1658 kfree(nq->cmds);
1659 return -ENOMEM;
1660 }
1661
1662 for (i = 0; i < nq->queue_depth; i++) {
1663 cmd = &nq->cmds[i];
1664 cmd->tag = -1U;
1665 }
1666
1667 return 0;
1668 }
1669
1670 static int setup_queues(struct nullb *nullb)
1671 {
1672 nullb->queues = kcalloc(nr_cpu_ids, sizeof(struct nullb_queue),
1673 GFP_KERNEL);
1674 if (!nullb->queues)
1675 return -ENOMEM;
1676
1677 nullb->queue_depth = nullb->dev->hw_queue_depth;
1678
1679 return 0;
1680 }
1681
1682 static int init_driver_queues(struct nullb *nullb)
1683 {
1684 struct nullb_queue *nq;
1685 int i, ret = 0;
1686
1687 for (i = 0; i < nullb->dev->submit_queues; i++) {
1688 nq = &nullb->queues[i];
1689
1690 null_init_queue(nullb, nq);
1691
1692 ret = setup_commands(nq);
1693 if (ret)
1694 return ret;
1695 nullb->nr_queues++;
1696 }
1697 return 0;
1698 }
1699
1700 static int null_gendisk_register(struct nullb *nullb)
1701 {
1702 sector_t size = ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT;
1703 struct gendisk *disk;
1704
1705 disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
1706 if (!disk)
1707 return -ENOMEM;
1708 set_capacity(disk, size);
1709
1710 disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
1711 disk->major = null_major;
1712 disk->first_minor = nullb->index;
1713 if (queue_is_mq(nullb->q))
1714 disk->fops = &null_rq_ops;
1715 else
1716 disk->fops = &null_bio_ops;
1717 disk->private_data = nullb;
1718 disk->queue = nullb->q;
1719 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
1720
1721 if (nullb->dev->zoned) {
1722 int ret = null_register_zoned_dev(nullb);
1723
1724 if (ret)
1725 return ret;
1726 }
1727
1728 add_disk(disk);
1729 return 0;
1730 }
1731
1732 static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
1733 {
1734 set->ops = &null_mq_ops;
1735 set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
1736 g_submit_queues;
1737 set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
1738 g_hw_queue_depth;
1739 set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
1740 set->cmd_size = sizeof(struct nullb_cmd);
1741 set->flags = BLK_MQ_F_SHOULD_MERGE;
1742 if (g_no_sched)
1743 set->flags |= BLK_MQ_F_NO_SCHED;
1744 if (g_shared_tag_bitmap)
1745 set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
1746 set->driver_data = NULL;
1747
1748 if ((nullb && nullb->dev->blocking) || g_blocking)
1749 set->flags |= BLK_MQ_F_BLOCKING;
1750
1751 return blk_mq_alloc_tag_set(set);
1752 }
1753
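/*
 * Normalize a device configuration before the disk is created: clamp the
 * block size to [512, 4096], bound submit_queues by the node/CPU counts,
 * force blocking mode for memory-backed devices and require a power-of-two
 * zone size for zoned devices.
 */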
1754 static int null_validate_conf(struct nullb_device *dev)
1755 {
1756 dev->blocksize = round_down(dev->blocksize, 512);
1757 dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
1758
1759 if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
1760 if (dev->submit_queues != nr_online_nodes)
1761 dev->submit_queues = nr_online_nodes;
1762 } else if (dev->submit_queues > nr_cpu_ids)
1763 dev->submit_queues = nr_cpu_ids;
1764 else if (dev->submit_queues == 0)
1765 dev->submit_queues = 1;
1766
1767 dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
1768 dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);
1769
1770 /* Memory-backed devices allocate pages in the I/O path, so set blocking */
1771 if (dev->memory_backed)
1772 dev->blocking = true;
1773 else /* cache is meaningless */
1774 dev->cache_size = 0;
1775 dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
1776 dev->cache_size);
1777 dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
1778 /* BIO-mode queues cannot be stopped, so bandwidth throttling is not supported */
1779 if (dev->queue_mode == NULL_Q_BIO)
1780 dev->mbps = 0;
1781
1782 if (dev->zoned &&
1783 (!dev->zone_size || !is_power_of_2(dev->zone_size))) {
1784 pr_err("zone_size must be power-of-two\n");
1785 return -EINVAL;
1786 }
1787
1788 return 0;
1789 }
1790
1791 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
1792 static bool __null_setup_fault(struct fault_attr *attr, char *str)
1793 {
1794 if (!str[0])
1795 return true;
1796
1797 if (!setup_fault_attr(attr, str))
1798 return false;
1799
1800 attr->verbose = 0;
1801 return true;
1802 }
1803 #endif
1804
1805 static bool null_setup_fault(void)
1806 {
1807 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
1808 if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
1809 return false;
1810 if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
1811 return false;
1812 if (!__null_setup_fault(&null_init_hctx_attr, g_init_hctx_str))
1813 return false;
1814 #endif
1815 return true;
1816 }
1817
1818 static int null_add_dev(struct nullb_device *dev)
1819 {
1820 struct nullb *nullb;
1821 int rv;
1822
1823 rv = null_validate_conf(dev);
1824 if (rv)
1825 return rv;
1826
1827 nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
1828 if (!nullb) {
1829 rv = -ENOMEM;
1830 goto out;
1831 }
1832 nullb->dev = dev;
1833 dev->nullb = nullb;
1834
1835 spin_lock_init(&nullb->lock);
1836
1837 rv = setup_queues(nullb);
1838 if (rv)
1839 goto out_free_nullb;
1840
1841 if (dev->queue_mode == NULL_Q_MQ) {
1842 if (shared_tags) {
1843 nullb->tag_set = &tag_set;
1844 rv = 0;
1845 } else {
1846 nullb->tag_set = &nullb->__tag_set;
1847 rv = null_init_tag_set(nullb, nullb->tag_set);
1848 }
1849
1850 if (rv)
1851 goto out_cleanup_queues;
1852
1853 if (!null_setup_fault())
1854 goto out_cleanup_queues;
1855
1856 nullb->tag_set->timeout = 5 * HZ;
1857 nullb->q = blk_mq_init_queue_data(nullb->tag_set, nullb);
1858 if (IS_ERR(nullb->q)) {
1859 rv = -ENOMEM;
1860 goto out_cleanup_tags;
1861 }
1862 } else if (dev->queue_mode == NULL_Q_BIO) {
1863 nullb->q = blk_alloc_queue(dev->home_node);
1864 if (!nullb->q) {
1865 rv = -ENOMEM;
1866 goto out_cleanup_queues;
1867 }
1868 rv = init_driver_queues(nullb);
1869 if (rv)
1870 goto out_cleanup_blk_queue;
1871 }
1872
1873 if (dev->mbps) {
1874 set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
1875 nullb_setup_bwtimer(nullb);
1876 }
1877
1878 if (dev->cache_size > 0) {
1879 set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
1880 blk_queue_write_cache(nullb->q, true, true);
1881 }
1882
1883 if (dev->zoned) {
1884 rv = null_init_zoned_dev(dev, nullb->q);
1885 if (rv)
1886 goto out_cleanup_blk_queue;
1887 }
1888
1889 nullb->q->queuedata = nullb;
1890 blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
1891 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
1892
1893 mutex_lock(&lock);
1894 nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
1895 dev->index = nullb->index;
1896 mutex_unlock(&lock);
1897
1898 blk_queue_logical_block_size(nullb->q, dev->blocksize);
1899 blk_queue_physical_block_size(nullb->q, dev->blocksize);
1900 if (!dev->max_sectors)
1901 dev->max_sectors = queue_max_hw_sectors(nullb->q);
1902 dev->max_sectors = min_t(unsigned int, dev->max_sectors,
1903 BLK_DEF_MAX_SECTORS);
1904 blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
1905
1906 if (dev->virt_boundary)
1907 blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1);
1908
1909 null_config_discard(nullb);
1910
1911 sprintf(nullb->disk_name, "nullb%d", nullb->index);
1912
1913 rv = null_gendisk_register(nullb);
1914 if (rv)
1915 goto out_cleanup_zone;
1916
1917 mutex_lock(&lock);
1918 list_add_tail(&nullb->list, &nullb_list);
1919 mutex_unlock(&lock);
1920
1921 return 0;
1922 out_cleanup_zone:
1923 null_free_zoned_dev(dev);
1924 out_cleanup_blk_queue:
1925 blk_cleanup_queue(nullb->q);
1926 out_cleanup_tags:
1927 if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
1928 blk_mq_free_tag_set(nullb->tag_set);
1929 out_cleanup_queues:
1930 cleanup_queues(nullb);
1931 out_free_nullb:
1932 kfree(nullb);
1933 dev->nullb = NULL;
1934 out:
1935 return rv;
1936 }
1937
1938 static int __init null_init(void)
1939 {
1940 int ret = 0;
1941 unsigned int i;
1942 struct nullb *nullb;
1943 struct nullb_device *dev;
1944
1945 if (g_bs > PAGE_SIZE) {
1946 pr_warn("invalid block size\n");
1947 pr_warn("defaults block size to %lu\n", PAGE_SIZE);
1948 g_bs = PAGE_SIZE;
1949 }
1950
1951 if (g_max_sectors > BLK_DEF_MAX_SECTORS) {
1952 pr_warn("invalid max sectors\n");
1953 pr_warn("defaults max sectors to %u\n", BLK_DEF_MAX_SECTORS);
1954 g_max_sectors = BLK_DEF_MAX_SECTORS;
1955 }
1956
1957 if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
1958 pr_err("invalid home_node value\n");
1959 g_home_node = NUMA_NO_NODE;
1960 }
1961
1962 if (g_queue_mode == NULL_Q_RQ) {
1963 pr_err("legacy IO path no longer available\n");
1964 return -EINVAL;
1965 }
1966 if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
1967 if (g_submit_queues != nr_online_nodes) {
1968 pr_warn("submit_queues param is set to %u.\n",
1969 nr_online_nodes);
1970 g_submit_queues = nr_online_nodes;
1971 }
1972 } else if (g_submit_queues > nr_cpu_ids)
1973 g_submit_queues = nr_cpu_ids;
1974 else if (g_submit_queues <= 0)
1975 g_submit_queues = 1;
1976
1977 if (g_queue_mode == NULL_Q_MQ && shared_tags) {
1978 ret = null_init_tag_set(NULL, &tag_set);
1979 if (ret)
1980 return ret;
1981 }
1982
1983 config_group_init(&nullb_subsys.su_group);
1984 mutex_init(&nullb_subsys.su_mutex);
1985
1986 ret = configfs_register_subsystem(&nullb_subsys);
1987 if (ret)
1988 goto err_tagset;
1989
1990 mutex_init(&lock);
1991
1992 null_major = register_blkdev(0, "nullb");
1993 if (null_major < 0) {
1994 ret = null_major;
1995 goto err_conf;
1996 }
1997
1998 for (i = 0; i < nr_devices; i++) {
1999 dev = null_alloc_dev();
2000 if (!dev) {
2001 ret = -ENOMEM;
2002 goto err_dev;
2003 }
2004 ret = null_add_dev(dev);
2005 if (ret) {
2006 null_free_dev(dev);
2007 goto err_dev;
2008 }
2009 }
2010
2011 pr_info("module loaded\n");
2012 return 0;
2013
2014 err_dev:
2015 while (!list_empty(&nullb_list)) {
2016 nullb = list_entry(nullb_list.next, struct nullb, list);
2017 dev = nullb->dev;
2018 null_del_dev(nullb);
2019 null_free_dev(dev);
2020 }
2021 unregister_blkdev(null_major, "nullb");
2022 err_conf:
2023 configfs_unregister_subsystem(&nullb_subsys);
2024 err_tagset:
2025 if (g_queue_mode == NULL_Q_MQ && shared_tags)
2026 blk_mq_free_tag_set(&tag_set);
2027 return ret;
2028 }
2029
2030 static void __exit null_exit(void)
2031 {
2032 struct nullb *nullb;
2033
2034 configfs_unregister_subsystem(&nullb_subsys);
2035
2036 unregister_blkdev(null_major, "nullb");
2037
2038 mutex_lock(&lock);
2039 while (!list_empty(&nullb_list)) {
2040 struct nullb_device *dev;
2041
2042 nullb = list_entry(nullb_list.next, struct nullb, list);
2043 dev = nullb->dev;
2044 null_del_dev(nullb);
2045 null_free_dev(dev);
2046 }
2047 mutex_unlock(&lock);
2048
2049 if (g_queue_mode == NULL_Q_MQ && shared_tags)
2050 blk_mq_free_tag_set(&tag_set);
2051 }
2052
2053 module_init(null_init);
2054 module_exit(null_exit);
2055
2056 MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
2057 MODULE_LICENSE("GPL");
2058