1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
4 * Shaohua Li <shli@fb.com>
5 */
6 #include <linux/module.h>
7
8 #include <linux/moduleparam.h>
9 #include <linux/sched.h>
10 #include <linux/fs.h>
11 #include <linux/init.h>
12 #include "null_blk.h"
13
14 #undef pr_fmt
15 #define pr_fmt(fmt) "null_blk: " fmt
16
17 #define FREE_BATCH 16
18
19 #define TICKS_PER_SEC 50ULL
20 #define TIMER_INTERVAL (NSEC_PER_SEC / TICKS_PER_SEC)
21
22 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
23 static DECLARE_FAULT_ATTR(null_timeout_attr);
24 static DECLARE_FAULT_ATTR(null_requeue_attr);
25 static DECLARE_FAULT_ATTR(null_init_hctx_attr);
26 #endif
27
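/*
 * Convert a MiB/s bandwidth limit into the byte budget available during one
 * bandwidth-timer tick (1/TICKS_PER_SEC seconds, i.e. 20 ms).
 */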
28 static inline u64 mb_per_tick(int mbps)
29 {
30 return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
31 }
32
33 /*
34 * Status flags for nullb_device.
35 *
36 * CONFIGURED: Device has been configured and turned on. Cannot reconfigure.
37 * UP: Device is currently on and visible in userspace.
38 * THROTTLED: Device is being throttled.
39 * CACHE: Device is using a write-back cache.
40 */
41 enum nullb_device_flags {
42 NULLB_DEV_FL_CONFIGURED = 0,
43 NULLB_DEV_FL_UP = 1,
44 NULLB_DEV_FL_THROTTLED = 2,
45 NULLB_DEV_FL_CACHE = 3,
46 };
47
48 #define MAP_SZ ((PAGE_SIZE >> SECTOR_SHIFT) + 2)
49 /*
50 * nullb_page is a page in memory for nullb devices.
51 *
52 * @page: The page holding the data.
53 * @bitmap: The bitmap represents which sector in the page has data.
54 * Each bit covers one block size worth of data. For example, sector 8
55 * will use the 7th bit.
56 * The highest 2 bits of the bitmap are for special purposes. LOCK means the
57 * cache page is being flushed to storage. FREE means the cache page has been
58 * freed and should be skipped when flushing to storage. Please see
59 * null_make_cache_space().
60 */
61 struct nullb_page {
62 struct page *page;
63 DECLARE_BITMAP(bitmap, MAP_SZ);
64 };
65 #define NULLB_PAGE_LOCK (MAP_SZ - 1)
66 #define NULLB_PAGE_FREE (MAP_SZ - 2)
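/*
 * Example (assuming 4 KiB pages and 512-byte sectors): MAP_SZ is 10,
 * bits 0-7 track the eight sectors backed by the page, bit 8 is
 * NULLB_PAGE_FREE and bit 9 is NULLB_PAGE_LOCK.
 */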
67
68 static LIST_HEAD(nullb_list);
69 static struct mutex lock;
70 static int null_major;
71 static DEFINE_IDA(nullb_indexes);
72 static struct blk_mq_tag_set tag_set;
73
74 enum {
75 NULL_IRQ_NONE = 0,
76 NULL_IRQ_SOFTIRQ = 1,
77 NULL_IRQ_TIMER = 2,
78 };
79
80 static bool g_virt_boundary = false;
81 module_param_named(virt_boundary, g_virt_boundary, bool, 0444);
82 MODULE_PARM_DESC(virt_boundary, "Require a virtual boundary for the device. Default: False");
83
84 static int g_no_sched;
85 module_param_named(no_sched, g_no_sched, int, 0444);
86 MODULE_PARM_DESC(no_sched, "No I/O scheduler");
87
88 static int g_submit_queues = 1;
89 module_param_named(submit_queues, g_submit_queues, int, 0444);
90 MODULE_PARM_DESC(submit_queues, "Number of submission queues");
91
92 static int g_poll_queues = 1;
93 module_param_named(poll_queues, g_poll_queues, int, 0444);
94 MODULE_PARM_DESC(poll_queues, "Number of IOPOLL submission queues");
95
96 static int g_home_node = NUMA_NO_NODE;
97 module_param_named(home_node, g_home_node, int, 0444);
98 MODULE_PARM_DESC(home_node, "Home node for the device");
99
100 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
101 /*
102 * For more details about fault injection, please refer to
103 * Documentation/fault-injection/fault-injection.rst.
104 */
105 static char g_timeout_str[80];
106 module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);
107 MODULE_PARM_DESC(timeout, "Fault injection. timeout=<interval>,<probability>,<space>,<times>");
108
109 static char g_requeue_str[80];
110 module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
111 MODULE_PARM_DESC(requeue, "Fault injection. requeue=<interval>,<probability>,<space>,<times>");
112
113 static char g_init_hctx_str[80];
114 module_param_string(init_hctx, g_init_hctx_str, sizeof(g_init_hctx_str), 0444);
115 MODULE_PARM_DESC(init_hctx, "Fault injection to fail hctx init. init_hctx=<interval>,<probability>,<space>,<times>");
116 #endif
117
118 /*
119 * Historic queue modes.
120 *
121 * These days nothing but NULL_Q_MQ is actually supported, but we keep the
122 * enum for error reporting.
123 */
124 enum {
125 NULL_Q_BIO = 0,
126 NULL_Q_RQ = 1,
127 NULL_Q_MQ = 2,
128 };
129
130 static int g_queue_mode = NULL_Q_MQ;
131
132 static int null_param_store_val(const char *str, int *val, int min, int max)
133 {
134 int ret, new_val;
135
136 ret = kstrtoint(str, 10, &new_val);
137 if (ret)
138 return -EINVAL;
139
140 if (new_val < min || new_val > max)
141 return -EINVAL;
142
143 *val = new_val;
144 return 0;
145 }
146
147 static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
148 {
149 return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
150 }
151
152 static const struct kernel_param_ops null_queue_mode_param_ops = {
153 .set = null_set_queue_mode,
154 .get = param_get_int,
155 };
156
157 device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
158 MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
159
160 static int g_gb = 250;
161 module_param_named(gb, g_gb, int, 0444);
162 MODULE_PARM_DESC(gb, "Size in GB");
163
164 static int g_bs = 512;
165 module_param_named(bs, g_bs, int, 0444);
166 MODULE_PARM_DESC(bs, "Block size (in bytes)");
167
168 static int g_max_sectors;
169 module_param_named(max_sectors, g_max_sectors, int, 0444);
170 MODULE_PARM_DESC(max_sectors, "Maximum size of a command (in 512B sectors)");
171
172 static unsigned int nr_devices = 1;
173 module_param(nr_devices, uint, 0444);
174 MODULE_PARM_DESC(nr_devices, "Number of devices to register");
175
176 static bool g_blocking;
177 module_param_named(blocking, g_blocking, bool, 0444);
178 MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
179
180 static bool g_shared_tags;
181 module_param_named(shared_tags, g_shared_tags, bool, 0444);
182 MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
183
184 static bool g_shared_tag_bitmap;
185 module_param_named(shared_tag_bitmap, g_shared_tag_bitmap, bool, 0444);
186 MODULE_PARM_DESC(shared_tag_bitmap, "Use shared tag bitmap for all submission queues for blk-mq");
187
188 static int g_irqmode = NULL_IRQ_SOFTIRQ;
189
190 static int null_set_irqmode(const char *str, const struct kernel_param *kp)
191 {
192 return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
193 NULL_IRQ_TIMER);
194 }
195
196 static const struct kernel_param_ops null_irqmode_param_ops = {
197 .set = null_set_irqmode,
198 .get = param_get_int,
199 };
200
201 device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
202 MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
203
204 static unsigned long g_completion_nsec = 10000;
205 module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
206 MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
207
208 static int g_hw_queue_depth = 64;
209 module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444);
210 MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
211
212 static bool g_use_per_node_hctx;
213 module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
214 MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
215
216 static bool g_memory_backed;
217 module_param_named(memory_backed, g_memory_backed, bool, 0444);
218 MODULE_PARM_DESC(memory_backed, "Create a memory-backed block device. Default: false");
219
220 static bool g_discard;
221 module_param_named(discard, g_discard, bool, 0444);
222 MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed null_blk device). Default: false");
223
224 static unsigned long g_cache_size;
225 module_param_named(cache_size, g_cache_size, ulong, 0444);
226 MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device. Default: 0 (none)");
227
228 static bool g_fua = true;
229 module_param_named(fua, g_fua, bool, 0444);
230 MODULE_PARM_DESC(fua, "Enable/disable FUA support when cache_size is used. Default: true");
231
232 static unsigned int g_mbps;
233 module_param_named(mbps, g_mbps, uint, 0444);
234 MODULE_PARM_DESC(mbps, "Limit maximum bandwidth (in MiB/s). Default: 0 (no limit)");
235
236 static bool g_zoned;
237 module_param_named(zoned, g_zoned, bool, 0444);
238 MODULE_PARM_DESC(zoned, "Make the device a host-managed zoned block device. Default: false");
239
240 static unsigned long g_zone_size = 256;
241 module_param_named(zone_size, g_zone_size, ulong, 0444);
242 MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be power-of-two. Default: 256");
243
244 static unsigned long g_zone_capacity;
245 module_param_named(zone_capacity, g_zone_capacity, ulong, 0444);
246 MODULE_PARM_DESC(zone_capacity, "Zone capacity in MB when block device is zoned. Can be less than or equal to zone size. Default: Zone size");
247
248 static unsigned int g_zone_nr_conv;
249 module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
250 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0");
251
252 static unsigned int g_zone_max_open;
253 module_param_named(zone_max_open, g_zone_max_open, uint, 0444);
254 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones when block device is zoned. Default: 0 (no limit)");
255
256 static unsigned int g_zone_max_active;
257 module_param_named(zone_max_active, g_zone_max_active, uint, 0444);
258 MODULE_PARM_DESC(zone_max_active, "Maximum number of active zones when block device is zoned. Default: 0 (no limit)");
259
260 static int g_zone_append_max_sectors = INT_MAX;
261 module_param_named(zone_append_max_sectors, g_zone_append_max_sectors, int, 0444);
262 MODULE_PARM_DESC(zone_append_max_sectors,
263 "Maximum size of a zone append command (in 512B sectors). Specify 0 for zone append emulation");
264
265 static struct nullb_device *null_alloc_dev(void);
266 static void null_free_dev(struct nullb_device *dev);
267 static void null_del_dev(struct nullb *nullb);
268 static int null_add_dev(struct nullb_device *dev);
269 static struct nullb *null_find_dev_by_name(const char *name);
270 static void null_free_device_storage(struct nullb_device *dev, bool is_cache);
271
272 static inline struct nullb_device *to_nullb_device(struct config_item *item)
273 {
274 return item ? container_of(to_config_group(item), struct nullb_device, group) : NULL;
275 }
276
277 static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
278 {
279 return snprintf(page, PAGE_SIZE, "%u\n", val);
280 }
281
282 static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
283 char *page)
284 {
285 return snprintf(page, PAGE_SIZE, "%lu\n", val);
286 }
287
288 static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
289 {
290 return snprintf(page, PAGE_SIZE, "%u\n", val);
291 }
292
293 static ssize_t nullb_device_uint_attr_store(unsigned int *val,
294 const char *page, size_t count)
295 {
296 unsigned int tmp;
297 int result;
298
299 result = kstrtouint(page, 0, &tmp);
300 if (result < 0)
301 return result;
302
303 *val = tmp;
304 return count;
305 }
306
307 static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
308 const char *page, size_t count)
309 {
310 int result;
311 unsigned long tmp;
312
313 result = kstrtoul(page, 0, &tmp);
314 if (result < 0)
315 return result;
316
317 *val = tmp;
318 return count;
319 }
320
321 static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
322 size_t count)
323 {
324 bool tmp;
325 int result;
326
327 result = kstrtobool(page, &tmp);
328 if (result < 0)
329 return result;
330
331 *val = tmp;
332 return count;
333 }
334
335 /* The following macro should only be used with TYPE = {uint, ulong, bool}. */
336 #define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY) \
337 static ssize_t \
338 nullb_device_##NAME##_show(struct config_item *item, char *page) \
339 { \
340 return nullb_device_##TYPE##_attr_show( \
341 to_nullb_device(item)->NAME, page); \
342 } \
343 static ssize_t \
344 nullb_device_##NAME##_store(struct config_item *item, const char *page, \
345 size_t count) \
346 { \
347 int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY;\
348 struct nullb_device *dev = to_nullb_device(item); \
349 TYPE new_value = 0; \
350 int ret; \
351 \
352 ret = nullb_device_##TYPE##_attr_store(&new_value, page, count);\
353 if (ret < 0) \
354 return ret; \
355 if (apply_fn) \
356 ret = apply_fn(dev, new_value); \
357 else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags)) \
358 ret = -EBUSY; \
359 if (ret < 0) \
360 return ret; \
361 dev->NAME = new_value; \
362 return count; \
363 } \
364 CONFIGFS_ATTR(nullb_device_, NAME);
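/*
 * For example, NULLB_DEVICE_ATTR(mbps, uint, NULL) expands to
 * nullb_device_mbps_show()/nullb_device_mbps_store() operating on dev->mbps;
 * with a NULL apply function the store is rejected with -EBUSY once the
 * device has been configured (powered on).
 */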
365
366 static int nullb_update_nr_hw_queues(struct nullb_device *dev,
367 unsigned int submit_queues,
368 unsigned int poll_queues)
369
370 {
371 struct blk_mq_tag_set *set;
372 int ret, nr_hw_queues;
373
374 if (!dev->nullb)
375 return 0;
376
377 /*
378 * Make sure at least one submit queue exists.
379 */
380 if (!submit_queues)
381 return -EINVAL;
382
383 /*
384 * Make sure that null_init_hctx() does not access nullb->queues[] past
385 * the end of that array.
386 */
387 if (submit_queues > nr_cpu_ids || poll_queues > g_poll_queues)
388 return -EINVAL;
389
390 /*
391 * Keep previous and new queue numbers in nullb_device for reference in
392 * the callback function null_map_queues().
393 */
394 dev->prev_submit_queues = dev->submit_queues;
395 dev->prev_poll_queues = dev->poll_queues;
396 dev->submit_queues = submit_queues;
397 dev->poll_queues = poll_queues;
398
399 set = dev->nullb->tag_set;
400 nr_hw_queues = submit_queues + poll_queues;
401 blk_mq_update_nr_hw_queues(set, nr_hw_queues);
402 ret = set->nr_hw_queues == nr_hw_queues ? 0 : -ENOMEM;
403
404 if (ret) {
405 /* on error, revert the queue numbers */
406 dev->submit_queues = dev->prev_submit_queues;
407 dev->poll_queues = dev->prev_poll_queues;
408 }
409
410 return ret;
411 }
412
413 static int nullb_apply_submit_queues(struct nullb_device *dev,
414 unsigned int submit_queues)
415 {
416 int ret;
417
418 mutex_lock(&lock);
419 ret = nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
420 mutex_unlock(&lock);
421
422 return ret;
423 }
424
425 static int nullb_apply_poll_queues(struct nullb_device *dev,
426 unsigned int poll_queues)
427 {
428 int ret;
429
430 mutex_lock(&lock);
431 ret = nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
432 mutex_unlock(&lock);
433
434 return ret;
435 }
436
437 NULLB_DEVICE_ATTR(size, ulong, NULL);
438 NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL);
439 NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues);
440 NULLB_DEVICE_ATTR(poll_queues, uint, nullb_apply_poll_queues);
441 NULLB_DEVICE_ATTR(home_node, uint, NULL);
442 NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
443 NULLB_DEVICE_ATTR(blocksize, uint, NULL);
444 NULLB_DEVICE_ATTR(max_sectors, uint, NULL);
445 NULLB_DEVICE_ATTR(irqmode, uint, NULL);
446 NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL);
447 NULLB_DEVICE_ATTR(index, uint, NULL);
448 NULLB_DEVICE_ATTR(blocking, bool, NULL);
449 NULLB_DEVICE_ATTR(use_per_node_hctx, bool, NULL);
450 NULLB_DEVICE_ATTR(memory_backed, bool, NULL);
451 NULLB_DEVICE_ATTR(discard, bool, NULL);
452 NULLB_DEVICE_ATTR(mbps, uint, NULL);
453 NULLB_DEVICE_ATTR(cache_size, ulong, NULL);
454 NULLB_DEVICE_ATTR(zoned, bool, NULL);
455 NULLB_DEVICE_ATTR(zone_size, ulong, NULL);
456 NULLB_DEVICE_ATTR(zone_capacity, ulong, NULL);
457 NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
458 NULLB_DEVICE_ATTR(zone_max_open, uint, NULL);
459 NULLB_DEVICE_ATTR(zone_max_active, uint, NULL);
460 NULLB_DEVICE_ATTR(zone_append_max_sectors, uint, NULL);
461 NULLB_DEVICE_ATTR(virt_boundary, bool, NULL);
462 NULLB_DEVICE_ATTR(no_sched, bool, NULL);
463 NULLB_DEVICE_ATTR(shared_tags, bool, NULL);
464 NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);
465 NULLB_DEVICE_ATTR(fua, bool, NULL);
466
467 static ssize_t nullb_device_power_show(struct config_item *item, char *page)
468 {
469 return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
470 }
471
472 static ssize_t nullb_device_power_store(struct config_item *item,
473 const char *page, size_t count)
474 {
475 struct nullb_device *dev = to_nullb_device(item);
476 bool newp = false;
477 ssize_t ret;
478
479 ret = nullb_device_bool_attr_store(&newp, page, count);
480 if (ret < 0)
481 return ret;
482
483 ret = count;
484 mutex_lock(&lock);
485 if (!dev->power && newp) {
486 if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
487 goto out;
488
489 ret = null_add_dev(dev);
490 if (ret) {
491 clear_bit(NULLB_DEV_FL_UP, &dev->flags);
492 goto out;
493 }
494
495 set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
496 dev->power = newp;
497 ret = count;
498 } else if (dev->power && !newp) {
499 if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
500 dev->power = newp;
501 null_del_dev(dev->nullb);
502 }
503 clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
504 }
505
506 out:
507 mutex_unlock(&lock);
508 return ret;
509 }
510
511 CONFIGFS_ATTR(nullb_device_, power);
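/*
 * Typical configfs usage (assuming configfs is mounted at
 * /sys/kernel/config and the device group is named "nullb0"):
 *
 *   mkdir /sys/kernel/config/nullb/nullb0
 *   echo 1 > /sys/kernel/config/nullb/nullb0/power    # instantiate the disk
 *   echo 0 > /sys/kernel/config/nullb/nullb0/power    # tear it down again
 */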
512
513 static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
514 {
515 struct nullb_device *t_dev = to_nullb_device(item);
516
517 return badblocks_show(&t_dev->badblocks, page, 0);
518 }
519
520 static ssize_t nullb_device_badblocks_store(struct config_item *item,
521 const char *page, size_t count)
522 {
523 struct nullb_device *t_dev = to_nullb_device(item);
524 char *orig, *buf, *tmp;
525 u64 start, end;
526 int ret;
527
528 orig = kstrndup(page, count, GFP_KERNEL);
529 if (!orig)
530 return -ENOMEM;
531
532 buf = strstrip(orig);
533
534 ret = -EINVAL;
535 if (buf[0] != '+' && buf[0] != '-')
536 goto out;
537 tmp = strchr(&buf[1], '-');
538 if (!tmp)
539 goto out;
540 *tmp = '\0';
541 ret = kstrtoull(buf + 1, 0, &start);
542 if (ret)
543 goto out;
544 ret = kstrtoull(tmp + 1, 0, &end);
545 if (ret)
546 goto out;
547 ret = -EINVAL;
548 if (start > end)
549 goto out;
550 /* enable badblocks */
551 cmpxchg(&t_dev->badblocks.shift, -1, 0);
552 if (buf[0] == '+')
553 ret = badblocks_set(&t_dev->badblocks, start,
554 end - start + 1, 1);
555 else
556 ret = badblocks_clear(&t_dev->badblocks, start,
557 end - start + 1);
558 if (ret == 0)
559 ret = count;
560 out:
561 kfree(orig);
562 return ret;
563 }
564 CONFIGFS_ATTR(nullb_device_, badblocks);
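/*
 * The badblocks attribute accepts "+<start>-<end>" to mark a sector range
 * bad and "-<start>-<end>" to clear it again, e.g. (path as in the power
 * example above):
 *
 *   echo "+0-1023" > /sys/kernel/config/nullb/nullb0/badblocks
 */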
565
566 static ssize_t nullb_device_zone_readonly_store(struct config_item *item,
567 const char *page, size_t count)
568 {
569 struct nullb_device *dev = to_nullb_device(item);
570
571 return zone_cond_store(dev, page, count, BLK_ZONE_COND_READONLY);
572 }
573 CONFIGFS_ATTR_WO(nullb_device_, zone_readonly);
574
575 static ssize_t nullb_device_zone_offline_store(struct config_item *item,
576 const char *page, size_t count)
577 {
578 struct nullb_device *dev = to_nullb_device(item);
579
580 return zone_cond_store(dev, page, count, BLK_ZONE_COND_OFFLINE);
581 }
582 CONFIGFS_ATTR_WO(nullb_device_, zone_offline);
583
584 static struct configfs_attribute *nullb_device_attrs[] = {
585 &nullb_device_attr_size,
586 &nullb_device_attr_completion_nsec,
587 &nullb_device_attr_submit_queues,
588 &nullb_device_attr_poll_queues,
589 &nullb_device_attr_home_node,
590 &nullb_device_attr_queue_mode,
591 &nullb_device_attr_blocksize,
592 &nullb_device_attr_max_sectors,
593 &nullb_device_attr_irqmode,
594 &nullb_device_attr_hw_queue_depth,
595 &nullb_device_attr_index,
596 &nullb_device_attr_blocking,
597 &nullb_device_attr_use_per_node_hctx,
598 &nullb_device_attr_power,
599 &nullb_device_attr_memory_backed,
600 &nullb_device_attr_discard,
601 &nullb_device_attr_mbps,
602 &nullb_device_attr_cache_size,
603 &nullb_device_attr_badblocks,
604 &nullb_device_attr_zoned,
605 &nullb_device_attr_zone_size,
606 &nullb_device_attr_zone_capacity,
607 &nullb_device_attr_zone_nr_conv,
608 &nullb_device_attr_zone_max_open,
609 &nullb_device_attr_zone_max_active,
610 &nullb_device_attr_zone_append_max_sectors,
611 &nullb_device_attr_zone_readonly,
612 &nullb_device_attr_zone_offline,
613 &nullb_device_attr_virt_boundary,
614 &nullb_device_attr_no_sched,
615 &nullb_device_attr_shared_tags,
616 &nullb_device_attr_shared_tag_bitmap,
617 &nullb_device_attr_fua,
618 NULL,
619 };
620
621 static void nullb_device_release(struct config_item *item)
622 {
623 struct nullb_device *dev = to_nullb_device(item);
624
625 null_free_device_storage(dev, false);
626 null_free_dev(dev);
627 }
628
629 static struct configfs_item_operations nullb_device_ops = {
630 .release = nullb_device_release,
631 };
632
633 static const struct config_item_type nullb_device_type = {
634 .ct_item_ops = &nullb_device_ops,
635 .ct_attrs = nullb_device_attrs,
636 .ct_owner = THIS_MODULE,
637 };
638
639 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
640
641 static void nullb_add_fault_config(struct nullb_device *dev)
642 {
643 fault_config_init(&dev->timeout_config, "timeout_inject");
644 fault_config_init(&dev->requeue_config, "requeue_inject");
645 fault_config_init(&dev->init_hctx_fault_config, "init_hctx_fault_inject");
646
647 configfs_add_default_group(&dev->timeout_config.group, &dev->group);
648 configfs_add_default_group(&dev->requeue_config.group, &dev->group);
649 configfs_add_default_group(&dev->init_hctx_fault_config.group, &dev->group);
650 }
651
652 #else
653
654 static void nullb_add_fault_config(struct nullb_device *dev)
655 {
656 }
657
658 #endif
659
660 static struct
661 config_group *nullb_group_make_group(struct config_group *group, const char *name)
662 {
663 struct nullb_device *dev;
664
665 if (null_find_dev_by_name(name))
666 return ERR_PTR(-EEXIST);
667
668 dev = null_alloc_dev();
669 if (!dev)
670 return ERR_PTR(-ENOMEM);
671
672 config_group_init_type_name(&dev->group, name, &nullb_device_type);
673 nullb_add_fault_config(dev);
674
675 return &dev->group;
676 }
677
678 static void
679 nullb_group_drop_item(struct config_group *group, struct config_item *item)
680 {
681 struct nullb_device *dev = to_nullb_device(item);
682
683 if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
684 mutex_lock(&lock);
685 dev->power = false;
686 null_del_dev(dev->nullb);
687 mutex_unlock(&lock);
688 }
689
690 config_item_put(item);
691 }
692
693 static ssize_t memb_group_features_show(struct config_item *item, char *page)
694 {
695 return snprintf(page, PAGE_SIZE,
696 "badblocks,blocking,blocksize,cache_size,fua,"
697 "completion_nsec,discard,home_node,hw_queue_depth,"
698 "irqmode,max_sectors,mbps,memory_backed,no_sched,"
699 "poll_queues,power,queue_mode,shared_tag_bitmap,"
700 "shared_tags,size,submit_queues,use_per_node_hctx,"
701 "virt_boundary,zoned,zone_capacity,zone_max_active,"
702 "zone_max_open,zone_nr_conv,zone_offline,zone_readonly,"
703 "zone_size,zone_append_max_sectors\n");
704 }
705
706 CONFIGFS_ATTR_RO(memb_group_, features);
707
708 static struct configfs_attribute *nullb_group_attrs[] = {
709 &memb_group_attr_features,
710 NULL,
711 };
712
713 static struct configfs_group_operations nullb_group_ops = {
714 .make_group = nullb_group_make_group,
715 .drop_item = nullb_group_drop_item,
716 };
717
718 static const struct config_item_type nullb_group_type = {
719 .ct_group_ops = &nullb_group_ops,
720 .ct_attrs = nullb_group_attrs,
721 .ct_owner = THIS_MODULE,
722 };
723
724 static struct configfs_subsystem nullb_subsys = {
725 .su_group = {
726 .cg_item = {
727 .ci_namebuf = "nullb",
728 .ci_type = &nullb_group_type,
729 },
730 },
731 };
732
733 static inline int null_cache_active(struct nullb *nullb)
734 {
735 return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
736 }
737
738 static struct nullb_device *null_alloc_dev(void)
739 {
740 struct nullb_device *dev;
741
742 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
743 if (!dev)
744 return NULL;
745
746 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
747 dev->timeout_config.attr = null_timeout_attr;
748 dev->requeue_config.attr = null_requeue_attr;
749 dev->init_hctx_fault_config.attr = null_init_hctx_attr;
750 #endif
751
752 INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
753 INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
754 if (badblocks_init(&dev->badblocks, 0)) {
755 kfree(dev);
756 return NULL;
757 }
758
759 dev->size = g_gb * 1024;
760 dev->completion_nsec = g_completion_nsec;
761 dev->submit_queues = g_submit_queues;
762 dev->prev_submit_queues = g_submit_queues;
763 dev->poll_queues = g_poll_queues;
764 dev->prev_poll_queues = g_poll_queues;
765 dev->home_node = g_home_node;
766 dev->queue_mode = g_queue_mode;
767 dev->blocksize = g_bs;
768 dev->max_sectors = g_max_sectors;
769 dev->irqmode = g_irqmode;
770 dev->hw_queue_depth = g_hw_queue_depth;
771 dev->blocking = g_blocking;
772 dev->memory_backed = g_memory_backed;
773 dev->discard = g_discard;
774 dev->cache_size = g_cache_size;
775 dev->mbps = g_mbps;
776 dev->use_per_node_hctx = g_use_per_node_hctx;
777 dev->zoned = g_zoned;
778 dev->zone_size = g_zone_size;
779 dev->zone_capacity = g_zone_capacity;
780 dev->zone_nr_conv = g_zone_nr_conv;
781 dev->zone_max_open = g_zone_max_open;
782 dev->zone_max_active = g_zone_max_active;
783 dev->zone_append_max_sectors = g_zone_append_max_sectors;
784 dev->virt_boundary = g_virt_boundary;
785 dev->no_sched = g_no_sched;
786 dev->shared_tags = g_shared_tags;
787 dev->shared_tag_bitmap = g_shared_tag_bitmap;
788 dev->fua = g_fua;
789
790 return dev;
791 }
792
793 static void null_free_dev(struct nullb_device *dev)
794 {
795 if (!dev)
796 return;
797
798 null_free_zoned_dev(dev);
799 badblocks_exit(&dev->badblocks);
800 kfree(dev);
801 }
802
803 static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
804 {
805 struct nullb_cmd *cmd = container_of(timer, struct nullb_cmd, timer);
806
807 blk_mq_end_request(blk_mq_rq_from_pdu(cmd), cmd->error);
808 return HRTIMER_NORESTART;
809 }
810
811 static void null_cmd_end_timer(struct nullb_cmd *cmd)
812 {
813 ktime_t kt = cmd->nq->dev->completion_nsec;
814
815 hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
816 }
817
818 static void null_complete_rq(struct request *rq)
819 {
820 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
821
822 blk_mq_end_request(rq, cmd->error);
823 }
824
825 static struct nullb_page *null_alloc_page(void)
826 {
827 struct nullb_page *t_page;
828
829 t_page = kmalloc(sizeof(struct nullb_page), GFP_NOIO);
830 if (!t_page)
831 return NULL;
832
833 t_page->page = alloc_pages(GFP_NOIO, 0);
834 if (!t_page->page) {
835 kfree(t_page);
836 return NULL;
837 }
838
839 memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
840 return t_page;
841 }
842
843 static void null_free_page(struct nullb_page *t_page)
844 {
845 __set_bit(NULLB_PAGE_FREE, t_page->bitmap);
846 if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
847 return;
848 __free_page(t_page->page);
849 kfree(t_page);
850 }
851
852 static bool null_page_empty(struct nullb_page *page)
853 {
854 int size = MAP_SZ - 2;
855
856 return find_first_bit(page->bitmap, size) == size;
857 }
858
859 static void null_free_sector(struct nullb *nullb, sector_t sector,
860 bool is_cache)
861 {
862 unsigned int sector_bit;
863 u64 idx;
864 struct nullb_page *t_page, *ret;
865 struct radix_tree_root *root;
866
867 root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
868 idx = sector >> PAGE_SECTORS_SHIFT;
869 sector_bit = (sector & SECTOR_MASK);
870
871 t_page = radix_tree_lookup(root, idx);
872 if (t_page) {
873 __clear_bit(sector_bit, t_page->bitmap);
874
875 if (null_page_empty(t_page)) {
876 ret = radix_tree_delete_item(root, idx, t_page);
877 WARN_ON(ret != t_page);
878 null_free_page(ret);
879 if (is_cache)
880 nullb->dev->curr_cache -= PAGE_SIZE;
881 }
882 }
883 }
884
885 static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
886 struct nullb_page *t_page, bool is_cache)
887 {
888 struct radix_tree_root *root;
889
890 root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
891
892 if (radix_tree_insert(root, idx, t_page)) {
893 null_free_page(t_page);
894 t_page = radix_tree_lookup(root, idx);
895 WARN_ON(!t_page || t_page->page->index != idx);
896 } else if (is_cache)
897 nullb->dev->curr_cache += PAGE_SIZE;
898
899 return t_page;
900 }
901
902 static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
903 {
904 unsigned long pos = 0;
905 int nr_pages;
906 struct nullb_page *ret, *t_pages[FREE_BATCH];
907 struct radix_tree_root *root;
908
909 root = is_cache ? &dev->cache : &dev->data;
910
911 do {
912 int i;
913
914 nr_pages = radix_tree_gang_lookup(root,
915 (void **)t_pages, pos, FREE_BATCH);
916
917 for (i = 0; i < nr_pages; i++) {
918 pos = t_pages[i]->page->index;
919 ret = radix_tree_delete_item(root, pos, t_pages[i]);
920 WARN_ON(ret != t_pages[i]);
921 null_free_page(ret);
922 }
923
924 pos++;
925 } while (nr_pages == FREE_BATCH);
926
927 if (is_cache)
928 dev->curr_cache = 0;
929 }
930
931 static struct nullb_page *__null_lookup_page(struct nullb *nullb,
932 sector_t sector, bool for_write, bool is_cache)
933 {
934 unsigned int sector_bit;
935 u64 idx;
936 struct nullb_page *t_page;
937 struct radix_tree_root *root;
938
939 idx = sector >> PAGE_SECTORS_SHIFT;
940 sector_bit = (sector & SECTOR_MASK);
941
942 root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
943 t_page = radix_tree_lookup(root, idx);
944 WARN_ON(t_page && t_page->page->index != idx);
945
946 if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
947 return t_page;
948
949 return NULL;
950 }
951
952 static struct nullb_page *null_lookup_page(struct nullb *nullb,
953 sector_t sector, bool for_write, bool ignore_cache)
954 {
955 struct nullb_page *page = NULL;
956
957 if (!ignore_cache)
958 page = __null_lookup_page(nullb, sector, for_write, true);
959 if (page)
960 return page;
961 return __null_lookup_page(nullb, sector, for_write, false);
962 }
963
964 static struct nullb_page *null_insert_page(struct nullb *nullb,
965 sector_t sector, bool ignore_cache)
966 __releases(&nullb->lock)
967 __acquires(&nullb->lock)
968 {
969 u64 idx;
970 struct nullb_page *t_page;
971
972 t_page = null_lookup_page(nullb, sector, true, ignore_cache);
973 if (t_page)
974 return t_page;
975
976 spin_unlock_irq(&nullb->lock);
977
978 t_page = null_alloc_page();
979 if (!t_page)
980 goto out_lock;
981
982 if (radix_tree_preload(GFP_NOIO))
983 goto out_freepage;
984
985 spin_lock_irq(&nullb->lock);
986 idx = sector >> PAGE_SECTORS_SHIFT;
987 t_page->page->index = idx;
988 t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
989 radix_tree_preload_end();
990
991 return t_page;
992 out_freepage:
993 null_free_page(t_page);
994 out_lock:
995 spin_lock_irq(&nullb->lock);
996 return null_lookup_page(nullb, sector, true, ignore_cache);
997 }
998
999 static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
1000 {
1001 int i;
1002 unsigned int offset;
1003 u64 idx;
1004 struct nullb_page *t_page, *ret;
1005 void *dst, *src;
1006
1007 idx = c_page->page->index;
1008
1009 t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
1010
1011 __clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
1012 if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
1013 null_free_page(c_page);
1014 if (t_page && null_page_empty(t_page)) {
1015 ret = radix_tree_delete_item(&nullb->dev->data,
1016 idx, t_page);
1017 null_free_page(t_page);
1018 }
1019 return 0;
1020 }
1021
1022 if (!t_page)
1023 return -ENOMEM;
1024
1025 src = kmap_local_page(c_page->page);
1026 dst = kmap_local_page(t_page->page);
1027
1028 for (i = 0; i < PAGE_SECTORS;
1029 i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
1030 if (test_bit(i, c_page->bitmap)) {
1031 offset = (i << SECTOR_SHIFT);
1032 memcpy(dst + offset, src + offset,
1033 nullb->dev->blocksize);
1034 __set_bit(i, t_page->bitmap);
1035 }
1036 }
1037
1038 kunmap_local(dst);
1039 kunmap_local(src);
1040
1041 ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
1042 null_free_page(ret);
1043 nullb->dev->curr_cache -= PAGE_SIZE;
1044
1045 return 0;
1046 }
1047
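/*
 * Flush dirty cache pages back to the data radix tree until at least @n more
 * bytes fit under the configured cache_size limit. Called with nullb->lock
 * held; null_flush_cache_page() may temporarily drop it.
 */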
1048 static int null_make_cache_space(struct nullb *nullb, unsigned long n)
1049 {
1050 int i, err, nr_pages;
1051 struct nullb_page *c_pages[FREE_BATCH];
1052 unsigned long flushed = 0, one_round;
1053
1054 again:
1055 if ((nullb->dev->cache_size * 1024 * 1024) >
1056 nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
1057 return 0;
1058
1059 nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
1060 (void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
1061 /*
1062 * null_flush_cache_page() could drop the lock before using the c_pages.
1063 * To avoid a race, we don't allow the pages to be freed.
1064 */
1065 for (i = 0; i < nr_pages; i++) {
1066 nullb->cache_flush_pos = c_pages[i]->page->index;
1067 /*
1068 * We found a page which is already being flushed to disk by
1069 * another thread.
1070 */
1071 if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
1072 c_pages[i] = NULL;
1073 else
1074 __set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
1075 }
1076
1077 one_round = 0;
1078 for (i = 0; i < nr_pages; i++) {
1079 if (c_pages[i] == NULL)
1080 continue;
1081 err = null_flush_cache_page(nullb, c_pages[i]);
1082 if (err)
1083 return err;
1084 one_round++;
1085 }
1086 flushed += one_round << PAGE_SHIFT;
1087
1088 if (n > flushed) {
1089 if (nr_pages == 0)
1090 nullb->cache_flush_pos = 0;
1091 if (one_round == 0) {
1092 /* give other threads a chance */
1093 spin_unlock_irq(&nullb->lock);
1094 spin_lock_irq(&nullb->lock);
1095 }
1096 goto again;
1097 }
1098 return 0;
1099 }
1100
1101 static int copy_to_nullb(struct nullb *nullb, struct page *source,
1102 unsigned int off, sector_t sector, size_t n, bool is_fua)
1103 {
1104 size_t temp, count = 0;
1105 unsigned int offset;
1106 struct nullb_page *t_page;
1107
1108 while (count < n) {
1109 temp = min_t(size_t, nullb->dev->blocksize, n - count);
1110
1111 if (null_cache_active(nullb) && !is_fua)
1112 null_make_cache_space(nullb, PAGE_SIZE);
1113
1114 offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
1115 t_page = null_insert_page(nullb, sector,
1116 !null_cache_active(nullb) || is_fua);
1117 if (!t_page)
1118 return -ENOSPC;
1119
1120 memcpy_page(t_page->page, offset, source, off + count, temp);
1121
1122 __set_bit(sector & SECTOR_MASK, t_page->bitmap);
1123
1124 if (is_fua)
1125 null_free_sector(nullb, sector, true);
1126
1127 count += temp;
1128 sector += temp >> SECTOR_SHIFT;
1129 }
1130 return 0;
1131 }
1132
1133 static int copy_from_nullb(struct nullb *nullb, struct page *dest,
1134 unsigned int off, sector_t sector, size_t n)
1135 {
1136 size_t temp, count = 0;
1137 unsigned int offset;
1138 struct nullb_page *t_page;
1139
1140 while (count < n) {
1141 temp = min_t(size_t, nullb->dev->blocksize, n - count);
1142
1143 offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
1144 t_page = null_lookup_page(nullb, sector, false,
1145 !null_cache_active(nullb));
1146
1147 if (t_page)
1148 memcpy_page(dest, off + count, t_page->page, offset,
1149 temp);
1150 else
1151 zero_user(dest, off + count, temp);
1152
1153 count += temp;
1154 sector += temp >> SECTOR_SHIFT;
1155 }
1156 return 0;
1157 }
1158
1159 static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
1160 unsigned int len, unsigned int off)
1161 {
1162 memset_page(page, off, 0xff, len);
1163 }
1164
1165 blk_status_t null_handle_discard(struct nullb_device *dev,
1166 sector_t sector, sector_t nr_sectors)
1167 {
1168 struct nullb *nullb = dev->nullb;
1169 size_t n = nr_sectors << SECTOR_SHIFT;
1170 size_t temp;
1171
1172 spin_lock_irq(&nullb->lock);
1173 while (n > 0) {
1174 temp = min_t(size_t, n, dev->blocksize);
1175 null_free_sector(nullb, sector, false);
1176 if (null_cache_active(nullb))
1177 null_free_sector(nullb, sector, true);
1178 sector += temp >> SECTOR_SHIFT;
1179 n -= temp;
1180 }
1181 spin_unlock_irq(&nullb->lock);
1182
1183 return BLK_STS_OK;
1184 }
1185
1186 static blk_status_t null_handle_flush(struct nullb *nullb)
1187 {
1188 int err;
1189
1190 if (!null_cache_active(nullb))
1191 return 0;
1192
1193 spin_lock_irq(&nullb->lock);
1194 while (true) {
1195 err = null_make_cache_space(nullb,
1196 nullb->dev->cache_size * 1024 * 1024);
1197 if (err || nullb->dev->curr_cache == 0)
1198 break;
1199 }
1200
1201 WARN_ON(!radix_tree_empty(&nullb->dev->cache));
1202 spin_unlock_irq(&nullb->lock);
1203 return errno_to_blk_status(err);
1204 }
1205
1206 static int null_transfer(struct nullb *nullb, struct page *page,
1207 unsigned int len, unsigned int off, bool is_write, sector_t sector,
1208 bool is_fua)
1209 {
1210 struct nullb_device *dev = nullb->dev;
1211 unsigned int valid_len = len;
1212 int err = 0;
1213
1214 if (!is_write) {
1215 if (dev->zoned)
1216 valid_len = null_zone_valid_read_len(nullb,
1217 sector, len);
1218
1219 if (valid_len) {
1220 err = copy_from_nullb(nullb, page, off,
1221 sector, valid_len);
1222 off += valid_len;
1223 len -= valid_len;
1224 }
1225
1226 if (len)
1227 nullb_fill_pattern(nullb, page, len, off);
1228 flush_dcache_page(page);
1229 } else {
1230 flush_dcache_page(page);
1231 err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
1232 }
1233
1234 return err;
1235 }
1236
1237 static blk_status_t null_handle_rq(struct nullb_cmd *cmd)
1238 {
1239 struct request *rq = blk_mq_rq_from_pdu(cmd);
1240 struct nullb *nullb = cmd->nq->dev->nullb;
1241 int err = 0;
1242 unsigned int len;
1243 sector_t sector = blk_rq_pos(rq);
1244 struct req_iterator iter;
1245 struct bio_vec bvec;
1246
1247 spin_lock_irq(&nullb->lock);
1248 rq_for_each_segment(bvec, rq, iter) {
1249 len = bvec.bv_len;
1250 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
1251 op_is_write(req_op(rq)), sector,
1252 rq->cmd_flags & REQ_FUA);
1253 if (err)
1254 break;
1255 sector += len >> SECTOR_SHIFT;
1256 }
1257 spin_unlock_irq(&nullb->lock);
1258
1259 return errno_to_blk_status(err);
1260 }
1261
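/*
 * Charge the request against the per-tick byte budget. If the budget goes
 * negative, stop the hardware queues and return BLK_STS_DEV_RESOURCE so that
 * blk-mq requeues the request; nullb_bwtimer_fn() refills the budget and
 * restarts the queues on the next tick.
 */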
1262 static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
1263 {
1264 struct nullb_device *dev = cmd->nq->dev;
1265 struct nullb *nullb = dev->nullb;
1266 blk_status_t sts = BLK_STS_OK;
1267 struct request *rq = blk_mq_rq_from_pdu(cmd);
1268
1269 if (!hrtimer_active(&nullb->bw_timer))
1270 hrtimer_restart(&nullb->bw_timer);
1271
1272 if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) {
1273 blk_mq_stop_hw_queues(nullb->q);
1274 /* race with timer */
1275 if (atomic_long_read(&nullb->cur_bytes) > 0)
1276 blk_mq_start_stopped_hw_queues(nullb->q, true);
1277 /* requeue request */
1278 sts = BLK_STS_DEV_RESOURCE;
1279 }
1280 return sts;
1281 }
1282
1283 static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
1284 sector_t sector,
1285 sector_t nr_sectors)
1286 {
1287 struct badblocks *bb = &cmd->nq->dev->badblocks;
1288 sector_t first_bad;
1289 int bad_sectors;
1290
1291 if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors))
1292 return BLK_STS_IOERR;
1293
1294 return BLK_STS_OK;
1295 }
1296
1297 static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
1298 enum req_op op,
1299 sector_t sector,
1300 sector_t nr_sectors)
1301 {
1302 struct nullb_device *dev = cmd->nq->dev;
1303
1304 if (op == REQ_OP_DISCARD)
1305 return null_handle_discard(dev, sector, nr_sectors);
1306
1307 return null_handle_rq(cmd);
1308 }
1309
1310 static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd)
1311 {
1312 struct request *rq = blk_mq_rq_from_pdu(cmd);
1313 struct nullb_device *dev = cmd->nq->dev;
1314 struct bio *bio;
1315
1316 if (!dev->memory_backed && req_op(rq) == REQ_OP_READ) {
1317 __rq_for_each_bio(bio, rq)
1318 zero_fill_bio(bio);
1319 }
1320 }
1321
1322 static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
1323 {
1324 struct request *rq = blk_mq_rq_from_pdu(cmd);
1325
1326 /*
1327 * Since root privileges are required to configure the null_blk
1328 * driver, it is fine that this driver does not initialize the
1329 * data buffers of read commands. Zero-initialize these buffers
1330 * anyway if KMSAN is enabled to prevent KMSAN from complaining
1331 * about null_blk not initializing read data buffers.
1332 */
1333 if (IS_ENABLED(CONFIG_KMSAN))
1334 nullb_zero_read_cmd_buffer(cmd);
1335
1336 /* Complete IO by inline, softirq or timer */
1337 switch (cmd->nq->dev->irqmode) {
1338 case NULL_IRQ_SOFTIRQ:
1339 blk_mq_complete_request(rq);
1340 break;
1341 case NULL_IRQ_NONE:
1342 blk_mq_end_request(rq, cmd->error);
1343 break;
1344 case NULL_IRQ_TIMER:
1345 null_cmd_end_timer(cmd);
1346 break;
1347 }
1348 }
1349
1350 blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
1351 sector_t sector, unsigned int nr_sectors)
1352 {
1353 struct nullb_device *dev = cmd->nq->dev;
1354 blk_status_t ret;
1355
1356 if (dev->badblocks.shift != -1) {
1357 ret = null_handle_badblocks(cmd, sector, nr_sectors);
1358 if (ret != BLK_STS_OK)
1359 return ret;
1360 }
1361
1362 if (dev->memory_backed)
1363 return null_handle_memory_backed(cmd, op, sector, nr_sectors);
1364
1365 return BLK_STS_OK;
1366 }
1367
1368 static void null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
1369 sector_t nr_sectors, enum req_op op)
1370 {
1371 struct nullb_device *dev = cmd->nq->dev;
1372 struct nullb *nullb = dev->nullb;
1373 blk_status_t sts;
1374
1375 if (op == REQ_OP_FLUSH) {
1376 cmd->error = null_handle_flush(nullb);
1377 goto out;
1378 }
1379
1380 if (dev->zoned)
1381 sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors);
1382 else
1383 sts = null_process_cmd(cmd, op, sector, nr_sectors);
1384
1385 /* Do not overwrite errors (e.g. timeout errors) */
1386 if (cmd->error == BLK_STS_OK)
1387 cmd->error = sts;
1388
1389 out:
1390 nullb_complete_cmd(cmd);
1391 }
1392
1393 static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
1394 {
1395 struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
1396 ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
1397 unsigned int mbps = nullb->dev->mbps;
1398
1399 if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
1400 return HRTIMER_NORESTART;
1401
1402 atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
1403 blk_mq_start_stopped_hw_queues(nullb->q, true);
1404
1405 hrtimer_forward_now(&nullb->bw_timer, timer_interval);
1406
1407 return HRTIMER_RESTART;
1408 }
1409
1410 static void nullb_setup_bwtimer(struct nullb *nullb)
1411 {
1412 ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
1413
1414 hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1415 nullb->bw_timer.function = nullb_bwtimer_fn;
1416 atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
1417 hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
1418 }
1419
1420 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
1421
1422 static bool should_timeout_request(struct request *rq)
1423 {
1424 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
1425 struct nullb_device *dev = cmd->nq->dev;
1426
1427 return should_fail(&dev->timeout_config.attr, 1);
1428 }
1429
1430 static bool should_requeue_request(struct request *rq)
1431 {
1432 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
1433 struct nullb_device *dev = cmd->nq->dev;
1434
1435 return should_fail(&dev->requeue_config.attr, 1);
1436 }
1437
1438 static bool should_init_hctx_fail(struct nullb_device *dev)
1439 {
1440 return should_fail(&dev->init_hctx_fault_config.attr, 1);
1441 }
1442
1443 #else
1444
1445 static bool should_timeout_request(struct request *rq)
1446 {
1447 return false;
1448 }
1449
1450 static bool should_requeue_request(struct request *rq)
1451 {
1452 return false;
1453 }
1454
1455 static bool should_init_hctx_fail(struct nullb_device *dev)
1456 {
1457 return false;
1458 }
1459
1460 #endif
1461
1462 static void null_map_queues(struct blk_mq_tag_set *set)
1463 {
1464 struct nullb *nullb = set->driver_data;
1465 int i, qoff;
1466 unsigned int submit_queues = g_submit_queues;
1467 unsigned int poll_queues = g_poll_queues;
1468
1469 if (nullb) {
1470 struct nullb_device *dev = nullb->dev;
1471
1472 /*
1473 * Check nr_hw_queues of the tag set to see whether the expected
1474 * number of hardware queues was prepared. If the block layer failed
1475 * to prepare them, use the previous numbers of submit queues and
1476 * poll queues to map queues.
1477 */
1478 if (set->nr_hw_queues ==
1479 dev->submit_queues + dev->poll_queues) {
1480 submit_queues = dev->submit_queues;
1481 poll_queues = dev->poll_queues;
1482 } else if (set->nr_hw_queues ==
1483 dev->prev_submit_queues + dev->prev_poll_queues) {
1484 submit_queues = dev->prev_submit_queues;
1485 poll_queues = dev->prev_poll_queues;
1486 } else {
1487 pr_warn("tag set has unexpected nr_hw_queues: %d\n",
1488 set->nr_hw_queues);
1489 WARN_ON_ONCE(true);
1490 submit_queues = 1;
1491 poll_queues = 0;
1492 }
1493 }
1494
1495 for (i = 0, qoff = 0; i < set->nr_maps; i++) {
1496 struct blk_mq_queue_map *map = &set->map[i];
1497
1498 switch (i) {
1499 case HCTX_TYPE_DEFAULT:
1500 map->nr_queues = submit_queues;
1501 break;
1502 case HCTX_TYPE_READ:
1503 map->nr_queues = 0;
1504 continue;
1505 case HCTX_TYPE_POLL:
1506 map->nr_queues = poll_queues;
1507 break;
1508 }
1509 map->queue_offset = qoff;
1510 qoff += map->nr_queues;
1511 blk_mq_map_queues(map);
1512 }
1513 }
1514
1515 static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
1516 {
1517 struct nullb_queue *nq = hctx->driver_data;
1518 LIST_HEAD(list);
1519 int nr = 0;
1520 struct request *rq;
1521
1522 spin_lock(&nq->poll_lock);
1523 list_splice_init(&nq->poll_list, &list);
1524 list_for_each_entry(rq, &list, queuelist)
1525 blk_mq_set_request_complete(rq);
1526 spin_unlock(&nq->poll_lock);
1527
1528 while (!list_empty(&list)) {
1529 struct nullb_cmd *cmd;
1530 struct request *req;
1531
1532 req = list_first_entry(&list, struct request, queuelist);
1533 list_del_init(&req->queuelist);
1534 cmd = blk_mq_rq_to_pdu(req);
1535 cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
1536 blk_rq_sectors(req));
1537 if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
1538 blk_mq_end_request_batch))
1539 blk_mq_end_request(req, cmd->error);
1540 nr++;
1541 }
1542
1543 return nr;
1544 }
1545
1546 static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
1547 {
1548 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1549 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
1550
1551 if (hctx->type == HCTX_TYPE_POLL) {
1552 struct nullb_queue *nq = hctx->driver_data;
1553
1554 spin_lock(&nq->poll_lock);
1555 /* The request may have completed meanwhile. */
1556 if (blk_mq_request_completed(rq)) {
1557 spin_unlock(&nq->poll_lock);
1558 return BLK_EH_DONE;
1559 }
1560 list_del_init(&rq->queuelist);
1561 spin_unlock(&nq->poll_lock);
1562 }
1563
1564 pr_info("rq %p timed out\n", rq);
1565
1566 /*
1567 * If the device is marked as blocking (i.e. memory backed or zoned
1568 * device), the submission path may be blocked waiting for resources
1569 * and cause real timeouts. For these real timeouts, the submission
1570 * path will complete the request using blk_mq_complete_request().
1571 * Only fake timeouts need to execute blk_mq_complete_request() here.
1572 */
1573 cmd->error = BLK_STS_TIMEOUT;
1574 if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL)
1575 blk_mq_complete_request(rq);
1576 return BLK_EH_DONE;
1577 }
1578
1579 static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
1580 const struct blk_mq_queue_data *bd)
1581 {
1582 struct request *rq = bd->rq;
1583 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
1584 struct nullb_queue *nq = hctx->driver_data;
1585 sector_t nr_sectors = blk_rq_sectors(rq);
1586 sector_t sector = blk_rq_pos(rq);
1587 const bool is_poll = hctx->type == HCTX_TYPE_POLL;
1588
1589 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1590
1591 if (!is_poll && nq->dev->irqmode == NULL_IRQ_TIMER) {
1592 hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1593 cmd->timer.function = null_cmd_timer_expired;
1594 }
1595 cmd->error = BLK_STS_OK;
1596 cmd->nq = nq;
1597 cmd->fake_timeout = should_timeout_request(rq) ||
1598 blk_should_fake_timeout(rq->q);
1599
1600 if (should_requeue_request(rq)) {
1601 /*
1602 * Alternate between hitting the core BUSY path and the
1603 * driver-driven requeue path.
1604 */
1605 nq->requeue_selection++;
1606 if (nq->requeue_selection & 1)
1607 return BLK_STS_RESOURCE;
1608 blk_mq_requeue_request(rq, true);
1609 return BLK_STS_OK;
1610 }
1611
1612 if (test_bit(NULLB_DEV_FL_THROTTLED, &nq->dev->flags)) {
1613 blk_status_t sts = null_handle_throttled(cmd);
1614
1615 if (sts != BLK_STS_OK)
1616 return sts;
1617 }
1618
1619 blk_mq_start_request(rq);
1620
1621 if (is_poll) {
1622 spin_lock(&nq->poll_lock);
1623 list_add_tail(&rq->queuelist, &nq->poll_list);
1624 spin_unlock(&nq->poll_lock);
1625 return BLK_STS_OK;
1626 }
1627 if (cmd->fake_timeout)
1628 return BLK_STS_OK;
1629
1630 null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
1631 return BLK_STS_OK;
1632 }
1633
1634 static void null_queue_rqs(struct request **rqlist)
1635 {
1636 struct request *requeue_list = NULL;
1637 struct request **requeue_lastp = &requeue_list;
1638 struct blk_mq_queue_data bd = { };
1639 blk_status_t ret;
1640
1641 do {
1642 struct request *rq = rq_list_pop(rqlist);
1643
1644 bd.rq = rq;
1645 ret = null_queue_rq(rq->mq_hctx, &bd);
1646 if (ret != BLK_STS_OK)
1647 rq_list_add_tail(&requeue_lastp, rq);
1648 } while (!rq_list_empty(*rqlist));
1649
1650 *rqlist = requeue_list;
1651 }
1652
1653 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
1654 {
1655 nq->dev = nullb->dev;
1656 INIT_LIST_HEAD(&nq->poll_list);
1657 spin_lock_init(&nq->poll_lock);
1658 }
1659
1660 static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
1661 unsigned int hctx_idx)
1662 {
1663 struct nullb *nullb = hctx->queue->queuedata;
1664 struct nullb_queue *nq;
1665
1666 if (should_init_hctx_fail(nullb->dev))
1667 return -EFAULT;
1668
1669 nq = &nullb->queues[hctx_idx];
1670 hctx->driver_data = nq;
1671 null_init_queue(nullb, nq);
1672
1673 return 0;
1674 }
1675
1676 static const struct blk_mq_ops null_mq_ops = {
1677 .queue_rq = null_queue_rq,
1678 .queue_rqs = null_queue_rqs,
1679 .complete = null_complete_rq,
1680 .timeout = null_timeout_rq,
1681 .poll = null_poll,
1682 .map_queues = null_map_queues,
1683 .init_hctx = null_init_hctx,
1684 };
1685
1686 static void null_del_dev(struct nullb *nullb)
1687 {
1688 struct nullb_device *dev;
1689
1690 if (!nullb)
1691 return;
1692
1693 dev = nullb->dev;
1694
1695 ida_free(&nullb_indexes, nullb->index);
1696
1697 list_del_init(&nullb->list);
1698
1699 del_gendisk(nullb->disk);
1700
1701 if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
1702 hrtimer_cancel(&nullb->bw_timer);
1703 atomic_long_set(&nullb->cur_bytes, LONG_MAX);
1704 blk_mq_start_stopped_hw_queues(nullb->q, true);
1705 }
1706
1707 put_disk(nullb->disk);
1708 if (nullb->tag_set == &nullb->__tag_set)
1709 blk_mq_free_tag_set(nullb->tag_set);
1710 kfree(nullb->queues);
1711 if (null_cache_active(nullb))
1712 null_free_device_storage(nullb->dev, true);
1713 kfree(nullb);
1714 dev->nullb = NULL;
1715 }
1716
1717 static void null_config_discard(struct nullb *nullb, struct queue_limits *lim)
1718 {
1719 if (nullb->dev->discard == false)
1720 return;
1721
1722 if (!nullb->dev->memory_backed) {
1723 nullb->dev->discard = false;
1724 pr_info("discard option is ignored without memory backing\n");
1725 return;
1726 }
1727
1728 if (nullb->dev->zoned) {
1729 nullb->dev->discard = false;
1730 pr_info("discard option is ignored in zoned mode\n");
1731 return;
1732 }
1733
1734 lim->max_hw_discard_sectors = UINT_MAX >> 9;
1735 }
1736
1737 static const struct block_device_operations null_ops = {
1738 .owner = THIS_MODULE,
1739 .report_zones = null_report_zones,
1740 };
1741
1742 static int setup_queues(struct nullb *nullb)
1743 {
1744 int nqueues = nr_cpu_ids;
1745
1746 if (g_poll_queues)
1747 nqueues += g_poll_queues;
1748
1749 nullb->queues = kcalloc(nqueues, sizeof(struct nullb_queue),
1750 GFP_KERNEL);
1751 if (!nullb->queues)
1752 return -ENOMEM;
1753
1754 return 0;
1755 }
1756
1757 static int null_init_tag_set(struct blk_mq_tag_set *set, int poll_queues)
1758 {
1759 set->ops = &null_mq_ops;
1760 set->cmd_size = sizeof(struct nullb_cmd);
1761 set->timeout = 5 * HZ;
1762 set->nr_maps = 1;
1763 if (poll_queues) {
1764 set->nr_hw_queues += poll_queues;
1765 set->nr_maps += 2;
1766 }
1767 return blk_mq_alloc_tag_set(set);
1768 }
1769
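/*
 * Lazily set up the tag set shared by all devices created with shared_tags;
 * once tag_set.ops is set, subsequent callers reuse the existing tag set.
 */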
static int null_init_global_tag_set(void)
{
	int error;

	if (tag_set.ops)
		return 0;

	tag_set.nr_hw_queues = g_submit_queues;
	tag_set.queue_depth = g_hw_queue_depth;
	tag_set.numa_node = g_home_node;
	tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	if (g_no_sched)
		tag_set.flags |= BLK_MQ_F_NO_SCHED;
	if (g_shared_tag_bitmap)
		tag_set.flags |= BLK_MQ_F_TAG_HCTX_SHARED;
	if (g_blocking)
		tag_set.flags |= BLK_MQ_F_BLOCKING;

	error = null_init_tag_set(&tag_set, g_poll_queues);
	if (error)
		tag_set.ops = NULL;
	return error;
}

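/*
 * Attach either the global shared tag set or a per-device tag set built
 * from the device's own configuration.
 */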
static int null_setup_tagset(struct nullb *nullb)
{
	if (nullb->dev->shared_tags) {
		nullb->tag_set = &tag_set;
		return null_init_global_tag_set();
	}

	nullb->tag_set = &nullb->__tag_set;
	nullb->tag_set->driver_data = nullb;
	nullb->tag_set->nr_hw_queues = nullb->dev->submit_queues;
	nullb->tag_set->queue_depth = nullb->dev->hw_queue_depth;
	nullb->tag_set->numa_node = nullb->dev->home_node;
	nullb->tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
	if (nullb->dev->no_sched)
		nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED;
	if (nullb->dev->shared_tag_bitmap)
		nullb->tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
	if (nullb->dev->blocking)
		nullb->tag_set->flags |= BLK_MQ_F_BLOCKING;
	return null_init_tag_set(nullb->tag_set, nullb->dev->poll_queues);
}

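/*
 * Sanity-check and clamp a device configuration before it is applied:
 * reject removed queue modes, validate the block size, bound the submit
 * and poll queue counts, force blocking mode for memory-backed devices,
 * and require a power-of-two zone size for zoned devices.
 */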
static int null_validate_conf(struct nullb_device *dev)
{
	if (dev->queue_mode == NULL_Q_RQ) {
		pr_err("legacy IO path is no longer available\n");
		return -EINVAL;
	}
	if (dev->queue_mode == NULL_Q_BIO) {
		pr_err("BIO-based IO path is no longer available, using blk-mq instead.\n");
		dev->queue_mode = NULL_Q_MQ;
	}

	if (blk_validate_block_size(dev->blocksize))
		return -EINVAL;

	if (dev->use_per_node_hctx) {
		if (dev->submit_queues != nr_online_nodes)
			dev->submit_queues = nr_online_nodes;
	} else if (dev->submit_queues > nr_cpu_ids)
		dev->submit_queues = nr_cpu_ids;
	else if (dev->submit_queues == 0)
		dev->submit_queues = 1;
	dev->prev_submit_queues = dev->submit_queues;

	if (dev->poll_queues > g_poll_queues)
		dev->poll_queues = g_poll_queues;
	dev->prev_poll_queues = dev->poll_queues;
	dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);

	/* Do memory allocation, so set blocking */
	if (dev->memory_backed)
		dev->blocking = true;
	else /* cache is meaningless */
		dev->cache_size = 0;
	dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
				dev->cache_size);
	dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);

	if (dev->zoned &&
	    (!dev->zone_size || !is_power_of_2(dev->zone_size))) {
		pr_err("zone_size must be power-of-two\n");
		return -EINVAL;
	}

	return 0;
}

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static bool __null_setup_fault(struct fault_attr *attr, char *str)
{
	if (!str[0])
		return true;

	if (!setup_fault_attr(attr, str))
		return false;

	attr->verbose = 0;
	return true;
}
#endif

static bool null_setup_fault(void)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
		return false;
	if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
		return false;
	if (!__null_setup_fault(&null_init_hctx_attr, g_init_hctx_str))
		return false;
#endif
	return true;
}

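/*
 * Create and register one nullb device from a validated configuration:
 * allocate the queues and tag set, fill in the queue limits (discard,
 * zoned, virtual boundary), allocate the gendisk, and add it to the system
 * under either the configfs item name or "nullb<index>".
 */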
static int null_add_dev(struct nullb_device *dev)
{
	struct queue_limits lim = {
		.logical_block_size	= dev->blocksize,
		.physical_block_size	= dev->blocksize,
		.max_hw_sectors		= dev->max_sectors,
	};
	struct nullb *nullb;
	int rv;

	rv = null_validate_conf(dev);
	if (rv)
		return rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}
	nullb->dev = dev;
	dev->nullb = nullb;

	spin_lock_init(&nullb->lock);

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	rv = null_setup_tagset(nullb);
	if (rv)
		goto out_cleanup_queues;

	if (dev->virt_boundary)
		lim.virt_boundary_mask = PAGE_SIZE - 1;
	null_config_discard(nullb, &lim);
	if (dev->zoned) {
		rv = null_init_zoned_dev(dev, &lim);
		if (rv)
			goto out_cleanup_tags;
	}

	nullb->disk = blk_mq_alloc_disk(nullb->tag_set, &lim, nullb);
	if (IS_ERR(nullb->disk)) {
		rv = PTR_ERR(nullb->disk);
		goto out_cleanup_zone;
	}
	nullb->q = nullb->disk->queue;

	if (dev->mbps) {
		set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
		nullb_setup_bwtimer(nullb);
	}

	if (dev->cache_size > 0) {
		set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
		blk_queue_write_cache(nullb->q, true, dev->fua);
	}

	nullb->q->queuedata = nullb;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);

	rv = ida_alloc(&nullb_indexes, GFP_KERNEL);
	if (rv < 0)
		goto out_cleanup_disk;

	nullb->index = rv;
	dev->index = rv;

	if (config_item_name(&dev->group.cg_item)) {
		/* Use configfs dir name as the device name */
		snprintf(nullb->disk_name, sizeof(nullb->disk_name),
			 "%s", config_item_name(&dev->group.cg_item));
	} else {
		sprintf(nullb->disk_name, "nullb%d", nullb->index);
	}

	set_capacity(nullb->disk,
		((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT);
	nullb->disk->major = null_major;
	nullb->disk->first_minor = nullb->index;
	nullb->disk->minors = 1;
	nullb->disk->fops = &null_ops;
	nullb->disk->private_data = nullb;
	strscpy_pad(nullb->disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	if (nullb->dev->zoned) {
		rv = null_register_zoned_dev(nullb);
		if (rv)
			goto out_ida_free;
	}

	rv = add_disk(nullb->disk);
	if (rv)
		goto out_ida_free;

	list_add_tail(&nullb->list, &nullb_list);

	pr_info("disk %s created\n", nullb->disk_name);

	return 0;

out_ida_free:
	ida_free(&nullb_indexes, nullb->index);
out_cleanup_disk:
	put_disk(nullb->disk);
out_cleanup_zone:
	null_free_zoned_dev(dev);
out_cleanup_tags:
	if (nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
	kfree(nullb->queues);
out_free_nullb:
	kfree(nullb);
	dev->nullb = NULL;
out:
	return rv;
}

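/* Look up a live nullb device by gendisk name, under the global lock. */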
static struct nullb *null_find_dev_by_name(const char *name)
{
	struct nullb *nullb = NULL, *nb;

	mutex_lock(&lock);
	list_for_each_entry(nb, &nullb_list, list) {
		if (strcmp(nb->disk_name, name) == 0) {
			nullb = nb;
			break;
		}
	}
	mutex_unlock(&lock);

	return nullb;
}

static int null_create_dev(void)
{
	struct nullb_device *dev;
	int ret;

	dev = null_alloc_dev();
	if (!dev)
		return -ENOMEM;

	mutex_lock(&lock);
	ret = null_add_dev(dev);
	mutex_unlock(&lock);
	if (ret) {
		null_free_dev(dev);
		return ret;
	}

	return 0;
}

static void null_destroy_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;

	null_del_dev(nullb);
	null_free_device_storage(dev, false);
	null_free_dev(dev);
}

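/*
 * Module init: clamp global parameters, set up fault injection, register
 * the configfs subsystem and the "nullb" block major, then create the
 * initially requested number of devices.
 */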
static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;

	if (g_bs > PAGE_SIZE) {
		pr_warn("invalid block size\n");
		pr_warn("defaults block size to %lu\n", PAGE_SIZE);
		g_bs = PAGE_SIZE;
	}

	if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
		pr_err("invalid home_node value\n");
		g_home_node = NUMA_NO_NODE;
	}

	if (!null_setup_fault())
		return -EINVAL;

	if (g_queue_mode == NULL_Q_RQ) {
		pr_err("legacy IO path is no longer available\n");
		return -EINVAL;
	}

	if (g_use_per_node_hctx) {
		if (g_submit_queues != nr_online_nodes) {
			pr_warn("submit_queues param is set to %u.\n",
				nr_online_nodes);
			g_submit_queues = nr_online_nodes;
		}
	} else if (g_submit_queues > nr_cpu_ids) {
		g_submit_queues = nr_cpu_ids;
	} else if (g_submit_queues <= 0) {
		g_submit_queues = 1;
	}

	config_group_init(&nullb_subsys.su_group);
	mutex_init(&nullb_subsys.su_mutex);

	ret = configfs_register_subsystem(&nullb_subsys);
	if (ret)
		return ret;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0) {
		ret = null_major;
		goto err_conf;
	}

	for (i = 0; i < nr_devices; i++) {
		ret = null_create_dev();
		if (ret)
			goto err_dev;
	}

	pr_info("module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_destroy_dev(nullb);
	}
	unregister_blkdev(null_major, "nullb");
err_conf:
	configfs_unregister_subsystem(&nullb_subsys);
	return ret;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	configfs_unregister_subsystem(&nullb_subsys);

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_destroy_dev(nullb);
	}
	mutex_unlock(&lock);

	if (tag_set.ops)
		blk_mq_free_tag_set(&tag_set);

	mutex_destroy(&lock);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
MODULE_DESCRIPTION("multi queue aware block test driver");
MODULE_LICENSE("GPL");