1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * RDMA Network Block Driver
4 *
5 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
6 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
7 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
8 */
9
10 #undef pr_fmt
11 #define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
12
13 #include <linux/module.h>
14 #include <linux/blkdev.h>
15 #include <linux/hdreg.h>
16 #include <linux/scatterlist.h>
17 #include <linux/idr.h>
18
19 #include "rnbd-clt.h"
20
21 MODULE_DESCRIPTION("RDMA Network Block Device Client");
22 MODULE_LICENSE("GPL");
23
24 static int rnbd_client_major;
25 static DEFINE_IDA(index_ida);
26 static DEFINE_MUTEX(ida_lock);
27 static DEFINE_MUTEX(sess_lock);
28 static LIST_HEAD(sess_list);
29
30 /*
31 * Maximum number of partitions an instance can have.
32 * 6 bits = 64 minors = 63 partitions (one minor is used for the device itself)
33 */
34 #define RNBD_PART_BITS 6
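/*
 * E.g. with RNBD_PART_BITS == 6 the first mapped device owns minors 0-63:
 * minor 0 is the whole disk (rnbd0), minors 1-63 are its partitions.
 */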
35
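/* Take a session reference; fails if the last reference has already been dropped. */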
36 static inline bool rnbd_clt_get_sess(struct rnbd_clt_session *sess)
37 {
38 return refcount_inc_not_zero(&sess->refcount);
39 }
40
41 static void free_sess(struct rnbd_clt_session *sess);
42
43 static void rnbd_clt_put_sess(struct rnbd_clt_session *sess)
44 {
45 might_sleep();
46
47 if (refcount_dec_and_test(&sess->refcount))
48 free_sess(sess);
49 }
50
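/*
 * Drop a device reference. On the last put the minor index is released,
 * the device, its queues and pathname are freed, and the session
 * reference held by the device is dropped.
 */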
51 static void rnbd_clt_put_dev(struct rnbd_clt_dev *dev)
52 {
53 might_sleep();
54
55 if (!refcount_dec_and_test(&dev->refcount))
56 return;
57
58 mutex_lock(&ida_lock);
59 ida_simple_remove(&index_ida, dev->clt_device_id);
60 mutex_unlock(&ida_lock);
61 kfree(dev->hw_queues);
62 kfree(dev->pathname);
63 rnbd_clt_put_sess(dev->sess);
64 mutex_destroy(&dev->lock);
65 kfree(dev);
66 }
67
68 static inline bool rnbd_clt_get_dev(struct rnbd_clt_dev *dev)
69 {
70 return refcount_inc_not_zero(&dev->refcount);
71 }
72
73 static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
74 const struct rnbd_msg_open_rsp *rsp)
75 {
76 struct rnbd_clt_session *sess = dev->sess;
77
78 if (!rsp->logical_block_size)
79 return -EINVAL;
80
81 dev->device_id = le32_to_cpu(rsp->device_id);
82 dev->nsectors = le64_to_cpu(rsp->nsectors);
83 dev->logical_block_size = le16_to_cpu(rsp->logical_block_size);
84 dev->physical_block_size = le16_to_cpu(rsp->physical_block_size);
85 dev->max_write_same_sectors = le32_to_cpu(rsp->max_write_same_sectors);
86 dev->max_discard_sectors = le32_to_cpu(rsp->max_discard_sectors);
87 dev->discard_granularity = le32_to_cpu(rsp->discard_granularity);
88 dev->discard_alignment = le32_to_cpu(rsp->discard_alignment);
89 dev->secure_discard = le16_to_cpu(rsp->secure_discard);
90 dev->rotational = rsp->rotational;
91 dev->wc = !!(rsp->cache_policy & RNBD_WRITEBACK);
92 dev->fua = !!(rsp->cache_policy & RNBD_FUA);
93
94 dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE;
95 dev->max_segments = BMAX_SEGMENTS;
96
97 return 0;
98 }
99
100 static int rnbd_clt_change_capacity(struct rnbd_clt_dev *dev,
101 size_t new_nsectors)
102 {
103 rnbd_clt_info(dev, "Device size changed from %zu to %zu sectors\n",
104 dev->nsectors, new_nsectors);
105 dev->nsectors = new_nsectors;
106 set_capacity_and_notify(dev->gd, dev->nsectors);
107 return 0;
108 }
109
110 static int process_msg_open_rsp(struct rnbd_clt_dev *dev,
111 struct rnbd_msg_open_rsp *rsp)
112 {
113 struct kobject *gd_kobj;
114 int err = 0;
115
116 mutex_lock(&dev->lock);
117 if (dev->dev_state == DEV_STATE_UNMAPPED) {
118 rnbd_clt_info(dev,
119 "Ignoring Open-Response message from server for unmapped device\n");
120 err = -ENOENT;
121 goto out;
122 }
123 if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED) {
124 u64 nsectors = le64_to_cpu(rsp->nsectors);
125
126 /*
127 * If the device was remapped and the size changed in the
128 * meantime we need to revalidate it
129 */
130 if (dev->nsectors != nsectors)
131 rnbd_clt_change_capacity(dev, nsectors);
132 gd_kobj = &disk_to_dev(dev->gd)->kobj;
133 kobject_uevent(gd_kobj, KOBJ_ONLINE);
134 rnbd_clt_info(dev, "Device online, device remapped successfully\n");
135 }
136 err = rnbd_clt_set_dev_attr(dev, rsp);
137 if (err)
138 goto out;
139 dev->dev_state = DEV_STATE_MAPPED;
140
141 out:
142 mutex_unlock(&dev->lock);
143
144 return err;
145 }
146
147 int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize)
148 {
149 int ret = 0;
150
151 mutex_lock(&dev->lock);
152 if (dev->dev_state != DEV_STATE_MAPPED) {
153 pr_err("Failed to set new size of the device, device is not opened\n");
154 ret = -ENOENT;
155 goto out;
156 }
157 ret = rnbd_clt_change_capacity(dev, newsize);
158
159 out:
160 mutex_unlock(&dev->lock);
161
162 return ret;
163 }
164
165 static inline void rnbd_clt_dev_requeue(struct rnbd_queue *q)
166 {
167 if (WARN_ON(!q->hctx))
168 return;
169
170 /* We can come here from interrupt, thus async=true */
171 blk_mq_run_hw_queue(q->hctx, true);
172 }
173
174 enum {
175 RNBD_DELAY_IFBUSY = -1,
176 };
177
178 /**
179 * rnbd_get_cpu_qlist() - finds a list with HW queues to be rerun
180 * @sess: Session to find a queue for
181 * @cpu: Cpu to start the search from
182 *
183 * Description:
184 * Each CPU has a list of HW queues which need to be rerun. If a list
185 * is not empty, it is marked with a bit in a bitmap. This function finds
186 * the first set bit, starting from @cpu, and returns the corresponding CPU list.
187 */
188 static struct rnbd_cpu_qlist *
189 rnbd_get_cpu_qlist(struct rnbd_clt_session *sess, int cpu)
190 {
191 int bit;
192
193 /* Search from cpu to nr_cpu_ids */
194 bit = find_next_bit(sess->cpu_queues_bm, nr_cpu_ids, cpu);
195 if (bit < nr_cpu_ids) {
196 return per_cpu_ptr(sess->cpu_queues, bit);
197 } else if (cpu != 0) {
198 /* Search from 0 to cpu */
199 bit = find_next_bit(sess->cpu_queues_bm, cpu, 0);
200 if (bit < cpu)
201 return per_cpu_ptr(sess->cpu_queues, bit);
202 }
203
204 return NULL;
205 }
206
207 static inline int nxt_cpu(int cpu)
208 {
209 return (cpu + 1) % nr_cpu_ids;
210 }
211
212 /**
213 * rnbd_rerun_if_needed() - rerun next queue marked as stopped
214 * @sess: Session to rerun a queue on
215 *
216 * Description:
217 * Each CPU has its own list of HW queues, which should be rerun.
218 * This function finds such a list, takes the list lock, picks up
219 * the first HW queue from the list and requeues it.
220 *
221 * Return:
222 * True if the queue was requeued, false otherwise.
223 *
224 * Context:
225 * Does not matter.
226 */
227 static bool rnbd_rerun_if_needed(struct rnbd_clt_session *sess)
228 {
229 struct rnbd_queue *q = NULL;
230 struct rnbd_cpu_qlist *cpu_q;
231 unsigned long flags;
232 int *cpup;
233
234 /*
235 * To keep fairness and not let other queues starve, we always
236 * try to wake up someone else in a round-robin manner. That of course
237 * increases latency, but every queue always gets a chance to be executed.
238 */
239 cpup = get_cpu_ptr(sess->cpu_rr);
240 for (cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(*cpup)); cpu_q;
241 cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(cpu_q->cpu))) {
242 if (!spin_trylock_irqsave(&cpu_q->requeue_lock, flags))
243 continue;
244 if (!test_bit(cpu_q->cpu, sess->cpu_queues_bm))
245 goto unlock;
246 q = list_first_entry_or_null(&cpu_q->requeue_list,
247 typeof(*q), requeue_list);
248 if (WARN_ON(!q))
249 goto clear_bit;
250 list_del_init(&q->requeue_list);
251 clear_bit_unlock(0, &q->in_list);
252
253 if (list_empty(&cpu_q->requeue_list)) {
254 /* Clear bit if nothing is left */
255 clear_bit:
256 clear_bit(cpu_q->cpu, sess->cpu_queues_bm);
257 }
258 unlock:
259 spin_unlock_irqrestore(&cpu_q->requeue_lock, flags);
260
261 if (q)
262 break;
263 }
264
265 /*
266 * Save the CPU that has just been requeued in the per-cpu var. Just
267 * incrementing it doesn't work, because rnbd_get_cpu_qlist() would
268 * always return the first CPU with something on the queue list when the
269 * value stored in the var is greater than the last CPU with something
270 * on the list.
271 */
272 if (cpu_q)
273 *cpup = cpu_q->cpu;
274 put_cpu_var(sess->cpu_rr);
275
276 if (q)
277 rnbd_clt_dev_requeue(q);
278
279 return q;
280 }
281
282 /**
283 * rnbd_rerun_all_if_idle() - rerun all queues left in the list if
284 * session is idling (there are no requests
285 * in-flight).
286 * @sess: Session to rerun the queues on
287 *
288 * Description:
289 * This function tries to rerun all stopped queues if there are no
290 * requests in-flight anymore. It addresses an obvious problem,
291 * when the number of tags is smaller than the number of queues (hctxs)
292 * which are stopped and put to sleep. If the last permit, which has just
293 * been put, does not wake up all remaining queues (hctxs), IO requests hang forever.
294 *
295 * That can happen when all permits, say N of them, have been exhausted
296 * from one CPU, and we have many block devices per session, say M.
297 * Each block device has its own queue (hctx) for each CPU, so eventually
298 * we can put that many queues (hctxs) to sleep: M x nr_cpu_ids.
299 * If the number of permits N < M x nr_cpu_ids, we finally get an IO hang.
300 *
301 * To avoid this hang, the last caller of rnbd_put_permit() (the
302 * one who observes sess->busy == 0) must wake up all remaining queues.
303 *
304 * Context:
305 * Does not matter.
306 */
307 static void rnbd_rerun_all_if_idle(struct rnbd_clt_session *sess)
308 {
309 bool requeued;
310
311 do {
312 requeued = rnbd_rerun_if_needed(sess);
313 } while (atomic_read(&sess->busy) == 0 && requeued);
314 }
315
316 static struct rtrs_permit *rnbd_get_permit(struct rnbd_clt_session *sess,
317 enum rtrs_clt_con_type con_type,
318 enum wait_type wait)
319 {
320 struct rtrs_permit *permit;
321
322 permit = rtrs_clt_get_permit(sess->rtrs, con_type, wait);
323 if (permit)
324 /* We have a subtle rare case here, when all permits can be
325 * consumed before the busy counter is increased. This is safe,
326 * because the loser will get NULL as a permit, observe a zero busy
327 * counter and immediately restart the queue itself.
328 */
329 atomic_inc(&sess->busy);
330
331 return permit;
332 }
333
334 static void rnbd_put_permit(struct rnbd_clt_session *sess,
335 struct rtrs_permit *permit)
336 {
337 rtrs_clt_put_permit(sess->rtrs, permit);
338 atomic_dec(&sess->busy);
339 /* Paired with rnbd_clt_dev_add_to_requeue(). Decrement first
340 * and then check queue bits.
341 */
342 smp_mb__after_atomic();
343 rnbd_rerun_all_if_idle(sess);
344 }
345
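/*
 * Allocate an rnbd_iu for an admin message: grabs an rtrs permit, sets up a
 * one-entry sg table and takes two references (see the comment below on how
 * they are dropped).
 */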
346 static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess,
347 enum rtrs_clt_con_type con_type,
348 enum wait_type wait)
349 {
350 struct rnbd_iu *iu;
351 struct rtrs_permit *permit;
352
353 iu = kzalloc(sizeof(*iu), GFP_KERNEL);
354 if (!iu)
355 return NULL;
356
357 permit = rnbd_get_permit(sess, con_type, wait);
358 if (!permit) {
359 kfree(iu);
360 return NULL;
361 }
362
363 iu->permit = permit;
364 /*
365 * The 1st reference is dropped after finishing sending a "user" message,
366 * the 2nd reference is dropped after the confirmation with the response
367 * has been returned.
368 * 1st and 2nd can happen in any order, so the rnbd_iu should be
369 * released (rtrs_permit returned to rtrs) only after both
370 * are finished.
371 */
372 atomic_set(&iu->refcount, 2);
373 init_waitqueue_head(&iu->comp.wait);
374 iu->comp.errno = INT_MAX;
375
376 if (sg_alloc_table(&iu->sgt, 1, GFP_KERNEL)) {
377 rnbd_put_permit(sess, permit);
378 kfree(iu);
379 return NULL;
380 }
381
382 return iu;
383 }
384
385 static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
386 {
387 if (atomic_dec_and_test(&iu->refcount)) {
388 sg_free_table(&iu->sgt);
389 rnbd_put_permit(sess, iu->permit);
390 kfree(iu);
391 }
392 }
393
394 static void rnbd_softirq_done_fn(struct request *rq)
395 {
396 struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
397 struct rnbd_clt_session *sess = dev->sess;
398 struct rnbd_iu *iu;
399
400 iu = blk_mq_rq_to_pdu(rq);
401 sg_free_table_chained(&iu->sgt, RNBD_INLINE_SG_CNT);
402 rnbd_put_permit(sess, iu->permit);
403 blk_mq_end_request(rq, errno_to_blk_status(iu->errno));
404 }
405
406 static void msg_io_conf(void *priv, int errno)
407 {
408 struct rnbd_iu *iu = priv;
409 struct rnbd_clt_dev *dev = iu->dev;
410 struct request *rq = iu->rq;
411 int rw = rq_data_dir(rq);
412
413 iu->errno = errno;
414
415 blk_mq_complete_request(rq);
416
417 if (errno)
418 rnbd_clt_info_rl(dev, "%s I/O failed with err: %d\n",
419 rw == READ ? "read" : "write", errno);
420 }
421
422 static void wake_up_iu_comp(struct rnbd_iu *iu, int errno)
423 {
424 iu->comp.errno = errno;
425 wake_up(&iu->comp.wait);
426 }
427
428 static void msg_conf(void *priv, int errno)
429 {
430 struct rnbd_iu *iu = priv;
431
432 iu->errno = errno;
433 schedule_work(&iu->work);
434 }
435
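/*
 * Send a user (admin) message over rtrs. @conf is the work function queued by
 * msg_conf() once rtrs confirms the request. If @wait is set, block until
 * that confirmation arrives and return its result through @errno.
 */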
436 static int send_usr_msg(struct rtrs_clt *rtrs, int dir,
437 struct rnbd_iu *iu, struct kvec *vec,
438 size_t len, struct scatterlist *sg, unsigned int sg_len,
439 void (*conf)(struct work_struct *work),
440 int *errno, int wait)
441 {
442 int err;
443 struct rtrs_clt_req_ops req_ops;
444
445 INIT_WORK(&iu->work, conf);
446 req_ops = (struct rtrs_clt_req_ops) {
447 .priv = iu,
448 .conf_fn = msg_conf,
449 };
450 err = rtrs_clt_request(dir, &req_ops, rtrs, iu->permit,
451 vec, 1, len, sg, sg_len);
452 if (!err && wait) {
453 wait_event(iu->comp.wait, iu->comp.errno != INT_MAX);
454 *errno = iu->comp.errno;
455 } else {
456 *errno = 0;
457 }
458
459 return err;
460 }
461
462 static void msg_close_conf(struct work_struct *work)
463 {
464 struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
465 struct rnbd_clt_dev *dev = iu->dev;
466
467 wake_up_iu_comp(iu, iu->errno);
468 rnbd_put_iu(dev->sess, iu);
469 rnbd_clt_put_dev(dev);
470 }
471
472 static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id,
473 enum wait_type wait)
474 {
475 struct rnbd_clt_session *sess = dev->sess;
476 struct rnbd_msg_close msg;
477 struct rnbd_iu *iu;
478 struct kvec vec = {
479 .iov_base = &msg,
480 .iov_len = sizeof(msg)
481 };
482 int err, errno;
483
484 iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
485 if (!iu)
486 return -ENOMEM;
487
488 iu->buf = NULL;
489 iu->dev = dev;
490
491 msg.hdr.type = cpu_to_le16(RNBD_MSG_CLOSE);
492 msg.device_id = cpu_to_le32(device_id);
493
494 WARN_ON(!rnbd_clt_get_dev(dev));
495 err = send_usr_msg(sess->rtrs, WRITE, iu, &vec, 0, NULL, 0,
496 msg_close_conf, &errno, wait);
497 if (err) {
498 rnbd_clt_put_dev(dev);
499 rnbd_put_iu(sess, iu);
500 } else {
501 err = errno;
502 }
503
504 rnbd_put_iu(sess, iu);
505 return err;
506 }
507
508 static void msg_open_conf(struct work_struct *work)
509 {
510 struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
511 struct rnbd_msg_open_rsp *rsp = iu->buf;
512 struct rnbd_clt_dev *dev = iu->dev;
513 int errno = iu->errno;
514
515 if (errno) {
516 rnbd_clt_err(dev,
517 "Opening failed, server responded: %d\n",
518 errno);
519 } else {
520 errno = process_msg_open_rsp(dev, rsp);
521 if (errno) {
522 u32 device_id = le32_to_cpu(rsp->device_id);
523 /*
524 * If the server thinks it's fine, but we fail to process the
525 * response, then be nice and send a close to the server.
526 */
527 send_msg_close(dev, device_id, RTRS_PERMIT_NOWAIT);
528 }
529 }
530 kfree(rsp);
531 wake_up_iu_comp(iu, errno);
532 rnbd_put_iu(dev->sess, iu);
533 rnbd_clt_put_dev(dev);
534 }
535
536 static void msg_sess_info_conf(struct work_struct *work)
537 {
538 struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
539 struct rnbd_msg_sess_info_rsp *rsp = iu->buf;
540 struct rnbd_clt_session *sess = iu->sess;
541
542 if (!iu->errno)
543 sess->ver = min_t(u8, rsp->ver, RNBD_PROTO_VER_MAJOR);
544
545 kfree(rsp);
546 wake_up_iu_comp(iu, iu->errno);
547 rnbd_put_iu(sess, iu);
548 rnbd_clt_put_sess(sess);
549 }
550
551 static int send_msg_open(struct rnbd_clt_dev *dev, enum wait_type wait)
552 {
553 struct rnbd_clt_session *sess = dev->sess;
554 struct rnbd_msg_open_rsp *rsp;
555 struct rnbd_msg_open msg;
556 struct rnbd_iu *iu;
557 struct kvec vec = {
558 .iov_base = &msg,
559 .iov_len = sizeof(msg)
560 };
561 int err, errno;
562
563 rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
564 if (!rsp)
565 return -ENOMEM;
566
567 iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
568 if (!iu) {
569 kfree(rsp);
570 return -ENOMEM;
571 }
572
573 iu->buf = rsp;
574 iu->dev = dev;
575
576 sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
577
578 msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN);
579 msg.access_mode = dev->access_mode;
580 strscpy(msg.dev_name, dev->pathname, sizeof(msg.dev_name));
581
582 WARN_ON(!rnbd_clt_get_dev(dev));
583 err = send_usr_msg(sess->rtrs, READ, iu,
584 &vec, sizeof(*rsp), iu->sgt.sgl, 1,
585 msg_open_conf, &errno, wait);
586 if (err) {
587 rnbd_clt_put_dev(dev);
588 rnbd_put_iu(sess, iu);
589 kfree(rsp);
590 } else {
591 err = errno;
592 }
593
594 rnbd_put_iu(sess, iu);
595 return err;
596 }
597
598 static int send_msg_sess_info(struct rnbd_clt_session *sess, enum wait_type wait)
599 {
600 struct rnbd_msg_sess_info_rsp *rsp;
601 struct rnbd_msg_sess_info msg;
602 struct rnbd_iu *iu;
603 struct kvec vec = {
604 .iov_base = &msg,
605 .iov_len = sizeof(msg)
606 };
607 int err, errno;
608
609 rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
610 if (!rsp)
611 return -ENOMEM;
612
613 iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
614 if (!iu) {
615 kfree(rsp);
616 return -ENOMEM;
617 }
618
619 iu->buf = rsp;
620 iu->sess = sess;
621 sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
622
623 msg.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO);
624 msg.ver = RNBD_PROTO_VER_MAJOR;
625
626 if (!rnbd_clt_get_sess(sess)) {
627 /*
628 * That can happen only in one case, when RTRS has re-established
629 * the connection and link_ev() is called, but the session is almost
630 * dead, the last reference on the session has been put and the caller
631 * is waiting for RTRS to close everything.
632 */
633 err = -ENODEV;
634 goto put_iu;
635 }
636 err = send_usr_msg(sess->rtrs, READ, iu,
637 &vec, sizeof(*rsp), iu->sgt.sgl, 1,
638 msg_sess_info_conf, &errno, wait);
639 if (err) {
640 rnbd_clt_put_sess(sess);
641 put_iu:
642 rnbd_put_iu(sess, iu);
643 kfree(rsp);
644 } else {
645 err = errno;
646 }
647 rnbd_put_iu(sess, iu);
648 return err;
649 }
650
651 static void set_dev_states_to_disconnected(struct rnbd_clt_session *sess)
652 {
653 struct rnbd_clt_dev *dev;
654 struct kobject *gd_kobj;
655
656 mutex_lock(&sess->lock);
657 list_for_each_entry(dev, &sess->devs_list, list) {
658 rnbd_clt_err(dev, "Device disconnected.\n");
659
660 mutex_lock(&dev->lock);
661 if (dev->dev_state == DEV_STATE_MAPPED) {
662 dev->dev_state = DEV_STATE_MAPPED_DISCONNECTED;
663 gd_kobj = &disk_to_dev(dev->gd)->kobj;
664 kobject_uevent(gd_kobj, KOBJ_OFFLINE);
665 }
666 mutex_unlock(&dev->lock);
667 }
668 mutex_unlock(&sess->lock);
669 }
670
671 static void remap_devs(struct rnbd_clt_session *sess)
672 {
673 struct rnbd_clt_dev *dev;
674 struct rtrs_attrs attrs;
675 int err;
676
677 /*
678 * Careful here: we are called from RTRS link event directly,
679 * thus we can't send any RTRS request and wait for a response,
680 * or RTRS will not be able to complete the request with a failure
681 * if something goes wrong (failing of outstanding requests
682 * happens exactly from the context where we are blocking now).
683 *
684 * So to avoid deadlocks each usr message sent from here must
685 * be asynchronous.
686 */
687
688 err = send_msg_sess_info(sess, RTRS_PERMIT_NOWAIT);
689 if (err) {
690 pr_err("send_msg_sess_info(\"%s\"): %d\n", sess->sessname, err);
691 return;
692 }
693
694 err = rtrs_clt_query(sess->rtrs, &attrs);
695 if (err) {
696 pr_err("rtrs_clt_query(\"%s\"): %d\n", sess->sessname, err);
697 return;
698 }
699 mutex_lock(&sess->lock);
700 sess->max_io_size = attrs.max_io_size;
701
702 list_for_each_entry(dev, &sess->devs_list, list) {
703 bool skip;
704
705 mutex_lock(&dev->lock);
706 skip = (dev->dev_state == DEV_STATE_INIT);
707 mutex_unlock(&dev->lock);
708 if (skip)
709 /*
710 * When the device is establishing a connection for the first
711 * time - do not remap, it will be closed soon.
712 */
713 continue;
714
715 rnbd_clt_info(dev, "session reconnected, remapping device\n");
716 err = send_msg_open(dev, RTRS_PERMIT_NOWAIT);
717 if (err) {
718 rnbd_clt_err(dev, "send_msg_open(): %d\n", err);
719 break;
720 }
721 }
722 mutex_unlock(&sess->lock);
723 }
724
725 static void rnbd_clt_link_ev(void *priv, enum rtrs_clt_link_ev ev)
726 {
727 struct rnbd_clt_session *sess = priv;
728
729 switch (ev) {
730 case RTRS_CLT_LINK_EV_DISCONNECTED:
731 set_dev_states_to_disconnected(sess);
732 break;
733 case RTRS_CLT_LINK_EV_RECONNECTED:
734 remap_devs(sess);
735 break;
736 default:
737 pr_err("Unknown session event received (%d), session: %s\n",
738 ev, sess->sessname);
739 }
740 }
741
742 static void rnbd_init_cpu_qlists(struct rnbd_cpu_qlist __percpu *cpu_queues)
743 {
744 unsigned int cpu;
745 struct rnbd_cpu_qlist *cpu_q;
746
747 for_each_possible_cpu(cpu) {
748 cpu_q = per_cpu_ptr(cpu_queues, cpu);
749
750 cpu_q->cpu = cpu;
751 INIT_LIST_HEAD(&cpu_q->requeue_list);
752 spin_lock_init(&cpu_q->requeue_lock);
753 }
754 }
755
756 static void destroy_mq_tags(struct rnbd_clt_session *sess)
757 {
758 if (sess->tag_set.tags)
759 blk_mq_free_tag_set(&sess->tag_set);
760 }
761
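/* Mark the rtrs state as settled and wake up everyone waiting on rtrs_waitq. */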
762 static inline void wake_up_rtrs_waiters(struct rnbd_clt_session *sess)
763 {
764 sess->rtrs_ready = true;
765 wake_up_all(&sess->rtrs_waitq);
766 }
767
768 static void close_rtrs(struct rnbd_clt_session *sess)
769 {
770 might_sleep();
771
772 if (!IS_ERR_OR_NULL(sess->rtrs)) {
773 rtrs_clt_close(sess->rtrs);
774 sess->rtrs = NULL;
775 wake_up_rtrs_waiters(sess);
776 }
777 }
778
779 static void free_sess(struct rnbd_clt_session *sess)
780 {
781 WARN_ON(!list_empty(&sess->devs_list));
782
783 might_sleep();
784
785 close_rtrs(sess);
786 destroy_mq_tags(sess);
787 if (!list_empty(&sess->list)) {
788 mutex_lock(&sess_lock);
789 list_del(&sess->list);
790 mutex_unlock(&sess_lock);
791 }
792 free_percpu(sess->cpu_queues);
793 free_percpu(sess->cpu_rr);
794 mutex_destroy(&sess->lock);
795 kfree(sess);
796 }
797
798 static struct rnbd_clt_session *alloc_sess(const char *sessname)
799 {
800 struct rnbd_clt_session *sess;
801 int err, cpu;
802
803 sess = kzalloc_node(sizeof(*sess), GFP_KERNEL, NUMA_NO_NODE);
804 if (!sess)
805 return ERR_PTR(-ENOMEM);
806 strscpy(sess->sessname, sessname, sizeof(sess->sessname));
807 atomic_set(&sess->busy, 0);
808 mutex_init(&sess->lock);
809 INIT_LIST_HEAD(&sess->devs_list);
810 INIT_LIST_HEAD(&sess->list);
811 bitmap_zero(sess->cpu_queues_bm, num_possible_cpus());
812 init_waitqueue_head(&sess->rtrs_waitq);
813 refcount_set(&sess->refcount, 1);
814
815 sess->cpu_queues = alloc_percpu(struct rnbd_cpu_qlist);
816 if (!sess->cpu_queues) {
817 err = -ENOMEM;
818 goto err;
819 }
820 rnbd_init_cpu_qlists(sess->cpu_queues);
821
822 /*
823 * That is a simple percpu variable which stores CPU indices, which are
824 * updated on each requeue round. We need that for the sake of fairness,
825 * to wake up queues in a round-robin manner.
826 */
827 sess->cpu_rr = alloc_percpu(int);
828 if (!sess->cpu_rr) {
829 err = -ENOMEM;
830 goto err;
831 }
832 for_each_possible_cpu(cpu)
833 *per_cpu_ptr(sess->cpu_rr, cpu) = cpu;
834
835 return sess;
836
837 err:
838 free_sess(sess);
839
840 return ERR_PTR(err);
841 }
842
843 static int wait_for_rtrs_connection(struct rnbd_clt_session *sess)
844 {
845 wait_event(sess->rtrs_waitq, sess->rtrs_ready);
846 if (IS_ERR_OR_NULL(sess->rtrs))
847 return -ECONNRESET;
848
849 return 0;
850 }
851
852 static void wait_for_rtrs_disconnection(struct rnbd_clt_session *sess)
853 __releases(&sess_lock)
854 __acquires(&sess_lock)
855 {
856 DEFINE_WAIT(wait);
857
858 prepare_to_wait(&sess->rtrs_waitq, &wait, TASK_UNINTERRUPTIBLE);
859 if (IS_ERR_OR_NULL(sess->rtrs)) {
860 finish_wait(&sess->rtrs_waitq, &wait);
861 return;
862 }
863 mutex_unlock(&sess_lock);
864 /* Loop in the caller, see __find_and_get_sess().
865 * You can't leave the mutex locked and call schedule(), you would catch a
866 * deadlock with a caller of free_sess(), which has just put the last
867 * reference and is about to take the sess_lock in order to delete
868 * the session from the list.
869 */
870 schedule();
871 mutex_lock(&sess_lock);
872 }
873
874 static struct rnbd_clt_session *__find_and_get_sess(const char *sessname)
875 __releases(&sess_lock)
876 __acquires(&sess_lock)
877 {
878 struct rnbd_clt_session *sess, *sn;
879 int err;
880
881 again:
882 list_for_each_entry_safe(sess, sn, &sess_list, list) {
883 if (strcmp(sessname, sess->sessname))
884 continue;
885
886 if (sess->rtrs_ready && IS_ERR_OR_NULL(sess->rtrs))
887 /*
888 * No RTRS connection, session is dying.
889 */
890 continue;
891
892 if (rnbd_clt_get_sess(sess)) {
893 /*
894 * Alive session is found, wait for RTRS connection.
895 */
896 mutex_unlock(&sess_lock);
897 err = wait_for_rtrs_connection(sess);
898 if (err)
899 rnbd_clt_put_sess(sess);
900 mutex_lock(&sess_lock);
901
902 if (err)
903 /* Session is dying, repeat the loop */
904 goto again;
905
906 return sess;
907 }
908 /*
909 * Ref is 0, session is dying, wait for RTRS disconnect
910 * in order to avoid session name clashes.
911 */
912 wait_for_rtrs_disconnection(sess);
913 /*
914 * RTRS is disconnected and soon session will be freed,
915 * so repeat a loop.
916 */
917 goto again;
918 }
919
920 return NULL;
921 }
922
923 /* caller is responsible for initializing 'first' to false */
924 static struct
925 rnbd_clt_session *find_or_create_sess(const char *sessname, bool *first)
926 {
927 struct rnbd_clt_session *sess = NULL;
928
929 mutex_lock(&sess_lock);
930 sess = __find_and_get_sess(sessname);
931 if (!sess) {
932 sess = alloc_sess(sessname);
933 if (IS_ERR(sess)) {
934 mutex_unlock(&sess_lock);
935 return sess;
936 }
937 list_add(&sess->list, &sess_list);
938 *first = true;
939 }
940 mutex_unlock(&sess_lock);
941
942 return sess;
943 }
944
945 static int rnbd_client_open(struct block_device *block_device, fmode_t mode)
946 {
947 struct rnbd_clt_dev *dev = block_device->bd_disk->private_data;
948
949 if (dev->read_only && (mode & FMODE_WRITE))
950 return -EPERM;
951
952 if (dev->dev_state == DEV_STATE_UNMAPPED ||
953 !rnbd_clt_get_dev(dev))
954 return -EIO;
955
956 return 0;
957 }
958
959 static void rnbd_client_release(struct gendisk *gen, fmode_t mode)
960 {
961 struct rnbd_clt_dev *dev = gen->private_data;
962
963 rnbd_clt_put_dev(dev);
964 }
965
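/*
 * Report a synthetic CHS geometry (4 heads, 16 sectors per track, i.e. 64
 * sectors per cylinder) for legacy tools that still ask for it.
 */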
966 static int rnbd_client_getgeo(struct block_device *block_device,
967 struct hd_geometry *geo)
968 {
969 u64 size;
970 struct rnbd_clt_dev *dev;
971
972 dev = block_device->bd_disk->private_data;
973 size = dev->size * (dev->logical_block_size / SECTOR_SIZE);
974 geo->cylinders = size >> 6; /* size/64 */
975 geo->heads = 4;
976 geo->sectors = 16;
977 geo->start = 0;
978
979 return 0;
980 }
981
982 static const struct block_device_operations rnbd_client_ops = {
983 .owner = THIS_MODULE,
984 .open = rnbd_client_open,
985 .release = rnbd_client_release,
986 .getgeo = rnbd_client_getgeo
987 };
988
989 /* The amount of data that belongs to an I/O and the amount of data that
990 * should be read or written to the disk (bi_size) can differ.
991 *
992 * E.g. When WRITE_SAME is used, only a small amount of data is
993 * transferred that is then written repeatedly over a lot of sectors.
994 *
995 * Get the size of data to be transferred via RTRS by summing up the size
996 * of the scatter-gather list entries.
997 */
998 static size_t rnbd_clt_get_sg_size(struct scatterlist *sglist, u32 len)
999 {
1000 struct scatterlist *sg;
1001 size_t tsize = 0;
1002 int i;
1003
1004 for_each_sg(sglist, sg, len, i)
1005 tsize += sg->length;
1006 return tsize;
1007 }
1008
1009 static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
1010 struct request *rq,
1011 struct rnbd_iu *iu)
1012 {
1013 struct rtrs_clt *rtrs = dev->sess->rtrs;
1014 struct rtrs_permit *permit = iu->permit;
1015 struct rnbd_msg_io msg;
1016 struct rtrs_clt_req_ops req_ops;
1017 unsigned int sg_cnt = 0;
1018 struct kvec vec;
1019 size_t size;
1020 int err;
1021
1022 iu->rq = rq;
1023 iu->dev = dev;
1024 msg.sector = cpu_to_le64(blk_rq_pos(rq));
1025 msg.bi_size = cpu_to_le32(blk_rq_bytes(rq));
1026 msg.rw = cpu_to_le32(rq_to_rnbd_flags(rq));
1027 msg.prio = cpu_to_le16(req_get_ioprio(rq));
1028
1029 /*
1030 * We only support discards with single segment for now.
1031 * See queue limits.
1032 */
1033 if (req_op(rq) != REQ_OP_DISCARD)
1034 sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sgt.sgl);
1035
1036 if (sg_cnt == 0)
1037 sg_mark_end(&iu->sgt.sgl[0]);
1038
1039 msg.hdr.type = cpu_to_le16(RNBD_MSG_IO);
1040 msg.device_id = cpu_to_le32(dev->device_id);
1041
1042 vec = (struct kvec) {
1043 .iov_base = &msg,
1044 .iov_len = sizeof(msg)
1045 };
1046 size = rnbd_clt_get_sg_size(iu->sgt.sgl, sg_cnt);
1047 req_ops = (struct rtrs_clt_req_ops) {
1048 .priv = iu,
1049 .conf_fn = msg_io_conf,
1050 };
1051 err = rtrs_clt_request(rq_data_dir(rq), &req_ops, rtrs, permit,
1052 &vec, 1, size, iu->sgt.sgl, sg_cnt);
1053 if (err) {
1054 rnbd_clt_err_rl(dev, "RTRS failed to transfer IO, err: %d\n",
1055 err);
1056 return err;
1057 }
1058
1059 return 0;
1060 }
1061
1062 /**
1063 * rnbd_clt_dev_add_to_requeue() - add device to requeue if session is busy
1064 * @dev: Device to be checked
1065 * @q: Queue to be added to the requeue list if required
1066 *
1067 * Description:
1068 * If the session is busy, that means someone will requeue us when resources
1069 * are freed. If the session is not doing anything, the queue is not added to
1070 * the list and false is returned.
1071 */
1072 static bool rnbd_clt_dev_add_to_requeue(struct rnbd_clt_dev *dev,
1073 struct rnbd_queue *q)
1074 {
1075 struct rnbd_clt_session *sess = dev->sess;
1076 struct rnbd_cpu_qlist *cpu_q;
1077 unsigned long flags;
1078 bool added = true;
1079 bool need_set;
1080
1081 cpu_q = get_cpu_ptr(sess->cpu_queues);
1082 spin_lock_irqsave(&cpu_q->requeue_lock, flags);
1083
1084 if (!test_and_set_bit_lock(0, &q->in_list)) {
1085 if (WARN_ON(!list_empty(&q->requeue_list)))
1086 goto unlock;
1087
1088 need_set = !test_bit(cpu_q->cpu, sess->cpu_queues_bm);
1089 if (need_set) {
1090 set_bit(cpu_q->cpu, sess->cpu_queues_bm);
1091 /* Paired with rnbd_put_permit(). Set a bit first
1092 * and then observe the busy counter.
1093 */
1094 smp_mb__before_atomic();
1095 }
1096 if (atomic_read(&sess->busy)) {
1097 list_add_tail(&q->requeue_list, &cpu_q->requeue_list);
1098 } else {
1099 /* Very unlikely, but possible: busy counter was
1100 * observed as zero. Drop all bits and return
1101 * false to restart the queue by ourselves.
1102 */
1103 if (need_set)
1104 clear_bit(cpu_q->cpu, sess->cpu_queues_bm);
1105 clear_bit_unlock(0, &q->in_list);
1106 added = false;
1107 }
1108 }
1109 unlock:
1110 spin_unlock_irqrestore(&cpu_q->requeue_lock, flags);
1111 put_cpu_ptr(sess->cpu_queues);
1112
1113 return added;
1114 }
1115
1116 static void rnbd_clt_dev_kick_mq_queue(struct rnbd_clt_dev *dev,
1117 struct blk_mq_hw_ctx *hctx,
1118 int delay)
1119 {
1120 struct rnbd_queue *q = hctx->driver_data;
1121
1122 if (delay != RNBD_DELAY_IFBUSY)
1123 blk_mq_delay_run_hw_queue(hctx, delay);
1124 else if (!rnbd_clt_dev_add_to_requeue(dev, q))
1125 /*
1126 * If session is not busy we have to restart
1127 * the queue ourselves.
1128 */
1129 blk_mq_delay_run_hw_queue(hctx, 10/*ms*/);
1130 }
1131
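/*
 * blk-mq ->queue_rq(): grab an rtrs permit, map the request into the iu
 * sg table and hand it over to rtrs. On a resource shortage the queue is
 * requeued via rnbd_clt_dev_kick_mq_queue().
 */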
1132 static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
1133 const struct blk_mq_queue_data *bd)
1134 {
1135 struct request *rq = bd->rq;
1136 struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
1137 struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
1138 int err;
1139 blk_status_t ret = BLK_STS_IOERR;
1140
1141 if (dev->dev_state != DEV_STATE_MAPPED)
1142 return BLK_STS_IOERR;
1143
1144 iu->permit = rnbd_get_permit(dev->sess, RTRS_IO_CON,
1145 RTRS_PERMIT_NOWAIT);
1146 if (!iu->permit) {
1147 rnbd_clt_dev_kick_mq_queue(dev, hctx, RNBD_DELAY_IFBUSY);
1148 return BLK_STS_RESOURCE;
1149 }
1150
1151 iu->sgt.sgl = iu->first_sgl;
1152 err = sg_alloc_table_chained(&iu->sgt,
1153 /* Even if the request has no segments,
1154 * the sglist must have at least one entry.
1155 */
1156 blk_rq_nr_phys_segments(rq) ? : 1,
1157 iu->sgt.sgl,
1158 RNBD_INLINE_SG_CNT);
1159 if (err) {
1160 rnbd_clt_err_rl(dev, "sg_alloc_table_chained ret=%d\n", err);
1161 rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
1162 rnbd_put_permit(dev->sess, iu->permit);
1163 return BLK_STS_RESOURCE;
1164 }
1165
1166 blk_mq_start_request(rq);
1167 err = rnbd_client_xfer_request(dev, rq, iu);
1168 if (err == 0)
1169 return BLK_STS_OK;
1170 if (err == -EAGAIN || err == -ENOMEM) {
1171 rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
1172 ret = BLK_STS_RESOURCE;
1173 }
1174 sg_free_table_chained(&iu->sgt, RNBD_INLINE_SG_CNT);
1175 rnbd_put_permit(dev->sess, iu->permit);
1176 return ret;
1177 }
1178
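/* blk-mq ->poll(): directly poll the rtrs completion queue backing this hctx. */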
1179 static int rnbd_rdma_poll(struct blk_mq_hw_ctx *hctx)
1180 {
1181 struct rnbd_queue *q = hctx->driver_data;
1182 struct rnbd_clt_dev *dev = q->dev;
1183 int cnt;
1184
1185 cnt = rtrs_clt_rdma_cq_direct(dev->sess->rtrs, hctx->queue_num);
1186 return cnt;
1187 }
1188
1189 static int rnbd_rdma_map_queues(struct blk_mq_tag_set *set)
1190 {
1191 struct rnbd_clt_session *sess = set->driver_data;
1192
1193 /* shared read/write queues */
1194 set->map[HCTX_TYPE_DEFAULT].nr_queues = num_online_cpus();
1195 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
1196 set->map[HCTX_TYPE_READ].nr_queues = num_online_cpus();
1197 set->map[HCTX_TYPE_READ].queue_offset = 0;
1198 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
1199 blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
1200
1201 if (sess->nr_poll_queues) {
1202 /* dedicated queue for poll */
1203 set->map[HCTX_TYPE_POLL].nr_queues = sess->nr_poll_queues;
1204 set->map[HCTX_TYPE_POLL].queue_offset = set->map[HCTX_TYPE_READ].queue_offset +
1205 set->map[HCTX_TYPE_READ].nr_queues;
1206 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
1207 pr_info("[session=%s] mapped %d/%d/%d default/read/poll queues.\n",
1208 sess->sessname,
1209 set->map[HCTX_TYPE_DEFAULT].nr_queues,
1210 set->map[HCTX_TYPE_READ].nr_queues,
1211 set->map[HCTX_TYPE_POLL].nr_queues);
1212 } else {
1213 pr_info("[session=%s] mapped %d/%d default/read queues.\n",
1214 sess->sessname,
1215 set->map[HCTX_TYPE_DEFAULT].nr_queues,
1216 set->map[HCTX_TYPE_READ].nr_queues);
1217 }
1218
1219 return 0;
1220 }
1221
1222 static struct blk_mq_ops rnbd_mq_ops = {
1223 .queue_rq = rnbd_queue_rq,
1224 .complete = rnbd_softirq_done_fn,
1225 .map_queues = rnbd_rdma_map_queues,
1226 .poll = rnbd_rdma_poll,
1227 };
1228
1229 static int setup_mq_tags(struct rnbd_clt_session *sess)
1230 {
1231 struct blk_mq_tag_set *tag_set = &sess->tag_set;
1232
1233 memset(tag_set, 0, sizeof(*tag_set));
1234 tag_set->ops = &rnbd_mq_ops;
1235 tag_set->queue_depth = sess->queue_depth;
1236 tag_set->numa_node = NUMA_NO_NODE;
1237 tag_set->flags = BLK_MQ_F_SHOULD_MERGE |
1238 BLK_MQ_F_TAG_QUEUE_SHARED;
1239 tag_set->cmd_size = sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE;
1240
1241 /* for HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL */
1242 tag_set->nr_maps = sess->nr_poll_queues ? HCTX_MAX_TYPES : 2;
1243 /*
1244 * HCTX_TYPE_DEFAULT and HCTX_TYPE_READ share one set of queues
1245 * others are for HCTX_TYPE_POLL
1246 */
1247 tag_set->nr_hw_queues = num_online_cpus() + sess->nr_poll_queues;
1248 tag_set->driver_data = sess;
1249
1250 return blk_mq_alloc_tag_set(tag_set);
1251 }
1252
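/*
 * Find an existing session by name and take a reference, or, when seen for
 * the first time, allocate it, open the rtrs connection and set up the
 * shared tag set.
 */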
1253 static struct rnbd_clt_session *
1254 find_and_get_or_create_sess(const char *sessname,
1255 const struct rtrs_addr *paths,
1256 size_t path_cnt, u16 port_nr, u32 nr_poll_queues)
1257 {
1258 struct rnbd_clt_session *sess;
1259 struct rtrs_attrs attrs;
1260 int err;
1261 bool first = false;
1262 struct rtrs_clt_ops rtrs_ops;
1263
1264 sess = find_or_create_sess(sessname, &first);
1265 if (sess == ERR_PTR(-ENOMEM))
1266 return ERR_PTR(-ENOMEM);
1267 else if ((nr_poll_queues && !first) || (!nr_poll_queues && sess->nr_poll_queues)) {
1268 /*
1269 * A device MUST have its own session to use polling mode.
1270 * Mapping a new device to such an existing session must fail.
1271 */
1272 err = -EINVAL;
1273 goto put_sess;
1274 }
1275
1276 if (!first)
1277 return sess;
1278
1279 if (!path_cnt) {
1280 pr_err("Session %s not found, and path parameter not given", sessname);
1281 err = -ENXIO;
1282 goto put_sess;
1283 }
1284
1285 rtrs_ops = (struct rtrs_clt_ops) {
1286 .priv = sess,
1287 .link_ev = rnbd_clt_link_ev,
1288 };
1289 /*
1290 * Nothing was found, establish rtrs connection and proceed further.
1291 */
1292 sess->rtrs = rtrs_clt_open(&rtrs_ops, sessname,
1293 paths, path_cnt, port_nr,
1294 0, /* Do not use pdu of rtrs */
1295 RECONNECT_DELAY, BMAX_SEGMENTS,
1296 MAX_RECONNECTS, nr_poll_queues);
1297 if (IS_ERR(sess->rtrs)) {
1298 err = PTR_ERR(sess->rtrs);
1299 goto wake_up_and_put;
1300 }
1301
1302 err = rtrs_clt_query(sess->rtrs, &attrs);
1303 if (err)
1304 goto close_rtrs;
1305
1306 sess->max_io_size = attrs.max_io_size;
1307 sess->queue_depth = attrs.queue_depth;
1308 sess->nr_poll_queues = nr_poll_queues;
1309
1310 err = setup_mq_tags(sess);
1311 if (err)
1312 goto close_rtrs;
1313
1314 err = send_msg_sess_info(sess, RTRS_PERMIT_WAIT);
1315 if (err)
1316 goto close_rtrs;
1317
1318 wake_up_rtrs_waiters(sess);
1319
1320 return sess;
1321
1322 close_rtrs:
1323 close_rtrs(sess);
1324 put_sess:
1325 rnbd_clt_put_sess(sess);
1326
1327 return ERR_PTR(err);
1328
1329 wake_up_and_put:
1330 wake_up_rtrs_waiters(sess);
1331 goto put_sess;
1332 }
1333
1334 static inline void rnbd_init_hw_queue(struct rnbd_clt_dev *dev,
1335 struct rnbd_queue *q,
1336 struct blk_mq_hw_ctx *hctx)
1337 {
1338 INIT_LIST_HEAD(&q->requeue_list);
1339 q->dev = dev;
1340 q->hctx = hctx;
1341 }
1342
1343 static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev)
1344 {
1345 int i;
1346 struct blk_mq_hw_ctx *hctx;
1347 struct rnbd_queue *q;
1348
1349 queue_for_each_hw_ctx(dev->queue, hctx, i) {
1350 q = &dev->hw_queues[i];
1351 rnbd_init_hw_queue(dev, q, hctx);
1352 hctx->driver_data = q;
1353 }
1354 }
1355
1356 static int setup_mq_dev(struct rnbd_clt_dev *dev)
1357 {
1358 dev->queue = blk_mq_init_queue(&dev->sess->tag_set);
1359 if (IS_ERR(dev->queue)) {
1360 rnbd_clt_err(dev, "Initializing multiqueue queue failed, err: %ld\n",
1361 PTR_ERR(dev->queue));
1362 return PTR_ERR(dev->queue);
1363 }
1364 rnbd_init_mq_hw_queues(dev);
1365 return 0;
1366 }
1367
1368 static void setup_request_queue(struct rnbd_clt_dev *dev)
1369 {
1370 blk_queue_logical_block_size(dev->queue, dev->logical_block_size);
1371 blk_queue_physical_block_size(dev->queue, dev->physical_block_size);
1372 blk_queue_max_hw_sectors(dev->queue, dev->max_hw_sectors);
1373 blk_queue_max_write_same_sectors(dev->queue,
1374 dev->max_write_same_sectors);
1375
1376 /*
1377 * We don't support discards to "discontiguous" segments
1378 * in one request
1379 */
1380 blk_queue_max_discard_segments(dev->queue, 1);
1381
1382 blk_queue_max_discard_sectors(dev->queue, dev->max_discard_sectors);
1383 dev->queue->limits.discard_granularity = dev->discard_granularity;
1384 dev->queue->limits.discard_alignment = dev->discard_alignment;
1385 if (dev->max_discard_sectors)
1386 blk_queue_flag_set(QUEUE_FLAG_DISCARD, dev->queue);
1387 if (dev->secure_discard)
1388 blk_queue_flag_set(QUEUE_FLAG_SECERASE, dev->queue);
1389
1390 blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
1391 blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
1392 blk_queue_max_segments(dev->queue, dev->max_segments);
1393 blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
1394 blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
1395 blk_queue_write_cache(dev->queue, dev->wc, dev->fua);
1396 dev->queue->queuedata = dev;
1397 }
1398
1399 static void rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
1400 {
1401 dev->gd->major = rnbd_client_major;
1402 dev->gd->first_minor = idx << RNBD_PART_BITS;
1403 dev->gd->fops = &rnbd_client_ops;
1404 dev->gd->queue = dev->queue;
1405 dev->gd->private_data = dev;
1406 snprintf(dev->gd->disk_name, sizeof(dev->gd->disk_name), "rnbd%d",
1407 idx);
1408 pr_debug("disk_name=%s, capacity=%zu\n",
1409 dev->gd->disk_name,
1410 dev->nsectors * (dev->logical_block_size / SECTOR_SIZE)
1411 );
1412
1413 set_capacity(dev->gd, dev->nsectors);
1414
1415 if (dev->access_mode == RNBD_ACCESS_RO) {
1416 dev->read_only = true;
1417 set_disk_ro(dev->gd, true);
1418 } else {
1419 dev->read_only = false;
1420 }
1421
1422 if (!dev->rotational)
1423 blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
1424 add_disk(dev->gd);
1425 }
1426
1427 static int rnbd_client_setup_device(struct rnbd_clt_dev *dev)
1428 {
1429 int err, idx = dev->clt_device_id;
1430
1431 dev->size = dev->nsectors * dev->logical_block_size;
1432
1433 err = setup_mq_dev(dev);
1434 if (err)
1435 return err;
1436
1437 setup_request_queue(dev);
1438
1439 dev->gd = alloc_disk_node(1 << RNBD_PART_BITS, NUMA_NO_NODE);
1440 if (!dev->gd) {
1441 blk_cleanup_queue(dev->queue);
1442 return -ENOMEM;
1443 }
1444
1445 rnbd_clt_setup_gen_disk(dev, idx);
1446
1447 return 0;
1448 }
1449
1450 static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
1451 enum rnbd_access_mode access_mode,
1452 const char *pathname,
1453 u32 nr_poll_queues)
1454 {
1455 struct rnbd_clt_dev *dev;
1456 int ret;
1457
1458 dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, NUMA_NO_NODE);
1459 if (!dev)
1460 return ERR_PTR(-ENOMEM);
1461
1462 /*
1463 * nr_cpu_ids: the number of softirq queues
1464 * nr_poll_queues: the number of polling queues
1465 */
1466 dev->hw_queues = kcalloc(nr_cpu_ids + nr_poll_queues,
1467 sizeof(*dev->hw_queues),
1468 GFP_KERNEL);
1469 if (!dev->hw_queues) {
1470 ret = -ENOMEM;
1471 goto out_alloc;
1472 }
1473
1474 mutex_lock(&ida_lock);
1475 ret = ida_simple_get(&index_ida, 0, 1 << (MINORBITS - RNBD_PART_BITS),
1476 GFP_KERNEL);
1477 mutex_unlock(&ida_lock);
1478 if (ret < 0) {
1479 pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
1480 pathname, sess->sessname, ret);
1481 goto out_queues;
1482 }
1483
1484 dev->pathname = kstrdup(pathname, GFP_KERNEL);
1485 if (!dev->pathname) {
1486 ret = -ENOMEM;
1487 goto out_queues;
1488 }
1489
1490 dev->clt_device_id = ret;
1491 dev->sess = sess;
1492 dev->access_mode = access_mode;
1493 dev->nr_poll_queues = nr_poll_queues;
1494 mutex_init(&dev->lock);
1495 refcount_set(&dev->refcount, 1);
1496 dev->dev_state = DEV_STATE_INIT;
1497
1498 /*
1499 * Here we are called from a sysfs entry, thus clt-sysfs is
1500 * responsible for making sure the session does not disappear.
1501 */
1502 WARN_ON(!rnbd_clt_get_sess(sess));
1503
1504 return dev;
1505
1506 out_queues:
1507 kfree(dev->hw_queues);
1508 out_alloc:
1509 kfree(dev);
1510 return ERR_PTR(ret);
1511 }
1512
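/* Check whether @pathname is already mapped, optionally limited to @sessname.
 * The caller must hold sess_lock.
 */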
1513 static bool __exists_dev(const char *pathname, const char *sessname)
1514 {
1515 struct rnbd_clt_session *sess;
1516 struct rnbd_clt_dev *dev;
1517 bool found = false;
1518
1519 list_for_each_entry(sess, &sess_list, list) {
1520 if (sessname && strncmp(sess->sessname, sessname,
1521 sizeof(sess->sessname)))
1522 continue;
1523 mutex_lock(&sess->lock);
1524 list_for_each_entry(dev, &sess->devs_list, list) {
1525 if (strlen(dev->pathname) == strlen(pathname) &&
1526 !strcmp(dev->pathname, pathname)) {
1527 found = true;
1528 break;
1529 }
1530 }
1531 mutex_unlock(&sess->lock);
1532 if (found)
1533 break;
1534 }
1535
1536 return found;
1537 }
1538
1539 static bool exists_devpath(const char *pathname, const char *sessname)
1540 {
1541 bool found;
1542
1543 mutex_lock(&sess_lock);
1544 found = __exists_dev(pathname, sessname);
1545 mutex_unlock(&sess_lock);
1546
1547 return found;
1548 }
1549
1550 static bool insert_dev_if_not_exists_devpath(struct rnbd_clt_dev *dev)
1551 {
1552 bool found;
1553 struct rnbd_clt_session *sess = dev->sess;
1554
1555 mutex_lock(&sess_lock);
1556 found = __exists_dev(dev->pathname, sess->sessname);
1557 if (!found) {
1558 mutex_lock(&sess->lock);
1559 list_add_tail(&dev->list, &sess->devs_list);
1560 mutex_unlock(&sess->lock);
1561 }
1562 mutex_unlock(&sess_lock);
1563
1564 return found;
1565 }
1566
1567 static void delete_dev(struct rnbd_clt_dev *dev)
1568 {
1569 struct rnbd_clt_session *sess = dev->sess;
1570
1571 mutex_lock(&sess->lock);
1572 list_del(&dev->list);
1573 mutex_unlock(&sess->lock);
1574 }
1575
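/*
 * Map a remote device: find or create the session, allocate the client
 * device, send RNBD_MSG_OPEN to the server and set up the request queue
 * and gendisk on success.
 */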
1576 struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
1577 struct rtrs_addr *paths,
1578 size_t path_cnt, u16 port_nr,
1579 const char *pathname,
1580 enum rnbd_access_mode access_mode,
1581 u32 nr_poll_queues)
1582 {
1583 struct rnbd_clt_session *sess;
1584 struct rnbd_clt_dev *dev;
1585 int ret;
1586
1587 if (exists_devpath(pathname, sessname))
1588 return ERR_PTR(-EEXIST);
1589
1590 sess = find_and_get_or_create_sess(sessname, paths, path_cnt, port_nr, nr_poll_queues);
1591 if (IS_ERR(sess))
1592 return ERR_CAST(sess);
1593
1594 dev = init_dev(sess, access_mode, pathname, nr_poll_queues);
1595 if (IS_ERR(dev)) {
1596 pr_err("map_device: failed to map device '%s' from session %s, can't initialize device, err: %ld\n",
1597 pathname, sess->sessname, PTR_ERR(dev));
1598 ret = PTR_ERR(dev);
1599 goto put_sess;
1600 }
1601 if (insert_dev_if_not_exists_devpath(dev)) {
1602 ret = -EEXIST;
1603 goto put_dev;
1604 }
1605 ret = send_msg_open(dev, RTRS_PERMIT_WAIT);
1606 if (ret) {
1607 rnbd_clt_err(dev,
1608 "map_device: failed, can't open remote device, err: %d\n",
1609 ret);
1610 goto del_dev;
1611 }
1612 mutex_lock(&dev->lock);
1613 pr_debug("Opened remote device: session=%s, path='%s'\n",
1614 sess->sessname, pathname);
1615 ret = rnbd_client_setup_device(dev);
1616 if (ret) {
1617 rnbd_clt_err(dev,
1618 "map_device: Failed to configure device, err: %d\n",
1619 ret);
1620 mutex_unlock(&dev->lock);
1621 goto send_close;
1622 }
1623
1624 rnbd_clt_info(dev,
1625 "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d, wc: %d, fua: %d)\n",
1626 dev->gd->disk_name, dev->nsectors,
1627 dev->logical_block_size, dev->physical_block_size,
1628 dev->max_write_same_sectors, dev->max_discard_sectors,
1629 dev->discard_granularity, dev->discard_alignment,
1630 dev->secure_discard, dev->max_segments,
1631 dev->max_hw_sectors, dev->rotational, dev->wc, dev->fua);
1632
1633 mutex_unlock(&dev->lock);
1634 rnbd_clt_put_sess(sess);
1635
1636 return dev;
1637
1638 send_close:
1639 send_msg_close(dev, dev->device_id, RTRS_PERMIT_WAIT);
1640 del_dev:
1641 delete_dev(dev);
1642 put_dev:
1643 rnbd_clt_put_dev(dev);
1644 put_sess:
1645 rnbd_clt_put_sess(sess);
1646
1647 return ERR_PTR(ret);
1648 }
1649
1650 static void destroy_gen_disk(struct rnbd_clt_dev *dev)
1651 {
1652 del_gendisk(dev->gd);
1653 blk_cleanup_queue(dev->queue);
1654 put_disk(dev->gd);
1655 }
1656
1657 static void destroy_sysfs(struct rnbd_clt_dev *dev,
1658 const struct attribute *sysfs_self)
1659 {
1660 rnbd_clt_remove_dev_symlink(dev);
1661 if (dev->kobj.state_initialized) {
1662 if (sysfs_self)
1663 /* To avoid a deadlock, first remove the sysfs file itself */
1664 sysfs_remove_file_self(&dev->kobj, sysfs_self);
1665 kobject_del(&dev->kobj);
1666 kobject_put(&dev->kobj);
1667 }
1668 }
1669
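/*
 * Unmap a device: refuses with -EBUSY if the device is still opened and
 * @force is not set; otherwise tears down sysfs and the gendisk and, if the
 * device was mapped and the session is still connected, sends RNBD_MSG_CLOSE.
 */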
1670 int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force,
1671 const struct attribute *sysfs_self)
1672 {
1673 struct rnbd_clt_session *sess = dev->sess;
1674 int refcount, ret = 0;
1675 bool was_mapped;
1676
1677 mutex_lock(&dev->lock);
1678 if (dev->dev_state == DEV_STATE_UNMAPPED) {
1679 rnbd_clt_info(dev, "Device is already being unmapped\n");
1680 ret = -EALREADY;
1681 goto err;
1682 }
1683 refcount = refcount_read(&dev->refcount);
1684 if (!force && refcount > 1) {
1685 rnbd_clt_err(dev,
1686 "Closing device failed, device is in use, (%d device users)\n",
1687 refcount - 1);
1688 ret = -EBUSY;
1689 goto err;
1690 }
1691 was_mapped = (dev->dev_state == DEV_STATE_MAPPED);
1692 dev->dev_state = DEV_STATE_UNMAPPED;
1693 mutex_unlock(&dev->lock);
1694
1695 delete_dev(dev);
1696 destroy_sysfs(dev, sysfs_self);
1697 destroy_gen_disk(dev);
1698 if (was_mapped && sess->rtrs)
1699 send_msg_close(dev, dev->device_id, RTRS_PERMIT_WAIT);
1700
1701 rnbd_clt_info(dev, "Device is unmapped\n");
1702
1703 /* Likely last reference put */
1704 rnbd_clt_put_dev(dev);
1705
1706 /*
1707 * Here device and session can be vanished!
1708 */
1709
1710 return 0;
1711 err:
1712 mutex_unlock(&dev->lock);
1713
1714 return ret;
1715 }
1716
1717 int rnbd_clt_remap_device(struct rnbd_clt_dev *dev)
1718 {
1719 int err;
1720
1721 mutex_lock(&dev->lock);
1722 if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED)
1723 err = 0;
1724 else if (dev->dev_state == DEV_STATE_UNMAPPED)
1725 err = -ENODEV;
1726 else if (dev->dev_state == DEV_STATE_MAPPED)
1727 err = -EALREADY;
1728 else
1729 err = -EBUSY;
1730 mutex_unlock(&dev->lock);
1731 if (!err) {
1732 rnbd_clt_info(dev, "Remapping device.\n");
1733 err = send_msg_open(dev, RTRS_PERMIT_WAIT);
1734 if (err)
1735 rnbd_clt_err(dev, "remap_device: %d\n", err);
1736 }
1737
1738 return err;
1739 }
1740
1741 static void unmap_device_work(struct work_struct *work)
1742 {
1743 struct rnbd_clt_dev *dev;
1744
1745 dev = container_of(work, typeof(*dev), unmap_on_rmmod_work);
1746 rnbd_clt_unmap_device(dev, true, NULL);
1747 }
1748
1749 static void rnbd_destroy_sessions(void)
1750 {
1751 struct rnbd_clt_session *sess, *sn;
1752 struct rnbd_clt_dev *dev, *tn;
1753
1754 /* Firstly forbid access through sysfs interface */
1755 rnbd_clt_destroy_sysfs_files();
1756
1757 /*
1758 * At this point there is no concurrent access to the sessions
1759 * list and the devices list:
1760 * 1. New session or device can't be created - session sysfs files
1761 * are removed.
1762 * 2. Device or session can't be removed - module reference is taken
1763 * into account in unmap device sysfs callback.
1764 * 3. No IO requests are in flight - each file open of a block device takes a
1765 * module reference in get_disk().
1766 *
1767 * But there can still be user requests in flight, which are sent by the
1768 * asynchronous send_msg_*() functions, thus before unmapping devices
1769 * the RTRS session must be explicitly closed.
1770 */
1771
1772 list_for_each_entry_safe(sess, sn, &sess_list, list) {
1773 if (!rnbd_clt_get_sess(sess))
1774 continue;
1775 close_rtrs(sess);
1776 list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
1777 /*
1778 * Here unmap happens in parallel for only one reason:
1779 * blk_cleanup_queue() takes around half a second, so
1780 * with a huge number of devices the whole module unload
1781 * procedure would otherwise take minutes.
1782 */
1783 INIT_WORK(&dev->unmap_on_rmmod_work, unmap_device_work);
1784 queue_work(system_long_wq, &dev->unmap_on_rmmod_work);
1785 }
1786 rnbd_clt_put_sess(sess);
1787 }
1788 /* Wait for all scheduled unmap works */
1789 flush_workqueue(system_long_wq);
1790 WARN_ON(!list_empty(&sess_list));
1791 }
1792
1793 static int __init rnbd_client_init(void)
1794 {
1795 int err = 0;
1796
1797 BUILD_BUG_ON(sizeof(struct rnbd_msg_hdr) != 4);
1798 BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info) != 36);
1799 BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info_rsp) != 36);
1800 BUILD_BUG_ON(sizeof(struct rnbd_msg_open) != 264);
1801 BUILD_BUG_ON(sizeof(struct rnbd_msg_close) != 8);
1802 BUILD_BUG_ON(sizeof(struct rnbd_msg_open_rsp) != 56);
1803 rnbd_client_major = register_blkdev(rnbd_client_major, "rnbd");
1804 if (rnbd_client_major <= 0) {
1805 pr_err("Failed to load module, block device registration failed\n");
1806 return -EBUSY;
1807 }
1808
1809 err = rnbd_clt_create_sysfs_files();
1810 if (err) {
1811 pr_err("Failed to load module, creating sysfs device files failed, err: %d\n",
1812 err);
1813 unregister_blkdev(rnbd_client_major, "rnbd");
1814 }
1815
1816 return err;
1817 }
1818
1819 static void __exit rnbd_client_exit(void)
1820 {
1821 rnbd_destroy_sessions();
1822 unregister_blkdev(rnbd_client_major, "rnbd");
1823 ida_destroy(&index_ida);
1824 }
1825
1826 module_init(rnbd_client_init);
1827 module_exit(rnbd_client_exit);
1828