// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Ezequiel Garcia
 * Copyright (c) 2011 Free Electrons
 *
 * Driver parameter handling strongly based on drivers/mtd/ubi/build.c
 *   Copyright (c) International Business Machines Corp., 2006
 *   Copyright (c) Nokia Corporation, 2007
 *   Authors: Artem Bityutskiy, Frank Haverkamp
 */

/*
 * Read-only block devices on top of UBI volumes
 *
 * A simple implementation to allow a block device to be layered on top of a
 * UBI volume. The implementation is provided by creating a static 1-to-1
 * mapping between the block device and the UBI volume.
 *
 * The addressed byte is obtained from the addressed block sector, which is
 * mapped linearly into the corresponding LEB:
 *
 *   LEB number = addressed byte / LEB size
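 *   offset in LEB = addressed byte % LEB size
 *
 * For example (illustrative numbers), with a 128 KiB LEB size, byte
 * 300000 maps to LEB 2 at offset 37856, since 300000 = 2 * 131072 + 37856.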
 *
 * This feature is compiled in the UBI core, and adds a 'block' parameter
 * to allow early creation of block devices on top of UBI volumes. Runtime
 * block creation/removal for UBI volumes is provided through two UBI ioctls:
 * UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/mtd/ubi.h>
#include <linux/workqueue.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/scatterlist.h>
#include <linux/idr.h>
#include <asm/div64.h>

#include "ubi-media.h"
#include "ubi.h"

/* Maximum number of supported devices */
#define UBIBLOCK_MAX_DEVICES 32

/* Maximum length of the 'block=' parameter */
#define UBIBLOCK_PARAM_LEN 63

/* Maximum number of comma-separated items in the 'block=' parameter */
#define UBIBLOCK_PARAM_COUNT 2

struct ubiblock_param {
	int ubi_num;
	int vol_id;
	char name[UBIBLOCK_PARAM_LEN+1];
};

struct ubiblock_pdu {
	struct work_struct work;
	struct ubi_sgl usgl;
};

/* Number of elements set in the @ubiblock_param array */
static int ubiblock_devs __initdata;

/* UBI block device specification parameters */
static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;

struct ubiblock {
	struct ubi_volume_desc *desc;
	int ubi_num;
	int vol_id;
	int refcnt;
	int leb_size;

	struct gendisk *gd;
	struct request_queue *rq;

	struct workqueue_struct *wq;

	struct mutex dev_mutex;
	struct list_head list;
	struct blk_mq_tag_set tag_set;
};

/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
static DEFINE_IDR(ubiblock_minor_idr);
/* Protects ubiblock_devices and ubiblock_minor_idr */
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;

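/*
 * Parse a single 'block=' argument into the next free slot of
 * @ubiblock_param. For example, "0,rootfs" splits into tokens[0] = "0"
 * (the UBI device number) and tokens[1] = "rootfs" (the volume name);
 * the slots are consumed later by ubiblock_create_from_param().
 */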
static int __init ubiblock_set_param(const char *val,
				     const struct kernel_param *kp)
{
	int i, ret;
	size_t len;
	struct ubiblock_param *param;
	char buf[UBIBLOCK_PARAM_LEN];
	char *pbuf = &buf[0];
	char *tokens[UBIBLOCK_PARAM_COUNT];

	if (!val)
		return -EINVAL;

	len = strnlen(val, UBIBLOCK_PARAM_LEN);
	if (len == 0) {
		pr_warn("UBI: block: empty 'block=' parameter - ignored\n");
		return 0;
	}

	if (len == UBIBLOCK_PARAM_LEN) {
		pr_err("UBI: block: parameter \"%s\" is too long, max. is %d\n",
		       val, UBIBLOCK_PARAM_LEN);
		return -EINVAL;
	}

	strcpy(buf, val);

	/* Get rid of the final newline */
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
		tokens[i] = strsep(&pbuf, ",");

	/* Don't write past the end of the parameter table */
	if (ubiblock_devs == UBIBLOCK_MAX_DEVICES) {
		pr_err("UBI: block: block device table is full\n");
		return -ENOSPC;
	}

	param = &ubiblock_param[ubiblock_devs];
	if (tokens[1]) {
		/* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */
		ret = kstrtoint(tokens[0], 10, &param->ubi_num);
		if (ret < 0)
			return -EINVAL;

		/* Second param can be a number or a name */
		ret = kstrtoint(tokens[1], 10, &param->vol_id);
		if (ret < 0) {
			param->vol_id = -1;
			strcpy(param->name, tokens[1]);
		}
	} else {
		/* One parameter: must be device path */
		strcpy(param->name, tokens[0]);
		param->ubi_num = -1;
		param->vol_id = -1;
	}

	ubiblock_devs++;

	return 0;
}

static const struct kernel_param_ops ubiblock_param_ops = {
	.set    = ubiblock_set_param,
};
module_param_cb(block, &ubiblock_param_ops, NULL, 0);
MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
			"Multiple \"block\" parameters may be specified.\n"
			"UBI volumes may be specified by their number, name, or path to the device node.\n"
			"Examples\n"
			"Using the UBI volume path:\n"
			"ubi.block=/dev/ubi0_0\n"
			"Using the UBI device, and the volume name:\n"
			"ubi.block=0,rootfs\n"
			"Using both UBI device number and UBI volume number:\n"
			"ubi.block=0,0\n");

static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
{
	struct ubiblock *dev;

	list_for_each_entry(dev, &ubiblock_devices, list)
		if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
			return dev;
	return NULL;
}

static int ubiblock_read(struct ubiblock_pdu *pdu)
{
	int ret, leb, offset, bytes_left, to_read;
	u64 pos;
	struct request *req = blk_mq_rq_from_pdu(pdu);
	struct ubiblock *dev = req->q->queuedata;

	to_read = blk_rq_bytes(req);
	pos = blk_rq_pos(req) << 9;
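	/* blk_rq_pos() counts 512-byte sectors; shift by 9 to get bytes */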

	/* Get LEB:offset address to read from */
	offset = do_div(pos, dev->leb_size);
	leb = pos;
	bytes_left = to_read;

	while (bytes_left) {
		/*
		 * We can only read one LEB at a time. Therefore if the read
		 * length is larger than one LEB size, we split the operation.
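		 * As an illustration, with 128 KiB LEBs a 64 KiB read that
		 * starts 4 KiB before a LEB boundary is issued as a 4 KiB
		 * read from this LEB followed by a 60 KiB read from the next.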
		 */
		if (offset + to_read > dev->leb_size)
			to_read = dev->leb_size - offset;

		ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
		if (ret < 0)
			return ret;

		bytes_left -= to_read;
		to_read = bytes_left;
		leb += 1;
		offset = 0;
	}
	return 0;
}

static int ubiblock_open(struct block_device *bdev, fmode_t mode)
{
	struct ubiblock *dev = bdev->bd_disk->private_data;
	int ret;

	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		/*
		 * The volume is already open, just increase the reference
		 * counter.
		 */
		goto out_done;
	}

	/*
	 * We want users to be aware they should only mount us as read-only.
	 * It's just a paranoid check, as write requests will get rejected
	 * in any case.
	 */
	if (mode & FMODE_WRITE) {
		ret = -EROFS;
		goto out_unlock;
	}

	dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
	if (IS_ERR(dev->desc)) {
		dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d",
			dev->ubi_num, dev->vol_id);
		ret = PTR_ERR(dev->desc);
		dev->desc = NULL;
		goto out_unlock;
	}

out_done:
	dev->refcnt++;
	mutex_unlock(&dev->dev_mutex);
	return 0;

out_unlock:
	mutex_unlock(&dev->dev_mutex);
	return ret;
}

static void ubiblock_release(struct gendisk *gd, fmode_t mode)
{
	struct ubiblock *dev = gd->private_data;

	mutex_lock(&dev->dev_mutex);
	dev->refcnt--;
	if (dev->refcnt == 0) {
		ubi_close_volume(dev->desc);
		dev->desc = NULL;
	}
	mutex_unlock(&dev->dev_mutex);
}

static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* Some tools might require this information */
	geo->heads = 1;
	geo->cylinders = 1;
	geo->sectors = get_capacity(bdev->bd_disk);
	geo->start = 0;
	return 0;
}

static const struct block_device_operations ubiblock_ops = {
	.owner = THIS_MODULE,
	.open = ubiblock_open,
	.release = ubiblock_release,
	.getgeo	= ubiblock_getgeo,
};

static void ubiblock_do_work(struct work_struct *work)
{
	int ret;
	struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
	struct request *req = blk_mq_rq_from_pdu(pdu);

	blk_mq_start_request(req);

	/*
	 * It is safe to ignore the return value of blk_rq_map_sg() because
	 * the number of sg entries is limited to UBI_MAX_SG_COUNT
	 * and ubi_read_sg() will check that limit.
	 */
	blk_rq_map_sg(req->q, req, pdu->usgl.sg);

	ret = ubiblock_read(pdu);
	rq_flush_dcache_pages(req);

	blk_mq_end_request(req, errno_to_blk_status(ret));
}

static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
			     const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct ubiblock *dev = hctx->queue->queuedata;
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

	switch (req_op(req)) {
	case REQ_OP_READ:
		ubi_sgl_init(&pdu->usgl);
		queue_work(dev->wq, &pdu->work);
		return BLK_STS_OK;
	default:
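		/* The volume is read-only; reject anything but reads */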
		return BLK_STS_IOERR;
	}
}

static int ubiblock_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

	sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
	INIT_WORK(&pdu->work, ubiblock_do_work);

	return 0;
}

static const struct blk_mq_ops ubiblock_mq_ops = {
	.queue_rq       = ubiblock_queue_rq,
	.init_request	= ubiblock_init_request,
};

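/*
 * Convert the volume size to a capacity in 512-byte sectors. As an
 * illustrative example, used_bytes = 1048700 yields 2048 sectors and the
 * trailing 124 bytes are ignored with a warning; a size that does not fit
 * in sector_t is rejected with -EFBIG.
 */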
static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity)
{
	u64 size = vi->used_bytes >> 9;

	if (vi->used_bytes % 512) {
		pr_warn("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
			vi->used_bytes - (size << 9));
	}

	if ((sector_t)size != size)
		return -EFBIG;

	*disk_capacity = size;

	return 0;
}

int ubiblock_create(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	struct gendisk *gd;
	u64 disk_capacity;
	int ret;

	ret = calc_disk_capacity(vi, &disk_capacity);
	if (ret)
		return ret;

	/* Check that the volume isn't already handled */
	mutex_lock(&devices_mutex);
	if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
	if (!dev) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	mutex_init(&dev->dev_mutex);

	dev->ubi_num = vi->ubi_num;
	dev->vol_id = vi->vol_id;
	dev->leb_size = vi->usable_leb_size;

	/* Initialize the gendisk of this ubiblock device */
	gd = alloc_disk(1);
	if (!gd) {
		pr_err("UBI: block: alloc_disk failed\n");
		ret = -ENODEV;
		goto out_free_dev;
	}

	gd->fops = &ubiblock_ops;
	gd->major = ubiblock_major;
	gd->first_minor = idr_alloc(&ubiblock_minor_idr, dev, 0, 0, GFP_KERNEL);
	if (gd->first_minor < 0) {
		dev_err(disk_to_dev(gd),
			"block: dynamic minor allocation failed");
		ret = -ENODEV;
		goto out_put_disk;
	}
	gd->private_data = dev;
	sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
	set_capacity(gd, disk_capacity);
	dev->gd = gd;

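	/*
	 * One hardware queue with up to 64 in-flight requests; each request
	 * carries a struct ubiblock_pdu (work item plus UBI scatter-gather
	 * list) as its driver-private payload.
	 */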
	dev->tag_set.ops = &ubiblock_mq_ops;
	dev->tag_set.queue_depth = 64;
	dev->tag_set.numa_node = NUMA_NO_NODE;
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
	dev->tag_set.driver_data = dev;
	dev->tag_set.nr_hw_queues = 1;

	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret) {
		dev_err(disk_to_dev(dev->gd), "blk_mq_alloc_tag_set failed");
		goto out_remove_minor;
	}

	dev->rq = blk_mq_init_queue(&dev->tag_set);
	if (IS_ERR(dev->rq)) {
		dev_err(disk_to_dev(gd), "blk_mq_init_queue failed");
		ret = PTR_ERR(dev->rq);
		goto out_free_tags;
	}
	blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);

	dev->rq->queuedata = dev;
	dev->gd->queue = dev->rq;

	/*
	 * Create one workqueue per volume (per registered block device).
	 * Remember: workqueues are cheap, they're not threads.
	 */
	dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
	if (!dev->wq) {
		ret = -ENOMEM;
		goto out_free_queue;
	}

	list_add_tail(&dev->list, &ubiblock_devices);

	/* Must be the last step: anyone can call file ops from now on */
	add_disk(dev->gd);
	dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
		 dev->ubi_num, dev->vol_id, vi->name);
	mutex_unlock(&devices_mutex);
	return 0;

out_free_queue:
	blk_cleanup_queue(dev->rq);
out_free_tags:
	blk_mq_free_tag_set(&dev->tag_set);
out_remove_minor:
	idr_remove(&ubiblock_minor_idr, gd->first_minor);
out_put_disk:
	put_disk(dev->gd);
out_free_dev:
	kfree(dev);
out_unlock:
	mutex_unlock(&devices_mutex);

	return ret;
}

static void ubiblock_cleanup(struct ubiblock *dev)
{
	/* Stop new requests from arriving */
	del_gendisk(dev->gd);
	/* Flush pending work */
	destroy_workqueue(dev->wq);
	/* Finally destroy the blk queue */
	blk_cleanup_queue(dev->rq);
	blk_mq_free_tag_set(&dev->tag_set);
	dev_info(disk_to_dev(dev->gd), "released");
	idr_remove(&ubiblock_minor_idr, dev->gd->first_minor);
	put_disk(dev->gd);
}

int ubiblock_remove(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	int ret;

	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		ret = -ENODEV;
		goto out_unlock;
	}

	/* Found a device, let's lock it so we can check if it's busy */
	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		ret = -EBUSY;
		goto out_unlock_dev;
	}

	/* Remove from device list */
	list_del(&dev->list);
	ubiblock_cleanup(dev);
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);

	kfree(dev);
	return 0;

out_unlock_dev:
	mutex_unlock(&dev->dev_mutex);
out_unlock:
	mutex_unlock(&devices_mutex);
	return ret;
}

static int ubiblock_resize(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	u64 disk_capacity;
	int ret;

	/*
	 * Need to lock the device list until we stop using the device,
	 * otherwise the device struct might get released in
	 * 'ubiblock_remove()'.
	 */
	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		mutex_unlock(&devices_mutex);
		return -ENODEV;
	}

	ret = calc_disk_capacity(vi, &disk_capacity);
	if (ret) {
		mutex_unlock(&devices_mutex);
		if (ret == -EFBIG) {
			dev_warn(disk_to_dev(dev->gd),
				 "the volume is too big (%d LEBs), cannot resize",
				 vi->size);
		}
		return ret;
	}

	mutex_lock(&dev->dev_mutex);

	if (get_capacity(dev->gd) != disk_capacity) {
		set_capacity(dev->gd, disk_capacity);
		dev_info(disk_to_dev(dev->gd), "resized to %lld bytes",
			 vi->used_bytes);
	}
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);
	return 0;
}

static int ubiblock_notify(struct notifier_block *nb,
			 unsigned long notification_type, void *ns_ptr)
{
	struct ubi_notification *nt = ns_ptr;

	switch (notification_type) {
	case UBI_VOLUME_ADDED:
		/*
		 * We want to enforce explicit block device creation for
		 * volumes, so when a volume is added we do nothing.
		 */
		break;
	case UBI_VOLUME_REMOVED:
		ubiblock_remove(&nt->vi);
		break;
	case UBI_VOLUME_RESIZED:
		ubiblock_resize(&nt->vi);
		break;
	case UBI_VOLUME_UPDATED:
		/*
		 * If the volume is static, a content update might mean the
		 * size (i.e. used_bytes) was also changed.
		 */
		if (nt->vi.vol_type == UBI_STATIC_VOLUME)
			ubiblock_resize(&nt->vi);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block ubiblock_notifier = {
	.notifier_call = ubiblock_notify,
};

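/*
 * Resolve one 'block=' specification to an open volume. For example,
 * "/dev/ubi0_0" is opened by path, "0,rootfs" by device number plus
 * volume name, and "0,0" by device number plus volume number.
 */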
static struct ubi_volume_desc * __init
open_volume_desc(const char *name, int ubi_num, int vol_id)
{
	if (ubi_num == -1)
		/* No ubi num, name must be a vol device path */
		return ubi_open_volume_path(name, UBI_READONLY);
	else if (vol_id == -1)
		/* No vol_id, must be vol_name */
		return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
	else
		return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
}

static void __init ubiblock_create_from_param(void)
{
	int i, ret = 0;
	struct ubiblock_param *p;
	struct ubi_volume_desc *desc;
	struct ubi_volume_info vi;

	/*
	 * If there is an error creating one of the ubiblocks, continue on to
	 * create the following ubiblocks. This helps in a circumstance where
	 * the kernel command-line specifies multiple block devices and some
	 * may be broken, but we still want the working ones to come up.
	 */
	for (i = 0; i < ubiblock_devs; i++) {
		p = &ubiblock_param[i];

		desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
		if (IS_ERR(desc)) {
			pr_err("UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
			       p->ubi_num, p->vol_id, PTR_ERR(desc));
			continue;
		}

		ubi_get_volume_info(desc, &vi);
		ubi_close_volume(desc);

		ret = ubiblock_create(&vi);
		if (ret) {
			pr_err("UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
			       vi.name, p->ubi_num, p->vol_id, ret);
			continue;
		}
	}
}

static void ubiblock_remove_all(void)
{
	struct ubiblock *next;
	struct ubiblock *dev;

	mutex_lock(&devices_mutex);
	list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
		/* The module is being forcefully removed */
		WARN_ON(dev->desc);
		/* Remove from device list */
		list_del(&dev->list);
		ubiblock_cleanup(dev);
		kfree(dev);
	}
	mutex_unlock(&devices_mutex);
}

int __init ubiblock_init(void)
{
	int ret;

	ubiblock_major = register_blkdev(0, "ubiblock");
	if (ubiblock_major < 0)
		return ubiblock_major;

	/*
	 * Attach block devices from 'block=' module param.
	 * Even if one block device in the param list fails to come up,
	 * still allow the module to load and leave any others up.
	 */
	ubiblock_create_from_param();

	/*
	 * Block devices are only created upon user requests, so we ignore
	 * existing volumes.
	 */
	ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
	if (ret)
		goto err_unreg;
	return 0;

err_unreg:
	unregister_blkdev(ubiblock_major, "ubiblock");
	ubiblock_remove_all();
	return ret;
}

void __exit ubiblock_exit(void)
{
	ubi_unregister_volume_notifier(&ubiblock_notifier);
	ubiblock_remove_all();
	unregister_blkdev(ubiblock_major, "ubiblock");
}