1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * bcache setup/teardown code, and some metadata io - read a superblock and
4 * figure out what to do with it.
5 *
6 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
7 * Copyright 2012 Google, Inc.
8 */
9
10 #include "bcache.h"
11 #include "btree.h"
12 #include "debug.h"
13 #include "extents.h"
14 #include "request.h"
15 #include "writeback.h"
16 #include "features.h"
17
18 #include <linux/blkdev.h>
19 #include <linux/pagemap.h>
20 #include <linux/debugfs.h>
21 #include <linux/idr.h>
22 #include <linux/kthread.h>
23 #include <linux/workqueue.h>
24 #include <linux/module.h>
25 #include <linux/random.h>
26 #include <linux/reboot.h>
27 #include <linux/sysfs.h>
28
29 unsigned int bch_cutoff_writeback;
30 unsigned int bch_cutoff_writeback_sync;
31
32 static const char bcache_magic[] = {
33 0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
34 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
35 };
36
37 static const char invalid_uuid[] = {
38 0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
39 0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
40 };
41
42 static struct kobject *bcache_kobj;
43 struct mutex bch_register_lock;
44 bool bcache_is_reboot;
45 LIST_HEAD(bch_cache_sets);
46 static LIST_HEAD(uncached_devices);
47
48 static int bcache_major;
49 static DEFINE_IDA(bcache_device_idx);
50 static wait_queue_head_t unregister_wait;
51 struct workqueue_struct *bcache_wq;
52 struct workqueue_struct *bch_flush_wq;
53 struct workqueue_struct *bch_journal_wq;
54
55
56 #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
57 /* limit on the number of partitions on a single bcache device */
58 #define BCACHE_MINORS 128
59 /* limit on the number of bcache devices on a single system */
60 #define BCACHE_DEVICE_IDX_MAX ((1U << MINORBITS)/BCACHE_MINORS)
61
62 /* Superblock */
63
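/*
 * Decode the bucket size from the on-disk superblock. With the
 * large_bucket feature the on-disk field stores log2 of the bucket
 * size in sectors; the obsoleted variant keeps the upper 16 bits in
 * obso_bucket_size_hi instead.
 */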
64 static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s)
65 {
66 unsigned int bucket_size = le16_to_cpu(s->bucket_size);
67
68 if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
69 if (bch_has_feature_large_bucket(sb)) {
70 unsigned int max, order;
71
72 max = sizeof(unsigned int) * BITS_PER_BYTE - 1;
73 order = le16_to_cpu(s->bucket_size);
74 /*
75 * The bcache tools ensure this overflow won't
76 * happen; an error message here is enough.
77 */
78 if (order > max)
79 pr_err("Bucket size (1 << %u) overflows\n",
80 order);
81 bucket_size = 1 << order;
82 } else if (bch_has_feature_obso_large_bucket(sb)) {
83 bucket_size +=
84 le16_to_cpu(s->obso_bucket_size_hi) << 16;
85 }
86 }
87
88 return bucket_size;
89 }
90
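/*
 * Validation shared by all cache-device superblock versions; returns an
 * error string on failure, NULL on success.
 */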
91 static const char *read_super_common(struct cache_sb *sb, struct block_device *bdev,
92 struct cache_sb_disk *s)
93 {
94 const char *err;
95 unsigned int i;
96
97 sb->first_bucket = le16_to_cpu(s->first_bucket);
98 sb->nbuckets = le64_to_cpu(s->nbuckets);
99 sb->bucket_size = get_bucket_size(sb, s);
100
101 sb->nr_in_set = le16_to_cpu(s->nr_in_set);
102 sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);
103
104 err = "Too many journal buckets";
105 if (sb->keys > SB_JOURNAL_BUCKETS)
106 goto err;
107
108 err = "Too many buckets";
109 if (sb->nbuckets > LONG_MAX)
110 goto err;
111
112 err = "Not enough buckets";
113 if (sb->nbuckets < 1 << 7)
114 goto err;
115
116 err = "Bad block size (not power of 2)";
117 if (!is_power_of_2(sb->block_size))
118 goto err;
119
120 err = "Bad block size (larger than page size)";
121 if (sb->block_size > PAGE_SECTORS)
122 goto err;
123
124 err = "Bad bucket size (not power of 2)";
125 if (!is_power_of_2(sb->bucket_size))
126 goto err;
127
128 err = "Bad bucket size (smaller than page size)";
129 if (sb->bucket_size < PAGE_SECTORS)
130 goto err;
131
132 err = "Invalid superblock: device too small";
133 if (get_capacity(bdev->bd_disk) <
134 sb->bucket_size * sb->nbuckets)
135 goto err;
136
137 err = "Bad UUID";
138 if (bch_is_zero(sb->set_uuid, 16))
139 goto err;
140
141 err = "Bad cache device number in set";
142 if (!sb->nr_in_set ||
143 sb->nr_in_set <= sb->nr_this_dev ||
144 sb->nr_in_set > MAX_CACHES_PER_SET)
145 goto err;
146
147 err = "Journal buckets not sequential";
148 for (i = 0; i < sb->keys; i++)
149 if (sb->d[i] != sb->first_bucket + i)
150 goto err;
151
152 err = "Too many journal buckets";
153 if (sb->first_bucket + sb->keys > sb->nbuckets)
154 goto err;
155
156 err = "Invalid superblock: first bucket comes before end of super";
157 if (sb->first_bucket * sb->bucket_size < 16)
158 goto err;
159
160 err = NULL;
161 err:
162 return err;
163 }
164
165
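/*
 * Read and validate a bcache superblock from the start of the device.
 * On success *res points at the on-disk superblock (its page stays
 * referenced for the caller); on failure the page is released and an
 * error string is returned.
 */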
166 static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
167 struct cache_sb_disk **res)
168 {
169 const char *err;
170 struct cache_sb_disk *s;
171 struct page *page;
172 unsigned int i;
173
174 page = read_cache_page_gfp(bdev->bd_mapping,
175 SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
176 if (IS_ERR(page))
177 return "IO error";
178 s = page_address(page) + offset_in_page(SB_OFFSET);
179
180 sb->offset = le64_to_cpu(s->offset);
181 sb->version = le64_to_cpu(s->version);
182
183 memcpy(sb->magic, s->magic, 16);
184 memcpy(sb->uuid, s->uuid, 16);
185 memcpy(sb->set_uuid, s->set_uuid, 16);
186 memcpy(sb->label, s->label, SB_LABEL_SIZE);
187
188 sb->flags = le64_to_cpu(s->flags);
189 sb->seq = le64_to_cpu(s->seq);
190 sb->last_mount = le32_to_cpu(s->last_mount);
191 sb->keys = le16_to_cpu(s->keys);
192
193 for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
194 sb->d[i] = le64_to_cpu(s->d[i]);
195
196 pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u\n",
197 sb->version, sb->flags, sb->seq, sb->keys);
198
199 err = "Not a bcache superblock (bad offset)";
200 if (sb->offset != SB_SECTOR)
201 goto err;
202
203 err = "Not a bcache superblock (bad magic)";
204 if (memcmp(sb->magic, bcache_magic, 16))
205 goto err;
206
207 err = "Bad checksum";
208 if (s->csum != csum_set(s))
209 goto err;
210
211 err = "Bad UUID";
212 if (bch_is_zero(sb->uuid, 16))
213 goto err;
214
215 sb->block_size = le16_to_cpu(s->block_size);
216
217 err = "Superblock block size smaller than device block size";
218 if (sb->block_size << 9 < bdev_logical_block_size(bdev))
219 goto err;
220
221 switch (sb->version) {
222 case BCACHE_SB_VERSION_BDEV:
223 sb->data_offset = BDEV_DATA_START_DEFAULT;
224 break;
225 case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
226 case BCACHE_SB_VERSION_BDEV_WITH_FEATURES:
227 sb->data_offset = le64_to_cpu(s->data_offset);
228
229 err = "Bad data offset";
230 if (sb->data_offset < BDEV_DATA_START_DEFAULT)
231 goto err;
232
233 break;
234 case BCACHE_SB_VERSION_CDEV:
235 case BCACHE_SB_VERSION_CDEV_WITH_UUID:
236 err = read_super_common(sb, bdev, s);
237 if (err)
238 goto err;
239 break;
240 case BCACHE_SB_VERSION_CDEV_WITH_FEATURES:
241 /*
242 * Feature bits are needed in read_super_common(),
243 * so convert them first.
244 */
245 sb->feature_compat = le64_to_cpu(s->feature_compat);
246 sb->feature_incompat = le64_to_cpu(s->feature_incompat);
247 sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat);
248
249 /* Check incompatible features */
250 err = "Unsupported compatible feature found";
251 if (bch_has_unknown_compat_features(sb))
252 goto err;
253
254 err = "Unsupported read-only compatible feature found";
255 if (bch_has_unknown_ro_compat_features(sb))
256 goto err;
257
258 err = "Unsupported incompatible feature found";
259 if (bch_has_unknown_incompat_features(sb))
260 goto err;
261
262 err = read_super_common(sb, bdev, s);
263 if (err)
264 goto err;
265 break;
266 default:
267 err = "Unsupported superblock version";
268 goto err;
269 }
270
271 sb->last_mount = (u32)ktime_get_real_seconds();
272 *res = s;
273 return NULL;
274 err:
275 put_page(page);
276 return err;
277 }
278
279 static void write_bdev_super_endio(struct bio *bio)
280 {
281 struct cached_dev *dc = bio->bi_private;
282
283 if (bio->bi_status)
284 bch_count_backing_io_errors(dc, bio);
285
286 closure_put(&dc->sb_write);
287 }
288
289 static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
290 struct bio *bio)
291 {
292 unsigned int i;
293
294 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META;
295 bio->bi_iter.bi_sector = SB_SECTOR;
296 __bio_add_page(bio, virt_to_page(out), SB_SIZE,
297 offset_in_page(out));
298
299 out->offset = cpu_to_le64(sb->offset);
300
301 memcpy(out->uuid, sb->uuid, 16);
302 memcpy(out->set_uuid, sb->set_uuid, 16);
303 memcpy(out->label, sb->label, SB_LABEL_SIZE);
304
305 out->flags = cpu_to_le64(sb->flags);
306 out->seq = cpu_to_le64(sb->seq);
307
308 out->last_mount = cpu_to_le32(sb->last_mount);
309 out->first_bucket = cpu_to_le16(sb->first_bucket);
310 out->keys = cpu_to_le16(sb->keys);
311
312 for (i = 0; i < sb->keys; i++)
313 out->d[i] = cpu_to_le64(sb->d[i]);
314
315 if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
316 out->feature_compat = cpu_to_le64(sb->feature_compat);
317 out->feature_incompat = cpu_to_le64(sb->feature_incompat);
318 out->feature_ro_compat = cpu_to_le64(sb->feature_ro_compat);
319 }
320
321 out->version = cpu_to_le64(sb->version);
322 out->csum = csum_set(out);
323
324 pr_debug("ver %llu, flags %llu, seq %llu\n",
325 sb->version, sb->flags, sb->seq);
326
327 submit_bio(bio);
328 }
329
330 static CLOSURE_CALLBACK(bch_write_bdev_super_unlock)
331 {
332 closure_type(dc, struct cached_dev, sb_write);
333
334 up(&dc->sb_write_mutex);
335 }
336
337 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
338 {
339 struct closure *cl = &dc->sb_write;
340 struct bio *bio = &dc->sb_bio;
341
342 down(&dc->sb_write_mutex);
343 closure_init(cl, parent);
344
345 bio_init(bio, dc->bdev, dc->sb_bv, 1, 0);
346 bio->bi_end_io = write_bdev_super_endio;
347 bio->bi_private = dc;
348
349 closure_get(cl);
350 /* I/O request sent to backing device */
351 __write_super(&dc->sb, dc->sb_disk, bio);
352
353 closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
354 }
355
356 static void write_super_endio(struct bio *bio)
357 {
358 struct cache *ca = bio->bi_private;
359
360 /* is_read = 0 */
361 bch_count_io_errors(ca, bio->bi_status, 0,
362 "writing superblock");
363 closure_put(&ca->set->sb_write);
364 }
365
366 static CLOSURE_CALLBACK(bcache_write_super_unlock)
367 {
368 closure_type(c, struct cache_set, sb_write);
369
370 up(&c->sb_write_mutex);
371 }
372
373 void bcache_write_super(struct cache_set *c)
374 {
375 struct closure *cl = &c->sb_write;
376 struct cache *ca = c->cache;
377 struct bio *bio = &ca->sb_bio;
378 unsigned int version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
379
380 down(&c->sb_write_mutex);
381 closure_init(cl, &c->cl);
382
383 ca->sb.seq++;
384
385 if (ca->sb.version < version)
386 ca->sb.version = version;
387
388 bio_init(bio, ca->bdev, ca->sb_bv, 1, 0);
389 bio->bi_end_io = write_super_endio;
390 bio->bi_private = ca;
391
392 closure_get(cl);
393 __write_super(&ca->sb, ca->sb_disk, bio);
394
395 closure_return_with_destructor(cl, bcache_write_super_unlock);
396 }
397
398 /* UUID io */
399
400 static void uuid_endio(struct bio *bio)
401 {
402 struct closure *cl = bio->bi_private;
403 struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
404
405 cache_set_err_on(bio->bi_status, c, "accessing uuids");
406 bch_bbio_free(bio, c);
407 closure_put(cl);
408 }
409
410 static CLOSURE_CALLBACK(uuid_io_unlock)
411 {
412 closure_type(c, struct cache_set, uuid_write);
413
414 up(&c->uuid_write_mutex);
415 }
416
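/*
 * Read or write the in-memory uuid_entry array (c->uuids) at the bucket(s)
 * referenced by key k. Writes go to every pointer in the key; reads stop
 * after the first one.
 */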
417 static void uuid_io(struct cache_set *c, blk_opf_t opf, struct bkey *k,
418 struct closure *parent)
419 {
420 struct closure *cl = &c->uuid_write;
421 struct uuid_entry *u;
422 unsigned int i;
423 char buf[80];
424
425 BUG_ON(!parent);
426 down(&c->uuid_write_mutex);
427 closure_init(cl, parent);
428
429 for (i = 0; i < KEY_PTRS(k); i++) {
430 struct bio *bio = bch_bbio_alloc(c);
431
432 bio->bi_opf = opf | REQ_SYNC | REQ_META;
433 bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
434
435 bio->bi_end_io = uuid_endio;
436 bio->bi_private = cl;
437 bch_bio_map(bio, c->uuids);
438
439 bch_submit_bbio(bio, c, k, i);
440
441 if ((opf & REQ_OP_MASK) != REQ_OP_WRITE)
442 break;
443 }
444
445 bch_extent_to_text(buf, sizeof(buf), k);
446 pr_debug("%s UUIDs at %s\n", (opf & REQ_OP_MASK) == REQ_OP_WRITE ?
447 "wrote" : "read", buf);
448
449 for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
450 if (!bch_is_zero(u->uuid, 16))
451 pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u\n",
452 u - c->uuids, u->uuid, u->label,
453 u->first_reg, u->last_reg, u->invalidated);
454
455 closure_return_with_destructor(cl, uuid_io_unlock);
456 }
457
458 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
459 {
460 struct bkey *k = &j->uuid_bucket;
461
462 if (__bch_btree_ptr_invalid(c, k))
463 return "bad uuid pointer";
464
465 bkey_copy(&c->uuid_bucket, k);
466 uuid_io(c, REQ_OP_READ, k, cl);
467
468 if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
469 struct uuid_entry_v0 *u0 = (void *) c->uuids;
470 struct uuid_entry *u1 = (void *) c->uuids;
471 int i;
472
473 closure_sync(cl);
474
475 /*
476 * Since the new uuid entry is bigger than the old, we have to
477 * convert starting at the highest memory address and work down
478 * in order to do it in place
479 */
480
481 for (i = c->nr_uuids - 1;
482 i >= 0;
483 --i) {
484 memcpy(u1[i].uuid, u0[i].uuid, 16);
485 memcpy(u1[i].label, u0[i].label, 32);
486
487 u1[i].first_reg = u0[i].first_reg;
488 u1[i].last_reg = u0[i].last_reg;
489 u1[i].invalidated = u0[i].invalidated;
490
491 u1[i].flags = 0;
492 u1[i].sectors = 0;
493 }
494 }
495
496 return NULL;
497 }
498
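/*
 * Allocate a fresh bucket, write the uuid array into it and remember its
 * key in c->uuid_bucket; bch_uuid_write() journals the new location.
 */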
499 static int __uuid_write(struct cache_set *c)
500 {
501 BKEY_PADDED(key) k;
502 struct closure cl;
503 struct cache *ca = c->cache;
504 unsigned int size;
505
506 closure_init_stack(&cl);
507 lockdep_assert_held(&bch_register_lock);
508
509 if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
510 return 1;
511
512 size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS;
513 SET_KEY_SIZE(&k.key, size);
514 uuid_io(c, REQ_OP_WRITE, &k.key, &cl);
515 closure_sync(&cl);
516
517 /* Only one bucket used for uuid write */
518 atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);
519
520 bkey_copy(&c->uuid_bucket, &k.key);
521 bkey_put(c, &k.key);
522 return 0;
523 }
524
525 int bch_uuid_write(struct cache_set *c)
526 {
527 int ret = __uuid_write(c);
528
529 if (!ret)
530 bch_journal_meta(c, NULL);
531
532 return ret;
533 }
534
535 static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
536 {
537 struct uuid_entry *u;
538
539 for (u = c->uuids;
540 u < c->uuids + c->nr_uuids; u++)
541 if (!memcmp(u->uuid, uuid, 16))
542 return u;
543
544 return NULL;
545 }
546
547 static struct uuid_entry *uuid_find_empty(struct cache_set *c)
548 {
549 static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
550
551 return uuid_find(c, zero_uuid);
552 }
553
554 /*
555 * Bucket priorities/gens:
556 *
557 * For each bucket, we store on disk its
558 * 8 bit gen
559 * 16 bit priority
560 *
561 * See alloc.c for an explanation of the gen. The priority is used to implement
562 * lru (and in the future other) cache replacement policies; for most purposes
563 * it's just an opaque integer.
564 *
565 * The gens and the priorities don't have a whole lot to do with each other, and
566 * it's actually the gens that must be written out at specific times - it's no
567 * big deal if the priorities don't get written, if we lose them we just reuse
568 * buckets in suboptimal order.
569 *
570 * On disk they're stored in a packed array, in as many buckets as are required
571 * to fit them all. The buckets we use to store them form a list; the journal
572 * header points to the first bucket, the first bucket points to the second
573 * bucket, et cetera.
574 *
575 * This code is used by the allocation code; periodically (whenever it runs out
576 * of buckets to allocate from) the allocation code will invalidate some
577 * buckets, but it can't use those buckets until their new gens are safely on
578 * disk.
579 */
580
581 static void prio_endio(struct bio *bio)
582 {
583 struct cache *ca = bio->bi_private;
584
585 cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
586 bch_bbio_free(bio, ca->set);
587 closure_put(&ca->prio);
588 }
589
590 static void prio_io(struct cache *ca, uint64_t bucket, blk_opf_t opf)
591 {
592 struct closure *cl = &ca->prio;
593 struct bio *bio = bch_bbio_alloc(ca->set);
594
595 closure_init_stack(cl);
596
597 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
598 bio_set_dev(bio, ca->bdev);
599 bio->bi_iter.bi_size = meta_bucket_bytes(&ca->sb);
600
601 bio->bi_end_io = prio_endio;
602 bio->bi_private = ca;
603 bio->bi_opf = opf | REQ_SYNC | REQ_META;
604 bch_bio_map(bio, ca->disk_buckets);
605
606 closure_bio_submit(ca->set, bio, &ca->prio);
607 closure_sync(cl);
608 }
609
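/*
 * Write the prio/gen arrays into newly allocated buckets, chaining them
 * via next_bucket, then journal the update; the buckets holding the
 * previous copy are freed only after the new ones are journalled.
 */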
610 int bch_prio_write(struct cache *ca, bool wait)
611 {
612 int i;
613 struct bucket *b;
614 struct closure cl;
615
616 pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu\n",
617 fifo_used(&ca->free[RESERVE_PRIO]),
618 fifo_used(&ca->free[RESERVE_NONE]),
619 fifo_used(&ca->free_inc));
620
621 /*
622 * Pre-check if there are enough free buckets. In the non-blocking
623 * scenario it's better to fail early rather than starting to allocate
624 * buckets and do a cleanup later in case of failure.
625 */
626 if (!wait) {
627 size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) +
628 fifo_used(&ca->free[RESERVE_NONE]);
629 if (prio_buckets(ca) > avail)
630 return -ENOMEM;
631 }
632
633 closure_init_stack(&cl);
634
635 lockdep_assert_held(&ca->set->bucket_lock);
636
637 ca->disk_buckets->seq++;
638
639 atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
640 &ca->meta_sectors_written);
641
642 for (i = prio_buckets(ca) - 1; i >= 0; --i) {
643 long bucket;
644 struct prio_set *p = ca->disk_buckets;
645 struct bucket_disk *d = p->data;
646 struct bucket_disk *end = d + prios_per_bucket(ca);
647
648 for (b = ca->buckets + i * prios_per_bucket(ca);
649 b < ca->buckets + ca->sb.nbuckets && d < end;
650 b++, d++) {
651 d->prio = cpu_to_le16(b->prio);
652 d->gen = b->gen;
653 }
654
655 p->next_bucket = ca->prio_buckets[i + 1];
656 p->magic = pset_magic(&ca->sb);
657 p->csum = bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8);
658
659 bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait);
660 BUG_ON(bucket == -1);
661
662 mutex_unlock(&ca->set->bucket_lock);
663 prio_io(ca, bucket, REQ_OP_WRITE);
664 mutex_lock(&ca->set->bucket_lock);
665
666 ca->prio_buckets[i] = bucket;
667 atomic_dec_bug(&ca->buckets[bucket].pin);
668 }
669
670 mutex_unlock(&ca->set->bucket_lock);
671
672 bch_journal_meta(ca->set, &cl);
673 closure_sync(&cl);
674
675 mutex_lock(&ca->set->bucket_lock);
676
677 /*
678 * Don't let the old priorities be garbage collected until the new
679 * ones have been written out and journalled.
680 */
681 for (i = 0; i < prio_buckets(ca); i++) {
682 if (ca->prio_last_buckets[i])
683 __bch_bucket_free(ca,
684 &ca->buckets[ca->prio_last_buckets[i]]);
685
686 ca->prio_last_buckets[i] = ca->prio_buckets[i];
687 }
688 return 0;
689 }
690
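/*
 * Read the prios/gens back in at startup, walking the chain of buckets
 * written by bch_prio_write() starting from the bucket recorded in the
 * journal. Returns -EIO on checksum or magic mismatch.
 */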
691 static int prio_read(struct cache *ca, uint64_t bucket)
692 {
693 struct prio_set *p = ca->disk_buckets;
694 struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
695 struct bucket *b;
696 unsigned int bucket_nr = 0;
697 int ret = -EIO;
698
699 for (b = ca->buckets;
700 b < ca->buckets + ca->sb.nbuckets;
701 b++, d++) {
702 if (d == end) {
703 ca->prio_buckets[bucket_nr] = bucket;
704 ca->prio_last_buckets[bucket_nr] = bucket;
705 bucket_nr++;
706
707 prio_io(ca, bucket, REQ_OP_READ);
708
709 if (p->csum !=
710 bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8)) {
711 pr_warn("bad csum reading priorities\n");
712 goto out;
713 }
714
715 if (p->magic != pset_magic(&ca->sb)) {
716 pr_warn("bad magic reading priorities\n");
717 goto out;
718 }
719
720 bucket = p->next_bucket;
721 d = p->data;
722 }
723
724 b->prio = le16_to_cpu(d->prio);
725 b->gen = b->last_gc = d->gen;
726 }
727
728 ret = 0;
729 out:
730 return ret;
731 }
732
733 /* Bcache device */
734
735 static int open_dev(struct gendisk *disk, blk_mode_t mode)
736 {
737 struct bcache_device *d = disk->private_data;
738
739 if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
740 return -ENXIO;
741
742 closure_get(&d->cl);
743 return 0;
744 }
745
746 static void release_dev(struct gendisk *b)
747 {
748 struct bcache_device *d = b->private_data;
749
750 closure_put(&d->cl);
751 }
752
753 static int ioctl_dev(struct block_device *b, blk_mode_t mode,
754 unsigned int cmd, unsigned long arg)
755 {
756 struct bcache_device *d = b->bd_disk->private_data;
757
758 return d->ioctl(d, mode, cmd, arg);
759 }
760
761 static const struct block_device_operations bcache_cached_ops = {
762 .submit_bio = cached_dev_submit_bio,
763 .open = open_dev,
764 .release = release_dev,
765 .ioctl = ioctl_dev,
766 .owner = THIS_MODULE,
767 };
768
769 static const struct block_device_operations bcache_flash_ops = {
770 .submit_bio = flash_dev_submit_bio,
771 .open = open_dev,
772 .release = release_dev,
773 .ioctl = ioctl_dev,
774 .owner = THIS_MODULE,
775 };
776
777 void bcache_device_stop(struct bcache_device *d)
778 {
779 if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
780 /*
781 * closure_fn set to
782 * - cached device: cached_dev_flush()
783 * - flash dev: flash_dev_flush()
784 */
785 closure_queue(&d->cl);
786 }
787
788 static void bcache_device_unlink(struct bcache_device *d)
789 {
790 lockdep_assert_held(&bch_register_lock);
791
792 if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
793 struct cache *ca = d->c->cache;
794
795 sysfs_remove_link(&d->c->kobj, d->name);
796 sysfs_remove_link(&d->kobj, "cache");
797
798 bd_unlink_disk_holder(ca->bdev, d->disk);
799 }
800 }
801
802 static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
803 const char *name)
804 {
805 struct cache *ca = c->cache;
806 int ret;
807
808 bd_link_disk_holder(ca->bdev, d->disk);
809
810 snprintf(d->name, BCACHEDEVNAME_SIZE,
811 "%s%u", name, d->id);
812
813 ret = sysfs_create_link(&d->kobj, &c->kobj, "cache");
814 if (ret < 0)
815 pr_err("Couldn't create device -> cache set symlink\n");
816
817 ret = sysfs_create_link(&c->kobj, &d->kobj, d->name);
818 if (ret < 0)
819 pr_err("Couldn't create cache set -> device symlink\n");
820
821 clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
822 }
823
824 static void bcache_device_detach(struct bcache_device *d)
825 {
826 lockdep_assert_held(&bch_register_lock);
827
828 atomic_dec(&d->c->attached_dev_nr);
829
830 if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
831 struct uuid_entry *u = d->c->uuids + d->id;
832
833 SET_UUID_FLASH_ONLY(u, 0);
834 memcpy(u->uuid, invalid_uuid, 16);
835 u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
836 bch_uuid_write(d->c);
837 }
838
839 bcache_device_unlink(d);
840
841 d->c->devices[d->id] = NULL;
842 closure_put(&d->c->caching);
843 d->c = NULL;
844 }
845
846 static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
847 unsigned int id)
848 {
849 d->id = id;
850 d->c = c;
851 c->devices[id] = d;
852
853 if (id >= c->devices_max_used)
854 c->devices_max_used = id + 1;
855
856 closure_get(&c->caching);
857 }
858
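/*
 * Each bcache device reserves BCACHE_MINORS minor numbers for partitions;
 * these helpers convert between the index allocated from bcache_device_idx
 * and the disk's first minor number.
 */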
859 static inline int first_minor_to_idx(int first_minor)
860 {
861 return (first_minor/BCACHE_MINORS);
862 }
863
864 static inline int idx_to_first_minor(int idx)
865 {
866 return (idx * BCACHE_MINORS);
867 }
868
869 static void bcache_device_free(struct bcache_device *d)
870 {
871 struct gendisk *disk = d->disk;
872
873 lockdep_assert_held(&bch_register_lock);
874
875 if (disk)
876 pr_info("%s stopped\n", disk->disk_name);
877 else
878 pr_err("bcache device (NULL gendisk) stopped\n");
879
880 if (d->c)
881 bcache_device_detach(d);
882
883 if (disk) {
884 ida_free(&bcache_device_idx,
885 first_minor_to_idx(disk->first_minor));
886 put_disk(disk);
887 }
888
889 bioset_exit(&d->bio_split);
890 kvfree(d->full_dirty_stripes);
891 kvfree(d->stripe_sectors_dirty);
892
893 closure_debug_destroy(&d->cl);
894 }
895
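/*
 * Common initialisation for cached devices and flash-only volumes: set up
 * the dirty-stripe bookkeeping, allocate a device index and the bio_split
 * bioset, and create the gendisk with the requested block size and capacity.
 */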
896 static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
897 sector_t sectors, struct block_device *cached_bdev,
898 const struct block_device_operations *ops)
899 {
900 struct request_queue *q;
901 const size_t max_stripes = min_t(size_t, INT_MAX,
902 SIZE_MAX / sizeof(atomic_t));
903 struct queue_limits lim = {
904 .max_hw_sectors = UINT_MAX,
905 .max_sectors = UINT_MAX,
906 .max_segment_size = UINT_MAX,
907 .max_segments = BIO_MAX_VECS,
908 .max_hw_discard_sectors = UINT_MAX,
909 .io_min = block_size,
910 .logical_block_size = block_size,
911 .physical_block_size = block_size,
912 };
913 uint64_t n;
914 int idx;
915
916 if (cached_bdev) {
917 d->stripe_size = bdev_io_opt(cached_bdev) >> SECTOR_SHIFT;
918 lim.io_opt = umax(block_size, bdev_io_opt(cached_bdev));
919 }
920 if (!d->stripe_size)
921 d->stripe_size = 1 << 31;
922 else if (d->stripe_size < BCH_MIN_STRIPE_SZ)
923 d->stripe_size = roundup(BCH_MIN_STRIPE_SZ, d->stripe_size);
924
925 n = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
926 if (!n || n > max_stripes) {
927 pr_err("nr_stripes too large or invalid: %llu (start sector beyond end of disk?)\n",
928 n);
929 return -ENOMEM;
930 }
931 d->nr_stripes = n;
932
933 n = d->nr_stripes * sizeof(atomic_t);
934 d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
935 if (!d->stripe_sectors_dirty)
936 return -ENOMEM;
937
938 n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
939 d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
940 if (!d->full_dirty_stripes)
941 goto out_free_stripe_sectors_dirty;
942
943 idx = ida_alloc_max(&bcache_device_idx, BCACHE_DEVICE_IDX_MAX - 1,
944 GFP_KERNEL);
945 if (idx < 0)
946 goto out_free_full_dirty_stripes;
947
948 if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
949 BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
950 goto out_ida_remove;
951
952 if (lim.logical_block_size > PAGE_SIZE && cached_bdev) {
953 /*
954 * This should only happen with BCACHE_SB_VERSION_BDEV.
955 * Block/page size is checked for BCACHE_SB_VERSION_CDEV.
956 */
957 pr_info("bcache%i: sb/logical block size (%u) greater than page size (%lu) falling back to device logical block size (%u)\n",
958 idx, lim.logical_block_size,
959 PAGE_SIZE, bdev_logical_block_size(cached_bdev));
960
961 /* This also adjusts physical block size/min io size if needed */
962 lim.logical_block_size = bdev_logical_block_size(cached_bdev);
963 }
964
965 d->disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
966 if (IS_ERR(d->disk))
967 goto out_bioset_exit;
968
969 set_capacity(d->disk, sectors);
970 snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);
971
972 d->disk->major = bcache_major;
973 d->disk->first_minor = idx_to_first_minor(idx);
974 d->disk->minors = BCACHE_MINORS;
975 d->disk->fops = ops;
976 d->disk->private_data = d;
977
978 q = d->disk->queue;
979
980 blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
981
982 blk_queue_write_cache(q, true, true);
983
984 return 0;
985
986 out_bioset_exit:
987 bioset_exit(&d->bio_split);
988 out_ida_remove:
989 ida_free(&bcache_device_idx, idx);
990 out_free_full_dirty_stripes:
991 kvfree(d->full_dirty_stripes);
992 out_free_stripe_sectors_dirty:
993 kvfree(d->stripe_sectors_dirty);
994 return -ENOMEM;
995
996 }
997
998 /* Cached device */
999
1000 static void calc_cached_dev_sectors(struct cache_set *c)
1001 {
1002 uint64_t sectors = 0;
1003 struct cached_dev *dc;
1004
1005 list_for_each_entry(dc, &c->cached_devs, list)
1006 sectors += bdev_nr_sectors(dc->bdev);
1007
1008 c->cached_dev_sectors = sectors;
1009 }
1010
1011 #define BACKING_DEV_OFFLINE_TIMEOUT 5
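/*
 * Kthread that polls the backing device about once a second; if its queue
 * stays dying for BACKING_DEV_OFFLINE_TIMEOUT consecutive seconds, disable
 * I/O and stop the bcache device.
 */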
1012 static int cached_dev_status_update(void *arg)
1013 {
1014 struct cached_dev *dc = arg;
1015 struct request_queue *q;
1016
1017 /*
1018 * If this kthread is being stopped from outside, quit here directly.
1019 * dc->io_disable might be set via sysfs interface, so check it
1020 * here too.
1021 */
1022 while (!kthread_should_stop() && !dc->io_disable) {
1023 q = bdev_get_queue(dc->bdev);
1024 if (blk_queue_dying(q))
1025 dc->offline_seconds++;
1026 else
1027 dc->offline_seconds = 0;
1028
1029 if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) {
1030 pr_err("%pg: device offline for %d seconds\n",
1031 dc->bdev,
1032 BACKING_DEV_OFFLINE_TIMEOUT);
1033 pr_err("%s: disable I/O request due to backing device offline\n",
1034 dc->disk.name);
1035 dc->io_disable = true;
1036 /* let others know earlier that io_disable is true */
1037 smp_mb();
1038 bcache_device_stop(&dc->disk);
1039 break;
1040 }
1041 schedule_timeout_interruptible(HZ);
1042 }
1043
1044 wait_for_kthread_stop();
1045 return 0;
1046 }
1047
1048
1049 int bch_cached_dev_run(struct cached_dev *dc)
1050 {
1051 int ret = 0;
1052 struct bcache_device *d = &dc->disk;
1053 char *buf = kmemdup_nul(dc->sb.label, SB_LABEL_SIZE, GFP_KERNEL);
1054 char *env[] = {
1055 "DRIVER=bcache",
1056 kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
1057 kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf ? : ""),
1058 NULL,
1059 };
1060
1061 if (dc->io_disable) {
1062 pr_err("I/O disabled on cached dev %pg\n", dc->bdev);
1063 ret = -EIO;
1064 goto out;
1065 }
1066
1067 if (atomic_xchg(&dc->running, 1)) {
1068 pr_info("cached dev %pg is running already\n", dc->bdev);
1069 ret = -EBUSY;
1070 goto out;
1071 }
1072
1073 if (!d->c &&
1074 BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
1075 struct closure cl;
1076
1077 closure_init_stack(&cl);
1078
1079 SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
1080 bch_write_bdev_super(dc, &cl);
1081 closure_sync(&cl);
1082 }
1083
1084 ret = add_disk(d->disk);
1085 if (ret)
1086 goto out;
1087 bd_link_disk_holder(dc->bdev, dc->disk.disk);
1088 /*
1089 * These env variables won't show up in the uevent file; use
1090 * udevadm monitor -e instead. Only class / kset properties are persistent.
1091 */
1092 kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
1093
1094 if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
1095 sysfs_create_link(&disk_to_dev(d->disk)->kobj,
1096 &d->kobj, "bcache")) {
1097 pr_err("Couldn't create bcache dev <-> disk sysfs symlinks\n");
1098 ret = -ENOMEM;
1099 goto out;
1100 }
1101
1102 dc->status_update_thread = kthread_run(cached_dev_status_update,
1103 dc, "bcache_status_update");
1104 if (IS_ERR(dc->status_update_thread)) {
1105 pr_warn("failed to create bcache_status_update kthread, continue to run without monitoring backing device status\n");
1106 }
1107
1108 out:
1109 kfree(env[1]);
1110 kfree(env[2]);
1111 kfree(buf);
1112 return ret;
1113 }
1114
1115 /*
1116 * If BCACHE_DEV_RATE_DW_RUNNING is set, the routine of the delayed
1117 * work dc->writeback_rate_update is running. Wait until the routine
1118 * quits (BCACHE_DEV_RATE_DW_RUNNING is clear), then continue to
1119 * cancel it. If BCACHE_DEV_RATE_DW_RUNNING is not clear after time_out
1120 * seconds, give up waiting here and continue to cancel it too.
1121 */
1122 static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
1123 {
1124 int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ;
1125
1126 do {
1127 if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING,
1128 &dc->disk.flags))
1129 break;
1130 time_out--;
1131 schedule_timeout_interruptible(1);
1132 } while (time_out > 0);
1133
1134 if (time_out == 0)
1135 pr_warn("give up waiting for dc->writeback_write_update to quit\n");
1136
1137 cancel_delayed_work_sync(&dc->writeback_rate_update);
1138 }
1139
1140 static void cached_dev_detach_finish(struct work_struct *w)
1141 {
1142 struct cached_dev *dc = container_of(w, struct cached_dev, detach);
1143 struct cache_set *c = dc->disk.c;
1144
1145 BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
1146 BUG_ON(refcount_read(&dc->count));
1147
1148
1149 if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
1150 cancel_writeback_rate_update_dwork(dc);
1151
1152 if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
1153 kthread_stop(dc->writeback_thread);
1154 dc->writeback_thread = NULL;
1155 }
1156
1157 mutex_lock(&bch_register_lock);
1158
1159 bcache_device_detach(&dc->disk);
1160 list_move(&dc->list, &uncached_devices);
1161 calc_cached_dev_sectors(c);
1162
1163 clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
1164 clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);
1165
1166 mutex_unlock(&bch_register_lock);
1167
1168 pr_info("Caching disabled for %pg\n", dc->bdev);
1169
1170 /* Drop ref we took in cached_dev_detach() */
1171 closure_put(&dc->disk.cl);
1172 }
1173
1174 void bch_cached_dev_detach(struct cached_dev *dc)
1175 {
1176 lockdep_assert_held(&bch_register_lock);
1177
1178 if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
1179 return;
1180
1181 if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
1182 return;
1183
1184 /*
1185 * Block the device from being closed and freed until we're finished
1186 * detaching
1187 */
1188 closure_get(&dc->disk.cl);
1189
1190 bch_writeback_queue(dc);
1191
1192 cached_dev_put(dc);
1193 }
1194
1195 int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
1196 uint8_t *set_uuid)
1197 {
1198 uint32_t rtime = cpu_to_le32((u32)ktime_get_real_seconds());
1199 struct uuid_entry *u;
1200 struct cached_dev *exist_dc, *t;
1201 int ret = 0;
1202
1203 if ((set_uuid && memcmp(set_uuid, c->set_uuid, 16)) ||
1204 (!set_uuid && memcmp(dc->sb.set_uuid, c->set_uuid, 16)))
1205 return -ENOENT;
1206
1207 if (dc->disk.c) {
1208 pr_err("Can't attach %pg: already attached\n", dc->bdev);
1209 return -EINVAL;
1210 }
1211
1212 if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
1213 pr_err("Can't attach %pg: shutting down\n", dc->bdev);
1214 return -EINVAL;
1215 }
1216
1217 if (dc->sb.block_size < c->cache->sb.block_size) {
1218 /* Will die */
1219 pr_err("Couldn't attach %pg: block size less than set's block size\n",
1220 dc->bdev);
1221 return -EINVAL;
1222 }
1223
1224 /* Check whether already attached */
1225 list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
1226 if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
1227 pr_err("Tried to attach %pg but duplicate UUID already attached\n",
1228 dc->bdev);
1229
1230 return -EINVAL;
1231 }
1232 }
1233
1234 u = uuid_find(c, dc->sb.uuid);
1235
1236 if (u &&
1237 (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
1238 BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
1239 memcpy(u->uuid, invalid_uuid, 16);
1240 u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
1241 u = NULL;
1242 }
1243
1244 if (!u) {
1245 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
1246 pr_err("Couldn't find uuid for %pg in set\n", dc->bdev);
1247 return -ENOENT;
1248 }
1249
1250 u = uuid_find_empty(c);
1251 if (!u) {
1252 pr_err("Not caching %pg, no room for UUID\n", dc->bdev);
1253 return -EINVAL;
1254 }
1255 }
1256
1257 /*
1258 * Deadlocks since we're called via sysfs...
1259 * sysfs_remove_file(&dc->kobj, &sysfs_attach);
1260 */
1261
1262 if (bch_is_zero(u->uuid, 16)) {
1263 struct closure cl;
1264
1265 closure_init_stack(&cl);
1266
1267 memcpy(u->uuid, dc->sb.uuid, 16);
1268 memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
1269 u->first_reg = u->last_reg = rtime;
1270 bch_uuid_write(c);
1271
1272 memcpy(dc->sb.set_uuid, c->set_uuid, 16);
1273 SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
1274
1275 bch_write_bdev_super(dc, &cl);
1276 closure_sync(&cl);
1277 } else {
1278 u->last_reg = rtime;
1279 bch_uuid_write(c);
1280 }
1281
1282 bcache_device_attach(&dc->disk, c, u - c->uuids);
1283 list_move(&dc->list, &c->cached_devs);
1284 calc_cached_dev_sectors(c);
1285
1286 /*
1287 * dc->c must be set before dc->count != 0 - paired with the mb in
1288 * cached_dev_get()
1289 */
1290 smp_wmb();
1291 refcount_set(&dc->count, 1);
1292
1293 /* Block writeback thread, but spawn it */
1294 down_write(&dc->writeback_lock);
1295 if (bch_cached_dev_writeback_start(dc)) {
1296 up_write(&dc->writeback_lock);
1297 pr_err("Couldn't start writeback facilities for %s\n",
1298 dc->disk.disk->disk_name);
1299 return -ENOMEM;
1300 }
1301
1302 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
1303 atomic_set(&dc->has_dirty, 1);
1304 bch_writeback_queue(dc);
1305 }
1306
1307 bch_sectors_dirty_init(&dc->disk);
1308
1309 ret = bch_cached_dev_run(dc);
1310 if (ret && (ret != -EBUSY)) {
1311 up_write(&dc->writeback_lock);
1312 /*
1313 * bch_register_lock is held, bcache_device_stop() is not
1314 * able to be directly called. The kthread and kworker
1315 * created previously in bch_cached_dev_writeback_start()
1316 * have to be stopped manually here.
1317 */
1318 kthread_stop(dc->writeback_thread);
1319 cancel_writeback_rate_update_dwork(dc);
1320 pr_err("Couldn't run cached device %pg\n", dc->bdev);
1321 return ret;
1322 }
1323
1324 bcache_device_link(&dc->disk, c, "bdev");
1325 atomic_inc(&c->attached_dev_nr);
1326
1327 if (bch_has_feature_obso_large_bucket(&(c->cache->sb))) {
1328 pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
1329 pr_err("Please update to the latest bcache-tools to create the cache device\n");
1330 set_disk_ro(dc->disk.disk, 1);
1331 }
1332
1333 /* Allow the writeback thread to proceed */
1334 up_write(&dc->writeback_lock);
1335
1336 pr_info("Caching %pg as %s on set %pU\n",
1337 dc->bdev,
1338 dc->disk.disk->disk_name,
1339 dc->disk.c->set_uuid);
1340 return 0;
1341 }
1342
1343 /* when dc->disk.kobj released */
1344 void bch_cached_dev_release(struct kobject *kobj)
1345 {
1346 struct cached_dev *dc = container_of(kobj, struct cached_dev,
1347 disk.kobj);
1348 kfree(dc);
1349 module_put(THIS_MODULE);
1350 }
1351
1352 static CLOSURE_CALLBACK(cached_dev_free)
1353 {
1354 closure_type(dc, struct cached_dev, disk.cl);
1355
1356 if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
1357 cancel_writeback_rate_update_dwork(dc);
1358
1359 if (!IS_ERR_OR_NULL(dc->writeback_thread))
1360 kthread_stop(dc->writeback_thread);
1361 if (!IS_ERR_OR_NULL(dc->status_update_thread))
1362 kthread_stop(dc->status_update_thread);
1363
1364 mutex_lock(&bch_register_lock);
1365
1366 if (atomic_read(&dc->running)) {
1367 bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
1368 del_gendisk(dc->disk.disk);
1369 }
1370 bcache_device_free(&dc->disk);
1371 list_del(&dc->list);
1372
1373 mutex_unlock(&bch_register_lock);
1374
1375 if (dc->sb_disk)
1376 put_page(virt_to_page(dc->sb_disk));
1377
1378 if (dc->bdev_file)
1379 fput(dc->bdev_file);
1380
1381 wake_up(&unregister_wait);
1382
1383 kobject_put(&dc->disk.kobj);
1384 }
1385
1386 static CLOSURE_CALLBACK(cached_dev_flush)
1387 {
1388 closure_type(dc, struct cached_dev, disk.cl);
1389 struct bcache_device *d = &dc->disk;
1390
1391 mutex_lock(&bch_register_lock);
1392 bcache_device_unlink(d);
1393 mutex_unlock(&bch_register_lock);
1394
1395 bch_cache_accounting_destroy(&dc->accounting);
1396 kobject_del(&d->kobj);
1397
1398 continue_at(cl, cached_dev_free, system_wq);
1399 }
1400
1401 static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
1402 {
1403 int ret;
1404 struct io *io;
1405 struct request_queue *q = bdev_get_queue(dc->bdev);
1406
1407 __module_get(THIS_MODULE);
1408 INIT_LIST_HEAD(&dc->list);
1409 closure_init(&dc->disk.cl, NULL);
1410 set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
1411 kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
1412 INIT_WORK(&dc->detach, cached_dev_detach_finish);
1413 sema_init(&dc->sb_write_mutex, 1);
1414 INIT_LIST_HEAD(&dc->io_lru);
1415 spin_lock_init(&dc->io_lock);
1416 bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
1417
1418 dc->sequential_cutoff = 4 << 20;
1419
1420 for (io = dc->io; io < dc->io + RECENT_IO; io++) {
1421 list_add(&io->lru, &dc->io_lru);
1422 hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
1423 }
1424
1425 if (bdev_io_opt(dc->bdev))
1426 dc->partial_stripes_expensive =
1427 q->limits.raid_partial_stripes_expensive;
1428
1429 ret = bcache_device_init(&dc->disk, block_size,
1430 bdev_nr_sectors(dc->bdev) - dc->sb.data_offset,
1431 dc->bdev, &bcache_cached_ops);
1432 if (ret)
1433 return ret;
1434
1435 atomic_set(&dc->io_errors, 0);
1436 dc->io_disable = false;
1437 dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT;
1438 /* default to auto */
1439 dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO;
1440
1441 bch_cached_dev_request_init(dc);
1442 bch_cached_dev_writeback_init(dc);
1443 return 0;
1444 }
1445
1446 /* Cached device - bcache superblock */
1447
1448 static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
1449 struct file *bdev_file,
1450 struct cached_dev *dc)
1451 {
1452 const char *err = "cannot allocate memory";
1453 struct cache_set *c;
1454 int ret = -ENOMEM;
1455
1456 memcpy(&dc->sb, sb, sizeof(struct cache_sb));
1457 dc->bdev_file = bdev_file;
1458 dc->bdev = file_bdev(bdev_file);
1459 dc->sb_disk = sb_disk;
1460
1461 if (cached_dev_init(dc, sb->block_size << 9))
1462 goto err;
1463
1464 err = "error creating kobject";
1465 if (kobject_add(&dc->disk.kobj, bdev_kobj(dc->bdev), "bcache"))
1466 goto err;
1467 if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
1468 goto err;
1469
1470 pr_info("registered backing device %pg\n", dc->bdev);
1471
1472 list_add(&dc->list, &uncached_devices);
1473 /* attach to a matched cache set if it exists */
1474 list_for_each_entry(c, &bch_cache_sets, list)
1475 bch_cached_dev_attach(dc, c, NULL);
1476
1477 if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
1478 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) {
1479 err = "failed to run cached device";
1480 ret = bch_cached_dev_run(dc);
1481 if (ret)
1482 goto err;
1483 }
1484
1485 return 0;
1486 err:
1487 pr_notice("error %pg: %s\n", dc->bdev, err);
1488 bcache_device_stop(&dc->disk);
1489 return ret;
1490 }
1491
1492 /* Flash only volumes */
1493
1494 /* When d->kobj released */
1495 void bch_flash_dev_release(struct kobject *kobj)
1496 {
1497 struct bcache_device *d = container_of(kobj, struct bcache_device,
1498 kobj);
1499 kfree(d);
1500 }
1501
1502 static CLOSURE_CALLBACK(flash_dev_free)
1503 {
1504 closure_type(d, struct bcache_device, cl);
1505
1506 mutex_lock(&bch_register_lock);
1507 atomic_long_sub(bcache_dev_sectors_dirty(d),
1508 &d->c->flash_dev_dirty_sectors);
1509 del_gendisk(d->disk);
1510 bcache_device_free(d);
1511 mutex_unlock(&bch_register_lock);
1512 kobject_put(&d->kobj);
1513 }
1514
1515 static CLOSURE_CALLBACK(flash_dev_flush)
1516 {
1517 closure_type(d, struct bcache_device, cl);
1518
1519 mutex_lock(&bch_register_lock);
1520 bcache_device_unlink(d);
1521 mutex_unlock(&bch_register_lock);
1522 kobject_del(&d->kobj);
1523 continue_at(cl, flash_dev_free, system_wq);
1524 }
1525
1526 static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
1527 {
1528 int err = -ENOMEM;
1529 struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
1530 GFP_KERNEL);
1531 if (!d)
1532 goto err_ret;
1533
1534 closure_init(&d->cl, NULL);
1535 set_closure_fn(&d->cl, flash_dev_flush, system_wq);
1536
1537 kobject_init(&d->kobj, &bch_flash_dev_ktype);
1538
1539 if (bcache_device_init(d, block_bytes(c->cache), u->sectors,
1540 NULL, &bcache_flash_ops))
1541 goto err;
1542
1543 bcache_device_attach(d, c, u - c->uuids);
1544 bch_sectors_dirty_init(d);
1545 bch_flash_dev_request_init(d);
1546 err = add_disk(d->disk);
1547 if (err)
1548 goto err;
1549
1550 err = kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache");
1551 if (err)
1552 goto err;
1553
1554 bcache_device_link(d, c, "volume");
1555
1556 if (bch_has_feature_obso_large_bucket(&c->cache->sb)) {
1557 pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
1558 pr_err("Please update to the latest bcache-tools to create the cache device\n");
1559 set_disk_ro(d->disk, 1);
1560 }
1561
1562 return 0;
1563 err:
1564 kobject_put(&d->kobj);
1565 err_ret:
1566 return err;
1567 }
1568
1569 static int flash_devs_run(struct cache_set *c)
1570 {
1571 int ret = 0;
1572 struct uuid_entry *u;
1573
1574 for (u = c->uuids;
1575 u < c->uuids + c->nr_uuids && !ret;
1576 u++)
1577 if (UUID_FLASH_ONLY(u))
1578 ret = flash_dev_run(c, u);
1579
1580 return ret;
1581 }
1582
1583 int bch_flash_dev_create(struct cache_set *c, uint64_t size)
1584 {
1585 struct uuid_entry *u;
1586
1587 if (test_bit(CACHE_SET_STOPPING, &c->flags))
1588 return -EINTR;
1589
1590 if (!test_bit(CACHE_SET_RUNNING, &c->flags))
1591 return -EPERM;
1592
1593 u = uuid_find_empty(c);
1594 if (!u) {
1595 pr_err("Can't create volume, no room for UUID\n");
1596 return -EINVAL;
1597 }
1598
1599 get_random_bytes(u->uuid, 16);
1600 memset(u->label, 0, 32);
1601 u->first_reg = u->last_reg = cpu_to_le32((u32)ktime_get_real_seconds());
1602
1603 SET_UUID_FLASH_ONLY(u, 1);
1604 u->sectors = size >> 9;
1605
1606 bch_uuid_write(c);
1607
1608 return flash_dev_run(c, u);
1609 }
1610
1611 bool bch_cached_dev_error(struct cached_dev *dc)
1612 {
1613 if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
1614 return false;
1615
1616 dc->io_disable = true;
1617 /* make others know io_disable is true earlier */
1618 smp_mb();
1619
1620 pr_err("stop %s: too many IO errors on backing device %pg\n",
1621 dc->disk.disk->disk_name, dc->bdev);
1622
1623 bcache_device_stop(&dc->disk);
1624 return true;
1625 }
1626
1627 /* Cache set */
1628
1629 __printf(2, 3)
1630 bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
1631 {
1632 struct va_format vaf;
1633 va_list args;
1634
1635 if (c->on_error != ON_ERROR_PANIC &&
1636 test_bit(CACHE_SET_STOPPING, &c->flags))
1637 return false;
1638
1639 if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
1640 pr_info("CACHE_SET_IO_DISABLE already set\n");
1641
1642 /*
1643 * XXX: we can be called from atomic context
1644 * acquire_console_sem();
1645 */
1646
1647 va_start(args, fmt);
1648
1649 vaf.fmt = fmt;
1650 vaf.va = &args;
1651
1652 pr_err("error on %pU: %pV, disabling caching\n",
1653 c->set_uuid, &vaf);
1654
1655 va_end(args);
1656
1657 if (c->on_error == ON_ERROR_PANIC)
1658 panic("panic forced after error\n");
1659
1660 bch_cache_set_unregister(c);
1661 return true;
1662 }
1663
1664 /* When c->kobj released */
1665 void bch_cache_set_release(struct kobject *kobj)
1666 {
1667 struct cache_set *c = container_of(kobj, struct cache_set, kobj);
1668
1669 kfree(c);
1670 module_put(THIS_MODULE);
1671 }
1672
1673 static CLOSURE_CALLBACK(cache_set_free)
1674 {
1675 closure_type(c, struct cache_set, cl);
1676 struct cache *ca;
1677
1678 debugfs_remove(c->debug);
1679
1680 bch_open_buckets_free(c);
1681 bch_btree_cache_free(c);
1682 bch_journal_free(c);
1683
1684 mutex_lock(&bch_register_lock);
1685 bch_bset_sort_state_free(&c->sort);
1686 free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->cache->sb)));
1687
1688 ca = c->cache;
1689 if (ca) {
1690 ca->set = NULL;
1691 c->cache = NULL;
1692 kobject_put(&ca->kobj);
1693 }
1694
1695
1696 if (c->moving_gc_wq)
1697 destroy_workqueue(c->moving_gc_wq);
1698 bioset_exit(&c->bio_split);
1699 mempool_exit(&c->fill_iter);
1700 mempool_exit(&c->bio_meta);
1701 mempool_exit(&c->search);
1702 kfree(c->devices);
1703
1704 list_del(&c->list);
1705 mutex_unlock(&bch_register_lock);
1706
1707 pr_info("Cache set %pU unregistered\n", c->set_uuid);
1708 wake_up(&unregister_wait);
1709
1710 closure_debug_destroy(&c->cl);
1711 kobject_put(&c->kobj);
1712 }
1713
1714 static CLOSURE_CALLBACK(cache_set_flush)
1715 {
1716 closure_type(c, struct cache_set, caching);
1717 struct cache *ca = c->cache;
1718 struct btree *b;
1719
1720 bch_cache_accounting_destroy(&c->accounting);
1721
1722 kobject_put(&c->internal);
1723 kobject_del(&c->kobj);
1724
1725 if (!IS_ERR_OR_NULL(c->gc_thread))
1726 kthread_stop(c->gc_thread);
1727
1728 if (!IS_ERR(c->root))
1729 list_add(&c->root->list, &c->btree_cache);
1730
1731 /*
1732 * Avoid flushing cached nodes if cache set is retiring
1733 * due to too many I/O errors detected.
1734 */
1735 if (!test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1736 list_for_each_entry(b, &c->btree_cache, list) {
1737 mutex_lock(&b->write_lock);
1738 if (btree_node_dirty(b))
1739 __bch_btree_node_write(b, NULL);
1740 mutex_unlock(&b->write_lock);
1741 }
1742
1743 if (ca->alloc_thread)
1744 kthread_stop(ca->alloc_thread);
1745
1746 if (c->journal.cur) {
1747 cancel_delayed_work_sync(&c->journal.work);
1748 /* flush last journal entry if needed */
1749 c->journal.work.work.func(&c->journal.work.work);
1750 }
1751
1752 closure_return(cl);
1753 }
1754
1755 /*
1756 * This function is only called when CACHE_SET_IO_DISABLE is set, which means
1757 * cache set is unregistering due to too many I/O errors. In this condition,
1758 * the bcache device might be stopped; this depends on the stop_when_cache_set_failed
1759 * value and whether the broken cache has dirty data:
1760 *
1761 * dc->stop_when_cache_set_failed dc->has_dirty stop bcache device
1762 * BCH_CACHED_STOP_AUTO 0 NO
1763 * BCH_CACHED_STOP_AUTO 1 YES
1764 * BCH_CACHED_DEV_STOP_ALWAYS 0 YES
1765 * BCH_CACHED_DEV_STOP_ALWAYS 1 YES
1766 *
1767 * The expected behavior is, if stop_when_cache_set_failed is configured to
1768 * "auto" via sysfs interface, the bcache device will not be stopped if the
1769 * backing device is clean on the broken cache device.
1770 */
1771 static void conditional_stop_bcache_device(struct cache_set *c,
1772 struct bcache_device *d,
1773 struct cached_dev *dc)
1774 {
1775 if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) {
1776 pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.\n",
1777 d->disk->disk_name, c->set_uuid);
1778 bcache_device_stop(d);
1779 } else if (atomic_read(&dc->has_dirty)) {
1780 /*
1781 * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO
1782 * and dc->has_dirty == 1
1783 */
1784 pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.\n",
1785 d->disk->disk_name);
1786 /*
1787 * There might be a small window in which the cache set is
1788 * already released but the bcache device is not. During this
1789 * window, regular I/O requests go directly to the backing
1790 * device since no cache set is attached. In writeback mode
1791 * with dirty data, this could leave the backing device with
1792 * inconsistent data.
1793 * Therefore before calling bcache_device_stop() due
1794 * to a broken cache device, dc->io_disable should be
1795 * explicitly set to true.
1796 */
1797 dc->io_disable = true;
1798 /* make others know io_disable is true earlier */
1799 smp_mb();
1800 bcache_device_stop(d);
1801 } else {
1802 /*
1803 * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO
1804 * and dc->has_dirty == 0
1805 */
1806 pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.\n",
1807 d->disk->disk_name);
1808 }
1809 }
1810
1811 static CLOSURE_CALLBACK(__cache_set_unregister)
1812 {
1813 closure_type(c, struct cache_set, caching);
1814 struct cached_dev *dc;
1815 struct bcache_device *d;
1816 size_t i;
1817
1818 mutex_lock(&bch_register_lock);
1819
1820 for (i = 0; i < c->devices_max_used; i++) {
1821 d = c->devices[i];
1822 if (!d)
1823 continue;
1824
1825 if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
1826 test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
1827 dc = container_of(d, struct cached_dev, disk);
1828 bch_cached_dev_detach(dc);
1829 if (test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1830 conditional_stop_bcache_device(c, d, dc);
1831 } else {
1832 bcache_device_stop(d);
1833 }
1834 }
1835
1836 mutex_unlock(&bch_register_lock);
1837
1838 continue_at(cl, cache_set_flush, system_wq);
1839 }
1840
1841 void bch_cache_set_stop(struct cache_set *c)
1842 {
1843 if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
1844 /* closure_fn set to __cache_set_unregister() */
1845 closure_queue(&c->caching);
1846 }
1847
1848 void bch_cache_set_unregister(struct cache_set *c)
1849 {
1850 set_bit(CACHE_SET_UNREGISTERING, &c->flags);
1851 bch_cache_set_stop(c);
1852 }
1853
1854 #define alloc_meta_bucket_pages(gfp, sb) \
1855 ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(meta_bucket_pages(sb))))
1856
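/*
 * Allocate and initialise a cache_set for the given cache superblock; on
 * any allocation failure the partially built set is torn down via
 * bch_cache_set_unregister() and NULL is returned.
 */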
1857 struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
1858 {
1859 int iter_size;
1860 struct cache *ca = container_of(sb, struct cache, sb);
1861 struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
1862
1863 if (!c)
1864 return NULL;
1865
1866 __module_get(THIS_MODULE);
1867 closure_init(&c->cl, NULL);
1868 set_closure_fn(&c->cl, cache_set_free, system_wq);
1869
1870 closure_init(&c->caching, &c->cl);
1871 set_closure_fn(&c->caching, __cache_set_unregister, system_wq);
1872
1873 /* Maybe create continue_at_noreturn() and use it here? */
1874 closure_set_stopped(&c->cl);
1875 closure_put(&c->cl);
1876
1877 kobject_init(&c->kobj, &bch_cache_set_ktype);
1878 kobject_init(&c->internal, &bch_cache_set_internal_ktype);
1879
1880 bch_cache_accounting_init(&c->accounting, &c->cl);
1881
1882 memcpy(c->set_uuid, sb->set_uuid, 16);
1883
1884 c->cache = ca;
1885 c->cache->set = c;
1886 c->bucket_bits = ilog2(sb->bucket_size);
1887 c->block_bits = ilog2(sb->block_size);
1888 c->nr_uuids = meta_bucket_bytes(sb) / sizeof(struct uuid_entry);
1889 c->devices_max_used = 0;
1890 atomic_set(&c->attached_dev_nr, 0);
1891 c->btree_pages = meta_bucket_pages(sb);
1892 if (c->btree_pages > BTREE_MAX_PAGES)
1893 c->btree_pages = max_t(int, c->btree_pages / 4,
1894 BTREE_MAX_PAGES);
1895
1896 sema_init(&c->sb_write_mutex, 1);
1897 mutex_init(&c->bucket_lock);
1898 init_waitqueue_head(&c->btree_cache_wait);
1899 spin_lock_init(&c->btree_cannibalize_lock);
1900 init_waitqueue_head(&c->bucket_wait);
1901 init_waitqueue_head(&c->gc_wait);
1902 sema_init(&c->uuid_write_mutex, 1);
1903
1904 spin_lock_init(&c->btree_gc_time.lock);
1905 spin_lock_init(&c->btree_split_time.lock);
1906 spin_lock_init(&c->btree_read_time.lock);
1907
1908 bch_moving_init_cache_set(c);
1909
1910 INIT_LIST_HEAD(&c->list);
1911 INIT_LIST_HEAD(&c->cached_devs);
1912 INIT_LIST_HEAD(&c->btree_cache);
1913 INIT_LIST_HEAD(&c->btree_cache_freeable);
1914 INIT_LIST_HEAD(&c->btree_cache_freed);
1915 INIT_LIST_HEAD(&c->data_buckets);
1916
1917 iter_size = sizeof(struct btree_iter) +
1918 ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) *
1919 sizeof(struct btree_iter_set);
1920
1921 c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL);
1922 if (!c->devices)
1923 goto err;
1924
1925 if (mempool_init_slab_pool(&c->search, 32, bch_search_cache))
1926 goto err;
1927
1928 if (mempool_init_kmalloc_pool(&c->bio_meta, 2,
1929 sizeof(struct bbio) +
1930 sizeof(struct bio_vec) * meta_bucket_pages(sb)))
1931 goto err;
1932
1933 if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size))
1934 goto err;
1935
1936 if (bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
1937 BIOSET_NEED_RESCUER))
1938 goto err;
1939
1940 c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb);
1941 if (!c->uuids)
1942 goto err;
1943
1944 c->moving_gc_wq = alloc_workqueue("bcache_gc", WQ_MEM_RECLAIM, 0);
1945 if (!c->moving_gc_wq)
1946 goto err;
1947
1948 if (bch_journal_alloc(c))
1949 goto err;
1950
1951 if (bch_btree_cache_alloc(c))
1952 goto err;
1953
1954 if (bch_open_buckets_alloc(c))
1955 goto err;
1956
1957 if (bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
1958 goto err;
1959
1960 c->congested_read_threshold_us = 2000;
1961 c->congested_write_threshold_us = 20000;
1962 c->error_limit = DEFAULT_IO_ERROR_LIMIT;
1963 c->idle_max_writeback_rate_enabled = 1;
1964 WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags));
1965
1966 return c;
1967 err:
1968 bch_cache_set_unregister(c);
1969 return NULL;
1970 }
1971
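/*
 * Bring a cache set online. If the cache was cleanly shut down (CACHE_SYNC),
 * read the journal, bucket priorities, UUIDs and btree root, then replay the
 * journal; otherwise invalidate the existing data and create a fresh btree
 * root and UUID bucket. Finally start the gc thread, write the superblock,
 * attach any waiting backing devices and run flash-only volumes. Returns 0
 * on success, or -EIO after calling bch_cache_set_error() on failure.
 */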
1972 static int run_cache_set(struct cache_set *c)
1973 {
1974 const char *err = "cannot allocate memory";
1975 struct cached_dev *dc, *t;
1976 struct cache *ca = c->cache;
1977 struct closure cl;
1978 LIST_HEAD(journal);
1979 struct journal_replay *l;
1980
1981 closure_init_stack(&cl);
1982
1983 c->nbuckets = ca->sb.nbuckets;
1984 set_gc_sectors(c);
1985
1986 if (CACHE_SYNC(&c->cache->sb)) {
1987 struct bkey *k;
1988 struct jset *j;
1989
1990 err = "cannot allocate memory for journal";
1991 if (bch_journal_read(c, &journal))
1992 goto err;
1993
1994 pr_debug("btree_journal_read() done\n");
1995
1996 err = "no journal entries found";
1997 if (list_empty(&journal))
1998 goto err;
1999
2000 j = &list_entry(journal.prev, struct journal_replay, list)->j;
2001
2002 err = "IO error reading priorities";
2003 if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
2004 goto err;
2005
2006 /*
2007 * If prio_read() fails it'll call cache_set_error and we'll
2008 * tear everything down right away, but if we perhaps checked
2009 * sooner we could avoid journal replay.
2010 */
2011
2012 k = &j->btree_root;
2013
2014 err = "bad btree root";
2015 if (__bch_btree_ptr_invalid(c, k))
2016 goto err;
2017
2018 err = "error reading btree root";
2019 c->root = bch_btree_node_get(c, NULL, k,
2020 j->btree_level,
2021 true, NULL);
2022 if (IS_ERR(c->root))
2023 goto err;
2024
2025 list_del_init(&c->root->list);
2026 rw_unlock(true, c->root);
2027
2028 err = uuid_read(c, j, &cl);
2029 if (err)
2030 goto err;
2031
2032 err = "error in recovery";
2033 if (bch_btree_check(c))
2034 goto err;
2035
2036 bch_journal_mark(c, &journal);
2037 bch_initial_gc_finish(c);
2038 pr_debug("btree_check() done\n");
2039
2040 /*
2041 * bcache_journal_next() can't happen sooner, or
2042 * btree_gc_finish() will give spurious errors about last_gc >
2043 * gc_gen - this is a hack but oh well.
2044 */
2045 bch_journal_next(&c->journal);
2046
2047 err = "error starting allocator thread";
2048 if (bch_cache_allocator_start(ca))
2049 goto err;
2050
2051 /*
2052 * First place it's safe to allocate: btree_check() and
2053 * btree_gc_finish() have to run before we have buckets to
2054 * allocate, and bch_bucket_alloc_set() might cause a journal
2055 * entry to be written so bcache_journal_next() has to be called
2056 * first.
2057 *
2058 * If the uuids were in the old format we have to rewrite them
2059 * before the next journal entry is written:
2060 */
2061 if (j->version < BCACHE_JSET_VERSION_UUID)
2062 __uuid_write(c);
2063
2064 err = "bcache: replay journal failed";
2065 if (bch_journal_replay(c, &journal))
2066 goto err;
2067 } else {
2068 unsigned int j;
2069
2070 pr_notice("invalidating existing data\n");
2071 ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
2072 2, SB_JOURNAL_BUCKETS);
2073
2074 for (j = 0; j < ca->sb.keys; j++)
2075 ca->sb.d[j] = ca->sb.first_bucket + j;
2076
2077 bch_initial_gc_finish(c);
2078
2079 err = "error starting allocator thread";
2080 if (bch_cache_allocator_start(ca))
2081 goto err;
2082
2083 mutex_lock(&c->bucket_lock);
2084 bch_prio_write(ca, true);
2085 mutex_unlock(&c->bucket_lock);
2086
2087 err = "cannot allocate new UUID bucket";
2088 if (__uuid_write(c))
2089 goto err;
2090
2091 err = "cannot allocate new btree root";
2092 c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
2093 if (IS_ERR(c->root))
2094 goto err;
2095
2096 mutex_lock(&c->root->write_lock);
2097 bkey_copy_key(&c->root->key, &MAX_KEY);
2098 bch_btree_node_write(c->root, &cl);
2099 mutex_unlock(&c->root->write_lock);
2100
2101 bch_btree_set_root(c->root);
2102 rw_unlock(true, c->root);
2103
2104 /*
2105 * We don't want to write the first journal entry until
2106 * everything is set up - fortunately journal entries won't be
2107 * written until the SET_CACHE_SYNC() here:
2108 */
2109 SET_CACHE_SYNC(&c->cache->sb, true);
2110
2111 bch_journal_next(&c->journal);
2112 bch_journal_meta(c, &cl);
2113 }
2114
2115 err = "error starting gc thread";
2116 if (bch_gc_thread_start(c))
2117 goto err;
2118
2119 closure_sync(&cl);
2120 c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
2121 bcache_write_super(c);
2122
2123 if (bch_has_feature_obso_large_bucket(&c->cache->sb))
2124 pr_err("Detect obsoleted large bucket layout, all attached bcache device will be read-only\n");
2125
2126 list_for_each_entry_safe(dc, t, &uncached_devices, list)
2127 bch_cached_dev_attach(dc, c, NULL);
2128
2129 flash_devs_run(c);
2130
2131 bch_journal_space_reserve(&c->journal);
2132 set_bit(CACHE_SET_RUNNING, &c->flags);
2133 return 0;
2134 err:
2135 while (!list_empty(&journal)) {
2136 l = list_first_entry(&journal, struct journal_replay, list);
2137 list_del(&l->list);
2138 kfree(l);
2139 }
2140
2141 closure_sync(&cl);
2142
2143 bch_cache_set_error(c, "%s", err);
2144
2145 return -EIO;
2146 }
2147
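/*
 * Add a cache to the cache set identified by its set UUID, allocating a new
 * cache_set if none exists yet, then create the sysfs links and start the
 * set. Returns NULL on success or an error string on failure.
 */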
2148 static const char *register_cache_set(struct cache *ca)
2149 {
2150 char buf[12];
2151 const char *err = "cannot allocate memory";
2152 struct cache_set *c;
2153
2154 list_for_each_entry(c, &bch_cache_sets, list)
2155 if (!memcmp(c->set_uuid, ca->sb.set_uuid, 16)) {
2156 if (c->cache)
2157 return "duplicate cache set member";
2158
2159 goto found;
2160 }
2161
2162 c = bch_cache_set_alloc(&ca->sb);
2163 if (!c)
2164 return err;
2165
2166 err = "error creating kobject";
2167 if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->set_uuid) ||
2168 kobject_add(&c->internal, &c->kobj, "internal"))
2169 goto err;
2170
2171 if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
2172 goto err;
2173
2174 bch_debug_init_cache_set(c);
2175
2176 list_add(&c->list, &bch_cache_sets);
2177 found:
2178 sprintf(buf, "cache%i", ca->sb.nr_this_dev);
2179 if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
2180 sysfs_create_link(&c->kobj, &ca->kobj, buf))
2181 goto err;
2182
2183 kobject_get(&ca->kobj);
2184 ca->set = c;
2185 ca->set->cache = ca;
2186
2187 err = "failed to run cache set";
2188 if (run_cache_set(c) < 0)
2189 goto err;
2190
2191 return NULL;
2192 err:
2193 bch_cache_set_unregister(c);
2194 return err;
2195 }
2196
2197 /* Cache device */
2198
2199 /* Called when the last reference to ca->kobj is dropped */
2200 void bch_cache_release(struct kobject *kobj)
2201 {
2202 struct cache *ca = container_of(kobj, struct cache, kobj);
2203 unsigned int i;
2204
2205 if (ca->set) {
2206 BUG_ON(ca->set->cache != ca);
2207 ca->set->cache = NULL;
2208 }
2209
2210 free_pages((unsigned long) ca->disk_buckets, ilog2(meta_bucket_pages(&ca->sb)));
2211 kfree(ca->prio_buckets);
2212 vfree(ca->buckets);
2213
2214 free_heap(&ca->heap);
2215 free_fifo(&ca->free_inc);
2216
2217 for (i = 0; i < RESERVE_NR; i++)
2218 free_fifo(&ca->free[i]);
2219
2220 if (ca->sb_disk)
2221 put_page(virt_to_page(ca->sb_disk));
2222
2223 if (ca->bdev_file)
2224 fput(ca->bdev_file);
2225
2226 kfree(ca);
2227 module_put(THIS_MODULE);
2228 }
2229
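/*
 * Allocate the per-cache in-memory data structures: the reserve FIFOs,
 * free_inc, the bucket heap, the bucket array, prio_buckets and the
 * disk_buckets buffer. Returns 0 on success or a negative errno.
 */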
2230 static int cache_alloc(struct cache *ca)
2231 {
2232 size_t free;
2233 size_t btree_buckets;
2234 struct bucket *b;
2235 int ret = -ENOMEM;
2236 const char *err = NULL;
2237
2238 __module_get(THIS_MODULE);
2239 kobject_init(&ca->kobj, &bch_cache_ktype);
2240
2241 bio_init(&ca->journal.bio, NULL, ca->journal.bio.bi_inline_vecs, 8, 0);
2242
2243 /*
2244 * When ca->sb.njournal_buckets is non-zero, a journal exists and
2245 * btree nodes may split during bch_journal_replay(), so buckets of
2246 * RESERVE_BTREE type are needed. In the worst case all the journal
2247 * buckets hold valid journal entries and every key has to be
2248 * replayed, so the number of RESERVE_BTREE buckets reserved here
2249 * should be at least as large as
2250 * the number of journal buckets.
2251 */
2252 btree_buckets = ca->sb.njournal_buckets ?: 8;
2253 free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
2254 if (!free) {
2255 ret = -EPERM;
2256 err = "ca->sb.nbuckets is too small";
2257 goto err_free;
2258 }
2259
2260 if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets,
2261 GFP_KERNEL)) {
2262 err = "ca->free[RESERVE_BTREE] alloc failed";
2263 goto err_btree_alloc;
2264 }
2265
2266 if (!init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca),
2267 GFP_KERNEL)) {
2268 err = "ca->free[RESERVE_PRIO] alloc failed";
2269 goto err_prio_alloc;
2270 }
2271
2272 if (!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL)) {
2273 err = "ca->free[RESERVE_MOVINGGC] alloc failed";
2274 goto err_movinggc_alloc;
2275 }
2276
2277 if (!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL)) {
2278 err = "ca->free[RESERVE_NONE] alloc failed";
2279 goto err_none_alloc;
2280 }
2281
2282 if (!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL)) {
2283 err = "ca->free_inc alloc failed";
2284 goto err_free_inc_alloc;
2285 }
2286
2287 if (!init_heap(&ca->heap, free << 3, GFP_KERNEL)) {
2288 err = "ca->heap alloc failed";
2289 goto err_heap_alloc;
2290 }
2291
2292 ca->buckets = vzalloc(array_size(sizeof(struct bucket),
2293 ca->sb.nbuckets));
2294 if (!ca->buckets) {
2295 err = "ca->buckets alloc failed";
2296 goto err_buckets_alloc;
2297 }
2298
2299 ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t),
2300 prio_buckets(ca), 2),
2301 GFP_KERNEL);
2302 if (!ca->prio_buckets) {
2303 err = "ca->prio_buckets alloc failed";
2304 goto err_prio_buckets_alloc;
2305 }
2306
2307 ca->disk_buckets = alloc_meta_bucket_pages(GFP_KERNEL, &ca->sb);
2308 if (!ca->disk_buckets) {
2309 err = "ca->disk_buckets alloc failed";
2310 goto err_disk_buckets_alloc;
2311 }
2312
2313 ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
2314
2315 for_each_bucket(b, ca)
2316 atomic_set(&b->pin, 0);
2317 return 0;
2318
2319 err_disk_buckets_alloc:
2320 kfree(ca->prio_buckets);
2321 err_prio_buckets_alloc:
2322 vfree(ca->buckets);
2323 err_buckets_alloc:
2324 free_heap(&ca->heap);
2325 err_heap_alloc:
2326 free_fifo(&ca->free_inc);
2327 err_free_inc_alloc:
2328 free_fifo(&ca->free[RESERVE_NONE]);
2329 err_none_alloc:
2330 free_fifo(&ca->free[RESERVE_MOVINGGC]);
2331 err_movinggc_alloc:
2332 free_fifo(&ca->free[RESERVE_PRIO]);
2333 err_prio_alloc:
2334 free_fifo(&ca->free[RESERVE_BTREE]);
2335 err_btree_alloc:
2336 err_free:
2337 module_put(THIS_MODULE);
2338 if (err)
2339 pr_notice("error %pg: %s\n", ca->bdev, err);
2340 return ret;
2341 }
2342
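/*
 * Take ownership of an opened cache device: copy in the superblock,
 * allocate in-memory state via cache_alloc(), add the kobject under the
 * block device and join (or create) the cache set. The cache's lifetime is
 * subsequently managed through ca->kobj and bch_cache_release().
 */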
2343 static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
2344 struct file *bdev_file,
2345 struct cache *ca)
2346 {
2347 const char *err = NULL; /* must be set for any error case */
2348 int ret = 0;
2349
2350 memcpy(&ca->sb, sb, sizeof(struct cache_sb));
2351 ca->bdev_file = bdev_file;
2352 ca->bdev = file_bdev(bdev_file);
2353 ca->sb_disk = sb_disk;
2354
2355 if (bdev_max_discard_sectors(file_bdev(bdev_file)))
2356 ca->discard = CACHE_DISCARD(&ca->sb);
2357
2358 ret = cache_alloc(ca);
2359 if (ret != 0) {
2360 if (ret == -ENOMEM)
2361 err = "cache_alloc(): -ENOMEM";
2362 else if (ret == -EPERM)
2363 err = "cache_alloc(): cache device is too small";
2364 else
2365 err = "cache_alloc(): unknown error";
2366 pr_notice("error %pg: %s\n", file_bdev(bdev_file), err);
2367 /*
2368 * If we failed here, it means ca->kobj is not initialized yet,
2369 * kobject_put() won't be called and there is no chance to
2370 * call fput() to bdev in bch_cache_release(). So
2371 * we explicitly call fput() on the block device here.
2372 */
2373 fput(bdev_file);
2374 return ret;
2375 }
2376
2377 if (kobject_add(&ca->kobj, bdev_kobj(file_bdev(bdev_file)), "bcache")) {
2378 pr_notice("error %pg: error calling kobject_add\n",
2379 file_bdev(bdev_file));
2380 ret = -ENOMEM;
2381 goto out;
2382 }
2383
2384 mutex_lock(&bch_register_lock);
2385 err = register_cache_set(ca);
2386 mutex_unlock(&bch_register_lock);
2387
2388 if (err) {
2389 ret = -ENODEV;
2390 goto out;
2391 }
2392
2393 pr_info("registered cache device %pg\n", file_bdev(ca->bdev_file));
2394
2395 out:
2396 kobject_put(&ca->kobj);
2397 return ret;
2398 }
2399
2400 /* Global interfaces/init */
2401
2402 static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
2403 const char *buffer, size_t size);
2404 static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
2405 struct kobj_attribute *attr,
2406 const char *buffer, size_t size);
2407
2408 kobj_attribute_write(register, register_bcache);
2409 kobj_attribute_write(register_quiet, register_bcache);
2410 kobj_attribute_write(pendings_cleanup, bch_pending_bdevs_cleanup);
2411
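/*
 * Helpers used by register_bcache() to tell whether an -EBUSY on open means
 * the device is already registered as a backing or cache device.
 */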
2412 static bool bch_is_open_backing(dev_t dev)
2413 {
2414 struct cache_set *c, *tc;
2415 struct cached_dev *dc, *t;
2416
2417 list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
2418 list_for_each_entry_safe(dc, t, &c->cached_devs, list)
2419 if (dc->bdev->bd_dev == dev)
2420 return true;
2421 list_for_each_entry_safe(dc, t, &uncached_devices, list)
2422 if (dc->bdev->bd_dev == dev)
2423 return true;
2424 return false;
2425 }
2426
2427 static bool bch_is_open_cache(dev_t dev)
2428 {
2429 struct cache_set *c, *tc;
2430
2431 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
2432 struct cache *ca = c->cache;
2433
2434 if (ca->bdev->bd_dev == dev)
2435 return true;
2436 }
2437
2438 return false;
2439 }
2440
2441 static bool bch_is_open(dev_t dev)
2442 {
2443 return bch_is_open_cache(dev) || bch_is_open_backing(dev);
2444 }
2445
2446 struct async_reg_args {
2447 struct delayed_work reg_work;
2448 char *path;
2449 struct cache_sb *sb;
2450 struct cache_sb_disk *sb_disk;
2451 struct file *bdev_file;
2452 void *holder;
2453 };
2454
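/*
 * Workqueue callbacks for asynchronous registration: they perform the actual
 * register_bdev()/register_cache() call and then free the async_reg_args.
 */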
2455 static void register_bdev_worker(struct work_struct *work)
2456 {
2457 int fail = false;
2458 struct async_reg_args *args =
2459 container_of(work, struct async_reg_args, reg_work.work);
2460
2461 mutex_lock(&bch_register_lock);
2462 if (register_bdev(args->sb, args->sb_disk, args->bdev_file,
2463 args->holder) < 0)
2464 fail = true;
2465 mutex_unlock(&bch_register_lock);
2466
2467 if (fail)
2468 pr_info("error %s: fail to register backing device\n",
2469 args->path);
2470 kfree(args->sb);
2471 kfree(args->path);
2472 kfree(args);
2473 module_put(THIS_MODULE);
2474 }
2475
2476 static void register_cache_worker(struct work_struct *work)
2477 {
2478 int fail = false;
2479 struct async_reg_args *args =
2480 container_of(work, struct async_reg_args, reg_work.work);
2481
2482 /* the bdev reference will be dropped by fput() in bch_cache_release() */
2483 if (register_cache(args->sb, args->sb_disk, args->bdev_file,
2484 args->holder))
2485 fail = true;
2486
2487 if (fail)
2488 pr_info("error %s: fail to register cache device\n",
2489 args->path);
2490 kfree(args->sb);
2491 kfree(args->path);
2492 kfree(args);
2493 module_put(THIS_MODULE);
2494 }
2495
2496 static void register_device_async(struct async_reg_args *args)
2497 {
2498 if (SB_IS_BDEV(args->sb))
2499 INIT_DELAYED_WORK(&args->reg_work, register_bdev_worker);
2500 else
2501 INIT_DELAYED_WORK(&args->reg_work, register_cache_worker);
2502
2503 /* 10 jiffies is enough for a delay */
2504 queue_delayed_work(system_wq, &args->reg_work, 10);
2505 }
2506
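/*
 * Allocate the holder object passed to bdev_file_open_by_dev(): a cached_dev
 * for backing devices, a cache for cache devices.
 */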
2507 static void *alloc_holder_object(struct cache_sb *sb)
2508 {
2509 if (SB_IS_BDEV(sb))
2510 return kzalloc(sizeof(struct cached_dev), GFP_KERNEL);
2511 return kzalloc(sizeof(struct cache), GFP_KERNEL);
2512 }
2513
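/*
 * sysfs store handler for /sys/fs/bcache/register and register_quiet: read
 * the superblock from the device at the given path, reopen the device
 * exclusively with the proper holder, and register it as a backing or cache
 * device, either synchronously or via a workqueue when
 * CONFIG_BCACHE_ASYNC_REGISTRATION is enabled. Registration is typically
 * triggered from user space with e.g. "echo /dev/sdX > /sys/fs/bcache/register".
 */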
2514 static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
2515 const char *buffer, size_t size)
2516 {
2517 const char *err;
2518 char *path = NULL;
2519 struct cache_sb *sb;
2520 struct cache_sb_disk *sb_disk;
2521 struct file *bdev_file, *bdev_file2;
2522 void *holder = NULL;
2523 ssize_t ret;
2524 bool async_registration = false;
2525 bool quiet = false;
2526
2527 #ifdef CONFIG_BCACHE_ASYNC_REGISTRATION
2528 async_registration = true;
2529 #endif
2530
2531 ret = -EBUSY;
2532 err = "failed to reference bcache module";
2533 if (!try_module_get(THIS_MODULE))
2534 goto out;
2535
2536 /* For latest state of bcache_is_reboot */
2537 smp_mb();
2538 err = "bcache is in reboot";
2539 if (bcache_is_reboot)
2540 goto out_module_put;
2541
2542 ret = -ENOMEM;
2543 err = "cannot allocate memory";
2544 path = kstrndup(buffer, size, GFP_KERNEL);
2545 if (!path)
2546 goto out_module_put;
2547
2548 sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
2549 if (!sb)
2550 goto out_free_path;
2551
2552 ret = -EINVAL;
2553 err = "failed to open device";
2554 bdev_file = bdev_file_open_by_path(strim(path), BLK_OPEN_READ, NULL, NULL);
2555 if (IS_ERR(bdev_file))
2556 goto out_free_sb;
2557
2558 err = read_super(sb, file_bdev(bdev_file), &sb_disk);
2559 if (err)
2560 goto out_blkdev_put;
2561
2562 holder = alloc_holder_object(sb);
2563 if (!holder) {
2564 ret = -ENOMEM;
2565 err = "cannot allocate memory";
2566 goto out_put_sb_page;
2567 }
2568
2569 /* Now reopen in exclusive mode with proper holder */
2570 bdev_file2 = bdev_file_open_by_dev(file_bdev(bdev_file)->bd_dev,
2571 BLK_OPEN_READ | BLK_OPEN_WRITE, holder, NULL);
2572 fput(bdev_file);
2573 bdev_file = bdev_file2;
2574 if (IS_ERR(bdev_file)) {
2575 ret = PTR_ERR(bdev_file);
2576 bdev_file = NULL;
2577 if (ret == -EBUSY) {
2578 dev_t dev;
2579
2580 mutex_lock(&bch_register_lock);
2581 if (lookup_bdev(strim(path), &dev) == 0 &&
2582 bch_is_open(dev))
2583 err = "device already registered";
2584 else
2585 err = "device busy";
2586 mutex_unlock(&bch_register_lock);
2587 if (attr == &ksysfs_register_quiet) {
2588 quiet = true;
2589 ret = size;
2590 }
2591 }
2592 goto out_free_holder;
2593 }
2594
2595 err = "failed to register device";
2596
2597 if (async_registration) {
2598 /* register asynchronously */
2599 struct async_reg_args *args =
2600 kzalloc(sizeof(struct async_reg_args), GFP_KERNEL);
2601
2602 if (!args) {
2603 ret = -ENOMEM;
2604 err = "cannot allocate memory";
2605 goto out_free_holder;
2606 }
2607
2608 args->path = path;
2609 args->sb = sb;
2610 args->sb_disk = sb_disk;
2611 args->bdev_file = bdev_file;
2612 args->holder = holder;
2613 register_device_async(args);
2614 /* Don't wait; return to user space immediately */
2615 goto async_done;
2616 }
2617
2618 if (SB_IS_BDEV(sb)) {
2619 mutex_lock(&bch_register_lock);
2620 ret = register_bdev(sb, sb_disk, bdev_file, holder);
2621 mutex_unlock(&bch_register_lock);
2622 /* the bdev reference will be dropped by fput() in cached_dev_free() */
2623 if (ret < 0)
2624 goto out_free_sb;
2625 } else {
2626 /* the bdev reference will be dropped by fput() in bch_cache_release() */
2627 ret = register_cache(sb, sb_disk, bdev_file, holder);
2628 if (ret)
2629 goto out_free_sb;
2630 }
2631
2632 kfree(sb);
2633 kfree(path);
2634 module_put(THIS_MODULE);
2635 async_done:
2636 return size;
2637
2638 out_free_holder:
2639 kfree(holder);
2640 out_put_sb_page:
2641 put_page(virt_to_page(sb_disk));
2642 out_blkdev_put:
2643 if (bdev_file)
2644 fput(bdev_file);
2645 out_free_sb:
2646 kfree(sb);
2647 out_free_path:
2648 kfree(path);
2649 path = NULL;
2650 out_module_put:
2651 module_put(THIS_MODULE);
2652 out:
2653 if (!quiet)
2654 pr_info("error %s: %s\n", path?path:"", err);
2655 return ret;
2656 }
2657
2658
2659 struct pdev {
2660 struct list_head list;
2661 struct cached_dev *dc;
2662 };
2663
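/*
 * sysfs store handler for /sys/fs/bcache/pendings_cleanup: stop backing
 * devices that are still waiting for a cache set, i.e. registered backing
 * devices whose set UUID matches no currently registered cache set.
 */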
2664 static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
2665 struct kobj_attribute *attr,
2666 const char *buffer,
2667 size_t size)
2668 {
2669 LIST_HEAD(pending_devs);
2670 ssize_t ret = size;
2671 struct cached_dev *dc, *tdc;
2672 struct pdev *pdev, *tpdev;
2673 struct cache_set *c, *tc;
2674
2675 mutex_lock(&bch_register_lock);
2676 list_for_each_entry_safe(dc, tdc, &uncached_devices, list) {
2677 pdev = kmalloc(sizeof(struct pdev), GFP_KERNEL);
2678 if (!pdev)
2679 break;
2680 pdev->dc = dc;
2681 list_add(&pdev->list, &pending_devs);
2682 }
2683
2684 list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
2685 char *pdev_set_uuid = pdev->dc->sb.set_uuid;
2686 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
2687 char *set_uuid = c->set_uuid;
2688
2689 if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
2690 list_del(&pdev->list);
2691 kfree(pdev);
2692 break;
2693 }
2694 }
2695 }
2696 mutex_unlock(&bch_register_lock);
2697
2698 list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
2699 pr_info("delete pdev %p\n", pdev);
2700 list_del(&pdev->list);
2701 bcache_device_stop(&pdev->dc->disk);
2702 kfree(pdev);
2703 }
2704
2705 return ret;
2706 }
2707
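/*
 * Reboot notifier: on SYS_DOWN/SYS_HALT/SYS_POWER_OFF, reject further
 * registrations and stop all cache sets and backing devices, waiting up to
 * ten seconds for them to finish shutting down.
 */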
2708 static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
2709 {
2710 if (bcache_is_reboot)
2711 return NOTIFY_DONE;
2712
2713 if (code == SYS_DOWN ||
2714 code == SYS_HALT ||
2715 code == SYS_POWER_OFF) {
2716 DEFINE_WAIT(wait);
2717 unsigned long start = jiffies;
2718 bool stopped = false;
2719
2720 struct cache_set *c, *tc;
2721 struct cached_dev *dc, *tdc;
2722
2723 mutex_lock(&bch_register_lock);
2724
2725 if (bcache_is_reboot)
2726 goto out;
2727
2728 /* From now on, new registrations are rejected */
2729 bcache_is_reboot = true;
2730 /*
2731 * Make sure a registering caller (if any) on another CPU
2732 * core sees bcache_is_reboot set to true before it proceeds
2733 */
2734 smp_mb();
2735
2736 if (list_empty(&bch_cache_sets) &&
2737 list_empty(&uncached_devices))
2738 goto out;
2739
2740 mutex_unlock(&bch_register_lock);
2741
2742 pr_info("Stopping all devices:\n");
2743
2744 /*
2745 * The reason bch_register_lock is not held to call
2746 * bch_cache_set_stop() and bcache_device_stop() is to
2747 * avoid potential deadlock during reboot, because cache
2748 * set or bcache device stopping process will acquire
2749 * bch_register_lock too.
2750 *
2751 * We are safe here because bcache_is_reboot is already set
2752 * to true, so register_bcache() will reject any new
2753 * registration. bcache_is_reboot also makes sure
2754 * bcache_reboot() won't be re-entered by another thread,
2755 * so there is no race in the following list iteration by
2756 * list_for_each_entry_safe().
2757 */
2758 list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
2759 bch_cache_set_stop(c);
2760
2761 list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
2762 bcache_device_stop(&dc->disk);
2763
2764
2765 /*
2766 * Give an early chance for other kthreads and
2767 * kworkers to stop themselves
2768 */
2769 schedule();
2770
2771 /* What's a condition variable? */
2772 while (1) {
2773 long timeout = start + 10 * HZ - jiffies;
2774
2775 mutex_lock(&bch_register_lock);
2776 stopped = list_empty(&bch_cache_sets) &&
2777 list_empty(&uncached_devices);
2778
2779 if (timeout < 0 || stopped)
2780 break;
2781
2782 prepare_to_wait(&unregister_wait, &wait,
2783 TASK_UNINTERRUPTIBLE);
2784
2785 mutex_unlock(&bch_register_lock);
2786 schedule_timeout(timeout);
2787 }
2788
2789 finish_wait(&unregister_wait, &wait);
2790
2791 if (stopped)
2792 pr_info("All devices stopped\n");
2793 else
2794 pr_notice("Timeout waiting for devices to be closed\n");
2795 out:
2796 mutex_unlock(&bch_register_lock);
2797 }
2798
2799 return NOTIFY_DONE;
2800 }
2801
2802 static struct notifier_block reboot = {
2803 .notifier_call = bcache_reboot,
2804 .priority = INT_MAX, /* before any real devices */
2805 };
2806
2807 static void bcache_exit(void)
2808 {
2809 bch_debug_exit();
2810 bch_request_exit();
2811 if (bcache_kobj)
2812 kobject_put(bcache_kobj);
2813 if (bcache_wq)
2814 destroy_workqueue(bcache_wq);
2815 if (bch_journal_wq)
2816 destroy_workqueue(bch_journal_wq);
2817 if (bch_flush_wq)
2818 destroy_workqueue(bch_flush_wq);
2819 bch_btree_exit();
2820
2821 if (bcache_major)
2822 unregister_blkdev(bcache_major, "bcache");
2823 unregister_reboot_notifier(&reboot);
2824 mutex_destroy(&bch_register_lock);
2825 }
2826
2827 /* Check and fixup module parameters */
2828 static void check_module_parameters(void)
2829 {
2830 if (bch_cutoff_writeback_sync == 0)
2831 bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC;
2832 else if (bch_cutoff_writeback_sync > CUTOFF_WRITEBACK_SYNC_MAX) {
2833 pr_warn("set bch_cutoff_writeback_sync (%u) to max value %u\n",
2834 bch_cutoff_writeback_sync, CUTOFF_WRITEBACK_SYNC_MAX);
2835 bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC_MAX;
2836 }
2837
2838 if (bch_cutoff_writeback == 0)
2839 bch_cutoff_writeback = CUTOFF_WRITEBACK;
2840 else if (bch_cutoff_writeback > CUTOFF_WRITEBACK_MAX) {
2841 pr_warn("set bch_cutoff_writeback (%u) to max value %u\n",
2842 bch_cutoff_writeback, CUTOFF_WRITEBACK_MAX);
2843 bch_cutoff_writeback = CUTOFF_WRITEBACK_MAX;
2844 }
2845
2846 if (bch_cutoff_writeback > bch_cutoff_writeback_sync) {
2847 pr_warn("set bch_cutoff_writeback (%u) to %u\n",
2848 bch_cutoff_writeback, bch_cutoff_writeback_sync);
2849 bch_cutoff_writeback = bch_cutoff_writeback_sync;
2850 }
2851 }
2852
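/*
 * Module init: check module parameters, register the reboot notifier and the
 * "bcache" block major, create the workqueues, the /sys/fs/bcache kobject and
 * its sysfs files, and initialize the btree, request and debug subsystems.
 */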
2853 static int __init bcache_init(void)
2854 {
2855 static const struct attribute *files[] = {
2856 &ksysfs_register.attr,
2857 &ksysfs_register_quiet.attr,
2858 &ksysfs_pendings_cleanup.attr,
2859 NULL
2860 };
2861
2862 check_module_parameters();
2863
2864 mutex_init(&bch_register_lock);
2865 init_waitqueue_head(&unregister_wait);
2866 register_reboot_notifier(&reboot);
2867
2868 bcache_major = register_blkdev(0, "bcache");
2869 if (bcache_major < 0) {
2870 unregister_reboot_notifier(&reboot);
2871 mutex_destroy(&bch_register_lock);
2872 return bcache_major;
2873 }
2874
2875 if (bch_btree_init())
2876 goto err;
2877
2878 bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
2879 if (!bcache_wq)
2880 goto err;
2881
2882 /*
2883 * Let's not make this `WQ_MEM_RECLAIM` for the following reasons:
2884 *
2885 * 1. It used `system_wq` before which also does no memory reclaim.
2886 * 2. With `WQ_MEM_RECLAIM`, desktop stalls, increased boot times, and
2887 * reduced throughput can be observed.
2888 *
2889 * We still want to use our own queue so as not to congest the `system_wq`.
2890 */
2891 bch_flush_wq = alloc_workqueue("bch_flush", 0, 0);
2892 if (!bch_flush_wq)
2893 goto err;
2894
2895 bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
2896 if (!bch_journal_wq)
2897 goto err;
2898
2899 bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
2900 if (!bcache_kobj)
2901 goto err;
2902
2903 if (bch_request_init() ||
2904 sysfs_create_files(bcache_kobj, files))
2905 goto err;
2906
2907 bch_debug_init();
2908
2909 bcache_is_reboot = false;
2910
2911 return 0;
2912 err:
2913 bcache_exit();
2914 return -ENOMEM;
2915 }
2916
2917 /*
2918 * Module hooks
2919 */
2920 module_exit(bcache_exit);
2921 module_init(bcache_init);
2922
2923 module_param(bch_cutoff_writeback, uint, 0);
2924 MODULE_PARM_DESC(bch_cutoff_writeback, "threshold to cutoff writeback");
2925
2926 module_param(bch_cutoff_writeback_sync, uint, 0);
2927 MODULE_PARM_DESC(bch_cutoff_writeback_sync, "hard threshold to cutoff writeback");
2928
2929 MODULE_DESCRIPTION("Bcache: a Linux block layer cache");
2930 MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
2931 MODULE_LICENSE("GPL");
2932