// SPDX-License-Identifier: GPL-2.0
/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are
 * important, but the code is named as if it's the priorities) are written in
 * an arbitrary list of buckets on disk, with a pointer to them in the journal
 * header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is a
 * smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * There is another freelist, because sometimes we have buckets that we know
 * have nothing pointing into them - these we can reuse without waiting for
 * priorities to be rewritten. These come from freed btree nodes and buckets
 * that garbage collection discovered no longer had valid keys pointing into
 * them (because they were overwritten). That's the unused list - buckets on the
 * unused list move to the free list, optionally being discarded in the process.
 *
 * It's also important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyways - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch_bucket_alloc() allocates a single bucket from a specific cache.
 *
 * bch_bucket_alloc_set() allocates one bucket from a cache set.
 *
 * free_some_buckets() drives all the processes described above. It's called
 * from bch_bucket_alloc() and a few other places that need to make sure free
 * buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, and then invalidate them and stick them on the free_inc list -
 * in either lru or fifo order.
 */

#include "bcache.h"
#include "btree.h"

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>

#define MAX_OPEN_BUCKETS 128

/* Bucket heap / gen */

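/*
 * Increment a bucket's generation, and track how far the in-memory gens have
 * run ahead of what garbage collection last saw, so we know when GC has to
 * run again.
 */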
uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
	uint8_t ret = ++b->gen;

	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

	return ret;
}

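/*
 * Decay bucket priorities: once enough sectors of IO have accumulated in
 * c->rescale, every unpinned, non-btree bucket with a nonzero prio has its
 * prio decremented, so recently written buckets sort as more valuable than
 * stale ones.
 */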
void bch_rescale_priorities(struct cache_set *c, int sectors)
{
	struct cache *ca;
	struct bucket *b;
	unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024;
	int r;

	atomic_sub(sectors, &c->rescale);

	do {
		r = atomic_read(&c->rescale);

		if (r >= 0)
			return;
	} while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

	mutex_lock(&c->bucket_lock);

	c->min_prio = USHRT_MAX;

	ca = c->cache;
	for_each_bucket(b, ca)
		if (b->prio &&
		    b->prio != BTREE_PRIO &&
		    !atomic_read(&b->pin)) {
			b->prio--;
			c->min_prio = min(c->min_prio, b->prio);
		}

	mutex_unlock(&c->bucket_lock);
}

/*
 * Background allocation thread: scans for buckets to be invalidated,
 * invalidates them, rewrites prios/gens (marking them as invalidated on disk),
 * then optionally issues discard commands to the newly free buckets, then puts
 * them on the various freelists.
 */

static inline bool can_inc_bucket_gen(struct bucket *b)
{
	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
}

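/*
 * A bucket may be invalidated only when garbage collection marks are valid
 * (or GC has already flagged it reclaimable), it holds no live data, nothing
 * has it pinned, and its gen can still be incremented without wrapping.
 */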
bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
	return (ca->set->gc_mark_valid || b->reclaimable_in_gc) &&
		((!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
		 !atomic_read(&b->pin) && can_inc_bucket_gen(b));
}

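/*
 * Invalidate a bucket in memory: bump its gen (so existing pointers into it
 * become stale), reset its priority and pin it until the caller is done
 * setting it up.
 */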
void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	lockdep_assert_held(&ca->set->bucket_lock);
	BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);

	if (GC_SECTORS_USED(b))
		trace_bcache_invalidate(ca, b - ca->buckets);

	bch_inc_gen(ca, b);
	b->prio = INITIAL_PRIO;
	atomic_inc(&b->pin);
	b->reclaimable_in_gc = 0;
}

static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	__bch_invalidate_one_bucket(ca, b);

	fifo_push(&ca->free_inc, b - ca->buckets);
}

/*
 * Determines what order we're going to reuse buckets in, smallest
 * bucket_prio() first: we also take into account the number of sectors of
 * live data in the bucket, and in order for that multiplication to make sense
 * we have to scale the bucket priorities.
 *
 * Thus, we scale the bucket priorities so that the bucket with the smallest
 * prio is worth 1/8th of what INITIAL_PRIO is worth.
 */

static inline unsigned int new_bucket_prio(struct cache *ca, struct bucket *b)
{
	unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;

	return (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);
}

static inline bool new_bucket_max_cmp(const void *l, const void *r, void *args)
{
	struct bucket **lhs = (struct bucket **)l;
	struct bucket **rhs = (struct bucket **)r;
	struct cache *ca = args;

	return new_bucket_prio(ca, *lhs) > new_bucket_prio(ca, *rhs);
}

static inline bool new_bucket_min_cmp(const void *l, const void *r, void *args)
{
	struct bucket **lhs = (struct bucket **)l;
	struct bucket **rhs = (struct bucket **)r;
	struct cache *ca = args;

	return new_bucket_prio(ca, *lhs) < new_bucket_prio(ca, *rhs);
}

static inline void new_bucket_swap(void *l, void *r, void __always_unused *args)
{
	struct bucket **lhs = l, **rhs = r;

	swap(*lhs, *rhs);
}

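/*
 * LRU-ish replacement: keep the best invalidation candidates (smallest
 * new_bucket_prio()) in a bounded heap while scanning every bucket, then
 * invalidate them smallest-prio first until free_inc is full. If we run out
 * of candidates, kick garbage collection and try again later.
 */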
static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	const struct min_heap_callbacks bucket_max_cmp_callback = {
		.less = new_bucket_max_cmp,
		.swp = new_bucket_swap,
	};
	const struct min_heap_callbacks bucket_min_cmp_callback = {
		.less = new_bucket_min_cmp,
		.swp = new_bucket_swap,
	};

	ca->heap.nr = 0;

	for_each_bucket(b, ca) {
		if (!bch_can_invalidate_bucket(ca, b))
			continue;

		if (!min_heap_full(&ca->heap))
			min_heap_push(&ca->heap, &b, &bucket_max_cmp_callback, ca);
		else if (!new_bucket_max_cmp(&b, min_heap_peek(&ca->heap), ca)) {
			ca->heap.data[0] = b;
			min_heap_sift_down(&ca->heap, 0, &bucket_max_cmp_callback, ca);
		}
	}

	min_heapify_all(&ca->heap, &bucket_min_cmp_callback, ca);

	while (!fifo_full(&ca->free_inc)) {
		if (!ca->heap.nr) {
			/*
			 * We don't want to be calling invalidate_buckets()
			 * multiple times when it can't do anything
			 */
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
		b = min_heap_peek(&ca->heap)[0];
		min_heap_pop(&ca->heap, &bucket_min_cmp_callback, ca);

		bch_invalidate_one_bucket(ca, b);
	}
}

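/*
 * FIFO replacement: walk the buckets in order from where we last stopped,
 * invalidating whatever can be invalidated until free_inc is full or we've
 * looked at every bucket once, in which case we fall back to garbage
 * collection.
 */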
static void invalidate_buckets_fifo(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		if (ca->fifo_last_bucket <  ca->sb.first_bucket ||
		    ca->fifo_last_bucket >= ca->sb.nbuckets)
			ca->fifo_last_bucket = ca->sb.first_bucket;

		b = ca->buckets + ca->fifo_last_bucket++;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

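/*
 * Random replacement: probe buckets at random, invalidating candidates until
 * free_inc is full or we've made half as many probes as there are buckets, in
 * which case we fall back to garbage collection.
 */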
static void invalidate_buckets_random(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		size_t n;

		get_random_bytes(&n, sizeof(n));

		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
		n += ca->sb.first_bucket;

		b = ca->buckets + n;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets / 2) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

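/*
 * Fill free_inc with invalidated buckets, using the replacement policy
 * selected in the superblock.
 */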
static void invalidate_buckets(struct cache *ca)
{
	BUG_ON(ca->invalidate_needs_gc);

	switch (CACHE_REPLACEMENT(&ca->sb)) {
	case CACHE_REPLACEMENT_LRU:
		invalidate_buckets_lru(ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		invalidate_buckets_fifo(ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		invalidate_buckets_random(ca);
		break;
	}
}

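/*
 * Sleep until @cond becomes true, dropping bucket_lock across the sleep; if
 * the allocator thread is asked to stop (or IO on the cache set is disabled),
 * bail out to the thread's exit path instead.
 */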
#define allocator_wait(ca, cond)					\
do {									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
									\
		mutex_unlock(&(ca)->set->bucket_lock);			\
		if (kthread_should_stop() ||				\
		    test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) {	\
			set_current_state(TASK_RUNNING);		\
			goto out;					\
		}							\
									\
		schedule();						\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)

static int bch_allocator_push(struct cache *ca, long bucket)
{
	unsigned int i;

	/* Prios/gens are actually the most important reserve */
	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
		return true;

	for (i = 0; i < RESERVE_NR; i++)
		if (fifo_push(&ca->free[i], bucket))
			return true;

	return false;
}

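/*
 * Main loop of the background allocator: move invalidated buckets from
 * free_inc to the freelists (discarding them first if enabled), then
 * invalidate more buckets and write the updated prios/gens to disk so the
 * new buckets can safely be reused.
 */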
static int bch_allocator_thread(void *arg)
{
	struct cache *ca = arg;

	mutex_lock(&ca->set->bucket_lock);

	while (1) {
		/*
		 * First, we pull buckets off of the unused and free_inc lists,
		 * possibly issue discards to them, then we add the bucket to
		 * the free list:
		 */
		while (1) {
			long bucket;

			if (!fifo_pop(&ca->free_inc, bucket))
				break;

			if (ca->discard) {
				mutex_unlock(&ca->set->bucket_lock);
				blkdev_issue_discard(ca->bdev,
					bucket_to_sector(ca->set, bucket),
					ca->sb.bucket_size, GFP_KERNEL);
				mutex_lock(&ca->set->bucket_lock);
			}

			allocator_wait(ca, bch_allocator_push(ca, bucket));
			wake_up(&ca->set->btree_cache_wait);
			wake_up(&ca->set->bucket_wait);
		}

		/*
		 * We've run out of free buckets, we need to find some buckets
		 * we can invalidate. First, invalidate them in memory and add
		 * them to the free_inc list:
		 */

retry_invalidate:
		allocator_wait(ca, !ca->invalidate_needs_gc);
		invalidate_buckets(ca);

		/*
		 * Now, we write their new gens to disk so we can start writing
		 * new stuff to them:
		 */
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
		if (CACHE_SYNC(&ca->sb)) {
			/*
			 * This could deadlock if an allocation with a btree
			 * node locked ever blocked - having the btree node
			 * locked would block garbage collection, but here we're
			 * waiting on garbage collection before we invalidate
			 * and free anything.
			 *
			 * But this should be safe since the btree code always
			 * uses btree_check_reserve() before allocating now, and
			 * if it fails it blocks without btree nodes locked.
			 */
			if (!fifo_full(&ca->free_inc))
				goto retry_invalidate;

			if (bch_prio_write(ca, false) < 0) {
				ca->invalidate_needs_gc = 1;
				wake_up_gc(ca->set);
			}
		}
	}
out:
	wait_for_kthread_stop();
	return 0;
}

/* Allocation */

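/*
 * Allocate a single bucket from @ca's freelists, waiting for the allocator
 * thread to refill them if @wait is set. On success the bucket's gc mark and
 * priority are initialized according to the reserve it was allocated from;
 * returns the bucket index, or -1 on failure.
 */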
long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
{
	DEFINE_WAIT(w);
	struct bucket *b;
	long r;

	/* No allocation if CACHE_SET_IO_DISABLE bit is set */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)))
		return -1;

	/* fastpath */
	if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
	    fifo_pop(&ca->free[reserve], r))
		goto out;

	if (!wait) {
		trace_bcache_alloc_fail(ca, reserve);
		return -1;
	}

	do {
		prepare_to_wait(&ca->set->bucket_wait, &w,
				TASK_UNINTERRUPTIBLE);

		mutex_unlock(&ca->set->bucket_lock);
		schedule();
		mutex_lock(&ca->set->bucket_lock);
	} while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
		 !fifo_pop(&ca->free[reserve], r));

	finish_wait(&ca->set->bucket_wait, &w);
out:
	if (ca->alloc_thread)
		wake_up_process(ca->alloc_thread);

	trace_bcache_alloc(ca, reserve);

	if (expensive_debug_checks(ca->set)) {
		size_t iter;
		long i;
		unsigned int j;

		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each(i, &ca->free[j], iter)
				BUG_ON(i == r);
		fifo_for_each(i, &ca->free_inc, iter)
			BUG_ON(i == r);
	}

	b = ca->buckets + r;

	BUG_ON(atomic_read(&b->pin) != 1);

	SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

	if (reserve <= RESERVE_PRIO) {
		SET_GC_MARK(b, GC_MARK_METADATA);
		SET_GC_MOVE(b, 0);
		b->prio = BTREE_PRIO;
	} else {
		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
		SET_GC_MOVE(b, 0);
		b->prio = INITIAL_PRIO;
	}

	if (ca->set->avail_nbuckets > 0) {
		ca->set->avail_nbuckets--;
		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
	}

	return r;
}

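/*
 * Mark a bucket as no longer holding live data and account it as available
 * again.
 */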
void __bch_bucket_free(struct cache *ca, struct bucket *b)
{
	SET_GC_MARK(b, 0);
	SET_GC_SECTORS_USED(b, 0);

	if (ca->set->avail_nbuckets < ca->set->nbuckets) {
		ca->set->avail_nbuckets++;
		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
	}
}

void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		__bch_bucket_free(c->cache, PTR_BUCKET(c, k, i));
}

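/*
 * Allocate a bucket and build a key pointing at it. Must be called with
 * bucket_lock held; returns 0 on success, -1 on failure.
 */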
int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			   struct bkey *k, bool wait)
{
	struct cache *ca;
	long b;

	/* No allocation if CACHE_SET_IO_DISABLE bit is set */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
		return -1;

	lockdep_assert_held(&c->bucket_lock);

	bkey_init(k);

	ca = c->cache;
	b = bch_bucket_alloc(ca, reserve, wait);
	if (b < 0)
		return -1;

	k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
			     bucket_to_sector(c, b),
			     ca->sb.nr_this_dev);

	SET_KEY_PTRS(k, 1);

	return 0;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			 struct bkey *k, bool wait)
{
	int ret;

	mutex_lock(&c->bucket_lock);
	ret = __bch_bucket_alloc_set(c, reserve, k, wait);
	mutex_unlock(&c->bucket_lock);
	return ret;
}

/* Sector allocator */

struct open_bucket {
	struct list_head	list;
	unsigned int		last_write_point;
	unsigned int		sectors_free;
	BKEY_PADDED(key);
};

/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we try to segregate flash
 * only volume write streams from cached devices, secondly we look for a bucket
 * where the last write to it was sequential with the current write, and
 * failing that we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache at
 * the same time, you'll get better cache utilization if you try to segregate
 * their data and preserve locality.
 *
 * For example, dirty sectors of a flash only volume are not reclaimable; if
 * they are mixed into the same buckets as dirty sectors of a cached device,
 * those buckets stay marked as dirty and won't be reclaimed, even after the
 * cached device's dirty data has been written back to the backing device.
 *
 * And say you've started Firefox at the same time you're copying a
 * bunch of files. Firefox will likely end up being fairly hot and stay in the
 * cache awhile, but the data you copied might not be; if you wrote all that
 * data to the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    unsigned int write_point,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
		    UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
			continue;
		else if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last_write_point == write_point)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->cache->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}

/*
 * Allocates some space in the cache to write to, and sets k to point to the
 * newly allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point
 * to the end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If s->writeback is true, will not fail.
 */
bool bch_alloc_sectors(struct cache_set *c,
		       struct bkey *k,
		       unsigned int sectors,
		       unsigned int write_point,
		       unsigned int write_prio,
		       bool wait)
{
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	unsigned int i;

	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we have to allocate, we drop the lock, allocate
	 * and then retry. KEY_PTRS() indicates whether alloc points to
	 * allocated bucket(s).
	 */

	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
		unsigned int watermark = write_prio
			? RESERVE_MOVINGGC
			: RESERVE_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, wait))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we call pick_data_bucket(). If we allocated a bucket but
	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */
	if (KEY_PTRS(&alloc.key))
		bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

	/* Set up the pointer to the space we're allocating: */

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last_write_point = write_point;

	b->sectors_free -= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&c->cache->sectors_written);
	}

	if (b->sectors_free < c->cache->sb.block_size)
		b->sectors_free = 0;

	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * get_data_bucket()'s refcount.
	 */
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}

/* Init */

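/* Tear down the open bucket list allocated by bch_open_buckets_alloc(). */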
void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

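/* Preallocate the fixed pool of open buckets used by the sector allocator. */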
int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}

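/* Spawn the per-cache allocator thread. */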
int bch_cache_allocator_start(struct cache *ca)
{
	struct task_struct *k = kthread_run(bch_allocator_thread,
					    ca, "bcache_allocator");
	if (IS_ERR(k))
		return PTR_ERR(k);

	ca->alloc_thread = k;
	return 0;
}