/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "extents.h"
#include "sb-members.h"

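/*
 * Sector <-> bucket conversion helpers: a device is divided into fixed size
 * buckets of ca->mi.bucket_size sectors. Illustrative example, assuming a
 * 1024 sector bucket size: sector 3000 lives in bucket 2, at offset 952
 * within the bucket (3000 = 2 * 1024 + 952).
 */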
static inline u64 sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
	return div_u64(s, ca->mi.bucket_size);
}

static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
{
	return ((sector_t) b) * ca->mi.bucket_size;
}

static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
{
	u32 remainder;

	div_u64_rem(s, ca->mi.bucket_size, &remainder);
	return remainder;
}

static inline u64 sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s, u32 *offset)
{
	return div_u64_rem(s, ca->mi.bucket_size, offset);
}

#define for_each_bucket(_b, _buckets)				\
	for (_b = (_buckets)->b + (_buckets)->first_bucket;	\
	     _b < (_buckets)->b + (_buckets)->nbuckets; _b++)

/*
 * Ugly hack alert:
 *
 * We need to cram a spinlock into a single byte, because that's what we have
 * left in struct bucket, and we care about the size of these - during fsck, we
 * need in-memory state for every single bucket on every device.
 *
 * We used to do
 *	while (xchg(&b->lock, 1))
 *		cpu_relax();
 * but it turns out not all architectures support xchg on a single byte.
 *
 * So now we use bit_spin_lock(), with fun games since we can't burn a whole
 * ulong for this - we just need to make sure the lock bit always ends up in
 * the first byte.
 */

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define BUCKET_LOCK_BITNR	0
#else
#define BUCKET_LOCK_BITNR	(BITS_PER_LONG - 1)
#endif

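/*
 * Used by the BUILD_BUG_ON() in bucket_unlock() to verify at compile time
 * that BUCKET_LOCK_BITNR really does land within the first byte of a ulong
 * on this architecture:
 */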
union ulong_byte_assert {
	ulong	ulong;
	u8	byte;
};

static inline void bucket_unlock(struct bucket *b)
{
	BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);

	clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
	wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR);
}

static inline void bucket_lock(struct bucket *b)
{
	wait_on_bit_lock((void *) &b->lock, BUCKET_LOCK_BITNR,
			 TASK_UNINTERRUPTIBLE);
}

static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
	return genradix_ptr(&ca->buckets_gc, b);
}

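/*
 * The bucket_gens array is RCU protected: bucket_gens() may only be called
 * under rcu_read_lock(), or while holding one of the locks checked for in
 * the rcu_dereference_check() below:
 */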
static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
{
	return rcu_dereference_check(ca->bucket_gens,
				     !ca->fs ||
				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
				     lockdep_is_held(&ca->fs->state_lock) ||
				     lockdep_is_held(&ca->bucket_lock));
}

static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
{
	struct bucket_gens *gens = bucket_gens(ca);

	if (b - gens->first_bucket >= gens->nbuckets_minus_first)
		return NULL;
	return gens->b + b;
}

/*
 * Returns the bucket's current generation number, or -1 if @b is out of
 * range (matching the convention of dev_ptr_stale_rcu() below); the old
 * version dereferenced bucket_gen()'s result without checking for NULL:
 */
static inline int bucket_gen_get(struct bch_dev *ca, size_t b)
{
	rcu_read_lock();
	u8 *gen = bucket_gen(ca, b);
	int ret = gen ? *gen : -1;
	rcu_read_unlock();
	return ret;
}

static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
				   const struct bch_extent_ptr *ptr)
{
	return sector_to_bucket(ca, ptr->offset);
}

static inline struct bpos PTR_BUCKET_POS(const struct bch_dev *ca,
					 const struct bch_extent_ptr *ptr)
{
	return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
}

static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_dev *ca,
						const struct bch_extent_ptr *ptr,
						u32 *bucket_offset)
{
	return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
}

static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
					   const struct bch_extent_ptr *ptr)
{
	return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr));
}

static inline enum bch_data_type ptr_data_type(const struct bkey *k,
					       const struct bch_extent_ptr *ptr)
{
	if (bkey_is_btree_ptr(k))
		return BCH_DATA_btree;

	return ptr->cached ? BCH_DATA_cached : BCH_DATA_user;
}

static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
{
	EBUG_ON(sectors < 0);

	return crc_is_compressed(p.crc)
		? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
				   p.crc.uncompressed_size)
		: sectors;
}

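/*
 * Bucket generation numbers are 8 bits and wrap around, so they're compared
 * with signed arithmetic: gen_cmp() is positive iff @a is newer than @b, and
 * gen_after() clamps that to how many generations ahead @a is (0 if it isn't
 * ahead):
 */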
static inline int gen_cmp(u8 a, u8 b)
{
	return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
	int r = gen_cmp(a, b);

	return r > 0 ? r : 0;
}

static inline int dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
	u8 *gen = bucket_gen(ca, PTR_BUCKET_NR(ca, ptr));
	if (!gen)
		return -1;
	return gen_after(*gen, ptr->gen);
}

/**
 * dev_ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 */
static inline int dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
	rcu_read_lock();
	int ret = dev_ptr_stale_rcu(ca, ptr);
	rcu_read_unlock();

	return ret;
}

/* Device usage: */

void bch2_dev_usage_read_fast(struct bch_dev *, struct bch_dev_usage *);
static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
{
	struct bch_dev_usage ret;

	bch2_dev_usage_read_fast(ca, &ret);
	return ret;
}

void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage *);

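/*
 * How many buckets are held in reserve at a given watermark: the switch
 * falls through, so each watermark's reserve is its own increment plus the
 * increments of every case below it. E.g. BCH_WATERMARK_stripe holds back
 * ~2/64ths of the device's buckets plus two btree reserves, while
 * BCH_WATERMARK_reclaim holds back nothing and may dig into all remaining
 * space:
 */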
static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
{
	s64 reserved = 0;

	switch (watermark) {
	case BCH_WATERMARK_NR:
		BUG();
	case BCH_WATERMARK_stripe:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_normal:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_copygc:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree_copygc:
	case BCH_WATERMARK_reclaim:
	case BCH_WATERMARK_interior_updates:
		break;
	}

	return reserved;
}

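/*
 * dev_buckets_free() counts only buckets that are immediately allocatable;
 * __dev_buckets_available() also counts buckets that can be reclaimed
 * cheaply (cached data, or buckets awaiting a discard or gen rewrite).
 * Both subtract currently open buckets and the watermark's reserve:
 */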
static inline u64 dev_buckets_free(struct bch_dev *ca,
				   struct bch_dev_usage usage,
				   enum bch_watermark watermark)
{
	return max_t(s64, 0,
		     usage.d[BCH_DATA_free].buckets -
		     ca->nr_open_buckets -
		     bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage usage,
					  enum bch_watermark watermark)
{
	return max_t(s64, 0,
		       usage.d[BCH_DATA_free].buckets
		     + usage.d[BCH_DATA_cached].buckets
		     + usage.d[BCH_DATA_need_gc_gens].buckets
		     + usage.d[BCH_DATA_need_discard].buckets
		     - ca->nr_open_buckets
		     - bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 dev_buckets_available(struct bch_dev *ca,
					enum bch_watermark watermark)
{
	return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark);
}

/* Filesystem usage: */

static inline unsigned dev_usage_u64s(void)
{
	return sizeof(struct bch_dev_usage) / sizeof(u64);
}

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);

int bch2_bucket_ref_update(struct btree_trans *, struct bch_dev *,
			   struct bkey_s_c, const struct bch_extent_ptr *,
			   s64, enum bch_data_type, u8, u8, u32 *);

int bch2_check_fix_ptrs(struct btree_trans *,
			enum btree_id, unsigned, struct bkey_s_c,
			enum btree_iter_update_trigger_flags);

int bch2_trigger_extent(struct btree_trans *, enum btree_id, unsigned,
			struct bkey_s_c, struct bkey_s,
			enum btree_iter_update_trigger_flags);
int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
			     struct bkey_s_c, struct bkey_s,
			     enum btree_iter_update_trigger_flags);

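/*
 * Run a trigger for the key being overwritten, then for the key being
 * inserted: the old key is passed with BTREE_TRIGGER_insert cleared and the
 * new key with BTREE_TRIGGER_overwrite cleared, so _fn sees exactly one of
 * the two phases per call:
 */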
#define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)\
({												\
	int ret = 0;										\
												\
	if (_old.k->type)									\
		ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_insert);	\
	if (!ret && _new.k->type)								\
		ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_overwrite);\
	ret;											\
})

void bch2_trans_account_disk_usage_change(struct btree_trans *);

int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *, u64,
				    enum bch_data_type, unsigned,
				    enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *,
			   enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs_flags(struct bch_fs *,
				  enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs(struct bch_fs *);

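/*
 * Bucket 0 and any bucket that overlaps one of the superblock copies
 * described by the superblock layout must never be allocated; the loop
 * below does a standard interval overlap test against each sb copy:
 */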
static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	u64 b_offset	= bucket_to_sector(ca, b);
	u64 b_end	= bucket_to_sector(ca, b + 1);
	unsigned i;

	if (!b)
		return true;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);
		u64 end = offset + (1 << layout->sb_max_size_bits);

		if (!(offset >= b_end || end <= b_offset))
			return true;
	}

	return false;
}

static inline const char *bch2_data_type_str(enum bch_data_type type)
{
	return type < BCH_DATA_NR
		? __bch2_data_types[type]
		: "(invalid data type)";
}

/* disk reservations: */

static inline void bch2_disk_reservation_put(struct bch_fs *c,
					     struct disk_reservation *res)
{
	if (res->sectors) {
		this_cpu_sub(*c->online_reserved, res->sectors);
		res->sectors = 0;
	}
}

enum bch_reservation_flags {
	BCH_DISK_RESERVATION_NOFAIL	= 1 << 0,
	BCH_DISK_RESERVATION_PARTIAL	= 1 << 1,
};

int __bch2_disk_reservation_add(struct bch_fs *, struct disk_reservation *,
				u64, enum bch_reservation_flags);

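/*
 * Fast path: carve the reservation out of this CPU's percpu
 * sectors_available pool with a cmpxchg loop; fall back to
 * __bch2_disk_reservation_add() when the percpu pool can't cover the
 * request:
 */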
static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
					    u64 sectors, enum bch_reservation_flags flags)
{
#ifdef __KERNEL__
	u64 old, new;

	old = this_cpu_read(c->pcpu->sectors_available);
	do {
		if (sectors > old)
			return __bch2_disk_reservation_add(c, res, sectors, flags);

		new = old - sectors;
	} while (!this_cpu_try_cmpxchg(c->pcpu->sectors_available, &old, new));

	this_cpu_add(*c->online_reserved, sectors);
	res->sectors += sectors;
	return 0;
#else
	return __bch2_disk_reservation_add(c, res, sectors, flags);
#endif
}

static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
	return (struct disk_reservation) {
		.sectors	= 0,
#if 0
		/* not used yet: */
		.gen		= c->capacity_gen,
#endif
		.nr_replicas	= nr_replicas,
	};
}

static inline int bch2_disk_reservation_get(struct bch_fs *c,
					    struct disk_reservation *res,
					    u64 sectors, unsigned nr_replicas,
					    int flags)
{
	*res = bch2_disk_reservation_init(c, nr_replicas);

	return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}

#define RESERVE_FACTOR	6

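/*
 * Scale a raw size down to the usable amount, holding back
 * 1/(2^RESERVE_FACTOR + 1) of it - i.e. avail_factor(r) = r * 64/65:
 */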
static inline u64 avail_factor(u64 r)
{
	return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
}

void bch2_buckets_nouse_free(struct bch_fs *);
int bch2_buckets_nouse_alloc(struct bch_fs *);

int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */