xref: /linux/fs/bcachefs/extents.c (revision 9432e90d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4  *
5  * Code for managing the extent btree and dynamically updating the writeback
6  * dirty sector count.
7  */
8 
9 #include "bcachefs.h"
10 #include "bkey_methods.h"
11 #include "btree_cache.h"
12 #include "btree_gc.h"
13 #include "btree_io.h"
14 #include "btree_iter.h"
15 #include "buckets.h"
16 #include "checksum.h"
17 #include "compress.h"
18 #include "debug.h"
19 #include "disk_groups.h"
20 #include "error.h"
21 #include "extents.h"
22 #include "inode.h"
23 #include "journal.h"
24 #include "replicas.h"
25 #include "super.h"
26 #include "super-io.h"
27 #include "trace.h"
28 #include "util.h"
29 
30 static unsigned bch2_crc_field_size_max[] = {
31 	[BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
32 	[BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
33 	[BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
34 };
35 
36 static void bch2_extent_crc_pack(union bch_extent_crc *,
37 				 struct bch_extent_crc_unpacked,
38 				 enum bch_extent_entry_type);
39 
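/* Find the failure record for @dev in @f, if one has been recorded: */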
40 static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
41 						   unsigned dev)
42 {
43 	struct bch_dev_io_failures *i;
44 
45 	for (i = f->devs; i < f->devs + f->nr; i++)
46 		if (i->dev == dev)
47 			return i;
48 
49 	return NULL;
50 }
51 
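/*
 * Record an IO failure against pointer @p: allocate a per-device record if
 * there isn't one yet, reset the counters when a different pointer index is
 * being retried, otherwise bump nr_failed:
 */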
52 void bch2_mark_io_failure(struct bch_io_failures *failed,
53 			  struct extent_ptr_decoded *p)
54 {
55 	struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);
56 
57 	if (!f) {
58 		BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));
59 
60 		f = &failed->devs[failed->nr++];
61 		f->dev		= p->ptr.dev;
62 		f->idx		= p->idx;
63 		f->nr_failed	= 1;
64 		f->nr_retries	= 0;
65 	} else if (p->idx != f->idx) {
66 		f->idx		= p->idx;
67 		f->nr_failed	= 1;
68 		f->nr_retries	= 0;
69 	} else {
70 		f->nr_failed++;
71 	}
72 }
73 
74 static inline u64 dev_latency(struct bch_fs *c, unsigned dev)
75 {
76 	struct bch_dev *ca = bch2_dev_rcu(c, dev);
77 	return ca ? atomic64_read(&ca->cur_latency[READ]) : S64_MAX;
78 }
79 
80 /*
81  * returns true if p1 is better than p2:
82  */
83 static inline bool ptr_better(struct bch_fs *c,
84 			      const struct extent_ptr_decoded p1,
85 			      const struct extent_ptr_decoded p2)
86 {
87 	if (likely(!p1.idx && !p2.idx)) {
88 		u64 l1 = dev_latency(c, p1.ptr.dev);
89 		u64 l2 = dev_latency(c, p2.ptr.dev);
90 
91 		/* Pick at random, biased in favor of the faster device: */
92 
93 		return bch2_rand_range(l1 + l2) > l1;
94 	}
95 
96 	if (bch2_force_reconstruct_read)
97 		return p1.idx > p2.idx;
98 
99 	return p1.idx < p2.idx;
100 }
101 
102 /*
103  * Pick a non-stale pointer to read from, preferring pointers that haven't
104  * failed (per @failed, which may be NULL). Returns 1 if a pointer was picked, 0
105  * for a hole/unwritten extent, -EIO if there are dirty pointers but none are readable.
106  */
107 int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
108 			       struct bch_io_failures *failed,
109 			       struct extent_ptr_decoded *pick)
110 {
111 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
112 	const union bch_extent_entry *entry;
113 	struct extent_ptr_decoded p;
114 	struct bch_dev_io_failures *f;
115 	int ret = 0;
116 
117 	if (k.k->type == KEY_TYPE_error)
118 		return -EIO;
119 
120 	rcu_read_lock();
121 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
122 		/*
123 		 * Unwritten extent: no need to actually read, treat it as a
124 		 * hole and return 0s:
125 		 */
126 		if (p.ptr.unwritten) {
127 			ret = 0;
128 			break;
129 		}
130 
131 		/*
132 		 * If there are any dirty pointers it's an error if we can't
133 		 * read:
134 		 */
135 		if (!ret && !p.ptr.cached)
136 			ret = -EIO;
137 
138 		struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
139 
140 		if (p.ptr.cached && (!ca || dev_ptr_stale_rcu(ca, &p.ptr)))
141 			continue;
142 
143 		f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
144 		if (f)
145 			p.idx = f->nr_failed < f->nr_retries
146 				? f->idx
147 				: f->idx + 1;
148 
149 		if (!p.idx && !ca)
150 			p.idx++;
151 
152 		if (!p.idx && p.has_ec && bch2_force_reconstruct_read)
153 			p.idx++;
154 
155 		if (!p.idx && !bch2_dev_is_readable(ca))
156 			p.idx++;
157 
158 		if (p.idx >= (unsigned) p.has_ec + 1)
159 			continue;
160 
161 		if (ret > 0 && !ptr_better(c, p, *pick))
162 			continue;
163 
164 		*pick = p;
165 		ret = 1;
166 	}
167 	rcu_read_unlock();
168 
169 	return ret;
170 }
171 
172 /* KEY_TYPE_btree_ptr: */
173 
174 int bch2_btree_ptr_invalid(struct bch_fs *c, struct bkey_s_c k,
175 			   enum bch_validate_flags flags,
176 			   struct printbuf *err)
177 {
178 	int ret = 0;
179 
180 	bkey_fsck_err_on(bkey_val_u64s(k.k) > BCH_REPLICAS_MAX, c, err,
181 			 btree_ptr_val_too_big,
182 			 "value too big (%zu > %u)", bkey_val_u64s(k.k), BCH_REPLICAS_MAX);
183 
184 	ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
185 fsck_err:
186 	return ret;
187 }
188 
189 void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
190 			    struct bkey_s_c k)
191 {
192 	bch2_bkey_ptrs_to_text(out, c, k);
193 }
194 
195 int bch2_btree_ptr_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
196 			      enum bch_validate_flags flags,
197 			      struct printbuf *err)
198 {
199 	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
200 	int ret = 0;
201 
202 	bkey_fsck_err_on(bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX,
203 			 c, err, btree_ptr_v2_val_too_big,
204 			 "value too big (%zu > %zu)",
205 			 bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);
206 
207 	bkey_fsck_err_on(bpos_ge(bp.v->min_key, bp.k->p),
208 			 c, err, btree_ptr_v2_min_key_bad,
209 			 "min_key > key");
210 
211 	if (flags & BCH_VALIDATE_write)
212 		bkey_fsck_err_on(!bp.v->sectors_written,
213 				 c, err, btree_ptr_v2_written_0,
214 				 "sectors_written == 0");
215 
216 	ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
217 fsck_err:
218 	return ret;
219 }
220 
221 void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
222 			       struct bkey_s_c k)
223 {
224 	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
225 
226 	prt_printf(out, "seq %llx written %u min_key %s",
227 	       le64_to_cpu(bp.v->seq),
228 	       le16_to_cpu(bp.v->sectors_written),
229 	       BTREE_PTR_RANGE_UPDATED(bp.v) ? "R " : "");
230 
231 	bch2_bpos_to_text(out, bp.v->min_key);
232 	prt_printf(out, " ");
233 	bch2_bkey_ptrs_to_text(out, c, k);
234 }
235 
236 void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
237 			      unsigned big_endian, int write,
238 			      struct bkey_s k)
239 {
240 	struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k);
241 
242 	compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);
243 
244 	if (version < bcachefs_metadata_version_inode_btree_change &&
245 	    btree_id_is_extents(btree_id) &&
246 	    !bkey_eq(bp.v->min_key, POS_MIN))
247 		bp.v->min_key = write
248 			? bpos_nosnap_predecessor(bp.v->min_key)
249 			: bpos_nosnap_successor(bp.v->min_key);
250 }
251 
252 /* KEY_TYPE_extent: */
253 
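/*
 * Try to merge @r into @l: both extents must carry the same sequence of entry
 * types, each pair of pointers must be contiguous on the same device and
 * generation without straddling a bucket boundary, and checksum entries must
 * either still cover the merged data or be mergeable. On success @l is resized
 * to cover both extents:
 */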
254 bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
255 {
256 	struct bkey_ptrs   l_ptrs = bch2_bkey_ptrs(l);
257 	struct bkey_ptrs_c r_ptrs = bch2_bkey_ptrs_c(r);
258 	union bch_extent_entry *en_l;
259 	const union bch_extent_entry *en_r;
260 	struct extent_ptr_decoded lp, rp;
261 	bool use_right_ptr;
262 
263 	en_l = l_ptrs.start;
264 	en_r = r_ptrs.start;
265 	while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
266 		if (extent_entry_type(en_l) != extent_entry_type(en_r))
267 			return false;
268 
269 		en_l = extent_entry_next(en_l);
270 		en_r = extent_entry_next(en_r);
271 	}
272 
273 	if (en_l < l_ptrs.end || en_r < r_ptrs.end)
274 		return false;
275 
276 	en_l = l_ptrs.start;
277 	en_r = r_ptrs.start;
278 	lp.crc = bch2_extent_crc_unpack(l.k, NULL);
279 	rp.crc = bch2_extent_crc_unpack(r.k, NULL);
280 
281 	while (__bkey_ptr_next_decode(l.k, l_ptrs.end, lp, en_l) &&
282 	       __bkey_ptr_next_decode(r.k, r_ptrs.end, rp, en_r)) {
283 		if (lp.ptr.offset + lp.crc.offset + lp.crc.live_size !=
284 		    rp.ptr.offset + rp.crc.offset ||
285 		    lp.ptr.dev			!= rp.ptr.dev ||
286 		    lp.ptr.gen			!= rp.ptr.gen ||
287 		    lp.ptr.unwritten		!= rp.ptr.unwritten ||
288 		    lp.has_ec			!= rp.has_ec)
289 			return false;
290 
291 		/* Extents may not straddle buckets: */
292 		rcu_read_lock();
293 		struct bch_dev *ca = bch2_dev_rcu(c, lp.ptr.dev);
294 		bool same_bucket = ca && PTR_BUCKET_NR(ca, &lp.ptr) == PTR_BUCKET_NR(ca, &rp.ptr);
295 		rcu_read_unlock();
296 
297 		if (!same_bucket)
298 			return false;
299 
300 		if (lp.has_ec			!= rp.has_ec ||
301 		    (lp.has_ec &&
302 		     (lp.ec.block		!= rp.ec.block ||
303 		      lp.ec.redundancy		!= rp.ec.redundancy ||
304 		      lp.ec.idx			!= rp.ec.idx)))
305 			return false;
306 
307 		if (lp.crc.compression_type	!= rp.crc.compression_type ||
308 		    lp.crc.nonce		!= rp.crc.nonce)
309 			return false;
310 
311 		if (lp.crc.offset + lp.crc.live_size + rp.crc.live_size <=
312 		    lp.crc.uncompressed_size) {
313 			/* can use left extent's crc entry */
314 		} else if (lp.crc.live_size <= rp.crc.offset) {
315 			/* can use right extent's crc entry */
316 		} else {
317 			/* check if checksums can be merged: */
318 			if (lp.crc.csum_type		!= rp.crc.csum_type ||
319 			    lp.crc.nonce		!= rp.crc.nonce ||
320 			    crc_is_compressed(lp.crc) ||
321 			    !bch2_checksum_mergeable(lp.crc.csum_type))
322 				return false;
323 
324 			if (lp.crc.offset + lp.crc.live_size != lp.crc.compressed_size ||
325 			    rp.crc.offset)
326 				return false;
327 
328 			if (lp.crc.csum_type &&
329 			    lp.crc.uncompressed_size +
330 			    rp.crc.uncompressed_size > (c->opts.encoded_extent_max >> 9))
331 				return false;
332 		}
333 
334 		en_l = extent_entry_next(en_l);
335 		en_r = extent_entry_next(en_r);
336 	}
337 
338 	en_l = l_ptrs.start;
339 	en_r = r_ptrs.start;
340 	while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
341 		if (extent_entry_is_crc(en_l)) {
342 			struct bch_extent_crc_unpacked crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
343 			struct bch_extent_crc_unpacked crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
344 
345 			if (crc_l.uncompressed_size + crc_r.uncompressed_size >
346 			    bch2_crc_field_size_max[extent_entry_type(en_l)])
347 				return false;
348 		}
349 
350 		en_l = extent_entry_next(en_l);
351 		en_r = extent_entry_next(en_r);
352 	}
353 
354 	use_right_ptr = false;
355 	en_l = l_ptrs.start;
356 	en_r = r_ptrs.start;
357 	while (en_l < l_ptrs.end) {
358 		if (extent_entry_type(en_l) == BCH_EXTENT_ENTRY_ptr &&
359 		    use_right_ptr)
360 			en_l->ptr = en_r->ptr;
361 
362 		if (extent_entry_is_crc(en_l)) {
363 			struct bch_extent_crc_unpacked crc_l =
364 				bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
365 			struct bch_extent_crc_unpacked crc_r =
366 				bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
367 
368 			use_right_ptr = false;
369 
370 			if (crc_l.offset + crc_l.live_size + crc_r.live_size <=
371 			    crc_l.uncompressed_size) {
372 				/* can use left extent's crc entry */
373 			} else if (crc_l.live_size <= crc_r.offset) {
374 				/* can use right extent's crc entry */
375 				crc_r.offset -= crc_l.live_size;
376 				bch2_extent_crc_pack(entry_to_crc(en_l), crc_r,
377 						     extent_entry_type(en_l));
378 				use_right_ptr = true;
379 			} else {
380 				crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
381 								 crc_l.csum,
382 								 crc_r.csum,
383 								 crc_r.uncompressed_size << 9);
384 
385 				crc_l.uncompressed_size	+= crc_r.uncompressed_size;
386 				crc_l.compressed_size	+= crc_r.compressed_size;
387 				bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
388 						     extent_entry_type(en_l));
389 			}
390 		}
391 
392 		en_l = extent_entry_next(en_l);
393 		en_r = extent_entry_next(en_r);
394 	}
395 
396 	bch2_key_resize(l.k, l.k->size + r.k->size);
397 	return true;
398 }
399 
400 /* KEY_TYPE_reservation: */
401 
402 int bch2_reservation_invalid(struct bch_fs *c, struct bkey_s_c k,
403 			     enum bch_validate_flags flags,
404 			     struct printbuf *err)
405 {
406 	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
407 	int ret = 0;
408 
409 	bkey_fsck_err_on(!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX, c, err,
410 			 reservation_key_nr_replicas_invalid,
411 			 "invalid nr_replicas (%u)", r.v->nr_replicas);
412 fsck_err:
413 	return ret;
414 }
415 
416 void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
417 			      struct bkey_s_c k)
418 {
419 	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
420 
421 	prt_printf(out, "generation %u replicas %u",
422 	       le32_to_cpu(r.v->generation),
423 	       r.v->nr_replicas);
424 }
425 
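/* Reservations can only be merged if generation and nr_replicas match: */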
426 bool bch2_reservation_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
427 {
428 	struct bkey_s_reservation l = bkey_s_to_reservation(_l);
429 	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(_r);
430 
431 	if (l.v->generation != r.v->generation ||
432 	    l.v->nr_replicas != r.v->nr_replicas)
433 		return false;
434 
435 	bch2_key_resize(l.k, l.k->size + r.k->size);
436 	return true;
437 }
438 
439 /* Extent checksum entries: */
440 
441 /* returns true if not equal */
442 static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
443 					 struct bch_extent_crc_unpacked r)
444 {
445 	return (l.csum_type		!= r.csum_type ||
446 		l.compression_type	!= r.compression_type ||
447 		l.compressed_size	!= r.compressed_size ||
448 		l.uncompressed_size	!= r.uncompressed_size ||
449 		l.offset		!= r.offset ||
450 		l.live_size		!= r.live_size ||
451 		l.nonce			!= r.nonce ||
452 		bch2_crc_cmp(l.csum, r.csum));
453 }
454 
455 static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
456 				  struct bch_extent_crc_unpacked n)
457 {
458 	return !crc_is_compressed(u) &&
459 		u.csum_type &&
460 		u.uncompressed_size > u.live_size &&
461 		bch2_csum_type_is_encryption(u.csum_type) ==
462 		bch2_csum_type_is_encryption(n.csum_type);
463 }
464 
465 bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
466 				 struct bch_extent_crc_unpacked n)
467 {
468 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
469 	struct bch_extent_crc_unpacked crc;
470 	const union bch_extent_entry *i;
471 
472 	if (!n.csum_type)
473 		return false;
474 
475 	bkey_for_each_crc(k.k, ptrs, crc, i)
476 		if (can_narrow_crc(crc, n))
477 			return true;
478 
479 	return false;
480 }
481 
482 /*
483  * We're writing another replica for this extent, so while we've got the data in
484  * memory we'll be computing a new checksum for the currently live data.
485  *
486  * If there are other replicas we aren't moving, and they are checksummed but
487  * not compressed, we can modify them to point to only the data that is
488  * currently live (so that readers won't have to bounce) while we've got the
489  * checksum we need:
490  */
491 bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
492 {
493 	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
494 	struct bch_extent_crc_unpacked u;
495 	struct extent_ptr_decoded p;
496 	union bch_extent_entry *i;
497 	bool ret = false;
498 
499 	/* Find a checksum entry that covers only live data: */
500 	if (!n.csum_type) {
501 		bkey_for_each_crc(&k->k, ptrs, u, i)
502 			if (!crc_is_compressed(u) &&
503 			    u.csum_type &&
504 			    u.live_size == u.uncompressed_size) {
505 				n = u;
506 				goto found;
507 			}
508 		return false;
509 	}
510 found:
511 	BUG_ON(crc_is_compressed(n));
512 	BUG_ON(n.offset);
513 	BUG_ON(n.live_size != k->k.size);
514 
515 restart_narrow_pointers:
516 	ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
517 
518 	bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
519 		if (can_narrow_crc(p.crc, n)) {
520 			bch2_bkey_drop_ptr_noerror(bkey_i_to_s(k), &i->ptr);
521 			p.ptr.offset += p.crc.offset;
522 			p.crc = n;
523 			bch2_extent_ptr_decoded_append(k, &p);
524 			ret = true;
525 			goto restart_narrow_pointers;
526 		}
527 
528 	return ret;
529 }
530 
531 static void bch2_extent_crc_pack(union bch_extent_crc *dst,
532 				 struct bch_extent_crc_unpacked src,
533 				 enum bch_extent_entry_type type)
534 {
535 #define set_common_fields(_dst, _src)					\
536 		_dst.type		= 1 << type;			\
537 		_dst.csum_type		= _src.csum_type,		\
538 		_dst.compression_type	= _src.compression_type,	\
539 		_dst._compressed_size	= _src.compressed_size - 1,	\
540 		_dst._uncompressed_size	= _src.uncompressed_size - 1,	\
541 		_dst.offset		= _src.offset
542 
543 	switch (type) {
544 	case BCH_EXTENT_ENTRY_crc32:
545 		set_common_fields(dst->crc32, src);
546 		dst->crc32.csum		= (u32 __force) *((__le32 *) &src.csum.lo);
547 		break;
548 	case BCH_EXTENT_ENTRY_crc64:
549 		set_common_fields(dst->crc64, src);
550 		dst->crc64.nonce	= src.nonce;
551 		dst->crc64.csum_lo	= (u64 __force) src.csum.lo;
552 		dst->crc64.csum_hi	= (u64 __force) *((__le16 *) &src.csum.hi);
553 		break;
554 	case BCH_EXTENT_ENTRY_crc128:
555 		set_common_fields(dst->crc128, src);
556 		dst->crc128.nonce	= src.nonce;
557 		dst->crc128.csum	= src.csum;
558 		break;
559 	default:
560 		BUG();
561 	}
562 #undef set_common_fields
563 }
564 
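/*
 * Append a checksum entry to @k, using the smallest crc field type
 * (crc32/crc64/crc128) that can hold the checksum, size and nonce:
 */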
565 void bch2_extent_crc_append(struct bkey_i *k,
566 			    struct bch_extent_crc_unpacked new)
567 {
568 	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
569 	union bch_extent_crc *crc = (void *) ptrs.end;
570 	enum bch_extent_entry_type type;
571 
572 	if (bch_crc_bytes[new.csum_type]	<= 4 &&
573 	    new.uncompressed_size		<= CRC32_SIZE_MAX &&
574 	    new.nonce				<= CRC32_NONCE_MAX)
575 		type = BCH_EXTENT_ENTRY_crc32;
576 	else if (bch_crc_bytes[new.csum_type]	<= 10 &&
577 		   new.uncompressed_size	<= CRC64_SIZE_MAX &&
578 		   new.nonce			<= CRC64_NONCE_MAX)
579 		type = BCH_EXTENT_ENTRY_crc64;
580 	else if (bch_crc_bytes[new.csum_type]	<= 16 &&
581 		   new.uncompressed_size	<= CRC128_SIZE_MAX &&
582 		   new.nonce			<= CRC128_NONCE_MAX)
583 		type = BCH_EXTENT_ENTRY_crc128;
584 	else
585 		BUG();
586 
587 	bch2_extent_crc_pack(crc, new, type);
588 
589 	k->k.u64s += extent_entry_u64s(ptrs.end);
590 
591 	EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
592 }
593 
594 /* Generic code for keys with pointers: */
595 
596 unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
597 {
598 	return bch2_bkey_devs(k).nr;
599 }
600 
601 unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
602 {
603 	return k.k->type == KEY_TYPE_reservation
604 		? bkey_s_c_to_reservation(k).v->nr_replicas
605 		: bch2_bkey_dirty_devs(k).nr;
606 }
607 
608 unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
609 {
610 	unsigned ret = 0;
611 
612 	if (k.k->type == KEY_TYPE_reservation) {
613 		ret = bkey_s_c_to_reservation(k).v->nr_replicas;
614 	} else {
615 		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
616 		const union bch_extent_entry *entry;
617 		struct extent_ptr_decoded p;
618 
619 		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
620 			ret += !p.ptr.cached && !crc_is_compressed(p.crc);
621 	}
622 
623 	return ret;
624 }
625 
626 unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
627 {
628 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
629 	const union bch_extent_entry *entry;
630 	struct extent_ptr_decoded p;
631 	unsigned ret = 0;
632 
633 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
634 		if (!p.ptr.cached && crc_is_compressed(p.crc))
635 			ret += p.crc.compressed_size;
636 
637 	return ret;
638 }
639 
640 bool bch2_bkey_is_incompressible(struct bkey_s_c k)
641 {
642 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
643 	const union bch_extent_entry *entry;
644 	struct bch_extent_crc_unpacked crc;
645 
646 	bkey_for_each_crc(k.k, ptrs, crc, entry)
647 		if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
648 			return true;
649 	return false;
650 }
651 
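/*
 * Number of replicas for this extent, not counting cached pointers but
 * including erasure coded redundancy:
 */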
652 unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
653 {
654 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
655 	const union bch_extent_entry *entry;
656 	struct extent_ptr_decoded p = { 0 };
657 	unsigned replicas = 0;
658 
659 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
660 		if (p.ptr.cached)
661 			continue;
662 
663 		if (p.has_ec)
664 			replicas += p.ec.redundancy;
665 
666 		replicas++;
667 
668 	}
669 
670 	return replicas;
671 }
672 
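/*
 * Durability contributed by a single pointer: 0 if cached, redundancy + 1 if
 * erasure coded, otherwise the device's durability setting:
 */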
673 static inline unsigned __extent_ptr_durability(struct bch_dev *ca, struct extent_ptr_decoded *p)
674 {
675 	if (p->ptr.cached)
676 		return 0;
677 
678 	return p->has_ec
679 		? p->ec.redundancy + 1
680 		: ca->mi.durability;
681 }
682 
683 unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
684 {
685 	struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev);
686 
687 	return ca ? __extent_ptr_durability(ca, p) : 0;
688 }
689 
690 unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
691 {
692 	struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev);
693 
694 	if (!ca || ca->mi.state == BCH_MEMBER_STATE_failed)
695 		return 0;
696 
697 	return __extent_ptr_durability(ca, p);
698 }
699 
700 unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
701 {
702 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
703 	const union bch_extent_entry *entry;
704 	struct extent_ptr_decoded p;
705 	unsigned durability = 0;
706 
707 	rcu_read_lock();
708 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
709 		durability += bch2_extent_ptr_durability(c, &p);
710 	rcu_read_unlock();
711 
712 	return durability;
713 }
714 
715 static unsigned bch2_bkey_durability_safe(struct bch_fs *c, struct bkey_s_c k)
716 {
717 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
718 	const union bch_extent_entry *entry;
719 	struct extent_ptr_decoded p;
720 	unsigned durability = 0;
721 
722 	rcu_read_lock();
723 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
724 		if (p.ptr.dev < c->sb.nr_devices && c->devs[p.ptr.dev])
725 			durability += bch2_extent_ptr_durability(c, &p);
726 	rcu_read_unlock();
727 
728 	return durability;
729 }
730 
731 void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry)
732 {
733 	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
734 	union bch_extent_entry *next = extent_entry_next(entry);
735 
736 	memmove_u64s(entry, next, (u64 *) end - (u64 *) next);
737 	k->k.u64s -= extent_entry_u64s(entry);
738 }
739 
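/*
 * Append a decoded pointer back to @k: reuse an existing crc entry matching
 * p->crc if there is one, otherwise append a new one, then insert the pointer
 * (and its stripe pointer, if any):
 */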
740 void bch2_extent_ptr_decoded_append(struct bkey_i *k,
741 				    struct extent_ptr_decoded *p)
742 {
743 	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
744 	struct bch_extent_crc_unpacked crc =
745 		bch2_extent_crc_unpack(&k->k, NULL);
746 	union bch_extent_entry *pos;
747 
748 	if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
749 		pos = ptrs.start;
750 		goto found;
751 	}
752 
753 	bkey_for_each_crc(&k->k, ptrs, crc, pos)
754 		if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
755 			pos = extent_entry_next(pos);
756 			goto found;
757 		}
758 
759 	bch2_extent_crc_append(k, p->crc);
760 	pos = bkey_val_end(bkey_i_to_s(k));
761 found:
762 	p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
763 	__extent_entry_insert(k, pos, to_entry(&p->ptr));
764 
765 	if (p->has_ec) {
766 		p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
767 		__extent_entry_insert(k, pos, to_entry(&p->ec));
768 	}
769 }
770 
771 static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
772 					  union bch_extent_entry *entry)
773 {
774 	union bch_extent_entry *i = ptrs.start;
775 
776 	if (i == entry)
777 		return NULL;
778 
779 	while (extent_entry_next(i) != entry)
780 		i = extent_entry_next(i);
781 	return i;
782 }
783 
784 /*
785  * Returns pointer to the next entry after the one being dropped:
786  */
787 union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s k,
788 						   struct bch_extent_ptr *ptr)
789 {
790 	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
791 	union bch_extent_entry *entry = to_entry(ptr), *next;
792 	union bch_extent_entry *ret = entry;
793 	bool drop_crc = true;
794 
795 	EBUG_ON(ptr < &ptrs.start->ptr ||
796 		ptr >= &ptrs.end->ptr);
797 	EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
798 
799 	for (next = extent_entry_next(entry);
800 	     next != ptrs.end;
801 	     next = extent_entry_next(next)) {
802 		if (extent_entry_is_crc(next)) {
803 			break;
804 		} else if (extent_entry_is_ptr(next)) {
805 			drop_crc = false;
806 			break;
807 		}
808 	}
809 
810 	extent_entry_drop(k, entry);
811 
812 	while ((entry = extent_entry_prev(ptrs, entry))) {
813 		if (extent_entry_is_ptr(entry))
814 			break;
815 
816 		if ((extent_entry_is_crc(entry) && drop_crc) ||
817 		    extent_entry_is_stripe_ptr(entry)) {
818 			ret = (void *) ret - extent_entry_bytes(entry);
819 			extent_entry_drop(k, entry);
820 		}
821 	}
822 
823 	return ret;
824 }
825 
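/*
 * Drop @ptr from @k; if that removes the last dirty pointer the key becomes an
 * error key, and if no pointers remain at all it becomes deleted:
 */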
826 union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
827 					   struct bch_extent_ptr *ptr)
828 {
829 	bool have_dirty = bch2_bkey_dirty_devs(k.s_c).nr;
830 	union bch_extent_entry *ret =
831 		bch2_bkey_drop_ptr_noerror(k, ptr);
832 
833 	/*
834 	 * If we deleted all the dirty pointers and there's still cached
835 	 * pointers, we could set the cached pointers to dirty if they're not
836 	 * stale - but to do that correctly we'd need to grab an open_bucket
837 	 * reference so that we don't race with bucket reuse:
838 	 */
839 	if (have_dirty &&
840 	    !bch2_bkey_dirty_devs(k.s_c).nr) {
841 		k.k->type = KEY_TYPE_error;
842 		set_bkey_val_u64s(k.k, 0);
843 		ret = NULL;
844 	} else if (!bch2_bkey_nr_ptrs(k.s_c)) {
845 		k.k->type = KEY_TYPE_deleted;
846 		set_bkey_val_u64s(k.k, 0);
847 		ret = NULL;
848 	}
849 
850 	return ret;
851 }
852 
853 void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
854 {
855 	bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
856 }
857 
858 void bch2_bkey_drop_device_noerror(struct bkey_s k, unsigned dev)
859 {
860 	struct bch_extent_ptr *ptr = bch2_bkey_has_device(k, dev);
861 
862 	if (ptr)
863 		bch2_bkey_drop_ptr_noerror(k, ptr);
864 }
865 
866 const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned dev)
867 {
868 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
869 
870 	bkey_for_each_ptr(ptrs, ptr)
871 		if (ptr->dev == dev)
872 			return ptr;
873 
874 	return NULL;
875 }
876 
877 bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
878 {
879 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
880 	struct bch_dev *ca;
881 	bool ret = false;
882 
883 	rcu_read_lock();
884 	bkey_for_each_ptr(ptrs, ptr)
885 		if (bch2_dev_in_target(c, ptr->dev, target) &&
886 		    (ca = bch2_dev_rcu(c, ptr->dev)) &&
887 		    (!ptr->cached ||
888 		     !dev_ptr_stale_rcu(ca, ptr))) {
889 			ret = true;
890 			break;
891 		}
892 	rcu_read_unlock();
893 
894 	return ret;
895 }
896 
897 bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
898 			   struct bch_extent_ptr m, u64 offset)
899 {
900 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
901 	const union bch_extent_entry *entry;
902 	struct extent_ptr_decoded p;
903 
904 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
905 		if (p.ptr.dev	== m.dev &&
906 		    p.ptr.gen	== m.gen &&
907 		    (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
908 		    (s64) m.offset  - offset)
909 			return true;
910 
911 	return false;
912 }
913 
914 /*
915  * Returns true if two extents refer to the same data:
916  */
917 bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2)
918 {
919 	if (k1.k->type != k2.k->type)
920 		return false;
921 
922 	if (bkey_extent_is_direct_data(k1.k)) {
923 		struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1);
924 		struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2);
925 		const union bch_extent_entry *entry1, *entry2;
926 		struct extent_ptr_decoded p1, p2;
927 
928 		if (bkey_extent_is_unwritten(k1) != bkey_extent_is_unwritten(k2))
929 			return false;
930 
931 		bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
932 			bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
933 				if (p1.ptr.dev		== p2.ptr.dev &&
934 				    p1.ptr.gen		== p2.ptr.gen &&
935 				    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
936 				    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
937 					return true;
938 
939 		return false;
940 	} else {
941 		/* KEY_TYPE_deleted, etc. */
942 		return true;
943 	}
944 }
945 
946 struct bch_extent_ptr *
947 bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1, struct bkey_s k2)
948 {
949 	struct bkey_ptrs ptrs2 = bch2_bkey_ptrs(k2);
950 	union bch_extent_entry *entry2;
951 	struct extent_ptr_decoded p2;
952 
953 	bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
954 		if (p1.ptr.dev		== p2.ptr.dev &&
955 		    p1.ptr.gen		== p2.ptr.gen &&
956 		    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
957 		    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
958 			return &entry2->ptr;
959 
960 	return NULL;
961 }
962 
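/*
 * Mark @ptr as cached; any stripe pointer that applied to it is dropped, since
 * cached pointers aren't erasure coded:
 */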
963 void bch2_extent_ptr_set_cached(struct bkey_s k, struct bch_extent_ptr *ptr)
964 {
965 	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
966 	union bch_extent_entry *entry;
967 	union bch_extent_entry *ec = NULL;
968 
969 	bkey_extent_entry_for_each(ptrs, entry) {
970 		if (&entry->ptr == ptr) {
971 			ptr->cached = true;
972 			if (ec)
973 				extent_entry_drop(k, ec);
974 			return;
975 		}
976 
977 		if (extent_entry_is_stripe_ptr(entry))
978 			ec = entry;
979 		else if (extent_entry_is_ptr(entry))
980 			ec = NULL;
981 	}
982 
983 	BUG();
984 }
985 
986 /*
987  * bch_extent_normalize - clean up an extent, dropping stale pointers etc.
988  *
989  * Returns true if @k should be dropped entirely
990  *
991  * For existing keys, only called when btree nodes are being rewritten, not when
992  * they're merely being compacted/resorted in memory.
993  */
994 bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
995 {
996 	struct bch_dev *ca;
997 
998 	rcu_read_lock();
999 	bch2_bkey_drop_ptrs(k, ptr,
1000 		ptr->cached &&
1001 		(ca = bch2_dev_rcu(c, ptr->dev)) &&
1002 		dev_ptr_stale_rcu(ca, ptr) > 0);
1003 	rcu_read_unlock();
1004 
1005 	return bkey_deleted(k.k);
1006 }
1007 
1008 void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struct bch_extent_ptr *ptr)
1009 {
1010 	out->atomic++;
1011 	rcu_read_lock();
1012 	struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
1013 	if (!ca) {
1014 		prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
1015 			   (u64) ptr->offset, ptr->gen,
1016 			   ptr->cached ? " cached" : "");
1017 	} else {
1018 		u32 offset;
1019 		u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);
1020 
1021 		prt_printf(out, "ptr: %u:%llu:%u gen %u",
1022 			   ptr->dev, b, offset, ptr->gen);
1023 		if (ptr->cached)
1024 			prt_str(out, " cached");
1025 		if (ptr->unwritten)
1026 			prt_str(out, " unwritten");
1027 		int stale = dev_ptr_stale_rcu(ca, ptr);
1028 		if (stale > 0)
1029 			prt_printf(out, " stale");
1030 		else if (stale)
1031 			prt_printf(out, " invalid");
1032 	}
1033 	rcu_read_unlock();
1034 	--out->atomic;
1035 }
1036 
1037 void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
1038 			    struct bkey_s_c k)
1039 {
1040 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1041 	const union bch_extent_entry *entry;
1042 	bool first = true;
1043 
1044 	if (c)
1045 		prt_printf(out, "durability: %u ", bch2_bkey_durability_safe(c, k));
1046 
1047 	bkey_extent_entry_for_each(ptrs, entry) {
1048 		if (!first)
1049 			prt_printf(out, " ");
1050 
1051 		switch (__extent_entry_type(entry)) {
1052 		case BCH_EXTENT_ENTRY_ptr:
1053 			bch2_extent_ptr_to_text(out, c, entry_to_ptr(entry));
1054 			break;
1055 
1056 		case BCH_EXTENT_ENTRY_crc32:
1057 		case BCH_EXTENT_ENTRY_crc64:
1058 		case BCH_EXTENT_ENTRY_crc128: {
1059 			struct bch_extent_crc_unpacked crc =
1060 				bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
1061 
1062 			prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum ",
1063 			       crc.compressed_size,
1064 			       crc.uncompressed_size,
1065 			       crc.offset, crc.nonce);
1066 			bch2_prt_csum_type(out, crc.csum_type);
1067 			prt_str(out, " compress ");
1068 			bch2_prt_compression_type(out, crc.compression_type);
1069 			break;
1070 		}
1071 		case BCH_EXTENT_ENTRY_stripe_ptr: {
1072 			const struct bch_extent_stripe_ptr *ec = &entry->stripe_ptr;
1073 
1074 			prt_printf(out, "ec: idx %llu block %u",
1075 			       (u64) ec->idx, ec->block);
1076 			break;
1077 		}
1078 		case BCH_EXTENT_ENTRY_rebalance: {
1079 			const struct bch_extent_rebalance *r = &entry->rebalance;
1080 
1081 			prt_str(out, "rebalance: target ");
1082 			if (c)
1083 				bch2_target_to_text(out, c, r->target);
1084 			else
1085 				prt_printf(out, "%u", r->target);
1086 			prt_str(out, " compression ");
1087 			bch2_compression_opt_to_text(out, r->compression);
1088 			break;
1089 		}
1090 		default:
1091 			prt_printf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
1092 			return;
1093 		}
1094 
1095 		first = false;
1096 	}
1097 }
1098 
1099 static int extent_ptr_invalid(struct bch_fs *c,
1100 			      struct bkey_s_c k,
1101 			      enum bch_validate_flags flags,
1102 			      const struct bch_extent_ptr *ptr,
1103 			      unsigned size_ondisk,
1104 			      bool metadata,
1105 			      struct printbuf *err)
1106 {
1107 	int ret = 0;
1108 
1109 	rcu_read_lock();
1110 	struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
1111 	if (!ca) {
1112 		rcu_read_unlock();
1113 		return 0;
1114 	}
1115 	u32 bucket_offset;
1116 	u64 bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset);
1117 	unsigned first_bucket	= ca->mi.first_bucket;
1118 	u64 nbuckets		= ca->mi.nbuckets;
1119 	unsigned bucket_size	= ca->mi.bucket_size;
1120 	rcu_read_unlock();
1121 
1122 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1123 	bkey_for_each_ptr(ptrs, ptr2)
1124 		bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev, c, err,
1125 				 ptr_to_duplicate_device,
1126 				 "multiple pointers to same device (%u)", ptr->dev);
1127 
1128 
1129 	bkey_fsck_err_on(bucket >= nbuckets, c, err,
1130 			 ptr_after_last_bucket,
1131 			 "pointer past last bucket (%llu > %llu)", bucket, nbuckets);
1132 	bkey_fsck_err_on(bucket < first_bucket, c, err,
1133 			 ptr_before_first_bucket,
1134 			 "pointer before first bucket (%llu < %u)", bucket, first_bucket);
1135 	bkey_fsck_err_on(bucket_offset + size_ondisk > bucket_size, c, err,
1136 			 ptr_spans_multiple_buckets,
1137 			 "pointer spans multiple buckets (%u + %u > %u)",
1138 		       bucket_offset, size_ondisk, bucket_size);
1139 fsck_err:
1140 	return ret;
1141 }
1142 
1143 int bch2_bkey_ptrs_invalid(struct bch_fs *c, struct bkey_s_c k,
1144 			   enum bch_validate_flags flags,
1145 			   struct printbuf *err)
1146 {
1147 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1148 	const union bch_extent_entry *entry;
1149 	struct bch_extent_crc_unpacked crc;
1150 	unsigned size_ondisk = k.k->size;
1151 	unsigned nonce = UINT_MAX;
1152 	unsigned nr_ptrs = 0;
1153 	bool have_written = false, have_unwritten = false, have_ec = false, crc_since_last_ptr = false;
1154 	int ret = 0;
1155 
1156 	if (bkey_is_btree_ptr(k.k))
1157 		size_ondisk = btree_sectors(c);
1158 
1159 	bkey_extent_entry_for_each(ptrs, entry) {
1160 		bkey_fsck_err_on(__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX, c, err,
1161 			extent_ptrs_invalid_entry,
1162 			"invalid extent entry type (got %u, max %u)",
1163 			__extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX);
1164 
1165 		bkey_fsck_err_on(bkey_is_btree_ptr(k.k) &&
1166 				 !extent_entry_is_ptr(entry), c, err,
1167 				 btree_ptr_has_non_ptr,
1168 				 "has non ptr field");
1169 
1170 		switch (extent_entry_type(entry)) {
1171 		case BCH_EXTENT_ENTRY_ptr:
1172 			ret = extent_ptr_invalid(c, k, flags, &entry->ptr,
1173 						 size_ondisk, false, err);
1174 			if (ret)
1175 				return ret;
1176 
1177 			bkey_fsck_err_on(entry->ptr.cached && have_ec, c, err,
1178 					 ptr_cached_and_erasure_coded,
1179 					 "cached, erasure coded ptr");
1180 
1181 			if (!entry->ptr.unwritten)
1182 				have_written = true;
1183 			else
1184 				have_unwritten = true;
1185 
1186 			have_ec = false;
1187 			crc_since_last_ptr = false;
1188 			nr_ptrs++;
1189 			break;
1190 		case BCH_EXTENT_ENTRY_crc32:
1191 		case BCH_EXTENT_ENTRY_crc64:
1192 		case BCH_EXTENT_ENTRY_crc128:
1193 			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
1194 
1195 			bkey_fsck_err_on(crc.offset + crc.live_size > crc.uncompressed_size, c, err,
1196 					 ptr_crc_uncompressed_size_too_small,
1197 					 "checksum offset + key size > uncompressed size");
1198 			bkey_fsck_err_on(!bch2_checksum_type_valid(c, crc.csum_type), c, err,
1199 					 ptr_crc_csum_type_unknown,
1200 					 "invalid checksum type");
1201 			bkey_fsck_err_on(crc.compression_type >= BCH_COMPRESSION_TYPE_NR, c, err,
1202 					 ptr_crc_compression_type_unknown,
1203 					 "invalid compression type");
1204 
1205 			if (bch2_csum_type_is_encryption(crc.csum_type)) {
1206 				if (nonce == UINT_MAX)
1207 					nonce = crc.offset + crc.nonce;
1208 				else if (nonce != crc.offset + crc.nonce)
1209 					bkey_fsck_err(c, err, ptr_crc_nonce_mismatch,
1210 						      "incorrect nonce");
1211 			}
1212 
1213 			bkey_fsck_err_on(crc_since_last_ptr, c, err,
1214 					 ptr_crc_redundant,
1215 					 "redundant crc entry");
1216 			crc_since_last_ptr = true;
1217 
1218 			bkey_fsck_err_on(crc_is_encoded(crc) &&
1219 					 (crc.uncompressed_size > c->opts.encoded_extent_max >> 9) &&
1220 					 (flags & (BCH_VALIDATE_write|BCH_VALIDATE_commit)), c, err,
1221 					 ptr_crc_uncompressed_size_too_big,
1222 					 "too large encoded extent");
1223 
1224 			size_ondisk = crc.compressed_size;
1225 			break;
1226 		case BCH_EXTENT_ENTRY_stripe_ptr:
1227 			bkey_fsck_err_on(have_ec, c, err,
1228 					 ptr_stripe_redundant,
1229 					 "redundant stripe entry");
1230 			have_ec = true;
1231 			break;
1232 		case BCH_EXTENT_ENTRY_rebalance: {
1233 			const struct bch_extent_rebalance *r = &entry->rebalance;
1234 
1235 			if (!bch2_compression_opt_valid(r->compression)) {
1236 				struct bch_compression_opt opt = __bch2_compression_decode(r->compression);
1237 				prt_printf(err, "invalid compression opt %u:%u",
1238 					   opt.type, opt.level);
1239 				return -BCH_ERR_invalid_bkey;
1240 			}
1241 			break;
1242 		}
1243 		}
1244 	}
1245 
1246 	bkey_fsck_err_on(!nr_ptrs, c, err,
1247 			 extent_ptrs_no_ptrs,
1248 			 "no ptrs");
1249 	bkey_fsck_err_on(nr_ptrs > BCH_BKEY_PTRS_MAX, c, err,
1250 			 extent_ptrs_too_many_ptrs,
1251 			 "too many ptrs: %u > %u", nr_ptrs, BCH_BKEY_PTRS_MAX);
1252 	bkey_fsck_err_on(have_written && have_unwritten, c, err,
1253 			 extent_ptrs_written_and_unwritten,
1254 			 "extent with unwritten and written ptrs");
1255 	bkey_fsck_err_on(k.k->type != KEY_TYPE_extent && have_unwritten, c, err,
1256 			 extent_ptrs_unwritten,
1257 			 "has unwritten ptrs");
1258 	bkey_fsck_err_on(crc_since_last_ptr, c, err,
1259 			 extent_ptrs_redundant_crc,
1260 			 "redundant crc entry");
1261 	bkey_fsck_err_on(have_ec, c, err,
1262 			 extent_ptrs_redundant_stripe,
1263 			 "redundant stripe entry");
1264 fsck_err:
1265 	return ret;
1266 }
1267 
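/*
 * Byte swap the value of @k when converting endianness: every u64 is swabbed,
 * then the checksum fields inside crc entries are fixed back up:
 */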
1268 void bch2_ptr_swab(struct bkey_s k)
1269 {
1270 	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
1271 	union bch_extent_entry *entry;
1272 	u64 *d;
1273 
1274 	for (d =  (u64 *) ptrs.start;
1275 	     d != (u64 *) ptrs.end;
1276 	     d++)
1277 		*d = swab64(*d);
1278 
1279 	for (entry = ptrs.start;
1280 	     entry < ptrs.end;
1281 	     entry = extent_entry_next(entry)) {
1282 		switch (extent_entry_type(entry)) {
1283 		case BCH_EXTENT_ENTRY_ptr:
1284 			break;
1285 		case BCH_EXTENT_ENTRY_crc32:
1286 			entry->crc32.csum = swab32(entry->crc32.csum);
1287 			break;
1288 		case BCH_EXTENT_ENTRY_crc64:
1289 			entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
1290 			entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
1291 			break;
1292 		case BCH_EXTENT_ENTRY_crc128:
1293 			entry->crc128.csum.hi = (__force __le64)
1294 				swab64((__force u64) entry->crc128.csum.hi);
1295 			entry->crc128.csum.lo = (__force __le64)
1296 				swab64((__force u64) entry->crc128.csum.lo);
1297 			break;
1298 		case BCH_EXTENT_ENTRY_stripe_ptr:
1299 			break;
1300 		case BCH_EXTENT_ENTRY_rebalance:
1301 			break;
1302 		}
1303 	}
1304 }
1305 
1306 const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k)
1307 {
1308 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1309 	const union bch_extent_entry *entry;
1310 
1311 	bkey_extent_entry_for_each(ptrs, entry)
1312 		if (__extent_entry_type(entry) == BCH_EXTENT_ENTRY_rebalance)
1313 			return &entry->rebalance;
1314 
1315 	return NULL;
1316 }
1317 
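/*
 * Returns a mask of the pointers that would have to be rewritten for this
 * extent to match the given background target and compression options:
 */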
1318 unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c, struct bkey_s_c k,
1319 				       unsigned target, unsigned compression)
1320 {
1321 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1322 	unsigned rewrite_ptrs = 0;
1323 
1324 	if (compression) {
1325 		unsigned compression_type = bch2_compression_opt_to_type(compression);
1326 		const union bch_extent_entry *entry;
1327 		struct extent_ptr_decoded p;
1328 		unsigned i = 0;
1329 
1330 		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1331 			if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
1332 			    p.ptr.unwritten) {
1333 				rewrite_ptrs = 0;
1334 				goto incompressible;
1335 			}
1336 
1337 			if (!p.ptr.cached && p.crc.compression_type != compression_type)
1338 				rewrite_ptrs |= 1U << i;
1339 			i++;
1340 		}
1341 	}
1342 incompressible:
1343 	if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) {
1344 		unsigned i = 0;
1345 
1346 		bkey_for_each_ptr(ptrs, ptr) {
1347 			if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, target))
1348 				rewrite_ptrs |= 1U << i;
1349 			i++;
1350 		}
1351 	}
1352 
1353 	return rewrite_ptrs;
1354 }
1355 
1356 bool bch2_bkey_needs_rebalance(struct bch_fs *c, struct bkey_s_c k)
1357 {
1358 	const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);
1359 
1360 	/*
1361 	 * If it's an indirect extent, we don't delete the rebalance entry when
1362 	 * done so that we know what options were applied - check if it still
1363 	 * needs work done:
1364 	 */
1365 	if (r &&
1366 	    k.k->type == KEY_TYPE_reflink_v &&
1367 	    !bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression))
1368 		r = NULL;
1369 
1370 	return r != NULL;
1371 }
1372 
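/*
 * Add, update or drop the rebalance entry on @_k to reflect @opts; for
 * indirect extents, options already present in the key take precedence:
 */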
1373 int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bkey_i *_k,
1374 				  struct bch_io_opts *opts)
1375 {
1376 	struct bkey_s k = bkey_i_to_s(_k);
1377 	struct bch_extent_rebalance *r;
1378 	unsigned target = opts->background_target;
1379 	unsigned compression = background_compression(*opts);
1380 	bool needs_rebalance;
1381 
1382 	if (!bkey_extent_is_direct_data(k.k))
1383 		return 0;
1384 
1385 	/* get existing rebalance entry: */
1386 	r = (struct bch_extent_rebalance *) bch2_bkey_rebalance_opts(k.s_c);
1387 	if (r) {
1388 		if (k.k->type == KEY_TYPE_reflink_v) {
1389 			/*
1390 			 * indirect extents: existing options take precedence,
1391 			 * so that we don't move extents back and forth if
1392 			 * they're referenced by different inodes with different
1393 			 * options:
1394 			 */
1395 			if (r->target)
1396 				target = r->target;
1397 			if (r->compression)
1398 				compression = r->compression;
1399 		}
1400 
1401 		r->target	= target;
1402 		r->compression	= compression;
1403 	}
1404 
1405 	needs_rebalance = bch2_bkey_ptrs_need_rebalance(c, k.s_c, target, compression);
1406 
1407 	if (needs_rebalance && !r) {
1408 		union bch_extent_entry *new = bkey_val_end(k);
1409 
1410 		new->rebalance.type		= 1U << BCH_EXTENT_ENTRY_rebalance;
1411 		new->rebalance.compression	= compression;
1412 		new->rebalance.target		= target;
1413 		new->rebalance.unused		= 0;
1414 		k.k->u64s += extent_entry_u64s(new);
1415 	} else if (!needs_rebalance && r && k.k->type != KEY_TYPE_reflink_v) {
1416 		/*
1417 		 * For indirect extents, don't delete the rebalance entry when
1418 		 * we're finished so that we know we specifically moved it or
1419 		 * compressed it to its current location/compression type
1420 		 */
1421 		extent_entry_drop(k, (union bch_extent_entry *) r);
1422 	}
1423 
1424 	return 0;
1425 }
1426 
1427 /* Generic extent code: */
1428 
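/*
 * Trim the front of @k so it starts at @where: the size shrinks and pointer,
 * crc or inline data offsets are advanced. Returns minus the number of value
 * u64s dropped:
 */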
1429 int bch2_cut_front_s(struct bpos where, struct bkey_s k)
1430 {
1431 	unsigned new_val_u64s = bkey_val_u64s(k.k);
1432 	int val_u64s_delta;
1433 	u64 sub;
1434 
1435 	if (bkey_le(where, bkey_start_pos(k.k)))
1436 		return 0;
1437 
1438 	EBUG_ON(bkey_gt(where, k.k->p));
1439 
1440 	sub = where.offset - bkey_start_offset(k.k);
1441 
1442 	k.k->size -= sub;
1443 
1444 	if (!k.k->size) {
1445 		k.k->type = KEY_TYPE_deleted;
1446 		new_val_u64s = 0;
1447 	}
1448 
1449 	switch (k.k->type) {
1450 	case KEY_TYPE_extent:
1451 	case KEY_TYPE_reflink_v: {
1452 		struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
1453 		union bch_extent_entry *entry;
1454 		bool seen_crc = false;
1455 
1456 		bkey_extent_entry_for_each(ptrs, entry) {
1457 			switch (extent_entry_type(entry)) {
1458 			case BCH_EXTENT_ENTRY_ptr:
1459 				if (!seen_crc)
1460 					entry->ptr.offset += sub;
1461 				break;
1462 			case BCH_EXTENT_ENTRY_crc32:
1463 				entry->crc32.offset += sub;
1464 				break;
1465 			case BCH_EXTENT_ENTRY_crc64:
1466 				entry->crc64.offset += sub;
1467 				break;
1468 			case BCH_EXTENT_ENTRY_crc128:
1469 				entry->crc128.offset += sub;
1470 				break;
1471 			case BCH_EXTENT_ENTRY_stripe_ptr:
1472 				break;
1473 			case BCH_EXTENT_ENTRY_rebalance:
1474 				break;
1475 			}
1476 
1477 			if (extent_entry_is_crc(entry))
1478 				seen_crc = true;
1479 		}
1480 
1481 		break;
1482 	}
1483 	case KEY_TYPE_reflink_p: {
1484 		struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);
1485 
1486 		le64_add_cpu(&p.v->idx, sub);
1487 		break;
1488 	}
1489 	case KEY_TYPE_inline_data:
1490 	case KEY_TYPE_indirect_inline_data: {
1491 		void *p = bkey_inline_data_p(k);
1492 		unsigned bytes = bkey_inline_data_bytes(k.k);
1493 
1494 		sub = min_t(u64, sub << 9, bytes);
1495 
1496 		memmove(p, p + sub, bytes - sub);
1497 
1498 		new_val_u64s -= sub >> 3;
1499 		break;
1500 	}
1501 	}
1502 
1503 	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
1504 	BUG_ON(val_u64s_delta < 0);
1505 
1506 	set_bkey_val_u64s(k.k, new_val_u64s);
1507 	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
1508 	return -val_u64s_delta;
1509 }
1510 
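/*
 * Trim the back of @k so it ends at @where; returns minus the number of value
 * u64s dropped:
 */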
1511 int bch2_cut_back_s(struct bpos where, struct bkey_s k)
1512 {
1513 	unsigned new_val_u64s = bkey_val_u64s(k.k);
1514 	int val_u64s_delta;
1515 	u64 len = 0;
1516 
1517 	if (bkey_ge(where, k.k->p))
1518 		return 0;
1519 
1520 	EBUG_ON(bkey_lt(where, bkey_start_pos(k.k)));
1521 
1522 	len = where.offset - bkey_start_offset(k.k);
1523 
1524 	k.k->p.offset = where.offset;
1525 	k.k->size = len;
1526 
1527 	if (!len) {
1528 		k.k->type = KEY_TYPE_deleted;
1529 		new_val_u64s = 0;
1530 	}
1531 
1532 	switch (k.k->type) {
1533 	case KEY_TYPE_inline_data:
1534 	case KEY_TYPE_indirect_inline_data:
1535 		new_val_u64s = (bkey_inline_data_offset(k.k) +
1536 				min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3;
1537 		break;
1538 	}
1539 
1540 	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
1541 	BUG_ON(val_u64s_delta < 0);
1542 
1543 	set_bkey_val_u64s(k.k, new_val_u64s);
1544 	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
1545 	return -val_u64s_delta;
1546 }
1547