// SPDX-License-Identifier: GPL-2.0
/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>

/* Bios with headers */
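
/*
 * A bbio is a bio with a small header in front of it: the bkey the IO
 * is for, plus the submit time for latency accounting. The struct bio
 * must be the last member so the inline bvecs can sit after it.
 */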

void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	mempool_free(b, &c->bio_meta);
}

struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb));

	return bio;
}
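
/*
 * Typical use, as a rough sketch (my_endio and cl are hypothetical
 * names; a real endio callback is expected to finish by calling
 * bch_bbio_endio(), which puts the closure stored in bi_private):
 *
 *	struct bio *bio = bch_bbio_alloc(c);
 *
 *	bio->bi_end_io	= my_endio;
 *	bio->bi_private	= cl;
 *	bch_submit_bbio(bio, c, k, 0);
 */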

void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

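	/*
	 * bbio keys only ever carry a single pointer; its offset is the
	 * sector on the cache device this IO targets.
	 */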
	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio_set_dev(bio, c->cache->bdev);

	b->submit_time_us = local_clock_us();
	closure_bio_submit(c, bio, bio->bi_private);
}

void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned int ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

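	/*
	 * Copy just pointer @ptr of @k into the bbio header;
	 * __bch_submit_bbio() routes the bio from that copy.
	 */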
	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}

/* IO errors */

void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
{
	unsigned int errors;

	WARN_ONCE(!dc, "NULL pointer of struct cached_dev");

	/*
	 * Read-ahead requests on a degrading and recovering md raid
	 * (e.g. raid6) device might be failed immediately by the md
	 * raid code, which is not a real hardware media failure. So
	 * we shouldn't count failed REQ_RAHEAD bios in dc->io_errors.
	 */
	if (bio->bi_opf & REQ_RAHEAD) {
		pr_warn_ratelimited("%s: Read-ahead I/O failed on backing device, ignore\n",
				    dc->backing_dev_name);
		return;
	}

	errors = atomic_add_return(1, &dc->io_errors);
	if (errors < dc->error_limit)
		pr_err("%s: IO error on backing device, unrecoverable\n",
			dc->backing_dev_name);
	else
		bch_cached_dev_error(dc);
}

void bch_count_io_errors(struct cache *ca,
			 blk_status_t error,
			 int is_read,
			 const char *m)
{
	/*
	 * The halflife of an error is:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
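	 *
	 * i.e. every error_decay IOs the error count is scaled by
	 * 127/128, so it takes roughly 88 such refreshes for an error
	 * to decay to half its weight: (127/128)^88 ~= 1/2.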
	 */

	if (ca->set->error_decay) {
		unsigned int count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned int errors;
			unsigned int old = count;
			unsigned int new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
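			 * (if atomic_cmpxchg() fails, another thread got
			 * there first; retry with the count it returned)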
			 */

			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

	if (error) {
		unsigned int errors = atomic_add_return(1 << IO_ERROR_SHIFT,
						    &ca->io_errors);
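		/*
		 * io_errors is a fixed-point counter: each error adds
		 * 1 << IO_ERROR_SHIFT, so the 127/128 decay above keeps
		 * some fractional precision; shift back down before
		 * comparing against the (integer) error_limit.
		 */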
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%s: IO error on %s%s\n",
			       ca->cache_dev_name, m,
			       is_read ? ", recovering." : ".");
		else
			bch_cache_set_error(ca->set,
					    "%s: too many IO errors %s\n",
					    ca->cache_dev_name, m);
	}
}

void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      blk_status_t error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = c->cache;
	int is_read = (bio_data_dir(bio) == READ ? 1 : 0);

	unsigned int threshold = op_is_write(bio_op(bio))
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

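	/*
	 * Congestion tracking: an IO that takes longer than the
	 * configured threshold drives c->congested negative, roughly in
	 * proportion to how late it was; IOs under the threshold walk
	 * it back towards zero. The request code uses this to decide
	 * when to bypass the cache (see bch_get_congested()).
	 */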
	if (threshold) {
		unsigned int t = local_clock_us();
		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;

			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, is_read, m);
}

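/*
 * Common endio path for bbios: account latency and errors, drop the
 * bio's reference, and put the closure the submitter stored in
 * bi_private.
 */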
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    blk_status_t error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}