/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
#include "dm-transaction-manager.h"
#include "dm-space-map.h"
#include "dm-space-map-disk.h"
#include "dm-space-map-metadata.h"
#include "dm-persistent-data-internal.h"

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "transaction manager"

/*----------------------------------------------------------------*/

#define PREFETCH_SIZE 128
#define PREFETCH_BITS 7
#define PREFETCH_SENTINEL ((dm_block_t) -1ULL)

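/*
 * Blocks that a non-blocking clone fails to read are remembered here so
 * the real transaction manager can prefetch them later.  It's a small,
 * fixed size, lossy hash set.
 */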
struct prefetch_set {
	struct mutex lock;
	dm_block_t blocks[PREFETCH_SIZE];
};

static unsigned prefetch_hash(dm_block_t b)
{
	return hash_64(b, PREFETCH_BITS);
}

static void prefetch_wipe(struct prefetch_set *p)
{
	unsigned i;
	for (i = 0; i < PREFETCH_SIZE; i++)
		p->blocks[i] = PREFETCH_SENTINEL;
}

static void prefetch_init(struct prefetch_set *p)
{
	mutex_init(&p->lock);
	prefetch_wipe(p);
}

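/*
 * Record @b for a later prefetch.  If the slot is already occupied the
 * new block is simply dropped; prefetching is purely an optimisation.
 */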
static void prefetch_add(struct prefetch_set *p, dm_block_t b)
{
	unsigned h = prefetch_hash(b);

	mutex_lock(&p->lock);
	if (p->blocks[h] == PREFETCH_SENTINEL)
		p->blocks[h] = b;

	mutex_unlock(&p->lock);
}

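/*
 * Issue prefetches for every recorded block and reset the set.
 */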
static void prefetch_issue(struct prefetch_set *p, struct dm_block_manager *bm)
{
	unsigned i;

	mutex_lock(&p->lock);

	for (i = 0; i < PREFETCH_SIZE; i++)
		if (p->blocks[i] != PREFETCH_SENTINEL) {
			dm_bm_prefetch(bm, p->blocks[i]);
			p->blocks[i] = PREFETCH_SENTINEL;
		}

	mutex_unlock(&p->lock);
}

/*----------------------------------------------------------------*/

struct shadow_info {
	struct hlist_node hlist;
	dm_block_t where;
};

/*
 * It would be nice if we scaled with the size of the transaction.
 */
#define DM_HASH_SIZE 256
#define DM_HASH_MASK (DM_HASH_SIZE - 1)

struct dm_transaction_manager {
	int is_clone;
	struct dm_transaction_manager *real;

	struct dm_block_manager *bm;
	struct dm_space_map *sm;

	spinlock_t lock;
	struct hlist_head buckets[DM_HASH_SIZE];

	struct prefetch_set prefetches;
};

/*----------------------------------------------------------------*/

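/*
 * Returns non zero if @b has already been shadowed (or freshly
 * allocated) in the current transaction.
 */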
static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
{
	int r = 0;
	unsigned bucket = dm_hash_block(b, DM_HASH_MASK);
	struct shadow_info *si;

	spin_lock(&tm->lock);
	hlist_for_each_entry(si, tm->buckets + bucket, hlist)
		if (si->where == b) {
			r = 1;
			break;
		}
	spin_unlock(&tm->lock);

	return r;
}

/*
 * This can silently fail if there's no memory.  We're ok with this since
 * creating redundant shadows causes no harm.
 */
static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
{
	unsigned bucket;
	struct shadow_info *si;

	si = kmalloc(sizeof(*si), GFP_NOIO);
	if (si) {
		si->where = b;
		bucket = dm_hash_block(b, DM_HASH_MASK);
		spin_lock(&tm->lock);
		hlist_add_head(&si->hlist, tm->buckets + bucket);
		spin_unlock(&tm->lock);
	}
}

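/*
 * Free every entry in the shadow table and reinitialise the buckets.
 * Called on commit and destroy, since shadows are only valid within a
 * single transaction.
 */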
static void wipe_shadow_table(struct dm_transaction_manager *tm)
{
	struct shadow_info *si;
	struct hlist_node *tmp;
	struct hlist_head *bucket;
	int i;

	spin_lock(&tm->lock);
	for (i = 0; i < DM_HASH_SIZE; i++) {
		bucket = tm->buckets + i;
		hlist_for_each_entry_safe(si, tmp, bucket, hlist)
			kfree(si);

		INIT_HLIST_HEAD(bucket);
	}

	spin_unlock(&tm->lock);
}

/*----------------------------------------------------------------*/

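/*
 * Create a transaction manager that wraps @bm and accounts for blocks
 * with @sm.  Returns an ERR_PTR on allocation failure.
 */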
static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
						   struct dm_space_map *sm)
{
	int i;
	struct dm_transaction_manager *tm;

	tm = kmalloc(sizeof(*tm), GFP_KERNEL);
	if (!tm)
		return ERR_PTR(-ENOMEM);

	tm->is_clone = 0;
	tm->real = NULL;
	tm->bm = bm;
	tm->sm = sm;

	spin_lock_init(&tm->lock);
	for (i = 0; i < DM_HASH_SIZE; i++)
		INIT_HLIST_HEAD(tm->buckets + i);

	prefetch_init(&tm->prefetches);

	return tm;
}

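/*
 * A non-blocking clone shares @real's block manager: reads go through
 * try locks and a miss is queued on @real's prefetch set rather than
 * blocking.  Returns NULL on allocation failure.
 */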
struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transaction_manager *real)
{
	struct dm_transaction_manager *tm;

	tm = kmalloc(sizeof(*tm), GFP_KERNEL);
	if (tm) {
		tm->is_clone = 1;
		tm->real = real;
	}

	return tm;
}
EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);

void dm_tm_destroy(struct dm_transaction_manager *tm)
{
	if (!tm->is_clone)
		wipe_shadow_table(tm);

	kfree(tm);
}
EXPORT_SYMBOL_GPL(dm_tm_destroy);

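/*
 * Commit the space map's outstanding changes and flush the block
 * manager's buffers in preparation for dm_tm_commit().  Not supported
 * on a non-blocking clone.
 */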
int dm_tm_pre_commit(struct dm_transaction_manager *tm)
{
	int r;

	if (tm->is_clone)
		return -EWOULDBLOCK;

	r = dm_sm_commit(tm->sm);
	if (r < 0)
		return r;

	return dm_bm_flush(tm->bm);
}
EXPORT_SYMBOL_GPL(dm_tm_pre_commit);

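/*
 * Finish the transaction: forget the shadows, unlock the superblock
 * @root and flush, making the new transaction visible on disk.
 */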
int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root)
{
	if (tm->is_clone)
		return -EWOULDBLOCK;

	wipe_shadow_table(tm);
	dm_bm_unlock(root);

	return dm_bm_flush(tm->bm);
}
EXPORT_SYMBOL_GPL(dm_tm_commit);

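/*
 * Allocate a fresh block via the space map, take a zeroing write lock
 * on it and record it as a shadow so it won't be copied again this
 * transaction.
 */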
int dm_tm_new_block(struct dm_transaction_manager *tm,
		    struct dm_block_validator *v,
		    struct dm_block **result)
{
	int r;
	dm_block_t new_block;

	if (tm->is_clone)
		return -EWOULDBLOCK;

	r = dm_sm_new_block(tm->sm, &new_block);
	if (r < 0)
		return r;

	r = dm_bm_write_lock_zero(tm->bm, new_block, v, result);
	if (r < 0) {
		dm_sm_dec_block(tm->sm, new_block);
		return r;
	}

	/*
	 * New blocks count as shadows in that they don't need to be
	 * shadowed again.
	 */
	insert_shadow(tm, new_block);

	return 0;
}

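/*
 * Copy @orig into a newly allocated block and hand back the copy, write
 * locked.  The reference held on @orig is dropped.
 */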
static int __shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
			  struct dm_block_validator *v,
			  struct dm_block **result)
{
	int r;
	dm_block_t new;
	struct dm_block *orig_block;

	r = dm_sm_new_block(tm->sm, &new);
	if (r < 0)
		return r;

	r = dm_sm_dec_block(tm->sm, orig);
	if (r < 0)
		return r;

	r = dm_bm_read_lock(tm->bm, orig, v, &orig_block);
	if (r < 0)
		return r;

	/*
	 * It would be tempting to use dm_bm_unlock_move here, but some
	 * code, such as the space maps, keeps using the old data structures
	 * secure in the knowledge they won't be changed until the next
	 * transaction.  Using unlock_move would force a synchronous read
	 * since the old block would no longer be in the cache.
	 */
	r = dm_bm_write_lock_zero(tm->bm, new, v, result);
	if (r) {
		dm_bm_unlock(orig_block);
		return r;
	}

	memcpy(dm_block_data(*result), dm_block_data(orig_block),
	       dm_bm_block_size(tm->bm));

	dm_bm_unlock(orig_block);
	return r;
}

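/*
 * Return a write locked copy of @orig that may safely be modified in
 * this transaction.  If @orig has already been shadowed and isn't
 * shared it is simply write locked in place; otherwise it's copied to a
 * new block.  *inc_children tells the caller whether the reference
 * counts of the block's children need incrementing.
 */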
int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
		       struct dm_block_validator *v, struct dm_block **result,
		       int *inc_children)
{
	int r;

	if (tm->is_clone)
		return -EWOULDBLOCK;

	r = dm_sm_count_is_more_than_one(tm->sm, orig, inc_children);
	if (r < 0)
		return r;

	if (is_shadow(tm, orig) && !*inc_children)
		return dm_bm_write_lock(tm->bm, orig, v, result);

	r = __shadow_block(tm, orig, v, result);
	if (r < 0)
		return r;
	insert_shadow(tm, dm_block_location(*result));

	return r;
}
EXPORT_SYMBOL_GPL(dm_tm_shadow_block);

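/*
 * For a non-blocking clone this is a try lock through the real
 * transaction manager; on -EWOULDBLOCK the block is queued for
 * prefetching.  Otherwise it's an ordinary, blocking read lock.
 */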
int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
		    struct dm_block_validator *v,
		    struct dm_block **blk)
{
	if (tm->is_clone) {
		int r = dm_bm_read_try_lock(tm->real->bm, b, v, blk);

		if (r == -EWOULDBLOCK)
			prefetch_add(&tm->real->prefetches, b);

		return r;
	}

	return dm_bm_read_lock(tm->bm, b, v, blk);
}
EXPORT_SYMBOL_GPL(dm_tm_read_lock);

void dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
{
	dm_bm_unlock(b);
}
EXPORT_SYMBOL_GPL(dm_tm_unlock);

void dm_tm_inc(struct dm_transaction_manager *tm, dm_block_t b)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_inc_block(tm->sm, b);
}
EXPORT_SYMBOL_GPL(dm_tm_inc);

void dm_tm_inc_range(struct dm_transaction_manager *tm, dm_block_t b, dm_block_t e)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_inc_blocks(tm->sm, b, e);
}
EXPORT_SYMBOL_GPL(dm_tm_inc_range);

void dm_tm_dec(struct dm_transaction_manager *tm, dm_block_t b)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_dec_block(tm->sm, b);
}
EXPORT_SYMBOL_GPL(dm_tm_dec);

void dm_tm_dec_range(struct dm_transaction_manager *tm, dm_block_t b, dm_block_t e)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_dec_blocks(tm->sm, b, e);
}
EXPORT_SYMBOL_GPL(dm_tm_dec_range);

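/*
 * Coalesce consecutive little-endian block numbers in @value_le into
 * half open [begin, end) runs and call @fn once per run.
 */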
void dm_tm_with_runs(struct dm_transaction_manager *tm,
		     const __le64 *value_le, unsigned count, dm_tm_run_fn fn)
{
	uint64_t b, begin, end;
	bool in_run = false;
	unsigned i;

	for (i = 0; i < count; i++, value_le++) {
		b = le64_to_cpu(*value_le);

		if (in_run) {
			if (b == end)
				end++;
			else {
				fn(tm, begin, end);
				begin = b;
				end = b + 1;
			}
		} else {
			in_run = true;
			begin = b;
			end = b + 1;
		}
	}

	if (in_run)
		fn(tm, begin, end);
}
EXPORT_SYMBOL_GPL(dm_tm_with_runs);

int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b,
	      uint32_t *result)
{
	if (tm->is_clone)
		return -EWOULDBLOCK;

	return dm_sm_get_count(tm->sm, b, result);
}

int dm_tm_block_is_shared(struct dm_transaction_manager *tm, dm_block_t b,
			  int *result)
{
	if (tm->is_clone)
		return -EWOULDBLOCK;

	return dm_sm_count_is_more_than_one(tm->sm, b, result);
}

struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm)
{
	return tm->bm;
}

void dm_tm_issue_prefetches(struct dm_transaction_manager *tm)
{
	prefetch_issue(&tm->prefetches, tm->bm);
}
EXPORT_SYMBOL_GPL(dm_tm_issue_prefetches);

/*----------------------------------------------------------------*/

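/*
 * Common helper for dm_tm_create_with_sm() and dm_tm_open_with_sm():
 * build the metadata space map and the transaction manager, then either
 * format a new space map or load an existing one from @sm_root.
 */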
static int dm_tm_create_internal(struct dm_block_manager *bm,
				 dm_block_t sb_location,
				 struct dm_transaction_manager **tm,
				 struct dm_space_map **sm,
				 int create,
				 void *sm_root, size_t sm_len)
{
	int r;

	*sm = dm_sm_metadata_init();
	if (IS_ERR(*sm))
		return PTR_ERR(*sm);

	*tm = dm_tm_create(bm, *sm);
	if (IS_ERR(*tm)) {
		dm_sm_destroy(*sm);
		return PTR_ERR(*tm);
	}

	if (create) {
		r = dm_sm_metadata_create(*sm, *tm, dm_bm_nr_blocks(bm),
					  sb_location);
		if (r) {
			DMERR("couldn't create metadata space map");
			goto bad;
		}

	} else {
		r = dm_sm_metadata_open(*sm, *tm, sm_root, sm_len);
		if (r) {
			DMERR("couldn't open metadata space map");
			goto bad;
		}
	}

	return 0;

bad:
	dm_tm_destroy(*tm);
	dm_sm_destroy(*sm);
	return r;
}

int dm_tm_create_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
			 struct dm_transaction_manager **tm,
			 struct dm_space_map **sm)
{
	return dm_tm_create_internal(bm, sb_location, tm, sm, 1, NULL, 0);
}
EXPORT_SYMBOL_GPL(dm_tm_create_with_sm);

int dm_tm_open_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
		       void *sm_root, size_t root_len,
		       struct dm_transaction_manager **tm,
		       struct dm_space_map **sm)
{
	return dm_tm_create_internal(bm, sb_location, tm, sm, 0, sm_root, root_len);
}
EXPORT_SYMBOL_GPL(dm_tm_open_with_sm);

/*----------------------------------------------------------------*/