/*
 * Copyright (C) 2017 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-background-tracker.h"

/*----------------------------------------------------------------*/

#define DM_MSG_PREFIX "dm-background-tracker"

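/*
 * A single unit of background work, threaded onto either the issued or
 * queued list and indexed in the pending rb-tree by origin block.
 */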
struct bt_work {
	struct list_head list;
	struct rb_node node;
	struct policy_work work;
};

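/*
 * Tracks the background work requested of a cache policy.  Work sits on
 * the 'queued' list until it's handed over for processing, then moves to
 * the 'issued' list; throughout, it stays in the 'pending' rb-tree so
 * duplicate work for the same origin block can be spotted quickly.
 */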
struct background_tracker {
	unsigned max_work;
	atomic_t pending_promotes;
	atomic_t pending_writebacks;
	atomic_t pending_demotes;

	struct list_head issued;
	struct list_head queued;
	struct rb_root pending;

	struct kmem_cache *work_cache;
};

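/*
 * Create a tracker that will accept at most @max_work pieces of
 * outstanding background work.
 */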
struct background_tracker *btracker_create(unsigned max_work)
{
	struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);

	if (!b) {
		DMERR("couldn't create background_tracker");
		return NULL;
	}

	b->max_work = max_work;
	atomic_set(&b->pending_promotes, 0);
	atomic_set(&b->pending_writebacks, 0);
	atomic_set(&b->pending_demotes, 0);

	INIT_LIST_HEAD(&b->issued);
	INIT_LIST_HEAD(&b->queued);

	b->pending = RB_ROOT;
	b->work_cache = KMEM_CACHE(bt_work, 0);
	if (!b->work_cache) {
		DMERR("couldn't create kmem cache for background work items");
		kfree(b);
		b = NULL;
	}

	return b;
}
EXPORT_SYMBOL_GPL(btracker_create);

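/*
 * Destroys the tracker; the caller must ensure all tracked work has
 * completed first, since destroying the work cache with live objects
 * still in it is a bug.
 */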
void btracker_destroy(struct background_tracker *b)
{
	kmem_cache_destroy(b->work_cache);
	kfree(b);
}
EXPORT_SYMBOL_GPL(btracker_destroy);

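/* Three-way comparison of two origin block addresses. */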
static int cmp_oblock(dm_oblock_t lhs, dm_oblock_t rhs)
{
	if (from_oblock(lhs) < from_oblock(rhs))
		return -1;

	if (from_oblock(rhs) < from_oblock(lhs))
		return 1;

	return 0;
}

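/*
 * Insert @nw into the pending rb-tree, keyed by origin block.  Returns
 * false, without inserting, if work for that block is already present.
 */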
static bool __insert_pending(struct background_tracker *b,
			     struct bt_work *nw)
{
	int cmp;
	struct bt_work *w;
	struct rb_node **new = &b->pending.rb_node, *parent = NULL;

	while (*new) {
		w = container_of(*new, struct bt_work, node);

		parent = *new;
		cmp = cmp_oblock(w->work.oblock, nw->work.oblock);
		if (cmp < 0)
			new = &((*new)->rb_left);

		else if (cmp > 0)
			new = &((*new)->rb_right);

		else
			/* already present */
			return false;
	}

	rb_link_node(&nw->node, parent, new);
	rb_insert_color(&nw->node, &b->pending);

	return true;
}

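/*
 * Returns the pending work for @oblock, or NULL if there is none.
 */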
static struct bt_work *__find_pending(struct background_tracker *b,
				      dm_oblock_t oblock)
{
	int cmp;
	struct bt_work *w;
	struct rb_node **new = &b->pending.rb_node;

	while (*new) {
		w = container_of(*new, struct bt_work, node);

		cmp = cmp_oblock(w->work.oblock, oblock);
		if (cmp < 0)
			new = &((*new)->rb_left);

		else if (cmp > 0)
			new = &((*new)->rb_right);

		else
			break;
	}

	return *new ? w : NULL;
}

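/*
 * Adjust the pending count for @w's operation type by @delta: +1 when
 * queueing, -1 when completing.
 */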
static void update_stats(struct background_tracker *b, struct policy_work *w,
			 int delta)
{
	switch (w->op) {
	case POLICY_PROMOTE:
		atomic_add(delta, &b->pending_promotes);
		break;

	case POLICY_DEMOTE:
		atomic_add(delta, &b->pending_demotes);
		break;

	case POLICY_WRITEBACK:
		atomic_add(delta, &b->pending_writebacks);
		break;
	}
}

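/*
 * Note: these counts cover all tracked work, including work that has
 * been issued but not yet completed.
 */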
unsigned btracker_nr_writebacks_queued(struct background_tracker *b)
{
	return atomic_read(&b->pending_writebacks);
}
EXPORT_SYMBOL_GPL(btracker_nr_writebacks_queued);

unsigned btracker_nr_demotions_queued(struct background_tracker *b)
{
	return atomic_read(&b->pending_demotes);
}
EXPORT_SYMBOL_GPL(btracker_nr_demotions_queued);

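/*
 * Returns true once the total amount of tracked work has hit the cap
 * given to btracker_create().
 */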
static bool max_work_reached(struct background_tracker *b)
{
	return atomic_read(&b->pending_promotes) +
		atomic_read(&b->pending_writebacks) +
		atomic_read(&b->pending_demotes) >= b->max_work;
}

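/*
 * GFP_NOWAIT since callers may well be holding a spin lock; a failed
 * allocation just causes the work to be rejected for now.
 */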
static struct bt_work *alloc_work(struct background_tracker *b)
{
	if (max_work_reached(b))
		return NULL;

	return kmem_cache_alloc(b->work_cache, GFP_NOWAIT);
}

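/*
 * Queue some work within the tracker.  The policy_work is copied, so the
 * caller's copy can be reused.  If @pwork is non-NULL the work is issued
 * immediately and *pwork is pointed at the tracker's copy; otherwise it
 * sits on the queued list until btracker_issue() is called.
 *
 * Returns -ENOMEM if the work limit has been reached or the allocation
 * fails, and -EINVAL if work for the same origin block is already
 * tracked.
 */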
int btracker_queue(struct background_tracker *b,
		   struct policy_work *work,
		   struct policy_work **pwork)
{
	struct bt_work *w;

	if (pwork)
		*pwork = NULL;

	w = alloc_work(b);
	if (!w)
		return -ENOMEM;

	memcpy(&w->work, work, sizeof(*work));

	if (!__insert_pending(b, w)) {
		/*
		 * There was a race, we'll just ignore this second
		 * bit of work for the same oblock.
		 */
		kmem_cache_free(b->work_cache, w);
		return -EINVAL;
	}

	if (pwork) {
		*pwork = &w->work;
		list_add(&w->list, &b->issued);
	} else {
		list_add(&w->list, &b->queued);
	}

	update_stats(b, &w->work, 1);

	return 0;
}
EXPORT_SYMBOL_GPL(btracker_queue);

/*
 * Hands over the next piece of queued work, moving it to the issued
 * list.  Returns -ENODATA if there's no work queued.
 */
int btracker_issue(struct background_tracker *b, struct policy_work **work)
{
	struct bt_work *w;

	if (list_empty(&b->queued))
		return -ENODATA;

	w = list_first_entry(&b->queued, struct bt_work, list);
	list_move(&w->list, &b->issued);
	*work = &w->work;

	return 0;
}
EXPORT_SYMBOL_GPL(btracker_issue);

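/*
 * @op must be the tracker's own copy of the work, as handed out by
 * btracker_queue() or btracker_issue(); it is removed from all the
 * tracker's structures and freed.
 */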
void btracker_complete(struct background_tracker *b,
		       struct policy_work *op)
{
	struct bt_work *w = container_of(op, struct bt_work, work);

	update_stats(b, &w->work, -1);
	rb_erase(&w->node, &b->pending);
	list_del(&w->list);
	kmem_cache_free(b->work_cache, w);
}
EXPORT_SYMBOL_GPL(btracker_complete);

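/*
 * Despite the name, this checks the pending tree for *any* work against
 * @oblock, not just promotions.
 */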
bool btracker_promotion_already_present(struct background_tracker *b,
					dm_oblock_t oblock)
{
	return __find_pending(b, oblock) != NULL;
}
EXPORT_SYMBOL_GPL(btracker_promotion_already_present);

/*----------------------------------------------------------------*/

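/*
 * Illustrative usage, a sketch rather than code taken from an in-tree
 * policy (b, pw and do_the_io() below are hypothetical):
 *
 *	struct policy_work *issued;
 *
 *	btracker_queue(b, &pw, NULL);
 *
 * The core target later pulls the queued work and processes it, telling
 * the tracker once each piece is done:
 *
 *	while (!btracker_issue(b, &issued)) {
 *		do_the_io(issued);
 *		btracker_complete(b, issued);
 *	}
 */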