1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2018 Mellanox Technologies */
3 
4 #include <linux/mlx5/vport.h>
5 #include <linux/list.h>
6 #include "lib/devcom.h"
7 #include "mlx5_core.h"
8 
static LIST_HEAD(devcom_dev_list);
static LIST_HEAD(devcom_comp_list);
/* protect device list */
static DEFINE_MUTEX(dev_list_lock);
/* protect component list */
static DEFINE_MUTEX(comp_list_lock);

/* Iterate all registered components. Caller must hold comp_list_lock. */
#define devcom_for_each_component(iter) \
	list_for_each_entry(iter, &devcom_comp_list, comp_list)
18 
/* One registered core device. Refcounted: each component attachment
 * (mlx5_devcom_comp_dev) holds a reference on its device.
 */
struct mlx5_devcom_dev {
	struct list_head list;		/* entry in devcom_dev_list */
	struct mlx5_core_dev *dev;	/* the underlying core device */
	struct kref ref;		/* released via mlx5_devcom_dev_release() */
};
24 
/* A communication component shared by all devices that register with the
 * same (id, key) pair. Refcounted: one reference per attached device.
 */
struct mlx5_devcom_comp {
	struct list_head comp_list;		/* entry in devcom_comp_list */
	enum mlx5_devcom_component id;		/* component type */
	u64 key;				/* distinguishes instances of the same id */
	struct list_head comp_dev_list_head;	/* attached devices; protected by sem */
	mlx5_devcom_event_handler_t handler;	/* event callback, identical for all attachments */
	struct kref ref;			/* released via mlx5_devcom_comp_release() */
	bool ready;				/* written under sem; read with READ_ONCE() */
	struct rw_semaphore sem;		/* protects comp_dev_list_head and ready */
};
35 
/* Attachment of one device to one component. */
struct mlx5_devcom_comp_dev {
	struct list_head list;		/* entry in comp->comp_dev_list_head */
	struct mlx5_devcom_comp *comp;	/* component this device is attached to */
	struct mlx5_devcom_dev *devc;	/* owning device (holds a ref on it) */
	void __rcu *data;		/* caller context; read under RCU or comp->sem */
};
42 
43 static bool devcom_dev_exists(struct mlx5_core_dev *dev)
44 {
45 	struct mlx5_devcom_dev *iter;
46 
47 	list_for_each_entry(iter, &devcom_dev_list, list)
48 		if (iter->dev == dev)
49 			return true;
50 
51 	return false;
52 }
53 
54 static struct mlx5_devcom_dev *
55 mlx5_devcom_dev_alloc(struct mlx5_core_dev *dev)
56 {
57 	struct mlx5_devcom_dev *devc;
58 
59 	devc = kzalloc(sizeof(*devc), GFP_KERNEL);
60 	if (!devc)
61 		return NULL;
62 
63 	devc->dev = dev;
64 	kref_init(&devc->ref);
65 	return devc;
66 }
67 
/* Register @dev with devcom and return its handle.
 *
 * Returns ERR_PTR(-EEXIST) if @dev is already registered and
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
struct mlx5_devcom_dev *
mlx5_devcom_register_device(struct mlx5_core_dev *dev)
{
	struct mlx5_devcom_dev *devc;

	mutex_lock(&dev_list_lock);

	/* Reject double registration of the same core device. */
	if (devcom_dev_exists(dev)) {
		devc = ERR_PTR(-EEXIST);
		goto out;
	}

	devc = mlx5_devcom_dev_alloc(dev);
	if (!devc) {
		devc = ERR_PTR(-ENOMEM);
		goto out;
	}

	list_add_tail(&devc->list, &devcom_dev_list);
out:
	mutex_unlock(&dev_list_lock);
	return devc;
}
91 
/* kref release callback: unlink the device from devcom_dev_list and
 * free it. Runs when the last reference is dropped.
 */
static void
mlx5_devcom_dev_release(struct kref *ref)
{
	struct mlx5_devcom_dev *devc = container_of(ref, struct mlx5_devcom_dev, ref);

	mutex_lock(&dev_list_lock);
	list_del(&devc->list);
	mutex_unlock(&dev_list_lock);
	kfree(devc);
}
102 
103 void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc)
104 {
105 	if (!IS_ERR_OR_NULL(devc))
106 		kref_put(&devc->ref, mlx5_devcom_dev_release);
107 }
108 
109 static struct mlx5_devcom_comp *
110 mlx5_devcom_comp_alloc(u64 id, u64 key, mlx5_devcom_event_handler_t handler)
111 {
112 	struct mlx5_devcom_comp *comp;
113 
114 	comp = kzalloc(sizeof(*comp), GFP_KERNEL);
115 	if (!comp)
116 		return ERR_PTR(-ENOMEM);
117 
118 	comp->id = id;
119 	comp->key = key;
120 	comp->handler = handler;
121 	init_rwsem(&comp->sem);
122 	kref_init(&comp->ref);
123 	INIT_LIST_HEAD(&comp->comp_dev_list_head);
124 
125 	return comp;
126 }
127 
/* kref release callback: unlink the component from devcom_comp_list and
 * free it. Runs when the last attached device drops its reference.
 */
static void
mlx5_devcom_comp_release(struct kref *ref)
{
	struct mlx5_devcom_comp *comp = container_of(ref, struct mlx5_devcom_comp, ref);

	mutex_lock(&comp_list_lock);
	list_del(&comp->comp_list);
	mutex_unlock(&comp_list_lock);
	kfree(comp);
}
138 
/* Attach @devc to @comp with caller context @data.
 *
 * Takes a reference on @devc (dropped in devcom_free_comp_dev()); the
 * caller is expected to already hold a reference on @comp on behalf of
 * this attachment. Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct mlx5_devcom_comp_dev *
devcom_alloc_comp_dev(struct mlx5_devcom_dev *devc,
		      struct mlx5_devcom_comp *comp,
		      void *data)
{
	struct mlx5_devcom_comp_dev *devcom;

	devcom = kzalloc(sizeof(*devcom), GFP_KERNEL);
	if (!devcom)
		return ERR_PTR(-ENOMEM);

	kref_get(&devc->ref);
	devcom->devc = devc;
	devcom->comp = comp;
	/* Publish @data before linking, for lockless RCU readers. */
	rcu_assign_pointer(devcom->data, data);

	/* Write lock serializes against peer iteration and event sends. */
	down_write(&comp->sem);
	list_add_tail(&devcom->list, &comp->comp_dev_list_head);
	up_write(&comp->sem);

	return devcom;
}
161 
/* Detach @devcom from its component and drop the device and component
 * references taken for this attachment.
 */
static void
devcom_free_comp_dev(struct mlx5_devcom_comp_dev *devcom)
{
	/* Cache comp: devcom is freed before the final kref_put below. */
	struct mlx5_devcom_comp *comp = devcom->comp;

	down_write(&comp->sem);
	list_del(&devcom->list);
	up_write(&comp->sem);

	kref_put(&devcom->devc->ref, mlx5_devcom_dev_release);
	kfree(devcom);
	/* Last action: this may free comp itself. */
	kref_put(&comp->ref, mlx5_devcom_comp_release);
}
175 
176 static bool
177 devcom_component_equal(struct mlx5_devcom_comp *devcom,
178 		       enum mlx5_devcom_component id,
179 		       u64 key)
180 {
181 	return devcom->id == id && devcom->key == key;
182 }
183 
184 static struct mlx5_devcom_comp *
185 devcom_component_get(struct mlx5_devcom_dev *devc,
186 		     enum mlx5_devcom_component id,
187 		     u64 key,
188 		     mlx5_devcom_event_handler_t handler)
189 {
190 	struct mlx5_devcom_comp *comp;
191 
192 	devcom_for_each_component(comp) {
193 		if (devcom_component_equal(comp, id, key)) {
194 			if (handler == comp->handler) {
195 				kref_get(&comp->ref);
196 				return comp;
197 			}
198 
199 			mlx5_core_err(devc->dev,
200 				      "Cannot register existing devcom component with different handler\n");
201 			return ERR_PTR(-EINVAL);
202 		}
203 	}
204 
205 	return NULL;
206 }
207 
/* Attach @devc to the component identified by (@id, @key), creating the
 * component on first use. All attachments to one component must pass
 * the same @handler; @data is this device's per-component context.
 *
 * Returns the attachment handle, NULL for an invalid @devc, or an
 * ERR_PTR on failure.
 */
struct mlx5_devcom_comp_dev *
mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
			       enum mlx5_devcom_component id,
			       u64 key,
			       mlx5_devcom_event_handler_t handler,
			       void *data)
{
	struct mlx5_devcom_comp_dev *devcom;
	struct mlx5_devcom_comp *comp;

	if (IS_ERR_OR_NULL(devc))
		return NULL;

	mutex_lock(&comp_list_lock);
	/* Takes a component reference when a match is found (non-NULL). */
	comp = devcom_component_get(devc, id, key, handler);
	if (IS_ERR(comp)) {
		devcom = ERR_PTR(-EINVAL);
		goto out_unlock;
	}

	if (!comp) {
		/* First user of (id, key): create it with refcount 1. */
		comp = mlx5_devcom_comp_alloc(id, key, handler);
		if (IS_ERR(comp)) {
			devcom = ERR_CAST(comp);
			goto out_unlock;
		}
		list_add_tail(&comp->comp_list, &devcom_comp_list);
	}
	mutex_unlock(&comp_list_lock);

	devcom = devcom_alloc_comp_dev(devc, comp, data);
	if (IS_ERR(devcom))
		/* Undo the reference taken or created above. */
		kref_put(&comp->ref, mlx5_devcom_comp_release);

	return devcom;

out_unlock:
	mutex_unlock(&comp_list_lock);
	return devcom;
}
248 
/* Detach @devcom from its component; tolerates NULL/ERR_PTR handles so
 * callers can pass the result of registration unchecked.
 */
void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom)
{
	if (IS_ERR_OR_NULL(devcom))
		return;

	devcom_free_comp_dev(devcom);
}
254 
/* Deliver @event to every other device attached to @devcom's component.
 *
 * Handlers run with the component semaphore held for write, so the
 * attachment list cannot change mid-broadcast. If a handler fails,
 * @rollback_event is delivered in reverse order to the peers that
 * already received @event, and the first error is returned.
 * Returns 0 on success, -ENODEV for an invalid @devcom.
 */
int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
			   int event, int rollback_event,
			   void *event_data)
{
	struct mlx5_devcom_comp_dev *pos;
	struct mlx5_devcom_comp *comp;
	int err = 0;
	void *data;

	if (IS_ERR_OR_NULL(devcom))
		return -ENODEV;

	comp = devcom->comp;
	down_write(&comp->sem);
	list_for_each_entry(pos, &comp->comp_dev_list_head, list) {
		data = rcu_dereference_protected(pos->data, lockdep_is_held(&comp->sem));

		/* Skip ourselves and peers that haven't published data. */
		if (pos != devcom && data) {
			err = comp->handler(event, data, event_data);
			if (err)
				goto rollback;
		}
	}

	up_write(&comp->sem);
	return 0;

rollback:
	/* Defensive: pos should always be a real entry here, since we
	 * only jump out from inside the loop body.
	 */
	if (list_entry_is_head(pos, &comp->comp_dev_list_head, list))
		goto out;
	/* Walk backwards from the last peer that handled @event. */
	pos = list_prev_entry(pos, list);
	list_for_each_entry_from_reverse(pos, &comp->comp_dev_list_head, list) {
		data = rcu_dereference_protected(pos->data, lockdep_is_held(&comp->sem));

		if (pos != devcom && data)
			comp->handler(rollback_event, data, event_data);
	}
out:
	up_write(&comp->sem);
	return err;
}
296 
/* Mark @devcom's component ready (or not) for peer iteration.
 * The component semaphore must be held; WRITE_ONCE pairs with the
 * READ_ONCE readers that check readiness without holding it for write.
 */
void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready)
{
	WARN_ON(!rwsem_is_locked(&devcom->comp->sem));

	WRITE_ONCE(devcom->comp->ready, ready);
}
303 
304 bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom)
305 {
306 	if (IS_ERR_OR_NULL(devcom))
307 		return false;
308 
309 	return READ_ONCE(devcom->comp->ready);
310 }
311 
312 bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom_comp_dev *devcom)
313 {
314 	struct mlx5_devcom_comp *comp;
315 
316 	if (IS_ERR_OR_NULL(devcom))
317 		return false;
318 
319 	comp = devcom->comp;
320 	down_read(&comp->sem);
321 	if (!READ_ONCE(comp->ready)) {
322 		up_read(&comp->sem);
323 		return false;
324 	}
325 
326 	return true;
327 }
328 
/* End a session opened by mlx5_devcom_for_each_peer_begin(), releasing
 * the component's read lock.
 */
void mlx5_devcom_for_each_peer_end(struct mlx5_devcom_comp_dev *devcom)
{
	up_read(&devcom->comp->sem);
}
333 
/* Return the next peer's data after *@pos, skipping @devcom itself and
 * peers with no published data; NULL when the list is exhausted.
 * *@pos is the iteration cursor and is updated on success.
 * Caller must hold comp->sem (e.g. via mlx5_devcom_for_each_peer_begin()).
 */
void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom_comp_dev *devcom,
				     struct mlx5_devcom_comp_dev **pos)
{
	struct mlx5_devcom_comp *comp = devcom->comp;
	struct mlx5_devcom_comp_dev *tmp;
	void *data;

	/* NULL *pos starts from the head; otherwise resume after *pos. */
	tmp = list_prepare_entry(*pos, &comp->comp_dev_list_head, list);

	list_for_each_entry_continue(tmp, &comp->comp_dev_list_head, list) {
		if (tmp != devcom) {
			data = rcu_dereference_protected(tmp->data, lockdep_is_held(&comp->sem));
			if (data)
				break;
		}
	}

	/* Loop ran off the end without a break: no more peers. */
	if (list_entry_is_head(tmp, &comp->comp_dev_list_head, list))
		return NULL;

	*pos = tmp;
	return data;
}
357 
/* Lockless variant of mlx5_devcom_get_next_peer_data(): return the next
 * peer's data after *@pos under RCU instead of comp->sem.
 * Caller must be in an RCU read-side critical section (rcu_dereference
 * below). Also bails out with NULL if the component stops being ready
 * mid-iteration.
 */
void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
					 struct mlx5_devcom_comp_dev **pos)
{
	struct mlx5_devcom_comp *comp = devcom->comp;
	struct mlx5_devcom_comp_dev *tmp;
	void *data;

	/* NULL *pos starts from the head; otherwise resume after *pos. */
	tmp = list_prepare_entry(*pos, &comp->comp_dev_list_head, list);

	list_for_each_entry_continue(tmp, &comp->comp_dev_list_head, list) {
		if (tmp != devcom) {
			/* This can change concurrently, however 'data' pointer will remain
			 * valid for the duration of RCU read section.
			 */
			if (!READ_ONCE(comp->ready))
				return NULL;
			data = rcu_dereference(tmp->data);
			if (data)
				break;
		}
	}

	/* Loop ran off the end without a break: no more peers. */
	if (list_entry_is_head(tmp, &comp->comp_dev_list_head, list))
		return NULL;

	*pos = tmp;
	return data;
}
386