// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2018 Mellanox Technologies */

#include <linux/mlx5/vport.h>
#include <linux/list.h>
#include "lib/devcom.h"
#include "mlx5_core.h"

static LIST_HEAD(devcom_dev_list);
static LIST_HEAD(devcom_comp_list);
/* protect device list */
static DEFINE_MUTEX(dev_list_lock);
/* protect component list */
static DEFINE_MUTEX(comp_list_lock);

#define devcom_for_each_component(iter) \
	list_for_each_entry(iter, &devcom_comp_list, comp_list)

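/* A devcom device (mlx5_devcom_dev) is created once per registered
 * mlx5_core_dev and kept on the global devcom_dev_list.  A component
 * (mlx5_devcom_comp) is shared by all devices that register the same
 * (id, key) pair and holds the per-component event handler.  Each
 * mlx5_devcom_comp_dev links one device to one component and carries
 * that device's RCU-protected data pointer.
 *
 * Typical flow: mlx5_devcom_register_device(), then
 * mlx5_devcom_register_component() per component, then event sending
 * and peer iteration, and finally the matching unregister calls.
 */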
struct mlx5_devcom_dev {
	struct list_head list;
	struct mlx5_core_dev *dev;
	struct kref ref;
};

struct mlx5_devcom_comp {
	struct list_head comp_list;
	enum mlx5_devcom_component id;
	u64 key;
	struct list_head comp_dev_list_head;
	mlx5_devcom_event_handler_t handler;
	struct kref ref;
	bool ready;
	struct rw_semaphore sem;
	struct lock_class_key lock_key;
};

struct mlx5_devcom_comp_dev {
	struct list_head list;
	struct mlx5_devcom_comp *comp;
	struct mlx5_devcom_dev *devc;
	void __rcu *data;
};

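/* Walks the global device list; must be called with dev_list_lock held. */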
static bool devcom_dev_exists(struct mlx5_core_dev *dev)
{
	struct mlx5_devcom_dev *iter;

	list_for_each_entry(iter, &devcom_dev_list, list)
		if (iter->dev == dev)
			return true;

	return false;
}

static struct mlx5_devcom_dev *
mlx5_devcom_dev_alloc(struct mlx5_core_dev *dev)
{
	struct mlx5_devcom_dev *devc;

	devc = kzalloc(sizeof(*devc), GFP_KERNEL);
	if (!devc)
		return NULL;

	devc->dev = dev;
	kref_init(&devc->ref);
	return devc;
}

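/* Add @dev to the global devcom device list.  Returns ERR_PTR(-EEXIST)
 * if the device is already registered and ERR_PTR(-ENOMEM) on
 * allocation failure.
 */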
struct mlx5_devcom_dev *
mlx5_devcom_register_device(struct mlx5_core_dev *dev)
{
	struct mlx5_devcom_dev *devc;

	mutex_lock(&dev_list_lock);

	if (devcom_dev_exists(dev)) {
		devc = ERR_PTR(-EEXIST);
		goto out;
	}

	devc = mlx5_devcom_dev_alloc(dev);
	if (!devc) {
		devc = ERR_PTR(-ENOMEM);
		goto out;
	}

	list_add_tail(&devc->list, &devcom_dev_list);
out:
	mutex_unlock(&dev_list_lock);
	return devc;
}

static void
mlx5_devcom_dev_release(struct kref *ref)
{
	struct mlx5_devcom_dev *devc = container_of(ref, struct mlx5_devcom_dev, ref);

	mutex_lock(&dev_list_lock);
	list_del(&devc->list);
	mutex_unlock(&dev_list_lock);
	kfree(devc);
}

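/* Drop the registration reference; the device is removed from the global
 * list and freed once the last reference is gone.
 */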
void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc)
{
	if (!IS_ERR_OR_NULL(devc))
		kref_put(&devc->ref, mlx5_devcom_dev_release);
}

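/* Each component's rw_semaphore gets its own lockdep key, so the locks of
 * different components are tracked as separate lockdep classes.
 */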
static struct mlx5_devcom_comp *
mlx5_devcom_comp_alloc(u64 id, u64 key, mlx5_devcom_event_handler_t handler)
{
	struct mlx5_devcom_comp *comp;

	comp = kzalloc(sizeof(*comp), GFP_KERNEL);
	if (!comp)
		return ERR_PTR(-ENOMEM);

	comp->id = id;
	comp->key = key;
	comp->handler = handler;
	init_rwsem(&comp->sem);
	lockdep_register_key(&comp->lock_key);
	lockdep_set_class(&comp->sem, &comp->lock_key);
	kref_init(&comp->ref);
	INIT_LIST_HEAD(&comp->comp_dev_list_head);

	return comp;
}

static void
mlx5_devcom_comp_release(struct kref *ref)
{
	struct mlx5_devcom_comp *comp = container_of(ref, struct mlx5_devcom_comp, ref);

	mutex_lock(&comp_list_lock);
	list_del(&comp->comp_list);
	mutex_unlock(&comp_list_lock);
	lockdep_unregister_key(&comp->lock_key);
	kfree(comp);
}

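/* Link a device to a component: take a reference on @devc and add the new
 * entry to the component's device list under the component write lock.
 */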
static struct mlx5_devcom_comp_dev *
devcom_alloc_comp_dev(struct mlx5_devcom_dev *devc,
		      struct mlx5_devcom_comp *comp,
		      void *data)
{
	struct mlx5_devcom_comp_dev *devcom;

	devcom = kzalloc(sizeof(*devcom), GFP_KERNEL);
	if (!devcom)
		return ERR_PTR(-ENOMEM);

	kref_get(&devc->ref);
	devcom->devc = devc;
	devcom->comp = comp;
	rcu_assign_pointer(devcom->data, data);

	down_write(&comp->sem);
	list_add_tail(&devcom->list, &comp->comp_dev_list_head);
	up_write(&comp->sem);

	return devcom;
}

static void
devcom_free_comp_dev(struct mlx5_devcom_comp_dev *devcom)
{
	struct mlx5_devcom_comp *comp = devcom->comp;

	down_write(&comp->sem);
	list_del(&devcom->list);
	up_write(&comp->sem);

	kref_put(&devcom->devc->ref, mlx5_devcom_dev_release);
	kfree(devcom);
	kref_put(&comp->ref, mlx5_devcom_comp_release);
}

static bool
devcom_component_equal(struct mlx5_devcom_comp *devcom,
		       enum mlx5_devcom_component id,
		       u64 key)
{
	return devcom->id == id && devcom->key == key;
}

static struct mlx5_devcom_comp *
devcom_component_get(struct mlx5_devcom_dev *devc,
		     enum mlx5_devcom_component id,
		     u64 key,
		     mlx5_devcom_event_handler_t handler)
{
	struct mlx5_devcom_comp *comp;

	devcom_for_each_component(comp) {
		if (devcom_component_equal(comp, id, key)) {
			if (handler == comp->handler) {
				kref_get(&comp->ref);
				return comp;
			}

			mlx5_core_err(devc->dev,
				      "Cannot register existing devcom component with different handler\n");
			return ERR_PTR(-EINVAL);
		}
	}

	return NULL;
}

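/* Attach @devc to the component identified by (@id, @key), allocating the
 * component on first use.  Registering an existing component with a
 * different handler fails with ERR_PTR(-EINVAL).  @data becomes this
 * device's peer data, visible to the other devices of the component.
 */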
struct mlx5_devcom_comp_dev *
mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
			       enum mlx5_devcom_component id,
			       u64 key,
			       mlx5_devcom_event_handler_t handler,
			       void *data)
{
	struct mlx5_devcom_comp_dev *devcom;
	struct mlx5_devcom_comp *comp;

	if (IS_ERR_OR_NULL(devc))
		return ERR_PTR(-EINVAL);

	mutex_lock(&comp_list_lock);
	comp = devcom_component_get(devc, id, key, handler);
	if (IS_ERR(comp)) {
		devcom = ERR_PTR(-EINVAL);
		goto out_unlock;
	}

	if (!comp) {
		comp = mlx5_devcom_comp_alloc(id, key, handler);
		if (IS_ERR(comp)) {
			devcom = ERR_CAST(comp);
			goto out_unlock;
		}
		list_add_tail(&comp->comp_list, &devcom_comp_list);
	}
	mutex_unlock(&comp_list_lock);

	devcom = devcom_alloc_comp_dev(devc, comp, data);
	if (IS_ERR(devcom))
		kref_put(&comp->ref, mlx5_devcom_comp_release);

	return devcom;

out_unlock:
	mutex_unlock(&comp_list_lock);
	return devcom;
}

void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom)
{
	if (!IS_ERR_OR_NULL(devcom))
		devcom_free_comp_dev(devcom);
}

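/* The component refcount equals the number of devices currently attached
 * to the component, so it can be read back as the component size.
 */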
int mlx5_devcom_comp_get_size(struct mlx5_devcom_comp_dev *devcom)
{
	struct mlx5_devcom_comp *comp = devcom->comp;

	return kref_read(&comp->ref);
}

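/* Invoke the component handler with @event on every other device of the
 * component.  If a handler fails, the already-notified devices are
 * replayed in reverse order with @rollback_event and the error is
 * returned.  Runs under the component write lock.
 */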
int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
			   int event, int rollback_event,
			   void *event_data)
{
	struct mlx5_devcom_comp_dev *pos;
	struct mlx5_devcom_comp *comp;
	int err = 0;
	void *data;

	if (IS_ERR_OR_NULL(devcom))
		return -ENODEV;

	comp = devcom->comp;
	down_write(&comp->sem);
	list_for_each_entry(pos, &comp->comp_dev_list_head, list) {
		data = rcu_dereference_protected(pos->data, lockdep_is_held(&comp->sem));

		if (pos != devcom && data) {
			err = comp->handler(event, data, event_data);
			if (err)
				goto rollback;
		}
	}

	up_write(&comp->sem);
	return 0;

rollback:
	if (list_entry_is_head(pos, &comp->comp_dev_list_head, list))
		goto out;
	pos = list_prev_entry(pos, list);
	list_for_each_entry_from_reverse(pos, &comp->comp_dev_list_head, list) {
		data = rcu_dereference_protected(pos->data, lockdep_is_held(&comp->sem));

		if (pos != devcom && data)
			comp->handler(rollback_event, data, event_data);
	}
out:
	up_write(&comp->sem);
	return err;
}

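/* The ready flag gates peer iteration; it may only be changed while the
 * component semaphore is held (see the WARN_ON below).
 */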
void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready)
{
	WARN_ON(!rwsem_is_locked(&devcom->comp->sem));

	WRITE_ONCE(devcom->comp->ready, ready);
}

bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom)
{
	if (IS_ERR_OR_NULL(devcom))
		return false;

	return READ_ONCE(devcom->comp->ready);
}

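/* Take the component lock for read and return true only if the component
 * is ready.  On success the caller must release the lock with
 * mlx5_devcom_for_each_peer_end().
 */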
bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom_comp_dev *devcom)
{
	struct mlx5_devcom_comp *comp;

	if (IS_ERR_OR_NULL(devcom))
		return false;

	comp = devcom->comp;
	down_read(&comp->sem);
	if (!READ_ONCE(comp->ready)) {
		up_read(&comp->sem);
		return false;
	}

	return true;
}

void mlx5_devcom_for_each_peer_end(struct mlx5_devcom_comp_dev *devcom)
{
	up_read(&devcom->comp->sem);
}

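/* Return the next peer's data pointer, or NULL when the list is exhausted.
 * @pos keeps the iteration state.  Must be called with the component
 * semaphore held, e.g. between for_each_peer_begin() and _end().
 */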
void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom_comp_dev *devcom,
				     struct mlx5_devcom_comp_dev **pos)
{
	struct mlx5_devcom_comp *comp = devcom->comp;
	struct mlx5_devcom_comp_dev *tmp;
	void *data;

	tmp = list_prepare_entry(*pos, &comp->comp_dev_list_head, list);

	list_for_each_entry_continue(tmp, &comp->comp_dev_list_head, list) {
		if (tmp != devcom) {
			data = rcu_dereference_protected(tmp->data, lockdep_is_held(&comp->sem));
			if (data)
				break;
		}
	}

	if (list_entry_is_head(tmp, &comp->comp_dev_list_head, list))
		return NULL;

	*pos = tmp;
	return data;
}

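/* Lockless variant of the peer iterator: runs under RCU instead of the
 * component semaphore and bails out if the component is not ready.
 */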
void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
					 struct mlx5_devcom_comp_dev **pos)
{
	struct mlx5_devcom_comp *comp = devcom->comp;
	struct mlx5_devcom_comp_dev *tmp;
	void *data;

	tmp = list_prepare_entry(*pos, &comp->comp_dev_list_head, list);

	list_for_each_entry_continue(tmp, &comp->comp_dev_list_head, list) {
		if (tmp != devcom) {
			/* This can change concurrently, however 'data' pointer will remain
			 * valid for the duration of RCU read section.
			 */
			if (!READ_ONCE(comp->ready))
				return NULL;
			data = rcu_dereference(tmp->data);
			if (data)
				break;
		}
	}

	if (list_entry_is_head(tmp, &comp->comp_dev_list_head, list))
		return NULL;

	*pos = tmp;
	return data;
}

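/* Write-lock helpers for the component semaphore; they are no-ops (and
 * trylock returns 0) when @devcom is NULL or an error pointer.
 */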
void mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev *devcom)
{
	if (IS_ERR_OR_NULL(devcom))
		return;
	down_write(&devcom->comp->sem);
}

void mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev *devcom)
{
	if (IS_ERR_OR_NULL(devcom))
		return;
	up_write(&devcom->comp->sem);
}

int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom)
{
	if (IS_ERR_OR_NULL(devcom))
		return 0;
	return down_write_trylock(&devcom->comp->sem);
}