/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>
#include "mlx5_core.h"

/* intf dev list mutex */
static DEFINE_MUTEX(mlx5_intf_mutex);
static DEFINE_IDA(mlx5_adev_ida);

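/*
 * Each mlx5 protocol (ethernet, RDMA, vDPA net, multiport and the
 * switchdev representor variants) is exposed as an auxiliary device.
 * The is_*_supported() helpers below decide, per protocol, whether the
 * corresponding auxiliary device should exist on this core device,
 * based on Kconfig options and firmware capabilities.
 */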
static bool is_eth_rep_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_ESWITCH))
		return false;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return false;

	if (!is_mdev_switchdev_mode(dev))
		return false;

	return true;
}

static bool is_eth_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_CORE_EN))
		return false;

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	if (!MLX5_CAP_GEN(dev, eth_net_offloads)) {
		mlx5_core_warn(dev, "Missing eth_net_offloads capability\n");
		return false;
	}

	if (!MLX5_CAP_GEN(dev, nic_flow_table)) {
		mlx5_core_warn(dev, "Missing nic_flow_table capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, csum_cap)) {
		mlx5_core_warn(dev, "Missing csum_cap capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, max_lso_cap)) {
		mlx5_core_warn(dev, "Missing max_lso_cap capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, vlan_cap)) {
		mlx5_core_warn(dev, "Missing vlan_cap capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, rss_ind_tbl_cap)) {
		mlx5_core_warn(dev, "Missing rss_ind_tbl_cap capability\n");
		return false;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.max_ft_level) < 3) {
		mlx5_core_warn(dev, "max_ft_level < 3\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, self_lb_en_modifiable))
		mlx5_core_warn(dev, "Self loopback prevention is not supported\n");
	if (!MLX5_CAP_GEN(dev, cq_moderation))
		mlx5_core_warn(dev, "CQ moderation is not supported\n");

	return true;
}

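/*
 * vDPA net devices are created only on non-PF functions whose firmware
 * exposes the virtio_net_q general object, QP event mode and an
 * ethernet frame offload type.
 */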
static bool is_vnet_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_VDPA_NET))
		return false;

	if (mlx5_core_is_pf(dev))
		return false;

	if (!(MLX5_CAP_GEN_64(dev, general_obj_types) &
	      MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q))
		return false;

	if (!(MLX5_CAP_DEV_VDPA_EMULATION(dev, event_mode) &
	      MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE))
		return false;

	if (!MLX5_CAP_DEV_VDPA_EMULATION(dev, eth_frame_offload_type))
		return false;

	return true;
}

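/*
 * The RDMA switchdev representor device builds on the eth-rep checks
 * and is additionally skipped when IB auxiliary devices are disabled
 * or multi-port (MP) mode is enabled.
 */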
static bool is_ib_rep_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return false;

	if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
		return false;

	if (!is_eth_rep_supported(dev))
		return false;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return false;

	if (!is_mdev_switchdev_mode(dev))
		return false;

	if (mlx5_core_mp_enabled(dev))
		return false;

	return true;
}

static bool is_mp_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return false;

	if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
		return false;

	if (is_ib_rep_supported(dev))
		return false;

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	if (!mlx5_core_is_mp_slave(dev))
		return false;

	return true;
}

static bool is_ib_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return false;

	if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
		return false;

	if (is_ib_rep_supported(dev))
		return false;

	if (is_mp_supported(dev))
		return false;

	return true;
}

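/*
 * Index order matters: add_drivers()/mlx5_attach_device() walk
 * mlx5_adev_devices[] in ascending order, while delete_drivers()/
 * mlx5_detach_device() walk it in descending order, so the ethernet
 * device comes up first and goes down last.
 */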
enum {
	MLX5_INTERFACE_PROTOCOL_ETH,
	MLX5_INTERFACE_PROTOCOL_ETH_REP,

	MLX5_INTERFACE_PROTOCOL_IB,
	MLX5_INTERFACE_PROTOCOL_IB_REP,
	MLX5_INTERFACE_PROTOCOL_MPIB,

	MLX5_INTERFACE_PROTOCOL_VNET,
};

static const struct mlx5_adev_device {
	const char *suffix;
	bool (*is_supported)(struct mlx5_core_dev *dev);
} mlx5_adev_devices[] = {
	[MLX5_INTERFACE_PROTOCOL_VNET] = { .suffix = "vnet",
					   .is_supported = &is_vnet_supported },
	[MLX5_INTERFACE_PROTOCOL_IB] = { .suffix = "rdma",
					 .is_supported = &is_ib_supported },
	[MLX5_INTERFACE_PROTOCOL_ETH] = { .suffix = "eth",
					  .is_supported = &is_eth_supported },
	[MLX5_INTERFACE_PROTOCOL_ETH_REP] = { .suffix = "eth-rep",
					      .is_supported = &is_eth_rep_supported },
	[MLX5_INTERFACE_PROTOCOL_IB_REP] = { .suffix = "rdma-rep",
					     .is_supported = &is_ib_rep_supported },
	[MLX5_INTERFACE_PROTOCOL_MPIB] = { .suffix = "multiport",
					   .is_supported = &is_mp_supported },
};

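/*
 * The auxiliary bus names each device "<modname>.<suffix>.<id>", e.g.
 * "mlx5_core.eth.0"; the trailing id is allocated below from
 * mlx5_adev_ida. A sub-driver binds by listing the "<modname>.<suffix>"
 * part in its id_table. A minimal consumer sketch (hypothetical names,
 * not the actual mlx5e code):
 *
 *	static const struct auxiliary_device_id my_id_table[] = {
 *		{ .name = "mlx5_core.eth" },
 *		{}
 *	};
 *
 *	static struct auxiliary_driver my_drv = {
 *		.name = "my_eth",
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.id_table = my_id_table,
 *	};
 *
 * registered with auxiliary_driver_register(&my_drv).
 */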
int mlx5_adev_idx_alloc(void)
{
	return ida_alloc(&mlx5_adev_ida, GFP_KERNEL);
}

void mlx5_adev_idx_free(int idx)
{
	ida_free(&mlx5_adev_ida, idx);
}

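/*
 * Allocate priv->adev[], one slot per mlx5_adev_devices[] entry; slots
 * are filled lazily by add_adev() and cleared again in adev_release().
 */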
int mlx5_adev_init(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	priv->adev = kcalloc(ARRAY_SIZE(mlx5_adev_devices),
			     sizeof(struct mlx5_adev *), GFP_KERNEL);
	if (!priv->adev)
		return -ENOMEM;

	return 0;
}

void mlx5_adev_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	kfree(priv->adev);
}

static void adev_release(struct device *dev)
{
	struct mlx5_adev *mlx5_adev =
		container_of(dev, struct mlx5_adev, adev.dev);
	struct mlx5_priv *priv = &mlx5_adev->mdev->priv;
	int idx = mlx5_adev->idx;

	kfree(mlx5_adev);
	priv->adev[idx] = NULL;
}

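/*
 * Create, initialize and register a single auxiliary device. Once
 * auxiliary_device_init() has succeeded, the allocation is owned by
 * the device core and is freed through adev_release().
 */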
static struct mlx5_adev *add_adev(struct mlx5_core_dev *dev, int idx)
{
	const char *suffix = mlx5_adev_devices[idx].suffix;
	struct auxiliary_device *adev;
	struct mlx5_adev *madev;
	int ret;

	madev = kzalloc(sizeof(*madev), GFP_KERNEL);
	if (!madev)
		return ERR_PTR(-ENOMEM);

	adev = &madev->adev;
	adev->id = dev->priv.adev_idx;
	adev->name = suffix;
	adev->dev.parent = dev->device;
	adev->dev.release = adev_release;
	madev->mdev = dev;
	madev->idx = idx;

	ret = auxiliary_device_init(adev);
	if (ret) {
		kfree(madev);
		return ERR_PTR(ret);
	}

	ret = auxiliary_device_add(adev);
	if (ret) {
		auxiliary_device_uninit(adev);
		return ERR_PTR(ret);
	}
	return madev;
}

static void del_adev(struct auxiliary_device *adev)
{
	auxiliary_device_delete(adev);
	auxiliary_device_uninit(adev);
}

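/*
 * Create (or resume, if a bound sub-driver implements ->resume()) the
 * auxiliary device of every supported protocol. Stops at the first
 * failure; devices attached so far are left for the caller to unwind.
 */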
int mlx5_attach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct auxiliary_device *adev;
	struct auxiliary_driver *adrv;
	int ret = 0, i;

	mutex_lock(&mlx5_intf_mutex);
	for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
		if (!priv->adev[i]) {
			bool is_supported = false;

			if (mlx5_adev_devices[i].is_supported)
				is_supported = mlx5_adev_devices[i].is_supported(dev);

			if (!is_supported)
				continue;

			priv->adev[i] = add_adev(dev, i);
			if (IS_ERR(priv->adev[i])) {
				ret = PTR_ERR(priv->adev[i]);
				priv->adev[i] = NULL;
			}
		} else {
			adev = &priv->adev[i]->adev;
			adrv = to_auxiliary_drv(adev->dev.driver);

			if (adrv->resume)
				ret = adrv->resume(adev);
		}
		if (ret) {
			mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
				       i, mlx5_adev_devices[i].suffix);
			break;
		}
	}
	mutex_unlock(&mlx5_intf_mutex);
	return ret;
}

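/*
 * Suspend sub-drivers that implement ->suspend() and delete the
 * auxiliary devices of the rest, in reverse creation order.
 */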
void mlx5_detach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct auxiliary_device *adev;
	struct auxiliary_driver *adrv;
	pm_message_t pm = {};
	int i;

	mutex_lock(&mlx5_intf_mutex);
	for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
		if (!priv->adev[i])
			continue;

		adev = &priv->adev[i]->adev;
		adrv = to_auxiliary_drv(adev->dev.driver);

		if (adrv->suspend) {
			adrv->suspend(adev, pm);
			continue;
		}

		del_adev(&priv->adev[i]->adev);
		priv->adev[i] = NULL;
	}
	mutex_unlock(&mlx5_intf_mutex);
}

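/*
 * Re-enable auxiliary devices for this core device and rescan, creating
 * whatever is currently supported.
 */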
int mlx5_register_device(struct mlx5_core_dev *dev)
{
	int ret;

	mutex_lock(&mlx5_intf_mutex);
	dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
	ret = mlx5_rescan_drivers_locked(dev);
	mutex_unlock(&mlx5_intf_mutex);
	if (ret)
		mlx5_unregister_device(dev);

	return ret;
}

void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	mutex_lock(&mlx5_intf_mutex);
	dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
	mlx5_rescan_drivers_locked(dev);
	mutex_unlock(&mlx5_intf_mutex);
}

static int add_drivers(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
		bool is_supported = false;

		if (priv->adev[i])
			continue;

		if (mlx5_adev_devices[i].is_supported)
			is_supported = mlx5_adev_devices[i].is_supported(dev);

		if (!is_supported)
			continue;

		priv->adev[i] = add_adev(dev, i);
		if (IS_ERR(priv->adev[i])) {
			mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
				       i, mlx5_adev_devices[i].suffix);
			/* Keep rescanning the remaining drivers and let the
			 * caller decide whether to release everything or
			 * continue with a partial set.
			 */
			ret = PTR_ERR(priv->adev[i]);
			priv->adev[i] = NULL;
		}
	}
	return ret;
}

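/*
 * Remove auxiliary devices that are either globally disabled via
 * MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV or no longer supported after a
 * reconfiguration, in reverse creation order.
 */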
static void delete_drivers(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	bool delete_all;
	int i;

	delete_all = priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;

	for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
		bool is_supported = false;

		if (!priv->adev[i])
			continue;

		if (mlx5_adev_devices[i].is_supported && !delete_all)
			is_supported = mlx5_adev_devices[i].is_supported(dev);

		if (is_supported)
			continue;

		del_adev(&priv->adev[i]->adev);
		priv->adev[i] = NULL;
	}
}

/* This function is used after mlx5_core_dev is reconfigured. */
int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	lockdep_assert_held(&mlx5_intf_mutex);

	delete_drivers(dev);
	if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
		return 0;

	return add_drivers(dev);
}

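/*
 * Fold PCI domain, bus and slot into a single id. PCI_SLOT() discards
 * the function number, so all functions of one physical device (e.g.
 * both PFs of a dual-port NIC) compare equal.
 */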
static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
{
	return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
		     (dev->pdev->bus->number << 8) |
		     PCI_SLOT(dev->pdev->devfn));
}

static int next_phys_dev(struct device *dev, const void *data)
{
	struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
	struct mlx5_core_dev *mdev = madev->mdev;
	const struct mlx5_core_dev *curr = data;

	if (!mlx5_core_is_pf(mdev))
		return 0;

	if (mdev == curr)
		return 0;

	if (mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr))
		return 0;

	return 1;
}

/* This function is called in two flows:
 * 1. During initialization of mlx5_core_dev, where no locking is needed.
 * 2. During the LAG configuration stage, where the caller holds
 *    &mlx5_intf_mutex.
 */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
	struct auxiliary_device *adev;
	struct mlx5_adev *madev;

	if (!mlx5_core_is_pf(dev))
		return NULL;

	adev = auxiliary_find_device(NULL, dev, &next_phys_dev);
	if (!adev)
		return NULL;

	madev = container_of(adev, struct mlx5_adev, adev);
	put_device(&adev->dev);
	return madev->mdev;
}

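/* Wrappers around mlx5_intf_mutex for use outside this file. */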
void mlx5_dev_list_lock(void)
{
	mutex_lock(&mlx5_intf_mutex);
}

void mlx5_dev_list_unlock(void)
{
	mutex_unlock(&mlx5_intf_mutex);
}

int mlx5_dev_list_trylock(void)
{
	return mutex_trylock(&mlx5_intf_mutex);
}