/*
 * Copyright (c) 2014, Mellanox Technologies inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "eswitch.h"

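/* Reapply the administratively configured node GUID, port GUID and state
 * policy to a VF vport. The values cached in sriov->vfs_ctx survive an
 * SRIOV disable/enable cycle, so a VF keeps its settings when it is
 * re-enabled.
 */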
static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	struct mlx5_hca_vport_context *in;
	int err = 0;

	/* Restore sriov guid and policy settings */
	if (sriov->vfs_ctx[vf].node_guid ||
	    sriov->vfs_ctx[vf].port_guid ||
	    sriov->vfs_ctx[vf].policy != MLX5_POLICY_INVALID) {
		in = kzalloc(sizeof(*in), GFP_KERNEL);
		if (!in)
			return -ENOMEM;

		in->node_guid = sriov->vfs_ctx[vf].node_guid;
		in->port_guid = sriov->vfs_ctx[vf].port_guid;
		in->policy = sriov->vfs_ctx[vf].policy;
		in->field_select =
			!!(in->port_guid) * MLX5_HCA_VPORT_SEL_PORT_GUID |
			!!(in->node_guid) * MLX5_HCA_VPORT_SEL_NODE_GUID |
			!!(in->policy) * MLX5_HCA_VPORT_SEL_STATE_POLICY;

		err = mlx5_core_modify_hca_vport_context(dev, 1, 1, vf + 1, in);
		if (err)
			mlx5_core_warn(dev, "modify vport context failed, unable to restore VF %d settings\n", vf);

		kfree(in);
	}

	return err;
}

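/* Device-level SRIOV enablement: set up the eswitch when this function is
 * the eswitch manager, then enable each VF HCA, program its MSI-X vector
 * count and, on IB ports, restore any saved GUID/policy settings. A failure
 * on one VF is logged and that VF is skipped; the remaining VFs are still
 * brought up.
 */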
static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	int err, vf, num_msix_count;

	if (!MLX5_ESWITCH_MANAGER(dev))
		goto enable_vfs_hca;

	err = mlx5_eswitch_enable(dev->priv.eswitch, num_vfs);
	if (err) {
		mlx5_core_warn(dev,
			       "failed to enable eswitch SRIOV (%d)\n", err);
		return err;
	}

enable_vfs_hca:
	num_msix_count = mlx5_get_default_msix_vec_count(dev, num_vfs);
	for (vf = 0; vf < num_vfs; vf++) {
		err = mlx5_core_enable_hca(dev, vf + 1);
		if (err) {
			mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err);
			continue;
		}

		err = mlx5_set_msix_vec_count(dev, vf + 1, num_msix_count);
		if (err) {
			mlx5_core_warn(dev,
				       "failed to set MSI-X vector count for VF %d, err %d\n",
				       vf, err);
			continue;
		}

		sriov->vfs_ctx[vf].enabled = 1;
		if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) {
			err = sriov_restore_guids(dev, vf);
			if (err) {
				mlx5_core_warn(dev,
					       "failed to restore VF %d settings, err %d\n",
					       vf, err);
				continue;
			}
		}
		mlx5_core_dbg(dev, "successfully enabled VF %d\n", vf);
	}

	return 0;
}

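/* Device-level SRIOV teardown, the inverse of mlx5_device_enable_sriov():
 * disable the VF HCAs in reverse order, tear down the eswitch when this
 * function manages it, then wait for firmware to return the pages it
 * allocated on behalf of the VFs.
 */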
static void
mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	int err;
	int vf;

	for (vf = num_vfs - 1; vf >= 0; vf--) {
		if (!sriov->vfs_ctx[vf].enabled)
			continue;
		err = mlx5_core_disable_hca(dev, vf + 1);
		if (err) {
			mlx5_core_warn(dev, "failed to disable VF %d\n", vf);
			continue;
		}
		sriov->vfs_ctx[vf].enabled = 0;
	}

	if (MLX5_ESWITCH_MANAGER(dev))
		mlx5_eswitch_disable(dev->priv.eswitch, clear_vf);

	if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
		mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}

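/* Enable SRIOV at the device level first and only then at the PCI level, so
 * the VFs are functional by the time they appear on the bus; unwind the
 * device state if pci_enable_sriov() fails.
 */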
static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	err = mlx5_device_enable_sriov(dev, num_vfs);
	if (err) {
		mlx5_core_warn(dev, "mlx5_device_enable_sriov failed: %d\n", err);
		return err;
	}

	err = pci_enable_sriov(pdev, num_vfs);
	if (err) {
		mlx5_core_warn(dev, "pci_enable_sriov failed: %d\n", err);
		mlx5_device_disable_sriov(dev, num_vfs, true);
	}
	return err;
}

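/* Disable SRIOV in the reverse order of mlx5_sriov_enable(): PCI level
 * first, then the device level. The VF count must be sampled before
 * pci_disable_sriov() resets it to zero.
 */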
static void mlx5_sriov_disable(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int num_vfs = pci_num_vf(dev->pdev);

	pci_disable_sriov(pdev);
	mlx5_device_disable_sriov(dev, num_vfs, true);
}

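/* sriov_configure() callback of the PCI driver, invoked when the
 * sriov_numvfs sysfs attribute is written: a non-zero count enables that
 * many VFs, zero disables SRIOV. Returns the number of VFs enabled on
 * success or a negative errno on failure.
 */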
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	int err = 0;

	mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);

	if (num_vfs)
		err = mlx5_sriov_enable(pdev, num_vfs);
	else
		mlx5_sriov_disable(pdev);

	if (!err)
		sriov->num_vfs = num_vfs;
	return err ? err : num_vfs;
}

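/* Change the MSI-X vector count of a single VF. Requires firmware support
 * for a dynamic MSI-X pool (num_total_dynamic_vf_msix); a count of zero
 * requests the default per-VF share for the current number of VFs.
 */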
int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count)
{
	struct pci_dev *pf = pci_physfn(vf);
	struct mlx5_core_sriov *sriov;
	struct mlx5_core_dev *dev;
	int num_vf_msix, id;

	dev = pci_get_drvdata(pf);
	num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
	if (!num_vf_msix)
		return -EOPNOTSUPP;

	if (!msix_vec_count)
		msix_vec_count =
			mlx5_get_default_msix_vec_count(dev, pci_num_vf(pf));

	sriov = &dev->priv.sriov;

	/* Reverse translation of the PCI VF function number to the internal
	 * function_id, which appears in the name of the virtfn symlink.
	 */
	for (id = 0; id < pci_num_vf(pf); id++) {
		if (!sriov->vfs_ctx[id].enabled)
			continue;

		if (vf->devfn == pci_iov_virtfn_devfn(pf, id))
			break;
	}

	if (id == pci_num_vf(pf) || !sriov->vfs_ctx[id].enabled)
		return -EINVAL;

	return mlx5_set_msix_vec_count(dev, id + 1, msix_vec_count);
}

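/* Called on driver attach paths where PCI-level SRIOV may have survived,
 * e.g. across a reload or recovery flow, to bring the device-level state
 * back in sync with the VFs that already exist on the bus.
 */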
int mlx5_sriov_attach(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_pf(dev) || !pci_num_vf(dev->pdev))
		return 0;

	/* If SRIOV VFs exist at the PCI level, enable them at the device level */
	return mlx5_device_enable_sriov(dev, pci_num_vf(dev->pdev));
}

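/* Counterpart of mlx5_sriov_attach(): tear down the device-level SRIOV
 * state while leaving PCI-level SRIOV intact. clear_vf is false so the
 * cached VF settings can be restored on the next attach.
 */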
void mlx5_sriov_detach(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_pf(dev))
		return;

	mlx5_device_disable_sriov(dev, pci_num_vf(dev->pdev), false);
}

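/* Maximum number of VFs this device can expose. On an embedded-CPU eswitch
 * manager the authoritative value is the host's total_vfs as reported by
 * firmware; otherwise (or with old firmware) fall back to the PCI SRIOV
 * capability.
 */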
static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
{
	u16 host_total_vfs;
	const u32 *out;

	if (mlx5_core_is_ecpf_esw_manager(dev)) {
		out = mlx5_esw_query_functions(dev);

		/* Old FW doesn't support getting total_vfs from esw func
		 * but supports getting it from pci_sriov.
		 */
		if (IS_ERR(out))
			goto done;
		host_total_vfs = MLX5_GET(query_esw_functions_out, out,
					  host_params_context.host_total_vfs);
		kvfree(out);
		if (host_total_vfs)
			return host_total_vfs;
	}

done:
	return pci_sriov_get_totalvfs(dev->pdev);
}

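/* Allocate the per-VF context array, sized for the maximum VF count the PCI
 * SRIOV capability advertises. PF only; VFs have no SRIOV state of their
 * own.
 */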
int mlx5_sriov_init(struct mlx5_core_dev *dev)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	struct pci_dev *pdev = dev->pdev;
	int total_vfs;

	if (!mlx5_core_is_pf(dev))
		return 0;

	total_vfs = pci_sriov_get_totalvfs(pdev);
	sriov->max_vfs = mlx5_get_max_vfs(dev);
	sriov->num_vfs = pci_num_vf(pdev);
	sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
	if (!sriov->vfs_ctx)
		return -ENOMEM;

	return 0;
}

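/* Free the per-VF context array allocated by mlx5_sriov_init(). */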
void mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;

	if (!mlx5_core_is_pf(dev))
		return;

	kfree(sriov->vfs_ctx);
}