/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/mlx5/vport.h>
#include "mlx5_ib.h"

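/*
 * Map a device port-state policy onto the corresponding netlink
 * IFLA_VF_LINK_STATE_* value; unknown policies are reported as
 * __IFLA_VF_LINK_STATE_MAX so the caller can reject them.
 */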
static inline u32 mlx_to_net_policy(enum port_state_policy mlx_policy)
{
	switch (mlx_policy) {
	case MLX5_POLICY_DOWN:
		return IFLA_VF_LINK_STATE_DISABLE;
	case MLX5_POLICY_UP:
		return IFLA_VF_LINK_STATE_ENABLE;
	case MLX5_POLICY_FOLLOW:
		return IFLA_VF_LINK_STATE_AUTO;
	default:
		return __IFLA_VF_LINK_STATE_MAX;
	}
}

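/*
 * Report the current link-state policy of a VF: query the HCA vport
 * context of vport vf + 1 (vport 0 is the PF itself) and translate the
 * returned policy into the IFLA_VF_LINK_STATE_* encoding.
 */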
int mlx5_ib_get_vf_config(struct ib_device *device, int vf, u32 port,
			  struct ifla_vf_info *info)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(mdev, 1, 1, vf + 1, rep);
	if (err) {
		mlx5_ib_warn(dev, "failed to query port policy for vf %d (%d)\n",
			     vf, err);
		goto free;
	}
	memset(info, 0, sizeof(*info));
	info->linkstate = mlx_to_net_policy(rep->policy);
	if (info->linkstate == __IFLA_VF_LINK_STATE_MAX)
		err = -EINVAL;

free:
	kfree(rep);
	return err;
}

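/*
 * Map a netlink IFLA_VF_LINK_STATE_* value onto the device port-state
 * policy; unknown values map to MLX5_POLICY_INVALID.
 */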
static inline enum port_state_policy net_to_mlx_policy(int policy)
{
	switch (policy) {
	case IFLA_VF_LINK_STATE_DISABLE:
		return MLX5_POLICY_DOWN;
	case IFLA_VF_LINK_STATE_ENABLE:
		return MLX5_POLICY_UP;
	case IFLA_VF_LINK_STATE_AUTO:
		return MLX5_POLICY_FOLLOW;
	default:
		return MLX5_POLICY_INVALID;
	}
}

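/*
 * Set the link-state policy of a VF by modifying the state-policy field
 * of its HCA vport context; on success the driver's cached per-VF policy
 * is updated as well.
 */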
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u32 port, int state)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *in;
	struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	in->policy = net_to_mlx_policy(state);
	if (in->policy == MLX5_POLICY_INVALID) {
		err = -EINVAL;
		goto out;
	}
	in->field_select = MLX5_HCA_VPORT_SEL_STATE_POLICY;
	err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
	if (!err)
		vfs_ctx[vf].policy = in->policy;

out:
	kfree(in);
	return err;
}

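/*
 * Fill struct ifla_vf_stats with the VF's IB unicast/multicast traffic
 * counters as reported by the device's vport counter query.
 */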
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u32 port, struct ifla_vf_stats *stats)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	struct mlx5_core_dev *mdev;
	struct mlx5_ib_dev *dev;
	void *out;
	int err;

	dev = to_mdev(device);
	mdev = dev->mdev;

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_core_query_vport_counter(mdev, true, vf, port, out);
	if (err)
		goto ex;

	stats->rx_packets = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_unicast.packets);
	stats->tx_packets = MLX5_GET64_PR(query_vport_counter_out, out, transmitted_ib_unicast.packets);
	stats->rx_bytes = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_unicast.octets);
	stats->tx_bytes = MLX5_GET64_PR(query_vport_counter_out, out, transmitted_ib_unicast.octets);
	stats->multicast = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_multicast.packets);

ex:
	kfree(out);
	return err;
}

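/*
 * Program the node GUID of a VF in its HCA vport context and cache the
 * value in the driver's per-VF context so mlx5_ib_get_vf_guid() can
 * report it back.
 */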
static int set_vf_node_guid(struct ib_device *device, int vf, u32 port,
			    u64 guid)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *in;
	struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	in->field_select = MLX5_HCA_VPORT_SEL_NODE_GUID;
	in->node_guid = guid;
	err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
	if (!err) {
		vfs_ctx[vf].node_guid = guid;
		vfs_ctx[vf].node_guid_valid = 1;
	}
	kfree(in);
	return err;
}

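/*
 * Program the port GUID of a VF in its HCA vport context and cache the
 * value in the driver's per-VF context so mlx5_ib_get_vf_guid() can
 * report it back.
 */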
static int set_vf_port_guid(struct ib_device *device, int vf, u32 port,
			    u64 guid)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *in;
	struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	in->field_select = MLX5_HCA_VPORT_SEL_PORT_GUID;
	in->port_guid = guid;
	err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
	if (!err) {
		vfs_ctx[vf].port_guid = guid;
		vfs_ctx[vf].port_guid_valid = 1;
	}
	kfree(in);
	return err;
}

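/*
 * Dispatch a VF GUID assignment to the node- or port-GUID helper
 * according to the netlink attribute type.
 */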
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u32 port,
			u64 guid, int type)
{
	if (type == IFLA_VF_IB_NODE_GUID)
		return set_vf_node_guid(device, vf, port, guid);
	else if (type == IFLA_VF_IB_PORT_GUID)
		return set_vf_port_guid(device, vf, port, guid);

	return -EINVAL;
}

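/*
 * Report the administratively assigned node and port GUIDs of a VF;
 * GUIDs that were never set through mlx5_ib_set_vf_guid() read as 0.
 */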
int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
			struct ifla_vf_guid *node_guid,
			struct ifla_vf_guid *port_guid)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;

	node_guid->guid =
		vfs_ctx[vf].node_guid_valid ? vfs_ctx[vf].node_guid : 0;
	port_guid->guid =
		vfs_ctx[vf].port_guid_valid ? vfs_ctx[vf].port_guid : 0;

	return 0;
}