xref: /linux/drivers/infiniband/hw/usnic/usnic_fwd.c (revision 0be3ff0c)
/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/netdevice.h>
#include <linux/pci.h>

#include "enic_api.h"
#include "usnic_common_pkt_hdr.h"
#include "usnic_fwd.h"
#include "usnic_log.h"

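/*
 * Issue a devcmd to the PF on behalf of the given vNIC through the enic
 * proxy interface.  Caller must hold ufdev->lock.  An EINVAL returned by
 * the firmware for CMD_DEL_FILTER means the filter is already gone, so
 * that case is only logged at debug level.
 */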
static int usnic_fwd_devcmd_locked(struct usnic_fwd_dev *ufdev, int vnic_idx,
					enum vnic_devcmd_cmd cmd, u64 *a0,
					u64 *a1)
{
	int status;
	struct net_device *netdev = ufdev->netdev;

	lockdep_assert_held(&ufdev->lock);

	status = enic_api_devcmd_proxy_by_index(netdev,
			vnic_idx,
			cmd,
			a0, a1,
			1000);
	if (status) {
		if (status == ERR_EINVAL && cmd == CMD_DEL_FILTER) {
			usnic_dbg("Dev %s vnic idx %u cmd %u already deleted",
					ufdev->name, vnic_idx, cmd);
		} else {
			usnic_err("Dev %s vnic idx %u cmd %u failed with status %d\n",
					ufdev->name, vnic_idx, cmd,
					status);
		}
	} else {
		usnic_dbg("Dev %s vnic idx %u cmd %u success",
				ufdev->name, vnic_idx, cmd);
	}

	return status;
}

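/* Locking wrapper around usnic_fwd_devcmd_locked(). */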
static int usnic_fwd_devcmd(struct usnic_fwd_dev *ufdev, int vnic_idx,
				enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1)
{
	int status;

	spin_lock(&ufdev->lock);
	status = usnic_fwd_devcmd_locked(ufdev, vnic_idx, cmd, a0, a1);
	spin_unlock(&ufdev->lock);

	return status;
}

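/*
 * Allocate a forwarding device for the given enic PCI function.  The PF
 * netdev is taken from the PCI driver data, and its interface name is
 * cached in ufdev->name for use in log messages.
 */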
struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev)
{
	struct usnic_fwd_dev *ufdev;

	ufdev = kzalloc(sizeof(*ufdev), GFP_KERNEL);
	if (!ufdev)
		return NULL;

	ufdev->pdev = pdev;
	ufdev->netdev = pci_get_drvdata(pdev);
	spin_lock_init(&ufdev->lock);
	BUILD_BUG_ON(sizeof(ufdev->name) != sizeof(ufdev->netdev->name));
	strcpy(ufdev->name, ufdev->netdev->name);

	return ufdev;
}

void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev)
{
	kfree(ufdev);
}

void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, const char mac[ETH_ALEN])
{
	spin_lock(&ufdev->lock);
	memcpy(&ufdev->mac, mac, sizeof(ufdev->mac));
	spin_unlock(&ufdev->lock);
}

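/*
 * Record the uplink IP address.  Only the first address registered is
 * kept; later calls are ignored until usnic_fwd_del_ipaddr() clears it.
 */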
void usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr)
{
	spin_lock(&ufdev->lock);
	if (!ufdev->inaddr)
		ufdev->inaddr = inaddr;
	spin_unlock(&ufdev->lock);
}

void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev)
{
	spin_lock(&ufdev->lock);
	ufdev->inaddr = 0;
	spin_unlock(&ufdev->lock);
}

void usnic_fwd_carrier_up(struct usnic_fwd_dev *ufdev)
{
	spin_lock(&ufdev->lock);
	ufdev->link_up = 1;
	spin_unlock(&ufdev->lock);
}

void usnic_fwd_carrier_down(struct usnic_fwd_dev *ufdev)
{
	spin_lock(&ufdev->lock);
	ufdev->link_up = 0;
	spin_unlock(&ufdev->lock);
}

void usnic_fwd_set_mtu(struct usnic_fwd_dev *ufdev, unsigned int mtu)
{
	spin_lock(&ufdev->lock);
	ufdev->mtu = mtu;
	spin_unlock(&ufdev->lock);
}

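/* Flows may only be installed while the uplink carrier is up. */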
static int usnic_fwd_dev_ready_locked(struct usnic_fwd_dev *ufdev)
{
	lockdep_assert_held(&ufdev->lock);

	if (!ufdev->link_up)
		return -EPERM;

	return 0;
}

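/*
 * An IPv4 5-tuple filter must specify both the destination address and
 * the destination port, the port must be non-zero, and the address must
 * match the IP registered for this uplink.  Other filter types are
 * accepted as-is.
 */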
static int validate_filter_locked(struct usnic_fwd_dev *ufdev,
					struct filter *filter)
{

	lockdep_assert_held(&ufdev->lock);

	if (filter->type == FILTER_IPV4_5TUPLE) {
		if (!(filter->u.ipv4.flags & FILTER_FIELD_5TUP_DST_AD))
			return -EACCES;
		if (!(filter->u.ipv4.flags & FILTER_FIELD_5TUP_DST_PT))
			return -EBUSY;
		else if (ufdev->inaddr == 0)
			return -EINVAL;
		else if (filter->u.ipv4.dst_port == 0)
			return -ERANGE;
		else if (ntohl(ufdev->inaddr) != filter->u.ipv4.dst_addr)
			return -EFAULT;
		else
			return 0;
	}

	return 0;
}

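/*
 * Lay out the devcmd payload in the caller-provided buffer: a filter TLV
 * immediately followed by an action TLV.
 */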
static void fill_tlv(struct filter_tlv *tlv, struct filter *filter,
		struct filter_action *action)
{
	tlv->type = CLSF_TLV_FILTER;
	tlv->length = sizeof(struct filter);
	*((struct filter *)&tlv->val) = *filter;

	tlv = (struct filter_tlv *)((char *)tlv + sizeof(struct filter_tlv) +
			sizeof(struct filter));
	tlv->type = CLSF_TLV_ACTION;
	tlv->length = sizeof(struct filter_action);
	*((struct filter_action *)&tlv->val) = *action;
}

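/*
 * Install a flow on the uplink.  The filter and action are marshalled
 * into a DMA-coherent TLV buffer and handed to the firmware with
 * CMD_ADD_FILTER; on success the firmware-assigned filter id is saved in
 * the returned flow.  Returns an ERR_PTR on failure.
 */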
struct usnic_fwd_flow*
usnic_fwd_alloc_flow(struct usnic_fwd_dev *ufdev, struct filter *filter,
				struct usnic_filter_action *uaction)
{
	struct filter_tlv *tlv;
	struct pci_dev *pdev;
	struct usnic_fwd_flow *flow;
	uint64_t a0, a1;
	uint64_t tlv_size;
	dma_addr_t tlv_pa;
	int status;

	pdev = ufdev->pdev;
	tlv_size = (2*sizeof(struct filter_tlv) + sizeof(struct filter) +
			sizeof(struct filter_action));

	flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	tlv = dma_alloc_coherent(&pdev->dev, tlv_size, &tlv_pa, GFP_ATOMIC);
	if (!tlv) {
		usnic_err("Failed to allocate memory\n");
		status = -ENOMEM;
		goto out_free_flow;
	}

	fill_tlv(tlv, filter, &uaction->action);

	spin_lock(&ufdev->lock);
	status = usnic_fwd_dev_ready_locked(ufdev);
	if (status) {
		usnic_err("Forwarding dev %s not ready with status %d\n",
				ufdev->name, status);
		goto out_free_tlv;
	}

	status = validate_filter_locked(ufdev, filter);
	if (status) {
		usnic_err("Failed to validate filter with status %d\n",
				status);
		goto out_free_tlv;
	}

	/* Issue Devcmd */
	a0 = tlv_pa;
	a1 = tlv_size;
	status = usnic_fwd_devcmd_locked(ufdev, uaction->vnic_idx,
						CMD_ADD_FILTER, &a0, &a1);
	if (status) {
		usnic_err("VF %s Filter add failed with status:%d",
				ufdev->name, status);
		status = -EFAULT;
		goto out_free_tlv;
	} else {
		usnic_dbg("VF %s FILTER ID:%llu", ufdev->name, a0);
	}

	flow->flow_id = (uint32_t) a0;
	flow->vnic_idx = uaction->vnic_idx;
	flow->ufdev = ufdev;

out_free_tlv:
	spin_unlock(&ufdev->lock);
	dma_free_coherent(&pdev->dev, tlv_size, tlv, tlv_pa);
	if (!status)
		return flow;
out_free_flow:
	kfree(flow);
	return ERR_PTR(status);
}

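/*
 * Remove the flow's filter from the firmware and free the flow.  Delete
 * failures are logged but not propagated to the caller (see comment
 * below).
 */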
int usnic_fwd_dealloc_flow(struct usnic_fwd_flow *flow)
{
	int status;
	u64 a0, a1;

	a0 = flow->flow_id;

	status = usnic_fwd_devcmd(flow->ufdev, flow->vnic_idx,
					CMD_DEL_FILTER, &a0, &a1);
	if (status) {
		if (status == ERR_EINVAL) {
			usnic_dbg("Filter %u already deleted for VF Idx %u pf: %s status: %d",
					flow->flow_id, flow->vnic_idx,
					flow->ufdev->name, status);
		} else {
			usnic_err("PF %s VF Idx %u Filter: %u FILTER DELETE failed with status %d",
					flow->ufdev->name, flow->vnic_idx,
					flow->flow_id, status);
		}
		status = 0;
		/*
		 * Log the error and fake success to the caller because if
		 * a flow fails to be deleted in the firmware, it is an
		 * unrecoverable error.
		 */
	} else {
		usnic_dbg("PF %s VF Idx %u Filter: %u FILTER DELETED",
				flow->ufdev->name, flow->vnic_idx,
				flow->flow_id);
	}

	kfree(flow);
	return status;
}

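/* Enable the VF queue pair (RQ/WQ) identified by qp_idx. */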
int usnic_fwd_enable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx)
{
	int status;
	struct net_device *pf_netdev;
	u64 a0, a1;

	pf_netdev = ufdev->netdev;
	a0 = qp_idx;
	a1 = CMD_QP_RQWQ;

	status = usnic_fwd_devcmd(ufdev, vnic_idx, CMD_QP_ENABLE,
						&a0, &a1);
	if (status) {
		usnic_err("PF %s VNIC Index %u RQ Index: %u ENABLE Failed with status %d",
				netdev_name(pf_netdev),
				vnic_idx,
				qp_idx,
				status);
	} else {
		usnic_dbg("PF %s VNIC Index %u RQ Index: %u ENABLED",
				netdev_name(pf_netdev),
				vnic_idx, qp_idx);
	}

	return status;
}

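/* Disable the VF queue pair (RQ/WQ) identified by qp_idx. */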
int usnic_fwd_disable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx)
{
	int status;
	u64 a0, a1;
	struct net_device *pf_netdev;

	pf_netdev = ufdev->netdev;
	a0 = qp_idx;
	a1 = CMD_QP_RQWQ;

	status = usnic_fwd_devcmd(ufdev, vnic_idx, CMD_QP_DISABLE,
			&a0, &a1);
	if (status) {
		usnic_err("PF %s VNIC Index %u RQ Index: %u DISABLE Failed with status %d",
				netdev_name(pf_netdev),
				vnic_idx,
				qp_idx,
				status);
	} else {
		usnic_dbg("PF %s VNIC Index %u RQ Index: %u DISABLED",
				netdev_name(pf_netdev),
				vnic_idx,
				qp_idx);
	}

	return status;
}