/* /freebsd/sys/dev/cxgbe/iw_cxgbe/provider.c (revision 19261079) */
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define	LINUXKPI_PARAM_PREFIX iw_cxgbe_

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <asm/pgtable.h>
#include <linux/page.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#include "iw_cxgbe.h"
#include "user.h"

static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default = 1)");

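/*
 * Address handles, multicast groups, and MADs are InfiniBand concepts with
 * no iWARP equivalent, so these verbs are stubbed out and return -ENOSYS.
 */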
static int c4iw_modify_port(struct ib_device *ibdev,
			    u8 port, int port_modify_mask,
			    struct ib_port_modify *props)
{
	return -ENOSYS;
}

static int c4iw_ah_create(struct ib_ah *ah,
			  struct ib_ah_attr *ah_attr, u32 flags,
			  struct ib_udata *udata)
{
	return -ENOSYS;
}

static void c4iw_ah_destroy(struct ib_ah *ah, u32 flags)
{
}

static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
		u8 port_num, const struct ib_wc *in_wc,
		const struct ib_grh *in_grh,
		const struct ib_mad_hdr *in_mad,
		size_t in_mad_size,
		struct ib_mad_hdr *out_mad,
		size_t *out_mad_size,
		u16 *out_mad_pkey_index)
{
	return -ENOSYS;
}

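/*
 * Tear down a user context: free any mmap entries still queued on it and
 * release the per-context device resources.
 */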
static void c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
	struct c4iw_dev *rhp;
	struct c4iw_mm_entry *mm, *tmp;

	pr_debug("context %p\n", context);
	rhp = to_c4iw_dev(ucontext->ibucontext.device);

	CTR2(KTR_IW_CXGBE, "%s ucontext %p", __func__, ucontext);

	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
}

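/*
 * Set up a new user context.  If the user library is recent enough, the
 * device status page is exported to it: a key is returned in the response
 * and a matching mmap entry is queued so that a later c4iw_mmap() of that
 * key maps the page.  Older libraries get the status page disabled instead.
 */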
static int c4iw_alloc_ucontext(struct ib_ucontext *ucontext,
			       struct ib_udata *udata)
{
	struct ib_device *ibdev = ucontext->device;
	struct c4iw_ucontext *context = to_c4iw_ucontext(ucontext);
	struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
	static int warned;
	struct c4iw_alloc_ucontext_resp uresp;
	int ret = 0;
	struct c4iw_mm_entry *mm = NULL;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);

	if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
		if (!warned++)
			log(LOG_ERR, "%s Warning - downlevel libcxgb4 "
			       "(non-fatal), device status page disabled.\n",
			       __func__);
		rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
	} else {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (!mm) {
			ret = -ENOMEM;
			goto err;
		}

		uresp.status_page_size = PAGE_SIZE;

		spin_lock(&context->mmap_lock);
		uresp.status_page_key = context->key;
		context->key += PAGE_SIZE;
		spin_unlock(&context->mmap_lock);

		ret = ib_copy_to_udata(udata, &uresp,
				       sizeof(uresp) - sizeof(uresp.reserved));
		if (ret)
			goto err_mm;

		mm->key = uresp.status_page_key;
		mm->addr = vtophys(rhp->rdev.status_page);
		mm->len = PAGE_SIZE;
		insert_mmap(context, mm);
	}
	return 0;
err_mm:
	kfree(mm);
err:
	return ret;
}

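/*
 * mmap() handler for user contexts.  The file offset encodes a key that was
 * handed out earlier (status page, WQ/CQ memory, or doorbell space); look it
 * up in the context's mmap list and map the corresponding physical range.
 * BAR2 addresses are mapped write-combined when the adapter enables that.
 */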
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct c4iw_rdev *rdev;
	int ret = 0;
	struct c4iw_mm_entry *mm;
	struct c4iw_ucontext *ucontext;
	u64 addr = 0;

	CTR4(KTR_IW_CXGBE, "%s:1 ctx %p vma %p, vm_start %u", __func__,
			context, vma, vma->vm_start);

	CTR4(KTR_IW_CXGBE, "%s:1a pgoff 0x%lx key 0x%x len %d", __func__,
	    vma->vm_pgoff, key, len);

	if (vma->vm_start & (PAGE_SIZE-1)) {
		CTR3(KTR_IW_CXGBE, "%s:2 unaligned vm_start %u vma %p",
		    __func__, vma->vm_start, vma);
		return -EINVAL;
	}

	rdev = &(to_c4iw_dev(context->device)->rdev);
	ucontext = to_c4iw_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm) {
		CTR4(KTR_IW_CXGBE, "%s:3 ucontext %p key %u len %u", __func__,
		    ucontext, key, len);
		return -EINVAL;
	}
	addr = mm->addr;
	kfree(mm);

	/*
	 * User DB-GTS registers if addr in udbs_res range,
	 * else WQ or CQ memory.
	 */
	if (rdev->adap->iwt.wc_en && addr >= rdev->bar2_pa &&
			addr < rdev->bar2_pa + rdev->bar2_len)
		vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);

	ret = io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
			len, vma->vm_page_prot);
	CTR4(KTR_IW_CXGBE, "%s:4 ctx %p vma %p ret %u", __func__, context, vma,
	    ret);
	return ret;
}

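/*
 * Return a protection domain's PDID to the adapter's resource table and
 * update the PD statistics.
 */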
static void
c4iw_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct c4iw_pd *php = to_c4iw_pd(pd);
	struct c4iw_dev *rhp = php->rhp;

	CTR3(KTR_IW_CXGBE, "%s: pd %p, pdid 0x%x", __func__, pd, php->pdid);

	c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
	mutex_lock(&rhp->rdev.stats.lock);
	rhp->rdev.stats.pd.cur--;
	mutex_unlock(&rhp->rdev.stats.lock);
}

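/*
 * Allocate a protection domain: take a PDID from the adapter's resource
 * table and, for userspace callers, copy the PDID back through udata so the
 * user library can reference it.
 */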
static int
c4iw_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct c4iw_pd *php = to_c4iw_pd(pd);
	struct ib_device *ibdev = pd->device;
	u32 pdid;
	struct c4iw_dev *rhp;

	CTR4(KTR_IW_CXGBE, "%s: ibdev %p, pd %p, data %p", __func__, ibdev,
	    pd, udata);
	rhp = to_c4iw_dev(ibdev);
	pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
	if (!pdid)
		return -EINVAL;

	php->pdid = pdid;
	php->rhp = rhp;
	if (udata) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
			c4iw_deallocate_pd(&php->ibpd, udata);
			return -EFAULT;
		}
	}
	mutex_lock(&rhp->rdev.stats.lock);
	rhp->rdev.stats.pd.cur++;
	if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
		rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
	mutex_unlock(&rhp->rdev.stats.lock);

	CTR5(KTR_IW_CXGBE,
	    "%s: ibdev %p, udata %p, pdid 0x%x, pd %p", __func__,
	    ibdev, udata, pdid, php);
	return (0);
}

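/*
 * iWARP does not use P_Keys; report a single all-zero entry so generic
 * verbs consumers that query the P_Key table keep working.
 */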
static int
c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{

	CTR5(KTR_IW_CXGBE, "%s ibdev %p, port %d, index %d, pkey %p", __func__,
	    ibdev, port, index, pkey);

	*pkey = 0;
	return (0);
}

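/*
 * iWARP has no GIDs either; synthesize one from the MAC address of the
 * port's first VI so the GID table is non-empty.
 */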
static int
c4iw_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid)
{
	struct c4iw_dev *dev;
	struct port_info *pi;
	struct adapter *sc;

	CTR5(KTR_IW_CXGBE, "%s ibdev %p, port %d, index %d, gid %p", __func__,
	    ibdev, port, index, gid);

	memset(&gid->raw[0], 0, sizeof(gid->raw));
	dev = to_c4iw_dev(ibdev);
	sc = dev->rdev.adap;
	if (port == 0 || port > sc->params.nports)
		return (-EINVAL);
	pi = sc->port[port - 1];
	memcpy(&gid->raw[0], pi->vi[0].hw_addr, ETHER_ADDR_LEN);
	return (0);
}

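/*
 * Report device attributes.  Everything is derived from the adapter's
 * negotiated parameters and the T4 hardware limits; no vendor-specific
 * input or output (uhw) is accepted.
 */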
static int
c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
		struct ib_udata *uhw)
{
	struct c4iw_dev *dev = to_c4iw_dev(ibdev);
	struct adapter *sc = dev->rdev.adap;

	CTR3(KTR_IW_CXGBE, "%s ibdev %p, props %p", __func__, ibdev, props);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, sc->port[0]->vi[0].hw_addr,
	    ETHER_ADDR_LEN);
	props->hw_ver = sc->params.chipid;
	props->fw_ver = sc->params.fw_vers;
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = T4_PAGESIZE_MASK;
	props->vendor_id = pci_get_vendor(sc->dev);
	props->vendor_part_id = pci_get_device(sc->dev);
	props->max_mr_size = T4_MAX_MR_SIZE;
	props->max_qp = sc->vres.qp.size / 2;
	props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
	props->max_sge = T4_MAX_RECV_SGE;
	props->max_sge_rd = 1;
	props->max_res_rd_atom = sc->params.max_ird_adapter;
	props->max_qp_rd_atom = min(sc->params.max_ordird_qp,
	    c4iw_max_read_depth);
	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
	props->max_cq = sc->vres.qp.size;
	props->max_cqe = dev->rdev.hw_queue.t4_max_cq_depth;
	props->max_mr = c4iw_num_stags(&dev->rdev);
	props->max_pd = T4_MAX_NUM_PD;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len = t4_max_fr_depth(&dev->rdev, use_dsgl);

	return (0);
}

/*
 * Returns -errno on failure.
 */
static int
c4iw_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props)
{
	struct c4iw_dev *dev;
	struct adapter *sc;
	struct port_info *pi;
	struct ifnet *ifp;

	CTR4(KTR_IW_CXGBE, "%s ibdev %p, port %d, props %p", __func__, ibdev,
	    port, props);

	dev = to_c4iw_dev(ibdev);
	sc = dev->rdev.adap;
	if (port == 0 || port > sc->params.nports)
		return (-EINVAL);
	pi = sc->port[port - 1];
	ifp = pi->vi[0].ifp;

	memset(props, 0, sizeof(struct ib_port_attr));
	props->max_mtu = IB_MTU_4096;
	if (ifp->if_mtu >= 4096)
		props->active_mtu = IB_MTU_4096;
	else if (ifp->if_mtu >= 2048)
		props->active_mtu = IB_MTU_2048;
	else if (ifp->if_mtu >= 1024)
		props->active_mtu = IB_MTU_1024;
	else if (ifp->if_mtu >= 512)
		props->active_mtu = IB_MTU_512;
	else
		props->active_mtu = IB_MTU_256;
	props->state = pi->link_cfg.link_ok ? IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = 2;
	props->max_msg_sz = -1;

	return 0;
}

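/*
 * Immutable port attributes: advertise iWARP port capabilities and reuse
 * the table sizes reported by c4iw_query_port().
 */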
static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

/*
 * Returns -errno on error.
 */
int
c4iw_register_device(struct c4iw_dev *dev)
{
	struct adapter *sc = dev->rdev.adap;
	struct ib_device *ibdev = &dev->ibdev;
	struct iw_cm_verbs *iwcm;
	int ret;

	CTR3(KTR_IW_CXGBE, "%s c4iw_dev %p, adapter %p", __func__, dev, sc);
	BUG_ON(!sc->port[0]);
	ret = linux_pci_attach_device(sc->dev, NULL, NULL, &dev->pdev);
	if (ret)
		return (ret);

#define	c4iw_ib_cq c4iw_cq
#define	c4iw_ib_pd c4iw_pd
#define	c4iw_ib_qp c4iw_qp
#define	c4iw_ib_ucontext c4iw_ucontext
	INIT_IB_DEVICE_OPS(&ibdev->ops, c4iw, CXGB4);

	strlcpy(ibdev->name, device_get_nameunit(sc->dev), sizeof(ibdev->name));
	memset(&ibdev->node_guid, 0, sizeof(ibdev->node_guid));
	memcpy(&ibdev->node_guid, sc->port[0]->vi[0].hw_addr, ETHER_ADDR_LEN);
	ibdev->owner = THIS_MODULE;
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
	if (fastreg_support)
		dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	ibdev->local_dma_lkey = 0;
	ibdev->uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	ibdev->node_type = RDMA_NODE_RNIC;
	strlcpy(ibdev->node_desc, C4IW_NODE_DESC, sizeof(ibdev->node_desc));
	ibdev->phys_port_cnt = sc->params.nports;
	ibdev->num_comp_vectors = 1;
	ibdev->dma_device = &dev->pdev.dev;
	ibdev->query_device = c4iw_query_device;
	ibdev->query_port = c4iw_query_port;
	ibdev->modify_port = c4iw_modify_port;
	ibdev->query_pkey = c4iw_query_pkey;
	ibdev->query_gid = c4iw_query_gid;
	ibdev->alloc_ucontext = c4iw_alloc_ucontext;
	ibdev->dealloc_ucontext = c4iw_dealloc_ucontext;
	ibdev->mmap = c4iw_mmap;
	ibdev->alloc_pd = c4iw_allocate_pd;
	ibdev->dealloc_pd = c4iw_deallocate_pd;
	ibdev->create_ah = c4iw_ah_create;
	ibdev->destroy_ah = c4iw_ah_destroy;
	ibdev->create_qp = c4iw_create_qp;
	ibdev->modify_qp = c4iw_ib_modify_qp;
	ibdev->query_qp = c4iw_ib_query_qp;
	ibdev->destroy_qp = c4iw_destroy_qp;
	ibdev->create_cq = c4iw_create_cq;
	ibdev->destroy_cq = c4iw_destroy_cq;
	ibdev->resize_cq = c4iw_resize_cq;
	ibdev->poll_cq = c4iw_poll_cq;
	ibdev->get_dma_mr = c4iw_get_dma_mr;
	ibdev->reg_user_mr = c4iw_reg_user_mr;
	ibdev->dereg_mr = c4iw_dereg_mr;
	ibdev->alloc_mw = c4iw_alloc_mw;
	ibdev->dealloc_mw = c4iw_dealloc_mw;
	ibdev->alloc_mr = c4iw_alloc_mr;
	ibdev->map_mr_sg = c4iw_map_mr_sg;
	ibdev->attach_mcast = c4iw_multicast_attach;
	ibdev->detach_mcast = c4iw_multicast_detach;
	ibdev->process_mad = c4iw_process_mad;
	ibdev->req_notify_cq = c4iw_arm_cq;
	ibdev->post_send = c4iw_post_send;
	ibdev->post_recv = c4iw_post_receive;
	ibdev->uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
	ibdev->get_port_immutable = c4iw_port_immutable;

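	/*
	 * The iWARP connection manager callbacks live in a separately
	 * allocated iw_cm_verbs structure that is attached to the ib_device
	 * before registration.
	 */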
	iwcm = kmalloc(sizeof(*iwcm), GFP_KERNEL);
	if (iwcm == NULL) {
		linux_pci_detach_device(&dev->pdev);
		return (-ENOMEM);
	}

	iwcm->connect = c4iw_connect;
	iwcm->accept = c4iw_accept_cr;
	iwcm->reject = c4iw_reject_cr;
	iwcm->create_listen = c4iw_create_listen;
	iwcm->destroy_listen = c4iw_destroy_listen;
	iwcm->add_ref = c4iw_qp_add_ref;
	iwcm->rem_ref = c4iw_qp_rem_ref;
	iwcm->get_qp = c4iw_get_qp;
	ibdev->iwcm = iwcm;

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret) {
		kfree(iwcm);
		linux_pci_detach_device(&dev->pdev);
	}

	return (ret);
}

void
c4iw_unregister_device(struct c4iw_dev *dev)
{

	CTR3(KTR_IW_CXGBE, "%s c4iw_dev %p, adapter %p", __func__, dev,
	    dev->rdev.adap);
	ib_unregister_device(&dev->ibdev);
	kfree(dev->ibdev.iwcm);
	linux_pci_detach_device(&dev->pdev);
	return;
}
#endif