/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
#define	LINUXKPI_PARAM_PREFIX iw_cxgbe_

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <asm/pgtable.h>
#include <linux/page.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#include "iw_cxgbe.h"
#include "user.h"

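/*
 * fastreg_support controls whether the device advertises
 * IB_DEVICE_MEM_MGT_EXTENSIONS (see c4iw_register_device() below).  With
 * LINUXKPI_PARAM_PREFIX defined above, the linuxkpi module_param glue is
 * expected to expose this as a compat.linuxkpi.iw_cxgbe_fastreg_support
 * sysctl/tunable; the exact name depends on the linuxkpi version in use.
 */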
static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default = 1)");

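/*
 * Port modification, address handles, multicast, and MAD processing have
 * no meaning for an iWARP device, so the following verbs are stubs that
 * fail with -ENOSYS.
 */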
static int c4iw_modify_port(struct ib_device *ibdev,
			    u8 port, int port_modify_mask,
			    struct ib_port_modify *props)
{
	return -ENOSYS;
}

static int c4iw_ah_create(struct ib_ah *ah,
			  struct ib_ah_attr *ah_attr, u32 flags,
			  struct ib_udata *udata)
{
	return -ENOSYS;
}

static void c4iw_ah_destroy(struct ib_ah *ah, u32 flags)
{
}

static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
		u8 port_num, const struct ib_wc *in_wc,
		const struct ib_grh *in_grh,
		const struct ib_mad_hdr *in_mad,
		size_t in_mad_size,
		struct ib_mad_hdr *out_mad,
		size_t *out_mad_size,
		u16 *out_mad_pkey_index)
{
	return -ENOSYS;
}

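/*
 * Tear down a user context: free any mmap entries still queued on the
 * context and release the per-context rdev state.
 */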
static void c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
	struct c4iw_dev *rhp;
	struct c4iw_mm_entry *mm, *tmp;

	pr_debug("context %p\n", context);
	rhp = to_c4iw_dev(ucontext->ibucontext.device);

	CTR2(KTR_IW_CXGBE, "%s ucontext %p", __func__, ucontext);

	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
}

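/*
 * Allocate a user context.  If the user library provides room in the
 * response, hand out an mmap key for the device status page so the
 * library can map it; otherwise disable the status page and warn once
 * about the downlevel libcxgb4.
 */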
static int c4iw_alloc_ucontext(struct ib_ucontext *ucontext,
			       struct ib_udata *udata)
{
	struct ib_device *ibdev = ucontext->device;
	struct c4iw_ucontext *context = to_c4iw_ucontext(ucontext);
	struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
	static int warned;
	struct c4iw_alloc_ucontext_resp uresp;
	int ret = 0;
	struct c4iw_mm_entry *mm = NULL;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);

	if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
		if (!warned++)
			log(LOG_ERR, "%s Warning - downlevel libcxgb4 "
			       "(non-fatal), device status page disabled.\n",
			       __func__);
		rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
	} else {
		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			ret = -ENOMEM;
			goto err;
		}

		uresp.status_page_size = PAGE_SIZE;

		spin_lock(&context->mmap_lock);
		uresp.status_page_key = context->key;
		context->key += PAGE_SIZE;
		spin_unlock(&context->mmap_lock);

		ret = ib_copy_to_udata(udata, &uresp,
				       sizeof(uresp) - sizeof(uresp.reserved));
		if (ret)
			goto err_mm;

		mm->key = uresp.status_page_key;
		mm->addr = vtophys(rhp->rdev.status_page);
		mm->len = PAGE_SIZE;
		insert_mmap(context, mm);
	}
	return 0;
err_mm:
	kfree(mm);
err:
	return ret;
}

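/*
 * mmap handler for user verbs.  The file offset encodes a key that was
 * handed out in an earlier verbs response (e.g. at context or queue
 * creation); look it up, and remove it, in the context's mmap list to
 * recover the physical address to map.
 */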
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct c4iw_rdev *rdev;
	int ret = 0;
	struct c4iw_mm_entry *mm;
	struct c4iw_ucontext *ucontext;
	u64 addr = 0;

	CTR4(KTR_IW_CXGBE, "%s:1 ctx %p vma %p, vm_start %lu", __func__,
			context, vma, vma->vm_start);

	CTR4(KTR_IW_CXGBE, "%s:1a pgoff 0x%lx key 0x%x len %d", __func__,
	    vma->vm_pgoff, key, len);

	if (vma->vm_start & (PAGE_SIZE-1)) {
		CTR3(KTR_IW_CXGBE, "%s:2 unaligned vm_start %lu vma %p",
		    __func__, vma->vm_start, vma);
		return -EINVAL;
	}

	rdev = &(to_c4iw_dev(context->device)->rdev);
	ucontext = to_c4iw_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm) {
		CTR4(KTR_IW_CXGBE, "%s:3 ucontext %p key %u len %u", __func__,
		    ucontext, key, len);
		return -EINVAL;
	}
	addr = mm->addr;
	kfree(mm);

	/*
	 * Map user DB-GTS registers write-combined when the address falls
	 * within the BAR2 range; WQ and CQ memory is mapped as-is.
	 */
	if (rdev->adap->iwt.wc_en && addr >= rdev->bar2_pa &&
			addr < rdev->bar2_pa + rdev->bar2_len)
		vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);

	ret = rdma_user_mmap_io(context, vma, addr >> PAGE_SHIFT, len,
	    vma->vm_page_prot, NULL);
	CTR4(KTR_IW_CXGBE, "%s:4 ctx %p vma %p ret %u", __func__, context, vma,
	    ret);
	return ret;
}

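/*
 * Free a protection domain: return the PDID to the resource table and
 * update the PD statistics.
 */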
static void
c4iw_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct c4iw_pd *php = to_c4iw_pd(pd);
	struct c4iw_dev *rhp = php->rhp;

	CTR3(KTR_IW_CXGBE, "%s: pd %p, pdid 0x%x", __func__, pd, php->pdid);

	c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
	mutex_lock(&rhp->rdev.stats.lock);
	rhp->rdev.stats.pd.cur--;
	mutex_unlock(&rhp->rdev.stats.lock);
}

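/*
 * Allocate a protection domain: take a free PDID from the resource table
 * and, for a user PD, copy the PDID back to the library.
 */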
static int
c4iw_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct c4iw_pd *php = to_c4iw_pd(pd);
	struct ib_device *ibdev = pd->device;
	u32 pdid;
	struct c4iw_dev *rhp;

	CTR4(KTR_IW_CXGBE, "%s: ibdev %p, pd %p, data %p", __func__, ibdev,
	    pd, udata);
	rhp = to_c4iw_dev(ibdev);
	pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
	if (!pdid)
		return -EINVAL;

	php->pdid = pdid;
	php->rhp = rhp;
	if (udata) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
			c4iw_deallocate_pd(&php->ibpd, udata);
			return -EFAULT;
		}
	}
	mutex_lock(&rhp->rdev.stats.lock);
	rhp->rdev.stats.pd.cur++;
	if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
		rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
	mutex_unlock(&rhp->rdev.stats.lock);

	CTR5(KTR_IW_CXGBE, "%s: ibdev %p, data %p, pdid 0x%x, pd %p", __func__,
	    ibdev, udata, pdid, php);
	return (0);
}

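/*
 * iWARP does not use P_Keys; report a single zero P_Key.
 */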
static int
c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{

	CTR5(KTR_IW_CXGBE, "%s ibdev %p, port %d, index %d, pkey %p", __func__,
	    ibdev, port, index, pkey);

	*pkey = 0;
	return (0);
}

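/*
 * Report the port's MAC address, zero-padded, as its GID, as is
 * conventional for iWARP devices.
 */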
static int
c4iw_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid)
{
	struct c4iw_dev *dev;
	struct port_info *pi;
	struct adapter *sc;

	CTR5(KTR_IW_CXGBE, "%s ibdev %p, port %d, index %d, gid %p", __func__,
	    ibdev, port, index, gid);

	memset(&gid->raw[0], 0, sizeof(gid->raw));
	dev = to_c4iw_dev(ibdev);
	sc = dev->rdev.adap;
	if (port == 0 || port > sc->params.nports)
		return (-EINVAL);
	pi = sc->port[port - 1];
	memcpy(&gid->raw[0], pi->vi[0].hw_addr, ETHER_ADDR_LEN);
	return (0);
}

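/*
 * Report device attributes.  Most limits come straight from the adapter's
 * virtualized resource ranges and the hw_queue limits computed at attach.
 */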
static int
c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
		struct ib_udata *uhw)
{
	struct c4iw_dev *dev = to_c4iw_dev(ibdev);
	struct adapter *sc = dev->rdev.adap;

	CTR3(KTR_IW_CXGBE, "%s ibdev %p, props %p", __func__, ibdev, props);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, sc->port[0]->vi[0].hw_addr,
	    ETHER_ADDR_LEN);
	props->hw_ver = sc->params.chipid;
	props->fw_ver = sc->params.fw_vers;
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = T4_PAGESIZE_MASK;
	props->vendor_id = pci_get_vendor(sc->dev);
	props->vendor_part_id = pci_get_device(sc->dev);
	props->max_mr_size = T4_MAX_MR_SIZE;
	props->max_qp = sc->vres.qp.size / 2;
	props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
	props->max_sge = T4_MAX_RECV_SGE;
	props->max_sge_rd = 1;
	props->max_res_rd_atom = sc->params.max_ird_adapter;
	props->max_qp_rd_atom = min(sc->params.max_ordird_qp,
	    c4iw_max_read_depth);
	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
	props->max_cq = sc->vres.qp.size;
	props->max_cqe = dev->rdev.hw_queue.t4_max_cq_depth;
	props->max_mr = c4iw_num_stags(&dev->rdev);
	props->max_pd = T4_MAX_NUM_PD;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len = t4_max_fr_depth(&dev->rdev, use_dsgl);

	return (0);
}

/*
 * Returns -errno on failure.
 */
static int
c4iw_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props)
{
	struct c4iw_dev *dev;
	struct adapter *sc;
	struct port_info *pi;
	if_t ifp;

	CTR4(KTR_IW_CXGBE, "%s ibdev %p, port %d, props %p", __func__, ibdev,
	    port, props);

	dev = to_c4iw_dev(ibdev);
	sc = dev->rdev.adap;
	if (port == 0 || port > sc->params.nports)
		return (-EINVAL);
	pi = sc->port[port - 1];
	ifp = pi->vi[0].ifp;

	memset(props, 0, sizeof(struct ib_port_attr));
	props->max_mtu = IB_MTU_4096;
	if (if_getmtu(ifp) >= 4096)
		props->active_mtu = IB_MTU_4096;
	else if (if_getmtu(ifp) >= 2048)
		props->active_mtu = IB_MTU_2048;
	else if (if_getmtu(ifp) >= 1024)
		props->active_mtu = IB_MTU_1024;
	else if (if_getmtu(ifp) >= 512)
		props->active_mtu = IB_MTU_512;
	else
		props->active_mtu = IB_MTU_256;
	props->state = pi->link_cfg.link_ok ? IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = 2;
	props->max_msg_sz = -1;

	return 0;
}

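/*
 * Immutable port data: iWARP core capabilities plus the table sizes
 * reported by c4iw_query_port().
 */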
static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

/*
 * Returns -errno on error.
 */
int
c4iw_register_device(struct c4iw_dev *dev)
{
	struct adapter *sc = dev->rdev.adap;
	struct ib_device *ibdev = &dev->ibdev;
	struct iw_cm_verbs *iwcm;
	int ret;

	CTR3(KTR_IW_CXGBE, "%s c4iw_dev %p, adapter %p", __func__, dev, sc);
	BUG_ON(!sc->port[0]);
	ret = linux_pci_attach_device(sc->dev, NULL, NULL, &dev->pdev);
	if (ret)
		return (ret);

#define	c4iw_ib_cq c4iw_cq
#define	c4iw_ib_pd c4iw_pd
#define	c4iw_ib_qp c4iw_qp
#define	c4iw_ib_ucontext c4iw_ucontext
	INIT_IB_DEVICE_OPS(&ibdev->ops, c4iw, CXGB4);

	strlcpy(ibdev->name, device_get_nameunit(sc->dev), sizeof(ibdev->name));
	memset(&ibdev->node_guid, 0, sizeof(ibdev->node_guid));
	memcpy(&ibdev->node_guid, sc->port[0]->vi[0].hw_addr, ETHER_ADDR_LEN);
	ibdev->owner = THIS_MODULE;
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
	if (fastreg_support)
		dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	ibdev->local_dma_lkey = 0;
	ibdev->uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	ibdev->node_type = RDMA_NODE_RNIC;
	strlcpy(ibdev->node_desc, C4IW_NODE_DESC, sizeof(ibdev->node_desc));
	ibdev->phys_port_cnt = sc->params.nports;
	ibdev->num_comp_vectors = 1;
	ibdev->dma_device = &dev->pdev.dev;
	ibdev->query_device = c4iw_query_device;
	ibdev->query_port = c4iw_query_port;
	ibdev->modify_port = c4iw_modify_port;
	ibdev->query_pkey = c4iw_query_pkey;
	ibdev->query_gid = c4iw_query_gid;
	ibdev->alloc_ucontext = c4iw_alloc_ucontext;
	ibdev->dealloc_ucontext = c4iw_dealloc_ucontext;
	ibdev->mmap = c4iw_mmap;
	ibdev->alloc_pd = c4iw_allocate_pd;
	ibdev->dealloc_pd = c4iw_deallocate_pd;
	ibdev->create_ah = c4iw_ah_create;
	ibdev->destroy_ah = c4iw_ah_destroy;
	ibdev->create_qp = c4iw_create_qp;
	ibdev->modify_qp = c4iw_ib_modify_qp;
	ibdev->query_qp = c4iw_ib_query_qp;
	ibdev->destroy_qp = c4iw_destroy_qp;
	ibdev->create_cq = c4iw_create_cq;
	ibdev->destroy_cq = c4iw_destroy_cq;
	ibdev->resize_cq = c4iw_resize_cq;
	ibdev->poll_cq = c4iw_poll_cq;
	ibdev->get_dma_mr = c4iw_get_dma_mr;
	ibdev->reg_user_mr = c4iw_reg_user_mr;
	ibdev->dereg_mr = c4iw_dereg_mr;
	ibdev->alloc_mw = c4iw_alloc_mw;
	ibdev->dealloc_mw = c4iw_dealloc_mw;
	ibdev->alloc_mr = c4iw_alloc_mr;
	ibdev->map_mr_sg = c4iw_map_mr_sg;
	ibdev->attach_mcast = c4iw_multicast_attach;
	ibdev->detach_mcast = c4iw_multicast_detach;
	ibdev->process_mad = c4iw_process_mad;
	ibdev->req_notify_cq = c4iw_arm_cq;
	ibdev->post_send = c4iw_post_send;
	ibdev->post_recv = c4iw_post_receive;
	ibdev->uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
	ibdev->get_port_immutable = c4iw_port_immutable;

	iwcm = kmalloc(sizeof(*iwcm), GFP_KERNEL);
	if (iwcm == NULL) {
		linux_pci_detach_device(&dev->pdev);
		return (-ENOMEM);
	}

	iwcm->connect = c4iw_connect;
	iwcm->accept = c4iw_accept_cr;
	iwcm->reject = c4iw_reject_cr;
	iwcm->create_listen = c4iw_create_listen;
	iwcm->destroy_listen = c4iw_destroy_listen;
	iwcm->add_ref = c4iw_qp_add_ref;
	iwcm->rem_ref = c4iw_qp_rem_ref;
	iwcm->get_qp = c4iw_get_qp;
	ibdev->iwcm = iwcm;

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret) {
		kfree(iwcm);
		linux_pci_detach_device(&dev->pdev);
	}

	return (ret);
}

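/*
 * Undo c4iw_register_device(): unregister from the IB core, free the
 * iw_cm_verbs, and detach the shadow linuxkpi pci device.
 */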
void
c4iw_unregister_device(struct c4iw_dev *dev)
{

	CTR3(KTR_IW_CXGBE, "%s c4iw_dev %p, adapter %p", __func__, dev,
	    dev->rdev.adap);
	ib_unregister_device(&dev->ibdev);
	kfree(dev->ibdev.iwcm);
	linux_pci_detach_device(&dev->pdev);
}
#endif /* TCP_OFFLOAD */