xref: /linux/drivers/infiniband/hw/qedr/qedr_iw_cm.c (revision 9a6b55ac)
/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/route.h>
#include <net/ip6_route.h>
#include <net/flow.h>
#include "qedr.h"
#include "qedr_iw_cm.h"

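/*
 * Helpers that translate the qed core's CM info (addresses and ports in
 * host byte order) into the sockaddr_in/sockaddr_in6 fields of an
 * iw_cm_event, converting back to network byte order.
 */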
static inline void
qedr_fill_sockaddr4(const struct qed_iwarp_cm_info *cm_info,
		    struct iw_cm_event *event)
{
	struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;

	laddr->sin_family = AF_INET;
	raddr->sin_family = AF_INET;

	laddr->sin_port = htons(cm_info->local_port);
	raddr->sin_port = htons(cm_info->remote_port);

	laddr->sin_addr.s_addr = htonl(cm_info->local_ip[0]);
	raddr->sin_addr.s_addr = htonl(cm_info->remote_ip[0]);
}

static inline void
qedr_fill_sockaddr6(const struct qed_iwarp_cm_info *cm_info,
		    struct iw_cm_event *event)
{
	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
	struct sockaddr_in6 *raddr6 =
	    (struct sockaddr_in6 *)&event->remote_addr;
	int i;

	laddr6->sin6_family = AF_INET6;
	raddr6->sin6_family = AF_INET6;

	laddr6->sin6_port = htons(cm_info->local_port);
	raddr6->sin6_port = htons(cm_info->remote_port);

	for (i = 0; i < 4; i++) {
		laddr6->sin6_addr.in6_u.u6_addr32[i] =
		    htonl(cm_info->local_ip[i]);
		raddr6->sin6_addr.in6_u.u6_addr32[i] =
		    htonl(cm_info->remote_ip[i]);
	}
}

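/*
 * kref release callbacks. The ep holds references on its QP and on the
 * iw_cm_id; both are dropped before the ep itself is freed.
 */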
static void qedr_iw_free_qp(struct kref *ref)
{
	struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt);

	kfree(qp);
}

static void
qedr_iw_free_ep(struct kref *ref)
{
	struct qedr_iw_ep *ep = container_of(ref, struct qedr_iw_ep, refcnt);

	if (ep->qp)
		kref_put(&ep->qp->refcnt, qedr_iw_free_qp);

	if (ep->cm_id)
		ep->cm_id->rem_ref(ep->cm_id);

	kfree(ep);
}

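/*
 * qed core callback for an incoming MPA connect request on a listener:
 * allocate an endpoint in atomic context, record the qed endpoint
 * handle and deliver IW_CM_EVENT_CONNECT_REQUEST to the listener's
 * cm_id.
 */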
static void
qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_listener *listener = (struct qedr_iw_listener *)context;
	struct qedr_dev *dev = listener->dev;
	struct iw_cm_event event;
	struct qedr_iw_ep *ep;

	ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
	if (!ep)
		return;

	ep->dev = dev;
	ep->qed_context = params->ep_context;
	kref_init(&ep->refcnt);

	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.status = params->status;

	if (!IS_ENABLED(CONFIG_IPV6) ||
	    params->cm_info->ip_version == QED_TCP_IPV4)
		qedr_fill_sockaddr4(params->cm_info, &event);
	else
		qedr_fill_sockaddr6(params->cm_info, &event);

	event.provider_data = (void *)ep;
	event.private_data = (void *)params->cm_info->private_data;
	event.private_data_len = (u8)params->cm_info->private_data_len;
	event.ord = params->cm_info->ord;
	event.ird = params->cm_info->ird;

	listener->cm_id->event_handler(listener->cm_id, &event);
}

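/*
 * Translate a qed CM event into an iw_cm_event of the given type and
 * deliver it to the endpoint's cm_id, if one is attached.
 */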
static void
qedr_iw_issue_event(void *context,
		    struct qed_iwarp_cm_event_params *params,
		    enum iw_cm_event_type event_type)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct iw_cm_event event;

	memset(&event, 0, sizeof(event));
	event.status = params->status;
	event.event = event_type;

	if (params->cm_info) {
		event.ird = params->cm_info->ird;
		event.ord = params->cm_info->ord;
		event.private_data_len = params->cm_info->private_data_len;
		event.private_data = (void *)params->cm_info->private_data;
	}

	if (ep->cm_id)
		ep->cm_id->event_handler(ep->cm_id, &event);
}

static void
qedr_iw_close_event(void *context, struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;

	if (ep->cm_id)
		qedr_iw_issue_event(context, params, IW_CM_EVENT_CLOSE);

	kref_put(&ep->refcnt, qedr_iw_free_ep);
}

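/*
 * Forward an asynchronous QP error reported by the qed core to the
 * ib_qp's event handler, mapped to the given ib_event_type.
 */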
static void
qedr_iw_qp_event(void *context,
		 struct qed_iwarp_cm_event_params *params,
		 enum ib_event_type ib_event, char *str)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;
	struct ib_qp *ibqp = &ep->qp->ibqp;
	struct ib_event event;

	DP_NOTICE(dev, "QP error received: %s\n", str);

	if (ibqp->event_handler) {
		event.event = ib_event;
		event.device = ibqp->device;
		event.element.qp = ibqp;
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

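/*
 * Disconnect handling is deferred to the driver's iwarp workqueue. The
 * worker reports IW_CM_EVENT_DISCONNECT and then moves the QP to SQD
 * (graceful disconnect) or ERR (abort), depending on the status passed
 * in by the qed core.
 */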
struct qedr_discon_work {
	struct work_struct		work;
	struct qedr_iw_ep		*ep;
	enum qed_iwarp_event_type	event;
	int				status;
};

static void qedr_iw_disconnect_worker(struct work_struct *work)
{
	struct qedr_discon_work *dwork =
	    container_of(work, struct qedr_discon_work, work);
	struct qed_rdma_modify_qp_in_params qp_params = { 0 };
	struct qedr_iw_ep *ep = dwork->ep;
	struct qedr_dev *dev = ep->dev;
	struct qedr_qp *qp = ep->qp;
	struct iw_cm_event event;

	/* The QP won't be released until we release the ep.
	 * The ep's refcnt was increased before this function was
	 * called, therefore it is safe to access the QP here.
	 */
	if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
			     &qp->iwarp_cm_flags))
		goto out;

	memset(&event, 0, sizeof(event));
	event.status = dwork->status;
	event.event = IW_CM_EVENT_DISCONNECT;

	/* A status of zero means a graceful disconnect was requested;
	 * modifying the QP to SQD is translated to a graceful
	 * disconnect. Otherwise a reset (ERR state) is sent.
	 */
	if (dwork->status)
		qp_params.new_state = QED_ROCE_QP_STATE_ERR;
	else
		qp_params.new_state = QED_ROCE_QP_STATE_SQD;

	if (ep->cm_id)
		ep->cm_id->event_handler(ep->cm_id, &event);

	SET_FIELD(qp_params.modify_flags,
		  QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);

	dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params);

	complete(&ep->qp->iwarp_cm_comp);
out:
	kfree(dwork);
	kref_put(&ep->refcnt, qedr_iw_free_ep);
}

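/*
 * qed core callback for a peer disconnect: take an extra reference on
 * the ep so a subsequent close event cannot free it while the work is
 * pending, then queue qedr_iw_disconnect_worker().
 */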
static void
qedr_iw_disconnect_event(void *context,
			 struct qed_iwarp_cm_event_params *params)
{
	struct qedr_discon_work *work;
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return;

	/* We can't get a close event before disconnect, but since
	 * we're deferring the handling to a workqueue we need to make
	 * sure the close event won't free the ep in the meantime, so
	 * we take an extra reference.
	 */
	kref_get(&ep->refcnt);

	work->ep = ep;
	work->event = params->event;
	work->status = params->status;

	INIT_WORK(&work->work, qedr_iw_disconnect_worker);
	queue_work(dev->iwarp_wq, &work->work);
}

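/*
 * Passive-side connection setup finished. A refused connection with no
 * QP attached (qedr_iw_reject() was called) just releases the ep;
 * otherwise signal iwarp_cm_comp, report IW_CM_EVENT_ESTABLISHED and,
 * on error, issue a close event as well.
 */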
static void
qedr_iw_passive_complete(void *context,
			 struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;

	/* We will only reach the following state if MPA_REJECT was called on
	 * passive. In this case there will be no associated QP.
	 */
	if ((params->status == -ECONNREFUSED) && (!ep->qp)) {
		DP_DEBUG(dev, QEDR_MSG_IWARP,
			 "PASSIVE connection refused releasing ep...\n");
		kref_put(&ep->refcnt, qedr_iw_free_ep);
		return;
	}

	complete(&ep->qp->iwarp_cm_comp);
	qedr_iw_issue_event(context, params, IW_CM_EVENT_ESTABLISHED);

	if (params->status < 0)
		qedr_iw_close_event(context, params);
}

static void
qedr_iw_active_complete(void *context,
			struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;

	complete(&ep->qp->iwarp_cm_comp);
	qedr_iw_issue_event(context, params, IW_CM_EVENT_CONNECT_REPLY);

	if (params->status < 0)
		kref_put(&ep->refcnt, qedr_iw_free_ep);
}

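/*
 * The active side received an MPA reply; request that the qed core send
 * the MPA RTR message to complete connection establishment.
 */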
static int
qedr_iw_mpa_reply(void *context, struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;
	struct qed_iwarp_send_rtr_in rtr_in;

	rtr_in.ep_context = params->ep_context;

	return dev->ops->iwarp_send_rtr(dev->rdma_ctx, &rtr_in);
}

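/*
 * Single dispatch point for all qed iwarp CM events: connection
 * management events are routed to the helpers above, while fatal QP and
 * LLP errors are forwarded to the ULP as IB asynchronous events.
 */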
static int
qedr_iw_event_handler(void *context, struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;

	switch (params->event) {
	case QED_IWARP_EVENT_MPA_REQUEST:
		qedr_iw_mpa_request(context, params);
		break;
	case QED_IWARP_EVENT_ACTIVE_MPA_REPLY:
		qedr_iw_mpa_reply(context, params);
		break;
	case QED_IWARP_EVENT_PASSIVE_COMPLETE:
		qedr_iw_passive_complete(context, params);
		break;
	case QED_IWARP_EVENT_ACTIVE_COMPLETE:
		qedr_iw_active_complete(context, params);
		break;
	case QED_IWARP_EVENT_DISCONNECT:
		qedr_iw_disconnect_event(context, params);
		break;
	case QED_IWARP_EVENT_CLOSE:
		qedr_iw_close_event(context, params);
		break;
	case QED_IWARP_EVENT_RQ_EMPTY:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_RQ_EMPTY");
		break;
	case QED_IWARP_EVENT_IRQ_FULL:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_IRQ_FULL");
		break;
	case QED_IWARP_EVENT_LLP_TIMEOUT:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_LLP_TIMEOUT");
		break;
	case QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
				 "QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR");
		break;
	case QED_IWARP_EVENT_CQ_OVERFLOW:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_CQ_OVERFLOW");
		break;
	case QED_IWARP_EVENT_QP_CATASTROPHIC:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_QP_CATASTROPHIC");
		break;
	case QED_IWARP_EVENT_LOCAL_ACCESS_ERROR:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
				 "QED_IWARP_EVENT_LOCAL_ACCESS_ERROR");
		break;
	case QED_IWARP_EVENT_REMOTE_OPERATION_ERROR:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_REMOTE_OPERATION_ERROR");
		break;
	case QED_IWARP_EVENT_TERMINATE_RECEIVED:
		DP_NOTICE(dev, "Got terminate message\n");
		break;
	default:
		DP_NOTICE(dev, "Unknown event received %d\n", params->event);
		break;
	}
	return 0;
}

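/*
 * Find the net_device that owns the given local IP address (passed in
 * host byte order) and return its VLAN id, or 0 if the address does not
 * belong to a VLAN device.
 */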
static u16 qedr_iw_get_vlan_ipv4(struct qedr_dev *dev, u32 *addr)
{
	struct net_device *ndev;
	u16 vlan_id = 0;

	ndev = ip_dev_find(&init_net, htonl(addr[0]));

	if (ndev) {
		vlan_id = rdma_vlan_dev_vlan_id(ndev);
		dev_put(ndev);
	}
	if (vlan_id == 0xffff)
		vlan_id = 0;
	return vlan_id;
}

static u16 qedr_iw_get_vlan_ipv6(u32 *addr)
{
	struct net_device *ndev = NULL;
	struct in6_addr laddr6;
	u16 vlan_id = 0;
	int i;

	if (!IS_ENABLED(CONFIG_IPV6))
		return vlan_id;

	for (i = 0; i < 4; i++)
		laddr6.in6_u.u6_addr32[i] = htonl(addr[i]);

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ndev) {
		if (ipv6_chk_addr(&init_net, &laddr6, ndev, 1)) {
			vlan_id = rdma_vlan_dev_vlan_id(ndev);
			break;
		}
	}

	rcu_read_unlock();
	if (vlan_id == 0xffff)
		vlan_id = 0;

	return vlan_id;
}

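/*
 * Resolve the destination MAC for an outgoing connection: route the
 * destination address, look up the neighbour entry and copy its MAC if
 * it is valid, otherwise trigger neighbour resolution. These helpers
 * return 0 even if the MAC has not been resolved yet; only a routing
 * failure is reported as an error.
 */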
static int
qedr_addr4_resolve(struct qedr_dev *dev,
		   struct sockaddr_in *src_in,
		   struct sockaddr_in *dst_in, u8 *dst_mac)
{
	__be32 src_ip = src_in->sin_addr.s_addr;
	__be32 dst_ip = dst_in->sin_addr.s_addr;
	struct neighbour *neigh = NULL;
	struct rtable *rt = NULL;
	int rc = 0;

	rt = ip_route_output(&init_net, dst_ip, src_ip, 0, 0);
	if (IS_ERR(rt)) {
		DP_ERR(dev, "ip_route_output returned error\n");
		return -EINVAL;
	}

	neigh = dst_neigh_lookup(&rt->dst, &dst_ip);

	if (neigh) {
		rcu_read_lock();
		if (neigh->nud_state & NUD_VALID) {
			ether_addr_copy(dst_mac, neigh->ha);
			DP_DEBUG(dev, QEDR_MSG_QP, "mac_addr=[%pM]\n", dst_mac);
		} else {
			neigh_event_send(neigh, NULL);
		}
		rcu_read_unlock();
		neigh_release(neigh);
	}

	ip_rt_put(rt);

	return rc;
}

static int
qedr_addr6_resolve(struct qedr_dev *dev,
		   struct sockaddr_in6 *src_in,
		   struct sockaddr_in6 *dst_in, u8 *dst_mac)
{
	struct neighbour *neigh = NULL;
	struct dst_entry *dst;
	struct flowi6 fl6;
	int rc = 0;

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = dst_in->sin6_addr;
	fl6.saddr = src_in->sin6_addr;

	dst = ip6_route_output(&init_net, NULL, &fl6);

	if ((!dst) || dst->error) {
		if (dst) {
			DP_ERR(dev,
			       "ip6_route_output returned dst->error = %d\n",
			       dst->error);
			dst_release(dst);
		}
		return -EINVAL;
	}
	neigh = dst_neigh_lookup(dst, &fl6.daddr);
	if (neigh) {
		rcu_read_lock();
		if (neigh->nud_state & NUD_VALID) {
			ether_addr_copy(dst_mac, neigh->ha);
			DP_DEBUG(dev, QEDR_MSG_QP, "mac_addr=[%pM]\n", dst_mac);
		} else {
			neigh_event_send(neigh, NULL);
		}
		rcu_read_unlock();
		neigh_release(neigh);
	}

	dst_release(dst);

	return rc;
}

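/*
 * Look up a QP by number and take a reference under the xarray lock so
 * the QP cannot be freed while the CM operation is in flight.
 */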
static struct qedr_qp *qedr_iw_load_qp(struct qedr_dev *dev, u32 qpn)
{
	struct qedr_qp *qp;

	xa_lock(&dev->qps);
	qp = xa_load(&dev->qps, qpn);
	if (qp)
		kref_get(&qp->refcnt);
	xa_unlock(&dev->qps);

	return qp;
}

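/*
 * iw_cm connect() verb: build the qed connect parameters from the cm_id
 * addresses (IPv4 or IPv6), resolve the VLAN and destination MAC, and
 * hand the connection request to the qed core. If
 * QEDR_IWARP_CM_WAIT_FOR_CONNECT is already set, the QP is being
 * destroyed and the connect is aborted.
 */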
int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct qedr_dev *dev = get_qedr_dev(cm_id->device);
	struct qed_iwarp_connect_out out_params;
	struct qed_iwarp_connect_in in_params;
	struct qed_iwarp_cm_info *cm_info;
	struct sockaddr_in6 *laddr6;
	struct sockaddr_in6 *raddr6;
	struct sockaddr_in *laddr;
	struct sockaddr_in *raddr;
	struct qedr_iw_ep *ep;
	struct qedr_qp *qp;
	int rc = 0;
	int i;

	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
	raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
	raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;

	DP_DEBUG(dev, QEDR_MSG_IWARP, "MAPPED %d %d\n",
		 ntohs(((struct sockaddr_in *)&cm_id->remote_addr)->sin_port),
		 ntohs(raddr->sin_port));

	DP_DEBUG(dev, QEDR_MSG_IWARP,
		 "Connect source address: %pISpc, remote address: %pISpc\n",
		 &cm_id->local_addr, &cm_id->remote_addr);

	if (!laddr->sin_port || !raddr->sin_port)
		return -EINVAL;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->dev = dev;
	kref_init(&ep->refcnt);

	qp = qedr_iw_load_qp(dev, conn_param->qpn);
	if (!qp) {
		rc = -EINVAL;
		goto err;
	}

	ep->qp = qp;
	cm_id->add_ref(cm_id);
	ep->cm_id = cm_id;

	in_params.event_cb = qedr_iw_event_handler;
	in_params.cb_context = ep;

	cm_info = &in_params.cm_info;
	memset(cm_info->local_ip, 0, sizeof(cm_info->local_ip));
	memset(cm_info->remote_ip, 0, sizeof(cm_info->remote_ip));

	if (!IS_ENABLED(CONFIG_IPV6) ||
	    cm_id->remote_addr.ss_family == AF_INET) {
		cm_info->ip_version = QED_TCP_IPV4;

		cm_info->remote_ip[0] = ntohl(raddr->sin_addr.s_addr);
		cm_info->local_ip[0] = ntohl(laddr->sin_addr.s_addr);
		cm_info->remote_port = ntohs(raddr->sin_port);
		cm_info->local_port = ntohs(laddr->sin_port);
		cm_info->vlan = qedr_iw_get_vlan_ipv4(dev, cm_info->local_ip);

		rc = qedr_addr4_resolve(dev, laddr, raddr,
					(u8 *)in_params.remote_mac_addr);

		in_params.mss = dev->iwarp_max_mtu -
		    (sizeof(struct iphdr) + sizeof(struct tcphdr));

	} else {
		in_params.cm_info.ip_version = QED_TCP_IPV6;

		for (i = 0; i < 4; i++) {
			cm_info->remote_ip[i] =
			    ntohl(raddr6->sin6_addr.in6_u.u6_addr32[i]);
			cm_info->local_ip[i] =
			    ntohl(laddr6->sin6_addr.in6_u.u6_addr32[i]);
		}

		cm_info->local_port = ntohs(laddr6->sin6_port);
		cm_info->remote_port = ntohs(raddr6->sin6_port);

		in_params.mss = dev->iwarp_max_mtu -
		    (sizeof(struct ipv6hdr) + sizeof(struct tcphdr));

		cm_info->vlan = qedr_iw_get_vlan_ipv6(cm_info->local_ip);

		rc = qedr_addr6_resolve(dev, laddr6, raddr6,
					(u8 *)in_params.remote_mac_addr);
	}
	if (rc)
		goto err;

	DP_DEBUG(dev, QEDR_MSG_IWARP,
		 "ord = %d ird=%d private_data=%p private_data_len=%d rq_psn=%d\n",
		 conn_param->ord, conn_param->ird, conn_param->private_data,
		 conn_param->private_data_len, qp->rq_psn);

	cm_info->ord = conn_param->ord;
	cm_info->ird = conn_param->ird;
	cm_info->private_data = conn_param->private_data;
	cm_info->private_data_len = conn_param->private_data_len;
	in_params.qp = qp->qed_qp;
	memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);

	if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
			     &qp->iwarp_cm_flags))
		goto err; /* QP already being destroyed */

	rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
	if (rc) {
		complete(&qp->iwarp_cm_comp);
		goto err;
	}

	return rc;

err:
	kref_put(&ep->refcnt, qedr_iw_free_ep);
	return rc;
}

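/*
 * iw_cm create_listen() verb: allocate a listener object, translate the
 * bound address (IPv4 or IPv6) and its VLAN into qed listen parameters
 * and register the listener with the qed core.
 */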
int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	struct qedr_dev *dev = get_qedr_dev(cm_id->device);
	struct qedr_iw_listener *listener;
	struct qed_iwarp_listen_in iparams;
	struct qed_iwarp_listen_out oparams;
	struct sockaddr_in *laddr;
	struct sockaddr_in6 *laddr6;
	int rc;
	int i;

	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;

	DP_DEBUG(dev, QEDR_MSG_IWARP,
		 "Create Listener address: %pISpc\n", &cm_id->local_addr);

	listener = kzalloc(sizeof(*listener), GFP_KERNEL);
	if (!listener)
		return -ENOMEM;

	listener->dev = dev;
	cm_id->add_ref(cm_id);
	listener->cm_id = cm_id;
	listener->backlog = backlog;

	iparams.cb_context = listener;
	iparams.event_cb = qedr_iw_event_handler;
	iparams.max_backlog = backlog;

	if (!IS_ENABLED(CONFIG_IPV6) ||
	    cm_id->local_addr.ss_family == AF_INET) {
		iparams.ip_version = QED_TCP_IPV4;
		memset(iparams.ip_addr, 0, sizeof(iparams.ip_addr));

		iparams.ip_addr[0] = ntohl(laddr->sin_addr.s_addr);
		iparams.port = ntohs(laddr->sin_port);
		iparams.vlan = qedr_iw_get_vlan_ipv4(dev, iparams.ip_addr);
	} else {
		iparams.ip_version = QED_TCP_IPV6;

		for (i = 0; i < 4; i++) {
			iparams.ip_addr[i] =
			    ntohl(laddr6->sin6_addr.in6_u.u6_addr32[i]);
		}

		iparams.port = ntohs(laddr6->sin6_port);

		iparams.vlan = qedr_iw_get_vlan_ipv6(iparams.ip_addr);
	}
	rc = dev->ops->iwarp_create_listen(dev->rdma_ctx, &iparams, &oparams);
	if (rc)
		goto err;

	listener->qed_handle = oparams.handle;
	cm_id->provider_data = listener;
	return rc;

err:
	cm_id->rem_ref(cm_id);
	kfree(listener);
	return rc;
}

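/*
 * Tear down a listener created by qedr_iw_create_listen() and drop the
 * reference it held on the cm_id.
 */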
int qedr_iw_destroy_listen(struct iw_cm_id *cm_id)
{
	struct qedr_iw_listener *listener = cm_id->provider_data;
	struct qedr_dev *dev = get_qedr_dev(cm_id->device);
	int rc = 0;

	if (listener->qed_handle)
		rc = dev->ops->iwarp_destroy_listen(dev->rdma_ctx,
						    listener->qed_handle);

	cm_id->rem_ref(cm_id);
	return rc;
}

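/*
 * iw_cm accept() verb for a passive connection: attach the QP selected
 * by the ULP to the endpoint and ask the qed core to accept the pending
 * MPA request, unless the QP is already being destroyed.
 */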
int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)cm_id->provider_data;
	struct qedr_dev *dev = ep->dev;
	struct qedr_qp *qp;
	struct qed_iwarp_accept_in params;
	int rc = 0;

	DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);

	qp = qedr_iw_load_qp(dev, conn_param->qpn);
	if (!qp) {
		DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn);
		return -EINVAL;
	}

	ep->qp = qp;
	cm_id->add_ref(cm_id);
	ep->cm_id = cm_id;

	params.ep_context = ep->qed_context;
	params.cb_context = ep;
	params.qp = ep->qp->qed_qp;
	params.private_data = conn_param->private_data;
	params.private_data_len = conn_param->private_data_len;
	params.ird = conn_param->ird;
	params.ord = conn_param->ord;

	if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
			     &qp->iwarp_cm_flags))
		goto err; /* QP already destroyed */

	rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
	if (rc) {
		complete(&qp->iwarp_cm_comp);
		goto err;
	}

	return rc;

err:
	kref_put(&ep->refcnt, qedr_iw_free_ep);

	return rc;
}

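/*
 * iw_cm reject() verb: refuse a pending MPA request, optionally echoing
 * private data back to the initiator. No QP is associated with the ep
 * on this path.
 */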
int qedr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)cm_id->provider_data;
	struct qedr_dev *dev = ep->dev;
	struct qed_iwarp_reject_in params;

	params.ep_context = ep->qed_context;
	params.cb_context = ep;
	params.private_data = pdata;
	params.private_data_len = pdata_len;
	ep->qp = NULL;

	return dev->ops->iwarp_reject(dev->rdma_ctx, &params);
}

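/*
 * QP reference-counting hooks and the QP-number lookup used by the
 * iw_cm layer.
 */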
void qedr_iw_qp_add_ref(struct ib_qp *ibqp)
{
	struct qedr_qp *qp = get_qedr_qp(ibqp);

	kref_get(&qp->refcnt);
}

void qedr_iw_qp_rem_ref(struct ib_qp *ibqp)
{
	struct qedr_qp *qp = get_qedr_qp(ibqp);

	kref_put(&qp->refcnt, qedr_iw_free_qp);
}

struct ib_qp *qedr_iw_get_qp(struct ib_device *ibdev, int qpn)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);

	return xa_load(&dev->qps, qpn);
}
805