xref: /freebsd/sys/dev/cxgbe/iw_cxgbe/cm.c (revision 85732ac8)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *	  copyright notice, this list of conditions and the following
18  *	  disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *	  copyright notice, this list of conditions and the following
22  *	  disclaimer in the documentation and/or other materials
23  *	  provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include "opt_inet.h"
38 
39 #ifdef TCP_OFFLOAD
40 #include <sys/types.h>
41 #include <sys/malloc.h>
42 #include <sys/socket.h>
43 #include <sys/socketvar.h>
44 #include <sys/sockio.h>
45 #include <sys/taskqueue.h>
46 #include <netinet/in.h>
47 #include <net/route.h>
48 
49 #include <netinet/in_systm.h>
50 #include <netinet/in_pcb.h>
51 #include <netinet6/in6_pcb.h>
52 #include <netinet/ip.h>
53 #include <netinet/in_fib.h>
54 #include <netinet6/in6_fib.h>
55 #include <netinet6/scope6_var.h>
56 #include <netinet/ip_var.h>
57 #include <netinet/tcp_var.h>
58 #include <netinet/tcp.h>
59 #include <netinet/tcpip.h>
60 
61 #include <netinet/toecore.h>
62 
63 struct sge_iq;
64 struct rss_header;
65 struct cpl_set_tcb_rpl;
66 #include <linux/types.h>
67 #include "offload.h"
68 #include "tom/t4_tom.h"
69 
70 #define TOEPCB(so)  ((struct toepcb *)(so_sototcpcb((so))->t_toe))
71 
72 #include "iw_cxgbe.h"
73 #include <linux/module.h>
74 #include <linux/workqueue.h>
75 #include <linux/notifier.h>
76 #include <linux/inetdevice.h>
77 #include <linux/if_vlan.h>
78 #include <net/netevent.h>
79 #include <rdma/rdma_cm.h>
80 
81 static spinlock_t req_lock;
82 static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list;
83 static struct work_struct c4iw_task;
84 static struct workqueue_struct *c4iw_taskq;
85 static LIST_HEAD(err_cqe_list);
86 static spinlock_t err_cqe_lock;
87 static LIST_HEAD(listen_port_list);
88 static DEFINE_MUTEX(listen_port_mutex);
89 
90 static void process_req(struct work_struct *ctx);
91 static void start_ep_timer(struct c4iw_ep *ep);
92 static int stop_ep_timer(struct c4iw_ep *ep);
93 static int set_tcpinfo(struct c4iw_ep *ep);
94 static void process_timeout(struct c4iw_ep *ep);
95 static void process_err_cqes(void);
96 static void *alloc_ep(int size, gfp_t flags);
97 static void close_socket(struct socket *so);
98 static int send_mpa_req(struct c4iw_ep *ep);
99 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen);
100 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen);
101 static void close_complete_upcall(struct c4iw_ep *ep, int status);
102 static int send_abort(struct c4iw_ep *ep);
103 static void peer_close_upcall(struct c4iw_ep *ep);
104 static void peer_abort_upcall(struct c4iw_ep *ep);
105 static void connect_reply_upcall(struct c4iw_ep *ep, int status);
106 static int connect_request_upcall(struct c4iw_ep *ep);
107 static void established_upcall(struct c4iw_ep *ep);
108 static int process_mpa_reply(struct c4iw_ep *ep);
109 static int process_mpa_request(struct c4iw_ep *ep);
110 static void process_peer_close(struct c4iw_ep *ep);
111 static void process_conn_error(struct c4iw_ep *ep);
112 static void process_close_complete(struct c4iw_ep *ep);
113 static void ep_timeout(unsigned long arg);
114 static void setiwsockopt(struct socket *so);
115 static void init_iwarp_socket(struct socket *so, void *arg);
116 static void uninit_iwarp_socket(struct socket *so);
117 static void process_data(struct c4iw_ep *ep);
118 static void process_connected(struct c4iw_ep *ep);
119 static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag);
120 static void process_socket_event(struct c4iw_ep *ep);
121 static void release_ep_resources(struct c4iw_ep *ep);
122 static int process_terminate(struct c4iw_ep *ep);
123 static int terminate(struct sge_iq *iq, const struct rss_header *rss,
124     struct mbuf *m);
125 static int add_ep_to_req_list(struct c4iw_ep *ep, int ep_events);
126 static struct listen_port_info *
127 add_ep_to_listenlist(struct c4iw_listen_ep *lep);
128 static int rem_ep_from_listenlist(struct c4iw_listen_ep *lep);
129 static struct c4iw_listen_ep *
130 find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so);
131 static int get_ifnet_from_raddr(struct sockaddr_storage *raddr,
132 		struct ifnet **ifp);
133 static void process_newconn(struct c4iw_listen_ep *master_lep,
134 		struct socket *new_so);
135 #define START_EP_TIMER(ep) \
136     do { \
137 	    CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \
138 		__func__, __LINE__, (ep)); \
139 	    start_ep_timer(ep); \
140     } while (0)
141 
142 #define STOP_EP_TIMER(ep) \
143     ({ \
144 	    CTR3(KTR_IW_CXGBE, "stop_ep_timer (%s:%d) ep %p", \
145 		__func__, __LINE__, (ep)); \
146 	    stop_ep_timer(ep); \
147     })
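
/*
 * Note: START_EP_TIMER expands to a plain statement, while STOP_EP_TIMER is
 * a GNU statement expression so that it also yields stop_ep_timer()'s return
 * value (1 if the TIMEOUT bit was already set, 0 otherwise).  A typical
 * caller, as in process_mpa_reply() below:
 *
 *	if (STOP_EP_TIMER(ep))
 *		return 0;
 */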
148 
149 #define GET_LOCAL_ADDR(pladdr, so) \
150 	do { \
151 		struct sockaddr_storage *__a = NULL; \
152 		struct  inpcb *__inp = sotoinpcb(so); \
153 		KASSERT(__inp != NULL, \
154 		   ("GET_LOCAL_ADDR(%s):so:%p, inp = NULL", __func__, so)); \
155 		if (__inp->inp_vflag & INP_IPV4) \
156 			in_getsockaddr(so, (struct sockaddr **)&__a); \
157 		else \
158 			in6_getsockaddr(so, (struct sockaddr **)&__a); \
159 		*(pladdr) = *__a; \
160 		free(__a, M_SONAME); \
161 	} while (0)
162 
163 #define GET_REMOTE_ADDR(praddr, so) \
164 	do { \
165 		struct sockaddr_storage *__a = NULL; \
166 		struct  inpcb *__inp = sotoinpcb(so); \
167 		KASSERT(__inp != NULL, \
168 		   ("GET_REMOTE_ADDR(%s):so:%p, inp = NULL", __func__, so)); \
169 		if (__inp->inp_vflag & INP_IPV4) \
170 			in_getpeeraddr(so, (struct sockaddr **)&__a); \
171 		else \
172 			in6_getpeeraddr(so, (struct sockaddr **)&__a); \
173 		*(praddr) = *__a; \
174 		free(__a, M_SONAME); \
175 	} while (0)
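
/*
 * Sketch of how the two macros above are used together (see
 * process_newconn() for the real call sites): both copy the result into
 * caller-provided storage and free the sockaddr that the kernel allocated
 * with M_SONAME, so the caller owns no extra allocation afterwards.
 *
 *	struct sockaddr_storage local, remote;
 *
 *	GET_LOCAL_ADDR(&local, so);
 *	GET_REMOTE_ADDR(&remote, so);
 */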
176 
177 static char *states[] = {
178 	"idle",
179 	"listen",
180 	"connecting",
181 	"mpa_wait_req",
182 	"mpa_req_sent",
183 	"mpa_req_rcvd",
184 	"mpa_rep_sent",
185 	"fpdu_mode",
186 	"aborting",
187 	"closing",
188 	"moribund",
189 	"dead",
190 	NULL,
191 };
192 
193 static void deref_cm_id(struct c4iw_ep_common *epc)
194 {
195 	epc->cm_id->rem_ref(epc->cm_id);
196 	epc->cm_id = NULL;
197 	set_bit(CM_ID_DEREFED, &epc->history);
198 }
199 
200 static void ref_cm_id(struct c4iw_ep_common *epc)
201 {
202 	set_bit(CM_ID_REFED, &epc->history);
203 	epc->cm_id->add_ref(epc->cm_id);
204 }
205 
206 static void deref_qp(struct c4iw_ep *ep)
207 {
208 	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
209 	clear_bit(QP_REFERENCED, &ep->com.flags);
210 	set_bit(QP_DEREFED, &ep->com.history);
211 }
212 
213 static void ref_qp(struct c4iw_ep *ep)
214 {
215 	set_bit(QP_REFERENCED, &ep->com.flags);
216 	set_bit(QP_REFED, &ep->com.history);
217 	c4iw_qp_add_ref(&ep->com.qp->ibqp);
218 }
219 /* allocated per TCP port while listening */
220 struct listen_port_info {
221 	uint16_t port_num; /* TCP port address */
222 	struct list_head list; /* belongs to listen_port_list */
223 	struct list_head lep_list; /* per port lep list */
224 	uint32_t refcnt; /* number of lep's listening */
225 };
226 
227 /*
228  * The following two lists are used to manage INADDR_ANY listeners:
229  * 1) listen_port_list
230  * 2) lep_list
231  *
232  * Below is an overview of the INADDR_ANY listener lists on a system with a
233  * two-port adapter:
234  *   |------------------|
235  *   |listen_port_list  |
236  *   |------------------|
237  *            |
238  *            |              |-----------|       |-----------|
239  *            |              | port_num:X|       | port_num:X|
240  *            |--------------|-list------|-------|-list------|-------....
241  *                           | lep_list----|     | lep_list----|
242  *                           | refcnt    | |     | refcnt    | |
243  *                           |           | |     |           | |
244  *                           |           | |     |           | |
245  *                           |-----------| |     |-----------| |
246  *                                         |                   |
247  *                                         |                   |
248  *                                         |                   |
249  *                                         |                   |         lep1                  lep2
250  *                                         |                   |    |----------------|    |----------------|
251  *                                         |                   |----| listen_ep_list |----| listen_ep_list |
252  *                                         |                        |----------------|    |----------------|
253  *                                         |
254  *                                         |
255  *                                         |        lep1                  lep2
256  *                                         |   |----------------|    |----------------|
257  *                                         |---| listen_ep_list |----| listen_ep_list |
258  *                                             |----------------|    |----------------|
259  *
260  * Because this is a two-port adapter, there are two leps (lep1 & lep2) for
261  * each TCP port number.
262  *
263  * Here 'lep1' is always marked as the master lep, because solisten() is
264  * always called through the first lep.
265  *
266  */
267 static struct listen_port_info *
268 add_ep_to_listenlist(struct c4iw_listen_ep *lep)
269 {
270 	uint16_t port;
271 	struct listen_port_info *port_info = NULL;
272 	struct sockaddr_storage *laddr = &lep->com.local_addr;
273 
274 	port = (laddr->ss_family == AF_INET) ?
275 		((struct sockaddr_in *)laddr)->sin_port :
276 		((struct sockaddr_in6 *)laddr)->sin6_port;
277 
278 	mutex_lock(&listen_port_mutex);
279 
280 	list_for_each_entry(port_info, &listen_port_list, list)
281 		if (port_info->port_num == port)
282 			goto found_port;
283 
284 	port_info = malloc(sizeof(*port_info), M_CXGBE, M_WAITOK);
285 	port_info->port_num = port;
286 	port_info->refcnt = 0;
287 
288 	list_add_tail(&port_info->list, &listen_port_list);
289 	INIT_LIST_HEAD(&port_info->lep_list);
290 
291 found_port:
292 	port_info->refcnt++;
293 	list_add_tail(&lep->listen_ep_list, &port_info->lep_list);
294 	mutex_unlock(&listen_port_mutex);
295 	return port_info;
296 }
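
/*
 * Illustrative pairing of the two list helpers over a listener's lifetime
 * (a sketch, not a verbatim caller; the 'port_info' local is hypothetical):
 *
 *	struct listen_port_info *port_info;
 *
 *	port_info = add_ep_to_listenlist(lep);	takes a refcnt on the port
 *	...
 *	if (rem_ep_from_listenlist(lep) == 0)
 *		the last lep on this port is gone and port_info was freed
 */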
297 
298 static int
299 rem_ep_from_listenlist(struct c4iw_listen_ep *lep)
300 {
301 	uint16_t port;
302 	struct listen_port_info *port_info = NULL;
303 	struct sockaddr_storage *laddr = &lep->com.local_addr;
304 	int refcnt = 0;
305 
306 	port = (laddr->ss_family == AF_INET) ?
307 		((struct sockaddr_in *)laddr)->sin_port :
308 		((struct sockaddr_in6 *)laddr)->sin6_port;
309 
310 	mutex_lock(&listen_port_mutex);
311 
312 	/* get the port_info structure based on the lep's port address */
313 	list_for_each_entry(port_info, &listen_port_list, list) {
314 		if (port_info->port_num == port) {
315 			port_info->refcnt--;
316 			refcnt = port_info->refcnt;
317 			/* remove the current lep from the listen list */
318 			list_del(&lep->listen_ep_list);
319 			if (port_info->refcnt == 0) {
320 				/* Remove this entry from the list as there
321 				 * are no more listeners for this port_num.
322 				 */
323 				list_del(&port_info->list);
324 				kfree(port_info);
325 			}
326 			break;
327 		}
328 	}
329 	mutex_unlock(&listen_port_mutex);
330 	return refcnt;
331 }
332 
333 /*
334  * Find the lep that belongs to the ifnet on which the SYN frame was received.
335  */
336 struct c4iw_listen_ep *
337 find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so)
338 {
339 	struct adapter *adap = NULL;
340 	struct c4iw_listen_ep *lep = NULL;
341 	struct ifnet *ifp = NULL, *hw_ifp = NULL;
342 	struct listen_port_info *port_info = NULL;
343 	int i = 0, found_portinfo = 0, found_lep = 0;
344 	uint16_t port;
345 
346 	/*
347 	 * STEP 1: Figure out 'ifp' of the physical interface, not pseudo
348 	 * interfaces like vlan, lagg, etc.
349 	 * TBD: lagg support, lagg + vlan support.
350 	 */
351 	ifp = TOEPCB(so)->l2te->ifp;
352 	if (ifp->if_type == IFT_L2VLAN) {
353 		hw_ifp = VLAN_TRUNKDEV(ifp);
354 		if (hw_ifp == NULL) {
355 			CTR4(KTR_IW_CXGBE, "%s: Failed to get parent ifnet of "
356 				"vlan ifnet %p, sock %p, master_lep %p",
357 				__func__, ifp, so, master_lep);
358 			return (NULL);
359 		}
360 	} else
361 		hw_ifp = ifp;
362 
363 	/* STEP 2: Find 'port_info' with listener local port address. */
364 	port = (master_lep->com.local_addr.ss_family == AF_INET) ?
365 		((struct sockaddr_in *)&master_lep->com.local_addr)->sin_port :
366 		((struct sockaddr_in6 *)&master_lep->com.local_addr)->sin6_port;
367 
368 
369 	mutex_lock(&listen_port_mutex);
370 	list_for_each_entry(port_info, &listen_port_list, list)
371 		if (port_info->port_num == port) {
372 			found_portinfo = 1;
373 			break;
374 		}
375 	if (!found_portinfo)
376 		goto out;
377 
378 	/* STEP 3: Traverse the list of leps that are bound to the current
379 	 * TCP port and find the lep that belongs to the ifnet on which
380 	 * the SYN frame was received.
381 	 */
382 	list_for_each_entry(lep, &port_info->lep_list, listen_ep_list) {
383 		adap = lep->com.dev->rdev.adap;
384 		for_each_port(adap, i) {
385 			if (hw_ifp == adap->port[i]->vi[0].ifp) {
386 				found_lep = 1;
387 				goto out;
388 			}
389 		}
390 	}
391 out:
392 	mutex_unlock(&listen_port_mutex);
393 	return found_lep ? lep : (NULL);
394 }
395 
396 static void process_timeout(struct c4iw_ep *ep)
397 {
398 	struct c4iw_qp_attributes attrs = {0};
399 	int abort = 1;
400 
401 	CTR4(KTR_IW_CXGBE, "%s ep :%p, tid:%u, state %d", __func__,
402 			ep, ep->hwtid, ep->com.state);
403 	set_bit(TIMEDOUT, &ep->com.history);
404 	switch (ep->com.state) {
405 	case MPA_REQ_SENT:
406 		connect_reply_upcall(ep, -ETIMEDOUT);
407 		break;
408 	case MPA_REQ_WAIT:
409 	case MPA_REQ_RCVD:
410 	case MPA_REP_SENT:
411 	case FPDU_MODE:
412 		break;
413 	case CLOSING:
414 	case MORIBUND:
415 		if (ep->com.cm_id && ep->com.qp) {
416 			attrs.next_state = C4IW_QP_STATE_ERROR;
417 			c4iw_modify_qp(ep->com.dev, ep->com.qp,
418 					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
419 		}
420 		close_complete_upcall(ep, -ETIMEDOUT);
421 		break;
422 	case ABORTING:
423 	case DEAD:
424 		/*
425 		 * These states are expected if the ep timed out at the same
426 		 * time as another thread was calling stop_ep_timer().
427 		 * So we silently do nothing for these states.
428 		 */
429 		abort = 0;
430 		break;
431 	default:
432 		CTR4(KTR_IW_CXGBE, "%s unexpected state ep %p tid %u state %u\n"
433 				, __func__, ep, ep->hwtid, ep->com.state);
434 		abort = 0;
435 	}
436 	if (abort)
437 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
438 	c4iw_put_ep(&ep->com);
439 	return;
440 }
441 
442 struct cqe_list_entry {
443 	struct list_head entry;
444 	struct c4iw_dev *rhp;
445 	struct t4_cqe err_cqe;
446 };
447 
448 static void
449 process_err_cqes(void)
450 {
451 	unsigned long flag;
452 	struct cqe_list_entry *cle;
453 
454 	spin_lock_irqsave(&err_cqe_lock, flag);
455 	while (!list_empty(&err_cqe_list)) {
456 		struct list_head *tmp;
457 		tmp = err_cqe_list.next;
458 		list_del(tmp);
459 		tmp->next = tmp->prev = NULL;
460 		spin_unlock_irqrestore(&err_cqe_lock, flag);
461 		cle = list_entry(tmp, struct cqe_list_entry, entry);
462 		c4iw_ev_dispatch(cle->rhp, &cle->err_cqe);
463 		free(cle, M_CXGBE);
464 		spin_lock_irqsave(&err_cqe_lock, flag);
465 	}
466 	spin_unlock_irqrestore(&err_cqe_lock, flag);
467 
468 	return;
469 }
470 
471 static void
472 process_req(struct work_struct *ctx)
473 {
474 	struct c4iw_ep_common *epc;
475 	unsigned long flag;
476 	int ep_events;
477 
478 	process_err_cqes();
479 	spin_lock_irqsave(&req_lock, flag);
480 	while (!TAILQ_EMPTY(&req_list)) {
481 		epc = TAILQ_FIRST(&req_list);
482 		TAILQ_REMOVE(&req_list, epc, entry);
483 		epc->entry.tqe_prev = NULL;
484 		ep_events = epc->ep_events;
485 		epc->ep_events = 0;
486 		spin_unlock_irqrestore(&req_lock, flag);
487 		mutex_lock(&epc->mutex);
488 		CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, ep_state %s events 0x%x",
489 		    __func__, epc->so, epc, states[epc->state], ep_events);
490 		if (ep_events & C4IW_EVENT_TERM)
491 			process_terminate((struct c4iw_ep *)epc);
492 		if (ep_events & C4IW_EVENT_TIMEOUT)
493 			process_timeout((struct c4iw_ep *)epc);
494 		if (ep_events & C4IW_EVENT_SOCKET)
495 			process_socket_event((struct c4iw_ep *)epc);
496 		mutex_unlock(&epc->mutex);
497 		c4iw_put_ep(epc);
498 		process_err_cqes();
499 		spin_lock_irqsave(&req_lock, flag);
500 	}
501 	spin_unlock_irqrestore(&req_lock, flag);
502 }
503 
504 /*
505  * XXX: doesn't belong here in the iWARP driver.
506  * XXX: assumes that the connection was offloaded by cxgbe/t4_tom if TF_TOE is
507  *      set.  Is this a valid assumption for active open?
508  */
509 static int
510 set_tcpinfo(struct c4iw_ep *ep)
511 {
512 	struct socket *so = ep->com.so;
513 	struct inpcb *inp = sotoinpcb(so);
514 	struct tcpcb *tp;
515 	struct toepcb *toep;
516 	int rc = 0;
517 
518 	INP_WLOCK(inp);
519 	tp = intotcpcb(inp);
520 	if ((tp->t_flags & TF_TOE) == 0) {
521 		rc = EINVAL;
522 		log(LOG_ERR, "%s: connection not offloaded (so %p, ep %p)\n",
523 		    __func__, so, ep);
524 		goto done;
525 	}
526 	toep = TOEPCB(so);
527 
528 	ep->hwtid = toep->tid;
529 	ep->snd_seq = tp->snd_nxt;
530 	ep->rcv_seq = tp->rcv_nxt;
531 	ep->emss = max(tp->t_maxseg, 128);
532 done:
533 	INP_WUNLOCK(inp);
534 	return (rc);
535 }
536 
537 static int
538 get_ifnet_from_raddr(struct sockaddr_storage *raddr, struct ifnet **ifp)
539 {
540 	int err = 0;
541 
542 	if (raddr->ss_family == AF_INET) {
543 		struct sockaddr_in *raddr4 = (struct sockaddr_in *)raddr;
544 		struct nhop4_extended nh4 = {0};
545 
546 		err = fib4_lookup_nh_ext(RT_DEFAULT_FIB, raddr4->sin_addr,
547 				NHR_REF, 0, &nh4);
548 		*ifp = nh4.nh_ifp;
549 		if (err)
550 			fib4_free_nh_ext(RT_DEFAULT_FIB, &nh4);
551 	} else {
552 		struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)raddr;
553 		struct nhop6_extended nh6 = {0};
554 		struct in6_addr addr6;
555 		uint32_t scopeid;
556 
557 		memset(&addr6, 0, sizeof(addr6));
558 		in6_splitscope((struct in6_addr *)&raddr6->sin6_addr,
559 					&addr6, &scopeid);
560 		err = fib6_lookup_nh_ext(RT_DEFAULT_FIB, &addr6, scopeid,
561 				NHR_REF, 0, &nh6);
562 		*ifp = nh6.nh_ifp;
563 		if (err)
564 			fib6_free_nh_ext(RT_DEFAULT_FIB, &nh6);
565 	}
566 
567 	CTR2(KTR_IW_CXGBE, "%s: return: %d", __func__, err);
568 	return err;
569 }
570 
571 static void
572 close_socket(struct socket *so)
573 {
574 	uninit_iwarp_socket(so);
575 	soclose(so);
576 }
577 
578 static void
579 process_peer_close(struct c4iw_ep *ep)
580 {
581 	struct c4iw_qp_attributes attrs = {0};
582 	int disconnect = 1;
583 	int release = 0;
584 
585 	CTR4(KTR_IW_CXGBE, "%s:ppcB ep %p so %p state %s", __func__, ep,
586 	    ep->com.so, states[ep->com.state]);
587 
588 	switch (ep->com.state) {
589 
590 		case MPA_REQ_WAIT:
591 			CTR2(KTR_IW_CXGBE, "%s:ppc1 %p MPA_REQ_WAIT DEAD",
592 			    __func__, ep);
593 			/* Fallthrough */
594 		case MPA_REQ_SENT:
595 			CTR2(KTR_IW_CXGBE, "%s:ppc2 %p MPA_REQ_SENT DEAD",
596 			    __func__, ep);
597 			ep->com.state = DEAD;
598 			connect_reply_upcall(ep, -ECONNABORTED);
599 
600 			disconnect = 0;
601 			STOP_EP_TIMER(ep);
602 			close_socket(ep->com.so);
603 			deref_cm_id(&ep->com);
604 			release = 1;
605 			break;
606 
607 		case MPA_REQ_RCVD:
608 
609 			/*
610 			 * We're gonna mark this puppy DEAD, but keep
611 			 * the reference on it until the ULP accepts or
612 			 * rejects the CR.
613 			 */
614 			CTR2(KTR_IW_CXGBE, "%s:ppc3 %p MPA_REQ_RCVD CLOSING",
615 			    __func__, ep);
616 			ep->com.state = CLOSING;
617 			break;
618 
619 		case MPA_REP_SENT:
620 			CTR2(KTR_IW_CXGBE, "%s:ppc4 %p MPA_REP_SENT CLOSING",
621 			    __func__, ep);
622 			ep->com.state = CLOSING;
623 			break;
624 
625 		case FPDU_MODE:
626 			CTR2(KTR_IW_CXGBE, "%s:ppc5 %p FPDU_MODE CLOSING",
627 			    __func__, ep);
628 			START_EP_TIMER(ep);
629 			ep->com.state = CLOSING;
630 			attrs.next_state = C4IW_QP_STATE_CLOSING;
631 			c4iw_modify_qp(ep->com.dev, ep->com.qp,
632 					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
633 			peer_close_upcall(ep);
634 			break;
635 
636 		case ABORTING:
637 			CTR2(KTR_IW_CXGBE, "%s:ppc6 %p ABORTING (disconn)",
638 			    __func__, ep);
639 			disconnect = 0;
640 			break;
641 
642 		case CLOSING:
643 			CTR2(KTR_IW_CXGBE, "%s:ppc7 %p CLOSING MORIBUND",
644 			    __func__, ep);
645 			ep->com.state = MORIBUND;
646 			disconnect = 0;
647 			break;
648 
649 		case MORIBUND:
650 			CTR2(KTR_IW_CXGBE, "%s:ppc8 %p MORIBUND DEAD", __func__,
651 			    ep);
652 			STOP_EP_TIMER(ep);
653 			if (ep->com.cm_id && ep->com.qp) {
654 				attrs.next_state = C4IW_QP_STATE_IDLE;
655 				c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
656 						C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
657 			}
658 			close_socket(ep->com.so);
659 			close_complete_upcall(ep, 0);
660 			ep->com.state = DEAD;
661 			release = 1;
662 			disconnect = 0;
663 			break;
664 
665 		case DEAD:
666 			CTR2(KTR_IW_CXGBE, "%s:ppc9 %p DEAD (disconn)",
667 			    __func__, ep);
668 			disconnect = 0;
669 			break;
670 
671 		default:
672 			panic("%s: ep %p state %d", __func__, ep,
673 			    ep->com.state);
674 			break;
675 	}
676 
677 
678 	if (disconnect) {
679 
680 		CTR2(KTR_IW_CXGBE, "%s:ppca %p", __func__, ep);
681 		c4iw_ep_disconnect(ep, 0, M_NOWAIT);
682 	}
683 	if (release) {
684 
685 		CTR2(KTR_IW_CXGBE, "%s:ppcb %p", __func__, ep);
686 		c4iw_put_ep(&ep->com);
687 	}
688 	CTR2(KTR_IW_CXGBE, "%s:ppcE %p", __func__, ep);
689 	return;
690 }
691 
692 static void
693 process_conn_error(struct c4iw_ep *ep)
694 {
695 	struct c4iw_qp_attributes attrs = {0};
696 	int ret;
697 	int state;
698 
699 	state = ep->com.state;
700 	CTR5(KTR_IW_CXGBE, "%s:pceB ep %p so %p so->so_error %u state %s",
701 	    __func__, ep, ep->com.so, ep->com.so->so_error,
702 	    states[ep->com.state]);
703 
704 	switch (state) {
705 
706 		case MPA_REQ_WAIT:
707 			STOP_EP_TIMER(ep);
708 			c4iw_put_ep(&ep->parent_ep->com);
709 			break;
710 
711 		case MPA_REQ_SENT:
712 			STOP_EP_TIMER(ep);
713 			connect_reply_upcall(ep, -ECONNRESET);
714 			break;
715 
716 		case MPA_REP_SENT:
717 			ep->com.rpl_err = ECONNRESET;
718 			CTR1(KTR_IW_CXGBE, "waking up ep %p", ep);
719 			break;
720 
721 		case MPA_REQ_RCVD:
722 			break;
723 
724 		case MORIBUND:
725 		case CLOSING:
726 			STOP_EP_TIMER(ep);
727 			/*FALLTHROUGH*/
728 		case FPDU_MODE:
729 
730 			if (ep->com.cm_id && ep->com.qp) {
731 
732 				attrs.next_state = C4IW_QP_STATE_ERROR;
733 				ret = c4iw_modify_qp(ep->com.qp->rhp,
734 					ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
735 					&attrs, 1);
736 				if (ret)
737 					log(LOG_ERR,
738 					    "%s - qp <- error failed!\n",
739 					    __func__);
740 			}
741 			peer_abort_upcall(ep);
742 			break;
743 
744 		case ABORTING:
745 			break;
746 
747 		case DEAD:
748 			CTR2(KTR_IW_CXGBE, "%s so_error %d IN DEAD STATE!!!!",
749 			    __func__, ep->com.so->so_error);
750 			return;
751 
752 		default:
753 			panic("%s: ep %p state %d", __func__, ep, state);
754 			break;
755 	}
756 
757 	if (state != ABORTING) {
758 		close_socket(ep->com.so);
759 		ep->com.state = DEAD;
760 		c4iw_put_ep(&ep->com);
761 	}
762 	CTR2(KTR_IW_CXGBE, "%s:pceE %p", __func__, ep);
763 	return;
764 }
765 
766 static void
767 process_close_complete(struct c4iw_ep *ep)
768 {
769 	struct c4iw_qp_attributes attrs = {0};
770 	int release = 0;
771 
772 	CTR4(KTR_IW_CXGBE, "%s:pccB ep %p so %p state %s", __func__, ep,
773 	    ep->com.so, states[ep->com.state]);
774 
775 	/* The cm_id may be null if we failed to connect */
776 	set_bit(CLOSE_CON_RPL, &ep->com.history);
777 
778 	switch (ep->com.state) {
779 
780 		case CLOSING:
781 			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p CLOSING MORIBUND",
782 			    __func__, ep);
783 			ep->com.state = MORIBUND;
784 			break;
785 
786 		case MORIBUND:
787 			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p MORIBUND DEAD", __func__,
788 			    ep);
789 			STOP_EP_TIMER(ep);
790 
791 			if ((ep->com.cm_id) && (ep->com.qp)) {
792 
793 				CTR2(KTR_IW_CXGBE, "%s:pcc2 %p QP_STATE_IDLE",
794 				    __func__, ep);
795 				attrs.next_state = C4IW_QP_STATE_IDLE;
796 				c4iw_modify_qp(ep->com.dev,
797 						ep->com.qp,
798 						C4IW_QP_ATTR_NEXT_STATE,
799 						&attrs, 1);
800 			}
801 
802 			close_socket(ep->com.so);
803 			close_complete_upcall(ep, 0);
804 			ep->com.state = DEAD;
805 			release = 1;
806 			break;
807 
808 		case ABORTING:
809 			CTR2(KTR_IW_CXGBE, "%s:pcc5 %p ABORTING", __func__, ep);
810 			break;
811 
812 		case DEAD:
813 			CTR2(KTR_IW_CXGBE, "%s:pcc6 %p DEAD", __func__, ep);
814 			break;
815 		default:
816 			CTR2(KTR_IW_CXGBE, "%s:pcc7 %p unknown ep state",
817 					__func__, ep);
818 			panic("%s:pcc6 %p unknown ep state", __func__, ep);
819 			break;
820 	}
821 
822 	if (release) {
823 
824 		CTR2(KTR_IW_CXGBE, "%s:pcc8 %p", __func__, ep);
825 		release_ep_resources(ep);
826 	}
827 	CTR2(KTR_IW_CXGBE, "%s:pccE %p", __func__, ep);
828 	return;
829 }
830 
831 static void
832 setiwsockopt(struct socket *so)
833 {
834 	int rc;
835 	struct sockopt sopt;
836 	int on = 1;
837 
838 	sopt.sopt_dir = SOPT_SET;
839 	sopt.sopt_level = IPPROTO_TCP;
840 	sopt.sopt_name = TCP_NODELAY;
841 	sopt.sopt_val = (caddr_t)&on;
842 	sopt.sopt_valsize = sizeof on;
843 	sopt.sopt_td = NULL;
844 	rc = sosetopt(so, &sopt);
845 	if (rc) {
846 		log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n",
847 		    __func__, so, rc);
848 	}
849 }
850 
851 static void
852 init_iwarp_socket(struct socket *so, void *arg)
853 {
854 	if (SOLISTENING(so)) {
855 		SOLISTEN_LOCK(so);
856 		solisten_upcall_set(so, c4iw_so_upcall, arg);
857 		so->so_state |= SS_NBIO;
858 		SOLISTEN_UNLOCK(so);
859 	} else {
860 		SOCKBUF_LOCK(&so->so_rcv);
861 		soupcall_set(so, SO_RCV, c4iw_so_upcall, arg);
862 		so->so_state |= SS_NBIO;
863 		SOCKBUF_UNLOCK(&so->so_rcv);
864 	}
865 }
866 
867 static void
868 uninit_iwarp_socket(struct socket *so)
869 {
870 	if (SOLISTENING(so)) {
871 		SOLISTEN_LOCK(so);
872 		solisten_upcall_set(so, NULL, NULL);
873 		SOLISTEN_UNLOCK(so);
874 	} else {
875 		SOCKBUF_LOCK(&so->so_rcv);
876 		soupcall_clear(so, SO_RCV);
877 		SOCKBUF_UNLOCK(&so->so_rcv);
878 	}
879 }
880 
881 static void
882 process_data(struct c4iw_ep *ep)
883 {
884 	int ret = 0;
885 	int disconnect = 0;
886 	struct c4iw_qp_attributes attrs = {0};
887 
888 	CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sbused %d", __func__,
889 	    ep->com.so, ep, states[ep->com.state], sbused(&ep->com.so->so_rcv));
890 
891 	switch (ep->com.state) {
892 	case MPA_REQ_SENT:
893 		disconnect = process_mpa_reply(ep);
894 		break;
895 	case MPA_REQ_WAIT:
896 		disconnect = process_mpa_request(ep);
897 		if (disconnect)
898 			/* Referenced in process_newconn() */
899 			c4iw_put_ep(&ep->parent_ep->com);
900 		break;
901 	case FPDU_MODE:
902 		MPASS(ep->com.qp != NULL);
903 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
904 		ret = c4iw_modify_qp(ep->com.dev, ep->com.qp,
905 					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
906 		if (ret != -EINPROGRESS)
907 			disconnect = 1;
908 		break;
909 	default:
910 		log(LOG_ERR, "%s: Unexpected streaming data. ep %p, "
911 			    "state %d, so %p, so_state 0x%x, sbused %u\n",
912 			    __func__, ep, ep->com.state, ep->com.so,
913 			    ep->com.so->so_state, sbused(&ep->com.so->so_rcv));
914 		break;
915 	}
916 	if (disconnect)
917 		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
918 
919 }
920 
921 static void
922 process_connected(struct c4iw_ep *ep)
923 {
924 	struct socket *so = ep->com.so;
925 
926 	if ((so->so_state & SS_ISCONNECTED) && !so->so_error) {
927 		if (send_mpa_req(ep))
928 			goto err;
929 	} else {
930 		connect_reply_upcall(ep, -so->so_error);
931 		goto err;
932 	}
933 	return;
934 err:
935 	close_socket(so);
936 	ep->com.state = DEAD;
937 	c4iw_put_ep(&ep->com);
938 	return;
939 }
940 
941 static inline int c4iw_zero_addr(struct sockaddr *addr)
942 {
943 	struct in6_addr *ip6;
944 
945 	if (addr->sa_family == AF_INET)
946 		return IN_ZERONET(
947 			ntohl(((struct sockaddr_in *)addr)->sin_addr.s_addr));
948 	else {
949 		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
950 		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
951 				ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
952 	}
953 }
954 
955 static inline int c4iw_loopback_addr(struct sockaddr *addr)
956 {
957 	if (addr->sa_family == AF_INET)
958 		return IN_LOOPBACK(
959 			ntohl(((struct sockaddr_in *) addr)->sin_addr.s_addr));
960 	else
961 		return IN6_IS_ADDR_LOOPBACK(
962 				&((struct sockaddr_in6 *) addr)->sin6_addr);
963 }
964 
965 static inline int c4iw_any_addr(struct sockaddr *addr)
966 {
967 	return c4iw_zero_addr(addr) || c4iw_loopback_addr(addr);
968 }
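
/*
 * A quick sanity check of the classification above (hypothetical addresses):
 *
 *	0.0.0.0     zero net	-> c4iw_any_addr() != 0
 *	127.0.0.1   loopback	-> c4iw_any_addr() != 0
 *	10.1.2.3    neither	-> c4iw_any_addr() == 0
 *	::          all-zero	-> c4iw_any_addr() != 0
 *
 * That is, wildcard and loopback listeners are both treated as "any", which
 * is what sends process_newconn() looking for the real lep below.
 */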
969 
970 static void
971 process_newconn(struct c4iw_listen_ep *master_lep, struct socket *new_so)
972 {
973 	struct c4iw_listen_ep *real_lep = NULL;
974 	struct c4iw_ep *new_ep = NULL;
975 	struct sockaddr_in *remote = NULL;
976 	int ret = 0;
977 
978 	MPASS(new_so != NULL);
979 
980 	if (c4iw_any_addr((struct sockaddr *)&master_lep->com.local_addr)) {
981 		/* Here we need to find the 'real_lep' that belongs to the
982 		 * incoming socket's network interface, so that the newly
983 		 * created 'ep' can be attached to the real 'lep'.
984 		 */
985 		real_lep = find_real_listen_ep(master_lep, new_so);
986 		if (real_lep == NULL) {
987 			CTR2(KTR_IW_CXGBE, "%s: Could not find the real listen "
988 					"ep for sock: %p", __func__, new_so);
989 			log(LOG_ERR,"%s: Could not find the real listen ep for "
990 					"sock: %p\n", __func__, new_so);
991 			/* FIXME: properly free the 'new_so' in failure case.
992 			 * Use of soabort() and soclose() is not legal
993 			 * here (before soaccept()).
994 			 */
995 			return;
996 		}
997 	} else /* for Non-Wildcard address, master_lep is always the real_lep */
998 		real_lep = master_lep;
999 
1000 	new_ep = alloc_ep(sizeof(*new_ep), GFP_KERNEL);
1001 
1002 	CTR6(KTR_IW_CXGBE, "%s: master_lep %p, real_lep: %p, new ep %p, "
1003 	    "listening so %p, new so %p", __func__, master_lep, real_lep,
1004 	    new_ep, master_lep->com.so, new_so);
1005 
1006 	new_ep->com.dev = real_lep->com.dev;
1007 	new_ep->com.so = new_so;
1008 	new_ep->com.cm_id = NULL;
1009 	new_ep->com.thread = real_lep->com.thread;
1010 	new_ep->parent_ep = real_lep;
1011 
1012 	GET_LOCAL_ADDR(&new_ep->com.local_addr, new_so);
1013 	GET_REMOTE_ADDR(&new_ep->com.remote_addr, new_so);
1014 	c4iw_get_ep(&real_lep->com);
1015 	init_timer(&new_ep->timer);
1016 	new_ep->com.state = MPA_REQ_WAIT;
1017 	START_EP_TIMER(new_ep);
1018 
1019 	setiwsockopt(new_so);
1020 	ret = soaccept(new_so, (struct sockaddr **)&remote);
1021 	if (ret != 0) {
1022 		CTR4(KTR_IW_CXGBE,
1023 				"%s:listen sock:%p, new sock:%p, ret:%d\n",
1024 				__func__, master_lep->com.so, new_so, ret);
1025 		if (remote != NULL)
1026 			free(remote, M_SONAME);
1027 		uninit_iwarp_socket(new_so);
1028 		soclose(new_so);
1029 		c4iw_put_ep(&new_ep->com);
1030 		c4iw_put_ep(&real_lep->com);
1031 		return;
1032 	}
1033 	free(remote, M_SONAME);
1034 
1035 	/* An MPA request might have been queued up on the socket already, so we
1036 	 * initialize the socket/upcall handler under the lock to prevent another
1037 	 * thread (via process_req()) from processing the MPA request concurrently.
1038 	 */
1039 	c4iw_get_ep(&new_ep->com); /* Dereferenced at the end below; this is to
1040 				      avoid freeing the ep before it is unlocked. */
1041 	mutex_lock(&new_ep->com.mutex);
1042 	init_iwarp_socket(new_so, &new_ep->com);
1043 
1044 	ret = process_mpa_request(new_ep);
1045 	if (ret) {
1046 		/* ABORT */
1047 		c4iw_ep_disconnect(new_ep, 1, GFP_KERNEL);
1048 		c4iw_put_ep(&real_lep->com);
1049 	}
1050 	mutex_unlock(&new_ep->com.mutex);
1051 	c4iw_put_ep(&new_ep->com);
1052 	return;
1053 }
1054 
1055 static int
1056 add_ep_to_req_list(struct c4iw_ep *ep, int new_ep_event)
1057 {
1058 	unsigned long flag;
1059 
1060 	spin_lock_irqsave(&req_lock, flag);
1061 	if (ep && ep->com.so) {
1062 		ep->com.ep_events |= new_ep_event;
1063 		if (!ep->com.entry.tqe_prev) {
1064 			c4iw_get_ep(&ep->com);
1065 			TAILQ_INSERT_TAIL(&req_list, &ep->com, entry);
1066 			queue_work(c4iw_taskq, &c4iw_task);
1067 		}
1068 	}
1069 	spin_unlock_irqrestore(&req_lock, flag);
1070 
1071 	return (0);
1072 }
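
/*
 * The deferral pattern at work here, roughly (a summary of the surrounding
 * code, not new behavior): socket upcalls run with socket locks held, so
 * they only mark the ep and kick the taskqueue, and process_req() then
 * handles the event in a sleepable context.
 *
 *	c4iw_so_upcall()			socket upcall, locks held
 *	  -> add_ep_to_req_list(ep, C4IW_EVENT_SOCKET)
 *	       -> queue_work(c4iw_taskq, &c4iw_task)
 *	            -> process_req()		taskqueue thread
 *	                 -> process_socket_event(ep)
 */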
1073 
1074 static int
1075 c4iw_so_upcall(struct socket *so, void *arg, int waitflag)
1076 {
1077 	struct c4iw_ep *ep = arg;
1078 
1079 	CTR6(KTR_IW_CXGBE,
1080 	    "%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p",
1081 	    __func__, so, so->so_state, ep, states[ep->com.state],
1082 	    ep->com.entry.tqe_prev);
1083 
1084 	MPASS(ep->com.so == so);
1085 	/*
1086 	 * Wake up any threads waiting in rdma_init()/rdma_fini(),
1087 	 * with locks held.
1088 	 */
1089 	if (so->so_error)
1090 		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
1091 	add_ep_to_req_list(ep, C4IW_EVENT_SOCKET);
1092 
1093 	return (SU_OK);
1094 }
1095 
1096 
1097 static int
1098 terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
1099 {
1100 	struct adapter *sc = iq->adapter;
1101 	const struct cpl_rdma_terminate *cpl = mtod(m, const void *);
1102 	unsigned int tid = GET_TID(cpl);
1103 	struct toepcb *toep = lookup_tid(sc, tid);
1104 	struct socket *so;
1105 	struct c4iw_ep *ep;
1106 
1107 	INP_WLOCK(toep->inp);
1108 	so = inp_inpcbtosocket(toep->inp);
1109 	ep = so->so_rcv.sb_upcallarg;
1110 	INP_WUNLOCK(toep->inp);
1111 
1112 	CTR3(KTR_IW_CXGBE, "%s: so %p, ep %p", __func__, so, ep);
1113 	add_ep_to_req_list(ep, C4IW_EVENT_TERM);
1114 
1115 	return 0;
1116 }
1117 
1118 static void
1119 process_socket_event(struct c4iw_ep *ep)
1120 {
1121 	int state = ep->com.state;
1122 	struct socket *so = ep->com.so;
1123 
1124 	if (ep->com.state == DEAD) {
1125 		CTR3(KTR_IW_CXGBE, "%s: Pending socket event discarded "
1126 			"ep %p ep_state %s", __func__, ep, states[state]);
1127 		return;
1128 	}
1129 
1130 	CTR6(KTR_IW_CXGBE, "process_socket_event: so %p, so_state 0x%x, "
1131 	    "so_err %d, sb_state 0x%x, ep %p, ep_state %s", so, so->so_state,
1132 	    so->so_error, so->so_rcv.sb_state, ep, states[state]);
1133 
1134 	if (state == CONNECTING) {
1135 		process_connected(ep);
1136 		return;
1137 	}
1138 
1139 	if (state == LISTEN) {
1140 		struct c4iw_listen_ep *lep = (struct c4iw_listen_ep *)ep;
1141 		struct socket *listen_so = so, *new_so = NULL;
1142 		int error = 0;
1143 
1144 		SOLISTEN_LOCK(listen_so);
1145 		do {
1146 			error = solisten_dequeue(listen_so, &new_so,
1147 						SOCK_NONBLOCK);
1148 			if (error) {
1149 				CTR4(KTR_IW_CXGBE, "%s: lep %p listen_so %p "
1150 					"error %d", __func__, lep, listen_so,
1151 					error);
1152 				return;
1153 			}
1154 			process_newconn(lep, new_so);
1155 
1156 			/* solisten_dequeue() drops the lock on return, so acquire
1157 			 * it again to check sol_qlen and for the next iteration.
1158 			 */
1159 			SOLISTEN_LOCK(listen_so);
1160 		} while (listen_so->sol_qlen);
1161 		SOLISTEN_UNLOCK(listen_so);
1162 
1163 		return;
1164 	}
1165 
1166 	/* connection error */
1167 	if (so->so_error) {
1168 		process_conn_error(ep);
1169 		return;
1170 	}
1171 
1172 	/* peer close */
1173 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state <= CLOSING) {
1174 		process_peer_close(ep);
1175 		/*
1176 		 * check whether socket disconnect event is pending before
1177 		 * returning. Fallthrough if yes.
1178 		 */
1179 		if (!(so->so_state & SS_ISDISCONNECTED))
1180 			return;
1181 	}
1182 
1183 	/* close complete */
1184 	if (so->so_state & SS_ISDISCONNECTED) {
1185 		process_close_complete(ep);
1186 		return;
1187 	}
1188 
1189 	/* rx data */
1190 	if (sbused(&ep->com.so->so_rcv)) {
1191 		process_data(ep);
1192 		return;
1193 	}
1194 
1195 	/* Socket events for 'MPA Request Received' and 'Close Complete'
1196 	 * were already processed earlier in their respective event handlers,
1197 	 * so those socket events are skipped here.
1198 	 * Any other socket event must have been handled above.
1199 	 */
1200 	MPASS((ep->com.state == MPA_REQ_RCVD) || (ep->com.state == MORIBUND));
1201 
1202 	if ((ep->com.state != MPA_REQ_RCVD) && (ep->com.state != MORIBUND))
1203 		log(LOG_ERR, "%s: Unprocessed socket event so %p, "
1204 		"so_state 0x%x, so_err %d, sb_state 0x%x, ep %p, ep_state %s\n",
1205 		__func__, so, so->so_state, so->so_error, so->so_rcv.sb_state,
1206 			ep, states[state]);
1207 
1208 }
1209 
1210 SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD, 0, "iw_cxgbe driver parameters");
1211 
1212 static int dack_mode = 0;
1213 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RWTUN, &dack_mode, 0,
1214 		"Delayed ack mode (default = 0)");
1215 
1216 int c4iw_max_read_depth = 8;
1217 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RWTUN, &c4iw_max_read_depth, 0,
1218 		"Per-connection max ORD/IRD (default = 8)");
1219 
1220 static int enable_tcp_timestamps;
1221 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RWTUN, &enable_tcp_timestamps, 0,
1222 		"Enable tcp timestamps (default = 0)");
1223 
1224 static int enable_tcp_sack;
1225 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RWTUN, &enable_tcp_sack, 0,
1226 		"Enable tcp SACK (default = 0)");
1227 
1228 static int enable_tcp_window_scaling = 1;
1229 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RWTUN, &enable_tcp_window_scaling, 0,
1230 		"Enable tcp window scaling (default = 1)");
1231 
1232 int c4iw_debug = 0;
1233 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RWTUN, &c4iw_debug, 0,
1234 		"Enable debug logging (default = 0)");
1235 
1236 static int peer2peer = 1;
1237 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RWTUN, &peer2peer, 0,
1238 		"Support peer2peer ULPs (default = 1)");
1239 
1240 static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
1241 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RWTUN, &p2p_type, 0,
1242 		"RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)");
1243 
1244 static int ep_timeout_secs = 60;
1245 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RWTUN, &ep_timeout_secs, 0,
1246 		"CM Endpoint operation timeout in seconds (default = 60)");
1247 
1248 static int mpa_rev = 1;
1249 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0,
1250 		"MPA Revision, 0 supports amso1100, 1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)");
1251 
1252 static int markers_enabled;
1253 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RWTUN, &markers_enabled, 0,
1254 		"Enable MPA MARKERS (default(0) = disabled)");
1255 
1256 static int crc_enabled = 1;
1257 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RWTUN, &crc_enabled, 0,
1258 		"Enable MPA CRC (default(1) = enabled)");
1259 
1260 static int rcv_win = 256 * 1024;
1261 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RWTUN, &rcv_win, 0,
1262 		"TCP receive window in bytes (default = 256KB)");
1263 
1264 static int snd_win = 128 * 1024;
1265 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0,
1266 		"TCP send window in bytes (default = 128KB)");
1267 
1268 static void
1269 start_ep_timer(struct c4iw_ep *ep)
1270 {
1271 
1272 	if (timer_pending(&ep->timer)) {
1273 		CTR2(KTR_IW_CXGBE, "%s: ep %p, already started", __func__, ep);
1274 		printk(KERN_ERR "%s timer already started! ep %p\n", __func__,
1275 		    ep);
1276 		return;
1277 	}
1278 	clear_bit(TIMEOUT, &ep->com.flags);
1279 	c4iw_get_ep(&ep->com);
1280 	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
1281 	ep->timer.data = (unsigned long)ep;
1282 	ep->timer.function = ep_timeout;
1283 	add_timer(&ep->timer);
1284 }
1285 
1286 static int
1287 stop_ep_timer(struct c4iw_ep *ep)
1288 {
1289 
1290 	del_timer_sync(&ep->timer);
1291 	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
1292 		c4iw_put_ep(&ep->com);
1293 		return 0;
1294 	}
1295 	return 1;
1296 }
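
/*
 * Reference counting around the ep timer, as implemented above:
 * start_ep_timer() takes an ep reference on behalf of the pending timer, and
 * whichever of stop_ep_timer() or the timeout path wins the
 * test_and_set_bit(TIMEOUT) race is responsible for dropping it.  A caller
 * that sees stop_ep_timer() return 1 must therefore not put that reference
 * itself; process_timeout() does so at the end of its run.
 */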
1297 
1298 static void *
1299 alloc_ep(int size, gfp_t gfp)
1300 {
1301 	struct c4iw_ep_common *epc;
1302 
1303 	epc = kzalloc(size, gfp);
1304 	if (epc == NULL)
1305 		return (NULL);
1306 
1307 	kref_init(&epc->kref);
1308 	mutex_init(&epc->mutex);
1309 	c4iw_init_wr_wait(&epc->wr_wait);
1310 
1311 	return (epc);
1312 }
1313 
1314 void _c4iw_free_ep(struct kref *kref)
1315 {
1316 	struct c4iw_ep *ep;
1317 	struct c4iw_ep_common *epc;
1318 
1319 	ep = container_of(kref, struct c4iw_ep, com.kref);
1320 	epc = &ep->com;
1321 	KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list",
1322 	    __func__, epc));
1323 	if (test_bit(QP_REFERENCED, &ep->com.flags))
1324 		deref_qp(ep);
1325 	CTR4(KTR_IW_CXGBE, "%s: ep %p, history 0x%lx, flags 0x%lx",
1326 	    __func__, ep, epc->history, epc->flags);
1327 	kfree(ep);
1328 }
1329 
1330 static void release_ep_resources(struct c4iw_ep *ep)
1331 {
1332 	CTR2(KTR_IW_CXGBE, "%s:rerB %p", __func__, ep);
1333 	set_bit(RELEASE_RESOURCES, &ep->com.flags);
1334 	c4iw_put_ep(&ep->com);
1335 	CTR2(KTR_IW_CXGBE, "%s:rerE %p", __func__, ep);
1336 }
1337 
1338 static int
1339 send_mpa_req(struct c4iw_ep *ep)
1340 {
1341 	int mpalen;
1342 	struct mpa_message *mpa;
1343 	struct mpa_v2_conn_params mpa_v2_params;
1344 	struct mbuf *m;
1345 	char mpa_rev_to_use = mpa_rev;
1346 	int err = 0;
1347 
1348 	if (ep->retry_with_mpa_v1)
1349 		mpa_rev_to_use = 1;
1350 	mpalen = sizeof(*mpa) + ep->plen;
1351 	if (mpa_rev_to_use == 2)
1352 		mpalen += sizeof(struct mpa_v2_conn_params);
1353 
1354 	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
1355 	if (mpa == NULL) {
1356 		err = -ENOMEM;
1357 		CTR3(KTR_IW_CXGBE, "%s:smr1 ep: %p , error: %d",
1358 				__func__, ep, err);
1359 		goto err;
1360 	}
1361 
1362 	memset(mpa, 0, mpalen);
1363 	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
1364 	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
1365 		(markers_enabled ? MPA_MARKERS : 0) |
1366 		(mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
1367 	mpa->private_data_size = htons(ep->plen);
1368 	mpa->revision = mpa_rev_to_use;
1369 
1370 	if (mpa_rev_to_use == 1) {
1371 		ep->tried_with_mpa_v1 = 1;
1372 		ep->retry_with_mpa_v1 = 0;
1373 	}
1374 
1375 	if (mpa_rev_to_use == 2) {
1376 		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
1377 					    sizeof(struct mpa_v2_conn_params));
1378 		mpa_v2_params.ird = htons((u16)ep->ird);
1379 		mpa_v2_params.ord = htons((u16)ep->ord);
1380 
1381 		if (peer2peer) {
1382 			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
1383 
1384 			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
1385 				mpa_v2_params.ord |=
1386 				    htons(MPA_V2_RDMA_WRITE_RTR);
1387 			} else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
1388 				mpa_v2_params.ord |=
1389 					htons(MPA_V2_RDMA_READ_RTR);
1390 			}
1391 		}
1392 		memcpy(mpa->private_data, &mpa_v2_params,
1393 			sizeof(struct mpa_v2_conn_params));
1394 
1395 		if (ep->plen) {
1396 
1397 			memcpy(mpa->private_data +
1398 				sizeof(struct mpa_v2_conn_params),
1399 				ep->mpa_pkt + sizeof(*mpa), ep->plen);
1400 		}
1401 	} else {
1402 
1403 		if (ep->plen)
1404 			memcpy(mpa->private_data,
1405 					ep->mpa_pkt + sizeof(*mpa), ep->plen);
1406 		CTR2(KTR_IW_CXGBE, "%s:smr7 %p", __func__, ep);
1407 	}
1408 
1409 	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
1410 	if (m == NULL) {
1411 		err = -ENOMEM;
1412 		CTR3(KTR_IW_CXGBE, "%s:smr2 ep: %p , error: %d",
1413 				__func__, ep, err);
1414 		free(mpa, M_CXGBE);
1415 		goto err;
1416 	}
1417 	m_copyback(m, 0, mpalen, (void *)mpa);
1418 	free(mpa, M_CXGBE);
1419 
1420 	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
1421 			ep->com.thread);
1422 	if (err) {
1423 		CTR3(KTR_IW_CXGBE, "%s:smr3 ep: %p , error: %d",
1424 				__func__, ep, err);
1425 		goto err;
1426 	}
1427 
1428 	START_EP_TIMER(ep);
1429 	ep->com.state = MPA_REQ_SENT;
1430 	ep->mpa_attr.initiator = 1;
1431 	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
1432 	return 0;
1433 err:
1434 	connect_reply_upcall(ep, err);
1435 	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
1436 	return err;
1437 }
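
/*
 * Wire layout of the request built above (sizes follow from the structure
 * definitions; the v2 block and private data are both optional):
 *
 *	struct mpa_message		key, flags, revision, private_data_size
 *	struct mpa_v2_conn_params	ird/ord, only when mpa_rev_to_use == 2
 *	private data			ep->plen bytes from ep->mpa_pkt
 */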
1438 
1439 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
1440 {
1441 	int mpalen;
1442 	struct mpa_message *mpa;
1443 	struct mpa_v2_conn_params mpa_v2_params;
1444 	struct mbuf *m;
1445 	int err;
1446 
1447 	CTR4(KTR_IW_CXGBE, "%s:smrejB %p %u %d", __func__, ep, ep->hwtid,
1448 	    ep->plen);
1449 
1450 	mpalen = sizeof(*mpa) + plen;
1451 
1452 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1453 
1454 		mpalen += sizeof(struct mpa_v2_conn_params);
1455 		CTR4(KTR_IW_CXGBE, "%s:smrej1 %p %u %d", __func__, ep,
1456 		    ep->mpa_attr.version, mpalen);
1457 	}
1458 
1459 	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
1460 	if (mpa == NULL)
1461 		return (-ENOMEM);
1462 
1463 	memset(mpa, 0, mpalen);
1464 	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
1465 	mpa->flags = MPA_REJECT;
1466 	mpa->revision = mpa_rev;
1467 	mpa->private_data_size = htons(plen);
1468 
1469 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1470 
1471 		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
1472 		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
1473 					    sizeof(struct mpa_v2_conn_params));
1474 		mpa_v2_params.ird = htons(((u16)ep->ird) |
1475 				(peer2peer ? MPA_V2_PEER2PEER_MODEL :
1476 				 0));
1477 		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
1478 					(p2p_type ==
1479 					 FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
1480 					 MPA_V2_RDMA_WRITE_RTR : p2p_type ==
1481 					 FW_RI_INIT_P2PTYPE_READ_REQ ?
1482 					 MPA_V2_RDMA_READ_RTR : 0) : 0));
1483 		memcpy(mpa->private_data, &mpa_v2_params,
1484 				sizeof(struct mpa_v2_conn_params));
1485 
1486 		if (ep->plen)
1487 			memcpy(mpa->private_data +
1488 				sizeof(struct mpa_v2_conn_params), pdata, plen);
1489 		CTR5(KTR_IW_CXGBE, "%s:smrej3 %p %d %d %d", __func__, ep,
1490 		    mpa_v2_params.ird, mpa_v2_params.ord, ep->plen);
1491 	} else
1492 		if (plen)
1493 			memcpy(mpa->private_data, pdata, plen);
1494 
1495 	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
1496 	if (m == NULL) {
1497 		free(mpa, M_CXGBE);
1498 		return (-ENOMEM);
1499 	}
1500 	m_copyback(m, 0, mpalen, (void *)mpa);
1501 	free(mpa, M_CXGBE);
1502 
1503 	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread);
1504 	if (!err)
1505 		ep->snd_seq += mpalen;
1506 	CTR4(KTR_IW_CXGBE, "%s:smrejE %p %u %d", __func__, ep, ep->hwtid, err);
1507 	return err;
1508 }
1509 
1510 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
1511 {
1512 	int mpalen;
1513 	struct mpa_message *mpa;
1514 	struct mbuf *m;
1515 	struct mpa_v2_conn_params mpa_v2_params;
1516 	int err;
1517 
1518 	CTR2(KTR_IW_CXGBE, "%s:smrepB %p", __func__, ep);
1519 
1520 	mpalen = sizeof(*mpa) + plen;
1521 
1522 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1523 
1524 		CTR3(KTR_IW_CXGBE, "%s:smrep1 %p %d", __func__, ep,
1525 		    ep->mpa_attr.version);
1526 		mpalen += sizeof(struct mpa_v2_conn_params);
1527 	}
1528 
1529 	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
1530 	if (mpa == NULL)
1531 		return (-ENOMEM);
1532 
1533 	memset(mpa, 0, sizeof(*mpa));
1534 	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
1535 	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
1536 		(markers_enabled ? MPA_MARKERS : 0);
1537 	mpa->revision = ep->mpa_attr.version;
1538 	mpa->private_data_size = htons(plen);
1539 
1540 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1541 
1542 		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
1543 		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
1544 					    sizeof(struct mpa_v2_conn_params));
1545 		mpa_v2_params.ird = htons((u16)ep->ird);
1546 		mpa_v2_params.ord = htons((u16)ep->ord);
1547 		CTR5(KTR_IW_CXGBE, "%s:smrep3 %p %d %d %d", __func__, ep,
1548 		    ep->mpa_attr.version, mpa_v2_params.ird, mpa_v2_params.ord);
1549 
1550 		if (peer2peer && (ep->mpa_attr.p2p_type !=
1551 			FW_RI_INIT_P2PTYPE_DISABLED)) {
1552 
1553 			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
1554 
1555 			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
1556 
1557 				mpa_v2_params.ord |=
1558 					htons(MPA_V2_RDMA_WRITE_RTR);
1559 				CTR5(KTR_IW_CXGBE, "%s:smrep4 %p %d %d %d",
1560 				    __func__, ep, p2p_type, mpa_v2_params.ird,
1561 				    mpa_v2_params.ord);
1562 			}
1563 			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
1564 
1565 				mpa_v2_params.ord |=
1566 					htons(MPA_V2_RDMA_READ_RTR);
1567 				CTR5(KTR_IW_CXGBE, "%s:smrep5 %p %d %d %d",
1568 				    __func__, ep, p2p_type, mpa_v2_params.ird,
1569 				    mpa_v2_params.ord);
1570 			}
1571 		}
1572 
1573 		memcpy(mpa->private_data, &mpa_v2_params,
1574 			sizeof(struct mpa_v2_conn_params));
1575 
1576 		if (ep->plen)
1577 			memcpy(mpa->private_data +
1578 				sizeof(struct mpa_v2_conn_params), pdata, plen);
1579 	} else
1580 		if (plen)
1581 			memcpy(mpa->private_data, pdata, plen);
1582 
1583 	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
1584 	if (m == NULL) {
1585 		free(mpa, M_CXGBE);
1586 		return (-ENOMEM);
1587 	}
1588 	m_copyback(m, 0, mpalen, (void *)mpa);
1589 	free(mpa, M_CXGBE);
1590 
1591 
1592 	ep->com.state = MPA_REP_SENT;
1593 	ep->snd_seq += mpalen;
1594 	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
1595 			ep->com.thread);
1596 	CTR3(KTR_IW_CXGBE, "%s:smrepE %p %d", __func__, ep, err);
1597 	return err;
1598 }
1599 
1600 
1601 
1602 static void close_complete_upcall(struct c4iw_ep *ep, int status)
1603 {
1604 	struct iw_cm_event event;
1605 
1606 	CTR2(KTR_IW_CXGBE, "%s:ccuB %p", __func__, ep);
1607 	memset(&event, 0, sizeof(event));
1608 	event.event = IW_CM_EVENT_CLOSE;
1609 	event.status = status;
1610 
1611 	if (ep->com.cm_id) {
1612 
1613 		CTR2(KTR_IW_CXGBE, "%s:ccu1 %1", __func__, ep);
1614 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1615 		deref_cm_id(&ep->com);
1616 		set_bit(CLOSE_UPCALL, &ep->com.history);
1617 	}
1618 	CTR2(KTR_IW_CXGBE, "%s:ccuE %p", __func__, ep);
1619 }
1620 
1621 static int
1622 send_abort(struct c4iw_ep *ep)
1623 {
1624 	struct socket *so = ep->com.so;
1625 	struct sockopt sopt;
1626 	int rc;
1627 	struct linger l;
1628 
1629 	CTR5(KTR_IW_CXGBE, "%s ep %p so %p state %s tid %d", __func__, ep, so,
1630 	    states[ep->com.state], ep->hwtid);
1631 
1632 	l.l_onoff = 1;
1633 	l.l_linger = 0;
1634 
1635 	/* linger_time of 0 forces RST to be sent */
1636 	sopt.sopt_dir = SOPT_SET;
1637 	sopt.sopt_level = SOL_SOCKET;
1638 	sopt.sopt_name = SO_LINGER;
1639 	sopt.sopt_val = (caddr_t)&l;
1640 	sopt.sopt_valsize = sizeof l;
1641 	sopt.sopt_td = NULL;
1642 	rc = sosetopt(so, &sopt);
1643 	if (rc != 0) {
1644 		log(LOG_ERR, "%s: sosetopt(%p, linger = 0) failed with %d.\n",
1645 		    __func__, so, rc);
1646 	}
1647 
1648 	uninit_iwarp_socket(so);
1649 	soclose(so);
1650 	set_bit(ABORT_CONN, &ep->com.history);
1651 
1652 	/*
1653 	 * TBD: the iw_cxgbe driver should receive an ABORT reply for every ABORT
1654 	 * request it has sent. But the current TOE driver does not propagate
1655 	 * this ABORT reply event (via do_abort_rpl) to iw_cxgbe. So as a work-
1656 	 * around, dereference 'ep' here instead of doing it in the (not yet
1657 	 * implemented) abort_rpl() handler of the iw_cxgbe driver.
1658 	 */
1659 	release_ep_resources(ep);
1660 	ep->com.state = DEAD;
1661 
1662 	return (0);
1663 }
1664 
1665 static void peer_close_upcall(struct c4iw_ep *ep)
1666 {
1667 	struct iw_cm_event event;
1668 
1669 	CTR2(KTR_IW_CXGBE, "%s:pcuB %p", __func__, ep);
1670 	memset(&event, 0, sizeof(event));
1671 	event.event = IW_CM_EVENT_DISCONNECT;
1672 
1673 	if (ep->com.cm_id) {
1674 
1675 		CTR2(KTR_IW_CXGBE, "%s:pcu1 %p", __func__, ep);
1676 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1677 		set_bit(DISCONN_UPCALL, &ep->com.history);
1678 	}
1679 	CTR2(KTR_IW_CXGBE, "%s:pcuE %p", __func__, ep);
1680 }
1681 
1682 static void peer_abort_upcall(struct c4iw_ep *ep)
1683 {
1684 	struct iw_cm_event event;
1685 
1686 	CTR2(KTR_IW_CXGBE, "%s:pauB %p", __func__, ep);
1687 	memset(&event, 0, sizeof(event));
1688 	event.event = IW_CM_EVENT_CLOSE;
1689 	event.status = -ECONNRESET;
1690 
1691 	if (ep->com.cm_id) {
1692 
1693 		CTR2(KTR_IW_CXGBE, "%s:pau1 %p", __func__, ep);
1694 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1695 		deref_cm_id(&ep->com);
1696 		set_bit(ABORT_UPCALL, &ep->com.history);
1697 	}
1698 	CTR2(KTR_IW_CXGBE, "%s:pauE %p", __func__, ep);
1699 }
1700 
1701 static void connect_reply_upcall(struct c4iw_ep *ep, int status)
1702 {
1703 	struct iw_cm_event event;
1704 
1705 	CTR3(KTR_IW_CXGBE, "%s:cruB %p, status: %d", __func__, ep, status);
1706 	memset(&event, 0, sizeof(event));
1707 	event.event = IW_CM_EVENT_CONNECT_REPLY;
1708 	event.status = ((status == -ECONNABORTED) || (status == -EPIPE)) ?
1709 					-ECONNRESET : status;
1710 	event.local_addr = ep->com.local_addr;
1711 	event.remote_addr = ep->com.remote_addr;
1712 
1713 	if ((status == 0) || (status == -ECONNREFUSED)) {
1714 
1715 		if (!ep->tried_with_mpa_v1) {
1716 
1717 			CTR2(KTR_IW_CXGBE, "%s:cru1 %p", __func__, ep);
1718 			/* this means MPA_v2 is used */
1719 			event.ord = ep->ird;
1720 			event.ird = ep->ord;
1721 			event.private_data_len = ep->plen -
1722 				sizeof(struct mpa_v2_conn_params);
1723 			event.private_data = ep->mpa_pkt +
1724 				sizeof(struct mpa_message) +
1725 				sizeof(struct mpa_v2_conn_params);
1726 		} else {
1727 
1728 			CTR2(KTR_IW_CXGBE, "%s:cru2 %p", __func__, ep);
1729 			/* this means MPA_v1 is used */
1730 			event.ord = c4iw_max_read_depth;
1731 			event.ird = c4iw_max_read_depth;
1732 			event.private_data_len = ep->plen;
1733 			event.private_data = ep->mpa_pkt +
1734 				sizeof(struct mpa_message);
1735 		}
1736 	}
1737 
1738 	if (ep->com.cm_id) {
1739 
1740 		CTR2(KTR_IW_CXGBE, "%s:cru3 %p", __func__, ep);
1741 		set_bit(CONN_RPL_UPCALL, &ep->com.history);
1742 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1743 	}
1744 
1745 	if (status == -ECONNABORTED) {
1746 
1747 		CTR3(KTR_IW_CXGBE, "%s:cruE %p %d", __func__, ep, status);
1748 		return;
1749 	}
1750 
1751 	if (status < 0) {
1752 
1753 		CTR3(KTR_IW_CXGBE, "%s:cru4 %p %d", __func__, ep, status);
1754 		deref_cm_id(&ep->com);
1755 	}
1756 
1757 	CTR2(KTR_IW_CXGBE, "%s:cruE %p", __func__, ep);
1758 }
1759 
1760 static int connect_request_upcall(struct c4iw_ep *ep)
1761 {
1762 	struct iw_cm_event event;
1763 	int ret;
1764 
1765 	CTR3(KTR_IW_CXGBE, "%s: ep %p, mpa_v1 %d", __func__, ep,
1766 	    ep->tried_with_mpa_v1);
1767 
1768 	memset(&event, 0, sizeof(event));
1769 	event.event = IW_CM_EVENT_CONNECT_REQUEST;
1770 	event.local_addr = ep->com.local_addr;
1771 	event.remote_addr = ep->com.remote_addr;
1772 	event.provider_data = ep;
1773 
1774 	if (!ep->tried_with_mpa_v1) {
1775 		/* this means MPA_v2 is used */
1776 		event.ord = ep->ord;
1777 		event.ird = ep->ird;
1778 		event.private_data_len = ep->plen -
1779 			sizeof(struct mpa_v2_conn_params);
1780 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
1781 			sizeof(struct mpa_v2_conn_params);
1782 	} else {
1783 
1784 		/* this means MPA_v1 is used. Send max supported */
1785 		event.ord = c4iw_max_read_depth;
1786 		event.ird = c4iw_max_read_depth;
1787 		event.private_data_len = ep->plen;
1788 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
1789 	}
1790 
1791 	c4iw_get_ep(&ep->com);
1792 	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
1793 	    &event);
1794 	if (ret) {
1795 		CTR3(KTR_IW_CXGBE, "%s: ep %p, Failure while notifying event to"
1796 			" IWCM, err:%d", __func__, ep, ret);
1797 		c4iw_put_ep(&ep->com);
1798 	} else
1799 		/* Dereference parent_ep only in success case.
1800 		 * In case of failure, parent_ep is dereferenced by the caller
1801 		 * of process_mpa_request().
1802 		 */
1803 		c4iw_put_ep(&ep->parent_ep->com);
1804 
1805 	set_bit(CONNREQ_UPCALL, &ep->com.history);
1806 	return ret;
1807 }
1808 
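/*
 * established_upcall - deliver IW_CM_EVENT_ESTABLISHED to the ULP once
 * the MPA exchange has completed.  The ird/ord pair is reported swapped,
 * mirroring connect_reply_upcall above.
 */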
1809 static void established_upcall(struct c4iw_ep *ep)
1810 {
1811 	struct iw_cm_event event;
1812 
1813 	CTR2(KTR_IW_CXGBE, "%s:euB %p", __func__, ep);
1814 	memset(&event, 0, sizeof(event));
1815 	event.event = IW_CM_EVENT_ESTABLISHED;
1816 	event.ird = ep->ord;
1817 	event.ord = ep->ird;
1818 
1819 	if (ep->com.cm_id) {
1820 
1821 		CTR2(KTR_IW_CXGBE, "%s:eu1 %p", __func__, ep);
1822 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1823 		set_bit(ESTAB_UPCALL, &ep->com.history);
1824 	}
1825 	CTR2(KTR_IW_CXGBE, "%s:euE %p", __func__, ep);
1826 }
1827 
1828 
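/*
 * With relaxed IRD negotiation the driver adapts the local IRD/ORD to
 * the peer's advertised values (within the adapter's max_ordird_qp
 * limit) instead of terminating the connection with MPA_INSUFF_IRD.
 */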
1829 #define RELAXED_IRD_NEGOTIATION 1
1830 
1831 /*
1832  * process_mpa_reply - process streaming mode MPA reply
1833  *
1834  * Returns:
1835  *
1836  * 0 upon success indicating a connect reply was delivered to the ULP
1837  * or the MPA reply is incomplete but valid so far.
1838  *
1839  * 1 if a failure requires the caller to close the connection.
1840  *
1841  * 2 if a failure requires the caller to abort the connection.
1842  */
1843 static int process_mpa_reply(struct c4iw_ep *ep)
1844 {
1845 	struct mpa_message *mpa;
1846 	struct mpa_v2_conn_params *mpa_v2_params;
1847 	u16 plen;
1848 	u16 resp_ird, resp_ord;
1849 	u8 rtr_mismatch = 0, insuff_ird = 0;
1850 	struct c4iw_qp_attributes attrs = {0};
1851 	enum c4iw_qp_attr_mask mask;
1852 	int err;
1853 	struct mbuf *top, *m;
1854 	int flags = MSG_DONTWAIT;
1855 	struct uio uio;
1856 	int disconnect = 0;
1857 
1858 	CTR2(KTR_IW_CXGBE, "%s:pmrB %p", __func__, ep);
1859 
1860 	/*
1861 	 * Stop mpa timer.  If it expired, then
1862 	 * we ignore the MPA reply.  process_timeout()
1863 	 * will abort the connection.
1864 	 */
1865 	if (STOP_EP_TIMER(ep))
1866 		return 0;
1867 
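	/*
	 * Pull everything queued on the socket as an mbuf chain.  When a
	 * non-NULL mbuf pointer is passed, soreceive() hands the data back
	 * in mbufs and effectively only the byte budget (uio_resid) and
	 * thread (uio_td) matter, which is why the remaining uio fields
	 * are left unset here.
	 */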
1868 	uio.uio_resid = 1000000;
1869 	uio.uio_td = ep->com.thread;
1870 	err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags);
1871 
1872 	if (err) {
1873 
1874 		if (err == EWOULDBLOCK) {
1875 
1876 			CTR2(KTR_IW_CXGBE, "%s:pmr1 %p", __func__, ep);
1877 			START_EP_TIMER(ep);
1878 			return 0;
1879 		}
1880 		err = -err;
1881 		CTR2(KTR_IW_CXGBE, "%s:pmr2 %p", __func__, ep);
1882 		goto err;
1883 	}
1884 
1885 	if (ep->com.so->so_rcv.sb_mb) {
1886 
1887 		CTR2(KTR_IW_CXGBE, "%s:pmr3 %p", __func__, ep);
1888 		printf("%s data after soreceive called! so %p sb_mb %p top %p\n",
1889 		       __func__, ep->com.so, ep->com.so->so_rcv.sb_mb, top);
1890 	}
1891 
1892 	m = top;
1893 
1894 	do {
1895 
1896 		CTR2(KTR_IW_CXGBE, "%s:pmr4 %p", __func__, ep);
1897 		/*
1898 		 * If we get more than the supported amount of private data
1899 		 * then we must fail this connection.
1900 		 */
1901 		if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) {
1902 
1903 			CTR3(KTR_IW_CXGBE, "%s:pmr5 %p %d", __func__, ep,
1904 			    ep->mpa_pkt_len + m->m_len);
1905 			err = (-EINVAL);
1906 			goto err_stop_timer;
1907 		}
1908 
1909 		/*
1910 		 * copy the new data into our accumulation buffer.
1911 		 */
1912 		m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len]));
1913 		ep->mpa_pkt_len += m->m_len;
1914 		if (!m->m_next)
1915 			m = m->m_nextpkt;
1916 		else
1917 			m = m->m_next;
1918 	} while (m);
1919 
1920 	m_freem(top);
1921 	/*
1922 	 * if we don't even have the mpa message, then bail.
1923 	 */
1924 	if (ep->mpa_pkt_len < sizeof(*mpa)) {
1925 		return 0;
1926 	}
1927 	mpa = (struct mpa_message *) ep->mpa_pkt;
1928 
1929 	/* Validate MPA header. */
1930 	if (mpa->revision > mpa_rev) {
1931 
1932 		CTR4(KTR_IW_CXGBE, "%s:pmr6 %p %d %d", __func__, ep,
1933 		    mpa->revision, mpa_rev);
1934 		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d, "
1935 				" Received = %d\n", __func__, mpa_rev, mpa->revision);
1936 		err = -EPROTO;
1937 		goto err_stop_timer;
1938 	}
1939 
1940 	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
1941 
1942 		CTR2(KTR_IW_CXGBE, "%s:pmr7 %p", __func__, ep);
1943 		err = -EPROTO;
1944 		goto err_stop_timer;
1945 	}
1946 
1947 	plen = ntohs(mpa->private_data_size);
1948 
1949 	/*
1950 	 * Fail if there's too much private data.
1951 	 */
1952 	if (plen > MPA_MAX_PRIVATE_DATA) {
1953 
1954 		CTR2(KTR_IW_CXGBE, "%s:pmr8 %p", __func__, ep);
1955 		err = -EPROTO;
1956 		goto err_stop_timer;
1957 	}
1958 
1959 	/*
1960 	 * Fail if we have more data than the MPA header plus plen account for.
1961 	 */
1962 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1963 
1964 		CTR2(KTR_IW_CXGBE, "%s:pmr9 %p", __func__, ep);
1965 		/* timer is stopped at the err_stop_timer label below */
1966 		err = -EPROTO;
1967 		goto err_stop_timer;
1968 	}
1969 
1970 	ep->plen = (u8) plen;
1971 
1972 	/*
1973 	 * If we don't have all the pdata yet, then bail.
1974 	 * We'll continue process when more data arrives.
1975 	 */
1976 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) {
1977 
1978 		CTR2(KTR_IW_CXGBE, "%s:pmra %p", __func__, ep);
1979 		return 0;
1980 	}
1981 
1982 	if (mpa->flags & MPA_REJECT) {
1983 
1984 		CTR2(KTR_IW_CXGBE, "%s:pmrb %p", __func__, ep);
1985 		err = -ECONNREFUSED;
1986 		goto err_stop_timer;
1987 	}
1988 
1989 	/*
1990 	 * If we get here we have accumulated the entire mpa
1991 	 * start reply message including private data. And
1992 	 * the MPA header is valid.
1993 	 */
1994 	ep->com.state = FPDU_MODE;
1995 	ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
1996 	ep->mpa_attr.recv_marker_enabled = markers_enabled;
1997 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1998 	ep->mpa_attr.version = mpa->revision;
1999 	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
2000 
2001 	if (mpa->revision == 2) {
2002 
2003 		CTR2(KTR_IW_CXGBE, "%s:pmrc %p", __func__, ep);
2004 		ep->mpa_attr.enhanced_rdma_conn =
2005 			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
2006 
2007 		if (ep->mpa_attr.enhanced_rdma_conn) {
2008 
2009 			CTR2(KTR_IW_CXGBE, "%s:pmrd %p", __func__, ep);
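			/*
			 * The MPA v2 connection parameters follow the MPA
			 * header: 16-bit IRD and ORD words in network byte
			 * order.  The low bits (MPA_V2_IRD_ORD_MASK) carry
			 * the values; the high bits flag the peer-to-peer
			 * model and the RTR message type.
			 */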
2010 			mpa_v2_params = (struct mpa_v2_conn_params *)
2011 				(ep->mpa_pkt + sizeof(*mpa));
2012 			resp_ird = ntohs(mpa_v2_params->ird) &
2013 				MPA_V2_IRD_ORD_MASK;
2014 			resp_ord = ntohs(mpa_v2_params->ord) &
2015 				MPA_V2_IRD_ORD_MASK;
2016 
2017 			/*
2018 			 * This is a double-check. Ideally, below checks are
2019 			 * not required since ird/ord stuff has been taken
2020 			 * care of in c4iw_accept_cr
2021 			 */
2022 			if (ep->ird < resp_ord) {
2023 				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
2024 				   ep->com.dev->rdev.adap->params.max_ordird_qp)
2025 					ep->ird = resp_ord;
2026 				else
2027 					insuff_ird = 1;
2028 			} else if (ep->ird > resp_ord) {
2029 				ep->ird = resp_ord;
2030 			}
2031 			if (ep->ord > resp_ird) {
2032 				if (RELAXED_IRD_NEGOTIATION)
2033 					ep->ord = resp_ird;
2034 				else
2035 					insuff_ird = 1;
2036 			}
2037 			if (insuff_ird) {
2038 				err = -ENOMEM;
2039 				ep->ird = resp_ord;
2040 				ep->ord = resp_ird;
2041 			}
2042 
2043 			if (ntohs(mpa_v2_params->ird) &
2044 				MPA_V2_PEER2PEER_MODEL) {
2045 
2046 				CTR2(KTR_IW_CXGBE, "%s:pmrf %p", __func__, ep);
2047 				if (ntohs(mpa_v2_params->ord) &
2048 					MPA_V2_RDMA_WRITE_RTR) {
2049 
2050 					CTR2(KTR_IW_CXGBE, "%s:pmrg %p", __func__, ep);
2051 					ep->mpa_attr.p2p_type =
2052 						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
2053 				} else if (ntohs(mpa_v2_params->ord) &
2055 					MPA_V2_RDMA_READ_RTR) {
2056 
2057 					CTR2(KTR_IW_CXGBE, "%s:pmrh %p", __func__, ep);
2058 					ep->mpa_attr.p2p_type =
2059 						FW_RI_INIT_P2PTYPE_READ_REQ;
2060 				}
2061 			}
2062 		}
2063 	} else {
2064 
2065 		CTR2(KTR_IW_CXGBE, "%s:pmri %p", __func__, ep);
2066 
2067 		if (mpa->revision == 1) {
2068 
2069 			CTR2(KTR_IW_CXGBE, "%s:pmrj %p", __func__, ep);
2070 
2071 			if (peer2peer) {
2072 
2073 				CTR2(KTR_IW_CXGBE, "%s:pmrk %p", __func__, ep);
2074 				ep->mpa_attr.p2p_type = p2p_type;
2075 			}
2076 		}
2077 	}
2078 
2079 	if (set_tcpinfo(ep)) {
2080 
2081 		CTR2(KTR_IW_CXGBE, "%s:pmrl %p", __func__, ep);
2082 		printf("%s set_tcpinfo error\n", __func__);
2083 		err = -ECONNRESET;
2084 		goto err;
2085 	}
2086 
2087 	CTR6(KTR_IW_CXGBE, "%s - crc_enabled = %d, recv_marker_enabled = %d, "
2088 	    "xmit_marker_enabled = %d, version = %d p2p_type = %d", __func__,
2089 	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
2090 	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
2091 	    ep->mpa_attr.p2p_type);
2092 
2093 	/*
2094 	 * If responder's RTR does not match with that of initiator, assign
2095 	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
2096 	 * generated when moving QP to RTS state.
2097 	 * A TERM message will be sent after QP has moved to RTS state
2098 	 */
2099 	if ((ep->mpa_attr.version == 2) && peer2peer &&
2100 		(ep->mpa_attr.p2p_type != p2p_type)) {
2101 
2102 		CTR2(KTR_IW_CXGBE, "%s:pmrm %p", __func__, ep);
2103 		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
2104 		rtr_mismatch = 1;
2105 	}
2106 
2107 
2108 	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
2109 	attrs.mpa_attr = ep->mpa_attr;
2110 	attrs.max_ird = ep->ird;
2111 	attrs.max_ord = ep->ord;
2112 	attrs.llp_stream_handle = ep;
2113 	attrs.next_state = C4IW_QP_STATE_RTS;
2114 
2115 	mask = C4IW_QP_ATTR_NEXT_STATE |
2116 		C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
2117 		C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
2118 
2119 	/* bind QP and TID with INIT_WR */
2120 	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
2121 
2122 	if (err) {
2123 
2124 		CTR2(KTR_IW_CXGBE, "%s:pmrn %p", __func__, ep);
2125 		goto err;
2126 	}
2127 
2128 	/*
2129 	 * If responder's RTR requirement did not match with what initiator
2130 	 * supports, generate TERM message
2131 	 */
2132 	if (rtr_mismatch) {
2133 
2134 		CTR2(KTR_IW_CXGBE, "%s:pmro %p", __func__, ep);
2135 		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
2136 		attrs.layer_etype = LAYER_MPA | DDP_LLP;
2137 		attrs.ecode = MPA_NOMATCH_RTR;
2138 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
2139 		attrs.send_term = 1;
2140 		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2141 			C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
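		/* report -ENOMEM to the ULP regardless of the modify_qp result */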
2142 		err = -ENOMEM;
2143 		disconnect = 1;
2144 		goto out;
2145 	}
2146 
2147 	/*
2148 	 * Generate TERM if initiator IRD is not sufficient for responder
2149 	 * provided ORD.  Currently we take the same action even when the
2150 	 * responder provided IRD is insufficient for the initiator ORD.
2152 	 */
2153 	if (insuff_ird) {
2154 
2155 		CTR2(KTR_IW_CXGBE, "%s:pmrp %p", __func__, ep);
2156 		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
2157 				__func__);
2158 		attrs.layer_etype = LAYER_MPA | DDP_LLP;
2159 		attrs.ecode = MPA_INSUFF_IRD;
2160 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
2161 		attrs.send_term = 1;
2162 		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2163 			C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
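		/* report -ENOMEM to the ULP regardless of the modify_qp result */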
2164 		err = -ENOMEM;
2165 		disconnect = 1;
2166 		goto out;
2167 	}
2168 	goto out;
2169 err_stop_timer:
2170 	STOP_EP_TIMER(ep);
2171 err:
2172 	disconnect = 2;
2173 out:
2174 	connect_reply_upcall(ep, err);
2175 	CTR2(KTR_IW_CXGBE, "%s:pmrE %p", __func__, ep);
2176 	return disconnect;
2177 }
2178 
2179 /*
2180  * process_mpa_request - process streaming mode MPA request
2181  *
2182  * Returns:
2183  *
2184  * 0 upon success indicating a connect request was delivered to the ULP
2185  * or the mpa request is incomplete but valid so far.
2186  *
2187  * 1 if a failure requires the caller to close the connection.
2188  *
2189  * 2 if a failure requires the caller to abort the connection.
2190  */
2191 static int
2192 process_mpa_request(struct c4iw_ep *ep)
2193 {
2194 	struct mpa_message *mpa;
2195 	struct mpa_v2_conn_params *mpa_v2_params;
2196 	u16 plen;
2197 	int flags = MSG_DONTWAIT;
2198 	int rc;
2199 	struct iovec iov;
2200 	struct uio uio;
2201 	enum c4iw_ep_state state = ep->com.state;
2202 
2203 	CTR3(KTR_IW_CXGBE, "%s: ep %p, state %s", __func__, ep, states[state]);
2204 
2205 	if (state != MPA_REQ_WAIT)
2206 		return 0;
2207 
2208 	iov.iov_base = &ep->mpa_pkt[ep->mpa_pkt_len];
2209 	iov.iov_len = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
2210 	uio.uio_iov = &iov;
2211 	uio.uio_iovcnt = 1;
2212 	uio.uio_offset = 0;
2213 	uio.uio_resid = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
2214 	uio.uio_segflg = UIO_SYSSPACE;
2215 	uio.uio_rw = UIO_READ;
2216 	uio.uio_td = NULL; /* uio.uio_td = ep->com.thread; */
2217 
2218 	rc = soreceive(ep->com.so, NULL, &uio, NULL, NULL, &flags);
2219 	if (rc == EAGAIN)
2220 		return 0;
2221 	else if (rc)
2222 		goto err_stop_timer;
2223 
2224 	KASSERT(uio.uio_offset > 0, ("%s: soreceive on so %p read no data",
2225 	    __func__, ep->com.so));
2226 	ep->mpa_pkt_len += uio.uio_offset;
2227 
2228 	/*
2229 	 * If we get more than the supported amount of private data then we must
2230 	 * fail this connection.  XXX: check so_rcv->sb_cc, or peek with another
2231 	 * soreceive, or increase the size of mpa_pkt by 1 and abort if the last
2232 	 * byte is filled by the soreceive above.
2233 	 */
2234 
2235 	/* Don't even have the MPA message.  Wait for more data to arrive. */
2236 	if (ep->mpa_pkt_len < sizeof(*mpa))
2237 		return 0;
2238 	mpa = (struct mpa_message *) ep->mpa_pkt;
2239 
2240 	/*
2241 	 * Validate MPA Header.
2242 	 */
2243 	if (mpa->revision > mpa_rev) {
2244 		log(LOG_ERR, "%s: MPA version mismatch. Local = %d,"
2245 		    " Received = %d\n", __func__, mpa_rev, mpa->revision);
2246 		goto err_stop_timer;
2247 	}
2248 
2249 	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
2250 		goto err_stop_timer;
2251 
2252 	/*
2253 	 * Fail if there's too much private data.
2254 	 */
2255 	plen = ntohs(mpa->private_data_size);
2256 	if (plen > MPA_MAX_PRIVATE_DATA)
2257 		goto err_stop_timer;
2258 
2259 	/*
2260 	 * Fail if we have more data than the MPA header plus plen account for.
2261 	 */
2262 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
2263 		goto err_stop_timer;
2264 
2265 	ep->plen = (u8) plen;
2266 
2267 	/*
2268 	 * If we don't have all the pdata yet, then bail.
2269 	 */
2270 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
2271 		return 0;
2272 
2273 	/*
2274 	 * If we get here we have accumulated the entire mpa
2275 	 * start reply message including private data.
2276 	 */
2277 	ep->mpa_attr.initiator = 0;
2278 	ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
2279 	ep->mpa_attr.recv_marker_enabled = markers_enabled;
2280 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
2281 	ep->mpa_attr.version = mpa->revision;
2282 	if (mpa->revision == 1)
2283 		ep->tried_with_mpa_v1 = 1;
2284 	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
2285 
2286 	if (mpa->revision == 2) {
2287 		ep->mpa_attr.enhanced_rdma_conn =
2288 		    mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
2289 		if (ep->mpa_attr.enhanced_rdma_conn) {
2290 			mpa_v2_params = (struct mpa_v2_conn_params *)
2291 				(ep->mpa_pkt + sizeof(*mpa));
2292 			ep->ird = ntohs(mpa_v2_params->ird) &
2293 				MPA_V2_IRD_ORD_MASK;
2294 			ep->ird = min_t(u32, ep->ird,
2295 					cur_max_read_depth(ep->com.dev));
2296 			ep->ord = ntohs(mpa_v2_params->ord) &
2297 				MPA_V2_IRD_ORD_MASK;
2298 			ep->ord = min_t(u32, ep->ord,
2299 					cur_max_read_depth(ep->com.dev));
2300 			CTR3(KTR_IW_CXGBE, "%s initiator ird %u ord %u\n",
2301 				 __func__, ep->ird, ep->ord);
2302 			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
2303 				if (peer2peer) {
2304 					if (ntohs(mpa_v2_params->ord) &
2305 							MPA_V2_RDMA_WRITE_RTR)
2306 						ep->mpa_attr.p2p_type =
2307 						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
2308 					else if (ntohs(mpa_v2_params->ord) &
2309 							MPA_V2_RDMA_READ_RTR)
2310 						ep->mpa_attr.p2p_type =
2311 						FW_RI_INIT_P2PTYPE_READ_REQ;
2312 				}
2313 		}
2314 	} else if (mpa->revision == 1 && peer2peer)
2315 		ep->mpa_attr.p2p_type = p2p_type;
2316 
2317 	if (set_tcpinfo(ep))
2318 		goto err_stop_timer;
2319 
2320 	CTR5(KTR_IW_CXGBE, "%s: crc_enabled = %d, recv_marker_enabled = %d, "
2321 	    "xmit_marker_enabled = %d, version = %d", __func__,
2322 	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
2323 	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
2324 
2325 	ep->com.state = MPA_REQ_RCVD;
2326 	STOP_EP_TIMER(ep);
2327 
2328 	/* drive upcall */
2329 	if (ep->parent_ep->com.state != DEAD)
2330 		if (connect_request_upcall(ep))
2331 			goto err_out;
2332 	return 0;
2333 
2334 err_stop_timer:
2335 	STOP_EP_TIMER(ep);
2336 err_out:
2337 	return 2;
2338 }
2339 
2340 /*
2341  * iwcm->reject.  Reject an incoming connect request, either with an
2342  * MPA reject message carrying the private data (mpa_rev > 0) or by
2343  * aborting the connection outright (mpa_rev == 0).
2344  */
2345 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
2346 {
2347 	int err;
2348 	struct c4iw_ep *ep = to_ep(cm_id);
2349 	int abort = 0;
2350 
2351 	mutex_lock(&ep->com.mutex);
2352 	CTR2(KTR_IW_CXGBE, "%s:crcB %p", __func__, ep);
2353 
2354 	/* DEAD or any state other than MPA_REQ_RCVD is an error here. */
2355 	if (ep->com.state != MPA_REQ_RCVD) {
2356 
2357 		CTR2(KTR_IW_CXGBE, "%s:crc1 %p", __func__, ep);
2358 		mutex_unlock(&ep->com.mutex);
2359 		c4iw_put_ep(&ep->com);
2360 		return -ECONNRESET;
2361 	}
2362 	set_bit(ULP_REJECT, &ep->com.history);
2363 
2364 	if (mpa_rev == 0) {
2365 
2366 		CTR2(KTR_IW_CXGBE, "%s:crc2 %p", __func__, ep);
2367 		abort = 1;
2368 	} else {
2370 
2371 		CTR2(KTR_IW_CXGBE, "%s:crc3 %p", __func__, ep);
2372 		abort = send_mpa_reject(ep, pdata, pdata_len);
2373 	}
2374 	STOP_EP_TIMER(ep);
2375 	err = c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
2376 	mutex_unlock(&ep->com.mutex);
2377 	c4iw_put_ep(&ep->com);
2378 	CTR3(KTR_IW_CXGBE, "%s:crc4 %p, err: %d", __func__, ep, err);
2379 	return 0;
2380 }
2381 
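/*
 * iwcm->accept.  Validate the endpoint state, negotiate ird/ord against
 * the values the peer advertised in its MPA request, bind the QP to the
 * endpoint and move it to RTS, then send the MPA reply.  Any failure
 * after the cm_id has been referenced aborts the connection.
 */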
2382 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2383 {
2384 	int err;
2385 	struct c4iw_qp_attributes attrs = {0};
2386 	enum c4iw_qp_attr_mask mask;
2387 	struct c4iw_ep *ep = to_ep(cm_id);
2388 	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
2389 	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
2390 	int abort = 0;
2391 
2392 	mutex_lock(&ep->com.mutex);
2393 	CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep);
2394 
2395 	/* DEAD or any state other than MPA_REQ_RCVD is an error here. */
2396 	if (ep->com.state != MPA_REQ_RCVD) {
2397 
2398 		CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep);
2399 		err = -ECONNRESET;
2400 		goto err_out;
2401 	}
2402 
2403 	BUG_ON(!qp);
2404 
2405 	set_bit(ULP_ACCEPT, &ep->com.history);
2406 
2407 	if ((conn_param->ord > c4iw_max_read_depth) ||
2408 		(conn_param->ird > c4iw_max_read_depth)) {
2409 
2410 		CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep);
2411 		err = -EINVAL;
2412 		goto err_abort;
2413 	}
2414 
2415 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
2416 
2417 		CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep);
2418 
2419 		if (conn_param->ord > ep->ird) {
2420 			if (RELAXED_IRD_NEGOTIATION) {
2421 				conn_param->ord = ep->ird;
2422 			} else {
2423 				ep->ird = conn_param->ird;
2424 				ep->ord = conn_param->ord;
2425 				send_mpa_reject(ep, conn_param->private_data,
2426 						conn_param->private_data_len);
2427 				err = -ENOMEM;
2428 				goto err_abort;
2429 			}
2430 		}
2431 		if (conn_param->ird < ep->ord) {
2432 			if (RELAXED_IRD_NEGOTIATION &&
2433 			    ep->ord <= h->rdev.adap->params.max_ordird_qp) {
2434 				conn_param->ird = ep->ord;
2435 			} else {
2436 				err = -ENOMEM;
2437 				goto err_abort;
2438 			}
2439 		}
2440 	}
2441 	ep->ird = conn_param->ird;
2442 	ep->ord = conn_param->ord;
2443 
2444 	if (ep->mpa_attr.version == 1) {
2445 		if (peer2peer && ep->ird == 0)
2446 			ep->ird = 1;
2447 	} else {
2448 		if (peer2peer &&
2449 		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
2450 		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
2451 			ep->ird = 1;
2452 	}
2453 
2454 	CTR4(KTR_IW_CXGBE, "%s %d ird %d ord %d\n", __func__, __LINE__,
2455 			ep->ird, ep->ord);
2456 
2457 	ep->com.cm_id = cm_id;
2458 	ref_cm_id(&ep->com);
2459 	ep->com.qp = qp;
2460 	ref_qp(ep);
2461 	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
2462 
2463 	/* bind QP to EP and move to RTS */
2464 	attrs.mpa_attr = ep->mpa_attr;
2465 	attrs.max_ird = ep->ird;
2466 	attrs.max_ord = ep->ord;
2467 	attrs.llp_stream_handle = ep;
2468 	attrs.next_state = C4IW_QP_STATE_RTS;
2469 
2470 	/* bind QP and TID with INIT_WR */
2471 	mask = C4IW_QP_ATTR_NEXT_STATE |
2472 		C4IW_QP_ATTR_LLP_STREAM_HANDLE |
2473 		C4IW_QP_ATTR_MPA_ATTR |
2474 		C4IW_QP_ATTR_MAX_IRD |
2475 		C4IW_QP_ATTR_MAX_ORD;
2476 
2477 	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
2478 	if (err) {
2479 		CTR3(KTR_IW_CXGBE, "%s:caca %p, err: %d", __func__, ep, err);
2480 		goto err_deref_cm_id;
2481 	}
2482 
2483 	err = send_mpa_reply(ep, conn_param->private_data,
2484 			conn_param->private_data_len);
2485 	if (err) {
2486 		CTR3(KTR_IW_CXGBE, "%s:cacb %p, err: %d", __func__, ep, err);
2487 		goto err_deref_cm_id;
2488 	}
2489 
2490 	ep->com.state = FPDU_MODE;
2491 	established_upcall(ep);
2492 	mutex_unlock(&ep->com.mutex);
2493 	c4iw_put_ep(&ep->com);
2494 	CTR2(KTR_IW_CXGBE, "%s:cacE %p", __func__, ep);
2495 	return 0;
2496 err_deref_cm_id:
2497 	deref_cm_id(&ep->com);
2498 err_abort:
2499 	abort = 1;
2500 err_out:
2501 	if (abort)
2502 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
2503 	mutex_unlock(&ep->com.mutex);
2504 	c4iw_put_ep(&ep->com);
2505 	CTR2(KTR_IW_CXGBE, "%s:cacE err %p", __func__, ep);
2506 	return err;
2507 }
2508 
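/*
 * Create a TCP socket, bind it to the requested local address, and read
 * the bound address back with sock_getname() so that a kernel-chosen
 * ephemeral port is reflected in *laddr.
 */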
2509 static int
2510 c4iw_sock_create(struct sockaddr_storage *laddr, struct socket **so)
2511 {
2512 	int ret;
2513 	int size;
2514 	struct socket *sock = NULL;
2515 
2516 	ret = sock_create_kern(laddr->ss_family,
2517 			SOCK_STREAM, IPPROTO_TCP, &sock);
2518 	if (ret) {
2519 		CTR2(KTR_IW_CXGBE, "%s:Failed to create TCP socket. err %d",
2520 				__func__, ret);
2521 		return ret;
2522 	}
2523 
2524 	ret = sobind(sock, (struct sockaddr *)laddr, curthread);
2525 	if (ret) {
2526 		CTR2(KTR_IW_CXGBE, "%s:Failed to bind socket. err %d",
2527 				__func__, ret);
2528 		sock_release(sock);
2529 		return ret;
2530 	}
2531 
2532 	size = laddr->ss_family == AF_INET6 ?
2533 		sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in);
2534 	ret = sock_getname(sock, (struct sockaddr *)laddr, &size, 0);
2535 	if (ret) {
2536 		CTR2(KTR_IW_CXGBE, "%s:sock_getname failed. err %d",
2537 				__func__, ret);
2538 		sock_release(sock);
2539 		return ret;
2540 	}
2541 
2542 	*so = sock;
2543 	return 0;
2544 }
2545 
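/*
 * iwcm->connect.  Resolve the egress ifnet for the remote address,
 * require it to be TOE-capable, create and bind a socket, and kick off
 * a non-blocking soconnect().  The rest of connection setup is driven
 * by socket upcalls once the TCP handshake completes.
 */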
2546 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2547 {
2548 	int err = 0;
2549 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2550 	struct c4iw_ep *ep = NULL;
2551 	struct ifnet    *nh_ifp;        /* Logical egress interface */
2552 #ifdef VIMAGE
2553 	struct rdma_cm_id *rdma_id = (struct rdma_cm_id*)cm_id->context;
2554 	struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
2555 #endif
2556 
2557 	CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id);
2558 
2559 
2560 	if ((conn_param->ord > c4iw_max_read_depth) ||
2561 		(conn_param->ird > c4iw_max_read_depth)) {
2562 
2563 		CTR2(KTR_IW_CXGBE, "%s:cc1 %p", __func__, cm_id);
2564 		err = -EINVAL;
2565 		goto out;
2566 	}
2567 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2568 	cm_id->provider_data = ep;
2569 
2570 	init_timer(&ep->timer);
2571 	ep->plen = conn_param->private_data_len;
2572 
2573 	if (ep->plen) {
2574 
2575 		CTR2(KTR_IW_CXGBE, "%s:cc3 %p", __func__, ep);
2576 		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
2577 				conn_param->private_data, ep->plen);
2578 	}
2579 	ep->ird = conn_param->ird;
2580 	ep->ord = conn_param->ord;
2581 
2582 	if (peer2peer && ep->ord == 0) {
2583 
2584 		CTR2(KTR_IW_CXGBE, "%s:cc4 %p", __func__, ep);
2585 		ep->ord = 1;
2586 	}
2587 
2588 	ep->com.dev = dev;
2589 	ep->com.cm_id = cm_id;
2590 	ref_cm_id(&ep->com);
2591 	ep->com.qp = get_qhp(dev, conn_param->qpn);
2592 
2593 	if (!ep->com.qp) {
2594 
2595 		CTR2(KTR_IW_CXGBE, "%s:cc5 %p", __func__, ep);
2596 		err = -EINVAL;
2597 		goto fail;
2598 	}
2599 	ref_qp(ep);
2600 	ep->com.thread = curthread;
2601 
2602 	CURVNET_SET(vnet);
2603 	err = get_ifnet_from_raddr(&cm_id->remote_addr, &nh_ifp);
2604 	CURVNET_RESTORE();
2605 
2606 	if (err) {
2607 
2608 		CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep);
2609 		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
2610 		err = -EHOSTUNREACH;
2611 		goto fail;	/* release the cm_id and ep references */
2612 	}
2613 
2614 	if (!(nh_ifp->if_capenable & IFCAP_TOE) ||
2615 	    TOEDEV(nh_ifp) == NULL) {
2616 		err = -ENOPROTOOPT;
2617 		goto fail;
2618 	}
2619 	ep->com.state = CONNECTING;
2620 	ep->tos = 0;
2621 	ep->com.local_addr = cm_id->local_addr;
2622 	ep->com.remote_addr = cm_id->remote_addr;
2623 
2624 	err = c4iw_sock_create(&cm_id->local_addr, &ep->com.so);
2625 	if (err)
2626 		goto fail;
2627 
2628 	setiwsockopt(ep->com.so);
2629 	init_iwarp_socket(ep->com.so, &ep->com);
2630 	err = -soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr,
2631 		ep->com.thread);
2632 	if (err)
2633 		goto fail_free_so;
2634 	CTR2(KTR_IW_CXGBE, "%s:ccE, ep %p", __func__, ep);
2635 	return 0;
2636 
2637 fail_free_so:
2638 	uninit_iwarp_socket(ep->com.so);
2639 	ep->com.state = DEAD;
2640 	sock_release(ep->com.so);
2641 fail:
2642 	deref_cm_id(&ep->com);
2643 	c4iw_put_ep(&ep->com);
2644 	ep = NULL;
2645 out:
2646 	CTR2(KTR_IW_CXGBE, "%s:ccE Error %d", __func__, err);
2647 	return err;
2648 }
2649 
2650 /*
2651  * iwcm->create_listen.  Returns -errno on failure.
2652  */
2653 int
2654 c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
2655 {
2656 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2657 	struct c4iw_listen_ep *lep = NULL;
2658 	struct listen_port_info *port_info = NULL;
2659 	int rc = 0;
2660 
2661 	CTR3(KTR_IW_CXGBE, "%s: cm_id %p, backlog %d", __func__, cm_id,
2662 			backlog);
2663 	lep = alloc_ep(sizeof(*lep), GFP_KERNEL);
2664 	lep->com.cm_id = cm_id;
2665 	ref_cm_id(&lep->com);
2666 	lep->com.dev = dev;
2667 	lep->backlog = backlog;
2668 	lep->com.local_addr = cm_id->local_addr;
2669 	lep->com.thread = curthread;
2670 	cm_id->provider_data = lep;
2671 	lep->com.state = LISTEN;
2672 
2673 	/* In case of INADDR_ANY, ibcore creates a cmid for each device and
2674 	 * invokes the iw_cxgbe listener callbacks, assuming that iw_cxgbe
2675 	 * creates HW listeners for each device separately.  But toecore
2676 	 * expects a single solisten() call with the INADDR_ANY address to
2677 	 * create HW listeners on all devices for a given port number, so
2678 	 * the iw_cxgbe driver calls solisten() only once for INADDR_ANY
2679 	 * (usually on the first listener callback from ibcore).  All
2680 	 * subsequent INADDR_ANY listener callbacks from ibcore for the
2681 	 * same port address skip solisten(), as the first callback has
2682 	 * already created listeners on all other devices.
2683 	 */
2684 	if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr)) {
2685 		port_info = add_ep_to_listenlist(lep);
2686 		/* skip solisten() if refcnt > 1, as the listeners were
2687 		 * already created by the 'Master lep'.
2688 		 */
2689 		if (port_info->refcnt > 1) {
2690 			/* As there will be only one listener socket for a TCP
2691 			 * port, copy Master lep's socket pointer to other lep's
2692 			 * that are belonging to same TCP port.
2693 			 */
2694 			struct c4iw_listen_ep *head_lep =
2695 					container_of(port_info->lep_list.next,
2696 					struct c4iw_listen_ep, listen_ep_list);
2697 			lep->com.so =  head_lep->com.so;
2698 			goto out;
2699 		}
2700 	}
2701 	rc = c4iw_sock_create(&cm_id->local_addr, &lep->com.so);
2702 	if (rc) {
2703 		CTR2(KTR_IW_CXGBE, "%s:Failed to create socket. err %d",
2704 				__func__, rc);
2705 		goto fail;
2706 	}
2707 
2708 	rc = solisten(lep->com.so, backlog, curthread);
2709 	if (rc) {
2710 		CTR3(KTR_IW_CXGBE, "%s:Failed to listen on sock:%p. err %d",
2711 				__func__, lep->com.so, rc);
2712 		goto fail_free_so;
2713 	}
2714 	init_iwarp_socket(lep->com.so, &lep->com);
2715 out:
2716 	return 0;
2717 
2718 fail_free_so:
2719 	sock_release(lep->com.so);
2720 fail:
2721 	if (port_info)
2722 		rem_ep_from_listenlist(lep);
2723 	deref_cm_id(&lep->com);
2724 	c4iw_put_ep(&lep->com);
2725 	return rc;
2726 }
2727 
2728 int
2729 c4iw_destroy_listen(struct iw_cm_id *cm_id)
2730 {
2731 	struct c4iw_listen_ep *lep = to_listen_ep(cm_id);
2732 
2733 	mutex_lock(&lep->com.mutex);
2734 	CTR3(KTR_IW_CXGBE, "%s: cm_id %p, state %s", __func__, cm_id,
2735 	    states[lep->com.state]);
2736 
2737 	lep->com.state = DEAD;
2738 	if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr)) {
2739 		/* if no refcount then close listen socket */
2740 		if (!rem_ep_from_listenlist(lep))
2741 			close_socket(lep->com.so);
2742 	} else
2743 		close_socket(lep->com.so);
2744 	deref_cm_id(&lep->com);
2745 	mutex_unlock(&lep->com.mutex);
2746 	c4iw_put_ep(&lep->com);
2747 	return 0;
2748 }
2749 
2750 int __c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2751 {
2752 	int ret;
2753 	mutex_lock(&ep->com.mutex);
2754 	ret = c4iw_ep_disconnect(ep, abrupt, gfp);
2755 	mutex_unlock(&ep->com.mutex);
2756 	return ret;
2757 }
2758 
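/*
 * Initiate a close on the endpoint.  An abrupt disconnect moves the ep
 * to ABORTING and sends an abort; a graceful one moves it to CLOSING,
 * starts the ep timer, and issues sodisconnect().  On fatal adapter or
 * send errors the QP is moved to ERROR and the ep resources released.
 */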
2759 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2760 {
2761 	int ret = 0;
2762 	int close = 0;
2763 	int fatal = 0;
2764 	struct c4iw_rdev *rdev;
2765 
2766 
2767 	CTR2(KTR_IW_CXGBE, "%s:cedB %p", __func__, ep);
2768 
2769 	rdev = &ep->com.dev->rdev;
2770 
2771 	if (c4iw_fatal_error(rdev)) {
2772 
2773 		CTR2(KTR_IW_CXGBE, "%s:ced1 %p", __func__, ep);
2774 		fatal = 1;
2775 		close_complete_upcall(ep, -ECONNRESET);
2776 		send_abort(ep);
2777 		ep->com.state = DEAD;
2778 	}
2779 	CTR3(KTR_IW_CXGBE, "%s:ced2 %p %s", __func__, ep,
2780 	    states[ep->com.state]);
2781 
2782 	/*
2783 	 * Ref the ep here in case we have fatal errors causing the
2784 	 * ep to be released and freed.
2785 	 */
2786 	c4iw_get_ep(&ep->com);
2787 	switch (ep->com.state) {
2788 
2789 		case MPA_REQ_WAIT:
2790 		case MPA_REQ_SENT:
2791 		case MPA_REQ_RCVD:
2792 		case MPA_REP_SENT:
2793 		case FPDU_MODE:
2794 			close = 1;
2795 			if (abrupt)
2796 				ep->com.state = ABORTING;
2797 			else {
2798 				ep->com.state = CLOSING;
2799 				START_EP_TIMER(ep);
2800 			}
2801 			set_bit(CLOSE_SENT, &ep->com.flags);
2802 			break;
2803 
2804 		case CLOSING:
2805 
2806 			if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2807 
2808 				close = 1;
2809 				if (abrupt) {
2810 					STOP_EP_TIMER(ep);
2811 					ep->com.state = ABORTING;
2812 				} else
2813 					ep->com.state = MORIBUND;
2814 			}
2815 			break;
2816 
2817 		case MORIBUND:
2818 		case ABORTING:
2819 		case DEAD:
2820 			CTR3(KTR_IW_CXGBE,
2821 			    "%s ignoring disconnect ep %p state %u", __func__,
2822 			    ep, ep->com.state);
2823 			break;
2824 
2825 		default:
2826 			BUG();
2827 			break;
2828 	}
2829 
2830 
2831 	if (close) {
2832 
2833 		CTR2(KTR_IW_CXGBE, "%s:ced3 %p", __func__, ep);
2834 
2835 		if (abrupt) {
2836 
2837 			CTR2(KTR_IW_CXGBE, "%s:ced4 %p", __func__, ep);
2838 			set_bit(EP_DISC_ABORT, &ep->com.history);
2839 			close_complete_upcall(ep, -ECONNRESET);
2840 			ret = send_abort(ep);
2841 			if (ret)
2842 				fatal = 1;
2843 		} else {
2844 
2845 			CTR2(KTR_IW_CXGBE, "%s:ced5 %p", __func__, ep);
2846 			set_bit(EP_DISC_CLOSE, &ep->com.history);
2847 
2848 			if (!ep->parent_ep)
2849 				ep->com.state = MORIBUND;
2850 
2851 			CURVNET_SET(ep->com.so->so_vnet);
2852 			sodisconnect(ep->com.so);
2853 			CURVNET_RESTORE();
2854 		}
2855 
2856 	}
2857 
2858 	if (fatal) {
2859 		set_bit(EP_DISC_FAIL, &ep->com.history);
2860 		if (!abrupt) {
2861 			STOP_EP_TIMER(ep);
2862 			close_complete_upcall(ep, -EIO);
2863 		}
2864 		if (ep->com.qp) {
2865 			struct c4iw_qp_attributes attrs = {0};
2866 
2867 			attrs.next_state = C4IW_QP_STATE_ERROR;
2868 			ret = c4iw_modify_qp(ep->com.dev, ep->com.qp,
2869 						C4IW_QP_ATTR_NEXT_STATE,
2870 						&attrs, 1);
2871 			if (ret) {
2872 				CTR2(KTR_IW_CXGBE, "%s:ced7 %p", __func__, ep);
2873 				printf("%s - qp <- error failed!\n", __func__);
2874 			}
2875 		}
2876 		release_ep_resources(ep);
2877 		ep->com.state = DEAD;
2878 		CTR2(KTR_IW_CXGBE, "%s:ced6 %p", __func__, ep);
2879 	}
2880 	c4iw_put_ep(&ep->com);
2881 	CTR2(KTR_IW_CXGBE, "%s:cedE %p", __func__, ep);
2882 	return ret;
2883 }
2884 
2885 #ifdef C4IW_EP_REDIRECT
2886 int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
2887 		struct l2t_entry *l2t)
2888 {
2889 	struct c4iw_ep *ep = ctx;
2890 
2891 	if (ep->dst != old)
2892 		return 0;
2893 
2894 	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
2895 			l2t);
2896 	dst_hold(new);
2897 	cxgb4_l2t_release(ep->l2t);
2898 	ep->l2t = l2t;
2899 	dst_release(old);
2900 	ep->dst = new;
2901 	return 1;
2902 }
2903 #endif
2904 
2905 
2906 
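/*
 * ep timer callback.  It runs in callout context, so the actual timeout
 * work is deferred to the taskqueue via the request list; the TIMEOUT
 * flag guards against queueing the work twice.
 */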
2907 static void ep_timeout(unsigned long arg)
2908 {
2909 	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
2910 
2911 	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
2912 
2913 		/*
2914 		 * Only insert if it is not already on the list.
2915 		 */
2916 		if (!(ep->com.ep_events & C4IW_EVENT_TIMEOUT)) {
2917 			CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep);
2918 			add_ep_to_req_list(ep, C4IW_EVENT_TIMEOUT);
2919 		}
2920 	}
2921 }
2922 
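/*
 * FW6 work-request reply: byte 1 of the first doubleword carries the
 * completion status and the second doubleword returns the cookie, which
 * is the address of the c4iw_wr_wait the submitter is sleeping on.
 */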
2923 static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl)
2924 {
2925 	uint64_t val = be64toh(*rpl);
2926 	int ret;
2927 	struct c4iw_wr_wait *wr_waitp;
2928 
2929 	ret = (int)((val >> 8) & 0xff);
2930 	wr_waitp = (struct c4iw_wr_wait *)rpl[1];
2931 	CTR3(KTR_IW_CXGBE, "%s wr_waitp %p ret %u", __func__, wr_waitp, ret);
2932 	if (wr_waitp)
2933 		c4iw_wake_up(wr_waitp, ret ? -ret : 0);
2934 
2935 	return (0);
2936 }
2937 
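/*
 * FW6 error-CQE notification.  The CQE is copied into a list entry and
 * queued on err_cqe_list; process_err_cqes() then handles it from the
 * c4iw taskqueue, outside the interrupt path.
 */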
2938 static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl)
2939 {
2940 	struct cqe_list_entry *cle;
2941 	unsigned long flag;
2942 
2943 	cle = malloc(sizeof(*cle), M_CXGBE, M_NOWAIT);
	if (cle == NULL)
		return (0);	/* M_NOWAIT may fail; drop this error CQE */
2944 	cle->rhp = sc->iwarp_softc;
2945 	cle->err_cqe = *(const struct t4_cqe *)(&rpl[0]);
2946 
2947 	spin_lock_irqsave(&err_cqe_lock, flag);
2948 	list_add_tail(&cle->entry, &err_cqe_list);
2949 	queue_work(c4iw_taskq, &c4iw_task);
2950 	spin_unlock_irqrestore(&err_cqe_lock, flag);
2951 
2952 	return (0);
2953 }
2954 
2955 static int
2956 process_terminate(struct c4iw_ep *ep)
2957 {
2958 	struct c4iw_qp_attributes attrs = {0};
2959 
2960 	CTR2(KTR_IW_CXGBE, "%s:tB %p", __func__, ep);
2961 
2962 	if (ep && ep->com.qp) {
2963 
2964 		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n",
2965 				ep->hwtid, ep->com.qp->wq.sq.qid);
2966 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
2967 		c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs,
2968 				1);
2969 	} else
2970 		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n",
2971 						ep ? ep->hwtid : 0);
2972 	CTR2(KTR_IW_CXGBE, "%s:tE %p", __func__, ep);
2973 
2974 	return 0;
2975 }
2976 
2977 int __init c4iw_cm_init(void)
2978 {
2979 
2980 	t4_register_cpl_handler(CPL_RDMA_TERMINATE, terminate);
2981 	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, fw6_wr_rpl);
2982 	t4_register_fw_msg_handler(FW6_TYPE_CQE, fw6_cqe_handler);
2983 	t4_register_an_handler(c4iw_ev_handler);
2984 
2985 	TAILQ_INIT(&req_list);
2986 	spin_lock_init(&req_lock);
2987 	INIT_LIST_HEAD(&err_cqe_list);
2988 	spin_lock_init(&err_cqe_lock);
2989 
2990 	INIT_WORK(&c4iw_task, process_req);
2991 
2992 	c4iw_taskq = create_singlethread_workqueue("iw_cxgbe");
2993 	if (!c4iw_taskq)
2994 		return -ENOMEM;
2995 
2996 	return 0;
2997 }
2998 
2999 void __exit c4iw_cm_term(void)
3000 {
3001 	WARN_ON(!TAILQ_EMPTY(&req_list));
3002 	WARN_ON(!list_empty(&err_cqe_list));
3003 	flush_workqueue(c4iw_taskq);
3004 	destroy_workqueue(c4iw_taskq);
3005 
3006 	t4_register_cpl_handler(CPL_RDMA_TERMINATE, NULL);
3007 	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, NULL);
3008 	t4_register_fw_msg_handler(FW6_TYPE_CQE, NULL);
3009 	t4_register_an_handler(NULL);
3010 }
3011 #endif
3012