/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>
#include <netinet/in.h>
#include <net/route.h>
#include <net/route/nhop.h>

#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/ip.h>
#include <netinet/in_fib.h>
#include <netinet6/in6_fib.h>
#include <netinet6/scope6_var.h>
#include <netinet/ip_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp.h>
#include <netinet/tcpip.h>

#include <netinet/toecore.h>

struct sge_iq;
struct rss_header;
struct cpl_set_tcb_rpl;
#include <linux/types.h>
#include "offload.h"
#include "tom/t4_tom.h"

#define TOEPCB(so)  ((struct toepcb *)(so_sototcpcb((so))->t_toe))

#include "iw_cxgbe.h"
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
#include <rdma/rdma_cm.h>

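/*
 * Socket upcalls run in constrained contexts, so endpoint work is deferred:
 * events are queued on req_list (and error CQEs on err_cqe_list) and handled
 * by the c4iw_task work item running on c4iw_taskq (see process_req()).
 */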
static spinlock_t req_lock;
static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list;
static struct work_struct c4iw_task;
static struct workqueue_struct *c4iw_taskq;
static LIST_HEAD(err_cqe_list);
static spinlock_t err_cqe_lock;
static LIST_HEAD(listen_port_list);
static DEFINE_MUTEX(listen_port_mutex);

static void process_req(struct work_struct *ctx);
static void start_ep_timer(struct c4iw_ep *ep);
static int stop_ep_timer(struct c4iw_ep *ep);
static int set_tcpinfo(struct c4iw_ep *ep);
static void process_timeout(struct c4iw_ep *ep);
static void process_err_cqes(void);
static void *alloc_ep(int size, gfp_t flags);
static void close_socket(struct socket *so);
static int send_mpa_req(struct c4iw_ep *ep);
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen);
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen);
static void close_complete_upcall(struct c4iw_ep *ep, int status);
static int send_abort(struct c4iw_ep *ep);
static void peer_close_upcall(struct c4iw_ep *ep);
static void peer_abort_upcall(struct c4iw_ep *ep);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int connect_request_upcall(struct c4iw_ep *ep);
static void established_upcall(struct c4iw_ep *ep);
static int process_mpa_reply(struct c4iw_ep *ep);
static int process_mpa_request(struct c4iw_ep *ep);
static void process_peer_close(struct c4iw_ep *ep);
static void process_conn_error(struct c4iw_ep *ep);
static void process_close_complete(struct c4iw_ep *ep);
static void ep_timeout(unsigned long arg);
static void setiwsockopt(struct socket *so);
static void init_iwarp_socket(struct socket *so, void *arg);
static void uninit_iwarp_socket(struct socket *so);
static void process_data(struct c4iw_ep *ep);
static void process_connected(struct c4iw_ep *ep);
static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag);
static void process_socket_event(struct c4iw_ep *ep);
static void release_ep_resources(struct c4iw_ep *ep);
static int process_terminate(struct c4iw_ep *ep);
static int terminate(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m);
static int add_ep_to_req_list(struct c4iw_ep *ep, int ep_events);
static struct listen_port_info *
add_ep_to_listenlist(struct c4iw_listen_ep *lep);
static int rem_ep_from_listenlist(struct c4iw_listen_ep *lep);
static struct c4iw_listen_ep *
find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so);
static int get_ifnet_from_raddr(struct sockaddr_storage *raddr,
		struct ifnet **ifp);
static void process_newconn(struct c4iw_listen_ep *master_lep,
		struct socket *new_so);

#define START_EP_TIMER(ep) \
    do { \
	    CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \
		__func__, __LINE__, (ep)); \
	    start_ep_timer(ep); \
    } while (0)

#define STOP_EP_TIMER(ep) \
    ({ \
	    CTR3(KTR_IW_CXGBE, "stop_ep_timer (%s:%d) ep %p", \
		__func__, __LINE__, (ep)); \
	    stop_ep_timer(ep); \
    })

#define GET_LOCAL_ADDR(pladdr, so) \
	do { \
		struct sockaddr_storage *__a = NULL; \
		struct inpcb *__inp = sotoinpcb(so); \
		KASSERT(__inp != NULL, \
		   ("GET_LOCAL_ADDR(%s):so:%p, inp = NULL", __func__, so)); \
		if (__inp->inp_vflag & INP_IPV4) \
			in_getsockaddr(so, (struct sockaddr **)&__a); \
		else \
			in6_getsockaddr(so, (struct sockaddr **)&__a); \
		*(pladdr) = *__a; \
		free(__a, M_SONAME); \
	} while (0)

#define GET_REMOTE_ADDR(praddr, so) \
	do { \
		struct sockaddr_storage *__a = NULL; \
		struct inpcb *__inp = sotoinpcb(so); \
		KASSERT(__inp != NULL, \
		   ("GET_REMOTE_ADDR(%s):so:%p, inp = NULL", __func__, so)); \
		if (__inp->inp_vflag & INP_IPV4) \
			in_getpeeraddr(so, (struct sockaddr **)&__a); \
		else \
			in6_getpeeraddr(so, (struct sockaddr **)&__a); \
		*(praddr) = *__a; \
		free(__a, M_SONAME); \
	} while (0)

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

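/*
 * cm_id and QP reference helpers.  Each transition is also recorded in the
 * endpoint's history bits via set_bit()/clear_bit() so that leaked or
 * double references can be diagnosed after the fact.
 */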
static void deref_cm_id(struct c4iw_ep_common *epc)
{
	epc->cm_id->rem_ref(epc->cm_id);
	epc->cm_id = NULL;
	set_bit(CM_ID_DEREFED, &epc->history);
}

static void ref_cm_id(struct c4iw_ep_common *epc)
{
	set_bit(CM_ID_REFED, &epc->history);
	epc->cm_id->add_ref(epc->cm_id);
}

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_DEREFED, &ep->com.history);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_REFED, &ep->com.history);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

/* allocated per TCP port while listening */
struct listen_port_info {
	uint16_t port_num; /* TCP port address */
	struct list_head list; /* belongs to listen_port_list */
	struct list_head lep_list; /* per port lep list */
	uint32_t refcnt; /* number of lep's listening */
};

/*
 * The following two lists are used to manage INADDR_ANY listeners:
 * 1) listen_port_list
 * 2) lep_list
 *
 * Below is an overview of the INADDR_ANY listener lists on a system with a
 * two-port adapter:
 *   |------------------|
 *   |listen_port_list  |
 *   |------------------|
 *            |
 *            |              |-----------|       |-----------|
 *            |              | port_num:X|       | port_num:X|
 *            |--------------|-list------|-------|-list------|-------....
 *                           | lep_list----|     | lep_list----|
 *                           | refcnt    | |     | refcnt    | |
 *                           |           | |     |           | |
 *                           |           | |     |           | |
 *                           |-----------| |     |-----------| |
 *                                         |                   |
 *                                         |                   |
 *                                         |                   |
 *                                         |                   |         lep1                  lep2
 *                                         |                   |    |----------------|    |----------------|
 *                                         |                   |----| listen_ep_list |----| listen_ep_list |
 *                                         |                        |----------------|    |----------------|
 *                                         |
 *                                         |
 *                                         |        lep1                  lep2
 *                                         |   |----------------|    |----------------|
 *                                         |---| listen_ep_list |----| listen_ep_list |
 *                                             |----------------|    |----------------|
 *
 * Because this is a two-port adapter, there are two leps (lep1 & lep2) for
 * each TCP port number.
 *
 * Here 'lep1' is always marked as the master lep, because solisten() is
 * always called through the first lep.
 */
static struct listen_port_info *
add_ep_to_listenlist(struct c4iw_listen_ep *lep)
{
	uint16_t port;
	struct listen_port_info *port_info = NULL;
	struct sockaddr_storage *laddr = &lep->com.local_addr;

	port = (laddr->ss_family == AF_INET) ?
		((struct sockaddr_in *)laddr)->sin_port :
		((struct sockaddr_in6 *)laddr)->sin6_port;

	mutex_lock(&listen_port_mutex);

	list_for_each_entry(port_info, &listen_port_list, list)
		if (port_info->port_num == port)
			goto found_port;

	port_info = malloc(sizeof(*port_info), M_CXGBE, M_WAITOK);
	port_info->port_num = port;
	port_info->refcnt = 0;

	list_add_tail(&port_info->list, &listen_port_list);
	INIT_LIST_HEAD(&port_info->lep_list);

found_port:
	port_info->refcnt++;
	list_add_tail(&lep->listen_ep_list, &port_info->lep_list);
	mutex_unlock(&listen_port_mutex);
	return port_info;
}

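/*
 * Undo add_ep_to_listenlist(): unlink the lep from its port's lep_list and
 * free the port_info when the last listener for that port goes away.
 * Returns the number of leps still listening on the port.
 */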
static int
rem_ep_from_listenlist(struct c4iw_listen_ep *lep)
{
	uint16_t port;
	struct listen_port_info *port_info = NULL;
	struct sockaddr_storage *laddr = &lep->com.local_addr;
	int refcnt = 0;

	port = (laddr->ss_family == AF_INET) ?
		((struct sockaddr_in *)laddr)->sin_port :
		((struct sockaddr_in6 *)laddr)->sin6_port;

	mutex_lock(&listen_port_mutex);

	/* get the port_info structure based on the lep's port address */
	list_for_each_entry(port_info, &listen_port_list, list) {
		if (port_info->port_num == port) {
			port_info->refcnt--;
			refcnt = port_info->refcnt;
			/* remove the current lep from the listen list */
			list_del(&lep->listen_ep_list);
			if (port_info->refcnt == 0) {
				/* Remove this entry from the list as there
				 * are no more listeners for this port_num.
				 */
				list_del(&port_info->list);
				kfree(port_info);
			}
			break;
		}
	}
	mutex_unlock(&listen_port_mutex);
	return refcnt;
}

/*
 * Find the lep that belongs to the ifnet on which the SYN frame was received.
 */
struct c4iw_listen_ep *
find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so)
{
	struct adapter *adap = NULL;
	struct c4iw_listen_ep *lep = NULL;
	struct ifnet *ifp = NULL, *hw_ifp = NULL;
	struct listen_port_info *port_info = NULL;
	int i = 0, found_portinfo = 0, found_lep = 0;
	uint16_t port;

	/*
	 * STEP 1: Figure out the 'ifp' of the physical interface, not a
	 * pseudo interface like vlan, lagg, etc..
	 * TBD: lagg support, lagg + vlan support.
	 */
	ifp = TOEPCB(so)->l2te->ifp;
	if (ifp->if_type == IFT_L2VLAN) {
		hw_ifp = VLAN_TRUNKDEV(ifp);
		if (hw_ifp == NULL) {
			CTR4(KTR_IW_CXGBE, "%s: Failed to get parent ifnet of "
				"vlan ifnet %p, sock %p, master_lep %p",
				__func__, ifp, so, master_lep);
			return (NULL);
		}
	} else
		hw_ifp = ifp;

	/* STEP 2: Find 'port_info' with the listener's local port address. */
	port = (master_lep->com.local_addr.ss_family == AF_INET) ?
		((struct sockaddr_in *)&master_lep->com.local_addr)->sin_port :
		((struct sockaddr_in6 *)&master_lep->com.local_addr)->sin6_port;

	mutex_lock(&listen_port_mutex);
	list_for_each_entry(port_info, &listen_port_list, list)
		if (port_info->port_num == port) {
			found_portinfo = 1;
			break;
		}
	if (!found_portinfo)
		goto out;

	/* STEP 3: Traverse the list of leps that are bound to the current
	 * TCP port address and find the lep that belongs to the ifnet on
	 * which the SYN frame was received.
	 */
	list_for_each_entry(lep, &port_info->lep_list, listen_ep_list) {
		adap = lep->com.dev->rdev.adap;
		for_each_port(adap, i) {
			if (hw_ifp == adap->port[i]->vi[0].ifp) {
				found_lep = 1;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&listen_port_mutex);
	return found_lep ? lep : (NULL);
}

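/*
 * Handle a C4IW_EVENT_TIMEOUT event queued by ep_timeout().  Depending on
 * the connection state this either aborts the connection or, for states
 * that legitimately race with stop_ep_timer(), does nothing.  The reference
 * taken by start_ep_timer() is dropped here.
 */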
static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs = {0};
	int abort = 1;

	CTR4(KTR_IW_CXGBE, "%s ep :%p, tid:%u, state %d", __func__,
			ep, ep->hwtid, ep->com.state);
	set_bit(TIMEDOUT, &ep->com.history);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.dev, ep->com.qp,
					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep, -ETIMEDOUT);
		break;
	case ABORTING:
	case DEAD:
		/*
		 * These states are expected if the ep timed out at the same
		 * time as another thread was calling stop_ep_timer().
		 * So we silently do nothing for these states.
		 */
		abort = 0;
		break;
	default:
		CTR4(KTR_IW_CXGBE, "%s unexpected state ep %p tid %u state %u"
				, __func__, ep, ep->hwtid, ep->com.state);
		abort = 0;
	}
	if (abort)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return;
}

struct cqe_list_entry {
	struct list_head entry;
	struct c4iw_dev *rhp;
	struct t4_cqe err_cqe;
};

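/*
 * Drain err_cqe_list, dispatching each deferred error CQE via
 * c4iw_ev_dispatch().  The spinlock protects only the list itself; it is
 * dropped across each dispatch.
 */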
static void
process_err_cqes(void)
{
	unsigned long flag;
	struct cqe_list_entry *cle;

	spin_lock_irqsave(&err_cqe_lock, flag);
	while (!list_empty(&err_cqe_list)) {
		struct list_head *tmp;
		tmp = err_cqe_list.next;
		list_del(tmp);
		tmp->next = tmp->prev = NULL;
		spin_unlock_irqrestore(&err_cqe_lock, flag);
		cle = list_entry(tmp, struct cqe_list_entry, entry);
		c4iw_ev_dispatch(cle->rhp, &cle->err_cqe);
		free(cle, M_CXGBE);
		spin_lock_irqsave(&err_cqe_lock, flag);
	}
	spin_unlock_irqrestore(&err_cqe_lock, flag);

	return;
}

static void
process_req(struct work_struct *ctx)
{
	struct c4iw_ep_common *epc;
	unsigned long flag;
	int ep_events;

	process_err_cqes();
	spin_lock_irqsave(&req_lock, flag);
	while (!TAILQ_EMPTY(&req_list)) {
		epc = TAILQ_FIRST(&req_list);
		TAILQ_REMOVE(&req_list, epc, entry);
		epc->entry.tqe_prev = NULL;
		ep_events = epc->ep_events;
		epc->ep_events = 0;
		spin_unlock_irqrestore(&req_lock, flag);
		mutex_lock(&epc->mutex);
		CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, ep_state %s events 0x%x",
		    __func__, epc->so, epc, states[epc->state], ep_events);
		if (ep_events & C4IW_EVENT_TERM)
			process_terminate((struct c4iw_ep *)epc);
		if (ep_events & C4IW_EVENT_TIMEOUT)
			process_timeout((struct c4iw_ep *)epc);
		if (ep_events & C4IW_EVENT_SOCKET)
			process_socket_event((struct c4iw_ep *)epc);
		mutex_unlock(&epc->mutex);
		c4iw_put_ep(epc);
		process_err_cqes();
		spin_lock_irqsave(&req_lock, flag);
	}
	spin_unlock_irqrestore(&req_lock, flag);
}

/*
 * XXX: doesn't belong here in the iWARP driver.
 * XXX: assumes that the connection was offloaded by cxgbe/t4_tom if TF_TOE is
 *      set.  Is this a valid assumption for active open?
 */
static int
set_tcpinfo(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;
	struct toepcb *toep;
	int rc = 0;

	INP_WLOCK(inp);
	tp = intotcpcb(inp);
	if ((tp->t_flags & TF_TOE) == 0) {
		rc = EINVAL;
		log(LOG_ERR, "%s: connection not offloaded (so %p, ep %p)\n",
		    __func__, so, ep);
		goto done;
	}
	toep = TOEPCB(so);

	ep->hwtid = toep->tid;
	ep->snd_seq = tp->snd_nxt;
	ep->rcv_seq = tp->rcv_nxt;
done:
	INP_WUNLOCK(inp);
	return (rc);
}

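/*
 * Resolve the outbound ifnet used to reach 'raddr' with a routing (FIB)
 * lookup: fib4_lookup() for IPv4 and fib6_lookup() for IPv6.  Returns
 * EHOSTUNREACH when no route exists.
 */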
static int
get_ifnet_from_raddr(struct sockaddr_storage *raddr, struct ifnet **ifp)
{
	int err = 0;
	struct nhop_object *nh;

	if (raddr->ss_family == AF_INET) {
		struct sockaddr_in *raddr4 = (struct sockaddr_in *)raddr;

		nh = fib4_lookup(RT_DEFAULT_FIB, raddr4->sin_addr, 0,
				NHR_NONE, 0);
	} else {
		struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)raddr;
		struct in6_addr addr6;
		uint32_t scopeid;

		memset(&addr6, 0, sizeof(addr6));
		in6_splitscope((struct in6_addr *)&raddr6->sin6_addr,
					&addr6, &scopeid);
		nh = fib6_lookup(RT_DEFAULT_FIB, &addr6, scopeid,
				NHR_NONE, 0);
	}

	if (nh == NULL)
		err = EHOSTUNREACH;
	else
		*ifp = nh->nh_ifp;
	CTR2(KTR_IW_CXGBE, "%s: return: %d", __func__, err);
	return err;
}

static void
close_socket(struct socket *so)
{
	uninit_iwarp_socket(so);
	soclose(so);
}

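/*
 * Handle the peer's FIN (SBS_CANTRCVMORE).  The connection state machine is
 * advanced towards CLOSING/MORIBUND/DEAD and the ULP is notified through the
 * upcall appropriate for the state the endpoint was in.
 */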
static void
process_peer_close(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs = {0};
	int disconnect = 1;
	int release = 0;

	CTR4(KTR_IW_CXGBE, "%s:ppcB ep %p so %p state %s", __func__, ep,
	    ep->com.so, states[ep->com.state]);

	switch (ep->com.state) {

		case MPA_REQ_WAIT:
			CTR2(KTR_IW_CXGBE, "%s:ppc1 %p MPA_REQ_WAIT DEAD",
			    __func__, ep);
			/* Fallthrough */
		case MPA_REQ_SENT:
			CTR2(KTR_IW_CXGBE, "%s:ppc2 %p MPA_REQ_SENT DEAD",
			    __func__, ep);
			ep->com.state = DEAD;
			connect_reply_upcall(ep, -ECONNABORTED);

			disconnect = 0;
			STOP_EP_TIMER(ep);
			close_socket(ep->com.so);
			deref_cm_id(&ep->com);
			release = 1;
			break;

		case MPA_REQ_RCVD:

			/*
			 * We're gonna mark this puppy DEAD, but keep
			 * the reference on it until the ULP accepts or
			 * rejects the CR.
			 */
			CTR2(KTR_IW_CXGBE, "%s:ppc3 %p MPA_REQ_RCVD CLOSING",
			    __func__, ep);
			ep->com.state = CLOSING;
			break;

		case MPA_REP_SENT:
			CTR2(KTR_IW_CXGBE, "%s:ppc4 %p MPA_REP_SENT CLOSING",
			    __func__, ep);
			ep->com.state = CLOSING;
			break;

		case FPDU_MODE:
			CTR2(KTR_IW_CXGBE, "%s:ppc5 %p FPDU_MODE CLOSING",
			    __func__, ep);
			START_EP_TIMER(ep);
			ep->com.state = CLOSING;
			attrs.next_state = C4IW_QP_STATE_CLOSING;
			c4iw_modify_qp(ep->com.dev, ep->com.qp,
					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
			peer_close_upcall(ep);
			break;

		case ABORTING:
			CTR2(KTR_IW_CXGBE, "%s:ppc6 %p ABORTING (disconn)",
			    __func__, ep);
			disconnect = 0;
			break;

		case CLOSING:
			CTR2(KTR_IW_CXGBE, "%s:ppc7 %p CLOSING MORIBUND",
			    __func__, ep);
			ep->com.state = MORIBUND;
			disconnect = 0;
			break;

		case MORIBUND:
			CTR2(KTR_IW_CXGBE, "%s:ppc8 %p MORIBUND DEAD", __func__,
			    ep);
			STOP_EP_TIMER(ep);
			if (ep->com.cm_id && ep->com.qp) {
				attrs.next_state = C4IW_QP_STATE_IDLE;
				c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
						C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
			}
			close_socket(ep->com.so);
			close_complete_upcall(ep, 0);
			ep->com.state = DEAD;
			release = 1;
			disconnect = 0;
			break;

		case DEAD:
			CTR2(KTR_IW_CXGBE, "%s:ppc9 %p DEAD (disconn)",
			    __func__, ep);
			disconnect = 0;
			break;

		default:
			panic("%s: ep %p state %d", __func__, ep,
			    ep->com.state);
			break;
	}

	if (disconnect) {

		CTR2(KTR_IW_CXGBE, "%s:ppca %p", __func__, ep);
		c4iw_ep_disconnect(ep, 0, M_NOWAIT);
	}
	if (release) {

		CTR2(KTR_IW_CXGBE, "%s:ppcb %p", __func__, ep);
		c4iw_put_ep(&ep->com);
	}
	CTR2(KTR_IW_CXGBE, "%s:ppcE %p", __func__, ep);
	return;
}

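/*
 * Handle a socket error on the connection.  A DEAD endpoint ignores the
 * event and an ABORTING one leaves cleanup to the abort path; otherwise the
 * state-specific upcall is delivered, the socket is closed and the ep
 * transitions to DEAD with its reference dropped.
 */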
static void
process_conn_error(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs = {0};
	int ret;
	int state;

	state = ep->com.state;
	CTR5(KTR_IW_CXGBE, "%s:pceB ep %p so %p so->so_error %u state %s",
	    __func__, ep, ep->com.so, ep->com.so->so_error,
	    states[ep->com.state]);

	switch (state) {

		case MPA_REQ_WAIT:
			STOP_EP_TIMER(ep);
			c4iw_put_ep(&ep->parent_ep->com);
			break;

		case MPA_REQ_SENT:
			STOP_EP_TIMER(ep);
			connect_reply_upcall(ep, -ECONNRESET);
			break;

		case MPA_REP_SENT:
			ep->com.rpl_err = ECONNRESET;
			CTR1(KTR_IW_CXGBE, "waking up ep %p", ep);
			break;

		case MPA_REQ_RCVD:
			break;

		case MORIBUND:
		case CLOSING:
			STOP_EP_TIMER(ep);
			/*FALLTHROUGH*/
		case FPDU_MODE:

			if (ep->com.cm_id && ep->com.qp) {

				attrs.next_state = C4IW_QP_STATE_ERROR;
				ret = c4iw_modify_qp(ep->com.qp->rhp,
					ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
					&attrs, 1);
				if (ret)
					log(LOG_ERR,
							"%s - qp <- error failed!\n",
							__func__);
			}
			peer_abort_upcall(ep);
			break;

		case ABORTING:
			break;

		case DEAD:
			CTR2(KTR_IW_CXGBE, "%s so_error %d IN DEAD STATE!!!!",
			    __func__, ep->com.so->so_error);
			return;

		default:
			panic("%s: ep %p state %d", __func__, ep, state);
			break;
	}

	if (state != ABORTING) {
		close_socket(ep->com.so);
		ep->com.state = DEAD;
		c4iw_put_ep(&ep->com);
	}
	CTR2(KTR_IW_CXGBE, "%s:pceE %p", __func__, ep);
	return;
}

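/*
 * Handle the final close notification (SS_ISDISCONNECTED).  Completes the
 * orderly shutdown: CLOSING moves to MORIBUND, and MORIBUND to DEAD with the
 * QP idled, the socket closed and all endpoint resources released.
 */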
static void
process_close_complete(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs = {0};
	int release = 0;

	CTR4(KTR_IW_CXGBE, "%s:pccB ep %p so %p state %s", __func__, ep,
	    ep->com.so, states[ep->com.state]);

	/* The cm_id may be null if we failed to connect */
	set_bit(CLOSE_CON_RPL, &ep->com.history);

	switch (ep->com.state) {

		case CLOSING:
			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p CLOSING MORIBUND",
			    __func__, ep);
			ep->com.state = MORIBUND;
			break;

		case MORIBUND:
			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p MORIBUND DEAD", __func__,
			    ep);
			STOP_EP_TIMER(ep);

			if ((ep->com.cm_id) && (ep->com.qp)) {

				CTR2(KTR_IW_CXGBE, "%s:pcc2 %p QP_STATE_IDLE",
				    __func__, ep);
				attrs.next_state = C4IW_QP_STATE_IDLE;
				c4iw_modify_qp(ep->com.dev,
						ep->com.qp,
						C4IW_QP_ATTR_NEXT_STATE,
						&attrs, 1);
			}

			close_socket(ep->com.so);
			close_complete_upcall(ep, 0);
			ep->com.state = DEAD;
			release = 1;
			break;

		case ABORTING:
			CTR2(KTR_IW_CXGBE, "%s:pcc5 %p ABORTING", __func__, ep);
			break;

		case DEAD:
			CTR2(KTR_IW_CXGBE, "%s:pcc6 %p DEAD", __func__, ep);
			break;
		default:
			CTR2(KTR_IW_CXGBE, "%s:pcc7 %p unknown ep state",
					__func__, ep);
			panic("%s:pcc7 %p unknown ep state", __func__, ep);
			break;
	}

	if (release) {

		CTR2(KTR_IW_CXGBE, "%s:pcc8 %p", __func__, ep);
		release_ep_resources(ep);
	}
	CTR2(KTR_IW_CXGBE, "%s:pccE %p", __func__, ep);
	return;
}

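/*
 * Set TCP_NODELAY on sockets used for iWARP, presumably to avoid
 * Nagle-delaying the small MPA control messages.
 */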
static void
setiwsockopt(struct socket *so)
{
	int rc;
	struct sockopt sopt;
	int on = 1;

	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = IPPROTO_TCP;
	sopt.sopt_name = TCP_NODELAY;
	sopt.sopt_val = (caddr_t)&on;
	sopt.sopt_valsize = sizeof on;
	sopt.sopt_td = NULL;
	rc = -sosetopt(so, &sopt);
	if (rc) {
		log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n",
		    __func__, so, rc);
	}
}

static void
init_iwarp_socket(struct socket *so, void *arg)
{
	if (SOLISTENING(so)) {
		SOLISTEN_LOCK(so);
		solisten_upcall_set(so, c4iw_so_upcall, arg);
		so->so_state |= SS_NBIO;
		SOLISTEN_UNLOCK(so);
	} else {
		SOCKBUF_LOCK(&so->so_rcv);
		soupcall_set(so, SO_RCV, c4iw_so_upcall, arg);
		so->so_state |= SS_NBIO;
		SOCKBUF_UNLOCK(&so->so_rcv);
	}
}

static void
uninit_iwarp_socket(struct socket *so)
{
	if (SOLISTENING(so)) {
		SOLISTEN_LOCK(so);
		solisten_upcall_set(so, NULL, NULL);
		SOLISTEN_UNLOCK(so);
	} else {
		SOCKBUF_LOCK(&so->so_rcv);
		soupcall_clear(so, SO_RCV);
		SOCKBUF_UNLOCK(&so->so_rcv);
	}
}

static void
process_data(struct c4iw_ep *ep)
{
	int ret = 0;
	int disconnect = 0;
	struct c4iw_qp_attributes attrs = {0};

	CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sbused %d", __func__,
	    ep->com.so, ep, states[ep->com.state], sbused(&ep->com.so->so_rcv));

	switch (ep->com.state) {
	case MPA_REQ_SENT:
		disconnect = process_mpa_reply(ep);
		break;
	case MPA_REQ_WAIT:
		disconnect = process_mpa_request(ep);
		if (disconnect)
			/* Referenced in process_newconn() */
			c4iw_put_ep(&ep->parent_ep->com);
		break;
	case FPDU_MODE:
		MPASS(ep->com.qp != NULL);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		ret = c4iw_modify_qp(ep->com.dev, ep->com.qp,
					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		if (ret != -EINPROGRESS)
			disconnect = 1;
		break;
	default:
		log(LOG_ERR, "%s: Unexpected streaming data. ep %p, "
			    "state %d, so %p, so_state 0x%x, sbused %u\n",
			    __func__, ep, ep->com.state, ep->com.so,
			    ep->com.so->so_state, sbused(&ep->com.so->so_rcv));
		break;
	}
	if (disconnect)
		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
}

static void
process_connected(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;

	if ((so->so_state & SS_ISCONNECTED) && !so->so_error) {
		if (send_mpa_req(ep))
			goto err;
	} else {
		connect_reply_upcall(ep, -so->so_error);
		goto err;
	}
	return;
err:
	close_socket(so);
	ep->com.state = DEAD;
	c4iw_put_ep(&ep->com);
	return;
}

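/*
 * Address classification helpers.  Note that c4iw_any_addr() treats
 * loopback like the unspecified address, presumably so that loopback
 * connections also take the wildcard-listener path.
 */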
static inline int c4iw_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return IN_ZERONET(
			ntohl(((struct sockaddr_in *)addr)->sin_addr.s_addr));
	else {
		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
				ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
	}
}

static inline int c4iw_loopback_addr(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return IN_LOOPBACK(
			ntohl(((struct sockaddr_in *) addr)->sin_addr.s_addr));
	else
		return IN6_IS_ADDR_LOOPBACK(
				&((struct sockaddr_in6 *) addr)->sin6_addr);
}

static inline int c4iw_any_addr(struct sockaddr *addr)
{
	return c4iw_zero_addr(addr) || c4iw_loopback_addr(addr);
}

static void
process_newconn(struct c4iw_listen_ep *master_lep, struct socket *new_so)
{
	struct c4iw_listen_ep *real_lep = NULL;
	struct c4iw_ep *new_ep = NULL;
	struct sockaddr_in *remote = NULL;
	int ret = 0;

	MPASS(new_so != NULL);

	if (c4iw_any_addr((struct sockaddr *)&master_lep->com.local_addr)) {
		/* Here we need to find the 'real_lep' that belongs to the
		 * incoming socket's network interface, such that the newly
		 * created 'ep' can be attached to the real 'lep'.
		 */
		real_lep = find_real_listen_ep(master_lep, new_so);
		if (real_lep == NULL) {
			CTR2(KTR_IW_CXGBE, "%s: Could not find the real listen "
					"ep for sock: %p", __func__, new_so);
			log(LOG_ERR,"%s: Could not find the real listen ep for "
					"sock: %p\n", __func__, new_so);
			/* FIXME: properly free the 'new_so' in the failure
			 * case.  Use of soabort() and soclose() is not legal
			 * here (before soaccept()).
			 */
			return;
		}
	} else /* for a non-wildcard address, master_lep is always the real_lep */
		real_lep = master_lep;

	new_ep = alloc_ep(sizeof(*new_ep), GFP_KERNEL);

	CTR6(KTR_IW_CXGBE, "%s: master_lep %p, real_lep: %p, new ep %p, "
	    "listening so %p, new so %p", __func__, master_lep, real_lep,
	    new_ep, master_lep->com.so, new_so);

	new_ep->com.dev = real_lep->com.dev;
	new_ep->com.so = new_so;
	new_ep->com.cm_id = NULL;
	new_ep->com.thread = real_lep->com.thread;
	new_ep->parent_ep = real_lep;

	GET_LOCAL_ADDR(&new_ep->com.local_addr, new_so);
	GET_REMOTE_ADDR(&new_ep->com.remote_addr, new_so);
	c4iw_get_ep(&real_lep->com);
	init_timer(&new_ep->timer);
	new_ep->com.state = MPA_REQ_WAIT;

	setiwsockopt(new_so);
	ret = soaccept(new_so, (struct sockaddr **)&remote);
	if (ret != 0) {
		CTR4(KTR_IW_CXGBE,
				"%s:listen sock:%p, new sock:%p, ret:%d",
				__func__, master_lep->com.so, new_so, ret);
		if (remote != NULL)
			free(remote, M_SONAME);
		soclose(new_so);
		c4iw_put_ep(&new_ep->com);
		c4iw_put_ep(&real_lep->com);
		return;
	}
	free(remote, M_SONAME);

	START_EP_TIMER(new_ep);

	/* An MPA request might already have been queued up on the socket, so
	 * we initialize the socket/upcall_handler under lock to prevent the
	 * MPA request from being processed on another thread (via
	 * process_req()) simultaneously.
	 */
	c4iw_get_ep(&new_ep->com); /* Dereferenced at the end below; this is to
				      avoid freeing the ep before ep unlock. */
	mutex_lock(&new_ep->com.mutex);
	init_iwarp_socket(new_so, &new_ep->com);

	ret = process_mpa_request(new_ep);
	if (ret) {
		/* ABORT */
		c4iw_ep_disconnect(new_ep, 1, GFP_KERNEL);
		c4iw_put_ep(&real_lep->com);
	}
	mutex_unlock(&new_ep->com.mutex);
	c4iw_put_ep(&new_ep->com);
	return;
}

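/*
 * Record 'new_ep_event' on the endpoint and, if the ep is not already on
 * req_list, take a reference, enqueue it and kick the c4iw_taskq worker.
 * Events posted before process_req() runs are OR'ed together.
 */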
static int
add_ep_to_req_list(struct c4iw_ep *ep, int new_ep_event)
{
	unsigned long flag;

	spin_lock_irqsave(&req_lock, flag);
	if (ep && ep->com.so) {
		ep->com.ep_events |= new_ep_event;
		if (!ep->com.entry.tqe_prev) {
			c4iw_get_ep(&ep->com);
			TAILQ_INSERT_TAIL(&req_list, &ep->com, entry);
			queue_work(c4iw_taskq, &c4iw_task);
		}
	}
	spin_unlock_irqrestore(&req_lock, flag);

	return (0);
}

static int
c4iw_so_upcall(struct socket *so, void *arg, int waitflag)
{
	struct c4iw_ep *ep = arg;

	CTR6(KTR_IW_CXGBE,
	    "%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p",
	    __func__, so, so->so_state, ep, states[ep->com.state],
	    ep->com.entry.tqe_prev);

	MPASS(ep->com.so == so);
	/*
	 * Wake up any threads waiting in rdma_init()/rdma_fini(),
	 * with locks held.
	 */
	if (so->so_error || (ep->com.dev->rdev.flags & T4_FATAL_ERROR))
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
	add_ep_to_req_list(ep, C4IW_EVENT_SOCKET);

	return (SU_OK);
}

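/*
 * CPL handler for CPL_RDMA_TERMINATE: look up the toepcb by hardware tid,
 * recover the ep from the socket's receive upcall argument, and defer the
 * actual processing to process_terminate() via the work queue.
 */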
static int
terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rdma_terminate *cpl = mtod(m, const void *);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct socket *so;
	struct c4iw_ep *ep;

	INP_WLOCK(toep->inp);
	so = inp_inpcbtosocket(toep->inp);
	ep = so->so_rcv.sb_upcallarg;
	INP_WUNLOCK(toep->inp);

	CTR3(KTR_IW_CXGBE, "%s: so %p, ep %p", __func__, so, ep);
	add_ep_to_req_list(ep, C4IW_EVENT_TERM);

	return 0;
}

static void
process_socket_event(struct c4iw_ep *ep)
{
	int state = ep->com.state;
	struct socket *so = ep->com.so;

	if (ep->com.state == DEAD) {
		CTR3(KTR_IW_CXGBE, "%s: Pending socket event discarded "
			"ep %p ep_state %s", __func__, ep, states[state]);
		return;
	}

	CTR6(KTR_IW_CXGBE, "process_socket_event: so %p, so_state 0x%x, "
	    "so_err %d, sb_state 0x%x, ep %p, ep_state %s", so, so->so_state,
	    so->so_error, so->so_rcv.sb_state, ep, states[state]);

	if (state == CONNECTING) {
		process_connected(ep);
		return;
	}

	if (state == LISTEN) {
		struct c4iw_listen_ep *lep = (struct c4iw_listen_ep *)ep;
		struct socket *listen_so = so, *new_so = NULL;
		int error = 0;

		SOLISTEN_LOCK(listen_so);
		do {
			error = solisten_dequeue(listen_so, &new_so,
						SOCK_NONBLOCK);
			if (error) {
				CTR4(KTR_IW_CXGBE, "%s: lep %p listen_so %p "
					"error %d", __func__, lep, listen_so,
					error);
				return;
			}
			process_newconn(lep, new_so);

			/* solisten_dequeue() unlocks while returning, so
			 * acquire the lock again for sol_qlen and also for
			 * the next iteration.
			 */
			SOLISTEN_LOCK(listen_so);
		} while (listen_so->sol_qlen);
		SOLISTEN_UNLOCK(listen_so);

		return;
	}

	/* connection error */
	if (so->so_error) {
		process_conn_error(ep);
		return;
	}

	/* peer close */
	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state <= CLOSING) {
		process_peer_close(ep);
		/*
		 * check whether socket disconnect event is pending before
		 * returning. Fallthrough if yes.
		 */
		if (!(so->so_state & SS_ISDISCONNECTED))
			return;
	}

	/* close complete */
	if (so->so_state & SS_ISDISCONNECTED) {
		process_close_complete(ep);
		return;
	}

	/* rx data */
	if (sbused(&ep->com.so->so_rcv)) {
		process_data(ep);
		return;
	}

	/* Socket events for 'MPA Request Received' and 'Close Complete'
	 * were already processed earlier by their respective event handlers.
	 * Hence, these socket events are skipped here.
	 * Any other socket events must have been handled above.
	 */
	MPASS((ep->com.state == MPA_REQ_RCVD) || (ep->com.state == MORIBUND));

	if ((ep->com.state != MPA_REQ_RCVD) && (ep->com.state != MORIBUND))
		log(LOG_ERR, "%s: Unprocessed socket event so %p, "
		"so_state 0x%x, so_err %d, sb_state 0x%x, ep %p, ep_state %s\n",
		__func__, so, so->so_state, so->so_error, so->so_rcv.sb_state,
			ep, states[state]);
}

SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "iw_cxgbe driver parameters");

static int dack_mode = 0;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RWTUN, &dack_mode, 0,
		"Delayed ack mode (default = 0)");

int c4iw_max_read_depth = 8;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RWTUN, &c4iw_max_read_depth, 0,
		"Per-connection max ORD/IRD (default = 8)");

static int enable_tcp_timestamps;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RWTUN, &enable_tcp_timestamps, 0,
		"Enable tcp timestamps (default = 0)");

static int enable_tcp_sack;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RWTUN, &enable_tcp_sack, 0,
		"Enable tcp SACK (default = 0)");

static int enable_tcp_window_scaling = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RWTUN, &enable_tcp_window_scaling, 0,
		"Enable tcp window scaling (default = 1)");

int c4iw_debug = 0;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RWTUN, &c4iw_debug, 0,
		"Enable debug logging (default = 0)");

static int peer2peer = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RWTUN, &peer2peer, 0,
		"Support peer2peer ULPs (default = 1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RWTUN, &p2p_type, 0,
		"RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RWTUN, &ep_timeout_secs, 0,
		"CM Endpoint operation timeout in seconds (default = 60)");

static int mpa_rev = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0,
		"MPA Revision, 0 supports amso1100, 1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)");

static int markers_enabled;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RWTUN, &markers_enabled, 0,
		"Enable MPA MARKERS (default(0) = disabled)");

static int crc_enabled = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RWTUN, &crc_enabled, 0,
		"Enable MPA CRC (default(1) = enabled)");

static int rcv_win = 256 * 1024;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RWTUN, &rcv_win, 0,
		"TCP receive window in bytes (default = 256KB)");

static int snd_win = 128 * 1024;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0,
		"TCP send window in bytes (default = 128KB)");

int use_dsgl = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, use_dsgl, CTLFLAG_RWTUN, &use_dsgl, 0,
		"Use DSGL for PBL/FastReg (default = 1)");

int inline_threshold = 128;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, inline_threshold, CTLFLAG_RWTUN, &inline_threshold, 0,
		"inline vs dsgl threshold (default = 128)");

static int reuseaddr = 0;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, reuseaddr, CTLFLAG_RWTUN, &reuseaddr, 0,
		"Enable SO_REUSEADDR & SO_REUSEPORT socket options on all iWARP client connections (default = 0)");

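/*
 * Endpoint timer helpers.  start_ep_timer() takes an ep reference for the
 * pending timer; stop_ep_timer() returns 0 if it stopped the timer (and
 * dropped that reference), or 1 if the TIMEOUT flag was already set, i.e.
 * the timer had already fired or been stopped.
 */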
static void
start_ep_timer(struct c4iw_ep *ep)
{

	if (timer_pending(&ep->timer)) {
		CTR2(KTR_IW_CXGBE, "%s: ep %p, already started", __func__, ep);
		printk(KERN_ERR "%s timer already started! ep %p\n", __func__,
		    ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static int
stop_ep_timer(struct c4iw_ep *ep)
{

	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}

static void *
alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc == NULL)
		return (NULL);

	kref_init(&epc->kref);
	mutex_init(&epc->mutex);
	c4iw_init_wr_wait(&epc->wr_wait);

	return (epc);
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;
	struct c4iw_ep_common *epc;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	epc = &ep->com;
	KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list",
	    __func__, epc));
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	CTR4(KTR_IW_CXGBE, "%s: ep %p, history 0x%lx, flags 0x%lx",
	    __func__, ep, epc->history, epc->flags);
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	CTR2(KTR_IW_CXGBE, "%s:rerB %p", __func__, ep);
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s:rerE %p", __func__, ep);
}

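/*
 * Build and send the streaming-mode MPA request.  For MPA rev 2 an
 * mpa_v2_conn_params block (IRD/ORD and the peer2peer RTR bits) is placed
 * ahead of any private data.  The frame is sent on the connection's socket
 * with sosend(), after which the ep moves to MPA_REQ_SENT under timer.
 */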
static int
send_mpa_req(struct c4iw_ep *ep)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;
	struct mbuf *m;
	char mpa_rev_to_use = mpa_rev;
	int err = 0;

	if (ep->retry_with_mpa_v1)
		mpa_rev_to_use = 1;
	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL) {
		err = -ENOMEM;
		CTR3(KTR_IW_CXGBE, "%s:smr1 ep: %p , error: %d",
				__func__, ep, err);
		goto err;
	}

	memset(mpa, 0, mpalen);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		(markers_enabled ? MPA_MARKERS : 0) |
		(mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;

	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					    sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_WRITE_RTR);
			} else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
			}
		}
		memcpy(mpa->private_data, &mpa_v2_params,
			sizeof(struct mpa_v2_conn_params));

		if (ep->plen) {

			memcpy(mpa->private_data +
				sizeof(struct mpa_v2_conn_params),
				ep->mpa_pkt + sizeof(*mpa), ep->plen);
		}
	} else {

		if (ep->plen)
			memcpy(mpa->private_data,
					ep->mpa_pkt + sizeof(*mpa), ep->plen);
		CTR2(KTR_IW_CXGBE, "%s:smr7 %p", __func__, ep);
	}

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		err = -ENOMEM;
		CTR3(KTR_IW_CXGBE, "%s:smr2 ep: %p , error: %d",
				__func__, ep, err);
		free(mpa, M_CXGBE);
		goto err;
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
			ep->com.thread);
	if (err) {
		CTR3(KTR_IW_CXGBE, "%s:smr3 ep: %p , error: %d",
				__func__, ep, err);
		goto err;
	}

	START_EP_TIMER(ep);
	ep->com.state = MPA_REQ_SENT;
	ep->mpa_attr.initiator = 1;
	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
	return 0;
err:
	connect_reply_upcall(ep, err);
	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
	return err;
}

static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;
	struct mbuf *m;
	int err;

	CTR4(KTR_IW_CXGBE, "%s:smrejB %p %u %d", __func__, ep, ep->hwtid,
	    ep->plen);

	mpalen = sizeof(*mpa) + plen;

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpalen += sizeof(struct mpa_v2_conn_params);
		CTR4(KTR_IW_CXGBE, "%s:smrej1 %p %u %d", __func__, ep,
		    ep->mpa_attr.version, mpalen);
	}

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL)
		return (-ENOMEM);

	memset(mpa, 0, mpalen);
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					    sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
				(peer2peer ? MPA_V2_PEER2PEER_MODEL :
				 0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					(p2p_type ==
					 FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					 MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					 FW_RI_INIT_P2PTYPE_READ_REQ ?
					 MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
				sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
				sizeof(struct mpa_v2_conn_params), pdata, plen);
		CTR5(KTR_IW_CXGBE, "%s:smrej3 %p %d %d %d", __func__, ep,
		    mpa_v2_params.ird, mpa_v2_params.ord, ep->plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(mpa, M_CXGBE);
		return (-ENOMEM);
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread);
	if (!err)
		ep->snd_seq += mpalen;
	CTR4(KTR_IW_CXGBE, "%s:smrejE %p %u %d", __func__, ep, ep->hwtid, err);
	return err;
}

static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mbuf *m;
	struct mpa_v2_conn_params mpa_v2_params;
	int err;

	CTR2(KTR_IW_CXGBE, "%s:smrepB %p", __func__, ep);

	mpalen = sizeof(*mpa) + plen;

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		CTR3(KTR_IW_CXGBE, "%s:smrep1 %p %d", __func__, ep,
		    ep->mpa_attr.version);
		mpalen += sizeof(struct mpa_v2_conn_params);
	}

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL)
		return (-ENOMEM);

	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		(markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size +=
			htons(sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		CTR5(KTR_IW_CXGBE, "%s:smrep3 %p %d %d %d", __func__, ep,
		    ep->mpa_attr.version, mpa_v2_params.ird, mpa_v2_params.ord);

		if (peer2peer && (ep->mpa_attr.p2p_type !=
			FW_RI_INIT_P2PTYPE_DISABLED)) {

			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {

				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
				CTR5(KTR_IW_CXGBE, "%s:smrep4 %p %d %d %d",
				    __func__, ep, p2p_type, mpa_v2_params.ird,
				    mpa_v2_params.ord);
			}
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {

				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
				CTR5(KTR_IW_CXGBE, "%s:smrep5 %p %d %d %d",
				    __func__, ep, p2p_type, mpa_v2_params.ird,
				    mpa_v2_params.ord);
			}
		}

		memcpy(mpa->private_data, &mpa_v2_params,
			sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
				sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(mpa, M_CXGBE);
		return (-ENOMEM);
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	ep->com.state = MPA_REP_SENT;
	ep->snd_seq += mpalen;
	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
			ep->com.thread);
	CTR3(KTR_IW_CXGBE, "%s:smrepE %p %d", __func__, ep, err);
	return err;
}

static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:ccuB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:ccu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:ccuE %p", __func__, ep);
}

static int
send_abort(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;
	struct sockopt sopt;
	int rc;
	struct linger l;

	CTR5(KTR_IW_CXGBE, "%s ep %p so %p state %s tid %d", __func__, ep, so,
	    states[ep->com.state], ep->hwtid);

	l.l_onoff = 1;
	l.l_linger = 0;

	/* linger_time of 0 forces RST to be sent */
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_LINGER;
	sopt.sopt_val = (caddr_t)&l;
	sopt.sopt_valsize = sizeof l;
	sopt.sopt_td = NULL;
	rc = -sosetopt(so, &sopt);
	if (rc != 0) {
		log(LOG_ERR, "%s: sosetopt(%p, linger = 0) failed with %d.\n",
		    __func__, so, rc);
	}

	uninit_iwarp_socket(so);
	soclose(so);
	set_bit(ABORT_CONN, &ep->com.history);

	/*
	 * TBD: iw_cxgbe driver should receive an ABORT reply for every ABORT
	 * request it has sent. But the current TOE driver is not propagating
	 * this ABORT reply event (via do_abort_rpl) to iw_cxgbe. So as a
	 * work-around, de-reference 'ep' here instead of doing it in the
	 * abort_rpl() handler (not yet implemented) of the iw_cxgbe driver.
	 */
	release_ep_resources(ep);
	ep->com.state = DEAD;

	return (0);
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:pcuB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:pcu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:pcuE %p", __func__, ep);
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:pauB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:pau1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:pauE %p", __func__, ep);
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	CTR3(KTR_IW_CXGBE, "%s:cruB %p, status: %d", __func__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = ((status == -ECONNABORTED) || (status == -EPIPE)) ?
					-ECONNRESET : status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {

		if (!ep->tried_with_mpa_v1) {

			CTR2(KTR_IW_CXGBE, "%s:cru1 %p", __func__, ep);
			/* this means MPA_v2 is used */
			event.ord = ep->ird;
			event.ird = ep->ord;
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {

			CTR2(KTR_IW_CXGBE, "%s:cru2 %p", __func__, ep);
			/* this means MPA_v1 is used */
			event.ord = c4iw_max_read_depth;
			event.ird = c4iw_max_read_depth;
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:cru3 %p", __func__, ep);
		set_bit(CONN_RPL_UPCALL, &ep->com.history);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}

	if (status == -ECONNABORTED) {

		CTR3(KTR_IW_CXGBE, "%s:cruE %p %d", __func__, ep, status);
		return;
	}

	if (status < 0) {

		CTR3(KTR_IW_CXGBE, "%s:cru4 %p %d", __func__, ep, status);
		deref_cm_id(&ep->com);
	}

	CTR2(KTR_IW_CXGBE, "%s:cruE %p", __func__, ep);
}

1770 static int connect_request_upcall(struct c4iw_ep *ep)
1771 {
1772 	struct iw_cm_event event;
1773 	int ret;
1774 
1775 	CTR3(KTR_IW_CXGBE, "%s: ep %p, mpa_v1 %d", __func__, ep,
1776 	    ep->tried_with_mpa_v1);
1777 
1778 	memset(&event, 0, sizeof(event));
1779 	event.event = IW_CM_EVENT_CONNECT_REQUEST;
1780 	event.local_addr = ep->com.local_addr;
1781 	event.remote_addr = ep->com.remote_addr;
1782 	event.provider_data = ep;
1783 
1784 	if (!ep->tried_with_mpa_v1) {
1785 		/* this means MPA_v2 is used */
1786 		event.ord = ep->ord;
1787 		event.ird = ep->ird;
1788 		event.private_data_len = ep->plen -
1789 			sizeof(struct mpa_v2_conn_params);
1790 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
1791 			sizeof(struct mpa_v2_conn_params);
1792 	} else {
1793 
1794 		/* this means MPA_v1 is used. Send max supported */
1795 		event.ord = c4iw_max_read_depth;
1796 		event.ird = c4iw_max_read_depth;
1797 		event.private_data_len = ep->plen;
1798 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
1799 	}
1800 
1801 	c4iw_get_ep(&ep->com);
1802 	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
1803 	    &event);
1804 	if (ret) {
1805 		CTR3(KTR_IW_CXGBE, "%s: ep %p, Failure while notifying event to"
1806 			" IWCM, err:%d", __func__, ep, ret);
1807 		c4iw_put_ep(&ep->com);
1808 	} else
1809 		/* Dereference parent_ep only in success case.
1810 		 * In case of failure, parent_ep is dereferenced by the caller
1811 		 * of process_mpa_request().
1812 		 */
1813 		c4iw_put_ep(&ep->parent_ep->com);
1814 
1815 	set_bit(CONNREQ_UPCALL, &ep->com.history);
1816 	return ret;
1817 }
1818 
1819 static void established_upcall(struct c4iw_ep *ep)
1820 {
1821 	struct iw_cm_event event;
1822 
1823 	CTR2(KTR_IW_CXGBE, "%s:euB %p", __func__, ep);
1824 	memset(&event, 0, sizeof(event));
1825 	event.event = IW_CM_EVENT_ESTABLISHED;
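	/*
	 * Deliberate swap: ep->ird/ep->ord track the peer's advertised
	 * limits, so the local view reverses them.
	 */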
1826 	event.ird = ep->ord;
1827 	event.ord = ep->ird;
1828 
1829 	if (ep->com.cm_id) {
1830 
1831 		CTR2(KTR_IW_CXGBE, "%s:eu1 %p", __func__, ep);
1832 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1833 		set_bit(ESTAB_UPCALL, &ep->com.history);
1834 	}
1835 	CTR2(KTR_IW_CXGBE, "%s:euE %p", __func__, ep);
1836 }
1837 
1838 
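/*
 * With relaxed IRD negotiation, a mismatch between the peer's ORD and the
 * local IRD (or vice versa) is resolved by shrinking to the smaller value
 * (bounded by the adapter's max_ordird_qp) instead of failing the connect.
 */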
1839 #define RELAXED_IRD_NEGOTIATION 1
1840 
1841 /*
1842  * process_mpa_reply - process streaming mode MPA reply
1843  *
1844  * Returns:
1845  *
1846  * 0 upon success indicating a connect reply was delivered to the ULP
1847  * or the mpa reply is incomplete but valid so far.
1848  *
1849  * 1 if a failure requires the caller to close the connection.
1850  *
1851  * 2 if a failure requires the caller to abort the connection.
1852  */
1853 static int process_mpa_reply(struct c4iw_ep *ep)
1854 {
1855 	struct mpa_message *mpa;
1856 	struct mpa_v2_conn_params *mpa_v2_params;
1857 	u16 plen;
1858 	u16 resp_ird, resp_ord;
1859 	u8 rtr_mismatch = 0, insuff_ird = 0;
1860 	struct c4iw_qp_attributes attrs = {0};
1861 	enum c4iw_qp_attr_mask mask;
1862 	int err;
1863 	struct mbuf *top, *m;
1864 	int flags = MSG_DONTWAIT;
1865 	struct uio uio;
1866 	int disconnect = 0;
1867 
1868 	CTR2(KTR_IW_CXGBE, "%s:pmrB %p", __func__, ep);
1869 
1870 	/*
1871 	 * Stop mpa timer.  If it expired, then
1872 	 * we ignore the MPA reply.  process_timeout()
1873 	 * will abort the connection.
1874 	 */
1875 	if (STOP_EP_TIMER(ep))
1876 		return 0;
1877 
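	/*
	 * Read whatever is on the socket as an mbuf chain.  With a non-NULL
	 * mp0 argument soreceive() hands the data back in 'top' instead of
	 * copying through the uio; uio_resid merely caps the byte count.
	 */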
1878 	uio.uio_resid = 1000000;
1879 	uio.uio_td = ep->com.thread;
1880 	err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags);
1881 
1882 	if (err) {
1883 
1884 		if (err == EWOULDBLOCK) {
1885 
1886 			CTR2(KTR_IW_CXGBE, "%s:pmr1 %p", __func__, ep);
1887 			START_EP_TIMER(ep);
1888 			return 0;
1889 		}
1890 		err = -err;
1891 		CTR2(KTR_IW_CXGBE, "%s:pmr2 %p", __func__, ep);
1892 		goto err;
1893 	}
1894 
1895 	if (ep->com.so->so_rcv.sb_mb) {
1896 
1897 		CTR2(KTR_IW_CXGBE, "%s:pmr3 %p", __func__, ep);
1898 		printf("%s data after soreceive called! so %p sb_mb %p top %p\n",
1899 		       __func__, ep->com.so, ep->com.so->so_rcv.sb_mb, top);
1900 	}
1901 
1902 	m = top;
1903 
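	/*
	 * Accumulate the received data into ep->mpa_pkt, following m_next
	 * within a record and hopping to m_nextpkt at record boundaries.
	 */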
1904 	do {
1905 
1906 		CTR2(KTR_IW_CXGBE, "%s:pmr4 %p", __func__, ep);
1907 		/*
1908 		 * If we get more than the supported amount of private data
1909 		 * then we must fail this connection.
1910 		 */
1911 		if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) {
1912 
1913 			CTR3(KTR_IW_CXGBE, "%s:pmr5 %p %d", __func__, ep,
1914 			    ep->mpa_pkt_len + m->m_len);
1915 			err = -EINVAL;
1916 			goto err_stop_timer;
1917 		}
1918 
1919 		/*
1920 		 * copy the new data into our accumulation buffer.
1921 		 */
1922 		m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len]));
1923 		ep->mpa_pkt_len += m->m_len;
1924 		if (!m->m_next)
1925 			m = m->m_nextpkt;
1926 		else
1927 			m = m->m_next;
1928 	} while (m);
1929 
1930 	m_freem(top);
1931 	/*
1932 	 * if we don't even have the mpa message, then bail.
1933 	 */
1934 	if (ep->mpa_pkt_len < sizeof(*mpa)) {
1935 		return 0;
1936 	}
1937 	mpa = (struct mpa_message *) ep->mpa_pkt;
1938 
1939 	/* Validate MPA header. */
1940 	if (mpa->revision > mpa_rev) {
1941 
1942 		CTR4(KTR_IW_CXGBE, "%s:pmr6 %p %d %d", __func__, ep,
1943 		    mpa->revision, mpa_rev);
1944 		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d, "
1945 				"Received = %d\n", __func__, mpa_rev, mpa->revision);
1946 		err = -EPROTO;
1947 		goto err_stop_timer;
1948 	}
1949 
1950 	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
1951 
1952 		CTR2(KTR_IW_CXGBE, "%s:pmr7 %p", __func__, ep);
1953 		err = -EPROTO;
1954 		goto err_stop_timer;
1955 	}
1956 
1957 	plen = ntohs(mpa->private_data_size);
1958 
1959 	/*
1960 	 * Fail if there's too much private data.
1961 	 */
1962 	if (plen > MPA_MAX_PRIVATE_DATA) {
1963 
1964 		CTR2(KTR_IW_CXGBE, "%s:pmr8 %p", __func__, ep);
1965 		err = -EPROTO;
1966 		goto err_stop_timer;
1967 	}
1968 
1969 	/*
1970 	 * If plen does not account for pkt size
1971 	 */
1972 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1973 
1974 		CTR2(KTR_IW_CXGBE, "%s:pmr9 %p", __func__, ep);
1976 		err = -EPROTO;
1977 		goto err_stop_timer;
1978 	}
1979 
1980 	ep->plen = (u8) plen;
1981 
1982 	/*
1983 	 * If we don't have all the pdata yet, then bail.
1984 	 * We'll continue process when more data arrives.
1985 	 */
1986 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) {
1987 
1988 		CTR2(KTR_IW_CXGBE, "%s:pmra %p", __func__, ep);
1989 		return 0;
1990 	}
1991 
1992 	if (mpa->flags & MPA_REJECT) {
1993 
1994 		CTR2(KTR_IW_CXGBE, "%s:pmrb %p", __func__, ep);
1995 		err = -ECONNREFUSED;
1996 		goto err_stop_timer;
1997 	}
1998 
1999 	/*
2000 	 * If we get here we have accumulated the entire mpa
2001 	 * start reply message including private data. And
2002 	 * the MPA header is valid.
2003 	 */
2004 	ep->com.state = FPDU_MODE;
2005 	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
2006 	ep->mpa_attr.recv_marker_enabled = markers_enabled;
2007 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
2008 	ep->mpa_attr.version = mpa->revision;
2009 	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
2010 
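	/*
	 * For MPA rev 2 (enhanced RDMA connection setup) the private data
	 * begins with an mpa_v2_conn_params block carrying the peer's IRD
	 * and ORD; the high bits of those 16-bit fields double as the
	 * peer2peer-model and RTR-type flags masked off below.
	 */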
2011 	if (mpa->revision == 2) {
2012 
2013 		CTR2(KTR_IW_CXGBE, "%s:pmrc %p", __func__, ep);
2014 		ep->mpa_attr.enhanced_rdma_conn =
2015 			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
2016 
2017 		if (ep->mpa_attr.enhanced_rdma_conn) {
2018 
2019 			CTR2(KTR_IW_CXGBE, "%s:pmrd %p", __func__, ep);
2020 			mpa_v2_params = (struct mpa_v2_conn_params *)
2021 				(ep->mpa_pkt + sizeof(*mpa));
2022 			resp_ird = ntohs(mpa_v2_params->ird) &
2023 				MPA_V2_IRD_ORD_MASK;
2024 			resp_ord = ntohs(mpa_v2_params->ord) &
2025 				MPA_V2_IRD_ORD_MASK;
2026 
2027 			/*
2028 			 * This is a double-check. Ideally, below checks are
2029 			 * not required since ird/ord stuff has been taken
2030 			 * care of in c4iw_accept_cr
2031 			 */
2032 			if (ep->ird < resp_ord) {
2033 				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
2034 				   ep->com.dev->rdev.adap->params.max_ordird_qp)
2035 					ep->ird = resp_ord;
2036 				else
2037 					insuff_ird = 1;
2038 			} else if (ep->ird > resp_ord) {
2039 				ep->ird = resp_ord;
2040 			}
2041 			if (ep->ord > resp_ird) {
2042 				if (RELAXED_IRD_NEGOTIATION)
2043 					ep->ord = resp_ird;
2044 				else
2045 					insuff_ird = 1;
2046 			}
2047 			if (insuff_ird) {
2048 				err = -ENOMEM;
2049 				ep->ird = resp_ord;
2050 				ep->ord = resp_ird;
2051 			}
2052 
2053 			if (ntohs(mpa_v2_params->ird) &
2054 				MPA_V2_PEER2PEER_MODEL) {
2055 
2056 				CTR2(KTR_IW_CXGBE, "%s:pmrf %p", __func__, ep);
2057 				if (ntohs(mpa_v2_params->ord) &
2058 					MPA_V2_RDMA_WRITE_RTR) {
2059 
2060 					CTR2(KTR_IW_CXGBE, "%s:pmrg %p", __func__, ep);
2061 					ep->mpa_attr.p2p_type =
2062 						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
2063 				}
2064 				else if (ntohs(mpa_v2_params->ord) &
2065 					MPA_V2_RDMA_READ_RTR) {
2066 
2067 					CTR2(KTR_IW_CXGBE, "%s:pmrh %p", __func__, ep);
2068 					ep->mpa_attr.p2p_type =
2069 						FW_RI_INIT_P2PTYPE_READ_REQ;
2070 				}
2071 			}
2072 		}
2073 	} else {
2074 
2075 		CTR2(KTR_IW_CXGBE, "%s:pmri %p", __func__, ep);
2076 
2077 		if (mpa->revision == 1) {
2078 
2079 			CTR2(KTR_IW_CXGBE, "%s:pmrj %p", __func__, ep);
2080 
2081 			if (peer2peer) {
2082 
2083 				CTR2(KTR_IW_CXGBE, "%s:pmrk %p", __func__, ep);
2084 				ep->mpa_attr.p2p_type = p2p_type;
2085 			}
2086 		}
2087 	}
2088 
2089 	if (set_tcpinfo(ep)) {
2090 
2091 		CTR2(KTR_IW_CXGBE, "%s:pmrl %p", __func__, ep);
2092 		printf("%s set_tcpinfo error\n", __func__);
2093 		err = -ECONNRESET;
2094 		goto err;
2095 	}
2096 
2097 	CTR6(KTR_IW_CXGBE, "%s - crc_enabled = %d, recv_marker_enabled = %d, "
2098 	    "xmit_marker_enabled = %d, version = %d p2p_type = %d", __func__,
2099 	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
2100 	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
2101 	    ep->mpa_attr.p2p_type);
2102 
2103 	/*
2104 	 * If responder's RTR does not match with that of initiator, assign
2105 	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
2106 	 * generated when moving QP to RTS state.
2107 	 * A TERM message will be sent after QP has moved to RTS state
2108 	 */
2109 	if ((ep->mpa_attr.version == 2) && peer2peer &&
2110 		(ep->mpa_attr.p2p_type != p2p_type)) {
2111 
2112 		CTR2(KTR_IW_CXGBE, "%s:pmrm %p", __func__, ep);
2113 		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
2114 		rtr_mismatch = 1;
2115 	}
2116 
2117 
2118 	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
2119 	attrs.mpa_attr = ep->mpa_attr;
2120 	attrs.max_ird = ep->ird;
2121 	attrs.max_ord = ep->ord;
2122 	attrs.llp_stream_handle = ep;
2123 	attrs.next_state = C4IW_QP_STATE_RTS;
2124 
2125 	mask = C4IW_QP_ATTR_NEXT_STATE |
2126 		C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
2127 		C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
2128 
2129 	/* bind QP and TID with INIT_WR */
2130 	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
2131 
2132 	if (err) {
2133 
2134 		CTR2(KTR_IW_CXGBE, "%s:pmrn %p", __func__, ep);
2135 		goto err;
2136 	}
2137 
2138 	/*
2139 	 * If responder's RTR requirement did not match with what initiator
2140 	 * supports, generate TERM message
2141 	 */
2142 	if (rtr_mismatch) {
2143 
2144 		CTR2(KTR_IW_CXGBE, "%s:pmro %p", __func__, ep);
2145 		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
2146 		attrs.layer_etype = LAYER_MPA | DDP_LLP;
2147 		attrs.ecode = MPA_NOMATCH_RTR;
2148 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
2149 		attrs.send_term = 1;
2150 		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2151 			C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2152 		err = -ENOMEM;
2153 		disconnect = 1;
2154 		goto out;
2155 	}
2156 
2157 	/*
2158 	 * Generate TERM if initiator IRD is not sufficient for responder
2159 	 * provided ORD. Currently, we do the same behaviour even when
2160 	 * responder provided IRD is also not sufficient as regards to
2161 	 * initiator ORD.
2162 	 */
2163 	if (insuff_ird) {
2164 
2165 		CTR2(KTR_IW_CXGBE, "%s:pmrp %p", __func__, ep);
2166 		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
2167 				__func__);
2168 		attrs.layer_etype = LAYER_MPA | DDP_LLP;
2169 		attrs.ecode = MPA_INSUFF_IRD;
2170 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
2171 		attrs.send_term = 1;
2172 		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2173 			C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2174 		err = -ENOMEM;
2175 		disconnect = 1;
2176 		goto out;
2177 	}
2178 	goto out;
2179 err_stop_timer:
2180 	STOP_EP_TIMER(ep);
2181 err:
2182 	disconnect = 2;
2183 out:
2184 	connect_reply_upcall(ep, err);
2185 	CTR2(KTR_IW_CXGBE, "%s:pmrE %p", __func__, ep);
2186 	return disconnect;
2187 }
2188 
2189 /*
2190  * process_mpa_request - process streaming mode MPA request
2191  *
2192  * Returns:
2193  *
2194  * 0 upon success indicating a connect request was delivered to the ULP
2195  * or the mpa request is incomplete but valid so far.
2196  *
2197  * 1 if a failure requires the caller to close the connection.
2198  *
2199  * 2 if a failure requires the caller to abort the connection.
2200  */
2201 static int
2202 process_mpa_request(struct c4iw_ep *ep)
2203 {
2204 	struct mpa_message *mpa;
2205 	struct mpa_v2_conn_params *mpa_v2_params;
2206 	u16 plen;
2207 	int flags = MSG_DONTWAIT;
2208 	int rc;
2209 	struct iovec iov;
2210 	struct uio uio;
2211 	enum c4iw_ep_state state = ep->com.state;
2212 
2213 	CTR3(KTR_IW_CXGBE, "%s: ep %p, state %s", __func__, ep, states[state]);
2214 
2215 	if (state != MPA_REQ_WAIT)
2216 		return 0;
2217 
2218 	iov.iov_base = &ep->mpa_pkt[ep->mpa_pkt_len];
2219 	iov.iov_len = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
2220 	uio.uio_iov = &iov;
2221 	uio.uio_iovcnt = 1;
2222 	uio.uio_offset = 0;
2223 	uio.uio_resid = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
2224 	uio.uio_segflg = UIO_SYSSPACE;
2225 	uio.uio_rw = UIO_READ;
2226 	uio.uio_td = NULL; /* uio.uio_td = ep->com.thread; */
2227 
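	/*
	 * Unlike process_mpa_reply(), receive directly into the tail of
	 * ep->mpa_pkt through the iovec; uiomove() advances uio_offset, so
	 * afterwards it holds the number of bytes read.
	 */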
2228 	rc = soreceive(ep->com.so, NULL, &uio, NULL, NULL, &flags);
2229 	if (rc == EAGAIN)
2230 		return 0;
2231 	else if (rc)
2232 		goto err_stop_timer;
2233 
2234 	KASSERT(uio.uio_offset > 0, ("%s: soreceive on so %p read no data",
2235 	    __func__, ep->com.so));
2236 	ep->mpa_pkt_len += uio.uio_offset;
2237 
2238 	/*
2239 	 * If we get more than the supported amount of private data then we must
2240 	 * fail this connection.  XXX: check so_rcv->sb_cc, or peek with another
2241 	 * soreceive, or increase the size of mpa_pkt by 1 and abort if the last
2242 	 * byte is filled by the soreceive above.
2243 	 */
2244 
2245 	/* Don't even have the MPA message.  Wait for more data to arrive. */
2246 	if (ep->mpa_pkt_len < sizeof(*mpa))
2247 		return 0;
2248 	mpa = (struct mpa_message *) ep->mpa_pkt;
2249 
2250 	/*
2251 	 * Validate MPA Header.
2252 	 */
2253 	if (mpa->revision > mpa_rev) {
2254 		log(LOG_ERR, "%s: MPA version mismatch. Local = %d,"
2255 		    " Received = %d\n", __func__, mpa_rev, mpa->revision);
2256 		goto err_stop_timer;
2257 	}
2258 
2259 	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
2260 		goto err_stop_timer;
2261 
2262 	/*
2263 	 * Fail if there's too much private data.
2264 	 */
2265 	plen = ntohs(mpa->private_data_size);
2266 	if (plen > MPA_MAX_PRIVATE_DATA)
2267 		goto err_stop_timer;
2268 
2269 	/*
2270 	 * If plen does not account for pkt size
2271 	 */
2272 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
2273 		goto err_stop_timer;
2274 
2275 	ep->plen = (u8) plen;
2276 
2277 	/*
2278 	 * If we don't have all the pdata yet, then bail.
2279 	 */
2280 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
2281 		return 0;
2282 
2283 	/*
2284 	 * If we get here we have accumulated the entire mpa
2285 	 * start reply message including private data.
2286 	 */
2287 	ep->mpa_attr.initiator = 0;
2288 	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
2289 	ep->mpa_attr.recv_marker_enabled = markers_enabled;
2290 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
2291 	ep->mpa_attr.version = mpa->revision;
2292 	if (mpa->revision == 1)
2293 		ep->tried_with_mpa_v1 = 1;
2294 	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
2295 
2296 	if (mpa->revision == 2) {
2297 		ep->mpa_attr.enhanced_rdma_conn =
2298 		    mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
2299 		if (ep->mpa_attr.enhanced_rdma_conn) {
2300 			mpa_v2_params = (struct mpa_v2_conn_params *)
2301 				(ep->mpa_pkt + sizeof(*mpa));
2302 			ep->ird = ntohs(mpa_v2_params->ird) &
2303 				MPA_V2_IRD_ORD_MASK;
2304 			ep->ird = min_t(u32, ep->ird,
2305 					cur_max_read_depth(ep->com.dev));
2306 			ep->ord = ntohs(mpa_v2_params->ord) &
2307 				MPA_V2_IRD_ORD_MASK;
2308 			ep->ord = min_t(u32, ep->ord,
2309 					cur_max_read_depth(ep->com.dev));
2310 			CTR3(KTR_IW_CXGBE, "%s initiator ird %u ord %u",
2311 				 __func__, ep->ird, ep->ord);
2312 			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
2313 				if (peer2peer) {
2314 					if (ntohs(mpa_v2_params->ord) &
2315 							MPA_V2_RDMA_WRITE_RTR)
2316 						ep->mpa_attr.p2p_type =
2317 						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
2318 					else if (ntohs(mpa_v2_params->ord) &
2319 							MPA_V2_RDMA_READ_RTR)
2320 						ep->mpa_attr.p2p_type =
2321 						FW_RI_INIT_P2PTYPE_READ_REQ;
2322 				}
2323 		}
2324 	} else if (mpa->revision == 1 && peer2peer)
2325 		ep->mpa_attr.p2p_type = p2p_type;
2326 
2327 	if (set_tcpinfo(ep))
2328 		goto err_stop_timer;
2329 
2330 	CTR5(KTR_IW_CXGBE, "%s: crc_enabled = %d, recv_marker_enabled = %d, "
2331 	    "xmit_marker_enabled = %d, version = %d", __func__,
2332 	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
2333 	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
2334 
2335 	ep->com.state = MPA_REQ_RCVD;
2336 	STOP_EP_TIMER(ep);
2337 
2338 	/* drive upcall */
2339 	if (ep->parent_ep->com.state != DEAD)
2340 		if (connect_request_upcall(ep))
2341 			goto err_out;
2342 	return 0;
2343 
2344 err_stop_timer:
2345 	STOP_EP_TIMER(ep);
2346 err_out:
2347 	return 2;
2348 }
2349 
2350 /*
2351  * iwcm->reject.  Reject an incoming MPA connection request: send an MPA
2352  * reject message, or abort the connection outright when mpa_rev is 0
2353  * (MPA negotiation disabled), then disconnect the endpoint.
2354  */
2355 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
2356 {
2357 	int err;
2358 	struct c4iw_ep *ep = to_ep(cm_id);
2359 	int abort = 0;
2360 
2361 	mutex_lock(&ep->com.mutex);
2362 	CTR2(KTR_IW_CXGBE, "%s:crcB %p", __func__, ep);
2363 
2364 	if (ep->com.state != MPA_REQ_RCVD) {
2366 
2367 		CTR2(KTR_IW_CXGBE, "%s:crc1 %p", __func__, ep);
2368 		mutex_unlock(&ep->com.mutex);
2369 		c4iw_put_ep(&ep->com);
2370 		return -ECONNRESET;
2371 	}
2372 	set_bit(ULP_REJECT, &ep->com.history);
2373 
2374 	if (mpa_rev == 0) {
2375 
2376 		CTR2(KTR_IW_CXGBE, "%s:crc2 %p", __func__, ep);
2377 		abort = 1;
2378 	}
2379 	else {
2380 
2381 		CTR2(KTR_IW_CXGBE, "%s:crc3 %p", __func__, ep);
2382 		abort = send_mpa_reject(ep, pdata, pdata_len);
2383 	}
2384 	STOP_EP_TIMER(ep);
2385 	err = c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
2386 	mutex_unlock(&ep->com.mutex);
2387 	c4iw_put_ep(&ep->com);
2388 	CTR3(KTR_IW_CXGBE, "%s:crc4 %p, err: %d", __func__, ep, err);
2389 	return 0;
2390 }
2391 
2392 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2393 {
2394 	int err;
2395 	struct c4iw_qp_attributes attrs = {0};
2396 	enum c4iw_qp_attr_mask mask;
2397 	struct c4iw_ep *ep = to_ep(cm_id);
2398 	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
2399 	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
2400 	int abort = 0;
2401 
2402 	mutex_lock(&ep->com.mutex);
2403 	CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep);
2404 
2405 	if (ep->com.state != MPA_REQ_RCVD) {
2407 
2408 		CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep);
2409 		err = -ECONNRESET;
2410 		goto err_out;
2411 	}
2412 
2413 	if (!qp) {
		err = -EINVAL;
		goto err_out;
	}
2414 
2415 	set_bit(ULP_ACCEPT, &ep->com.history);
2416 
2417 	if ((conn_param->ord > c4iw_max_read_depth) ||
2418 		(conn_param->ird > c4iw_max_read_depth)) {
2419 
2420 		CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep);
2421 		err = -EINVAL;
2422 		goto err_abort;
2423 	}
2424 
2425 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
2426 
2427 		CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep);
2428 
2429 		if (conn_param->ord > ep->ird) {
2430 			if (RELAXED_IRD_NEGOTIATION) {
2431 				conn_param->ord = ep->ird;
2432 			} else {
2433 				ep->ird = conn_param->ird;
2434 				ep->ord = conn_param->ord;
2435 				send_mpa_reject(ep, conn_param->private_data,
2436 						conn_param->private_data_len);
2437 				err = -ENOMEM;
2438 				goto err_abort;
2439 			}
2440 		}
2441 		if (conn_param->ird < ep->ord) {
2442 			if (RELAXED_IRD_NEGOTIATION &&
2443 			    ep->ord <= h->rdev.adap->params.max_ordird_qp) {
2444 				conn_param->ird = ep->ord;
2445 			} else {
2446 				err = -ENOMEM;
2447 				goto err_abort;
2448 			}
2449 		}
2450 	}
2451 	ep->ird = conn_param->ird;
2452 	ep->ord = conn_param->ord;
2453 
2454 	if (ep->mpa_attr.version == 1) {
2455 		if (peer2peer && ep->ird == 0)
2456 			ep->ird = 1;
2457 	} else {
2458 		if (peer2peer &&
2459 		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
2460 		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
2461 			ep->ird = 1;
2462 	}
2463 
2464 	CTR4(KTR_IW_CXGBE, "%s %d ird %d ord %d", __func__, __LINE__,
2465 			ep->ird, ep->ord);
2466 
2467 	ep->com.cm_id = cm_id;
2468 	ref_cm_id(&ep->com);
2469 	ep->com.qp = qp;
2470 	ref_qp(ep);
2471 	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
2472 
2473 	/* bind QP to EP and move to RTS */
2474 	attrs.mpa_attr = ep->mpa_attr;
2475 	attrs.max_ird = ep->ird;
2476 	attrs.max_ord = ep->ord;
2477 	attrs.llp_stream_handle = ep;
2478 	attrs.next_state = C4IW_QP_STATE_RTS;
2479 
2480 	/* bind QP and TID with INIT_WR */
2481 	mask = C4IW_QP_ATTR_NEXT_STATE |
2482 		C4IW_QP_ATTR_LLP_STREAM_HANDLE |
2483 		C4IW_QP_ATTR_MPA_ATTR |
2484 		C4IW_QP_ATTR_MAX_IRD |
2485 		C4IW_QP_ATTR_MAX_ORD;
2486 
2487 	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
2488 	if (err) {
2489 		CTR3(KTR_IW_CXGBE, "%s:caca %p, err: %d", __func__, ep, err);
2490 		goto err_deref_cm_id;
2491 	}
2492 
2493 	err = send_mpa_reply(ep, conn_param->private_data,
2494 			conn_param->private_data_len);
2495 	if (err) {
2496 		CTR3(KTR_IW_CXGBE, "%s:cacb %p, err: %d", __func__, ep, err);
2497 		goto err_deref_cm_id;
2498 	}
2499 
2500 	ep->com.state = FPDU_MODE;
2501 	established_upcall(ep);
2502 	mutex_unlock(&ep->com.mutex);
2503 	c4iw_put_ep(&ep->com);
2504 	CTR2(KTR_IW_CXGBE, "%s:cacE %p", __func__, ep);
2505 	return 0;
2506 err_deref_cm_id:
2507 	deref_cm_id(&ep->com);
2508 err_abort:
2509 	abort = 1;
2510 err_out:
2511 	if (abort)
2512 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
2513 	mutex_unlock(&ep->com.mutex);
2514 	c4iw_put_ep(&ep->com);
2515 	CTR2(KTR_IW_CXGBE, "%s:cacE err %p", __func__, ep);
2516 	return err;
2517 }
2518 
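/*
 * Create a TCP socket, optionally enable SO_REUSEADDR/SO_REUSEPORT, bind it
 * to 'laddr' and return the bound address (including any kernel-chosen
 * port) back through 'laddr'.
 */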
2519 static int
2520 c4iw_sock_create(struct sockaddr_storage *laddr, struct socket **so)
2521 {
2522 	int ret;
2523 	int size, on;
2524 	struct socket *sock = NULL;
2525 	struct sockopt sopt;
2526 
2527 	ret = sock_create_kern(laddr->ss_family,
2528 			SOCK_STREAM, IPPROTO_TCP, &sock);
2529 	if (ret) {
2530 		CTR2(KTR_IW_CXGBE, "%s:Failed to create TCP socket. err %d",
2531 				__func__, ret);
2532 		return ret;
2533 	}
2534 
2535 	if (reuseaddr) {
2536 		bzero(&sopt, sizeof(struct sockopt));
2537 		sopt.sopt_dir = SOPT_SET;
2538 		sopt.sopt_level = SOL_SOCKET;
2539 		sopt.sopt_name = SO_REUSEADDR;
2540 		on = 1;
2541 		sopt.sopt_val = &on;
2542 		sopt.sopt_valsize = sizeof(on);
2543 		ret = -sosetopt(sock, &sopt);
2544 		if (ret != 0) {
2545 			log(LOG_ERR, "%s: sosetopt(%p, SO_REUSEADDR) "
2546 				"failed with %d.\n", __func__, sock, ret);
2547 		}
2548 		bzero(&sopt, sizeof(struct sockopt));
2549 		sopt.sopt_dir = SOPT_SET;
2550 		sopt.sopt_level = SOL_SOCKET;
2551 		sopt.sopt_name = SO_REUSEPORT;
2552 		on = 1;
2553 		sopt.sopt_val = &on;
2554 		sopt.sopt_valsize = sizeof(on);
2555 		ret = -sosetopt(sock, &sopt);
2556 		if (ret != 0) {
2557 			log(LOG_ERR, "%s: sosetopt(%p, SO_REUSEPORT) "
2558 				"failed with %d.\n", __func__, sock, ret);
2559 		}
2560 	}
2561 
2562 	ret = -sobind(sock, (struct sockaddr *)laddr, curthread);
2563 	if (ret) {
2564 		CTR2(KTR_IW_CXGBE, "%s:Failed to bind socket. err %d",
2565 				__func__, ret);
2566 		sock_release(sock);
2567 		return ret;
2568 	}
2569 
2570 	size = laddr->ss_family == AF_INET6 ?
2571 		sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in);
2572 	ret = sock_getname(sock, (struct sockaddr *)laddr, &size, 0);
2573 	if (ret) {
2574 		CTR2(KTR_IW_CXGBE, "%s:sock_getname failed. err %d",
2575 				__func__, ret);
2576 		sock_release(sock);
2577 		return ret;
2578 	}
2579 
2580 	*so = sock;
2581 	return 0;
2582 }
2583 
2584 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2585 {
2586 	int err = 0;
2587 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2588 	struct c4iw_ep *ep = NULL;
2589 	struct ifnet    *nh_ifp;        /* Logical egress interface */
2590 	struct epoch_tracker et;
2591 #ifdef VIMAGE
2592 	struct rdma_cm_id *rdma_id = (struct rdma_cm_id*)cm_id->context;
2593 	struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
2594 #endif
2595 
2596 	CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id);
2597 
2598 
2599 	if ((conn_param->ord > c4iw_max_read_depth) ||
2600 		(conn_param->ird > c4iw_max_read_depth)) {
2601 
2602 		CTR2(KTR_IW_CXGBE, "%s:cc1 %p", __func__, cm_id);
2603 		err = -EINVAL;
2604 		goto out;
2605 	}
2606 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2607 	cm_id->provider_data = ep;
2608 
2609 	init_timer(&ep->timer);
2610 	ep->plen = conn_param->private_data_len;
2611 
2612 	if (ep->plen) {
2613 
2614 		CTR2(KTR_IW_CXGBE, "%s:cc3 %p", __func__, ep);
2615 		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
2616 				conn_param->private_data, ep->plen);
2617 	}
2618 	ep->ird = conn_param->ird;
2619 	ep->ord = conn_param->ord;
2620 
2621 	if (peer2peer && ep->ord == 0) {
2622 
2623 		CTR2(KTR_IW_CXGBE, "%s:cc4 %p", __func__, ep);
2624 		ep->ord = 1;
2625 	}
2626 
2627 	ep->com.dev = dev;
2628 	ep->com.cm_id = cm_id;
2629 	ref_cm_id(&ep->com);
2630 	ep->com.qp = get_qhp(dev, conn_param->qpn);
2631 
2632 	if (!ep->com.qp) {
2633 
2634 		CTR2(KTR_IW_CXGBE, "%s:cc5 %p", __func__, ep);
2635 		err = -EINVAL;
2636 		goto fail;
2637 	}
2638 	ref_qp(ep);
2639 	ep->com.thread = curthread;
2640 
2641 	NET_EPOCH_ENTER(et);
2642 	CURVNET_SET(vnet);
2643 	err = get_ifnet_from_raddr(&cm_id->remote_addr, &nh_ifp);
2644 	CURVNET_RESTORE();
2645 	NET_EPOCH_EXIT(et);
2646 
2647 	if (err) {
2648 
2649 		CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep);
2650 		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
2651 		err = -EHOSTUNREACH;
2652 		goto fail;
2653 	}
2654 
2655 	if (!(nh_ifp->if_capenable & IFCAP_TOE) ||
2656 	    TOEDEV(nh_ifp) == NULL) {
2657 		err = -ENOPROTOOPT;
2658 		goto fail;
2659 	}
2660 	ep->com.state = CONNECTING;
2661 	ep->tos = 0;
2662 	ep->com.local_addr = cm_id->local_addr;
2663 	ep->com.remote_addr = cm_id->remote_addr;
2664 
2665 	err = c4iw_sock_create(&cm_id->local_addr, &ep->com.so);
2666 	if (err)
2667 		goto fail;
2668 
2669 	setiwsockopt(ep->com.so);
2670 	init_iwarp_socket(ep->com.so, &ep->com);
2671 	err = -soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr,
2672 		ep->com.thread);
2673 	if (err)
2674 		goto fail_free_so;
2675 	CTR2(KTR_IW_CXGBE, "%s:ccE, ep %p", __func__, ep);
2676 	return 0;
2677 
2678 fail_free_so:
2679 	uninit_iwarp_socket(ep->com.so);
2680 	ep->com.state = DEAD;
2681 	sock_release(ep->com.so);
2682 fail:
2683 	deref_cm_id(&ep->com);
2684 	c4iw_put_ep(&ep->com);
2685 	ep = NULL;
2686 out:
2687 	CTR2(KTR_IW_CXGBE, "%s:ccE Error %d", __func__, err);
2688 	return err;
2689 }
2690 
2691 /*
2692  * iwcm->create_listen.  Returns -errno on failure.
2693  */
2694 int
2695 c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
2696 {
2697 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2698 	struct c4iw_listen_ep *lep = NULL;
2699 	struct listen_port_info *port_info = NULL;
2700 	int rc = 0;
2701 
2702 	CTR3(KTR_IW_CXGBE, "%s: cm_id %p, backlog %d", __func__, cm_id,
2703 			backlog);
2704 	if (c4iw_fatal_error(&dev->rdev)) {
2705 		CTR2(KTR_IW_CXGBE, "%s: cm_id %p, fatal error", __func__,
2706 			       cm_id);
2707 		return -EIO;
2708 	}
2709 	lep = alloc_ep(sizeof(*lep), GFP_KERNEL);
2710 	lep->com.cm_id = cm_id;
2711 	ref_cm_id(&lep->com);
2712 	lep->com.dev = dev;
2713 	lep->backlog = backlog;
2714 	lep->com.local_addr = cm_id->local_addr;
2715 	lep->com.thread = curthread;
2716 	cm_id->provider_data = lep;
2717 	lep->com.state = LISTEN;
2718 
2719 	/* In case of INADDR_ANY, ibcore creates a cmid for each device and
2720 	 * invokes iw_cxgbe listener callbacks assuming that iw_cxgbe creates
2721 	 * HW listeners for each device separately. But toecore expects a
2722 	 * single solisten() call with the INADDR_ANY address to create HW
2723 	 * listeners on all devices for a given port number. So the iw_cxgbe
2724 	 * driver calls solisten() only once for INADDR_ANY (usually on the
2725 	 * first listener callback from ibcore), and all subsequent
2726 	 * INADDR_ANY listener callbacks from ibcore (for the same port
2727 	 * address) skip solisten(), as the first callback has already
2728 	 * created listeners on all other devices (via solisten).
2729 	 */
2730 	if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr)) {
2731 		port_info = add_ep_to_listenlist(lep);
2732 		/* skip solisten() if refcnt > 1, as the listeners were
2733 		 * already created by the 'Master lep'
2734 		 */
2735 		if (port_info->refcnt > 1) {
2736 			/* As there will be only one listener socket for a TCP
2737 			 * port, copy the Master lep's socket pointer to the
2738 			 * other leps belonging to the same TCP port.
2739 			 */
2740 			struct c4iw_listen_ep *head_lep =
2741 					container_of(port_info->lep_list.next,
2742 					struct c4iw_listen_ep, listen_ep_list);
2743 			lep->com.so = head_lep->com.so;
2744 			goto out;
2745 		}
2746 	}
2747 	rc = c4iw_sock_create(&cm_id->local_addr, &lep->com.so);
2748 	if (rc) {
2749 		CTR2(KTR_IW_CXGBE, "%s:Failed to create socket. err %d",
2750 				__func__, rc);
2751 		goto fail;
2752 	}
2753 
2754 	rc = -solisten(lep->com.so, backlog, curthread);
2755 	if (rc) {
2756 		CTR3(KTR_IW_CXGBE, "%s:Failed to listen on sock:%p. err %d",
2757 				__func__, lep->com.so, rc);
2758 		goto fail_free_so;
2759 	}
2760 	init_iwarp_socket(lep->com.so, &lep->com);
2761 out:
2762 	return 0;
2763 
2764 fail_free_so:
2765 	sock_release(lep->com.so);
2766 fail:
2767 	if (port_info)
2768 		rem_ep_from_listenlist(lep);
2769 	deref_cm_id(&lep->com);
2770 	c4iw_put_ep(&lep->com);
2771 	return rc;
2772 }
2773 
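/*
 * iwcm->destroy_listen.  Tear down a listening endpoint; for an INADDR_ANY
 * listener the shared socket is closed only when the last lep on that port
 * is removed from the listen list.
 */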
2774 int
2775 c4iw_destroy_listen(struct iw_cm_id *cm_id)
2776 {
2777 	struct c4iw_listen_ep *lep = to_listen_ep(cm_id);
2778 
2779 	mutex_lock(&lep->com.mutex);
2780 	CTR3(KTR_IW_CXGBE, "%s: cm_id %p, state %s", __func__, cm_id,
2781 	    states[lep->com.state]);
2782 
2783 	lep->com.state = DEAD;
2784 	if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr)) {
2785 		/* if no refcount then close listen socket */
2786 		if (!rem_ep_from_listenlist(lep))
2787 			close_socket(lep->com.so);
2788 	} else
2789 		close_socket(lep->com.so);
2790 	deref_cm_id(&lep->com);
2791 	mutex_unlock(&lep->com.mutex);
2792 	c4iw_put_ep(&lep->com);
2793 	return 0;
2794 }
2795 
2796 int __c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2797 {
2798 	int ret;
2799 	mutex_lock(&ep->com.mutex);
2800 	ret = c4iw_ep_disconnect(ep, abrupt, gfp);
2801 	mutex_unlock(&ep->com.mutex);
2802 	return ret;
2803 }
2804 
2805 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2806 {
2807 	int ret = 0;
2808 	int close = 0;
2809 	struct c4iw_rdev *rdev;
2810 
2811 
2812 	CTR2(KTR_IW_CXGBE, "%s:cedB %p", __func__, ep);
2813 
2814 	rdev = &ep->com.dev->rdev;
2815 
2816 	if (c4iw_fatal_error(rdev)) {
2817 		CTR3(KTR_IW_CXGBE, "%s:ced1 fatal error %p %s", __func__, ep,
2818 					states[ep->com.state]);
2819 		if (ep->com.state != DEAD) {
2820 			send_abort(ep);
2821 			ep->com.state = DEAD;
2822 		}
2823 		close_complete_upcall(ep, -ECONNRESET);
2824 		return ECONNRESET;
2825 	}
2826 	CTR3(KTR_IW_CXGBE, "%s:ced2 %p %s", __func__, ep,
2827 	    states[ep->com.state]);
2828 
2829 	/*
2830 	 * Ref the ep here in case we have fatal errors causing the
2831 	 * ep to be released and freed.
2832 	 */
2833 	c4iw_get_ep(&ep->com);
2834 	switch (ep->com.state) {
2835 
2836 		case MPA_REQ_WAIT:
2837 		case MPA_REQ_SENT:
2838 		case MPA_REQ_RCVD:
2839 		case MPA_REP_SENT:
2840 		case FPDU_MODE:
2841 			close = 1;
2842 			if (abrupt)
2843 				ep->com.state = ABORTING;
2844 			else {
2845 				ep->com.state = CLOSING;
2846 				START_EP_TIMER(ep);
2847 			}
2848 			set_bit(CLOSE_SENT, &ep->com.flags);
2849 			break;
2850 
2851 		case CLOSING:
2852 
2853 			if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2854 
2855 				close = 1;
2856 				if (abrupt) {
2857 					STOP_EP_TIMER(ep);
2858 					ep->com.state = ABORTING;
2859 				} else
2860 					ep->com.state = MORIBUND;
2861 			}
2862 			break;
2863 
2864 		case MORIBUND:
2865 		case ABORTING:
2866 		case DEAD:
2867 			CTR3(KTR_IW_CXGBE,
2868 			    "%s ignoring disconnect ep %p state %u", __func__,
2869 			    ep, ep->com.state);
2870 			break;
2871 
2872 		default:
2873 			BUG();
2874 			break;
2875 	}
2876 
2877 
2878 	if (close) {
2879 
2880 		CTR2(KTR_IW_CXGBE, "%s:ced3 %p", __func__, ep);
2881 
2882 		if (abrupt) {
2883 
2884 			CTR2(KTR_IW_CXGBE, "%s:ced4 %p", __func__, ep);
2885 			set_bit(EP_DISC_ABORT, &ep->com.history);
2886 			close_complete_upcall(ep, -ECONNRESET);
2887 			send_abort(ep);
2888 		} else {
2889 
2890 			CTR2(KTR_IW_CXGBE, "%s:ced5 %p", __func__, ep);
2891 			set_bit(EP_DISC_CLOSE, &ep->com.history);
2892 
2893 			if (!ep->parent_ep)
2894 				ep->com.state = MORIBUND;
2895 
2896 			CURVNET_SET(ep->com.so->so_vnet);
2897 			ret = sodisconnect(ep->com.so);
2898 			CURVNET_RESTORE();
2899 			if (ret) {
2900 				CTR2(KTR_IW_CXGBE, "%s:ced6 %p", __func__, ep);
2901 				STOP_EP_TIMER(ep);
2902 				send_abort(ep);
2903 				ep->com.state = DEAD;
2904 				close_complete_upcall(ep, -ECONNRESET);
2905 				set_bit(EP_DISC_FAIL, &ep->com.history);
2906 				if (ep->com.qp) {
2907 					struct c4iw_qp_attributes attrs = {0};
2908 
2909 					attrs.next_state = C4IW_QP_STATE_ERROR;
2910 					ret = c4iw_modify_qp(
2911 							ep->com.dev, ep->com.qp,
2912 							C4IW_QP_ATTR_NEXT_STATE,
2913 							&attrs, 1);
2914 					CTR3(KTR_IW_CXGBE, "%s:ced7 %p ret %d",
2915 						__func__, ep, ret);
2916 				}
2917 			}
2918 		}
2919 	}
2920 	c4iw_put_ep(&ep->com);
2921 	CTR2(KTR_IW_CXGBE, "%s:cedE %p", __func__, ep);
2922 	return ret;
2923 }
2924 
2925 #ifdef C4IW_EP_REDIRECT
2926 int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
2927 		struct l2t_entry *l2t)
2928 {
2929 	struct c4iw_ep *ep = ctx;
2930 
2931 	if (ep->dst != old)
2932 		return 0;
2933 
2934 	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
2935 			l2t);
2936 	dst_hold(new);
2937 	cxgb4_l2t_release(ep->l2t);
2938 	ep->l2t = l2t;
2939 	dst_release(old);
2940 	ep->dst = new;
2941 	return 1;
2942 }
2943 #endif
2944 
2945 
2946 
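/*
 * MPA timer expiry.  This runs in timer (callout) context, so it only
 * marks the ep and queues it for the taskqueue; process_timeout() does
 * the real work.
 */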
2947 static void ep_timeout(unsigned long arg)
2948 {
2949 	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
2950 
2951 	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
2952 
2953 		/*
2954 		 * Only insert if it is not already on the list.
2955 		 */
2956 		if (!(ep->com.ep_events & C4IW_EVENT_TIMEOUT)) {
2957 			CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep);
2958 			add_ep_to_req_list(ep, C4IW_EVENT_TIMEOUT);
2959 		}
2960 	}
2961 }
2962 
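/*
 * FW6 work-request reply: the first 8 bytes carry the completion status in
 * bits 8..15, the second 8 bytes carry the c4iw_wr_wait cookie to wake up.
 */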
2963 static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl)
2964 {
2965 	uint64_t val = be64toh(*rpl);
2966 	int ret;
2967 	struct c4iw_wr_wait *wr_waitp;
2968 
2969 	ret = (int)((val >> 8) & 0xff);
2970 	wr_waitp = (struct c4iw_wr_wait *)rpl[1];
2971 	CTR3(KTR_IW_CXGBE, "%s wr_waitp %p ret %u", __func__, wr_waitp, ret);
2972 	if (wr_waitp)
2973 		c4iw_wake_up(wr_waitp, ret ? -ret : 0);
2974 
2975 	return (0);
2976 }
2977 
2978 static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl)
2979 {
2980 	struct cqe_list_entry *cle;
2981 	unsigned long flag;
2982 
2983 	cle = malloc(sizeof(*cle), M_CXGBE, M_NOWAIT);
	if (cle == NULL)
		return (0);	/* XXX: allocation failure drops this error CQE. */
2984 	cle->rhp = sc->iwarp_softc;
2985 	cle->err_cqe = *(const struct t4_cqe *)(&rpl[0]);
2986 
2987 	spin_lock_irqsave(&err_cqe_lock, flag);
2988 	list_add_tail(&cle->entry, &err_cqe_list);
2989 	queue_work(c4iw_taskq, &c4iw_task);
2990 	spin_unlock_irqrestore(&err_cqe_lock, flag);
2991 
2992 	return (0);
2993 }
2994 
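/*
 * A TERMINATE was received from the peer: move the associated QP to
 * TERMINATE state so the error is surfaced to the ULP.
 */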
2995 static int
2996 process_terminate(struct c4iw_ep *ep)
2997 {
2998 	struct c4iw_qp_attributes attrs = {0};
2999 
3000 	CTR2(KTR_IW_CXGBE, "%s:tB %p", __func__, ep);
3001 
3002 	if (ep && ep->com.qp) {
3003 
3004 		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n",
3005 				ep->hwtid, ep->com.qp->wq.sq.qid);
3006 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
3007 		c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs,
3008 				1);
3009 	} else
3010 		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n",
3011 								ep ? ep->hwtid : 0);
3012 	CTR2(KTR_IW_CXGBE, "%s:tE %p", __func__, ep);
3013 
3014 	return 0;
3015 }
3016 
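/*
 * Module init: register the CPL/FW6 message handlers with the base driver
 * and create the single-threaded taskqueue that process_req() runs on.
 */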
3017 int __init c4iw_cm_init(void)
3018 {
3019 
3020 	t4_register_cpl_handler(CPL_RDMA_TERMINATE, terminate);
3021 	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, fw6_wr_rpl);
3022 	t4_register_fw_msg_handler(FW6_TYPE_CQE, fw6_cqe_handler);
3023 	t4_register_an_handler(c4iw_ev_handler);
3024 
3025 	TAILQ_INIT(&req_list);
3026 	spin_lock_init(&req_lock);
3027 	INIT_LIST_HEAD(&err_cqe_list);
3028 	spin_lock_init(&err_cqe_lock);
3029 
3030 	INIT_WORK(&c4iw_task, process_req);
3031 
3032 	c4iw_taskq = create_singlethread_workqueue("iw_cxgbe");
3033 	if (!c4iw_taskq)
3034 		return -ENOMEM;
3035 
3036 	return 0;
3037 }
3038 
3039 void __exit c4iw_cm_term(void)
3040 {
3041 	WARN_ON(!TAILQ_EMPTY(&req_list));
3042 	WARN_ON(!list_empty(&err_cqe_list));
3043 	flush_workqueue(c4iw_taskq);
3044 	destroy_workqueue(c4iw_taskq);
3045 
3046 	t4_register_cpl_handler(CPL_RDMA_TERMINATE, NULL);
3047 	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, NULL);
3048 	t4_register_fw_msg_handler(FW6_TYPE_CQE, NULL);
3049 	t4_register_an_handler(NULL);
3050 }
3051 #endif
3052