1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34 #include <sys/cdefs.h>
35 #include "opt_inet.h"
36
37 #ifdef TCP_OFFLOAD
38 #include <sys/types.h>
39 #include <sys/malloc.h>
40 #include <sys/socket.h>
41 #include <sys/socketvar.h>
42 #include <sys/sockio.h>
43 #include <sys/taskqueue.h>
44 #include <netinet/in.h>
45 #include <net/route.h>
46 #include <net/route/nhop.h>
47
48 #include <netinet/in_systm.h>
49 #include <netinet/in_pcb.h>
50 #include <netinet6/in6_pcb.h>
51 #include <netinet/ip.h>
52 #include <netinet/in_fib.h>
53 #include <netinet6/in6_fib.h>
54 #include <netinet6/scope6_var.h>
55 #include <netinet/ip_var.h>
56 #include <netinet/tcp_var.h>
57 #include <netinet/tcp.h>
58 #include <netinet/tcpip.h>
59
60 #include <netinet/toecore.h>
61
62 struct sge_iq;
63 struct rss_header;
64 struct cpl_set_tcb_rpl;
65 #include <linux/types.h>
66 #include "offload.h"
67 #include "tom/t4_tom.h"
68
69 #define TOEPCB(so) ((struct toepcb *)(sototcpcb((so))->t_toe))
70
71 #include "iw_cxgbe.h"
72 #include <linux/module.h>
73 #include <linux/workqueue.h>
74 #include <linux/if_vlan.h>
75 #include <net/netevent.h>
76 #include <rdma/rdma_cm.h>
77
78 static spinlock_t req_lock;
79 static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list;
80 static struct work_struct c4iw_task;
81 static struct workqueue_struct *c4iw_taskq;
82 static LIST_HEAD(err_cqe_list);
83 static spinlock_t err_cqe_lock;
84 static LIST_HEAD(listen_port_list);
85 static DEFINE_MUTEX(listen_port_mutex);
86
87 static void process_req(struct work_struct *ctx);
88 static void start_ep_timer(struct c4iw_ep *ep);
89 static int stop_ep_timer(struct c4iw_ep *ep);
90 static int set_tcpinfo(struct c4iw_ep *ep);
91 static void process_timeout(struct c4iw_ep *ep);
92 static void process_err_cqes(void);
93 static void *alloc_ep(int size, gfp_t flags);
94 static void close_socket(struct socket *so);
95 static int send_mpa_req(struct c4iw_ep *ep);
96 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen);
97 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen);
98 static void close_complete_upcall(struct c4iw_ep *ep, int status);
99 static int send_abort(struct c4iw_ep *ep);
100 static void peer_close_upcall(struct c4iw_ep *ep);
101 static void peer_abort_upcall(struct c4iw_ep *ep);
102 static void connect_reply_upcall(struct c4iw_ep *ep, int status);
103 static int connect_request_upcall(struct c4iw_ep *ep);
104 static void established_upcall(struct c4iw_ep *ep);
105 static int process_mpa_reply(struct c4iw_ep *ep);
106 static int process_mpa_request(struct c4iw_ep *ep);
107 static void process_peer_close(struct c4iw_ep *ep);
108 static void process_conn_error(struct c4iw_ep *ep);
109 static void process_close_complete(struct c4iw_ep *ep);
110 static void ep_timeout(unsigned long arg);
111 static void setiwsockopt(struct socket *so);
112 static void init_iwarp_socket(struct socket *so, void *arg);
113 static void uninit_iwarp_socket(struct socket *so);
114 static void process_data(struct c4iw_ep *ep);
115 static void process_connected(struct c4iw_ep *ep);
116 static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag);
117 static void process_socket_event(struct c4iw_ep *ep);
118 static void release_ep_resources(struct c4iw_ep *ep);
119 static int process_terminate(struct c4iw_ep *ep);
120 static int terminate(struct sge_iq *iq, const struct rss_header *rss,
121 struct mbuf *m);
122 static int add_ep_to_req_list(struct c4iw_ep *ep, int ep_events);
123 static struct listen_port_info *
124 add_ep_to_listenlist(struct c4iw_listen_ep *lep);
125 static int rem_ep_from_listenlist(struct c4iw_listen_ep *lep);
126 static struct c4iw_listen_ep *
127 find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so);
128 static int get_ifnet_from_raddr(struct sockaddr_storage *raddr,
129 if_t *ifp);
130 static void process_newconn(struct c4iw_listen_ep *master_lep,
131 struct socket *new_so);
132 #define START_EP_TIMER(ep) \
133 do { \
134 CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \
135 __func__, __LINE__, (ep)); \
136 start_ep_timer(ep); \
137 } while (0)
138
139 #define STOP_EP_TIMER(ep) \
140 ({ \
141 CTR3(KTR_IW_CXGBE, "stop_ep_timer (%s:%d) ep %p", \
142 __func__, __LINE__, (ep)); \
143 stop_ep_timer(ep); \
144 })
145
146 #define GET_LOCAL_ADDR(pladdr, so) \
147 do { \
148 struct inpcb *__inp = sotoinpcb(so); \
149 KASSERT(__inp != NULL, \
150 ("GET_LOCAL_ADDR(%s):so:%p, inp = NULL", __func__, so)); \
151 if (__inp->inp_vflag & INP_IPV4) \
152 in_getsockaddr(so, (struct sockaddr *)pladdr); \
153 else \
154 in6_getsockaddr(so, (struct sockaddr *)pladdr); \
155 } while (0)
156
157 #define GET_REMOTE_ADDR(praddr, so) \
158 do { \
159 struct inpcb *__inp = sotoinpcb(so); \
160 KASSERT(__inp != NULL, \
161 ("GET_REMOTE_ADDR(%s):so:%p, inp = NULL", __func__, so)); \
162 if (__inp->inp_vflag & INP_IPV4) \
163 in_getpeeraddr(so, (struct sockaddr *)praddr); \
164 else \
165 in6_getpeeraddr(so, (struct sockaddr *)praddr); \
166 } while (0)
167
168 static char *states[] = {
169 "idle",
170 "listen",
171 "connecting",
172 "mpa_wait_req",
173 "mpa_req_sent",
174 "mpa_req_rcvd",
175 "mpa_rep_sent",
176 "fpdu_mode",
177 "aborting",
178 "closing",
179 "moribund",
180 "dead",
181 NULL,
182 };
183
deref_cm_id(struct c4iw_ep_common * epc)184 static void deref_cm_id(struct c4iw_ep_common *epc)
185 {
186 epc->cm_id->rem_ref(epc->cm_id);
187 epc->cm_id = NULL;
188 set_bit(CM_ID_DEREFED, &epc->history);
189 }
190
ref_cm_id(struct c4iw_ep_common * epc)191 static void ref_cm_id(struct c4iw_ep_common *epc)
192 {
193 set_bit(CM_ID_REFED, &epc->history);
194 epc->cm_id->add_ref(epc->cm_id);
195 }
196
deref_qp(struct c4iw_ep * ep)197 static void deref_qp(struct c4iw_ep *ep)
198 {
199 c4iw_qp_rem_ref(&ep->com.qp->ibqp);
200 clear_bit(QP_REFERENCED, &ep->com.flags);
201 set_bit(QP_DEREFED, &ep->com.history);
202 }
203
ref_qp(struct c4iw_ep * ep)204 static void ref_qp(struct c4iw_ep *ep)
205 {
206 set_bit(QP_REFERENCED, &ep->com.flags);
207 set_bit(QP_REFED, &ep->com.history);
208 c4iw_qp_add_ref(&ep->com.qp->ibqp);
209 }
210 /* allocated per TCP port while listening */
211 struct listen_port_info {
212 uint16_t port_num; /* TCP port address */
213 struct list_head list; /* belongs to listen_port_list */
214 struct list_head lep_list; /* per port lep list */
215 uint32_t refcnt; /* number of lep's listening */
216 };
217
218 /*
219 * Following two lists are used to manage INADDR_ANY listeners:
220 * 1)listen_port_list
221 * 2)lep_list
222 *
223 * Below is the INADDR_ANY listener lists overview on a system with a two port
224 * adapter:
225 * |------------------|
226 * |listen_port_list |
227 * |------------------|
228 * |
229 * | |-----------| |-----------|
230 * | | port_num:X| | port_num:X|
231 * |--------------|-list------|-------|-list------|-------....
232 * | lep_list----| | lep_list----|
233 * | refcnt | | | refcnt | |
234 * | | | | | |
235 * | | | | | |
236 * |-----------| | |-----------| |
237 * | |
238 * | |
239 * | |
240 * | | lep1 lep2
241 * | | |----------------| |----------------|
242 * | |----| listen_ep_list |----| listen_ep_list |
243 * | |----------------| |----------------|
244 * |
245 * |
246 * | lep1 lep2
247 * | |----------------| |----------------|
248 * |---| listen_ep_list |----| listen_ep_list |
249 * |----------------| |----------------|
250 *
251 * Because of two port adapter, the number of lep's are two(lep1 & lep2) for
252 * each TCP port number.
253 *
254 * Here 'lep1' is always marked as Master lep, because solisten() is always
255 * called through first lep.
256 *
257 */
258 static struct listen_port_info *
add_ep_to_listenlist(struct c4iw_listen_ep * lep)259 add_ep_to_listenlist(struct c4iw_listen_ep *lep)
260 {
261 uint16_t port;
262 struct listen_port_info *port_info = NULL;
263 struct sockaddr_storage *laddr = &lep->com.local_addr;
264
265 port = (laddr->ss_family == AF_INET) ?
266 ((struct sockaddr_in *)laddr)->sin_port :
267 ((struct sockaddr_in6 *)laddr)->sin6_port;
268
269 mutex_lock(&listen_port_mutex);
270
271 list_for_each_entry(port_info, &listen_port_list, list)
272 if (port_info->port_num == port)
273 goto found_port;
274
275 port_info = malloc(sizeof(*port_info), M_CXGBE, M_WAITOK);
276 port_info->port_num = port;
277 port_info->refcnt = 0;
278
279 list_add_tail(&port_info->list, &listen_port_list);
280 INIT_LIST_HEAD(&port_info->lep_list);
281
282 found_port:
283 port_info->refcnt++;
284 list_add_tail(&lep->listen_ep_list, &port_info->lep_list);
285 mutex_unlock(&listen_port_mutex);
286 return port_info;
287 }
288
289 static int
rem_ep_from_listenlist(struct c4iw_listen_ep * lep)290 rem_ep_from_listenlist(struct c4iw_listen_ep *lep)
291 {
292 uint16_t port;
293 struct listen_port_info *port_info = NULL;
294 struct sockaddr_storage *laddr = &lep->com.local_addr;
295 int refcnt = 0;
296
297 port = (laddr->ss_family == AF_INET) ?
298 ((struct sockaddr_in *)laddr)->sin_port :
299 ((struct sockaddr_in6 *)laddr)->sin6_port;
300
301 mutex_lock(&listen_port_mutex);
302
303 /* get the port_info structure based on the lep's port address */
304 list_for_each_entry(port_info, &listen_port_list, list) {
305 if (port_info->port_num == port) {
306 port_info->refcnt--;
307 refcnt = port_info->refcnt;
308 /* remove the current lep from the listen list */
309 list_del(&lep->listen_ep_list);
310 if (port_info->refcnt == 0) {
311 /* Remove this entry from the list as there
312 * are no more listeners for this port_num.
313 */
314 list_del(&port_info->list);
315 kfree(port_info);
316 }
317 break;
318 }
319 }
320 mutex_unlock(&listen_port_mutex);
321 return refcnt;
322 }
323
324 /*
325 * Find the lep that belongs to the ifnet on which the SYN frame was received.
326 */
327 struct c4iw_listen_ep *
find_real_listen_ep(struct c4iw_listen_ep * master_lep,struct socket * so)328 find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so)
329 {
330 struct adapter *adap = NULL;
331 struct c4iw_listen_ep *lep = NULL;
332 if_t ifp = NULL, hw_ifp = NULL;
333 struct listen_port_info *port_info = NULL;
334 int i = 0, found_portinfo = 0, found_lep = 0;
335 uint16_t port;
336
337 /*
338 * STEP 1: Figure out 'ifp' of the physical interface, not pseudo
339 * interfaces like vlan, lagg, etc..
340 * TBD: lagg support, lagg + vlan support.
341 */
342 ifp = TOEPCB(so)->l2te->ifp;
343 if (if_gettype(ifp) == IFT_L2VLAN) {
344 hw_ifp = VLAN_TRUNKDEV(ifp);
345 if (hw_ifp == NULL) {
346 CTR4(KTR_IW_CXGBE, "%s: Failed to get parent ifnet of "
347 "vlan ifnet %p, sock %p, master_lep %p",
348 __func__, ifp, so, master_lep);
349 return (NULL);
350 }
351 } else
352 hw_ifp = ifp;
353
354 /* STEP 2: Find 'port_info' with listener local port address. */
355 port = (master_lep->com.local_addr.ss_family == AF_INET) ?
356 ((struct sockaddr_in *)&master_lep->com.local_addr)->sin_port :
357 ((struct sockaddr_in6 *)&master_lep->com.local_addr)->sin6_port;
358
359
360 mutex_lock(&listen_port_mutex);
361 list_for_each_entry(port_info, &listen_port_list, list)
362 if (port_info->port_num == port) {
363 found_portinfo =1;
364 break;
365 }
366 if (!found_portinfo)
367 goto out;
368
369 /* STEP 3: Traverse through list of lep's that are bound to the current
370 * TCP port address and find the lep that belongs to the ifnet on which
371 * the SYN frame was received.
372 */
373 list_for_each_entry(lep, &port_info->lep_list, listen_ep_list) {
374 adap = lep->com.dev->rdev.adap;
375 for_each_port(adap, i) {
376 if (hw_ifp == adap->port[i]->vi[0].ifp) {
377 found_lep =1;
378 goto out;
379 }
380 }
381 }
382 out:
383 mutex_unlock(&listen_port_mutex);
384 return found_lep ? lep : (NULL);
385 }
386
process_timeout(struct c4iw_ep * ep)387 static void process_timeout(struct c4iw_ep *ep)
388 {
389 struct c4iw_qp_attributes attrs = {0};
390 int abort = 1;
391
392 CTR4(KTR_IW_CXGBE, "%s ep :%p, tid:%u, state %d", __func__,
393 ep, ep->hwtid, ep->com.state);
394 set_bit(TIMEDOUT, &ep->com.history);
395 switch (ep->com.state) {
396 case MPA_REQ_SENT:
397 connect_reply_upcall(ep, -ETIMEDOUT);
398 break;
399 case MPA_REQ_WAIT:
400 case MPA_REQ_RCVD:
401 case MPA_REP_SENT:
402 case FPDU_MODE:
403 break;
404 case CLOSING:
405 case MORIBUND:
406 if (ep->com.cm_id && ep->com.qp) {
407 attrs.next_state = C4IW_QP_STATE_ERROR;
408 c4iw_modify_qp(ep->com.dev, ep->com.qp,
409 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
410 }
411 close_complete_upcall(ep, -ETIMEDOUT);
412 break;
413 case ABORTING:
414 case DEAD:
415 /*
416 * These states are expected if the ep timed out at the same
417 * time as another thread was calling stop_ep_timer().
418 * So we silently do nothing for these states.
419 */
420 abort = 0;
421 break;
422 default:
423 CTR4(KTR_IW_CXGBE, "%s unexpected state ep %p tid %u state %u"
424 , __func__, ep, ep->hwtid, ep->com.state);
425 abort = 0;
426 }
427 if (abort)
428 c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
429 c4iw_put_ep(&ep->com);
430 return;
431 }
432
433 struct cqe_list_entry {
434 struct list_head entry;
435 struct c4iw_dev *rhp;
436 struct t4_cqe err_cqe;
437 };
438
439 static void
process_err_cqes(void)440 process_err_cqes(void)
441 {
442 unsigned long flag;
443 struct cqe_list_entry *cle;
444
445 spin_lock_irqsave(&err_cqe_lock, flag);
446 while (!list_empty(&err_cqe_list)) {
447 struct list_head *tmp;
448 tmp = err_cqe_list.next;
449 list_del(tmp);
450 tmp->next = tmp->prev = NULL;
451 spin_unlock_irqrestore(&err_cqe_lock, flag);
452 cle = list_entry(tmp, struct cqe_list_entry, entry);
453 c4iw_ev_dispatch(cle->rhp, &cle->err_cqe);
454 free(cle, M_CXGBE);
455 spin_lock_irqsave(&err_cqe_lock, flag);
456 }
457 spin_unlock_irqrestore(&err_cqe_lock, flag);
458
459 return;
460 }
461
462 static void
process_req(struct work_struct * ctx)463 process_req(struct work_struct *ctx)
464 {
465 struct c4iw_ep_common *epc;
466 unsigned long flag;
467 int ep_events;
468
469 process_err_cqes();
470 spin_lock_irqsave(&req_lock, flag);
471 while (!TAILQ_EMPTY(&req_list)) {
472 epc = TAILQ_FIRST(&req_list);
473 TAILQ_REMOVE(&req_list, epc, entry);
474 epc->entry.tqe_prev = NULL;
475 ep_events = epc->ep_events;
476 epc->ep_events = 0;
477 spin_unlock_irqrestore(&req_lock, flag);
478 mutex_lock(&epc->mutex);
479 CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, ep_state %s events 0x%x",
480 __func__, epc->so, epc, states[epc->state], ep_events);
481 if (ep_events & C4IW_EVENT_TERM)
482 process_terminate((struct c4iw_ep *)epc);
483 if (ep_events & C4IW_EVENT_TIMEOUT)
484 process_timeout((struct c4iw_ep *)epc);
485 if (ep_events & C4IW_EVENT_SOCKET)
486 process_socket_event((struct c4iw_ep *)epc);
487 mutex_unlock(&epc->mutex);
488 c4iw_put_ep(epc);
489 process_err_cqes();
490 spin_lock_irqsave(&req_lock, flag);
491 }
492 spin_unlock_irqrestore(&req_lock, flag);
493 }
494
495 /*
496 * XXX: doesn't belong here in the iWARP driver.
497 * XXX: assumes that the connection was offloaded by cxgbe/t4_tom if TF_TOE is
498 * set. Is this a valid assumption for active open?
499 */
500 static int
set_tcpinfo(struct c4iw_ep * ep)501 set_tcpinfo(struct c4iw_ep *ep)
502 {
503 struct socket *so = ep->com.so;
504 struct inpcb *inp = sotoinpcb(so);
505 struct tcpcb *tp;
506 struct toepcb *toep;
507 int rc = 0;
508
509 INP_WLOCK(inp);
510 tp = intotcpcb(inp);
511 if ((tp->t_flags & TF_TOE) == 0) {
512 rc = EINVAL;
513 log(LOG_ERR, "%s: connection not offloaded (so %p, ep %p)\n",
514 __func__, so, ep);
515 goto done;
516 }
517 toep = TOEPCB(so);
518
519 ep->hwtid = toep->tid;
520 ep->snd_seq = tp->snd_nxt;
521 ep->rcv_seq = tp->rcv_nxt;
522 done:
523 INP_WUNLOCK(inp);
524 return (rc);
525
526 }
527 static int
get_ifnet_from_raddr(struct sockaddr_storage * raddr,if_t * ifp)528 get_ifnet_from_raddr(struct sockaddr_storage *raddr, if_t *ifp)
529 {
530 int err = 0;
531 struct nhop_object *nh;
532
533 if (raddr->ss_family == AF_INET) {
534 struct sockaddr_in *raddr4 = (struct sockaddr_in *)raddr;
535
536 nh = fib4_lookup(RT_DEFAULT_FIB, raddr4->sin_addr, 0,
537 NHR_NONE, 0);
538 } else {
539 struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)raddr;
540 struct in6_addr addr6;
541 uint32_t scopeid;
542
543 memset(&addr6, 0, sizeof(addr6));
544 in6_splitscope((struct in6_addr *)&raddr6->sin6_addr,
545 &addr6, &scopeid);
546 nh = fib6_lookup(RT_DEFAULT_FIB, &addr6, scopeid,
547 NHR_NONE, 0);
548 }
549
550 if (nh == NULL)
551 err = EHOSTUNREACH;
552 else
553 *ifp = nh->nh_ifp;
554 CTR2(KTR_IW_CXGBE, "%s: return: %d", __func__, err);
555 return err;
556 }
557
558 static void
close_socket(struct socket * so)559 close_socket(struct socket *so)
560 {
561 uninit_iwarp_socket(so);
562 soclose(so);
563 }
564
565 static void
process_peer_close(struct c4iw_ep * ep)566 process_peer_close(struct c4iw_ep *ep)
567 {
568 struct c4iw_qp_attributes attrs = {0};
569 int disconnect = 1;
570 int release = 0;
571
572 CTR4(KTR_IW_CXGBE, "%s:ppcB ep %p so %p state %s", __func__, ep,
573 ep->com.so, states[ep->com.state]);
574
575 switch (ep->com.state) {
576
577 case MPA_REQ_WAIT:
578 CTR2(KTR_IW_CXGBE, "%s:ppc1 %p MPA_REQ_WAIT DEAD",
579 __func__, ep);
580 /* Fallthrough */
581 case MPA_REQ_SENT:
582 CTR2(KTR_IW_CXGBE, "%s:ppc2 %p MPA_REQ_SENT DEAD",
583 __func__, ep);
584 ep->com.state = DEAD;
585 connect_reply_upcall(ep, -ECONNABORTED);
586
587 disconnect = 0;
588 STOP_EP_TIMER(ep);
589 close_socket(ep->com.so);
590 deref_cm_id(&ep->com);
591 release = 1;
592 break;
593
594 case MPA_REQ_RCVD:
595
596 /*
597 * We're gonna mark this puppy DEAD, but keep
598 * the reference on it until the ULP accepts or
599 * rejects the CR.
600 */
601 CTR2(KTR_IW_CXGBE, "%s:ppc3 %p MPA_REQ_RCVD CLOSING",
602 __func__, ep);
603 ep->com.state = CLOSING;
604 break;
605
606 case MPA_REP_SENT:
607 CTR2(KTR_IW_CXGBE, "%s:ppc4 %p MPA_REP_SENT CLOSING",
608 __func__, ep);
609 ep->com.state = CLOSING;
610 break;
611
612 case FPDU_MODE:
613 CTR2(KTR_IW_CXGBE, "%s:ppc5 %p FPDU_MODE CLOSING",
614 __func__, ep);
615 START_EP_TIMER(ep);
616 ep->com.state = CLOSING;
617 attrs.next_state = C4IW_QP_STATE_CLOSING;
618 c4iw_modify_qp(ep->com.dev, ep->com.qp,
619 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
620 peer_close_upcall(ep);
621 break;
622
623 case ABORTING:
624 CTR2(KTR_IW_CXGBE, "%s:ppc6 %p ABORTING (disconn)",
625 __func__, ep);
626 disconnect = 0;
627 break;
628
629 case CLOSING:
630 CTR2(KTR_IW_CXGBE, "%s:ppc7 %p CLOSING MORIBUND",
631 __func__, ep);
632 ep->com.state = MORIBUND;
633 disconnect = 0;
634 break;
635
636 case MORIBUND:
637 CTR2(KTR_IW_CXGBE, "%s:ppc8 %p MORIBUND DEAD", __func__,
638 ep);
639 STOP_EP_TIMER(ep);
640 if (ep->com.cm_id && ep->com.qp) {
641 attrs.next_state = C4IW_QP_STATE_IDLE;
642 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
643 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
644 }
645 close_socket(ep->com.so);
646 close_complete_upcall(ep, 0);
647 ep->com.state = DEAD;
648 release = 1;
649 disconnect = 0;
650 break;
651
652 case DEAD:
653 CTR2(KTR_IW_CXGBE, "%s:ppc9 %p DEAD (disconn)",
654 __func__, ep);
655 disconnect = 0;
656 break;
657
658 default:
659 panic("%s: ep %p state %d", __func__, ep,
660 ep->com.state);
661 break;
662 }
663
664
665 if (disconnect) {
666
667 CTR2(KTR_IW_CXGBE, "%s:ppca %p", __func__, ep);
668 c4iw_ep_disconnect(ep, 0, M_NOWAIT);
669 }
670 if (release) {
671
672 CTR2(KTR_IW_CXGBE, "%s:ppcb %p", __func__, ep);
673 c4iw_put_ep(&ep->com);
674 }
675 CTR2(KTR_IW_CXGBE, "%s:ppcE %p", __func__, ep);
676 return;
677 }
678
679 static void
process_conn_error(struct c4iw_ep * ep)680 process_conn_error(struct c4iw_ep *ep)
681 {
682 struct c4iw_qp_attributes attrs = {0};
683 int ret;
684 int state;
685
686 state = ep->com.state;
687 CTR5(KTR_IW_CXGBE, "%s:pceB ep %p so %p so->so_error %u state %s",
688 __func__, ep, ep->com.so, ep->com.so->so_error,
689 states[ep->com.state]);
690
691 switch (state) {
692
693 case MPA_REQ_WAIT:
694 STOP_EP_TIMER(ep);
695 c4iw_put_ep(&ep->parent_ep->com);
696 break;
697
698 case MPA_REQ_SENT:
699 STOP_EP_TIMER(ep);
700 connect_reply_upcall(ep, -ECONNRESET);
701 break;
702
703 case MPA_REP_SENT:
704 ep->com.rpl_err = ECONNRESET;
705 CTR1(KTR_IW_CXGBE, "waking up ep %p", ep);
706 break;
707
708 case MPA_REQ_RCVD:
709 break;
710
711 case MORIBUND:
712 case CLOSING:
713 STOP_EP_TIMER(ep);
714 /*FALLTHROUGH*/
715 case FPDU_MODE:
716
717 if (ep->com.cm_id && ep->com.qp) {
718
719 attrs.next_state = C4IW_QP_STATE_ERROR;
720 ret = c4iw_modify_qp(ep->com.qp->rhp,
721 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
722 &attrs, 1);
723 if (ret)
724 log(LOG_ERR,
725 "%s - qp <- error failed!\n",
726 __func__);
727 }
728 peer_abort_upcall(ep);
729 break;
730
731 case ABORTING:
732 break;
733
734 case DEAD:
735 CTR2(KTR_IW_CXGBE, "%s so_error %d IN DEAD STATE!!!!",
736 __func__, ep->com.so->so_error);
737 return;
738
739 default:
740 panic("%s: ep %p state %d", __func__, ep, state);
741 break;
742 }
743
744 if (state != ABORTING) {
745 close_socket(ep->com.so);
746 ep->com.state = DEAD;
747 c4iw_put_ep(&ep->com);
748 }
749 CTR2(KTR_IW_CXGBE, "%s:pceE %p", __func__, ep);
750 return;
751 }
752
753 static void
process_close_complete(struct c4iw_ep * ep)754 process_close_complete(struct c4iw_ep *ep)
755 {
756 struct c4iw_qp_attributes attrs = {0};
757 int release = 0;
758
759 CTR4(KTR_IW_CXGBE, "%s:pccB ep %p so %p state %s", __func__, ep,
760 ep->com.so, states[ep->com.state]);
761
762 /* The cm_id may be null if we failed to connect */
763 set_bit(CLOSE_CON_RPL, &ep->com.history);
764
765 switch (ep->com.state) {
766
767 case CLOSING:
768 CTR2(KTR_IW_CXGBE, "%s:pcc1 %p CLOSING MORIBUND",
769 __func__, ep);
770 ep->com.state = MORIBUND;
771 break;
772
773 case MORIBUND:
774 CTR2(KTR_IW_CXGBE, "%s:pcc1 %p MORIBUND DEAD", __func__,
775 ep);
776 STOP_EP_TIMER(ep);
777
778 if ((ep->com.cm_id) && (ep->com.qp)) {
779
780 CTR2(KTR_IW_CXGBE, "%s:pcc2 %p QP_STATE_IDLE",
781 __func__, ep);
782 attrs.next_state = C4IW_QP_STATE_IDLE;
783 c4iw_modify_qp(ep->com.dev,
784 ep->com.qp,
785 C4IW_QP_ATTR_NEXT_STATE,
786 &attrs, 1);
787 }
788
789 close_socket(ep->com.so);
790 close_complete_upcall(ep, 0);
791 ep->com.state = DEAD;
792 release = 1;
793 break;
794
795 case ABORTING:
796 CTR2(KTR_IW_CXGBE, "%s:pcc5 %p ABORTING", __func__, ep);
797 break;
798
799 case DEAD:
800 CTR2(KTR_IW_CXGBE, "%s:pcc6 %p DEAD", __func__, ep);
801 break;
802 default:
803 CTR2(KTR_IW_CXGBE, "%s:pcc7 %p unknown ep state",
804 __func__, ep);
805 panic("%s:pcc6 %p unknown ep state", __func__, ep);
806 break;
807 }
808
809 if (release) {
810
811 CTR2(KTR_IW_CXGBE, "%s:pcc8 %p", __func__, ep);
812 release_ep_resources(ep);
813 }
814 CTR2(KTR_IW_CXGBE, "%s:pccE %p", __func__, ep);
815 return;
816 }
817
818 static void
setiwsockopt(struct socket * so)819 setiwsockopt(struct socket *so)
820 {
821 int rc;
822 struct sockopt sopt;
823 int on = 1;
824
825 sopt.sopt_dir = SOPT_SET;
826 sopt.sopt_level = IPPROTO_TCP;
827 sopt.sopt_name = TCP_NODELAY;
828 sopt.sopt_val = (caddr_t)&on;
829 sopt.sopt_valsize = sizeof on;
830 sopt.sopt_td = NULL;
831 rc = -sosetopt(so, &sopt);
832 if (rc) {
833 log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n",
834 __func__, so, rc);
835 }
836 }
837
838 static void
init_iwarp_socket(struct socket * so,void * arg)839 init_iwarp_socket(struct socket *so, void *arg)
840 {
841 if (SOLISTENING(so)) {
842 SOLISTEN_LOCK(so);
843 solisten_upcall_set(so, c4iw_so_upcall, arg);
844 so->so_state |= SS_NBIO;
845 SOLISTEN_UNLOCK(so);
846 } else {
847 SOCKBUF_LOCK(&so->so_rcv);
848 soupcall_set(so, SO_RCV, c4iw_so_upcall, arg);
849 so->so_state |= SS_NBIO;
850 SOCKBUF_UNLOCK(&so->so_rcv);
851 }
852 }
853
854 static void
uninit_iwarp_socket(struct socket * so)855 uninit_iwarp_socket(struct socket *so)
856 {
857 if (SOLISTENING(so)) {
858 SOLISTEN_LOCK(so);
859 solisten_upcall_set(so, NULL, NULL);
860 SOLISTEN_UNLOCK(so);
861 } else {
862 SOCKBUF_LOCK(&so->so_rcv);
863 soupcall_clear(so, SO_RCV);
864 SOCKBUF_UNLOCK(&so->so_rcv);
865 }
866 }
867
868 static void
process_data(struct c4iw_ep * ep)869 process_data(struct c4iw_ep *ep)
870 {
871 int ret = 0;
872 int disconnect = 0;
873 struct c4iw_qp_attributes attrs = {0};
874
875 CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sbused %d", __func__,
876 ep->com.so, ep, states[ep->com.state], sbused(&ep->com.so->so_rcv));
877
878 switch (ep->com.state) {
879 case MPA_REQ_SENT:
880 disconnect = process_mpa_reply(ep);
881 break;
882 case MPA_REQ_WAIT:
883 disconnect = process_mpa_request(ep);
884 if (disconnect)
885 /* Refered in process_newconn() */
886 c4iw_put_ep(&ep->parent_ep->com);
887 break;
888 case FPDU_MODE:
889 MPASS(ep->com.qp != NULL);
890 attrs.next_state = C4IW_QP_STATE_TERMINATE;
891 ret = c4iw_modify_qp(ep->com.dev, ep->com.qp,
892 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
893 if (ret != -EINPROGRESS)
894 disconnect = 1;
895 break;
896 default:
897 log(LOG_ERR, "%s: Unexpected streaming data. ep %p, "
898 "state %d, so %p, so_state 0x%x, sbused %u\n",
899 __func__, ep, ep->com.state, ep->com.so,
900 ep->com.so->so_state, sbused(&ep->com.so->so_rcv));
901 break;
902 }
903 if (disconnect)
904 c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
905
906 }
907
908 static void
process_connected(struct c4iw_ep * ep)909 process_connected(struct c4iw_ep *ep)
910 {
911 struct socket *so = ep->com.so;
912
913 if ((so->so_state & SS_ISCONNECTED) && !so->so_error) {
914 if (send_mpa_req(ep))
915 goto err;
916 } else {
917 connect_reply_upcall(ep, -so->so_error);
918 goto err;
919 }
920 return;
921 err:
922 close_socket(so);
923 ep->com.state = DEAD;
924 c4iw_put_ep(&ep->com);
925 return;
926 }
927
c4iw_zero_addr(struct sockaddr * addr)928 static inline bool c4iw_zero_addr(struct sockaddr *addr)
929 {
930 struct in6_addr *ip6;
931
932 if (addr->sa_family == AF_INET)
933 return (((struct sockaddr_in *)addr)->sin_addr.s_addr == 0);
934 else {
935 ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
936 return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
937 ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
938 }
939 }
940
941 #define _IN_LOOPBACK(i) (((in_addr_t)(i) & 0xff000000) == 0x7f000000)
c4iw_loopback_addr(struct sockaddr * addr,struct vnet * vnet)942 static inline bool c4iw_loopback_addr(struct sockaddr *addr, struct vnet *vnet)
943 {
944 bool ret;
945
946 if (addr->sa_family == AF_INET) {
947 if (vnet == NULL)
948 ret = _IN_LOOPBACK(ntohl(((struct sockaddr_in *) addr)->sin_addr.s_addr));
949 else {
950 CURVNET_SET_QUIET(vnet);
951 ret = IN_LOOPBACK(ntohl(((struct sockaddr_in *) addr)->sin_addr.s_addr));
952 CURVNET_RESTORE();
953 }
954 } else {
955 ret = IN6_IS_ADDR_LOOPBACK(&((struct sockaddr_in6 *) addr)->sin6_addr);
956 }
957 return (ret);
958 }
959 #undef _IN_LOOPBACK
960
c4iw_any_addr(struct sockaddr * addr,struct vnet * vnet)961 static inline bool c4iw_any_addr(struct sockaddr *addr, struct vnet *vnet)
962 {
963 return c4iw_zero_addr(addr) || c4iw_loopback_addr(addr, vnet);
964 }
965
966 static void
process_newconn(struct c4iw_listen_ep * master_lep,struct socket * new_so)967 process_newconn(struct c4iw_listen_ep *master_lep, struct socket *new_so)
968 {
969 struct c4iw_listen_ep *real_lep = NULL;
970 struct c4iw_ep *new_ep = NULL;
971 struct sockaddr_storage remote = { .ss_len = sizeof(remote) };
972 int ret = 0;
973
974 MPASS(new_so != NULL);
975
976 if (c4iw_any_addr((struct sockaddr *)&master_lep->com.local_addr,
977 new_so->so_vnet)) {
978 /* Here we need to find the 'real_lep' that belongs to the
979 * incomming socket's network interface, such that the newly
980 * created 'ep' can be attached to the real 'lep'.
981 */
982 real_lep = find_real_listen_ep(master_lep, new_so);
983 if (real_lep == NULL) {
984 CTR2(KTR_IW_CXGBE, "%s: Could not find the real listen "
985 "ep for sock: %p", __func__, new_so);
986 log(LOG_ERR,"%s: Could not find the real listen ep for "
987 "sock: %p\n", __func__, new_so);
988 /* FIXME: properly free the 'new_so' in failure case.
989 * Use of soabort() and soclose() are not legal
990 * here(before soaccept()).
991 */
992 return;
993 }
994 } else /* for Non-Wildcard address, master_lep is always the real_lep */
995 real_lep = master_lep;
996
997 new_ep = alloc_ep(sizeof(*new_ep), GFP_KERNEL);
998
999 CTR6(KTR_IW_CXGBE, "%s: master_lep %p, real_lep: %p, new ep %p, "
1000 "listening so %p, new so %p", __func__, master_lep, real_lep,
1001 new_ep, master_lep->com.so, new_so);
1002
1003 new_ep->com.dev = real_lep->com.dev;
1004 new_ep->com.so = new_so;
1005 new_ep->com.cm_id = NULL;
1006 new_ep->com.thread = real_lep->com.thread;
1007 new_ep->parent_ep = real_lep;
1008
1009 GET_LOCAL_ADDR(&new_ep->com.local_addr, new_so);
1010 GET_REMOTE_ADDR(&new_ep->com.remote_addr, new_so);
1011 c4iw_get_ep(&real_lep->com);
1012 init_timer(&new_ep->timer);
1013 new_ep->com.state = MPA_REQ_WAIT;
1014
1015 setiwsockopt(new_so);
1016 ret = soaccept(new_so, (struct sockaddr *)&remote);
1017 if (ret != 0) {
1018 CTR4(KTR_IW_CXGBE,
1019 "%s:listen sock:%p, new sock:%p, ret:%d",
1020 __func__, master_lep->com.so, new_so, ret);
1021 soclose(new_so);
1022 c4iw_put_ep(&new_ep->com);
1023 c4iw_put_ep(&real_lep->com);
1024 return;
1025 }
1026
1027 START_EP_TIMER(new_ep);
1028
1029 /* MPA request might have been queued up on the socket already, so we
1030 * initialize the socket/upcall_handler under lock to prevent processing
1031 * MPA request on another thread(via process_req()) simultaneously.
1032 */
1033 c4iw_get_ep(&new_ep->com); /* Dereferenced at the end below, this is to
1034 avoid freeing of ep before ep unlock. */
1035 mutex_lock(&new_ep->com.mutex);
1036 init_iwarp_socket(new_so, &new_ep->com);
1037
1038 ret = process_mpa_request(new_ep);
1039 if (ret) {
1040 /* ABORT */
1041 c4iw_ep_disconnect(new_ep, 1, GFP_KERNEL);
1042 c4iw_put_ep(&real_lep->com);
1043 }
1044 mutex_unlock(&new_ep->com.mutex);
1045 c4iw_put_ep(&new_ep->com);
1046 return;
1047 }
1048
1049 static int
add_ep_to_req_list(struct c4iw_ep * ep,int new_ep_event)1050 add_ep_to_req_list(struct c4iw_ep *ep, int new_ep_event)
1051 {
1052 unsigned long flag;
1053
1054 spin_lock_irqsave(&req_lock, flag);
1055 if (ep && ep->com.so) {
1056 ep->com.ep_events |= new_ep_event;
1057 if (!ep->com.entry.tqe_prev) {
1058 c4iw_get_ep(&ep->com);
1059 TAILQ_INSERT_TAIL(&req_list, &ep->com, entry);
1060 queue_work(c4iw_taskq, &c4iw_task);
1061 }
1062 }
1063 spin_unlock_irqrestore(&req_lock, flag);
1064
1065 return (0);
1066 }
1067
1068 static int
c4iw_so_upcall(struct socket * so,void * arg,int waitflag)1069 c4iw_so_upcall(struct socket *so, void *arg, int waitflag)
1070 {
1071 struct c4iw_ep *ep = arg;
1072
1073 CTR6(KTR_IW_CXGBE,
1074 "%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p",
1075 __func__, so, so->so_state, ep, states[ep->com.state],
1076 ep->com.entry.tqe_prev);
1077
1078 MPASS(ep->com.so == so);
1079 /*
1080 * Wake up any threads waiting in rdma_init()/rdma_fini(),
1081 * with locks held.
1082 */
1083 if (so->so_error || c4iw_stopped(&ep->com.dev->rdev))
1084 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
1085 add_ep_to_req_list(ep, C4IW_EVENT_SOCKET);
1086
1087 return (SU_OK);
1088 }
1089
1090
1091 static int
terminate(struct sge_iq * iq,const struct rss_header * rss,struct mbuf * m)1092 terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
1093 {
1094 struct adapter *sc = iq->adapter;
1095 const struct cpl_rdma_terminate *cpl = mtod(m, const void *);
1096 unsigned int tid = GET_TID(cpl);
1097 struct toepcb *toep = lookup_tid(sc, tid);
1098 struct socket *so;
1099 struct c4iw_ep *ep;
1100
1101 INP_WLOCK(toep->inp);
1102 so = inp_inpcbtosocket(toep->inp);
1103 ep = so->so_rcv.sb_upcallarg;
1104 INP_WUNLOCK(toep->inp);
1105
1106 CTR3(KTR_IW_CXGBE, "%s: so %p, ep %p", __func__, so, ep);
1107 add_ep_to_req_list(ep, C4IW_EVENT_TERM);
1108
1109 return 0;
1110 }
1111
1112 static void
process_socket_event(struct c4iw_ep * ep)1113 process_socket_event(struct c4iw_ep *ep)
1114 {
1115 int state = ep->com.state;
1116 struct socket *so = ep->com.so;
1117
1118 if (ep->com.state == DEAD) {
1119 CTR3(KTR_IW_CXGBE, "%s: Pending socket event discarded "
1120 "ep %p ep_state %s", __func__, ep, states[state]);
1121 return;
1122 }
1123
1124 CTR6(KTR_IW_CXGBE, "process_socket_event: so %p, so_state 0x%x, "
1125 "so_err %d, sb_state 0x%x, ep %p, ep_state %s", so, so->so_state,
1126 so->so_error, so->so_rcv.sb_state, ep, states[state]);
1127
1128 if (state == CONNECTING) {
1129 process_connected(ep);
1130 return;
1131 }
1132
1133 if (state == LISTEN) {
1134 struct c4iw_listen_ep *lep = (struct c4iw_listen_ep *)ep;
1135 struct socket *listen_so = so, *new_so = NULL;
1136 int error = 0;
1137
1138 SOLISTEN_LOCK(listen_so);
1139 do {
1140 error = solisten_dequeue(listen_so, &new_so,
1141 SOCK_NONBLOCK);
1142 if (error) {
1143 CTR4(KTR_IW_CXGBE, "%s: lep %p listen_so %p "
1144 "error %d", __func__, lep, listen_so,
1145 error);
1146 return;
1147 }
1148 process_newconn(lep, new_so);
1149
1150 /* solisten_dequeue() unlocks while return, so aquire
1151 * lock again for sol_qlen and also for next iteration.
1152 */
1153 SOLISTEN_LOCK(listen_so);
1154 } while (listen_so->sol_qlen);
1155 SOLISTEN_UNLOCK(listen_so);
1156
1157 return;
1158 }
1159
1160 /* connection error */
1161 if (so->so_error) {
1162 process_conn_error(ep);
1163 return;
1164 }
1165
1166 /* peer close */
1167 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state <= CLOSING) {
1168 process_peer_close(ep);
1169 /*
1170 * check whether socket disconnect event is pending before
1171 * returning. Fallthrough if yes.
1172 */
1173 if (!(so->so_state & SS_ISDISCONNECTED))
1174 return;
1175 }
1176
1177 /* close complete */
1178 if (so->so_state & SS_ISDISCONNECTED) {
1179 process_close_complete(ep);
1180 return;
1181 }
1182
1183 /* rx data */
1184 if (sbused(&ep->com.so->so_rcv)) {
1185 process_data(ep);
1186 return;
1187 }
1188
1189 /* Socket events for 'MPA Request Received' and 'Close Complete'
1190 * were already processed earlier in their previous events handlers.
1191 * Hence, these socket events are skipped.
1192 * And any other socket events must have handled above.
1193 */
1194 MPASS((ep->com.state == MPA_REQ_RCVD) || (ep->com.state == MORIBUND));
1195
1196 if ((ep->com.state != MPA_REQ_RCVD) && (ep->com.state != MORIBUND))
1197 log(LOG_ERR, "%s: Unprocessed socket event so %p, "
1198 "so_state 0x%x, so_err %d, sb_state 0x%x, ep %p, ep_state %s\n",
1199 __func__, so, so->so_state, so->so_error, so->so_rcv.sb_state,
1200 ep, states[state]);
1201
1202 }
1203
1204 SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
1205 "iw_cxgbe driver parameters");
1206
1207 static int dack_mode = 0;
1208 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RWTUN, &dack_mode, 0,
1209 "Delayed ack mode (default = 0)");
1210
1211 int c4iw_max_read_depth = 8;
1212 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RWTUN, &c4iw_max_read_depth, 0,
1213 "Per-connection max ORD/IRD (default = 8)");
1214
1215 static int enable_tcp_timestamps;
1216 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RWTUN, &enable_tcp_timestamps, 0,
1217 "Enable tcp timestamps (default = 0)");
1218
1219 static int enable_tcp_sack;
1220 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RWTUN, &enable_tcp_sack, 0,
1221 "Enable tcp SACK (default = 0)");
1222
1223 static int enable_tcp_window_scaling = 1;
1224 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RWTUN, &enable_tcp_window_scaling, 0,
1225 "Enable tcp window scaling (default = 1)");
1226
1227 int c4iw_debug = 0;
1228 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RWTUN, &c4iw_debug, 0,
1229 "Enable debug logging (default = 0)");
1230
1231 static int peer2peer = 1;
1232 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RWTUN, &peer2peer, 0,
1233 "Support peer2peer ULPs (default = 1)");
1234
1235 static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
1236 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RWTUN, &p2p_type, 0,
1237 "RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)");
1238
1239 static int ep_timeout_secs = 60;
1240 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RWTUN, &ep_timeout_secs, 0,
1241 "CM Endpoint operation timeout in seconds (default = 60)");
1242
1243 static int mpa_rev = 1;
1244 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0,
1245 "MPA Revision, 0 supports amso1100, 1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)");
1246
1247 static int markers_enabled;
1248 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RWTUN, &markers_enabled, 0,
1249 "Enable MPA MARKERS (default(0) = disabled)");
1250
1251 static int crc_enabled = 1;
1252 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RWTUN, &crc_enabled, 0,
1253 "Enable MPA CRC (default(1) = enabled)");
1254
1255 static int rcv_win = 256 * 1024;
1256 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RWTUN, &rcv_win, 0,
1257 "TCP receive window in bytes (default = 256KB)");
1258
1259 static int snd_win = 128 * 1024;
1260 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0,
1261 "TCP send window in bytes (default = 128KB)");
1262
1263 int use_dsgl = 1;
1264 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, use_dsgl, CTLFLAG_RWTUN, &use_dsgl, 0,
1265 "Use DSGL for PBL/FastReg (default=1)");
1266
1267 int inline_threshold = 128;
1268 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, inline_threshold, CTLFLAG_RWTUN, &inline_threshold, 0,
1269 "inline vs dsgl threshold (default=128)");
1270
1271 static int reuseaddr = 0;
1272 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, reuseaddr, CTLFLAG_RWTUN, &reuseaddr, 0,
1273 "Enable SO_REUSEADDR & SO_REUSEPORT socket options on all iWARP client connections(default = 0)");
1274
1275 static void
start_ep_timer(struct c4iw_ep * ep)1276 start_ep_timer(struct c4iw_ep *ep)
1277 {
1278
1279 if (timer_pending(&ep->timer)) {
1280 CTR2(KTR_IW_CXGBE, "%s: ep %p, already started", __func__, ep);
1281 printk(KERN_ERR "%s timer already started! ep %p\n", __func__,
1282 ep);
1283 return;
1284 }
1285 clear_bit(TIMEOUT, &ep->com.flags);
1286 c4iw_get_ep(&ep->com);
1287 ep->timer.expires = jiffies + ep_timeout_secs * HZ;
1288 ep->timer.data = (unsigned long)ep;
1289 ep->timer.function = ep_timeout;
1290 add_timer(&ep->timer);
1291 }
1292
1293 static int
stop_ep_timer(struct c4iw_ep * ep)1294 stop_ep_timer(struct c4iw_ep *ep)
1295 {
1296
1297 del_timer_sync(&ep->timer);
1298 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
1299 c4iw_put_ep(&ep->com);
1300 return 0;
1301 }
1302 return 1;
1303 }
1304
1305 static void *
alloc_ep(int size,gfp_t gfp)1306 alloc_ep(int size, gfp_t gfp)
1307 {
1308 struct c4iw_ep_common *epc;
1309
1310 epc = kzalloc(size, gfp);
1311 if (epc == NULL)
1312 return (NULL);
1313
1314 kref_init(&epc->kref);
1315 mutex_init(&epc->mutex);
1316 c4iw_init_wr_wait(&epc->wr_wait);
1317
1318 return (epc);
1319 }
1320
_c4iw_free_ep(struct kref * kref)1321 void _c4iw_free_ep(struct kref *kref)
1322 {
1323 struct c4iw_ep *ep;
1324 #if defined(KTR) || defined(INVARIANTS)
1325 struct c4iw_ep_common *epc;
1326 #endif
1327
1328 ep = container_of(kref, struct c4iw_ep, com.kref);
1329 #if defined(KTR) || defined(INVARIANTS)
1330 epc = &ep->com;
1331 #endif
1332 KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list",
1333 __func__, epc));
1334 if (test_bit(QP_REFERENCED, &ep->com.flags))
1335 deref_qp(ep);
1336 CTR4(KTR_IW_CXGBE, "%s: ep %p, history 0x%lx, flags 0x%lx",
1337 __func__, ep, epc->history, epc->flags);
1338 kfree(ep);
1339 }
1340
release_ep_resources(struct c4iw_ep * ep)1341 static void release_ep_resources(struct c4iw_ep *ep)
1342 {
1343 CTR2(KTR_IW_CXGBE, "%s:rerB %p", __func__, ep);
1344 set_bit(RELEASE_RESOURCES, &ep->com.flags);
1345 c4iw_put_ep(&ep->com);
1346 CTR2(KTR_IW_CXGBE, "%s:rerE %p", __func__, ep);
1347 }
1348
1349 static int
send_mpa_req(struct c4iw_ep * ep)1350 send_mpa_req(struct c4iw_ep *ep)
1351 {
1352 int mpalen;
1353 struct mpa_message *mpa;
1354 struct mpa_v2_conn_params mpa_v2_params;
1355 struct mbuf *m;
1356 char mpa_rev_to_use = mpa_rev;
1357 int err = 0;
1358
1359 if (ep->retry_with_mpa_v1)
1360 mpa_rev_to_use = 1;
1361 mpalen = sizeof(*mpa) + ep->plen;
1362 if (mpa_rev_to_use == 2)
1363 mpalen += sizeof(struct mpa_v2_conn_params);
1364
1365 mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
1366 if (mpa == NULL) {
1367 err = -ENOMEM;
1368 CTR3(KTR_IW_CXGBE, "%s:smr1 ep: %p , error: %d",
1369 __func__, ep, err);
1370 goto err;
1371 }
1372
1373 memset(mpa, 0, mpalen);
1374 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
1375 mpa->flags = (crc_enabled ? MPA_CRC : 0) |
1376 (markers_enabled ? MPA_MARKERS : 0) |
1377 (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
1378 mpa->private_data_size = htons(ep->plen);
1379 mpa->revision = mpa_rev_to_use;
1380
1381 if (mpa_rev_to_use == 1) {
1382 ep->tried_with_mpa_v1 = 1;
1383 ep->retry_with_mpa_v1 = 0;
1384 }
1385
1386 if (mpa_rev_to_use == 2) {
1387 mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
1388 sizeof(struct mpa_v2_conn_params));
1389 mpa_v2_params.ird = htons((u16)ep->ird);
1390 mpa_v2_params.ord = htons((u16)ep->ord);
1391
1392 if (peer2peer) {
1393 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
1394
1395 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
1396 mpa_v2_params.ord |=
1397 htons(MPA_V2_RDMA_WRITE_RTR);
1398 } else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
1399 mpa_v2_params.ord |=
1400 htons(MPA_V2_RDMA_READ_RTR);
1401 }
1402 }
1403 memcpy(mpa->private_data, &mpa_v2_params,
1404 sizeof(struct mpa_v2_conn_params));
1405
1406 if (ep->plen) {
1407
1408 memcpy(mpa->private_data +
1409 sizeof(struct mpa_v2_conn_params),
1410 ep->mpa_pkt + sizeof(*mpa), ep->plen);
1411 }
1412 } else {
1413
1414 if (ep->plen)
1415 memcpy(mpa->private_data,
1416 ep->mpa_pkt + sizeof(*mpa), ep->plen);
1417 CTR2(KTR_IW_CXGBE, "%s:smr7 %p", __func__, ep);
1418 }
1419
1420 m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
1421 if (m == NULL) {
1422 err = -ENOMEM;
1423 CTR3(KTR_IW_CXGBE, "%s:smr2 ep: %p , error: %d",
1424 __func__, ep, err);
1425 free(mpa, M_CXGBE);
1426 goto err;
1427 }
1428 m_copyback(m, 0, mpalen, (void *)mpa);
1429 free(mpa, M_CXGBE);
1430
1431 err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
1432 ep->com.thread);
1433 if (err) {
1434 CTR3(KTR_IW_CXGBE, "%s:smr3 ep: %p , error: %d",
1435 __func__, ep, err);
1436 goto err;
1437 }
1438
1439 START_EP_TIMER(ep);
1440 ep->com.state = MPA_REQ_SENT;
1441 ep->mpa_attr.initiator = 1;
1442 CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
1443 return 0;
1444 err:
1445 connect_reply_upcall(ep, err);
1446 CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
1447 return err;
1448 }
1449
send_mpa_reject(struct c4iw_ep * ep,const void * pdata,u8 plen)1450 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
1451 {
1452 int mpalen ;
1453 struct mpa_message *mpa;
1454 struct mpa_v2_conn_params mpa_v2_params;
1455 struct mbuf *m;
1456 int err;
1457
1458 CTR4(KTR_IW_CXGBE, "%s:smrejB %p %u %d", __func__, ep, ep->hwtid,
1459 ep->plen);
1460
1461 mpalen = sizeof(*mpa) + plen;
1462
1463 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1464
1465 mpalen += sizeof(struct mpa_v2_conn_params);
1466 CTR4(KTR_IW_CXGBE, "%s:smrej1 %p %u %d", __func__, ep,
1467 ep->mpa_attr.version, mpalen);
1468 }
1469
1470 mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
1471 if (mpa == NULL)
1472 return (-ENOMEM);
1473
1474 memset(mpa, 0, mpalen);
1475 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
1476 mpa->flags = MPA_REJECT;
1477 mpa->revision = mpa_rev;
1478 mpa->private_data_size = htons(plen);
1479
1480 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1481
1482 mpa->flags |= MPA_ENHANCED_RDMA_CONN;
1483 mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
1484 sizeof(struct mpa_v2_conn_params));
1485 mpa_v2_params.ird = htons(((u16)ep->ird) |
1486 (peer2peer ? MPA_V2_PEER2PEER_MODEL :
1487 0));
1488 mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
1489 (p2p_type ==
1490 FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
1491 MPA_V2_RDMA_WRITE_RTR : p2p_type ==
1492 FW_RI_INIT_P2PTYPE_READ_REQ ?
1493 MPA_V2_RDMA_READ_RTR : 0) : 0));
1494 memcpy(mpa->private_data, &mpa_v2_params,
1495 sizeof(struct mpa_v2_conn_params));
1496
1497 if (ep->plen)
1498 memcpy(mpa->private_data +
1499 sizeof(struct mpa_v2_conn_params), pdata, plen);
1500 CTR5(KTR_IW_CXGBE, "%s:smrej3 %p %d %d %d", __func__, ep,
1501 mpa_v2_params.ird, mpa_v2_params.ord, ep->plen);
1502 } else
1503 if (plen)
1504 memcpy(mpa->private_data, pdata, plen);
1505
1506 m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
1507 if (m == NULL) {
1508 free(mpa, M_CXGBE);
1509 return (-ENOMEM);
1510 }
1511 m_copyback(m, 0, mpalen, (void *)mpa);
1512 free(mpa, M_CXGBE);
1513
1514 err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread);
1515 if (!err)
1516 ep->snd_seq += mpalen;
1517 CTR4(KTR_IW_CXGBE, "%s:smrejE %p %u %d", __func__, ep, ep->hwtid, err);
1518 return err;
1519 }
1520
send_mpa_reply(struct c4iw_ep * ep,const void * pdata,u8 plen)1521 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
1522 {
1523 int mpalen;
1524 struct mpa_message *mpa;
1525 struct mbuf *m;
1526 struct mpa_v2_conn_params mpa_v2_params;
1527 int err;
1528
1529 CTR2(KTR_IW_CXGBE, "%s:smrepB %p", __func__, ep);
1530
1531 mpalen = sizeof(*mpa) + plen;
1532
1533 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1534
1535 CTR3(KTR_IW_CXGBE, "%s:smrep1 %p %d", __func__, ep,
1536 ep->mpa_attr.version);
1537 mpalen += sizeof(struct mpa_v2_conn_params);
1538 }
1539
1540 mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
1541 if (mpa == NULL)
1542 return (-ENOMEM);
1543
1544 memset(mpa, 0, sizeof(*mpa));
1545 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
1546 mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
1547 (markers_enabled ? MPA_MARKERS : 0);
1548 mpa->revision = ep->mpa_attr.version;
1549 mpa->private_data_size = htons(plen);
1550
1551 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1552
1553 mpa->flags |= MPA_ENHANCED_RDMA_CONN;
1554 mpa->private_data_size +=
1555 htons(sizeof(struct mpa_v2_conn_params));
1556 mpa_v2_params.ird = htons((u16)ep->ird);
1557 mpa_v2_params.ord = htons((u16)ep->ord);
1558 CTR5(KTR_IW_CXGBE, "%s:smrep3 %p %d %d %d", __func__, ep,
1559 ep->mpa_attr.version, mpa_v2_params.ird, mpa_v2_params.ord);
1560
1561 if (peer2peer && (ep->mpa_attr.p2p_type !=
1562 FW_RI_INIT_P2PTYPE_DISABLED)) {
1563
1564 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
1565
1566 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
1567
1568 mpa_v2_params.ord |=
1569 htons(MPA_V2_RDMA_WRITE_RTR);
1570 CTR5(KTR_IW_CXGBE, "%s:smrep4 %p %d %d %d",
1571 __func__, ep, p2p_type, mpa_v2_params.ird,
1572 mpa_v2_params.ord);
1573 }
1574 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
1575
1576 mpa_v2_params.ord |=
1577 htons(MPA_V2_RDMA_READ_RTR);
1578 CTR5(KTR_IW_CXGBE, "%s:smrep5 %p %d %d %d",
1579 __func__, ep, p2p_type, mpa_v2_params.ird,
1580 mpa_v2_params.ord);
1581 }
1582 }
1583
1584 memcpy(mpa->private_data, &mpa_v2_params,
1585 sizeof(struct mpa_v2_conn_params));
1586
1587 if (ep->plen)
1588 memcpy(mpa->private_data +
1589 sizeof(struct mpa_v2_conn_params), pdata, plen);
1590 } else
1591 if (plen)
1592 memcpy(mpa->private_data, pdata, plen);
1593
1594 m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
1595 if (m == NULL) {
1596 free(mpa, M_CXGBE);
1597 return (-ENOMEM);
1598 }
1599 m_copyback(m, 0, mpalen, (void *)mpa);
1600 free(mpa, M_CXGBE);
1601
1602
1603 ep->com.state = MPA_REP_SENT;
1604 ep->snd_seq += mpalen;
1605 err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
1606 ep->com.thread);
1607 CTR3(KTR_IW_CXGBE, "%s:smrepE %p %d", __func__, ep, err);
1608 return err;
1609 }
1610
1611
1612
close_complete_upcall(struct c4iw_ep * ep,int status)1613 static void close_complete_upcall(struct c4iw_ep *ep, int status)
1614 {
1615 struct iw_cm_event event;
1616
1617 CTR2(KTR_IW_CXGBE, "%s:ccuB %p", __func__, ep);
1618 memset(&event, 0, sizeof(event));
1619 event.event = IW_CM_EVENT_CLOSE;
1620 event.status = status;
1621
1622 if (ep->com.cm_id) {
1623
1624 CTR2(KTR_IW_CXGBE, "%s:ccu1 %1", __func__, ep);
1625 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1626 deref_cm_id(&ep->com);
1627 set_bit(CLOSE_UPCALL, &ep->com.history);
1628 }
1629 CTR2(KTR_IW_CXGBE, "%s:ccuE %p", __func__, ep);
1630 }
1631
1632 static int
send_abort(struct c4iw_ep * ep)1633 send_abort(struct c4iw_ep *ep)
1634 {
1635 struct socket *so = ep->com.so;
1636 struct sockopt sopt;
1637 int rc;
1638 struct linger l;
1639
1640 CTR5(KTR_IW_CXGBE, "%s ep %p so %p state %s tid %d", __func__, ep, so,
1641 states[ep->com.state], ep->hwtid);
1642
1643 l.l_onoff = 1;
1644 l.l_linger = 0;
1645
1646 /* linger_time of 0 forces RST to be sent */
1647 sopt.sopt_dir = SOPT_SET;
1648 sopt.sopt_level = SOL_SOCKET;
1649 sopt.sopt_name = SO_LINGER;
1650 sopt.sopt_val = (caddr_t)&l;
1651 sopt.sopt_valsize = sizeof l;
1652 sopt.sopt_td = NULL;
1653 rc = -sosetopt(so, &sopt);
1654 if (rc != 0) {
1655 log(LOG_ERR, "%s: sosetopt(%p, linger = 0) failed with %d.\n",
1656 __func__, so, rc);
1657 }
1658
1659 uninit_iwarp_socket(so);
1660 soclose(so);
1661 set_bit(ABORT_CONN, &ep->com.history);
1662
1663 /*
1664 * TBD: iw_cxgbe driver should receive ABORT reply for every ABORT
1665 * request it has sent. But the current TOE driver is not propagating
1666 * this ABORT reply event (via do_abort_rpl) to iw_cxgbe. So as a work-
1667 * around de-refererece 'ep' here instead of doing it in abort_rpl()
1668 * handler(not yet implemented) of iw_cxgbe driver.
1669 */
1670 release_ep_resources(ep);
1671 ep->com.state = DEAD;
1672
1673 return (0);
1674 }
1675
peer_close_upcall(struct c4iw_ep * ep)1676 static void peer_close_upcall(struct c4iw_ep *ep)
1677 {
1678 struct iw_cm_event event;
1679
1680 CTR2(KTR_IW_CXGBE, "%s:pcuB %p", __func__, ep);
1681 memset(&event, 0, sizeof(event));
1682 event.event = IW_CM_EVENT_DISCONNECT;
1683
1684 if (ep->com.cm_id) {
1685
1686 CTR2(KTR_IW_CXGBE, "%s:pcu1 %p", __func__, ep);
1687 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1688 set_bit(DISCONN_UPCALL, &ep->com.history);
1689 }
1690 CTR2(KTR_IW_CXGBE, "%s:pcuE %p", __func__, ep);
1691 }
1692
peer_abort_upcall(struct c4iw_ep * ep)1693 static void peer_abort_upcall(struct c4iw_ep *ep)
1694 {
1695 struct iw_cm_event event;
1696
1697 CTR2(KTR_IW_CXGBE, "%s:pauB %p", __func__, ep);
1698 memset(&event, 0, sizeof(event));
1699 event.event = IW_CM_EVENT_CLOSE;
1700 event.status = -ECONNRESET;
1701
1702 if (ep->com.cm_id) {
1703
1704 CTR2(KTR_IW_CXGBE, "%s:pau1 %p", __func__, ep);
1705 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1706 deref_cm_id(&ep->com);
1707 set_bit(ABORT_UPCALL, &ep->com.history);
1708 }
1709 CTR2(KTR_IW_CXGBE, "%s:pauE %p", __func__, ep);
1710 }
1711
connect_reply_upcall(struct c4iw_ep * ep,int status)1712 static void connect_reply_upcall(struct c4iw_ep *ep, int status)
1713 {
1714 struct iw_cm_event event;
1715
1716 CTR3(KTR_IW_CXGBE, "%s:cruB %p, status: %d", __func__, ep, status);
1717 memset(&event, 0, sizeof(event));
1718 event.event = IW_CM_EVENT_CONNECT_REPLY;
1719 event.status = ((status == -ECONNABORTED) || (status == -EPIPE)) ?
1720 -ECONNRESET : status;
1721 event.local_addr = ep->com.local_addr;
1722 event.remote_addr = ep->com.remote_addr;
1723
1724 if ((status == 0) || (status == -ECONNREFUSED)) {
1725
1726 if (!ep->tried_with_mpa_v1) {
1727
1728 CTR2(KTR_IW_CXGBE, "%s:cru1 %p", __func__, ep);
1729 /* this means MPA_v2 is used */
1730 event.ord = ep->ird;
1731 event.ird = ep->ord;
1732 event.private_data_len = ep->plen -
1733 sizeof(struct mpa_v2_conn_params);
1734 event.private_data = ep->mpa_pkt +
1735 sizeof(struct mpa_message) +
1736 sizeof(struct mpa_v2_conn_params);
1737 } else {
1738
1739 CTR2(KTR_IW_CXGBE, "%s:cru2 %p", __func__, ep);
1740 /* this means MPA_v1 is used */
1741 event.ord = c4iw_max_read_depth;
1742 event.ird = c4iw_max_read_depth;
1743 event.private_data_len = ep->plen;
1744 event.private_data = ep->mpa_pkt +
1745 sizeof(struct mpa_message);
1746 }
1747 }
1748
1749 if (ep->com.cm_id) {
1750
1751 CTR2(KTR_IW_CXGBE, "%s:cru3 %p", __func__, ep);
1752 set_bit(CONN_RPL_UPCALL, &ep->com.history);
1753 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1754 }
1755
1756 if(status == -ECONNABORTED) {
1757
1758 CTR3(KTR_IW_CXGBE, "%s:cruE %p %d", __func__, ep, status);
1759 return;
1760 }
1761
1762 if (status < 0) {
1763
1764 CTR3(KTR_IW_CXGBE, "%s:cru4 %p %d", __func__, ep, status);
1765 deref_cm_id(&ep->com);
1766 }
1767
1768 CTR2(KTR_IW_CXGBE, "%s:cruE %p", __func__, ep);
1769 }
1770
connect_request_upcall(struct c4iw_ep * ep)1771 static int connect_request_upcall(struct c4iw_ep *ep)
1772 {
1773 struct iw_cm_event event;
1774 int ret;
1775
1776 CTR3(KTR_IW_CXGBE, "%s: ep %p, mpa_v1 %d", __func__, ep,
1777 ep->tried_with_mpa_v1);
1778
1779 memset(&event, 0, sizeof(event));
1780 event.event = IW_CM_EVENT_CONNECT_REQUEST;
1781 event.local_addr = ep->com.local_addr;
1782 event.remote_addr = ep->com.remote_addr;
1783 event.provider_data = ep;
1784
1785 if (!ep->tried_with_mpa_v1) {
1786 /* this means MPA_v2 is used */
1787 event.ord = ep->ord;
1788 event.ird = ep->ird;
1789 event.private_data_len = ep->plen -
1790 sizeof(struct mpa_v2_conn_params);
1791 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
1792 sizeof(struct mpa_v2_conn_params);
1793 } else {
1794
1795 /* this means MPA_v1 is used. Send max supported */
1796 event.ord = c4iw_max_read_depth;
1797 event.ird = c4iw_max_read_depth;
1798 event.private_data_len = ep->plen;
1799 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
1800 }
1801
1802 c4iw_get_ep(&ep->com);
1803 ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
1804 &event);
1805 if(ret) {
1806 CTR3(KTR_IW_CXGBE, "%s: ep %p, Failure while notifying event to"
1807 " IWCM, err:%d", __func__, ep, ret);
1808 c4iw_put_ep(&ep->com);
1809 } else
1810 /* Dereference parent_ep only in success case.
1811 * In case of failure, parent_ep is dereferenced by the caller
1812 * of process_mpa_request().
1813 */
1814 c4iw_put_ep(&ep->parent_ep->com);
1815
1816 set_bit(CONNREQ_UPCALL, &ep->com.history);
1817 return ret;
1818 }
1819
established_upcall(struct c4iw_ep * ep)1820 static void established_upcall(struct c4iw_ep *ep)
1821 {
1822 struct iw_cm_event event;
1823
1824 CTR2(KTR_IW_CXGBE, "%s:euB %p", __func__, ep);
1825 memset(&event, 0, sizeof(event));
1826 event.event = IW_CM_EVENT_ESTABLISHED;
1827 event.ird = ep->ord;
1828 event.ord = ep->ird;
1829
1830 if (ep->com.cm_id) {
1831
1832 CTR2(KTR_IW_CXGBE, "%s:eu1 %p", __func__, ep);
1833 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1834 set_bit(ESTAB_UPCALL, &ep->com.history);
1835 }
1836 CTR2(KTR_IW_CXGBE, "%s:euE %p", __func__, ep);
1837 }
1838
1839
1840 #define RELAXED_IRD_NEGOTIATION 1
1841
1842 /*
1843 * process_mpa_reply - process streaming mode MPA reply
1844 *
1845 * Returns:
1846 *
1847 * 0 upon success indicating a connect request was delivered to the ULP
1848 * or the mpa request is incomplete but valid so far.
1849 *
1850 * 1 if a failure requires the caller to close the connection.
1851 *
1852 * 2 if a failure requires the caller to abort the connection.
1853 */
process_mpa_reply(struct c4iw_ep * ep)1854 static int process_mpa_reply(struct c4iw_ep *ep)
1855 {
1856 struct mpa_message *mpa;
1857 struct mpa_v2_conn_params *mpa_v2_params;
1858 u16 plen;
1859 u16 resp_ird, resp_ord;
1860 u8 rtr_mismatch = 0, insuff_ird = 0;
1861 struct c4iw_qp_attributes attrs = {0};
1862 enum c4iw_qp_attr_mask mask;
1863 int err;
1864 struct mbuf *top, *m;
1865 int flags = MSG_DONTWAIT;
1866 struct uio uio;
1867 int disconnect = 0;
1868
1869 CTR2(KTR_IW_CXGBE, "%s:pmrB %p", __func__, ep);
1870
1871 /*
1872 * Stop mpa timer. If it expired, then
1873 * we ignore the MPA reply. process_timeout()
1874 * will abort the connection.
1875 */
1876 if (STOP_EP_TIMER(ep))
1877 return 0;
1878
1879 uio.uio_resid = 1000000;
1880 uio.uio_td = ep->com.thread;
1881 err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags);
1882
1883 if (err) {
1884
1885 if (err == EWOULDBLOCK) {
1886
1887 CTR2(KTR_IW_CXGBE, "%s:pmr1 %p", __func__, ep);
1888 START_EP_TIMER(ep);
1889 return 0;
1890 }
1891 err = -err;
1892 CTR2(KTR_IW_CXGBE, "%s:pmr2 %p", __func__, ep);
1893 goto err;
1894 }
1895
1896 if (ep->com.so->so_rcv.sb_mb) {
1897
1898 CTR2(KTR_IW_CXGBE, "%s:pmr3 %p", __func__, ep);
1899 printf("%s data after soreceive called! so %p sb_mb %p top %p\n",
1900 __func__, ep->com.so, ep->com.so->so_rcv.sb_mb, top);
1901 }
1902
1903 m = top;
1904
1905 do {
1906
1907 CTR2(KTR_IW_CXGBE, "%s:pmr4 %p", __func__, ep);
1908 /*
1909 * If we get more than the supported amount of private data
1910 * then we must fail this connection.
1911 */
1912 if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) {
1913
1914 CTR3(KTR_IW_CXGBE, "%s:pmr5 %p %d", __func__, ep,
1915 ep->mpa_pkt_len + m->m_len);
1916 err = (-EINVAL);
1917 goto err_stop_timer;
1918 }
1919
1920 /*
1921 * copy the new data into our accumulation buffer.
1922 */
1923 m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len]));
1924 ep->mpa_pkt_len += m->m_len;
1925 if (!m->m_next)
1926 m = m->m_nextpkt;
1927 else
1928 m = m->m_next;
1929 } while (m);
1930
1931 m_freem(top);
1932 /*
1933 * if we don't even have the mpa message, then bail.
1934 */
1935 if (ep->mpa_pkt_len < sizeof(*mpa)) {
1936 return 0;
1937 }
1938 mpa = (struct mpa_message *) ep->mpa_pkt;
1939
1940 /* Validate MPA header. */
1941 if (mpa->revision > mpa_rev) {
1942
1943 CTR4(KTR_IW_CXGBE, "%s:pmr6 %p %d %d", __func__, ep,
1944 mpa->revision, mpa_rev);
1945 printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d, "
1946 " Received = %d\n", __func__, mpa_rev, mpa->revision);
1947 err = -EPROTO;
1948 goto err_stop_timer;
1949 }
1950
1951 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
1952
1953 CTR2(KTR_IW_CXGBE, "%s:pmr7 %p", __func__, ep);
1954 err = -EPROTO;
1955 goto err_stop_timer;
1956 }
1957
1958 plen = ntohs(mpa->private_data_size);
1959
1960 /*
1961 * Fail if there's too much private data.
1962 */
1963 if (plen > MPA_MAX_PRIVATE_DATA) {
1964
1965 CTR2(KTR_IW_CXGBE, "%s:pmr8 %p", __func__, ep);
1966 err = -EPROTO;
1967 goto err_stop_timer;
1968 }
1969
1970 /*
1971 * If plen does not account for pkt size
1972 */
1973 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1974
1975 CTR2(KTR_IW_CXGBE, "%s:pmr9 %p", __func__, ep);
1976 STOP_EP_TIMER(ep);
1977 err = -EPROTO;
1978 goto err_stop_timer;
1979 }
1980
1981 ep->plen = (u8) plen;
1982
1983 /*
1984 * If we don't have all the pdata yet, then bail.
1985 * We'll continue process when more data arrives.
1986 */
1987 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) {
1988
1989 CTR2(KTR_IW_CXGBE, "%s:pmra %p", __func__, ep);
1990 return 0;
1991 }
1992
1993 if (mpa->flags & MPA_REJECT) {
1994
1995 CTR2(KTR_IW_CXGBE, "%s:pmrb %p", __func__, ep);
1996 err = -ECONNREFUSED;
1997 goto err_stop_timer;
1998 }
1999
2000 /*
2001 * If we get here we have accumulated the entire mpa
2002 * start reply message including private data. And
2003 * the MPA header is valid.
2004 */
2005 ep->com.state = FPDU_MODE;
2006 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
2007 ep->mpa_attr.recv_marker_enabled = markers_enabled;
2008 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
2009 ep->mpa_attr.version = mpa->revision;
2010 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
2011
2012 if (mpa->revision == 2) {
2013
2014 CTR2(KTR_IW_CXGBE, "%s:pmrc %p", __func__, ep);
2015 ep->mpa_attr.enhanced_rdma_conn =
2016 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
2017
2018 if (ep->mpa_attr.enhanced_rdma_conn) {
2019
2020 CTR2(KTR_IW_CXGBE, "%s:pmrd %p", __func__, ep);
2021 mpa_v2_params = (struct mpa_v2_conn_params *)
2022 (ep->mpa_pkt + sizeof(*mpa));
2023 resp_ird = ntohs(mpa_v2_params->ird) &
2024 MPA_V2_IRD_ORD_MASK;
2025 resp_ord = ntohs(mpa_v2_params->ord) &
2026 MPA_V2_IRD_ORD_MASK;
2027
2028 /*
2029 * This is a double-check. Ideally, below checks are
2030 * not required since ird/ord stuff has been taken
2031 * care of in c4iw_accept_cr
2032 */
2033 if (ep->ird < resp_ord) {
2034 if (RELAXED_IRD_NEGOTIATION && resp_ord <=
2035 ep->com.dev->rdev.adap->params.max_ordird_qp)
2036 ep->ird = resp_ord;
2037 else
2038 insuff_ird = 1;
2039 } else if (ep->ird > resp_ord) {
2040 ep->ird = resp_ord;
2041 }
2042 if (ep->ord > resp_ird) {
2043 if (RELAXED_IRD_NEGOTIATION)
2044 ep->ord = resp_ird;
2045 else
2046 insuff_ird = 1;
2047 }
2048 if (insuff_ird) {
2049 err = -ENOMEM;
2050 ep->ird = resp_ord;
2051 ep->ord = resp_ird;
2052 }
2053
2054 if (ntohs(mpa_v2_params->ird) &
2055 MPA_V2_PEER2PEER_MODEL) {
2056
2057 CTR2(KTR_IW_CXGBE, "%s:pmrf %p", __func__, ep);
2058 if (ntohs(mpa_v2_params->ord) &
2059 MPA_V2_RDMA_WRITE_RTR) {
2060
2061 CTR2(KTR_IW_CXGBE, "%s:pmrg %p", __func__, ep);
2062 ep->mpa_attr.p2p_type =
2063 FW_RI_INIT_P2PTYPE_RDMA_WRITE;
2064 }
2065 else if (ntohs(mpa_v2_params->ord) &
2066 MPA_V2_RDMA_READ_RTR) {
2067
2068 CTR2(KTR_IW_CXGBE, "%s:pmrh %p", __func__, ep);
2069 ep->mpa_attr.p2p_type =
2070 FW_RI_INIT_P2PTYPE_READ_REQ;
2071 }
2072 }
2073 }
2074 } else {
2075
2076 CTR2(KTR_IW_CXGBE, "%s:pmri %p", __func__, ep);
2077
2078 if (mpa->revision == 1) {
2079
2080 CTR2(KTR_IW_CXGBE, "%s:pmrj %p", __func__, ep);
2081
2082 if (peer2peer) {
2083
2084 CTR2(KTR_IW_CXGBE, "%s:pmrk %p", __func__, ep);
2085 ep->mpa_attr.p2p_type = p2p_type;
2086 }
2087 }
2088 }
2089
2090 if (set_tcpinfo(ep)) {
2091
2092 CTR2(KTR_IW_CXGBE, "%s:pmrl %p", __func__, ep);
2093 printf("%s set_tcpinfo error\n", __func__);
2094 err = -ECONNRESET;
2095 goto err;
2096 }
2097
2098 CTR6(KTR_IW_CXGBE, "%s - crc_enabled = %d, recv_marker_enabled = %d, "
2099 "xmit_marker_enabled = %d, version = %d p2p_type = %d", __func__,
2100 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
2101 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
2102 ep->mpa_attr.p2p_type);
2103
2104 /*
2105 * If responder's RTR does not match with that of initiator, assign
2106 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
2107 * generated when moving QP to RTS state.
2108 * A TERM message will be sent after QP has moved to RTS state
2109 */
2110 if ((ep->mpa_attr.version == 2) && peer2peer &&
2111 (ep->mpa_attr.p2p_type != p2p_type)) {
2112
2113 CTR2(KTR_IW_CXGBE, "%s:pmrm %p", __func__, ep);
2114 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
2115 rtr_mismatch = 1;
2116 }
2117
2118
2119 //ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
2120 attrs.mpa_attr = ep->mpa_attr;
2121 attrs.max_ird = ep->ird;
2122 attrs.max_ord = ep->ord;
2123 attrs.llp_stream_handle = ep;
2124 attrs.next_state = C4IW_QP_STATE_RTS;
2125
2126 mask = C4IW_QP_ATTR_NEXT_STATE |
2127 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
2128 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
2129
2130 /* bind QP and TID with INIT_WR */
2131 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
2132
2133 if (err) {
2134
2135 CTR2(KTR_IW_CXGBE, "%s:pmrn %p", __func__, ep);
2136 goto err;
2137 }
2138
2139 /*
2140 * If responder's RTR requirement did not match with what initiator
2141 * supports, generate TERM message
2142 */
2143 if (rtr_mismatch) {
2144
2145 CTR2(KTR_IW_CXGBE, "%s:pmro %p", __func__, ep);
2146 printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
2147 attrs.layer_etype = LAYER_MPA | DDP_LLP;
2148 attrs.ecode = MPA_NOMATCH_RTR;
2149 attrs.next_state = C4IW_QP_STATE_TERMINATE;
2150 attrs.send_term = 1;
2151 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2152 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2153 err = -ENOMEM;
2154 disconnect = 1;
2155 goto out;
2156 }
2157
2158 /*
2159 * Generate TERM if initiator IRD is not sufficient for responder
2160 * provided ORD. Currently, we do the same behaviour even when
2161 * responder provided IRD is also not sufficient as regards to
2162 * initiator ORD.
2163 */
2164 if (insuff_ird) {
2165
2166 CTR2(KTR_IW_CXGBE, "%s:pmrp %p", __func__, ep);
2167 printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
2168 __func__);
2169 attrs.layer_etype = LAYER_MPA | DDP_LLP;
2170 attrs.ecode = MPA_INSUFF_IRD;
2171 attrs.next_state = C4IW_QP_STATE_TERMINATE;
2172 attrs.send_term = 1;
2173 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2174 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2175 err = -ENOMEM;
2176 disconnect = 1;
2177 goto out;
2178 }
2179 goto out;
2180 err_stop_timer:
2181 STOP_EP_TIMER(ep);
2182 err:
2183 disconnect = 2;
2184 out:
2185 connect_reply_upcall(ep, err);
2186 CTR2(KTR_IW_CXGBE, "%s:pmrE %p", __func__, ep);
2187 return disconnect;
2188 }
2189
2190 /*
2191 * process_mpa_request - process streaming mode MPA request
2192 *
2193 * Returns:
2194 *
2195 * 0 upon success indicating a connect request was delivered to the ULP
2196 * or the mpa request is incomplete but valid so far.
2197 *
2198 * 1 if a failure requires the caller to close the connection.
2199 *
2200 * 2 if a failure requires the caller to abort the connection.
2201 */
2202 static int
process_mpa_request(struct c4iw_ep * ep)2203 process_mpa_request(struct c4iw_ep *ep)
2204 {
2205 struct mpa_message *mpa;
2206 struct mpa_v2_conn_params *mpa_v2_params;
2207 u16 plen;
2208 int flags = MSG_DONTWAIT;
2209 int rc;
2210 struct iovec iov;
2211 struct uio uio;
2212 enum c4iw_ep_state state = ep->com.state;
2213
2214 CTR3(KTR_IW_CXGBE, "%s: ep %p, state %s", __func__, ep, states[state]);
2215
2216 if (state != MPA_REQ_WAIT)
2217 return 0;
2218
2219 iov.iov_base = &ep->mpa_pkt[ep->mpa_pkt_len];
2220 iov.iov_len = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
2221 uio.uio_iov = &iov;
2222 uio.uio_iovcnt = 1;
2223 uio.uio_offset = 0;
2224 uio.uio_resid = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
2225 uio.uio_segflg = UIO_SYSSPACE;
2226 uio.uio_rw = UIO_READ;
2227 uio.uio_td = NULL; /* uio.uio_td = ep->com.thread; */
2228
2229 rc = soreceive(ep->com.so, NULL, &uio, NULL, NULL, &flags);
2230 if (rc == EAGAIN)
2231 return 0;
2232 else if (rc)
2233 goto err_stop_timer;
2234
2235 KASSERT(uio.uio_offset > 0, ("%s: sorecieve on so %p read no data",
2236 __func__, ep->com.so));
2237 ep->mpa_pkt_len += uio.uio_offset;
2238
2239 /*
2240 * If we get more than the supported amount of private data then we must
2241 * fail this connection. XXX: check so_rcv->sb_cc, or peek with another
2242 * soreceive, or increase the size of mpa_pkt by 1 and abort if the last
2243 * byte is filled by the soreceive above.
2244 */
2245
2246 /* Don't even have the MPA message. Wait for more data to arrive. */
2247 if (ep->mpa_pkt_len < sizeof(*mpa))
2248 return 0;
2249 mpa = (struct mpa_message *) ep->mpa_pkt;
2250
2251 /*
2252 * Validate MPA Header.
2253 */
2254 if (mpa->revision > mpa_rev) {
2255 log(LOG_ERR, "%s: MPA version mismatch. Local = %d,"
2256 " Received = %d\n", __func__, mpa_rev, mpa->revision);
2257 goto err_stop_timer;
2258 }
2259
2260 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
2261 goto err_stop_timer;
2262
2263 /*
2264 * Fail if there's too much private data.
2265 */
2266 plen = ntohs(mpa->private_data_size);
2267 if (plen > MPA_MAX_PRIVATE_DATA)
2268 goto err_stop_timer;
2269
2270 /*
2271 * If plen does not account for pkt size
2272 */
2273 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
2274 goto err_stop_timer;
2275
2276 ep->plen = (u8) plen;
2277
2278 /*
2279 * If we don't have all the pdata yet, then bail.
2280 */
2281 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
2282 return 0;
2283
2284 /*
2285 * If we get here we have accumulated the entire mpa
2286 * start reply message including private data.
2287 */
2288 ep->mpa_attr.initiator = 0;
2289 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
2290 ep->mpa_attr.recv_marker_enabled = markers_enabled;
2291 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
2292 ep->mpa_attr.version = mpa->revision;
2293 if (mpa->revision == 1)
2294 ep->tried_with_mpa_v1 = 1;
2295 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
2296
2297 if (mpa->revision == 2) {
2298 ep->mpa_attr.enhanced_rdma_conn =
2299 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
2300 if (ep->mpa_attr.enhanced_rdma_conn) {
2301 mpa_v2_params = (struct mpa_v2_conn_params *)
2302 (ep->mpa_pkt + sizeof(*mpa));
2303 ep->ird = ntohs(mpa_v2_params->ird) &
2304 MPA_V2_IRD_ORD_MASK;
2305 ep->ird = min_t(u32, ep->ird,
2306 cur_max_read_depth(ep->com.dev));
2307 ep->ord = ntohs(mpa_v2_params->ord) &
2308 MPA_V2_IRD_ORD_MASK;
2309 ep->ord = min_t(u32, ep->ord,
2310 cur_max_read_depth(ep->com.dev));
2311 CTR3(KTR_IW_CXGBE, "%s initiator ird %u ord %u",
2312 __func__, ep->ird, ep->ord);
2313 if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
2314 if (peer2peer) {
2315 if (ntohs(mpa_v2_params->ord) &
2316 MPA_V2_RDMA_WRITE_RTR)
2317 ep->mpa_attr.p2p_type =
2318 FW_RI_INIT_P2PTYPE_RDMA_WRITE;
2319 else if (ntohs(mpa_v2_params->ord) &
2320 MPA_V2_RDMA_READ_RTR)
2321 ep->mpa_attr.p2p_type =
2322 FW_RI_INIT_P2PTYPE_READ_REQ;
2323 }
2324 }
2325 } else if (mpa->revision == 1 && peer2peer)
2326 ep->mpa_attr.p2p_type = p2p_type;
2327
2328 if (set_tcpinfo(ep))
2329 goto err_stop_timer;
2330
2331 CTR5(KTR_IW_CXGBE, "%s: crc_enabled = %d, recv_marker_enabled = %d, "
2332 "xmit_marker_enabled = %d, version = %d", __func__,
2333 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
2334 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
2335
2336 ep->com.state = MPA_REQ_RCVD;
2337 STOP_EP_TIMER(ep);
2338
2339 /* drive upcall */
2340 if (ep->parent_ep->com.state != DEAD)
2341 if (connect_request_upcall(ep))
2342 goto err_out;
2343 return 0;
2344
2345 err_stop_timer:
2346 STOP_EP_TIMER(ep);
2347 err_out:
2348 return 2;
2349 }
2350
2351 /*
2352 * Upcall from the adapter indicating data has been transmitted.
2353 * For us its just the single MPA request or reply. We can now free
2354 * the skb holding the mpa message.
2355 */
c4iw_reject_cr(struct iw_cm_id * cm_id,const void * pdata,u8 pdata_len)2356 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
2357 {
2358 #ifdef KTR
2359 int err;
2360 #endif
2361 struct c4iw_ep *ep = to_ep(cm_id);
2362 int abort = 0;
2363
2364 mutex_lock(&ep->com.mutex);
2365 CTR2(KTR_IW_CXGBE, "%s:crcB %p", __func__, ep);
2366
2367 if ((ep->com.state == DEAD) ||
2368 (ep->com.state != MPA_REQ_RCVD)) {
2369
2370 CTR2(KTR_IW_CXGBE, "%s:crc1 %p", __func__, ep);
2371 mutex_unlock(&ep->com.mutex);
2372 c4iw_put_ep(&ep->com);
2373 return -ECONNRESET;
2374 }
2375 set_bit(ULP_REJECT, &ep->com.history);
2376
2377 if (mpa_rev == 0) {
2378
2379 CTR2(KTR_IW_CXGBE, "%s:crc2 %p", __func__, ep);
2380 abort = 1;
2381 }
2382 else {
2383
2384 CTR2(KTR_IW_CXGBE, "%s:crc3 %p", __func__, ep);
2385 abort = send_mpa_reject(ep, pdata, pdata_len);
2386 }
2387 STOP_EP_TIMER(ep);
2388 #ifdef KTR
2389 err = c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
2390 #else
2391 c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
2392 #endif
2393 mutex_unlock(&ep->com.mutex);
2394 c4iw_put_ep(&ep->com);
2395 CTR3(KTR_IW_CXGBE, "%s:crc4 %p, err: %d", __func__, ep, err);
2396 return 0;
2397 }
2398
c4iw_accept_cr(struct iw_cm_id * cm_id,struct iw_cm_conn_param * conn_param)2399 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2400 {
2401 int err;
2402 struct c4iw_qp_attributes attrs = {0};
2403 enum c4iw_qp_attr_mask mask;
2404 struct c4iw_ep *ep = to_ep(cm_id);
2405 struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
2406 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
2407 int abort = 0;
2408
2409 mutex_lock(&ep->com.mutex);
2410 CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep);
2411
2412 if ((ep->com.state == DEAD) ||
2413 (ep->com.state != MPA_REQ_RCVD)) {
2414
2415 CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep);
2416 err = -ECONNRESET;
2417 goto err_out;
2418 }
2419
2420 BUG_ON(!qp);
2421
2422 set_bit(ULP_ACCEPT, &ep->com.history);
2423
2424 if ((conn_param->ord > c4iw_max_read_depth) ||
2425 (conn_param->ird > c4iw_max_read_depth)) {
2426
2427 CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep);
2428 err = -EINVAL;
2429 goto err_abort;
2430 }
2431
2432 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
2433
2434 CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep);
2435
2436 if (conn_param->ord > ep->ird) {
2437 if (RELAXED_IRD_NEGOTIATION) {
2438 conn_param->ord = ep->ird;
2439 } else {
2440 ep->ird = conn_param->ird;
2441 ep->ord = conn_param->ord;
2442 send_mpa_reject(ep, conn_param->private_data,
2443 conn_param->private_data_len);
2444 err = -ENOMEM;
2445 goto err_abort;
2446 }
2447 }
2448 if (conn_param->ird < ep->ord) {
2449 if (RELAXED_IRD_NEGOTIATION &&
2450 ep->ord <= h->rdev.adap->params.max_ordird_qp) {
2451 conn_param->ird = ep->ord;
2452 } else {
2453 err = -ENOMEM;
2454 goto err_abort;
2455 }
2456 }
2457 }
2458 ep->ird = conn_param->ird;
2459 ep->ord = conn_param->ord;
2460
2461 if (ep->mpa_attr.version == 1) {
2462 if (peer2peer && ep->ird == 0)
2463 ep->ird = 1;
2464 } else {
2465 if (peer2peer &&
2466 (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
2467 (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
2468 ep->ird = 1;
2469 }
2470
2471 CTR4(KTR_IW_CXGBE, "%s %d ird %d ord %d", __func__, __LINE__,
2472 ep->ird, ep->ord);
2473
2474 ep->com.cm_id = cm_id;
2475 ref_cm_id(&ep->com);
2476 ep->com.qp = qp;
2477 ref_qp(ep);
2478 //ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
2479
2480 /* bind QP to EP and move to RTS */
2481 attrs.mpa_attr = ep->mpa_attr;
2482 attrs.max_ird = ep->ird;
2483 attrs.max_ord = ep->ord;
2484 attrs.llp_stream_handle = ep;
2485 attrs.next_state = C4IW_QP_STATE_RTS;
2486
2487 /* bind QP and TID with INIT_WR */
2488 mask = C4IW_QP_ATTR_NEXT_STATE |
2489 C4IW_QP_ATTR_LLP_STREAM_HANDLE |
2490 C4IW_QP_ATTR_MPA_ATTR |
2491 C4IW_QP_ATTR_MAX_IRD |
2492 C4IW_QP_ATTR_MAX_ORD;
2493
2494 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
2495 if (err) {
2496 CTR3(KTR_IW_CXGBE, "%s:caca %p, err: %d", __func__, ep, err);
2497 goto err_defef_cm_id;
2498 }
2499
2500 err = send_mpa_reply(ep, conn_param->private_data,
2501 conn_param->private_data_len);
2502 if (err) {
2503 CTR3(KTR_IW_CXGBE, "%s:cacb %p, err: %d", __func__, ep, err);
2504 goto err_defef_cm_id;
2505 }
2506
2507 ep->com.state = FPDU_MODE;
2508 established_upcall(ep);
2509 mutex_unlock(&ep->com.mutex);
2510 c4iw_put_ep(&ep->com);
2511 CTR2(KTR_IW_CXGBE, "%s:cacE %p", __func__, ep);
2512 return 0;
2513 err_defef_cm_id:
2514 deref_cm_id(&ep->com);
2515 err_abort:
2516 abort = 1;
2517 err_out:
2518 if (abort)
2519 c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
2520 mutex_unlock(&ep->com.mutex);
2521 c4iw_put_ep(&ep->com);
2522 CTR2(KTR_IW_CXGBE, "%s:cacE err %p", __func__, ep);
2523 return err;
2524 }
2525
2526 static int
c4iw_sock_create(struct sockaddr_storage * laddr,struct socket ** so)2527 c4iw_sock_create(struct sockaddr_storage *laddr, struct socket **so)
2528 {
2529 int ret;
2530 int size, on;
2531 struct socket *sock = NULL;
2532 struct sockopt sopt;
2533
2534 ret = sock_create_kern(laddr->ss_family,
2535 SOCK_STREAM, IPPROTO_TCP, &sock);
2536 if (ret) {
2537 CTR2(KTR_IW_CXGBE, "%s:Failed to create TCP socket. err %d",
2538 __func__, ret);
2539 return ret;
2540 }
2541
2542 if (reuseaddr) {
2543 bzero(&sopt, sizeof(struct sockopt));
2544 sopt.sopt_dir = SOPT_SET;
2545 sopt.sopt_level = SOL_SOCKET;
2546 sopt.sopt_name = SO_REUSEADDR;
2547 on = 1;
2548 sopt.sopt_val = &on;
2549 sopt.sopt_valsize = sizeof(on);
2550 ret = -sosetopt(sock, &sopt);
2551 if (ret != 0) {
2552 log(LOG_ERR, "%s: sosetopt(%p, SO_REUSEADDR) "
2553 "failed with %d.\n", __func__, sock, ret);
2554 }
2555 bzero(&sopt, sizeof(struct sockopt));
2556 sopt.sopt_dir = SOPT_SET;
2557 sopt.sopt_level = SOL_SOCKET;
2558 sopt.sopt_name = SO_REUSEPORT;
2559 on = 1;
2560 sopt.sopt_val = &on;
2561 sopt.sopt_valsize = sizeof(on);
2562 ret = -sosetopt(sock, &sopt);
2563 if (ret != 0) {
2564 log(LOG_ERR, "%s: sosetopt(%p, SO_REUSEPORT) "
2565 "failed with %d.\n", __func__, sock, ret);
2566 }
2567 }
2568
2569 ret = -sobind(sock, (struct sockaddr *)laddr, curthread);
2570 if (ret) {
2571 CTR2(KTR_IW_CXGBE, "%s:Failed to bind socket. err %p",
2572 __func__, ret);
2573 sock_release(sock);
2574 return ret;
2575 }
2576
2577 size = laddr->ss_family == AF_INET6 ?
2578 sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in);
2579 ret = sock_getname(sock, (struct sockaddr *)laddr, &size, 0);
2580 if (ret) {
2581 CTR2(KTR_IW_CXGBE, "%s:sock_getname failed. err %p",
2582 __func__, ret);
2583 sock_release(sock);
2584 return ret;
2585 }
2586
2587 *so = sock;
2588 return 0;
2589 }
2590
c4iw_connect(struct iw_cm_id * cm_id,struct iw_cm_conn_param * conn_param)2591 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2592 {
2593 int err = 0;
2594 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2595 struct c4iw_ep *ep = NULL;
2596 if_t nh_ifp; /* Logical egress interface */
2597 struct epoch_tracker et;
2598 #ifdef VIMAGE
2599 struct rdma_cm_id *rdma_id = (struct rdma_cm_id*)cm_id->context;
2600 struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
2601 #endif
2602
2603 CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id);
2604
2605 if (__predict_false(c4iw_stopped(&dev->rdev)))
2606 return -EIO;
2607
2608 if ((conn_param->ord > c4iw_max_read_depth) ||
2609 (conn_param->ird > c4iw_max_read_depth)) {
2610
2611 CTR2(KTR_IW_CXGBE, "%s:cc1 %p", __func__, cm_id);
2612 err = -EINVAL;
2613 goto out;
2614 }
2615 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2616 cm_id->provider_data = ep;
2617
2618 init_timer(&ep->timer);
2619 ep->plen = conn_param->private_data_len;
2620
2621 if (ep->plen) {
2622
2623 CTR2(KTR_IW_CXGBE, "%s:cc3 %p", __func__, ep);
2624 memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
2625 conn_param->private_data, ep->plen);
2626 }
2627 ep->ird = conn_param->ird;
2628 ep->ord = conn_param->ord;
2629
2630 if (peer2peer && ep->ord == 0) {
2631
2632 CTR2(KTR_IW_CXGBE, "%s:cc4 %p", __func__, ep);
2633 ep->ord = 1;
2634 }
2635
2636 ep->com.dev = dev;
2637 ep->com.cm_id = cm_id;
2638 ref_cm_id(&ep->com);
2639 ep->com.qp = get_qhp(dev, conn_param->qpn);
2640
2641 if (!ep->com.qp) {
2642
2643 CTR2(KTR_IW_CXGBE, "%s:cc5 %p", __func__, ep);
2644 err = -EINVAL;
2645 goto fail;
2646 }
2647 ref_qp(ep);
2648 ep->com.thread = curthread;
2649
2650 NET_EPOCH_ENTER(et);
2651 CURVNET_SET(vnet);
2652 err = get_ifnet_from_raddr(&cm_id->remote_addr, &nh_ifp);
2653 CURVNET_RESTORE();
2654 NET_EPOCH_EXIT(et);
2655
2656 if (err) {
2657
2658 CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep);
2659 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
2660 return -EHOSTUNREACH;
2661 }
2662
2663 if (!(if_getcapenable(nh_ifp) & IFCAP_TOE) ||
2664 TOEDEV(nh_ifp) == NULL) {
2665 err = -ENOPROTOOPT;
2666 goto fail;
2667 }
2668 ep->com.state = CONNECTING;
2669 ep->tos = 0;
2670 ep->com.local_addr = cm_id->local_addr;
2671 ep->com.remote_addr = cm_id->remote_addr;
2672
2673 err = c4iw_sock_create(&cm_id->local_addr, &ep->com.so);
2674 if (err)
2675 goto fail;
2676
2677 setiwsockopt(ep->com.so);
2678 init_iwarp_socket(ep->com.so, &ep->com);
2679 err = -soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr,
2680 ep->com.thread);
2681 if (err)
2682 goto fail_free_so;
2683 CTR2(KTR_IW_CXGBE, "%s:ccE, ep %p", __func__, ep);
2684 return 0;
2685
2686 fail_free_so:
2687 uninit_iwarp_socket(ep->com.so);
2688 ep->com.state = DEAD;
2689 sock_release(ep->com.so);
2690 fail:
2691 deref_cm_id(&ep->com);
2692 c4iw_put_ep(&ep->com);
2693 ep = NULL;
2694 out:
2695 CTR2(KTR_IW_CXGBE, "%s:ccE Error %d", __func__, err);
2696 return err;
2697 }
2698
2699 /*
2700 * iwcm->create_listen. Returns -errno on failure.
2701 */
2702 int
c4iw_create_listen(struct iw_cm_id * cm_id,int backlog)2703 c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
2704 {
2705 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2706 struct c4iw_listen_ep *lep = NULL;
2707 struct listen_port_info *port_info = NULL;
2708 int rc = 0;
2709
2710 CTR3(KTR_IW_CXGBE, "%s: cm_id %p, backlog %d", __func__, cm_id,
2711 backlog);
2712 if (c4iw_stopped(&dev->rdev)) {
2713 CTR2(KTR_IW_CXGBE, "%s: cm_id %p, stopped", __func__, cm_id);
2714 return -EIO;
2715 }
2716 lep = alloc_ep(sizeof(*lep), GFP_KERNEL);
2717 lep->com.cm_id = cm_id;
2718 ref_cm_id(&lep->com);
2719 lep->com.dev = dev;
2720 lep->backlog = backlog;
2721 lep->com.local_addr = cm_id->local_addr;
2722 lep->com.thread = curthread;
2723 cm_id->provider_data = lep;
2724 lep->com.state = LISTEN;
2725
2726 /* In case of INDADDR_ANY, ibcore creates cmid for each device and
2727 * invokes iw_cxgbe listener callbacks assuming that iw_cxgbe creates
2728 * HW listeners for each device seperately. But toecore expects single
2729 * solisten() call with INADDR_ANY address to create HW listeners on
2730 * all devices for a given port number. So iw_cxgbe driver calls
2731 * solisten() only once for INADDR_ANY(usually done at first time
2732 * listener callback from ibcore). And all the subsequent INADDR_ANY
2733 * listener callbacks from ibcore(for the same port address) do not
2734 * invoke solisten() as first listener callback has already created
2735 * listeners for all other devices(via solisten).
2736 */
2737 if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr, NULL)) {
2738 port_info = add_ep_to_listenlist(lep);
2739 /* skip solisten() if refcnt > 1, as the listeners were
2740 * already created by 'Master lep'
2741 */
2742 if (port_info->refcnt > 1) {
2743 /* As there will be only one listener socket for a TCP
2744 * port, copy Master lep's socket pointer to other lep's
2745 * that are belonging to same TCP port.
2746 */
2747 struct c4iw_listen_ep *head_lep =
2748 container_of(port_info->lep_list.next,
2749 struct c4iw_listen_ep, listen_ep_list);
2750 lep->com.so = head_lep->com.so;
2751 goto out;
2752 }
2753 }
2754 rc = c4iw_sock_create(&cm_id->local_addr, &lep->com.so);
2755 if (rc) {
2756 CTR2(KTR_IW_CXGBE, "%s:Failed to create socket. err %d",
2757 __func__, rc);
2758 goto fail;
2759 }
2760
2761 rc = -solisten(lep->com.so, backlog, curthread);
2762 if (rc) {
2763 CTR3(KTR_IW_CXGBE, "%s:Failed to listen on sock:%p. err %d",
2764 __func__, lep->com.so, rc);
2765 goto fail_free_so;
2766 }
2767 init_iwarp_socket(lep->com.so, &lep->com);
2768 out:
2769 return 0;
2770
2771 fail_free_so:
2772 sock_release(lep->com.so);
2773 fail:
2774 if (port_info)
2775 rem_ep_from_listenlist(lep);
2776 deref_cm_id(&lep->com);
2777 c4iw_put_ep(&lep->com);
2778 return rc;
2779 }
2780
2781 int
c4iw_destroy_listen(struct iw_cm_id * cm_id)2782 c4iw_destroy_listen(struct iw_cm_id *cm_id)
2783 {
2784 struct c4iw_listen_ep *lep = to_listen_ep(cm_id);
2785
2786 mutex_lock(&lep->com.mutex);
2787 CTR3(KTR_IW_CXGBE, "%s: cm_id %p, state %s", __func__, cm_id,
2788 states[lep->com.state]);
2789
2790 lep->com.state = DEAD;
2791 if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr,
2792 lep->com.so->so_vnet)) {
2793 /* if no refcount then close listen socket */
2794 if (!rem_ep_from_listenlist(lep))
2795 close_socket(lep->com.so);
2796 } else
2797 close_socket(lep->com.so);
2798 deref_cm_id(&lep->com);
2799 mutex_unlock(&lep->com.mutex);
2800 c4iw_put_ep(&lep->com);
2801 return 0;
2802 }
2803
__c4iw_ep_disconnect(struct c4iw_ep * ep,int abrupt,gfp_t gfp)2804 int __c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2805 {
2806 int ret;
2807 mutex_lock(&ep->com.mutex);
2808 ret = c4iw_ep_disconnect(ep, abrupt, gfp);
2809 mutex_unlock(&ep->com.mutex);
2810 return ret;
2811 }
2812
c4iw_ep_disconnect(struct c4iw_ep * ep,int abrupt,gfp_t gfp)2813 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2814 {
2815 int ret = 0;
2816 int close = 0;
2817 struct c4iw_rdev *rdev;
2818
2819
2820 CTR2(KTR_IW_CXGBE, "%s:cedB %p", __func__, ep);
2821
2822 rdev = &ep->com.dev->rdev;
2823
2824 if (c4iw_stopped(rdev)) {
2825 CTR3(KTR_IW_CXGBE, "%s:ced1 stopped %p %s", __func__, ep,
2826 states[ep->com.state]);
2827 if (ep->com.state != DEAD) {
2828 send_abort(ep);
2829 ep->com.state = DEAD;
2830 }
2831 close_complete_upcall(ep, -ECONNRESET);
2832 return ECONNRESET;
2833 }
2834 CTR3(KTR_IW_CXGBE, "%s:ced2 %p %s", __func__, ep,
2835 states[ep->com.state]);
2836
2837 /*
2838 * Ref the ep here in case we have fatal errors causing the
2839 * ep to be released and freed.
2840 */
2841 c4iw_get_ep(&ep->com);
2842 switch (ep->com.state) {
2843
2844 case MPA_REQ_WAIT:
2845 case MPA_REQ_SENT:
2846 case MPA_REQ_RCVD:
2847 case MPA_REP_SENT:
2848 case FPDU_MODE:
2849 close = 1;
2850 if (abrupt)
2851 ep->com.state = ABORTING;
2852 else {
2853 ep->com.state = CLOSING;
2854 START_EP_TIMER(ep);
2855 }
2856 set_bit(CLOSE_SENT, &ep->com.flags);
2857 break;
2858
2859 case CLOSING:
2860
2861 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2862
2863 close = 1;
2864 if (abrupt) {
2865 STOP_EP_TIMER(ep);
2866 ep->com.state = ABORTING;
2867 } else
2868 ep->com.state = MORIBUND;
2869 }
2870 break;
2871
2872 case MORIBUND:
2873 case ABORTING:
2874 case DEAD:
2875 CTR3(KTR_IW_CXGBE,
2876 "%s ignoring disconnect ep %p state %u", __func__,
2877 ep, ep->com.state);
2878 break;
2879
2880 default:
2881 BUG();
2882 break;
2883 }
2884
2885
2886 if (close) {
2887
2888 CTR2(KTR_IW_CXGBE, "%s:ced3 %p", __func__, ep);
2889
2890 if (abrupt) {
2891
2892 CTR2(KTR_IW_CXGBE, "%s:ced4 %p", __func__, ep);
2893 set_bit(EP_DISC_ABORT, &ep->com.history);
2894 close_complete_upcall(ep, -ECONNRESET);
2895 send_abort(ep);
2896 } else {
2897
2898 CTR2(KTR_IW_CXGBE, "%s:ced5 %p", __func__, ep);
2899 set_bit(EP_DISC_CLOSE, &ep->com.history);
2900
2901 if (!ep->parent_ep)
2902 ep->com.state = MORIBUND;
2903
2904 CURVNET_SET(ep->com.so->so_vnet);
2905 ret = sodisconnect(ep->com.so);
2906 CURVNET_RESTORE();
2907 if (ret) {
2908 CTR2(KTR_IW_CXGBE, "%s:ced6 %p", __func__, ep);
2909 STOP_EP_TIMER(ep);
2910 send_abort(ep);
2911 ep->com.state = DEAD;
2912 close_complete_upcall(ep, -ECONNRESET);
2913 set_bit(EP_DISC_FAIL, &ep->com.history);
2914 if (ep->com.qp) {
2915 struct c4iw_qp_attributes attrs = {0};
2916
2917 attrs.next_state = C4IW_QP_STATE_ERROR;
2918 ret = c4iw_modify_qp(
2919 ep->com.dev, ep->com.qp,
2920 C4IW_QP_ATTR_NEXT_STATE,
2921 &attrs, 1);
2922 CTR3(KTR_IW_CXGBE, "%s:ced7 %p ret %d",
2923 __func__, ep, ret);
2924 }
2925 }
2926 }
2927 }
2928 c4iw_put_ep(&ep->com);
2929 CTR2(KTR_IW_CXGBE, "%s:cedE %p", __func__, ep);
2930 return ret;
2931 }
2932
2933 #ifdef C4IW_EP_REDIRECT
c4iw_ep_redirect(void * ctx,struct dst_entry * old,struct dst_entry * new,struct l2t_entry * l2t)2934 int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
2935 struct l2t_entry *l2t)
2936 {
2937 struct c4iw_ep *ep = ctx;
2938
2939 if (ep->dst != old)
2940 return 0;
2941
2942 PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
2943 l2t);
2944 dst_hold(new);
2945 cxgb4_l2t_release(ep->l2t);
2946 ep->l2t = l2t;
2947 dst_release(old);
2948 ep->dst = new;
2949 return 1;
2950 }
2951 #endif
2952
2953
2954
ep_timeout(unsigned long arg)2955 static void ep_timeout(unsigned long arg)
2956 {
2957 struct c4iw_ep *ep = (struct c4iw_ep *)arg;
2958
2959 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
2960
2961 /*
2962 * Only insert if it is not already on the list.
2963 */
2964 if (!(ep->com.ep_events & C4IW_EVENT_TIMEOUT)) {
2965 CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep);
2966 add_ep_to_req_list(ep, C4IW_EVENT_TIMEOUT);
2967 }
2968 }
2969 }
2970
fw6_wr_rpl(struct adapter * sc,const __be64 * rpl)2971 static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl)
2972 {
2973 uint64_t val = be64toh(*rpl);
2974 int ret;
2975 struct c4iw_wr_wait *wr_waitp;
2976
2977 ret = (int)((val >> 8) & 0xff);
2978 wr_waitp = (struct c4iw_wr_wait *)rpl[1];
2979 CTR3(KTR_IW_CXGBE, "%s wr_waitp %p ret %u", __func__, wr_waitp, ret);
2980 if (wr_waitp)
2981 c4iw_wake_up(wr_waitp, ret ? -ret : 0);
2982
2983 return (0);
2984 }
2985
fw6_cqe_handler(struct adapter * sc,const __be64 * rpl)2986 static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl)
2987 {
2988 struct cqe_list_entry *cle;
2989 unsigned long flag;
2990
2991 cle = malloc(sizeof(*cle), M_CXGBE, M_NOWAIT);
2992 cle->rhp = sc->iwarp_softc;
2993 cle->err_cqe = *(const struct t4_cqe *)(&rpl[0]);
2994
2995 spin_lock_irqsave(&err_cqe_lock, flag);
2996 list_add_tail(&cle->entry, &err_cqe_list);
2997 queue_work(c4iw_taskq, &c4iw_task);
2998 spin_unlock_irqrestore(&err_cqe_lock, flag);
2999
3000 return (0);
3001 }
3002
3003 static int
process_terminate(struct c4iw_ep * ep)3004 process_terminate(struct c4iw_ep *ep)
3005 {
3006 struct c4iw_qp_attributes attrs = {0};
3007
3008 CTR2(KTR_IW_CXGBE, "%s:tB %p %d", __func__, ep);
3009
3010 if (ep && ep->com.qp) {
3011
3012 printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n",
3013 ep->hwtid, ep->com.qp->wq.sq.qid);
3014 attrs.next_state = C4IW_QP_STATE_TERMINATE;
3015 c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs,
3016 1);
3017 } else
3018 printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n",
3019 ep->hwtid);
3020 CTR2(KTR_IW_CXGBE, "%s:tE %p %d", __func__, ep);
3021
3022 return 0;
3023 }
3024
c4iw_cm_init(void)3025 int __init c4iw_cm_init(void)
3026 {
3027
3028 t4_register_cpl_handler(CPL_RDMA_TERMINATE, terminate);
3029 t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, fw6_wr_rpl);
3030 t4_register_fw_msg_handler(FW6_TYPE_CQE, fw6_cqe_handler);
3031 t4_register_an_handler(c4iw_ev_handler);
3032
3033 TAILQ_INIT(&req_list);
3034 spin_lock_init(&req_lock);
3035 INIT_LIST_HEAD(&err_cqe_list);
3036 spin_lock_init(&err_cqe_lock);
3037
3038 INIT_WORK(&c4iw_task, process_req);
3039
3040 c4iw_taskq = create_singlethread_workqueue("iw_cxgbe");
3041 if (!c4iw_taskq)
3042 return -ENOMEM;
3043
3044 return 0;
3045 }
3046
c4iw_cm_term(void)3047 void __exit c4iw_cm_term(void)
3048 {
3049 WARN_ON(!TAILQ_EMPTY(&req_list));
3050 WARN_ON(!list_empty(&err_cqe_list));
3051 flush_workqueue(c4iw_taskq);
3052 destroy_workqueue(c4iw_taskq);
3053
3054 t4_register_cpl_handler(CPL_RDMA_TERMINATE, NULL);
3055 t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, NULL);
3056 t4_register_fw_msg_handler(FW6_TYPE_CQE, NULL);
3057 t4_register_an_handler(NULL);
3058 }
3059 #endif
3060