// drivers/infiniband/sw/rxe/rxe_req.c (revision dd093fb0)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <crypto/hash.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode);

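/* Re-walk the first, partially acknowledged write/send wqe of a retry:
 * advance the dma state past the npsn packets the peer has already
 * acknowledged so retransmission resumes at the first unacked packet.
 */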
static inline void retry_first_write_send(struct rxe_qp *qp,
					  struct rxe_send_wqe *wqe, int npsn)
{
	int i;

	for (i = 0; i < npsn; i++) {
		int to_send = (wqe->dma.resid > qp->mtu) ?
				qp->mtu : wqe->dma.resid;

		qp->req.opcode = next_opcode(qp, wqe,
					     wqe->wr.opcode);

		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			wqe->dma.resid -= to_send;
			wqe->dma.sge_offset += to_send;
		} else {
			advance_dma_data(&wqe->dma, to_send);
		}
	}
}

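/* Rewind the requester to the completer's psn and mark every wqe that
 * has not fully completed as posted so it will be resent. Only the
 * first incomplete wqe can be partially acked; its dma/iova state is
 * advanced past the acked packets instead of being fully reset.
 */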
static void req_retry(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned int wqe_index;
	unsigned int mask;
	int npsn;
	int first = 1;
	struct rxe_queue *q = qp->sq.queue;
	unsigned int cons;
	unsigned int prod;

	cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);

	qp->req.wqe_index	= cons;
	qp->req.psn		= qp->comp.psn;
	qp->req.opcode		= -1;

	for (wqe_index = cons; wqe_index != prod;
			wqe_index = queue_next_index(q, wqe_index)) {
		wqe = queue_addr_from_index(qp->sq.queue, wqe_index);
		mask = wr_opcode_mask(wqe->wr.opcode, qp);

		if (wqe->state == wqe_state_posted)
			break;

		if (wqe->state == wqe_state_done)
			continue;

		wqe->iova = (mask & WR_ATOMIC_MASK) ?
			     wqe->wr.wr.atomic.remote_addr :
			     (mask & WR_READ_OR_WRITE_MASK) ?
			     wqe->wr.wr.rdma.remote_addr :
			     0;

		if (!first || (mask & WR_READ_MASK) == 0) {
			wqe->dma.resid = wqe->dma.length;
			wqe->dma.cur_sge = 0;
			wqe->dma.sge_offset = 0;
		}

		if (first) {
			first = 0;

			if (mask & WR_WRITE_OR_SEND_MASK) {
				npsn = (qp->comp.psn - wqe->first_psn) &
					BTH_PSN_MASK;
				retry_first_write_send(qp, wqe, npsn);
			}

			if (mask & WR_READ_MASK) {
				npsn = (wqe->dma.length - wqe->dma.resid) /
					qp->mtu;
				wqe->iova += npsn * qp->mtu;
			}
		}

		wqe->state = wqe_state_posted;
	}
}

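/* Expiry handler for the RNR NAK backoff timer; once it fires the
 * requester is allowed to start the retry flow.
 */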
void rnr_nak_timer(struct timer_list *t)
{
	struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);

	rxe_dbg_qp(qp, "nak timer fired\n");

	/* request a send queue retry */
	qp->req.need_retry = 1;
	qp->req.wait_for_rnr_timer = 0;
	rxe_sched_task(&qp->req.task);
}

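/* Return the next send wqe to process, or NULL if the queue is empty
 * or the qp is draining. As a side effect, detect completion of a
 * sq drain and raise the IB_EVENT_SQ_DRAINED event.
 */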
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	struct rxe_queue *q = qp->sq.queue;
	unsigned int index = qp->req.wqe_index;
	unsigned int cons;
	unsigned int prod;

	wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
	cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);

	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
		/* check to see if we are drained;
		 * state_lock used by requester and completer
		 */
		spin_lock_bh(&qp->state_lock);
		do {
			if (qp->req.state != QP_STATE_DRAIN) {
				/* comp just finished */
				spin_unlock_bh(&qp->state_lock);
				break;
			}

			if (wqe && ((index != cons) ||
				(wqe->state != wqe_state_posted))) {
				/* comp not done yet */
				spin_unlock_bh(&qp->state_lock);
				break;
			}

			qp->req.state = QP_STATE_DRAINED;
			spin_unlock_bh(&qp->state_lock);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
		} while (0);
	}

	if (index == prod)
		return NULL;

	wqe = queue_addr_from_index(q, index);

	if (unlikely((qp->req.state == QP_STATE_DRAIN ||
		      qp->req.state == QP_STATE_DRAINED) &&
		     (wqe->state != wqe_state_processing)))
		return NULL;

	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
	return wqe;
}

/**
 * rxe_wqe_is_fenced - check if next wqe is fenced
 * @qp: the queue pair
 * @wqe: the next wqe
 *
 * Returns: 1 if wqe needs to wait
 *	    0 if wqe is ready to go
 */
static int rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	/* Local invalidate fence (LIF), see IBA 10.6.5.1.
	 * Requires that ALL previous operations on the send queue
	 * are complete. Made mandatory by the rxe driver.
	 */
	if (wqe->wr.opcode == IB_WR_LOCAL_INV)
		return qp->req.wqe_index != queue_get_consumer(qp->sq.queue,
						QUEUE_TYPE_FROM_CLIENT);

	/* Fence, see IBA 10.8.3.3.
	 * Requires that all previous read and atomic operations
	 * are complete.
	 */
	return (wqe->wr.send_flags & IB_SEND_FENCE) &&
		atomic_read(&qp->req.rd_atomic) != qp->attr.max_rd_atomic;
}

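/* Map a work request opcode to the wire opcode of the next packet for
 * an RC qp. Multi-packet messages are segmented FIRST, MIDDLE*, LAST;
 * a message whose remaining payload fits in one mtu ('fits') uses
 * LAST when mid-message and ONLY otherwise. qp->req.opcode, the
 * opcode of the previous packet sent, tells us whether we are
 * mid-message.
 *
 * Illustrative example (hypothetical values): a 5000 byte RDMA write
 * over a 2048 byte mtu is emitted as WRITE_FIRST (2048 bytes),
 * WRITE_MIDDLE (2048 bytes) and WRITE_LAST (904 bytes).
 */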
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_FLUSH:
		return IB_OPCODE_RC_FLUSH;

	case IB_WR_RDMA_READ:
		return IB_OPCODE_RC_RDMA_READ_REQUEST;

	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_OPCODE_RC_COMPARE_SWAP;

	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_OPCODE_RC_FETCH_ADD;

	case IB_WR_SEND_WITH_INV:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_ATOMIC_WRITE:
		return IB_OPCODE_RC_ATOMIC_WRITE;

	case IB_WR_REG_MR:
	case IB_WR_LOCAL_INV:
		return opcode;
	}

	return -EINVAL;
}

static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY :
				IB_OPCODE_UC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_FIRST;
	}

	return -EINVAL;
}

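/* Dispatch to the per-transport opcode state machine. UD and GSI qps
 * only support single-packet sends, so they map directly to the
 * SEND_ONLY variants.
 */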
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode)
{
	int fits = (wqe->dma.resid <= qp->mtu);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return next_opcode_rc(qp, opcode, fits);

	case IB_QPT_UC:
		return next_opcode_uc(qp, opcode, fits);

	case IB_QPT_UD:
	case IB_QPT_GSI:
		switch (opcode) {
		case IB_WR_SEND:
			return IB_OPCODE_UD_SEND_ONLY;

		case IB_WR_SEND_WITH_IMM:
			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		}
		break;

	default:
		break;
	}

	return -EINVAL;
}

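/* Reserve one of the qp's max_rd_atomic credits for an outbound read
 * or atomic request. Returns -EAGAIN when every credit is in flight;
 * need_rd_atomic is left set so the requester can be rescheduled once
 * a credit is returned.
 */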
static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int depth;

	if (wqe->has_rd_atomic)
		return 0;

	qp->req.need_rd_atomic = 1;
	depth = atomic_dec_return(&qp->req.rd_atomic);

	if (depth >= 0) {
		qp->req.need_rd_atomic = 0;
		wqe->has_rd_atomic = 1;
		return 0;
	}

	atomic_inc(&qp->req.rd_atomic);
	return -EAGAIN;
}

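/* Connected qps (RC/UC) use the negotiated path mtu; datagram qps use
 * the port's mtu cap.
 */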
static inline int get_mtu(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
		return qp->mtu;

	return rxe->port.mtu_cap;
}

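/* Allocate the skb for the next request packet and build its headers:
 * the bth always, plus the reth/feth/immdt/ieth/atmeth/deth extension
 * headers selected by rxe_opcode[opcode].mask. payload is the number
 * of payload bytes this packet carries; pad rounds the payload up to
 * a 4-byte boundary and the icrc trailer is reserved at the end.
 */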
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
				       struct rxe_av *av,
				       struct rxe_send_wqe *wqe,
				       int opcode, u32 payload,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_dev		*rxe = to_rdev(qp->ibqp.device);
	struct sk_buff		*skb;
	struct rxe_send_wr	*ibwr = &wqe->wr;
	int			pad = (-payload) & 0x3;
	int			paylen;
	int			solicited;
	u32			qp_num;
	int			ack_req;

	/* length from start of bth to end of icrc */
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
	pkt->paylen = paylen;

	/* init skb */
	skb = rxe_init_packet(rxe, av, paylen, pkt);
	if (unlikely(!skb))
		return NULL;

	/* init bth */
	solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
			(pkt->mask & RXE_END_MASK) &&
			((pkt->mask & (RXE_SEND_MASK)) ||
			(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
			(RXE_WRITE_MASK | RXE_IMMDT_MASK));

	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
					 qp->attr.dest_qp_num;

	ack_req = ((pkt->mask & RXE_END_MASK) ||
		(qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
	if (ack_req)
		qp->req.noack_pkts = 0;

	bth_init(pkt, pkt->opcode, solicited, 0, pad, IB_DEFAULT_PKEY_FULL, qp_num,
		 ack_req, pkt->psn);

	/* init optional headers */
	if (pkt->mask & RXE_RETH_MASK) {
		if (pkt->mask & RXE_FETH_MASK)
			reth_set_rkey(pkt, ibwr->wr.flush.rkey);
		else
			reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
		reth_set_va(pkt, wqe->iova);
		reth_set_len(pkt, wqe->dma.resid);
	}

	/* Fill Flush Extension Transport Header */
	if (pkt->mask & RXE_FETH_MASK)
		feth_init(pkt, ibwr->wr.flush.type, ibwr->wr.flush.level);

	if (pkt->mask & RXE_IMMDT_MASK)
		immdt_set_imm(pkt, ibwr->ex.imm_data);

	if (pkt->mask & RXE_IETH_MASK)
		ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

	if (pkt->mask & RXE_ATMETH_MASK) {
		atmeth_set_va(pkt, wqe->iova);
		if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
		} else {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
		}
		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
	}

	if (pkt->mask & RXE_DETH_MASK) {
		if (qp->ibqp.qp_num == 1)
			deth_set_qkey(pkt, GSI_QKEY);
		else
			deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
		deth_set_sqp(pkt, qp->ibqp.qp_num);
	}

	return skb;
}

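/* Complete the packet after the headers are built: prepare the network
 * headers and fill in the payload, either from the wqe's inline data,
 * by copying from the sge list, or from the atomic write buffer. Any
 * pad bytes are zeroed; flush requests carry no payload (oA19-2).
 */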
static int finish_packet(struct rxe_qp *qp, struct rxe_av *av,
			 struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt,
			 struct sk_buff *skb, u32 payload)
{
	int err;

	err = rxe_prepare(av, pkt, skb);
	if (err)
		return err;

	if (pkt->mask & RXE_WRITE_OR_SEND_MASK) {
		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

			memcpy(payload_addr(pkt), tmp, payload);

			wqe->dma.resid -= payload;
			wqe->dma.sge_offset += payload;
		} else {
			err = copy_data(qp->pd, 0, &wqe->dma,
					payload_addr(pkt), payload,
					RXE_FROM_MR_OBJ);
			if (err)
				return err;
		}
		if (bth_pad(pkt)) {
			u8 *pad = payload_addr(pkt) + payload;

			memset(pad, 0, bth_pad(pkt));
		}
	} else if (pkt->mask & RXE_FLUSH_MASK) {
		/* oA19-2: shall have no payload. */
		wqe->dma.resid = 0;
	}

	if (pkt->mask & RXE_ATOMIC_WRITE_MASK) {
		memcpy(payload_addr(pkt), wqe->dma.atomic_wr, payload);
		wqe->dma.resid -= payload;
	}

	return 0;
}

static void update_wqe_state(struct rxe_qp *qp,
		struct rxe_send_wqe *wqe,
		struct rxe_pkt_info *pkt)
{
	if (pkt->mask & RXE_END_MASK) {
		if (qp_type(qp) == IB_QPT_RC)
			wqe->state = wqe_state_pending;
	} else {
		wqe->state = wqe_state_processing;
	}
}

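/* Advance the psn bookkeeping for this packet. On the first packet of
 * a message record the first and last psn the message will consume;
 * reads advance req.psn by one psn per expected response packet,
 * everything else by one psn per request packet.
 *
 * Illustrative example (hypothetical values): with a 1024 byte mtu
 * and a 2500 byte send, the first packet leaves resid = 1476 after
 * its 1024 byte payload (finish_packet() has already subtracted it),
 * so num_pkt = (1476 + 1024 + 1023) / 1024 = 3 and
 * last_psn = first_psn + 2.
 */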
static void update_wqe_psn(struct rxe_qp *qp,
			   struct rxe_send_wqe *wqe,
			   struct rxe_pkt_info *pkt,
			   u32 payload)
{
	/* number of packets left to send including current one */
	int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

	/* handle zero length packet case */
	if (num_pkt == 0)
		num_pkt = 1;

	if (pkt->mask & RXE_START_MASK) {
		wqe->first_psn = qp->req.psn;
		wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
	}

	if (pkt->mask & RXE_READ_MASK)
		qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
	else
		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}

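/* save_state()/rollback_state() snapshot and restore the wqe and psn
 * fields that update_wqe_state() and update_wqe_psn() modify, so a
 * failed transmit can be undone without the completer ever seeing the
 * speculative state.
 */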
static void save_state(struct rxe_send_wqe *wqe,
		       struct rxe_qp *qp,
		       struct rxe_send_wqe *rollback_wqe,
		       u32 *rollback_psn)
{
	rollback_wqe->state     = wqe->state;
	rollback_wqe->first_psn = wqe->first_psn;
	rollback_wqe->last_psn  = wqe->last_psn;
	*rollback_psn		= qp->req.psn;
}

static void rollback_state(struct rxe_send_wqe *wqe,
			   struct rxe_qp *qp,
			   struct rxe_send_wqe *rollback_wqe,
			   u32 rollback_psn)
{
	wqe->state     = rollback_wqe->state;
	wqe->first_psn = rollback_wqe->first_psn;
	wqe->last_psn  = rollback_wqe->last_psn;
	qp->req.psn    = rollback_psn;
}

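/* Commit the packet: record the opcode just sent, advance to the next
 * wqe if this was the last packet of the message, and (re)arm the
 * retransmit timer if the qp has a timeout configured.
 */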
static void update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	qp->req.opcode = pkt->opcode;

	if (pkt->mask & RXE_END_MASK)
		qp->req.wqe_index = queue_next_index(qp->sq.queue,
						     qp->req.wqe_index);

	qp->need_req_skb = 0;

	if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
		mod_timer(&qp->retrans_timer,
			  jiffies + qp->qp_timeout_jiffies);
}

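/* Execute work requests that complete locally without putting a packet
 * on the wire (local invalidate, fast-reg mr, bind mw). Since no ack
 * will ever arrive for these, the completer task is scheduled directly
 * to retire the wqe.
 */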
static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	u8 opcode = wqe->wr.opcode;
	u32 rkey;
	int ret;

	switch (opcode) {
	case IB_WR_LOCAL_INV:
		rkey = wqe->wr.ex.invalidate_rkey;
		if (rkey_is_mw(rkey))
			ret = rxe_invalidate_mw(qp, rkey);
		else
			ret = rxe_invalidate_mr(qp, rkey);

		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_REG_MR:
		ret = rxe_reg_fast_mr(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_BIND_MW:
		ret = rxe_bind_mw(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_MW_BIND_ERR;
			return ret;
		}
		break;
	default:
		rxe_dbg_qp(qp, "Unexpected send wqe opcode %d\n", opcode);
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		return -EINVAL;
	}

	wqe->state = wqe_state_done;
	wqe->status = IB_WC_SUCCESS;
	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);

	/* There is no ack coming for local work requests
	 * which can lead to a deadlock. So go ahead and complete
	 * it now.
	 */
	rxe_sched_task(&qp->comp.task);

	return 0;
}

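/* The requester tasklet: process one send wqe per invocation. Pick the
 * next wqe, honor fences and flow-control limits, choose the wire
 * opcode, build and transmit one packet, then commit or roll back the
 * psn/wqe state depending on the transmit result. Returns 0 to keep
 * the task loop running and -EAGAIN to stop it.
 */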
int rxe_requester(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_pkt_info pkt;
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;
	enum rxe_hdr_mask mask;
	u32 payload;
	int mtu;
	int opcode;
	int err;
	int ret;
	struct rxe_send_wqe rollback_wqe;
	u32 rollback_psn;
	struct rxe_queue *q = qp->sq.queue;
	struct rxe_ah *ah;
	struct rxe_av *av;

	if (!rxe_get(qp))
		return -EAGAIN;

	if (unlikely(!qp->valid))
		goto exit;

	if (unlikely(qp->req.state == QP_STATE_ERROR)) {
		wqe = req_next_wqe(qp);
		if (wqe)
			/*
			 * Generate an error completion for error qp state
			 */
			goto err;
		else
			goto exit;
	}

	if (unlikely(qp->req.state == QP_STATE_RESET)) {
		qp->req.wqe_index = queue_get_consumer(q,
						QUEUE_TYPE_FROM_CLIENT);
		qp->req.opcode = -1;
		qp->req.need_rd_atomic = 0;
		qp->req.wait_psn = 0;
		qp->req.need_retry = 0;
		qp->req.wait_for_rnr_timer = 0;
		goto exit;
	}

	/* We come here if the retransmit timer or the rnr timer has
	 * fired. If the retransmit timer fires while we are processing
	 * an RNR NAK, wait until the rnr timer has fired before
	 * starting the retry flow.
	 */
	if (unlikely(qp->req.need_retry && !qp->req.wait_for_rnr_timer)) {
		req_retry(qp);
		qp->req.need_retry = 0;
	}

	wqe = req_next_wqe(qp);
	if (unlikely(!wqe))
		goto exit;

	if (rxe_wqe_is_fenced(qp, wqe)) {
		qp->req.wait_fence = 1;
		goto exit;
	}

	if (wqe->mask & WR_LOCAL_OP_MASK) {
		err = rxe_do_local_ops(qp, wqe);
		if (unlikely(err))
			goto err;
		else
			goto done;
	}

	if (unlikely(qp_type(qp) == IB_QPT_RC &&
		psn_compare(qp->req.psn, (qp->comp.psn +
				RXE_MAX_UNACKED_PSNS)) > 0)) {
		qp->req.wait_psn = 1;
		goto exit;
	}

	/* Limit the number of inflight SKBs per QP */
	if (unlikely(atomic_read(&qp->skb_out) >
		     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
		qp->need_req_skb = 1;
		goto exit;
	}

	opcode = next_opcode(qp, wqe, wqe->wr.opcode);
	if (unlikely(opcode < 0)) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	mask = rxe_opcode[opcode].mask;
	if (unlikely(mask & (RXE_READ_OR_ATOMIC_MASK |
			RXE_ATOMIC_WRITE_MASK))) {
		if (check_init_depth(qp, wqe))
			goto exit;
	}

	mtu = get_mtu(qp);
	payload = (mask & (RXE_WRITE_OR_SEND_MASK | RXE_ATOMIC_WRITE_MASK)) ?
			wqe->dma.resid : 0;
	if (payload > mtu) {
		if (qp_type(qp) == IB_QPT_UD) {
			/* C10-93.1.1: If the total sum of all the buffer lengths specified for a
			 * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
			 * shall not emit any packets for this message. Further, the CI shall not
			 * generate an error due to this condition.
			 */

			/* fake a successful UD send */
			wqe->first_psn = qp->req.psn;
			wqe->last_psn = qp->req.psn;
			qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
			qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
			qp->req.wqe_index = queue_next_index(qp->sq.queue,
						       qp->req.wqe_index);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
			rxe_run_task(&qp->comp.task);
			goto done;
		}
		payload = mtu;
	}

	pkt.rxe = rxe;
	pkt.opcode = opcode;
	pkt.qp = qp;
	pkt.psn = qp->req.psn;
	pkt.mask = rxe_opcode[opcode].mask;
	pkt.wqe = wqe;

	av = rxe_get_av(&pkt, &ah);
	if (unlikely(!av)) {
		rxe_dbg_qp(qp, "Failed no address vector\n");
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	skb = init_req_packet(qp, av, wqe, opcode, payload, &pkt);
	if (unlikely(!skb)) {
		rxe_dbg_qp(qp, "Failed allocating skb\n");
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		if (ah)
			rxe_put(ah);
		goto err;
	}

	err = finish_packet(qp, av, wqe, &pkt, skb, payload);
	if (unlikely(err)) {
		rxe_dbg_qp(qp, "Error during finish packet\n");
		if (err == -EFAULT)
			wqe->status = IB_WC_LOC_PROT_ERR;
		else
			wqe->status = IB_WC_LOC_QP_OP_ERR;
		kfree_skb(skb);
		if (ah)
			rxe_put(ah);
		goto err;
	}

	if (ah)
		rxe_put(ah);

	/*
	 * To prevent a race on wqe access between requester and completer,
	 * wqe members state and psn need to be set before calling
	 * rxe_xmit_packet().
	 * Otherwise, completer might initiate an unjustified retry flow.
	 */
	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
	update_wqe_state(qp, wqe, &pkt);
	update_wqe_psn(qp, wqe, &pkt, payload);

	err = rxe_xmit_packet(qp, &pkt, skb);
	if (err) {
		qp->need_req_skb = 1;

		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

		if (err == -EAGAIN) {
			rxe_sched_task(&qp->req.task);
			goto exit;
		}

		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	update_state(qp, &pkt);

	/* A non-zero return value will cause rxe_do_task to
	 * exit its loop and end the tasklet. A zero return
	 * will continue looping and return to rxe_requester.
	 */
done:
	ret = 0;
	goto out;
err:
	/* update wqe_index for each wqe completion */
	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
	wqe->state = wqe_state_error;
	qp->req.state = QP_STATE_ERROR;
	rxe_run_task(&qp->comp.task);
exit:
	ret = -EAGAIN;
out:
	rxe_put(qp);

	return ret;
}