// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015-2020, Oracle and/or its affiliates.
 *
 * Support for reverse-direction RPCs on RPC/RDMA.
 */

#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#undef RPCRDMA_BACKCHANNEL_DEBUG

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

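	/* Each accepted backchannel request consumes both a Receive and
	 * a Send Work Request, so advertise only half of the WRs that
	 * RPCRDMA_BACKWARD_WRS reserves for the backward direction.
	 */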
	r_xprt->rx_buf.rb_bc_srv_max_requests = RPCRDMA_BACKWARD_WRS >> 1;
	trace_xprtrdma_cb_setup(r_xprt, reqs);
	return 0;
}

/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	size_t maxmsg;

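	/* A backchannel message must fit entirely in a single inline
	 * buffer; the transport header takes up part of that space.
	 */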
	maxmsg = min_t(unsigned int, ep->re_inline_send, ep->re_inline_recv);
	maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
	return maxmsg - RPCRDMA_HDRLEN_MIN;
}

unsigned int xprt_rdma_bc_max_slots(struct rpc_xprt *xprt)
{
	return RPCRDMA_BACKWARD_WRS >> 1;
}

static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	__be32 *p;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
			rdmab_data(req->rl_rdmabuf), rqst);

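	/* Reserve seven XDR words (RPCRDMA_HDRLEN_MIN is 28 bytes) for
	 * the fixed fields of the RPC/RDMA transport header.
	 */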
	p = xdr_reserve_space(&req->rl_stream, 28);
	if (unlikely(!p))
		return -EIO;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
	*p++ = rdma_msg;
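	/* An empty read list, write list, and reply chunk */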
	*p++ = xdr_zero;
	*p++ = xdr_zero;
	*p = xdr_zero;

	if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
				      &rqst->rq_snd_buf, rpcrdma_noch_pullup))
		return -EIO;

	trace_xprtrdma_cb_reply(r_xprt, rqst);
	return 0;
}

/**
 * xprt_rdma_bc_send_reply - marshal and send a backchannel reply
 * @rqst: RPC rqst with a backchannel RPC reply in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *	%0 if the RPC message has been sent
 *	%-ENOTCONN if the caller should reconnect and call again
 *	%-EIO if a permanent error occurred and the request was not
 *		sent. Do not try to send this message again.
 */
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	int rc;

	if (!xprt_connected(xprt))
		return -ENOTCONN;

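	/* Backchannel replies go through congestion control; fail the
	 * send if no credit is currently available.
	 */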
	if (!xprt_request_get_cong(xprt, rqst))
		return -EBADSLT;

	rc = rpcrdma_bc_marshal_reply(rqst);
	if (rc < 0)
		goto failed_marshal;

	if (rpcrdma_post_sends(r_xprt, req))
		goto drop_connection;
	return 0;

failed_marshal:
	if (rc != -ENOTCONN)
		return rc;
drop_connection:
	xprt_rdma_close(xprt);
	return -ENOTCONN;
}

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpc_rqst *rqst, *tmp;

	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		list_del(&rqst->rq_bc_pa_list);
		spin_unlock(&xprt->bc_pa_lock);

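		/* The rqst has been removed from bc_pa_list, so it is
		 * safe to destroy it without holding bc_pa_lock.
		 */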
		rpcrdma_req_destroy(rpcr_to_rdmar(rqst));

		spin_lock(&xprt->bc_pa_lock);
	}
	spin_unlock(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_rep *rep = req->rl_reply;
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

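	/* Return the rep that carried the Call, then park the rqst
	 * on bc_pa_list for re-use by a later Call.
	 */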
	rpcrdma_rep_put(&r_xprt->rx_buf, rep);
	req->rl_reply = NULL;

	spin_lock(&xprt->bc_pa_lock);
	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
	xprt_put(xprt);
}

static struct rpc_rqst *rpcrdma_bc_rqst_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	size_t size;

	spin_lock(&xprt->bc_pa_lock);
	rqst = list_first_entry_or_null(&xprt->bc_pa_list, struct rpc_rqst,
					rq_bc_pa_list);
	if (!rqst)
		goto create_req;
	list_del(&rqst->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
	return rqst;

create_req:
	spin_unlock(&xprt->bc_pa_lock);

	/* Set a limit to prevent a remote from overrunning our resources.
	 */
	if (xprt->bc_alloc_count >= RPCRDMA_BACKWARD_WRS)
		return NULL;

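	/* Backchannel messages are sent and received inline, so the
	 * buffer size is capped at the inline threshold, at most one
	 * page.
	 */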
	size = min_t(size_t, r_xprt->rx_ep->re_inline_recv, PAGE_SIZE);
	req = rpcrdma_req_create(r_xprt, size, GFP_KERNEL);
	if (!req)
		return NULL;
	if (rpcrdma_req_setup(r_xprt, req)) {
		rpcrdma_req_destroy(req);
		return NULL;
	}

	xprt->bc_alloc_count++;
	rqst = &req->rl_slot;
	rqst->rq_xprt = xprt;
	__set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
	xdr_buf_init(&rqst->rq_snd_buf, rdmab_data(req->rl_sendbuf), size);
	return rqst;
}

/**
 * rpcrdma_bc_receive_call - Handle a reverse-direction Call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server
 *      currently ignores forechannel credits.
 *    o The ULP manages a replay cache (e.g., NFSv4.1 sessions).
 *      No replay detection is done at the transport level.
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
			     struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct svc_serv *bc_serv;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct xdr_buf *buf;
	size_t size;
	__be32 *p;

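	/* A zero-length inline decode returns the current position
	 * in the receive buffer without consuming any of it.
	 */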
	p = xdr_inline_decode(&rep->rr_stream, 0);
	size = xdr_stream_remaining(&rep->rr_stream);

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC:       %s: callback XID %08x, length=%zu\n",
		__func__, be32_to_cpup(p), size);
	pr_info("RPC:       %s: %*ph\n", __func__, (int)size, p);
#endif

	rqst = rpcrdma_bc_rqst_get(r_xprt);
	if (!rqst)
		goto out_overflow;

	rqst->rq_reply_bytes_recvd = 0;
	rqst->rq_xid = *p;

	rqst->rq_private_buf.len = size;

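	/* Point the receive xdr_buf directly at the incoming Call
	 * message; the message payload is not copied.
	 */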
	buf = &rqst->rq_rcv_buf;
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = p;
	buf->head[0].iov_len = size;
	buf->len = size;

	/* The receive buffer has to be hooked to the rpcrdma_req
	 * so that it is not released while the req is pointing
	 * to its buffer, and so that it can be reposted after
	 * the Upper Layer is done decoding it.
	 */
	req = rpcr_to_rdmar(rqst);
	req->rl_reply = rep;
	trace_xprtrdma_cb_call(r_xprt, rqst);

	/* Queue rqst for ULP's callback service */
	bc_serv = xprt->bc_serv;
	xprt_get(xprt);
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
	spin_unlock(&bc_serv->sv_cb_lock);

	wake_up(&bc_serv->sv_cb_waitq);

	r_xprt->rx_stats.bcall_count++;
	return;

out_overflow:
	pr_warn("RPC/RDMA backchannel overflow\n");
	xprt_force_disconnect(xprt);
	/* This receive buffer gets reposted automatically
	 * when the connection is re-established.
	 */
	return;
}