/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/include/linux/sunrpc/svc.h
 *
 * RPC server declarations.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */


#ifndef SUNRPC_SVC_H
#define SUNRPC_SVC_H

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/lwq.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/pagevec.h>

/*
 *
 * RPC service thread pool.
 *
 * Pool of threads and temporary sockets.  Generally there is only
 * a single one of these per RPC service, but on NUMA machines those
 * services that can benefit from it (i.e. nfs but not lockd) will
 * have one pool per NUMA node.  This optimisation reduces cross-
 * node traffic on multi-node NUMA NFS servers.
 */
struct svc_pool {
	unsigned int		sp_id;	    	/* pool id; also node id on NUMA */
	struct lwq		sp_xprts;	/* pending transports */
	atomic_t		sp_nrthreads;	/* # of threads in pool */
	struct list_head	sp_all_threads;	/* all server threads */
	struct llist_head	sp_idle_threads; /* idle server threads */

	/* statistics on pool operation */
	struct percpu_counter	sp_messages_arrived;
	struct percpu_counter	sp_sockets_queued;
	struct percpu_counter	sp_threads_woken;

	unsigned long		sp_flags;
} ____cacheline_aligned_in_smp;

/* bits for sp_flags */
enum {
	SP_TASK_PENDING,	/* still work to do even if no xprt is queued */
	SP_NEED_VICTIM,		/* One thread needs to agree to exit */
	SP_VICTIM_REMAINS,	/* One thread needs to actually exit */
};


/*
 * RPC service.
 *
 * An RPC service is a ``daemon,'' possibly multithreaded, which
 * receives and processes incoming RPC messages.
 * It has one or more transport sockets associated with it, and maintains
 * a list of idle threads waiting for input.
 *
 * We currently do not support more than one RPC program per daemon.
 */
struct svc_serv {
	struct svc_program *	sv_program;	/* RPC program */
	struct svc_stat *	sv_stats;	/* RPC statistics */
	spinlock_t		sv_lock;
	unsigned int		sv_nrthreads;	/* # of server threads */
	unsigned int		sv_maxconn;	/* max connections allowed or
						 * '0' causing max to be based
						 * on number of threads. */

	unsigned int		sv_max_payload;	/* datagram payload size */
	unsigned int		sv_max_mesg;	/* max_payload + 1 page for overheads */
	unsigned int		sv_xdrsize;	/* XDR buffer size */
	struct list_head	sv_permsocks;	/* all permanent sockets */
	struct list_head	sv_tempsocks;	/* all temporary sockets */
	int			sv_tmpcnt;	/* count of temporary sockets */
	struct timer_list	sv_temptimer;	/* timer for aging temporary sockets */

	char *			sv_name;	/* service name */

	unsigned int		sv_nrpools;	/* number of thread pools */
	struct svc_pool *	sv_pools;	/* array of thread pools */
	int			(*sv_threadfn)(void *data);

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	struct lwq		sv_cb_list;	/* queue for callback requests
						 * that arrive over the same
						 * connection */
	bool			sv_bc_enabled;	/* service uses backchannel */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};

/* This is used by pool_stats to find and lock an svc */
struct svc_info {
	struct svc_serv		*serv;
	struct mutex		*mutex;
};

void svc_destroy(struct svc_serv **svcp);

/*
 * Maximum payload size supported by a kernel RPC server.
 * This is used to determine the max number of pages nfsd is
 * willing to return in a single READ operation.
 *
 * These happen to all be powers of 2, which is not strictly
 * necessary but helps enforce the real limitation, which is
 * that they should be multiples of PAGE_SIZE.
 *
 * For UDP transports, a block plus NFS, RPC, and UDP headers
 * has to fit into the IP datagram limit of 64K.  The largest
 * feasible number for all known page sizes is probably 48K,
 * but we choose 32K here.  This is the same as the historical
 * Linux limit; someone who cares more about NFS/UDP performance
 * can test a larger number.
 *
 * For TCP transports we have more freedom.  A size of 1MB is
 * chosen to match the client limit.  Other OSes are known to
 * have larger limits, but those numbers are probably beyond
 * the point of diminishing returns.
 */
#define RPCSVC_MAXPAYLOAD	(1*1024*1024u)
#define RPCSVC_MAXPAYLOAD_TCP	RPCSVC_MAXPAYLOAD
#define RPCSVC_MAXPAYLOAD_UDP	(32*1024u)

extern u32 svc_max_payload(const struct svc_rqst *rqstp);

/*
 * RPC Requests and replies are stored in one or more pages.
 * We maintain an array of pages for each server thread.
 * Requests are copied into these pages as they arrive.  Remaining
 * pages are available to write the reply into.
 *
 * Pages are sent using ->sendmsg with MSG_SPLICE_PAGES so each server thread
 * needs to allocate more to replace those used in sending.  To help keep track
 * of these pages we have a receive list where all pages initially live, and a
 * send list where pages are moved to when they are to be part of a reply.
 *
 * We use xdr_buf for holding responses as it fits well with NFS
 * read responses (that have a header, and some data pages, and possibly
 * a tail) and means we can share some client side routines.
 *
 * The xdr_buf.head kvec always points to the first page in the rq_*pages
 * list.  The xdr_buf.pages pointer points to the second page on that
 * list.  xdr_buf.tail points to the end of the first page.
 * This assumes that the non-page part of an rpc reply will fit
 * in a page - NFSd ensures this.  lockd also has no trouble.
 *
 * Each request/reply pair can have at most one "payload", plus two pages,
 * one for the request, and one for the reply.
 * When using ->sendfile to return read data, we might need one extra page
 * if the request is not page-aligned.  So add another '1'.
 */
#define RPCSVC_MAXPAGES		((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE \
				+ 2 + 1)
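
/*
 * For example, with PAGE_SIZE = 4096 and RPCSVC_MAXPAYLOAD = 1 MiB, the
 * payload itself needs (1048576 + 4095) / 4096 = 256 pages, so
 * RPCSVC_MAXPAGES works out to 256 + 2 + 1 = 259 pages per thread.
 */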

/*
 * The context of a single thread, including the request currently being
 * processed.
 */
struct svc_rqst {
	struct list_head	rq_all;		/* all threads list */
	struct llist_node	rq_idle;	/* On the idle list */
	struct rcu_head		rq_rcu_head;	/* for RCU deferred kfree */
	struct svc_xprt *	rq_xprt;	/* transport ptr */

	struct sockaddr_storage	rq_addr;	/* peer address */
	size_t			rq_addrlen;
	struct sockaddr_storage	rq_daddr;	/* dest addr of request
						 *  - reply from here */
	size_t			rq_daddrlen;

	struct svc_serv *	rq_server;	/* RPC service definition */
	struct svc_pool *	rq_pool;	/* thread pool */
	const struct svc_procedure *rq_procinfo;/* procedure info */
	struct auth_ops *	rq_authop;	/* authentication flavour */
	struct svc_cred		rq_cred;	/* auth info */
	void *			rq_xprt_ctxt;	/* transport specific context ptr */
	struct svc_deferred_req*rq_deferred;	/* deferred request we are replaying */

	struct xdr_buf		rq_arg;
	struct xdr_stream	rq_arg_stream;
	struct xdr_stream	rq_res_stream;
	struct page		*rq_scratch_page;
	struct xdr_buf		rq_res;
	struct page		*rq_pages[RPCSVC_MAXPAGES + 1];
	struct page *		*rq_respages;	/* points into rq_pages */
	struct page *		*rq_next_page; /* next reply page to use */
	struct page *		*rq_page_end;  /* one past the last page */

	struct folio_batch	rq_fbatch;
	struct kvec		rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */
	struct bio_vec		rq_bvec[RPCSVC_MAXPAGES];

	__be32			rq_xid;		/* transmission id */
	u32			rq_prog;	/* program number */
	u32			rq_vers;	/* program version */
	u32			rq_proc;	/* procedure number */
	u32			rq_prot;	/* IP protocol */
	int			rq_cachetype;	/* catering to nfsd */
	unsigned long		rq_flags;	/* flags field */
	ktime_t			rq_qtime;	/* enqueue time */

	void *			rq_argp;	/* decoded arguments */
	void *			rq_resp;	/* xdr'd results */
	__be32			*rq_accept_statp;
	void *			rq_auth_data;	/* flavor-specific data */
	__be32			rq_auth_stat;	/* authentication status */
	int			rq_auth_slack;	/* extra space xdr code
						 * should leave in head
						 * for krb5i, krb5p.
						 */
	int			rq_reserved;	/* space on socket outq
						 * reserved for this request
						 */
	ktime_t			rq_stime;	/* start time */

	struct cache_req	rq_chandle;	/* handle passed to caches for
						 * request delaying
						 */
	/* Catering to nfsd */
	struct auth_domain *	rq_client;	/* RPC peer info */
	struct auth_domain *	rq_gssclient;	/* "gss/"-style peer info */
	struct task_struct	*rq_task;	/* service thread */
	struct net		*rq_bc_net;	/* pointer to backchannel's
						 * net namespace
						 */
	unsigned long	bc_to_initval;
	unsigned int	bc_to_retries;
	void **			rq_lease_breaker; /* The v4 client breaking a lease */
	unsigned int		rq_status_counter; /* RPC processing counter */
};

/* bits for rq_flags */
enum {
	RQ_SECURE,		/* secure port */
	RQ_LOCAL,		/* local request */
	RQ_USEDEFERRAL,		/* use deferral */
	RQ_DROPME,		/* drop current reply */
	RQ_VICTIM,		/* Have agreed to shut down */
	RQ_DATA,		/* request has data */
};

#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)

/*
 * Rigorous type checking on sockaddr type conversions
 */
static inline struct sockaddr_in *svc_addr_in(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in *) &rqst->rq_addr;
}

static inline struct sockaddr_in6 *svc_addr_in6(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in6 *) &rqst->rq_addr;
}

static inline struct sockaddr *svc_addr(const struct svc_rqst *rqst)
{
	return (struct sockaddr *) &rqst->rq_addr;
}

static inline struct sockaddr_in *svc_daddr_in(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in *) &rqst->rq_daddr;
}

static inline struct sockaddr_in6 *svc_daddr_in6(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in6 *) &rqst->rq_daddr;
}

static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
{
	return (struct sockaddr *) &rqst->rq_daddr;
}

/**
 * svc_thread_should_stop - check if this thread should stop
 * @rqstp: the thread that might need to stop
 *
 * To stop an svc thread, the pool flags SP_NEED_VICTIM and SP_VICTIM_REMAINS
 * are set.  The first thread that sees SP_NEED_VICTIM clears it, via this
 * function, and thereby becomes the victim.  It should then promptly call
 * svc_exit_thread() to complete the process, clearing SP_VICTIM_REMAINS
 * so the task waiting for a thread to exit can wake and continue.
 *
 * Return values:
 *   %true: caller should invoke svc_exit_thread()
 *   %false: caller should do nothing
 */
static inline bool svc_thread_should_stop(struct svc_rqst *rqstp)
{
	if (test_and_clear_bit(SP_NEED_VICTIM, &rqstp->rq_pool->sp_flags))
		set_bit(RQ_VICTIM, &rqstp->rq_flags);

	return test_bit(RQ_VICTIM, &rqstp->rq_flags);
}
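
/*
 * A minimal sketch of how a service's sv_threadfn typically uses this test
 * (assuming svc_recv() from svcsock.h for the receive step; real threads
 * such as nfsd and lockd do additional setup and accounting around it):
 *
 *	static int my_svc_thread(void *data)
 *	{
 *		struct svc_rqst *rqstp = data;
 *
 *		while (!svc_thread_should_stop(rqstp))
 *			svc_recv(rqstp);
 *		svc_exit_thread(rqstp);
 *		return 0;
 *	}
 */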

struct svc_deferred_req {
	u32			prot;	/* protocol (UDP or TCP) */
	struct svc_xprt		*xprt;
	struct sockaddr_storage	addr;	/* where reply must go */
	size_t			addrlen;
	struct sockaddr_storage	daddr;	/* where reply must come from */
	size_t			daddrlen;
	void			*xprt_ctxt;
	struct cache_deferred_req handle;
	int			argslen;
	__be32			args[];
};

struct svc_process_info {
	union {
		int  (*dispatch)(struct svc_rqst *rqstp);
		struct {
			unsigned int lovers;
			unsigned int hivers;
		} mismatch;
	};
};

/*
 * List of RPC programs on the same transport endpoint
 */
struct svc_program {
	struct svc_program *	pg_next;	/* other programs (same xprt) */
	u32			pg_prog;	/* program number */
	unsigned int		pg_lovers;	/* lowest version */
	unsigned int		pg_hivers;	/* highest version */
	unsigned int		pg_nvers;	/* number of versions */
	const struct svc_version **pg_vers;	/* version array */
	char *			pg_name;	/* service name */
	char *			pg_class;	/* class name: services sharing authentication */
	enum svc_auth_status	(*pg_authenticate)(struct svc_rqst *rqstp);
	__be32			(*pg_init_request)(struct svc_rqst *,
						   const struct svc_program *,
						   struct svc_process_info *);
	int			(*pg_rpcbind_set)(struct net *net,
						  const struct svc_program *,
						  u32 version, int family,
						  unsigned short proto,
						  unsigned short port);
};

/*
 * RPC program version
 */
struct svc_version {
	u32			vs_vers;	/* version number */
	u32			vs_nproc;	/* number of procedures */
	const struct svc_procedure *vs_proc;	/* per-procedure info */
	unsigned long __percpu	*vs_count;	/* call counts */
	u32			vs_xdrsize;	/* xdrsize needed for this version */

	/* Don't register with rpcbind */
	bool			vs_hidden;

	/* Don't care if the rpcbind registration fails */
	bool			vs_rpcb_optnl;

	/* Need xprt with congestion control */
	bool			vs_need_cong_ctrl;

	/* Dispatch function */
	int			(*vs_dispatch)(struct svc_rqst *rqstp);
};

/*
 * RPC procedure info
 */
struct svc_procedure {
	/* process the request: */
	__be32			(*pc_func)(struct svc_rqst *);
	/* XDR decode args: */
	bool			(*pc_decode)(struct svc_rqst *rqstp,
					     struct xdr_stream *xdr);
	/* XDR encode result: */
	bool			(*pc_encode)(struct svc_rqst *rqstp,
					     struct xdr_stream *xdr);
	/* XDR free result: */
	void			(*pc_release)(struct svc_rqst *);
	unsigned int		pc_argsize;	/* argument struct size */
	unsigned int		pc_argzero;	/* how much of argument to clear */
	unsigned int		pc_ressize;	/* result struct size */
	unsigned int		pc_cachetype;	/* cache info (NFS) */
	unsigned int		pc_xdrressize;	/* maximum size of XDR reply */
	const char *		pc_name;	/* for display */
};
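
/*
 * A rough sketch of how the per-procedure hooks fit together, using a
 * hypothetical "FOO" procedure (real tables live in each protocol's
 * proc/xdr files).  pc_decode parses the Call arguments from
 * rq_arg_stream into rq_argp, pc_func performs the operation and fills
 * rq_resp, and pc_encode writes the results into rq_res_stream:
 *
 *	static const struct svc_procedure foo_procedures[] = {
 *		[FOOPROC_DOIT] = {
 *			.pc_func	= foosvc_doit,
 *			.pc_decode	= foosvc_decode_doitargs,
 *			.pc_encode	= foosvc_encode_doitres,
 *			.pc_argsize	= sizeof(struct foo_doitargs),
 *			.pc_argzero	= sizeof(struct foo_doitargs),
 *			.pc_ressize	= sizeof(struct foo_doitres),
 *			.pc_name	= "DOIT",
 *		},
 *	};
 */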

/*
 * Function prototypes.
 */
int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
int svc_bind(struct svc_serv *serv, struct net *net);
struct svc_serv *svc_create(struct svc_program *, unsigned int,
			    int (*threadfn)(void *data));
struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
					struct svc_pool *pool, int node);
bool		   svc_rqst_replace_page(struct svc_rqst *rqstp,
					 struct page *page);
void		   svc_rqst_release_pages(struct svc_rqst *rqstp);
void		   svc_rqst_free(struct svc_rqst *);
void		   svc_exit_thread(struct svc_rqst *);
struct svc_serv *  svc_create_pooled(struct svc_program *prog,
				     struct svc_stat *stats,
				     unsigned int bufsize,
				     int (*threadfn)(void *data));
int		   svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
int		   svc_pool_stats_open(struct svc_info *si, struct file *file);
void		   svc_process(struct svc_rqst *rqstp);
void		   svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp);
int		   svc_register(const struct svc_serv *, struct net *, const int,
				const unsigned short, const unsigned short);

void		   svc_wake_up(struct svc_serv *);
void		   svc_reserve(struct svc_rqst *rqstp, int space);
void		   svc_pool_wake_idle_thread(struct svc_pool *pool);
struct svc_pool   *svc_pool_for_cpu(struct svc_serv *serv);
char *		   svc_print_addr(struct svc_rqst *, char *, size_t);
const char *	   svc_proc_name(const struct svc_rqst *rqstp);
int		   svc_encode_result_payload(struct svc_rqst *rqstp,
					     unsigned int offset,
					     unsigned int length);
unsigned int	   svc_fill_write_vector(struct svc_rqst *rqstp,
					 struct xdr_buf *payload);
char		  *svc_fill_symlink_pathname(struct svc_rqst *rqstp,
					     struct kvec *first, void *p,
					     size_t total);
__be32		   svc_generic_init_request(struct svc_rqst *rqstp,
					    const struct svc_program *progp,
					    struct svc_process_info *procinfo);
int		   svc_generic_rpcbind_set(struct net *net,
					   const struct svc_program *progp,
					   u32 version, int family,
					   unsigned short proto,
					   unsigned short port);
int		   svc_rpcbind_set_version(struct net *net,
					   const struct svc_program *progp,
					   u32 version, int family,
					   unsigned short proto,
					   unsigned short port);
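
/*
 * A minimal sketch of a service's lifecycle using the APIs above (error
 * handling and listener setup via svc_xprt.h are omitted; "my_prog",
 * "my_stats" and "my_threadfn" stand in for a real program table, stats
 * object and thread function).  Passing a NULL pool to
 * svc_set_num_threads() spreads the threads across all pools:
 *
 *	struct svc_serv *serv;
 *
 *	serv = svc_create_pooled(&my_prog, &my_stats, bufsize, my_threadfn);
 *	svc_bind(serv, net);
 *	svc_set_num_threads(serv, NULL, nrthreads);
 *	...
 *	svc_set_num_threads(serv, NULL, 0);
 *	svc_destroy(&serv);
 */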

#define	RPC_MAX_ADDRBUFLEN	(63U)

/*
 * When we want to reduce the size of the reserved space in the response
 * buffer, we need to take into account the size of any checksum data that
 * may be at the end of the packet. This is difficult to determine exactly
 * for all cases without actually generating the checksum, so we just use a
 * static value.
 */
static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space)
{
	svc_reserve(rqstp, space + rqstp->rq_auth_slack);
}

/**
 * svcxdr_init_decode - Prepare an xdr_stream for Call decoding
 * @rqstp: controlling server RPC transaction context
 *
 */
static inline void svcxdr_init_decode(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
	struct xdr_buf *buf = &rqstp->rq_arg;
	struct kvec *argv = buf->head;

	WARN_ON(buf->len != buf->head->iov_len + buf->page_len + buf->tail->iov_len);
	buf->len = buf->head->iov_len + buf->page_len + buf->tail->iov_len;

	xdr_init_decode(xdr, buf, argv->iov_base, NULL);
	xdr_set_scratch_page(xdr, rqstp->rq_scratch_page);
}

/**
 * svcxdr_init_encode - Prepare an xdr_stream for svc Reply encoding
 * @rqstp: controlling server RPC transaction context
 *
 */
static inline void svcxdr_init_encode(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_res_stream;
	struct xdr_buf *buf = &rqstp->rq_res;
	struct kvec *resv = buf->head;

	xdr_reset_scratch_buffer(xdr);

	xdr->buf = buf;
	xdr->iov = resv;
	xdr->p   = resv->iov_base + resv->iov_len;
	xdr->end = resv->iov_base + PAGE_SIZE;
	buf->len = resv->iov_len;
	xdr->page_ptr = buf->pages - 1;
	buf->buflen = PAGE_SIZE * (rqstp->rq_page_end - buf->pages);
	xdr->rqst = NULL;
}

/**
 * svcxdr_encode_opaque_pages - Insert pages into an xdr_stream
 * @rqstp: controlling server RPC transaction context
 * @xdr: xdr_stream to be updated
 * @pages: array of pages to insert
 * @base: starting offset of first data byte in @pages
 * @len: number of data bytes in @pages to insert
 *
 * After the @pages are added, the tail iovec is instantiated pointing
 * to end of the head buffer, and the stream is set up to encode
 * subsequent items into the tail.
 */
static inline void svcxdr_encode_opaque_pages(struct svc_rqst *rqstp,
					      struct xdr_stream *xdr,
					      struct page **pages,
					      unsigned int base,
					      unsigned int len)
{
	xdr_write_pages(xdr, pages, base, len);
	xdr->page_ptr = rqstp->rq_next_page - 1;
}
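
/*
 * A rough usage sketch, simplified from the style of the NFS READ result
 * encoders ("base" and "count" are placeholders; exact arguments vary by
 * protocol): the XDR length of the opaque data is encoded first, then the
 * pages holding the payload are spliced in behind it:
 *
 *	if (xdr_stream_encode_u32(xdr, count) < 0)
 *		return false;
 *	svcxdr_encode_opaque_pages(rqstp, xdr, rqstp->rq_res.pages,
 *				   base, count);
 */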

/**
 * svcxdr_set_auth_slack - Reserve Reply buffer space for the security flavor
 * @rqstp: RPC transaction
 * @slack: buffer space to reserve for the transaction's security flavor
 *
 * Set the request's slack space requirement, and set aside that much
 * space in the rqstp's rq_res.head for use when the auth wraps the Reply.
 */
static inline void svcxdr_set_auth_slack(struct svc_rqst *rqstp, int slack)
{
	struct xdr_stream *xdr = &rqstp->rq_res_stream;
	struct xdr_buf *buf = &rqstp->rq_res;
	struct kvec *resv = buf->head;

	rqstp->rq_auth_slack = slack;

	xdr->end -= XDR_QUADLEN(slack);
	buf->buflen -= rqstp->rq_auth_slack;

	WARN_ON(xdr->iov != resv);
	WARN_ON(xdr->p > xdr->end);
}

/**
 * svcxdr_set_accept_stat - Reserve space for the accept_stat field
 * @rqstp: RPC transaction context
 *
 * Return values:
 *   %true: Success
 *   %false: No response buffer space was available
 */
static inline bool svcxdr_set_accept_stat(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_res_stream;

	rqstp->rq_accept_statp = xdr_reserve_space(xdr, XDR_UNIT);
	if (unlikely(!rqstp->rq_accept_statp))
		return false;
	*rqstp->rq_accept_statp = rpc_success;
	return true;
}
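
/*
 * A brief sketch of how the Reply-side helpers above are sequenced, loosely
 * following the generic dispatch path (error handling uses a hypothetical
 * label here): the stream is set up, space for accept_stat is reserved, and
 * procedure-specific encoding then continues in rq_res_stream:
 *
 *	svcxdr_init_encode(rqstp);
 *	if (!svcxdr_set_accept_stat(rqstp))
 *		goto err_short_reply;
 *	...
 *	procp->pc_encode(rqstp, &rqstp->rq_res_stream);
 */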

#endif /* SUNRPC_SVC_H */