/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/include/linux/sunrpc/svc.h
 *
 * RPC server declarations.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */


#ifndef SUNRPC_SVC_H
#define SUNRPC_SVC_H

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/wait.h>
#include <linux/mm.h>

/* statistics for svc_pool structures */
struct svc_pool_stats {
	atomic_long_t	packets;
	unsigned long	sockets_queued;
	atomic_long_t	threads_woken;
	atomic_long_t	threads_timedout;
};

/*
 * RPC service thread pool.
 *
 * Pool of threads and temporary sockets.  Generally there is only
 * a single one of these per RPC service, but on NUMA machines those
 * services that can benefit from it (i.e. nfs but not lockd) will
 * have one pool per NUMA node.  This optimisation reduces cross-
 * node traffic on multi-node NUMA NFS servers.
 */
struct svc_pool {
	unsigned int		sp_id;	    	/* pool id; also node id on NUMA */
	spinlock_t		sp_lock;	/* protects all fields */
	struct list_head	sp_sockets;	/* pending sockets */
	unsigned int		sp_nrthreads;	/* # of threads in pool */
	struct list_head	sp_all_threads;	/* all server threads */
	struct svc_pool_stats	sp_stats;	/* statistics on pool operation */
#define	SP_TASK_PENDING		(0)		/* still work to do even if no
						 * xprt is queued. */
#define SP_CONGESTED		(1)
	unsigned long		sp_flags;
} ____cacheline_aligned_in_smp;

struct svc_serv;

struct svc_serv_ops {
	/* Callback to use when last thread exits. */
	void		(*svo_shutdown)(struct svc_serv *, struct net *);

	/* function for service threads to run */
	int		(*svo_function)(void *);

	/* queue up a transport for servicing */
	void		(*svo_enqueue_xprt)(struct svc_xprt *);

	/* set up thread (or whatever) execution context */
	int		(*svo_setup)(struct svc_serv *, struct svc_pool *, int);

	/* optional module to count when adding threads (pooled svcs only) */
	struct module	*svo_module;
};

/*
 * RPC service.
 *
 * An RPC service is a ``daemon,'' possibly multithreaded, which
 * receives and processes incoming RPC messages.
 * It has one or more transport sockets associated with it, and maintains
 * a list of idle threads waiting for input.
 *
 * We currently do not support more than one RPC program per daemon.
 */
struct svc_serv {
	struct svc_program *	sv_program;	/* RPC program */
	struct svc_stat *	sv_stats;	/* RPC statistics */
	spinlock_t		sv_lock;
	unsigned int		sv_nrthreads;	/* # of server threads */
	unsigned int		sv_maxconn;	/* max connections allowed or
						 * '0' causing max to be based
						 * on number of threads. */

	unsigned int		sv_max_payload;	/* datagram payload size */
	unsigned int		sv_max_mesg;	/* max_payload + 1 page for overheads */
	unsigned int		sv_xdrsize;	/* XDR buffer size */
	struct list_head	sv_permsocks;	/* all permanent sockets */
	struct list_head	sv_tempsocks;	/* all temporary sockets */
	int			sv_tmpcnt;	/* count of temporary sockets */
	struct timer_list	sv_temptimer;	/* timer for aging temporary sockets */

	char *			sv_name;	/* service name */

	unsigned int		sv_nrpools;	/* number of thread pools */
	struct svc_pool *	sv_pools;	/* array of thread pools */
	const struct svc_serv_ops *sv_ops;	/* server operations */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	struct list_head	sv_cb_list;	/* queue for callback requests
						 * that arrive over the same
						 * connection */
	spinlock_t		sv_cb_lock;	/* protects the svc_cb_list */
	wait_queue_head_t	sv_cb_waitq;	/* sleep here if there are no
						 * entries in the svc_cb_list */
	struct svc_xprt		*sv_bc_xprt;	/* callback on fore channel */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};

/*
 * We use sv_nrthreads as a reference count.  svc_destroy() drops
 * this refcount, so we need to bump it up around operations that
 * change the number of threads.  Horrible, but there it is.
 * Should be called with the "service mutex" held.
 */
static inline void svc_get(struct svc_serv *serv)
{
	serv->sv_nrthreads++;
}
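
/*
 * Illustrative usage sketch (not taken from any caller here;
 * "service_mutex" stands in for a caller-owned mutex such as nfsd's
 * nfsd_mutex, and error handling is elided).  svc_destroy() drops the
 * reference that svc_get() takes:
 *
 *	mutex_lock(&service_mutex);
 *	svc_get(serv);
 *	err = svc_set_num_threads(serv, NULL, nrthreads);
 *	svc_destroy(serv);
 *	mutex_unlock(&service_mutex);
 */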

/*
 * Maximum payload size supported by a kernel RPC server.
 * This is used to determine the max number of pages nfsd is
 * willing to return in a single READ operation.
 *
 * These happen to all be powers of 2, which is not strictly
 * necessary but helps enforce the real limitation, which is
 * that they should be multiples of PAGE_SIZE.
 *
 * For UDP transports, a block plus NFS, RPC, and UDP headers
 * has to fit into the IP datagram limit of 64K.  The largest
 * feasible number for all known page sizes is probably 48K,
 * but we choose 32K here.  This is the same as the historical
 * Linux limit; someone who cares more about NFS/UDP performance
 * can test a larger number.
 *
 * For TCP transports we have more freedom.  A size of 1MB is
 * chosen to match the client limit.  Other OSes are known to
 * have larger limits, but those numbers are probably beyond
 * the point of diminishing returns.
 */
#define RPCSVC_MAXPAYLOAD	(1*1024*1024u)
#define RPCSVC_MAXPAYLOAD_TCP	RPCSVC_MAXPAYLOAD
#define RPCSVC_MAXPAYLOAD_UDP	(32*1024u)

extern u32 svc_max_payload(const struct svc_rqst *rqstp);

/*
 * RPC requests and replies are stored in one or more pages.
 * We maintain an array of pages for each server thread.
 * Requests are copied into these pages as they arrive.  Remaining
 * pages are available to write the reply into.
 *
 * Pages are sent using ->sendpage so each server thread needs to
 * allocate more to replace those used in sending.  To help keep track
 * of these pages we have a receive list where all pages initially live,
 * and a send list where pages are moved to when they are to be part
 * of a reply.
 *
 * We use xdr_buf for holding responses as it fits well with NFS
 * read responses (that have a header, and some data pages, and possibly
 * a tail) and means we can share some client side routines.
 *
 * The xdr_buf.head kvec always points to the first page in the rq_*pages
 * list.  The xdr_buf.pages pointer points to the second page on that
 * list.  xdr_buf.tail points to the end of the first page.
 * This assumes that the non-page part of an rpc reply will fit
 * in a page - NFSd ensures this.  lockd also has no trouble.
 *
 * Each request/reply pair can have at most one "payload", plus two pages,
 * one for the request, and one for the reply.
 * When using ->sendfile to return read data, we might need one extra page
 * if the request is not page-aligned.  So add another '1'.
 */
#define RPCSVC_MAXPAGES		((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE \
				+ 2 + 1)
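
/*
 * Worked example (assuming PAGE_SIZE == 4096): the 1MB payload needs
 * 1048576 / 4096 = 256 pages, so RPCSVC_MAXPAGES = 256 + 2 + 1 = 259.
 */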

static inline u32 svc_getnl(struct kvec *iov)
{
	__be32 val, *vp;
	vp = iov->iov_base;
	val = *vp++;
	iov->iov_base = (void*)vp;
	iov->iov_len -= sizeof(__be32);
	return ntohl(val);
}

static inline void svc_putnl(struct kvec *iov, u32 val)
{
	__be32 *vp = iov->iov_base + iov->iov_len;
	*vp = htonl(val);
	iov->iov_len += sizeof(__be32);
}

static inline __be32 svc_getu32(struct kvec *iov)
{
	__be32 val, *vp;
	vp = iov->iov_base;
	val = *vp++;
	iov->iov_base = (void*)vp;
	iov->iov_len -= sizeof(__be32);
	return val;
}

static inline void svc_ungetu32(struct kvec *iov)
{
	__be32 *vp = (__be32 *)iov->iov_base;
	iov->iov_base = (void *)(vp - 1);
	iov->iov_len += sizeof(*vp);
}

static inline void svc_putu32(struct kvec *iov, __be32 val)
{
	__be32 *vp = iov->iov_base + iov->iov_len;
	*vp = val;
	iov->iov_len += sizeof(__be32);
}
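
/*
 * Illustrative sketch of the kvec cursor helpers above (hypothetical
 * decode/encode step, not from any real procedure).  svc_getnl()
 * consumes one 32-bit word from the argument buffer and returns it in
 * host byte order; svc_putnl() appends one word to the reply:
 *
 *	struct kvec *argv = rqstp->rq_arg.head;
 *	struct kvec *resv = rqstp->rq_res.head;
 *	u32 count = svc_getnl(argv);
 *	svc_putnl(resv, 0);
 */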

/*
 * The context of a single thread, including the request currently being
 * processed.
 */
struct svc_rqst {
	struct list_head	rq_all;		/* all threads list */
	struct rcu_head		rq_rcu_head;	/* for RCU deferred kfree */
	struct svc_xprt *	rq_xprt;	/* transport ptr */

	struct sockaddr_storage	rq_addr;	/* peer address */
	size_t			rq_addrlen;
	struct sockaddr_storage	rq_daddr;	/* dest addr of request
						 *  - reply from here */
	size_t			rq_daddrlen;

	struct svc_serv *	rq_server;	/* RPC service definition */
	struct svc_pool *	rq_pool;	/* thread pool */
	const struct svc_procedure *rq_procinfo;/* procedure info */
	struct auth_ops *	rq_authop;	/* authentication flavour */
	struct svc_cred		rq_cred;	/* auth info */
	void *			rq_xprt_ctxt;	/* transport specific context ptr */
	struct svc_deferred_req*rq_deferred;	/* deferred request we are replaying */

	size_t			rq_xprt_hlen;	/* xprt header len */
	struct xdr_buf		rq_arg;
	struct xdr_buf		rq_res;
	struct page		*rq_pages[RPCSVC_MAXPAGES + 1];
	struct page *		*rq_respages;	/* points into rq_pages */
	struct page *		*rq_next_page;	/* next reply page to use */
	struct page *		*rq_page_end;	/* one past the last page */

	struct kvec		rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */

	__be32			rq_xid;		/* transmission id */
	u32			rq_prog;	/* program number */
	u32			rq_vers;	/* program version */
	u32			rq_proc;	/* procedure number */
	u32			rq_prot;	/* IP protocol */
	int			rq_cachetype;	/* catering to nfsd */
#define	RQ_SECURE	(0)			/* secure port */
#define	RQ_LOCAL	(1)			/* local request */
#define	RQ_USEDEFERRAL	(2)			/* use deferral */
#define	RQ_DROPME	(3)			/* drop current reply */
#define	RQ_SPLICE_OK	(4)			/* turned off in gss privacy
						 * to prevent encrypting page
						 * cache pages */
#define	RQ_VICTIM	(5)			/* about to be shut down */
#define	RQ_BUSY		(6)			/* request is busy */
#define	RQ_DATA		(7)			/* request has data */
#define RQ_AUTHERR	(8)			/* Request status is auth error */
	unsigned long		rq_flags;	/* flags field */
	ktime_t			rq_qtime;	/* enqueue time */

	void *			rq_argp;	/* decoded arguments */
	void *			rq_resp;	/* xdr'd results */
	void *			rq_auth_data;	/* flavor-specific data */
	int			rq_auth_slack;	/* extra space xdr code
						 * should leave in head
						 * for krb5i, krb5p.
						 */
	int			rq_reserved;	/* space on socket outq
						 * reserved for this request
						 */
	ktime_t			rq_stime;	/* start time */

	struct cache_req	rq_chandle;	/* handle passed to caches for
						 * request delaying
						 */
	/* Catering to nfsd */
	struct auth_domain *	rq_client;	/* RPC peer info */
	struct auth_domain *	rq_gssclient;	/* "gss/"-style peer info */
	struct svc_cacherep *	rq_cacherep;	/* cache info */
	struct task_struct	*rq_task;	/* service thread */
	spinlock_t		rq_lock;	/* per-request lock */
	struct net		*rq_bc_net;	/* pointer to backchannel's
						 * net namespace
						 */
};

#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)

/*
 * Rigorous type checking on sockaddr type conversions
 */
static inline struct sockaddr_in *svc_addr_in(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in *) &rqst->rq_addr;
}

static inline struct sockaddr_in6 *svc_addr_in6(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in6 *) &rqst->rq_addr;
}

static inline struct sockaddr *svc_addr(const struct svc_rqst *rqst)
{
	return (struct sockaddr *) &rqst->rq_addr;
}

static inline struct sockaddr_in *svc_daddr_in(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in *) &rqst->rq_daddr;
}

static inline struct sockaddr_in6 *svc_daddr_in6(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in6 *) &rqst->rq_daddr;
}

static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
{
	return (struct sockaddr *) &rqst->rq_daddr;
}

/*
 * Check buffer bounds after decoding arguments
 */
static inline int
xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
{
	char *cp = (char *)p;
	struct kvec *vec = &rqstp->rq_arg.head[0];
	return cp >= (char*)vec->iov_base
		&& cp <= (char*)vec->iov_base + vec->iov_len;
}

static inline int
xdr_ressize_check(struct svc_rqst *rqstp, __be32 *p)
{
	struct kvec *vec = &rqstp->rq_res.head[0];
	char *cp = (char*)p;

	vec->iov_len = cp - (char*)vec->iov_base;

	return vec->iov_len <= PAGE_SIZE;
}
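
/*
 * Illustrative sketch of the bounds check above (hypothetical decoder;
 * the "foo" names are assumptions, not real procedures).  After
 * decoding, p must still lie within rq_arg.head:
 *
 *	static int foo_decode_args(struct svc_rqst *rqstp, __be32 *p)
 *	{
 *		struct foo_args *argp = rqstp->rq_argp;
 *
 *		argp->count = ntohl(*p++);
 *		return xdr_argsize_check(rqstp, p);
 *	}
 */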

static inline void svc_free_res_pages(struct svc_rqst *rqstp)
{
	while (rqstp->rq_next_page != rqstp->rq_respages) {
		struct page **pp = --rqstp->rq_next_page;
		if (*pp) {
			put_page(*pp);
			*pp = NULL;
		}
	}
}

struct svc_deferred_req {
	u32			prot;	/* protocol (UDP or TCP) */
	struct svc_xprt		*xprt;
	struct sockaddr_storage	addr;	/* where reply must go */
	size_t			addrlen;
	struct sockaddr_storage	daddr;	/* where reply must come from */
	size_t			daddrlen;
	struct cache_deferred_req handle;
	size_t			xprt_hlen;
	int			argslen;
	__be32			args[0];
};

/*
 * List of RPC programs on the same transport endpoint
 */
struct svc_program {
	struct svc_program *	pg_next;	/* other programs (same xprt) */
	u32			pg_prog;	/* program number */
	unsigned int		pg_lovers;	/* lowest version */
	unsigned int		pg_hivers;	/* highest version */
	unsigned int		pg_nvers;	/* number of versions */
	const struct svc_version **pg_vers;	/* version array */
	char *			pg_name;	/* service name */
	char *			pg_class;	/* class name: services sharing authentication */
	struct svc_stat *	pg_stats;	/* rpc statistics */
	int			(*pg_authenticate)(struct svc_rqst *);
};

/*
 * RPC program version
 */
struct svc_version {
	u32			vs_vers;	/* version number */
	u32			vs_nproc;	/* number of procedures */
	const struct svc_procedure *vs_proc;	/* per-procedure info */
	unsigned int		*vs_count;	/* call counts */
	u32			vs_xdrsize;	/* xdrsize needed for this version */

	/* Don't register with rpcbind */
	bool			vs_hidden;

	/* Don't care if the rpcbind registration fails */
	bool			vs_rpcb_optnl;

	/* Need xprt with congestion control */
	bool			vs_need_cong_ctrl;

	/* Override dispatch function (e.g. when caching replies).
	 * A return value of 0 means drop the request.
	 * vs_dispatch == NULL means use default dispatcher.
	 */
	int			(*vs_dispatch)(struct svc_rqst *, __be32 *);
};

/*
 * RPC procedure info
 */
struct svc_procedure {
	/* process the request: */
	__be32			(*pc_func)(struct svc_rqst *);
	/* XDR decode args: */
	int			(*pc_decode)(struct svc_rqst *, __be32 *data);
	/* XDR encode result: */
	int			(*pc_encode)(struct svc_rqst *, __be32 *data);
	/* XDR free result: */
	void			(*pc_release)(struct svc_rqst *);
	unsigned int		pc_argsize;	/* argument struct size */
	unsigned int		pc_ressize;	/* result struct size */
	unsigned int		pc_cachetype;	/* cache info (NFS) */
	unsigned int		pc_xdrressize;	/* maximum size of XDR reply */
};
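
/*
 * Illustrative sketch of how a service wires these together
 * (hypothetical "foo" program; every name below is an assumption):
 *
 *	static const struct svc_procedure foo_procedures[] = {
 *		[0] = {
 *			.pc_func    = foo_proc_null,
 *			.pc_decode  = foo_decode_void,
 *			.pc_encode  = foo_encode_void,
 *			.pc_argsize = sizeof(struct foo_void),
 *			.pc_ressize = sizeof(struct foo_void),
 *		},
 *	};
 *	static unsigned int foo_count[ARRAY_SIZE(foo_procedures)];
 *	static const struct svc_version foo_version1 = {
 *		.vs_vers    = 1,
 *		.vs_nproc   = ARRAY_SIZE(foo_procedures),
 *		.vs_proc    = foo_procedures,
 *		.vs_count   = foo_count,
 *		.vs_xdrsize = FOO_XDRSIZE,
 *	};
 */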

/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,	/* choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};

struct svc_pool_map {
	int count;			/* How many svc_servs use us */
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
};

extern struct svc_pool_map svc_pool_map;

/*
 * Function prototypes.
 */
int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
int svc_bind(struct svc_serv *serv, struct net *net);
struct svc_serv *svc_create(struct svc_program *, unsigned int,
			    const struct svc_serv_ops *);
struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
					struct svc_pool *pool, int node);
struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
					struct svc_pool *pool, int node);
void		   svc_rqst_free(struct svc_rqst *);
void		   svc_exit_thread(struct svc_rqst *);
unsigned int	   svc_pool_map_get(void);
void		   svc_pool_map_put(void);
struct svc_serv *  svc_create_pooled(struct svc_program *, unsigned int,
			const struct svc_serv_ops *);
int		   svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
int		   svc_set_num_threads_sync(struct svc_serv *, struct svc_pool *, int);
int		   svc_pool_stats_open(struct svc_serv *serv, struct file *file);
void		   svc_destroy(struct svc_serv *);
void		   svc_shutdown_net(struct svc_serv *, struct net *);
int		   svc_process(struct svc_rqst *);
int		   bc_svc_process(struct svc_serv *, struct rpc_rqst *,
			struct svc_rqst *);
int		   svc_register(const struct svc_serv *, struct net *, const int,
				const unsigned short, const unsigned short);

void		   svc_wake_up(struct svc_serv *);
void		   svc_reserve(struct svc_rqst *rqstp, int space);
struct svc_pool *  svc_pool_for_cpu(struct svc_serv *serv, int cpu);
char *		   svc_print_addr(struct svc_rqst *, char *, size_t);
unsigned int	   svc_fill_write_vector(struct svc_rqst *rqstp,
					 struct page **pages,
					 struct kvec *first, size_t total);
char		  *svc_fill_symlink_pathname(struct svc_rqst *rqstp,
					     struct kvec *first, void *p,
					     size_t total);
__be32		   svc_return_autherr(struct svc_rqst *rqstp, __be32 auth_err);
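
/*
 * Illustrative lifecycle sketch built from the prototypes above
 * (hypothetical caller; error handling and locking are elided, and the
 * buffer size and thread count are arbitrary):
 *
 *	serv = svc_create_pooled(&foo_program, 64 * 1024, &foo_ops);
 *	err  = svc_bind(serv, net);
 *	err  = svc_set_num_threads(serv, NULL, 8);
 *	...
 *	svc_shutdown_net(serv, net);
 *	svc_destroy(serv);
 */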

#define	RPC_MAX_ADDRBUFLEN	(63U)

/*
 * When we want to reduce the size of the reserved space in the response
 * buffer, we need to take into account the size of any checksum data that
 * may be at the end of the packet. This is difficult to determine exactly
 * for all cases without actually generating the checksum, so we just use a
 * static value.
 */
static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space)
{
	svc_reserve(rqstp, space + rqstp->rq_auth_slack);
}
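
/*
 * Illustrative sketch (hypothetical caller; "struct foo_reply" is an
 * assumption): once the reply size is known, shrink the outq
 * reservation while keeping room for the auth flavour's checksum:
 *
 *	svc_reserve_auth(rqstp, sizeof(struct foo_reply));
 */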

#endif /* SUNRPC_SVC_H */