/*
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include "nfsd.h"
#include "state.h"
#include "netns.h"
#include "xdr4cb.h"

#define NFSDDBG_FACILITY                NFSDDBG_PROC

static void nfsd4_mark_cb_fault(struct nfs4_client *, int reason);

#define NFSPROC4_CB_NULL 0
#define NFSPROC4_CB_COMPOUND 1

/* Index of predefined Linux callback client operations */

struct nfs4_cb_compound_hdr {
	/* args */
	u32		ident;	/* minorversion 0 only */
	u32		nops;
	__be32		*nops_p;
	u32		minorversion;
	/* res */
	int		status;
};

/*
 * Handle decode buffer overflows out-of-line.
 */
static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
{
	dprintk("NFS: %s prematurely hit the end of our receive buffer. "
		"Remaining buffer length is %tu words.\n",
		func, xdr->end - xdr->p);
}

static __be32 *xdr_encode_empty_array(__be32 *p)
{
	*p++ = xdr_zero;
	return p;
}

/*
 * Encode/decode NFSv4 CB basic data types
 *
 * Basic NFSv4 callback data types are defined in section 15 of RFC
 * 3530: "Network File System (NFS) version 4 Protocol" and section
 * 20 of RFC 5661: "Network File System (NFS) Version 4 Minor Version
 * 1 Protocol"
 */

/*
 *	nfs_cb_opnum4
 *
 *	enum nfs_cb_opnum4 {
 *		OP_CB_GETATTR		= 3,
 *		  ...
 *	};
 */
enum nfs_cb_opnum4 {
	OP_CB_GETATTR			= 3,
	OP_CB_RECALL			= 4,
	OP_CB_LAYOUTRECALL		= 5,
	OP_CB_NOTIFY			= 6,
	OP_CB_PUSH_DELEG		= 7,
	OP_CB_RECALL_ANY		= 8,
	OP_CB_RECALLABLE_OBJ_AVAIL	= 9,
	OP_CB_RECALL_SLOT		= 10,
	OP_CB_SEQUENCE			= 11,
	OP_CB_WANTS_CANCELLED		= 12,
	OP_CB_NOTIFY_LOCK		= 13,
	OP_CB_NOTIFY_DEVICEID		= 14,
	OP_CB_ILLEGAL			= 10044
};

static void encode_nfs_cb_opnum4(struct xdr_stream *xdr, enum nfs_cb_opnum4 op)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(op);
}

/*
 * nfs_fh4
 *
 *	typedef opaque nfs_fh4<NFS4_FHSIZE>;
 */
static void encode_nfs_fh4(struct xdr_stream *xdr, const struct knfsd_fh *fh)
{
	u32 length = fh->fh_size;
	__be32 *p;

	BUG_ON(length > NFS4_FHSIZE);
	p = xdr_reserve_space(xdr, 4 + length);
	xdr_encode_opaque(p, &fh->fh_base, length);
}

/*
 * stateid4
 *
 *	struct stateid4 {
 *		uint32_t	seqid;
 *		opaque		other[12];
 *	};
 */
static void encode_stateid4(struct xdr_stream *xdr, const stateid_t *sid)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, NFS4_STATEID_SIZE);
	*p++ = cpu_to_be32(sid->si_generation);
	xdr_encode_opaque_fixed(p, &sid->si_opaque, NFS4_STATEID_OTHER_SIZE);
}

/*
 * sessionid4
 *
 *	typedef opaque sessionid4[NFS4_SESSIONID_SIZE];
 */
static void encode_sessionid4(struct xdr_stream *xdr,
			      const struct nfsd4_session *session)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN);
	xdr_encode_opaque_fixed(p, session->se_sessionid.data,
					NFS4_MAX_SESSIONID_LEN);
}

/*
 * nfsstat4
 */
static const struct {
	int stat;
	int errno;
} nfs_cb_errtbl[] = {
	{ NFS4_OK,		0		},
	{ NFS4ERR_PERM,		-EPERM		},
	{ NFS4ERR_NOENT,	-ENOENT		},
	{ NFS4ERR_IO,		-EIO		},
	{ NFS4ERR_NXIO,		-ENXIO		},
	{ NFS4ERR_ACCESS,	-EACCES		},
	{ NFS4ERR_EXIST,	-EEXIST		},
	{ NFS4ERR_XDEV,		-EXDEV		},
	{ NFS4ERR_NOTDIR,	-ENOTDIR	},
	{ NFS4ERR_ISDIR,	-EISDIR		},
	{ NFS4ERR_INVAL,	-EINVAL		},
	{ NFS4ERR_FBIG,		-EFBIG		},
	{ NFS4ERR_NOSPC,	-ENOSPC		},
	{ NFS4ERR_ROFS,		-EROFS		},
	{ NFS4ERR_MLINK,	-EMLINK		},
	{ NFS4ERR_NAMETOOLONG,	-ENAMETOOLONG	},
	{ NFS4ERR_NOTEMPTY,	-ENOTEMPTY	},
	{ NFS4ERR_DQUOT,	-EDQUOT		},
	{ NFS4ERR_STALE,	-ESTALE		},
	{ NFS4ERR_BADHANDLE,	-EBADHANDLE	},
	{ NFS4ERR_BAD_COOKIE,	-EBADCOOKIE	},
	{ NFS4ERR_NOTSUPP,	-ENOTSUPP	},
	{ NFS4ERR_TOOSMALL,	-ETOOSMALL	},
	{ NFS4ERR_SERVERFAULT,	-ESERVERFAULT	},
	{ NFS4ERR_BADTYPE,	-EBADTYPE	},
	{ NFS4ERR_LOCKED,	-EAGAIN		},
	{ NFS4ERR_RESOURCE,	-EREMOTEIO	},
	{ NFS4ERR_SYMLINK,	-ELOOP		},
	{ NFS4ERR_OP_ILLEGAL,	-EOPNOTSUPP	},
	{ NFS4ERR_DEADLOCK,	-EDEADLK	},
	{ -1,			-EIO		}
};

/*
 * If we cannot translate the error, the recovery routines should
 * handle it.
 *
 * Note: remaining NFSv4 error codes have values > 10000, so should
 * not conflict with native Linux error codes.
 */
static int nfs_cb_stat_to_errno(int status)
{
	int i;

	for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
		if (nfs_cb_errtbl[i].stat == status)
			return nfs_cb_errtbl[i].errno;
	}

	dprintk("NFSD: Unrecognized NFS CB status value: %u\n", status);
	return -status;
}

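/*
 * Decode one operation's header from a CB_COMPOUND reply: the opnum
 * followed by its nfsstat4.  Fails if the opnum is not the one we
 * encoded; otherwise the status is translated to a local errno.
 */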
static int decode_cb_op_status(struct xdr_stream *xdr,
			       enum nfs_cb_opnum4 expected, int *status)
{
	__be32 *p;
	u32 op;

	p = xdr_inline_decode(xdr, 4 + 4);
	if (unlikely(p == NULL))
		goto out_overflow;
	op = be32_to_cpup(p++);
	if (unlikely(op != expected))
		goto out_unexpected;
	*status = nfs_cb_stat_to_errno(be32_to_cpup(p));
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
out_unexpected:
	dprintk("NFSD: Callback server returned operation %d but "
		"we issued a request for %d\n", op, expected);
	return -EIO;
}

/*
 * CB_COMPOUND4args
 *
 *	struct CB_COMPOUND4args {
 *		utf8str_cs	tag;
 *		uint32_t	minorversion;
 *		uint32_t	callback_ident;
 *		nfs_cb_argop4	argarray<>;
 *	};
 */
static void encode_cb_compound4args(struct xdr_stream *xdr,
				    struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4);
	p = xdr_encode_empty_array(p);		/* empty tag */
	*p++ = cpu_to_be32(hdr->minorversion);
	*p++ = cpu_to_be32(hdr->ident);

	hdr->nops_p = p;
	*p = cpu_to_be32(hdr->nops);		/* argarray element count */
}

/*
 * Update argarray element count
 */
static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
{
	BUG_ON(hdr->nops > NFS4_MAX_BACK_CHANNEL_OPS);
	*hdr->nops_p = cpu_to_be32(hdr->nops);
}

/*
 * CB_COMPOUND4res
 *
 *	struct CB_COMPOUND4res {
 *		nfsstat4	status;
 *		utf8str_cs	tag;
 *		nfs_cb_resop4	resarray<>;
 *	};
 */
static int decode_cb_compound4res(struct xdr_stream *xdr,
				  struct nfs4_cb_compound_hdr *hdr)
{
	u32 length;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 + 4);
	if (unlikely(p == NULL))
		goto out_overflow;
	hdr->status = be32_to_cpup(p++);
	/* Ignore the tag */
	length = be32_to_cpup(p++);
	p = xdr_inline_decode(xdr, length + 4);
	if (unlikely(p == NULL))
		goto out_overflow;
	p += XDR_QUADLEN(length);
	hdr->nops = be32_to_cpup(p);
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

/*
 * CB_RECALL4args
 *
 *	struct CB_RECALL4args {
 *		stateid4	stateid;
 *		bool		truncate;
 *		nfs_fh4		fh;
 *	};
 */
static void encode_cb_recall4args(struct xdr_stream *xdr,
				  const struct nfs4_delegation *dp,
				  struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;

	encode_nfs_cb_opnum4(xdr, OP_CB_RECALL);
	encode_stateid4(xdr, &dp->dl_stid.sc_stateid);

	p = xdr_reserve_space(xdr, 4);
	*p++ = xdr_zero;			/* truncate */

	encode_nfs_fh4(xdr, &dp->dl_stid.sc_file->fi_fhandle);

	hdr->nops++;
}

/*
 * CB_SEQUENCE4args
 *
 *	struct CB_SEQUENCE4args {
 *		sessionid4		csa_sessionid;
 *		sequenceid4		csa_sequenceid;
 *		slotid4			csa_slotid;
 *		slotid4			csa_highest_slotid;
 *		bool			csa_cachethis;
 *		referring_call_list4	csa_referring_call_lists<>;
 *	};
 */
static void encode_cb_sequence4args(struct xdr_stream *xdr,
				    const struct nfsd4_callback *cb,
				    struct nfs4_cb_compound_hdr *hdr)
{
	struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
	__be32 *p;

	if (hdr->minorversion == 0)
		return;

	encode_nfs_cb_opnum4(xdr, OP_CB_SEQUENCE);
	encode_sessionid4(xdr, session);

	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4 + 4);
	*p++ = cpu_to_be32(session->se_cb_seq_nr);	/* csa_sequenceid */
	*p++ = xdr_zero;			/* csa_slotid */
	*p++ = xdr_zero;			/* csa_highest_slotid */
	*p++ = xdr_zero;			/* csa_cachethis */
	xdr_encode_empty_array(p);		/* csa_referring_call_lists */

	hdr->nops++;
}

/*
 * CB_SEQUENCE4resok
 *
 *	struct CB_SEQUENCE4resok {
 *		sessionid4	csr_sessionid;
 *		sequenceid4	csr_sequenceid;
 *		slotid4		csr_slotid;
 *		slotid4		csr_highest_slotid;
 *		slotid4		csr_target_highest_slotid;
 *	};
 *
 *	union CB_SEQUENCE4res switch (nfsstat4 csr_status) {
 *	case NFS4_OK:
 *		CB_SEQUENCE4resok	csr_resok4;
 *	default:
 *		void;
 *	};
 * Our current back channel implementation supports a single backchannel
 * with a single slot.
 */
static int decode_cb_sequence4resok(struct xdr_stream *xdr,
				    struct nfsd4_callback *cb)
{
	struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
	int status = -ESERVERFAULT;
	__be32 *p;
	u32 dummy;

	/*
	 * If the server returns different values for sessionID, slotID or
	 * sequence number, the server is looney tunes.
	 */
	p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4 + 4 + 4);
	if (unlikely(p == NULL))
		goto out_overflow;

	if (memcmp(p, session->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) {
		dprintk("NFS: %s Invalid session id\n", __func__);
		goto out;
	}
	p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);

	dummy = be32_to_cpup(p++);
	if (dummy != session->se_cb_seq_nr) {
		dprintk("NFS: %s Invalid sequence number\n", __func__);
		goto out;
	}

	dummy = be32_to_cpup(p++);
	if (dummy != 0) {
		dprintk("NFS: %s Invalid slotid\n", __func__);
		goto out;
	}

	/*
	 * FIXME: process highest slotid and target highest slotid
	 */
	status = 0;
out:
	cb->cb_seq_status = status;
	return status;
out_overflow:
	print_overflow_msg(__func__, xdr);
	status = -EIO;
	goto out;
}

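/*
 * A v4.0 callback compound carries no CB_SEQUENCE, so there is nothing
 * to decode in that case; for v4.1+ the CB_SEQUENCE result is checked
 * before any other result in the reply.
 */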
static int decode_cb_sequence4res(struct xdr_stream *xdr,
				  struct nfsd4_callback *cb)
{
	int status;

	if (cb->cb_clp->cl_minorversion == 0)
		return 0;

	status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_seq_status);
	if (unlikely(status || cb->cb_seq_status))
		return status;

	return decode_cb_sequence4resok(xdr, cb);
}

/*
 * NFSv4.0 and NFSv4.1 XDR encode functions
 *
 * NFSv4.0 callback argument types are defined in section 15 of RFC
 * 3530: "Network File System (NFS) version 4 Protocol" and section 20
 * of RFC 5661:  "Network File System (NFS) Version 4 Minor Version 1
 * Protocol".
 */

/*
 * NB: Without this zero space reservation, callbacks over krb5p fail
 */
static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
				 const void *__unused)
{
	xdr_reserve_space(xdr, 0);
}

/*
 * 20.2. Operation 4: CB_RECALL - Recall a Delegation
 */
static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
				   const void *data)
{
	const struct nfsd4_callback *cb = data;
	const struct nfs4_delegation *dp = cb_to_delegation(cb);
	struct nfs4_cb_compound_hdr hdr = {
		.ident = cb->cb_clp->cl_cb_ident,
		.minorversion = cb->cb_clp->cl_minorversion,
	};

	encode_cb_compound4args(xdr, &hdr);
	encode_cb_sequence4args(xdr, cb, &hdr);
	encode_cb_recall4args(xdr, dp, &hdr);
	encode_cb_nops(&hdr);
}


/*
 * NFSv4.0 and NFSv4.1 XDR decode functions
 *
 * NFSv4.0 callback result types are defined in section 15 of RFC
 * 3530: "Network File System (NFS) version 4 Protocol" and section 20
 * of RFC 5661:  "Network File System (NFS) Version 4 Minor Version 1
 * Protocol".
 */

static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
				void *__unused)
{
	return 0;
}

/*
 * 20.2. Operation 4: CB_RECALL - Recall a Delegation
 */
static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
				  struct xdr_stream *xdr,
				  void *data)
{
	struct nfsd4_callback *cb = data;
	struct nfs4_cb_compound_hdr hdr;
	int status;

	status = decode_cb_compound4res(xdr, &hdr);
	if (unlikely(status))
		return status;

	if (cb != NULL) {
		status = decode_cb_sequence4res(xdr, cb);
		if (unlikely(status || cb->cb_seq_status))
			return status;
	}

	return decode_cb_op_status(xdr, OP_CB_RECALL, &cb->cb_status);
}

#ifdef CONFIG_NFSD_PNFS
/*
 * CB_LAYOUTRECALL4args
 *
 *	struct layoutrecall_file4 {
 *		nfs_fh4         lor_fh;
 *		offset4         lor_offset;
 *		length4         lor_length;
 *		stateid4        lor_stateid;
 *	};
 *
 *	union layoutrecall4 switch(layoutrecall_type4 lor_recalltype) {
 *	case LAYOUTRECALL4_FILE:
 *		layoutrecall_file4 lor_layout;
 *	case LAYOUTRECALL4_FSID:
 *		fsid4              lor_fsid;
 *	case LAYOUTRECALL4_ALL:
 *		void;
 *	};
 *
 *	struct CB_LAYOUTRECALL4args {
 *		layouttype4             clora_type;
 *		layoutiomode4           clora_iomode;
 *		bool                    clora_changed;
 *		layoutrecall4           clora_recall;
 *	};
 */
static void encode_cb_layout4args(struct xdr_stream *xdr,
				  const struct nfs4_layout_stateid *ls,
				  struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;

	BUG_ON(hdr->minorversion == 0);

	p = xdr_reserve_space(xdr, 5 * 4);
	*p++ = cpu_to_be32(OP_CB_LAYOUTRECALL);
	*p++ = cpu_to_be32(ls->ls_layout_type);
	*p++ = cpu_to_be32(IOMODE_ANY);
	*p++ = cpu_to_be32(1);
	*p = cpu_to_be32(RETURN_FILE);

	encode_nfs_fh4(xdr, &ls->ls_stid.sc_file->fi_fhandle);

	p = xdr_reserve_space(xdr, 2 * 8);
	p = xdr_encode_hyper(p, 0);
	xdr_encode_hyper(p, NFS4_MAX_UINT64);

	encode_stateid4(xdr, &ls->ls_recall_sid);

	hdr->nops++;
}

static void nfs4_xdr_enc_cb_layout(struct rpc_rqst *req,
				   struct xdr_stream *xdr,
				   const void *data)
{
	const struct nfsd4_callback *cb = data;
	const struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);
	struct nfs4_cb_compound_hdr hdr = {
		.ident = 0,
		.minorversion = cb->cb_clp->cl_minorversion,
	};

	encode_cb_compound4args(xdr, &hdr);
	encode_cb_sequence4args(xdr, cb, &hdr);
	encode_cb_layout4args(xdr, ls, &hdr);
	encode_cb_nops(&hdr);
}

static int nfs4_xdr_dec_cb_layout(struct rpc_rqst *rqstp,
				  struct xdr_stream *xdr,
				  void *data)
{
	struct nfsd4_callback *cb = data;
	struct nfs4_cb_compound_hdr hdr;
	int status;

	status = decode_cb_compound4res(xdr, &hdr);
	if (unlikely(status))
		return status;

	if (cb) {
		status = decode_cb_sequence4res(xdr, cb);
		if (unlikely(status || cb->cb_seq_status))
			return status;
	}
	return decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &cb->cb_status);
}
#endif /* CONFIG_NFSD_PNFS */

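/*
 * lock_owner4
 *
 *	struct lock_owner4 {
 *		clientid4	clientid;
 *		opaque		owner<NFS4_OPAQUE_LIMIT>;
 *	};
 */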
static void encode_stateowner(struct xdr_stream *xdr, struct nfs4_stateowner *so)
{
	__be32	*p;

	p = xdr_reserve_space(xdr, 8 + 4 + so->so_owner.len);
	p = xdr_encode_opaque_fixed(p, &so->so_client->cl_clientid, 8);
	xdr_encode_opaque(p, so->so_owner.data, so->so_owner.len);
}

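/*
 * CB_NOTIFY_LOCK4args
 *
 *	struct CB_NOTIFY_LOCK4args {
 *		nfs_fh4		cnla_fh;
 *		lock_owner4	cnla_lock_owner;
 *	};
 */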
static void nfs4_xdr_enc_cb_notify_lock(struct rpc_rqst *req,
					struct xdr_stream *xdr,
					const void *data)
{
	const struct nfsd4_callback *cb = data;
	const struct nfsd4_blocked_lock *nbl =
		container_of(cb, struct nfsd4_blocked_lock, nbl_cb);
	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)nbl->nbl_lock.fl_owner;
	struct nfs4_cb_compound_hdr hdr = {
		.ident = 0,
		.minorversion = cb->cb_clp->cl_minorversion,
	};

	__be32 *p;

	BUG_ON(hdr.minorversion == 0);

	encode_cb_compound4args(xdr, &hdr);
	encode_cb_sequence4args(xdr, cb, &hdr);

	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(OP_CB_NOTIFY_LOCK);
	encode_nfs_fh4(xdr, &nbl->nbl_fh);
	encode_stateowner(xdr, &lo->lo_owner);
	hdr.nops++;

	encode_cb_nops(&hdr);
}

static int nfs4_xdr_dec_cb_notify_lock(struct rpc_rqst *rqstp,
					struct xdr_stream *xdr,
					void *data)
{
	struct nfsd4_callback *cb = data;
	struct nfs4_cb_compound_hdr hdr;
	int status;

	status = decode_cb_compound4res(xdr, &hdr);
	if (unlikely(status))
		return status;

	if (cb) {
		status = decode_cb_sequence4res(xdr, cb);
		if (unlikely(status || cb->cb_seq_status))
			return status;
	}
	return decode_cb_op_status(xdr, OP_CB_NOTIFY_LOCK, &cb->cb_status);
}

/*
 * RPC procedure tables
 */
#define PROC(proc, call, argtype, restype)				\
[NFSPROC4_CLNT_##proc] = {						\
	.p_proc    = NFSPROC4_CB_##call,				\
	.p_encode  = nfs4_xdr_enc_##argtype,				\
	.p_decode  = nfs4_xdr_dec_##restype,				\
	.p_arglen  = NFS4_enc_##argtype##_sz,				\
	.p_replen  = NFS4_dec_##restype##_sz,				\
	.p_statidx = NFSPROC4_CB_##call,				\
	.p_name    = #proc,						\
}

static const struct rpc_procinfo nfs4_cb_procedures[] = {
	PROC(CB_NULL,	NULL,		cb_null,	cb_null),
	PROC(CB_RECALL,	COMPOUND,	cb_recall,	cb_recall),
#ifdef CONFIG_NFSD_PNFS
	PROC(CB_LAYOUT,	COMPOUND,	cb_layout,	cb_layout),
#endif
	PROC(CB_NOTIFY_LOCK,	COMPOUND,	cb_notify_lock,	cb_notify_lock),
};

static unsigned int nfs4_cb_counts[ARRAY_SIZE(nfs4_cb_procedures)];
static const struct rpc_version nfs_cb_version4 = {
/*
 * Note on the callback rpc program version number: despite language in rfc
 * 5661 section 18.36.3 requiring servers to use 4 in this field, the
 * official xdr descriptions for both 4.0 and 4.1 specify version 1, and
 * in practice that appears to be what implementations use.  The section
 * 18.36.3 language is expected to be fixed in an erratum.
 */
	.number			= 1,
	.nrprocs		= ARRAY_SIZE(nfs4_cb_procedures),
	.procs			= nfs4_cb_procedures,
	.counts			= nfs4_cb_counts,
};

static const struct rpc_version *nfs_cb_version[2] = {
	[1] = &nfs_cb_version4,
};

static const struct rpc_program cb_program;

static struct rpc_stat cb_stats = {
	.program		= &cb_program
};

#define NFS4_CALLBACK 0x40000000
static const struct rpc_program cb_program = {
	.name			= "nfs4_cb",
	.number			= NFS4_CALLBACK,
	.nrvers			= ARRAY_SIZE(nfs_cb_version),
	.version		= nfs_cb_version,
	.stats			= &cb_stats,
	.pipe_dir_name		= "nfsd4_cb",
};

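/*
 * Timeout for a single callback RPC: one tenth of the lease period,
 * but never less than one second.
 */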
static int max_cb_time(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	return max(nn->nfsd4_lease/10, (time_t)1) * HZ;
}

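/*
 * For NFSv4.0 the backchannel uses the machine credential for the
 * client's target principal ("nfs" if none was negotiated).  For
 * NFSv4.1+ the credential is built from the uid/gid in the session's
 * backchannel security parameters.
 */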
static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses)
{
	if (clp->cl_minorversion == 0) {
		char *principal = clp->cl_cred.cr_targ_princ ?
					clp->cl_cred.cr_targ_princ : "nfs";
		struct rpc_cred *cred;

		cred = rpc_lookup_machine_cred(principal);
		if (!IS_ERR(cred))
			get_rpccred(cred);
		return cred;
	} else {
		struct rpc_auth *auth = client->cl_auth;
		struct auth_cred acred = {};

		acred.uid = ses->se_cb_sec.uid;
		acred.gid = ses->se_cb_sec.gid;
		return auth->au_ops->lookup_cred(client->cl_auth, &acred, 0);
	}
}

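/*
 * Set up the rpc_clnt for the backchannel.  For NFSv4.0 this opens a
 * new TCP connection to the callback address the client advertised;
 * for NFSv4.1+ it reuses a connection the client has already bound to
 * the session's backchannel.
 */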
static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
{
	int maxtime = max_cb_time(clp->net);
	struct rpc_timeout	timeparms = {
		.to_initval	= maxtime,
		.to_retries	= 0,
		.to_maxval	= maxtime,
	};
	struct rpc_create_args args = {
		.net		= clp->net,
		.address	= (struct sockaddr *) &conn->cb_addr,
		.addrsize	= conn->cb_addrlen,
		.saddress	= (struct sockaddr *) &conn->cb_saddr,
		.timeout	= &timeparms,
		.program	= &cb_program,
		.version	= 1,
		.flags		= (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
	};
	struct rpc_clnt *client;
	struct rpc_cred *cred;

	if (clp->cl_minorversion == 0) {
		if (!clp->cl_cred.cr_principal &&
				(clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5))
			return -EINVAL;
		args.client_name = clp->cl_cred.cr_principal;
		args.prognumber	= conn->cb_prog;
		args.protocol = XPRT_TRANSPORT_TCP;
		args.authflavor = clp->cl_cred.cr_flavor;
		clp->cl_cb_ident = conn->cb_ident;
	} else {
		if (!conn->cb_xprt)
			return -EINVAL;
		clp->cl_cb_session = ses;
		args.bc_xprt = conn->cb_xprt;
		args.prognumber = clp->cl_cb_session->se_cb_prog;
		args.protocol = conn->cb_xprt->xpt_class->xcl_ident |
				XPRT_TRANSPORT_BC;
		args.authflavor = ses->se_cb_sec.flavor;
	}
	/* Create RPC client */
	client = rpc_create(&args);
	if (IS_ERR(client)) {
		dprintk("NFSD: couldn't create callback client: %ld\n",
			PTR_ERR(client));
		return PTR_ERR(client);
	}
	cred = get_backchannel_cred(clp, client, ses);
	if (IS_ERR(cred)) {
		rpc_shutdown_client(client);
		return PTR_ERR(cred);
	}

	if (clp->cl_minorversion != 0)
		clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
	clp->cl_cb_client = client;
	clp->cl_cb_cred = cred;
	return 0;
}

static void warn_no_callback_path(struct nfs4_client *clp, int reason)
{
	dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
		(int)clp->cl_name.len, clp->cl_name.data, reason);
}

static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason)
{
	if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
		return;
	clp->cl_cb_state = NFSD4_CB_DOWN;
	warn_no_callback_path(clp, reason);
}

static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason)
{
	if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
		return;
	clp->cl_cb_state = NFSD4_CB_FAULT;
	warn_no_callback_path(clp, reason);
}

static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);

	if (task->tk_status)
		nfsd4_mark_cb_down(clp, task->tk_status);
	else
		clp->cl_cb_state = NFSD4_CB_UP;
}

static const struct rpc_call_ops nfsd4_cb_probe_ops = {
	/* XXX: release method to ensure we set the cb channel down if
	 * necessary on early failure? */
	.rpc_call_done = nfsd4_cb_probe_done,
};

static struct workqueue_struct *callback_wq;

/*
 * Poke the callback thread to process any updates to the callback
 * parameters, and send a null probe.
 */
void nfsd4_probe_callback(struct nfs4_client *clp)
{
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
	nfsd4_run_cb(&clp->cl_cb_null);
}

void nfsd4_probe_callback_sync(struct nfs4_client *clp)
{
	nfsd4_probe_callback(clp);
	flush_workqueue(callback_wq);
}

void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
{
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	spin_lock(&clp->cl_lock);
	memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
	spin_unlock(&clp->cl_lock);
}

/*
 * There's currently a single callback channel slot.
 * If the slot is available, then mark it busy.  Otherwise, put the
 * thread to sleep on the callback RPC wait queue.
 */
static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
{
	if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
		rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
		/* Race breaker */
		if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
			dprintk("%s slot is busy\n", __func__);
			return false;
		}
		rpc_wake_up_queued_task(&clp->cl_cb_waitq, task);
	}
	return true;
}

/*
 * TODO: cb_sequence should support referring call lists, cachethis, multiple
 * slots, and mark callback channel down on communication errors.
 */
static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_client *clp = cb->cb_clp;
	u32 minorversion = clp->cl_minorversion;

	/*
	 * cb_seq_status is only set in decode_cb_sequence4res,
	 * and so will remain 1 if an rpc level failure occurs.
	 */
	cb->cb_seq_status = 1;
	cb->cb_status = 0;
	if (minorversion) {
		if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task))
			return;
		cb->cb_holds_slot = true;
	}
	rpc_call_start(task);
}

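/*
 * Process the CB_SEQUENCE result (if any), release the backchannel
 * slot, and decide whether the callback RPC is done, should be
 * retried, or must be requeued from scratch.  Returns false if the
 * caller should not process the rest of the reply.
 */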
static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback *cb)
{
	struct nfs4_client *clp = cb->cb_clp;
	struct nfsd4_session *session = clp->cl_cb_session;
	bool ret = true;

	if (!clp->cl_minorversion) {
		/*
		 * If the backchannel connection was shut down while this
		 * task was queued, we need to resubmit it after setting up
		 * a new backchannel connection.
		 *
		 * Note that if we lost our callback connection permanently
		 * the submission code will error out, so we don't need to
		 * handle that case here.
		 */
		if (task->tk_flags & RPC_TASK_KILLED)
			goto need_restart;

		return true;
	}

	if (!cb->cb_holds_slot)
		goto need_restart;

	switch (cb->cb_seq_status) {
	case 0:
		/*
		 * No need for lock, access serialized in nfsd4_cb_prepare
		 *
		 * RFC5661 20.9.3
		 * If CB_SEQUENCE returns an error, then the state of the slot
		 * (sequence ID, cached reply) MUST NOT change.
		 */
		++session->se_cb_seq_nr;
		break;
	case -ESERVERFAULT:
		++session->se_cb_seq_nr;
		/* Fall through */
	case 1:
	case -NFS4ERR_BADSESSION:
		nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status);
		ret = false;
		break;
	case -NFS4ERR_DELAY:
		if (!rpc_restart_call(task))
			goto out;

		rpc_delay(task, 2 * HZ);
		return false;
	case -NFS4ERR_BADSLOT:
		goto retry_nowait;
	case -NFS4ERR_SEQ_MISORDERED:
		if (session->se_cb_seq_nr != 1) {
			session->se_cb_seq_nr = 1;
			goto retry_nowait;
		}
		break;
	default:
		dprintk("%s: unprocessed error %d\n", __func__,
			cb->cb_seq_status);
	}

	cb->cb_holds_slot = false;
	clear_bit(0, &clp->cl_cb_slot_busy);
	rpc_wake_up_next(&clp->cl_cb_waitq);
	dprintk("%s: freed slot, new seqid=%d\n", __func__,
		clp->cl_cb_session->se_cb_seq_nr);

	if (task->tk_flags & RPC_TASK_KILLED)
		goto need_restart;
out:
	return ret;
retry_nowait:
	if (rpc_restart_call_prepare(task))
		ret = false;
	goto out;
need_restart:
	task->tk_status = 0;
	cb->cb_need_restart = true;
	return false;
}

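/*
 * Completion handler for a callback RPC.  After CB_SEQUENCE processing,
 * the per-operation ->done method decides whether to retry the call (0),
 * accept the result (1), or treat the failure as a dead callback path (-1).
 */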
static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_client *clp = cb->cb_clp;

	dprintk("%s: minorversion=%d\n", __func__,
		clp->cl_minorversion);

	if (!nfsd4_cb_sequence_done(task, cb))
		return;

	if (cb->cb_status) {
		WARN_ON_ONCE(task->tk_status);
		task->tk_status = cb->cb_status;
	}

	switch (cb->cb_ops->done(cb, task)) {
	case 0:
		task->tk_status = 0;
		rpc_restart_call_prepare(task);
		return;
	case 1:
		break;
	case -1:
		/* Network partition? */
		nfsd4_mark_cb_down(clp, task->tk_status);
		break;
	default:
		BUG();
	}
}

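/*
 * Release handler: requeue the callback if it was marked for restart,
 * otherwise hand it back to its owner via the ->release method.
 */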
static void nfsd4_cb_release(void *calldata)
{
	struct nfsd4_callback *cb = calldata;

	if (cb->cb_need_restart)
		nfsd4_run_cb(cb);
	else
		cb->cb_ops->release(cb);

}

static const struct rpc_call_ops nfsd4_cb_ops = {
	.rpc_call_prepare = nfsd4_cb_prepare,
	.rpc_call_done = nfsd4_cb_done,
	.rpc_release = nfsd4_cb_release,
};

int nfsd4_create_callback_queue(void)
{
	callback_wq = alloc_ordered_workqueue("nfsd4_callbacks", 0);
	if (!callback_wq)
		return -ENOMEM;
	return 0;
}

void nfsd4_destroy_callback_queue(void)
{
	destroy_workqueue(callback_wq);
}

/* must be called under the state lock */
void nfsd4_shutdown_callback(struct nfs4_client *clp)
{
	set_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags);
	/*
	 * Note this won't actually result in a null callback;
	 * instead, nfsd4_run_cb_null() will detect the killed
	 * client, destroy the rpc client, and stop:
	 */
	nfsd4_run_cb(&clp->cl_cb_null);
	flush_workqueue(callback_wq);
}

/* requires cl_lock: */
static struct nfsd4_conn * __nfsd4_find_backchannel(struct nfs4_client *clp)
{
	struct nfsd4_session *s;
	struct nfsd4_conn *c;

	list_for_each_entry(s, &clp->cl_sessions, se_perclnt) {
		list_for_each_entry(c, &s->se_conns, cn_persession) {
			if (c->cn_flags & NFS4_CDFC4_BACK)
				return c;
		}
	}
	return NULL;
}

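/*
 * The callback channel parameters changed (or the client is going
 * away): tear down the old rpc client and, unless the client has been
 * killed, build a new one from the current connection information.
 */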
static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
{
	struct nfs4_cb_conn conn;
	struct nfs4_client *clp = cb->cb_clp;
	struct nfsd4_session *ses = NULL;
	struct nfsd4_conn *c;
	int err;

	/*
	 * This is either an update, or the client dying; in either case,
	 * kill the old client:
	 */
	if (clp->cl_cb_client) {
		rpc_shutdown_client(clp->cl_cb_client);
		clp->cl_cb_client = NULL;
		put_rpccred(clp->cl_cb_cred);
		clp->cl_cb_cred = NULL;
	}
	if (clp->cl_cb_conn.cb_xprt) {
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
		clp->cl_cb_conn.cb_xprt = NULL;
	}
	if (test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags))
		return;
	spin_lock(&clp->cl_lock);
	/*
	 * Only serialized callback code is allowed to clear these
	 * flags; main nfsd code can only set them:
	 */
	BUG_ON(!(clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK));
	clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
	memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
	c = __nfsd4_find_backchannel(clp);
	if (c) {
		svc_xprt_get(c->cn_xprt);
		conn.cb_xprt = c->cn_xprt;
		ses = c->cn_session;
	}
	spin_unlock(&clp->cl_lock);

	err = setup_callback_client(clp, &conn, ses);
	if (err) {
		nfsd4_mark_cb_down(clp, err);
		if (c)
			svc_xprt_put(c->cn_xprt);
		return;
	}
}

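/*
 * Work item behind every queued callback: applies any pending
 * backchannel updates, then sends the callback RPC asynchronously.
 * For v4.1+ clients a null probe is skipped and the channel is
 * simply marked up.
 */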
static void
nfsd4_run_cb_work(struct work_struct *work)
{
	struct nfsd4_callback *cb =
		container_of(work, struct nfsd4_callback, cb_work);
	struct nfs4_client *clp = cb->cb_clp;
	struct rpc_clnt *clnt;

	if (cb->cb_need_restart) {
		cb->cb_need_restart = false;
	} else {
		if (cb->cb_ops && cb->cb_ops->prepare)
			cb->cb_ops->prepare(cb);
	}

	if (clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK)
		nfsd4_process_cb_update(cb);

	clnt = clp->cl_cb_client;
	if (!clnt) {
		/* Callback channel broken, or client killed; give up: */
		if (cb->cb_ops && cb->cb_ops->release)
			cb->cb_ops->release(cb);
		return;
	}

	/*
	 * Don't send probe messages for 4.1 or later.
	 */
	if (!cb->cb_ops && clp->cl_minorversion) {
		clp->cl_cb_state = NFSD4_CB_UP;
		return;
	}

	cb->cb_msg.rpc_cred = clp->cl_cb_cred;
	rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
			cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb);
}

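/*
 * Prepare a callback for later submission via nfsd4_run_cb(): the
 * rpc_message is pointed at the procedure for @op and at the callback
 * itself as both argument and result.
 */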
void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
		const struct nfsd4_callback_ops *ops, enum nfsd4_cb_op op)
{
	cb->cb_clp = clp;
	cb->cb_msg.rpc_proc = &nfs4_cb_procedures[op];
	cb->cb_msg.rpc_argp = cb;
	cb->cb_msg.rpc_resp = cb;
	cb->cb_ops = ops;
	INIT_WORK(&cb->cb_work, nfsd4_run_cb_work);
	cb->cb_seq_status = 1;
	cb->cb_status = 0;
	cb->cb_need_restart = false;
	cb->cb_holds_slot = false;
}

void nfsd4_run_cb(struct nfsd4_callback *cb)
{
	queue_work(callback_wq, &cb->cb_work);
}