1 /*
2  *  Copyright (c) 2001 The Regents of the University of Michigan.
3  *  All rights reserved.
4  *
5  *  Kendrick Smith <kmsmith@umich.edu>
6  *  Andy Adamson <andros@umich.edu>
7  *
8  *  Redistribution and use in source and binary forms, with or without
9  *  modification, are permitted provided that the following conditions
10  *  are met:
11  *
12  *  1. Redistributions of source code must retain the above copyright
13  *     notice, this list of conditions and the following disclaimer.
14  *  2. Redistributions in binary form must reproduce the above copyright
15  *     notice, this list of conditions and the following disclaimer in the
16  *     documentation and/or other materials provided with the distribution.
17  *  3. Neither the name of the University nor the names of its
18  *     contributors may be used to endorse or promote products derived
19  *     from this software without specific prior written permission.
20  *
21  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <linux/sunrpc/clnt.h>
35 #include <linux/sunrpc/xprt.h>
36 #include <linux/sunrpc/svc_xprt.h>
37 #include <linux/slab.h>
38 #include "nfsd.h"
39 #include "state.h"
40 #include "netns.h"
41 #include "xdr4cb.h"
42 
43 #define NFSDDBG_FACILITY                NFSDDBG_PROC
44 
45 static void nfsd4_mark_cb_fault(struct nfs4_client *, int reason);
46 
47 #define NFSPROC4_CB_NULL 0
48 #define NFSPROC4_CB_COMPOUND 1
49 
50 /* Index of predefined Linux callback client operations */
51 
52 struct nfs4_cb_compound_hdr {
53 	/* args */
54 	u32		ident;	/* minorversion 0 only */
55 	u32		nops;
56 	__be32		*nops_p;
57 	u32		minorversion;
58 	/* res */
59 	int		status;
60 };
61 
62 /*
63  * Handle decode buffer overflows out-of-line.
64  */
65 static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
66 {
67 	dprintk("NFS: %s prematurely hit the end of our receive buffer. "
68 		"Remaining buffer length is %tu words.\n",
69 		func, xdr->end - xdr->p);
70 }
71 
72 static __be32 *xdr_encode_empty_array(__be32 *p)
73 {
74 	*p++ = xdr_zero;
75 	return p;
76 }
77 
78 /*
79  * Encode/decode NFSv4 CB basic data types
80  *
81  * Basic NFSv4 callback data types are defined in section 15 of RFC
82  * 3530: "Network File System (NFS) version 4 Protocol" and section
83  * 20 of RFC 5661: "Network File System (NFS) Version 4 Minor Version
84  * 1 Protocol"
85  */
86 
87 /*
88  *	nfs_cb_opnum4
89  *
90  *	enum nfs_cb_opnum4 {
91  *		OP_CB_GETATTR		= 3,
92  *		  ...
93  *	};
94  */
95 enum nfs_cb_opnum4 {
96 	OP_CB_GETATTR			= 3,
97 	OP_CB_RECALL			= 4,
98 	OP_CB_LAYOUTRECALL		= 5,
99 	OP_CB_NOTIFY			= 6,
100 	OP_CB_PUSH_DELEG		= 7,
101 	OP_CB_RECALL_ANY		= 8,
102 	OP_CB_RECALLABLE_OBJ_AVAIL	= 9,
103 	OP_CB_RECALL_SLOT		= 10,
104 	OP_CB_SEQUENCE			= 11,
105 	OP_CB_WANTS_CANCELLED		= 12,
106 	OP_CB_NOTIFY_LOCK		= 13,
107 	OP_CB_NOTIFY_DEVICEID		= 14,
108 	OP_CB_ILLEGAL			= 10044
109 };
110 
111 static void encode_nfs_cb_opnum4(struct xdr_stream *xdr, enum nfs_cb_opnum4 op)
112 {
113 	__be32 *p;
114 
115 	p = xdr_reserve_space(xdr, 4);
116 	*p = cpu_to_be32(op);
117 }
118 
119 /*
120  * nfs_fh4
121  *
122  *	typedef opaque nfs_fh4<NFS4_FHSIZE>;
123  */
124 static void encode_nfs_fh4(struct xdr_stream *xdr, const struct knfsd_fh *fh)
125 {
126 	u32 length = fh->fh_size;
127 	__be32 *p;
128 
129 	BUG_ON(length > NFS4_FHSIZE);
130 	p = xdr_reserve_space(xdr, 4 + length);
131 	xdr_encode_opaque(p, &fh->fh_base, length);
132 }
133 
134 /*
135  * stateid4
136  *
137  *	struct stateid4 {
138  *		uint32_t	seqid;
139  *		opaque		other[12];
140  *	};
141  */
142 static void encode_stateid4(struct xdr_stream *xdr, const stateid_t *sid)
143 {
144 	__be32 *p;
145 
146 	p = xdr_reserve_space(xdr, NFS4_STATEID_SIZE);
147 	*p++ = cpu_to_be32(sid->si_generation);
148 	xdr_encode_opaque_fixed(p, &sid->si_opaque, NFS4_STATEID_OTHER_SIZE);
149 }
150 
151 /*
152  * sessionid4
153  *
154  *	typedef opaque sessionid4[NFS4_SESSIONID_SIZE];
155  */
156 static void encode_sessionid4(struct xdr_stream *xdr,
157 			      const struct nfsd4_session *session)
158 {
159 	__be32 *p;
160 
161 	p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN);
162 	xdr_encode_opaque_fixed(p, session->se_sessionid.data,
163 					NFS4_MAX_SESSIONID_LEN);
164 }
165 
166 /*
167  * nfsstat4
168  */
169 static const struct {
170 	int stat;
171 	int errno;
172 } nfs_cb_errtbl[] = {
173 	{ NFS4_OK,		0		},
174 	{ NFS4ERR_PERM,		-EPERM		},
175 	{ NFS4ERR_NOENT,	-ENOENT		},
176 	{ NFS4ERR_IO,		-EIO		},
177 	{ NFS4ERR_NXIO,		-ENXIO		},
178 	{ NFS4ERR_ACCESS,	-EACCES		},
179 	{ NFS4ERR_EXIST,	-EEXIST		},
180 	{ NFS4ERR_XDEV,		-EXDEV		},
181 	{ NFS4ERR_NOTDIR,	-ENOTDIR	},
182 	{ NFS4ERR_ISDIR,	-EISDIR		},
183 	{ NFS4ERR_INVAL,	-EINVAL		},
184 	{ NFS4ERR_FBIG,		-EFBIG		},
185 	{ NFS4ERR_NOSPC,	-ENOSPC		},
186 	{ NFS4ERR_ROFS,		-EROFS		},
187 	{ NFS4ERR_MLINK,	-EMLINK		},
188 	{ NFS4ERR_NAMETOOLONG,	-ENAMETOOLONG	},
189 	{ NFS4ERR_NOTEMPTY,	-ENOTEMPTY	},
190 	{ NFS4ERR_DQUOT,	-EDQUOT		},
191 	{ NFS4ERR_STALE,	-ESTALE		},
192 	{ NFS4ERR_BADHANDLE,	-EBADHANDLE	},
193 	{ NFS4ERR_BAD_COOKIE,	-EBADCOOKIE	},
194 	{ NFS4ERR_NOTSUPP,	-ENOTSUPP	},
195 	{ NFS4ERR_TOOSMALL,	-ETOOSMALL	},
196 	{ NFS4ERR_SERVERFAULT,	-ESERVERFAULT	},
197 	{ NFS4ERR_BADTYPE,	-EBADTYPE	},
198 	{ NFS4ERR_LOCKED,	-EAGAIN		},
199 	{ NFS4ERR_RESOURCE,	-EREMOTEIO	},
200 	{ NFS4ERR_SYMLINK,	-ELOOP		},
201 	{ NFS4ERR_OP_ILLEGAL,	-EOPNOTSUPP	},
202 	{ NFS4ERR_DEADLOCK,	-EDEADLK	},
203 	{ -1,			-EIO		}
204 };
205 
206 /*
207  * If we cannot translate the error, the recovery routines should
208  * handle it.
209  *
210  * Note: remaining NFSv4 error codes have values > 10000, so should
211  * not conflict with native Linux error codes.
212  */
213 static int nfs_cb_stat_to_errno(int status)
214 {
215 	int i;
216 
217 	for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
218 		if (nfs_cb_errtbl[i].stat == status)
219 			return nfs_cb_errtbl[i].errno;
220 	}
221 
222 	dprintk("NFSD: Unrecognized NFS CB status value: %u\n", status);
223 	return -status;
224 }
225 
226 static int decode_cb_op_status(struct xdr_stream *xdr,
227 			       enum nfs_cb_opnum4 expected, int *status)
228 {
229 	__be32 *p;
230 	u32 op;
231 
232 	p = xdr_inline_decode(xdr, 4 + 4);
233 	if (unlikely(p == NULL))
234 		goto out_overflow;
235 	op = be32_to_cpup(p++);
236 	if (unlikely(op != expected))
237 		goto out_unexpected;
238 	*status = nfs_cb_stat_to_errno(be32_to_cpup(p));
239 	return 0;
240 out_overflow:
241 	print_overflow_msg(__func__, xdr);
242 	return -EIO;
243 out_unexpected:
244 	dprintk("NFSD: Callback server returned operation %d but "
245 		"we issued a request for %d\n", op, expected);
246 	return -EIO;
247 }
248 
249 /*
250  * CB_COMPOUND4args
251  *
252  *	struct CB_COMPOUND4args {
253  *		utf8str_cs	tag;
254  *		uint32_t	minorversion;
255  *		uint32_t	callback_ident;
256  *		nfs_cb_argop4	argarray<>;
257  *	};
258  */
259 static void encode_cb_compound4args(struct xdr_stream *xdr,
260 				    struct nfs4_cb_compound_hdr *hdr)
261 {
262 	__be32 * p;
263 
264 	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4);
265 	p = xdr_encode_empty_array(p);		/* empty tag */
266 	*p++ = cpu_to_be32(hdr->minorversion);
267 	*p++ = cpu_to_be32(hdr->ident);
268 
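	/*
	 * Remember where the op count lives so encode_cb_nops() can
	 * patch in the final value once every operation is encoded.
	 */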
269 	hdr->nops_p = p;
270 	*p = cpu_to_be32(hdr->nops);		/* argarray element count */
271 }
272 
273 /*
274  * Update argarray element count
275  */
276 static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
277 {
278 	BUG_ON(hdr->nops > NFS4_MAX_BACK_CHANNEL_OPS);
279 	*hdr->nops_p = cpu_to_be32(hdr->nops);
280 }
281 
282 /*
283  * CB_COMPOUND4res
284  *
285  *	struct CB_COMPOUND4res {
286  *		nfsstat4	status;
287  *		utf8str_cs	tag;
288  *		nfs_cb_resop4	resarray<>;
289  *	};
290  */
291 static int decode_cb_compound4res(struct xdr_stream *xdr,
292 				  struct nfs4_cb_compound_hdr *hdr)
293 {
294 	u32 length;
295 	__be32 *p;
296 
297 	p = xdr_inline_decode(xdr, 4 + 4);
298 	if (unlikely(p == NULL))
299 		goto out_overflow;
300 	hdr->status = be32_to_cpup(p++);
301 	/* Ignore the tag */
302 	length = be32_to_cpup(p++);
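	/*
	 * Decode the opaque tag bytes and the following resarray count
	 * in one shot, then step over the tag we don't care about.
	 */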
303 	p = xdr_inline_decode(xdr, length + 4);
304 	if (unlikely(p == NULL))
305 		goto out_overflow;
306 	p += XDR_QUADLEN(length);
307 	hdr->nops = be32_to_cpup(p);
308 	return 0;
309 out_overflow:
310 	print_overflow_msg(__func__, xdr);
311 	return -EIO;
312 }
313 
314 /*
315  * CB_RECALL4args
316  *
317  *	struct CB_RECALL4args {
318  *		stateid4	stateid;
319  *		bool		truncate;
320  *		nfs_fh4		fh;
321  *	};
322  */
323 static void encode_cb_recall4args(struct xdr_stream *xdr,
324 				  const struct nfs4_delegation *dp,
325 				  struct nfs4_cb_compound_hdr *hdr)
326 {
327 	__be32 *p;
328 
329 	encode_nfs_cb_opnum4(xdr, OP_CB_RECALL);
330 	encode_stateid4(xdr, &dp->dl_stid.sc_stateid);
331 
332 	p = xdr_reserve_space(xdr, 4);
333 	*p++ = xdr_zero;			/* truncate */
334 
335 	encode_nfs_fh4(xdr, &dp->dl_stid.sc_file->fi_fhandle);
336 
337 	hdr->nops++;
338 }
339 
340 /*
341  * CB_SEQUENCE4args
342  *
343  *	struct CB_SEQUENCE4args {
344  *		sessionid4		csa_sessionid;
345  *		sequenceid4		csa_sequenceid;
346  *		slotid4			csa_slotid;
347  *		slotid4			csa_highest_slotid;
348  *		bool			csa_cachethis;
349  *		referring_call_list4	csa_referring_call_lists<>;
350  *	};
351  */
352 static void encode_cb_sequence4args(struct xdr_stream *xdr,
353 				    const struct nfsd4_callback *cb,
354 				    struct nfs4_cb_compound_hdr *hdr)
355 {
356 	struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
357 	__be32 *p;
358 
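	/*
	 * CB_SEQUENCE exists only in NFSv4.1 and later; a v4.0
	 * callback compound carries no session state.
	 */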
359 	if (hdr->minorversion == 0)
360 		return;
361 
362 	encode_nfs_cb_opnum4(xdr, OP_CB_SEQUENCE);
363 	encode_sessionid4(xdr, session);
364 
365 	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4 + 4);
366 	*p++ = cpu_to_be32(session->se_cb_seq_nr);	/* csa_sequenceid */
367 	*p++ = xdr_zero;			/* csa_slotid */
368 	*p++ = xdr_zero;			/* csa_highest_slotid */
369 	*p++ = xdr_zero;			/* csa_cachethis */
370 	xdr_encode_empty_array(p);		/* csa_referring_call_lists */
371 
372 	hdr->nops++;
373 }
374 
375 /*
376  * CB_SEQUENCE4resok
377  *
378  *	struct CB_SEQUENCE4resok {
379  *		sessionid4	csr_sessionid;
380  *		sequenceid4	csr_sequenceid;
381  *		slotid4		csr_slotid;
382  *		slotid4		csr_highest_slotid;
383  *		slotid4		csr_target_highest_slotid;
384  *	};
385  *
386  *	union CB_SEQUENCE4res switch (nfsstat4 csr_status) {
387  *	case NFS4_OK:
388  *		CB_SEQUENCE4resok	csr_resok4;
389  *	default:
390  *		void;
391  *	};
392  *
393  * Our current back channel implementation supports a single backchannel
394  * with a single slot.
395  */
396 static int decode_cb_sequence4resok(struct xdr_stream *xdr,
397 				    struct nfsd4_callback *cb)
398 {
399 	struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
400 	int status = -ESERVERFAULT;
401 	__be32 *p;
402 	u32 dummy;
403 
404 	/*
405 	 * If the server returns different values for sessionID, slotID or
406 	 * sequence number, the server is looney tunes.
407 	 */
408 	p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4 + 4 + 4);
409 	if (unlikely(p == NULL))
410 		goto out_overflow;
411 
412 	if (memcmp(p, session->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) {
413 		dprintk("NFS: %s Invalid session id\n", __func__);
414 		goto out;
415 	}
416 	p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
417 
418 	dummy = be32_to_cpup(p++);
419 	if (dummy != session->se_cb_seq_nr) {
420 		dprintk("NFS: %s Invalid sequence number\n", __func__);
421 		goto out;
422 	}
423 
424 	dummy = be32_to_cpup(p++);
425 	if (dummy != 0) {
426 		dprintk("NFS: %s Invalid slotid\n", __func__);
427 		goto out;
428 	}
429 
430 	/*
431 	 * FIXME: process highest slotid and target highest slotid
432 	 */
433 	status = 0;
434 out:
435 	cb->cb_seq_status = status;
436 	return status;
437 out_overflow:
438 	print_overflow_msg(__func__, xdr);
439 	status = -EIO;
440 	goto out;
441 }
442 
443 static int decode_cb_sequence4res(struct xdr_stream *xdr,
444 				  struct nfsd4_callback *cb)
445 {
446 	int status;
447 
448 	if (cb->cb_clp->cl_minorversion == 0)
449 		return 0;
450 
451 	status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_seq_status);
452 	if (unlikely(status || cb->cb_seq_status))
453 		return status;
454 
455 	return decode_cb_sequence4resok(xdr, cb);
456 }
457 
458 /*
459  * NFSv4.0 and NFSv4.1 XDR encode functions
460  *
461  * NFSv4.0 callback argument types are defined in section 15 of RFC
462  * 3530: "Network File System (NFS) version 4 Protocol" and section 20
463  * of RFC 5661:  "Network File System (NFS) Version 4 Minor Version 1
464  * Protocol".
465  */
466 
467 /*
468  * NB: Without this zero space reservation, callbacks over krb5p fail
469  */
470 static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
471 				 const void *__unused)
472 {
473 	xdr_reserve_space(xdr, 0);
474 }
475 
476 /*
477  * 20.2. Operation 4: CB_RECALL - Recall a Delegation
478  */
479 static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
480 				   const void *data)
481 {
482 	const struct nfsd4_callback *cb = data;
483 	const struct nfs4_delegation *dp = cb_to_delegation(cb);
484 	struct nfs4_cb_compound_hdr hdr = {
485 		.ident = cb->cb_clp->cl_cb_ident,
486 		.minorversion = cb->cb_clp->cl_minorversion,
487 	};
488 
489 	encode_cb_compound4args(xdr, &hdr);
490 	encode_cb_sequence4args(xdr, cb, &hdr);
491 	encode_cb_recall4args(xdr, dp, &hdr);
492 	encode_cb_nops(&hdr);
493 }
494 
495 
496 /*
497  * NFSv4.0 and NFSv4.1 XDR decode functions
498  *
499  * NFSv4.0 callback result types are defined in section 15 of RFC
500  * 3530: "Network File System (NFS) version 4 Protocol" and section 20
501  * of RFC 5661:  "Network File System (NFS) Version 4 Minor Version 1
502  * Protocol".
503  */
504 
505 static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
506 				void *__unused)
507 {
508 	return 0;
509 }
510 
511 /*
512  * 20.2. Operation 4: CB_RECALL - Recall a Delegation
513  */
514 static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
515 				  struct xdr_stream *xdr,
516 				  void *data)
517 {
518 	struct nfsd4_callback *cb = data;
519 	struct nfs4_cb_compound_hdr hdr;
520 	int status;
521 
522 	status = decode_cb_compound4res(xdr, &hdr);
523 	if (unlikely(status))
524 		return status;
525 
526 	if (cb != NULL) {
527 		status = decode_cb_sequence4res(xdr, cb);
528 		if (unlikely(status || cb->cb_seq_status))
529 			return status;
530 	}
531 
532 	return decode_cb_op_status(xdr, OP_CB_RECALL, &cb->cb_status);
533 }
534 
535 #ifdef CONFIG_NFSD_PNFS
536 /*
537  * CB_LAYOUTRECALL4args
538  *
539  *	struct layoutrecall_file4 {
540  *		nfs_fh4         lor_fh;
541  *		offset4         lor_offset;
542  *		length4         lor_length;
543  *		stateid4        lor_stateid;
544  *	};
545  *
546  *	union layoutrecall4 switch(layoutrecall_type4 lor_recalltype) {
547  *	case LAYOUTRECALL4_FILE:
548  *		layoutrecall_file4 lor_layout;
549  *	case LAYOUTRECALL4_FSID:
550  *		fsid4              lor_fsid;
551  *	case LAYOUTRECALL4_ALL:
552  *		void;
553  *	};
554  *
555  *	struct CB_LAYOUTRECALL4args {
556  *		layouttype4             clora_type;
557  *		layoutiomode4           clora_iomode;
558  *		bool                    clora_changed;
559  *		layoutrecall4           clora_recall;
560  *	};
561  */
562 static void encode_cb_layout4args(struct xdr_stream *xdr,
563 				  const struct nfs4_layout_stateid *ls,
564 				  struct nfs4_cb_compound_hdr *hdr)
565 {
566 	__be32 *p;
567 
568 	BUG_ON(hdr->minorversion == 0);
569 
570 	p = xdr_reserve_space(xdr, 5 * 4);
571 	*p++ = cpu_to_be32(OP_CB_LAYOUTRECALL);
572 	*p++ = cpu_to_be32(ls->ls_layout_type);
573 	*p++ = cpu_to_be32(IOMODE_ANY);
574 	*p++ = cpu_to_be32(1);
575 	*p = cpu_to_be32(RETURN_FILE);
576 
577 	encode_nfs_fh4(xdr, &ls->ls_stid.sc_file->fi_fhandle);
578 
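	/* Recall the whole file: lor_offset 0, lor_length NFS4_MAX_UINT64 */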
579 	p = xdr_reserve_space(xdr, 2 * 8);
580 	p = xdr_encode_hyper(p, 0);
581 	xdr_encode_hyper(p, NFS4_MAX_UINT64);
582 
583 	encode_stateid4(xdr, &ls->ls_recall_sid);
584 
585 	hdr->nops++;
586 }
587 
588 static void nfs4_xdr_enc_cb_layout(struct rpc_rqst *req,
589 				   struct xdr_stream *xdr,
590 				   const void *data)
591 {
592 	const struct nfsd4_callback *cb = data;
593 	const struct nfs4_layout_stateid *ls =
594 		container_of(cb, struct nfs4_layout_stateid, ls_recall);
595 	struct nfs4_cb_compound_hdr hdr = {
596 		.ident = 0,
597 		.minorversion = cb->cb_clp->cl_minorversion,
598 	};
599 
600 	encode_cb_compound4args(xdr, &hdr);
601 	encode_cb_sequence4args(xdr, cb, &hdr);
602 	encode_cb_layout4args(xdr, ls, &hdr);
603 	encode_cb_nops(&hdr);
604 }
605 
606 static int nfs4_xdr_dec_cb_layout(struct rpc_rqst *rqstp,
607 				  struct xdr_stream *xdr,
608 				  void *data)
609 {
610 	struct nfsd4_callback *cb = data;
611 	struct nfs4_cb_compound_hdr hdr;
612 	int status;
613 
614 	status = decode_cb_compound4res(xdr, &hdr);
615 	if (unlikely(status))
616 		return status;
617 
618 	if (cb) {
619 		status = decode_cb_sequence4res(xdr, cb);
620 		if (unlikely(status || cb->cb_seq_status))
621 			return status;
622 	}
623 	return decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &cb->cb_status);
624 }
625 #endif /* CONFIG_NFSD_PNFS */
626 
627 static void encode_stateowner(struct xdr_stream *xdr, struct nfs4_stateowner *so)
628 {
629 	__be32	*p;
630 
631 	p = xdr_reserve_space(xdr, 8 + 4 + so->so_owner.len);
632 	p = xdr_encode_opaque_fixed(p, &so->so_client->cl_clientid, 8);
633 	xdr_encode_opaque(p, so->so_owner.data, so->so_owner.len);
634 }
635 
636 static void nfs4_xdr_enc_cb_notify_lock(struct rpc_rqst *req,
637 					struct xdr_stream *xdr,
638 					const void *data)
639 {
640 	const struct nfsd4_callback *cb = data;
641 	const struct nfsd4_blocked_lock *nbl =
642 		container_of(cb, struct nfsd4_blocked_lock, nbl_cb);
643 	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)nbl->nbl_lock.fl_owner;
644 	struct nfs4_cb_compound_hdr hdr = {
645 		.ident = 0,
646 		.minorversion = cb->cb_clp->cl_minorversion,
647 	};
648 
649 	__be32 *p;
650 
651 	BUG_ON(hdr.minorversion == 0);
652 
653 	encode_cb_compound4args(xdr, &hdr);
654 	encode_cb_sequence4args(xdr, cb, &hdr);
655 
656 	p = xdr_reserve_space(xdr, 4);
657 	*p = cpu_to_be32(OP_CB_NOTIFY_LOCK);
658 	encode_nfs_fh4(xdr, &nbl->nbl_fh);
659 	encode_stateowner(xdr, &lo->lo_owner);
660 	hdr.nops++;
661 
662 	encode_cb_nops(&hdr);
663 }
664 
665 static int nfs4_xdr_dec_cb_notify_lock(struct rpc_rqst *rqstp,
666 					struct xdr_stream *xdr,
667 					void *data)
668 {
669 	struct nfsd4_callback *cb = data;
670 	struct nfs4_cb_compound_hdr hdr;
671 	int status;
672 
673 	status = decode_cb_compound4res(xdr, &hdr);
674 	if (unlikely(status))
675 		return status;
676 
677 	if (cb) {
678 		status = decode_cb_sequence4res(xdr, cb);
679 		if (unlikely(status || cb->cb_seq_status))
680 			return status;
681 	}
682 	return decode_cb_op_status(xdr, OP_CB_NOTIFY_LOCK, &cb->cb_status);
683 }
684 
685 /*
686  * RPC procedure tables
687  */
688 #define PROC(proc, call, argtype, restype)				\
689 [NFSPROC4_CLNT_##proc] = {						\
690 	.p_proc    = NFSPROC4_CB_##call,				\
691 	.p_encode  = nfs4_xdr_enc_##argtype,		\
692 	.p_decode  = nfs4_xdr_dec_##restype,				\
693 	.p_arglen  = NFS4_enc_##argtype##_sz,				\
694 	.p_replen  = NFS4_dec_##restype##_sz,				\
695 	.p_statidx = NFSPROC4_CB_##call,				\
696 	.p_name    = #proc,						\
697 }
698 
699 static const struct rpc_procinfo nfs4_cb_procedures[] = {
700 	PROC(CB_NULL,	NULL,		cb_null,	cb_null),
701 	PROC(CB_RECALL,	COMPOUND,	cb_recall,	cb_recall),
702 #ifdef CONFIG_NFSD_PNFS
703 	PROC(CB_LAYOUT,	COMPOUND,	cb_layout,	cb_layout),
704 #endif
705 	PROC(CB_NOTIFY_LOCK,	COMPOUND,	cb_notify_lock,	cb_notify_lock),
706 };
707 
708 static unsigned int nfs4_cb_counts[ARRAY_SIZE(nfs4_cb_procedures)];
709 static const struct rpc_version nfs_cb_version4 = {
710 /*
711  * Note on the callback rpc program version number: despite language in rfc
712  * 5661 section 18.36.3 requiring servers to use 4 in this field, the
713  * official xdr descriptions for both 4.0 and 4.1 specify version 1, and
714  * in practice that appears to be what implementations use.  The section
715  * 18.36.3 language is expected to be fixed in an erratum.
716  */
717 	.number			= 1,
718 	.nrprocs		= ARRAY_SIZE(nfs4_cb_procedures),
719 	.procs			= nfs4_cb_procedures,
720 	.counts			= nfs4_cb_counts,
721 };
722 
723 static const struct rpc_version *nfs_cb_version[2] = {
724 	[1] = &nfs_cb_version4,
725 };
726 
727 static const struct rpc_program cb_program;
728 
729 static struct rpc_stat cb_stats = {
730 	.program		= &cb_program
731 };
732 
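/* The callback RPC program number lies in the "transient" program range */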
733 #define NFS4_CALLBACK 0x40000000
734 static const struct rpc_program cb_program = {
735 	.name			= "nfs4_cb",
736 	.number			= NFS4_CALLBACK,
737 	.nrvers			= ARRAY_SIZE(nfs_cb_version),
738 	.version		= nfs_cb_version,
739 	.stats			= &cb_stats,
740 	.pipe_dir_name		= "nfsd4_cb",
741 };
742 
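/* Callback RPC timeout: a tenth of the lease period, at least one second, in jiffies */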
743 static int max_cb_time(struct net *net)
744 {
745 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
746 	return max(nn->nfsd4_lease/10, (time_t)1) * HZ;
747 }
748 
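/*
 * For a v4.0 backchannel, use the machine credential for the target
 * principal the client authenticated to (cr_targ_princ, or "nfs" by
 * default).  For v4.1+, the client chose the backchannel security in
 * CREATE_SESSION, so build a credential from the session's cb_sec
 * uid/gid.
 */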
749 static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses)
750 {
751 	if (clp->cl_minorversion == 0) {
752 		char *principal = clp->cl_cred.cr_targ_princ ?
753 					clp->cl_cred.cr_targ_princ : "nfs";
754 		struct rpc_cred *cred;
755 
756 		cred = rpc_lookup_machine_cred(principal);
757 		if (!IS_ERR(cred))
758 			get_rpccred(cred);
759 		return cred;
760 	} else {
761 		struct rpc_auth *auth = client->cl_auth;
762 		struct auth_cred acred = {};
763 
764 		acred.uid = ses->se_cb_sec.uid;
765 		acred.gid = ses->se_cb_sec.gid;
766 		return auth->au_ops->lookup_cred(client->cl_auth, &acred, 0);
767 	}
768 }
769 
770 static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
771 {
772 	int maxtime = max_cb_time(clp->net);
773 	struct rpc_timeout	timeparms = {
774 		.to_initval	= maxtime,
775 		.to_retries	= 0,
776 		.to_maxval	= maxtime,
777 	};
778 	struct rpc_create_args args = {
779 		.net		= clp->net,
780 		.address	= (struct sockaddr *) &conn->cb_addr,
781 		.addrsize	= conn->cb_addrlen,
782 		.saddress	= (struct sockaddr *) &conn->cb_saddr,
783 		.timeout	= &timeparms,
784 		.program	= &cb_program,
785 		.version	= 1,
786 		.flags		= (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
787 	};
788 	struct rpc_clnt *client;
789 	struct rpc_cred *cred;
790 
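	/*
	 * A v4.0 client supplies a separate callback address, program
	 * and ident via SETCLIENTID; a v4.1+ client expects callbacks
	 * on a connection it has bound to the session's backchannel.
	 */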
791 	if (clp->cl_minorversion == 0) {
792 		if (!clp->cl_cred.cr_principal &&
793 				(clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5))
794 			return -EINVAL;
795 		args.client_name = clp->cl_cred.cr_principal;
796 		args.prognumber	= conn->cb_prog;
797 		args.protocol = XPRT_TRANSPORT_TCP;
798 		args.authflavor = clp->cl_cred.cr_flavor;
799 		clp->cl_cb_ident = conn->cb_ident;
800 	} else {
801 		if (!conn->cb_xprt)
802 			return -EINVAL;
803 		clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
804 		clp->cl_cb_session = ses;
805 		args.bc_xprt = conn->cb_xprt;
806 		args.prognumber = clp->cl_cb_session->se_cb_prog;
807 		args.protocol = conn->cb_xprt->xpt_class->xcl_ident |
808 				XPRT_TRANSPORT_BC;
809 		args.authflavor = ses->se_cb_sec.flavor;
810 	}
811 	/* Create RPC client */
812 	client = rpc_create(&args);
813 	if (IS_ERR(client)) {
814 		dprintk("NFSD: couldn't create callback client: %ld\n",
815 			PTR_ERR(client));
816 		return PTR_ERR(client);
817 	}
818 	cred = get_backchannel_cred(clp, client, ses);
819 	if (IS_ERR(cred)) {
820 		rpc_shutdown_client(client);
821 		return PTR_ERR(cred);
822 	}
823 	clp->cl_cb_client = client;
824 	clp->cl_cb_cred = cred;
825 	return 0;
826 }
827 
828 static void warn_no_callback_path(struct nfs4_client *clp, int reason)
829 {
830 	dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
831 		(int)clp->cl_name.len, clp->cl_name.data, reason);
832 }
833 
834 static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason)
835 {
836 	if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
837 		return;
838 	clp->cl_cb_state = NFSD4_CB_DOWN;
839 	warn_no_callback_path(clp, reason);
840 }
841 
842 static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason)
843 {
844 	if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
845 		return;
846 	clp->cl_cb_state = NFSD4_CB_FAULT;
847 	warn_no_callback_path(clp, reason);
848 }
849 
850 static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
851 {
852 	struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
853 
854 	if (task->tk_status)
855 		nfsd4_mark_cb_down(clp, task->tk_status);
856 	else
857 		clp->cl_cb_state = NFSD4_CB_UP;
858 }
859 
860 static const struct rpc_call_ops nfsd4_cb_probe_ops = {
861 	/* XXX: release method to ensure we set the cb channel down if
862 	 * necessary on early failure? */
863 	.rpc_call_done = nfsd4_cb_probe_done,
864 };
865 
866 static struct workqueue_struct *callback_wq;
867 
868 /*
869  * Poke the callback thread to process any updates to the callback
870  * parameters, and send a null probe.
871  */
872 void nfsd4_probe_callback(struct nfs4_client *clp)
873 {
874 	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
875 	set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
876 	nfsd4_run_cb(&clp->cl_cb_null);
877 }
878 
879 void nfsd4_probe_callback_sync(struct nfs4_client *clp)
880 {
881 	nfsd4_probe_callback(clp);
882 	flush_workqueue(callback_wq);
883 }
884 
885 void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
886 {
887 	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
888 	spin_lock(&clp->cl_lock);
889 	memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
890 	spin_unlock(&clp->cl_lock);
891 }
892 
893 /*
894  * There's currently a single callback channel slot.
895  * If the slot is available, then mark it busy.  Otherwise, put the
896  * thread to sleep on the callback RPC wait queue.
897  */
898 static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
899 {
900 	if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
901 		rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
902 		/* Race breaker */
903 		if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
904 			dprintk("%s slot is busy\n", __func__);
905 			return false;
906 		}
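		/*
		 * The slot freed up after we queued ourselves: we now
		 * own it, so wake this task straight back up.
		 */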
907 		rpc_wake_up_queued_task(&clp->cl_cb_waitq, task);
908 	}
909 	return true;
910 }
911 
912 /*
913  * TODO: cb_sequence should support referring call lists, cachethis, multiple
914  * slots, and mark callback channel down on communication errors.
915  */
916 static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
917 {
918 	struct nfsd4_callback *cb = calldata;
919 	struct nfs4_client *clp = cb->cb_clp;
920 	u32 minorversion = clp->cl_minorversion;
921 
922 	/*
923 	 * cb_seq_status is only set in decode_cb_sequence4res,
924 	 * and so will remain 1 if an rpc level failure occurs.
925 	 */
926 	cb->cb_seq_status = 1;
927 	cb->cb_status = 0;
928 	if (minorversion) {
929 		if (!nfsd41_cb_get_slot(clp, task))
930 			return;
931 	}
932 	rpc_call_start(task);
933 }
934 
935 static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback *cb)
936 {
937 	struct nfs4_client *clp = cb->cb_clp;
938 	struct nfsd4_session *session = clp->cl_cb_session;
939 	bool ret = true;
940 
941 	if (!clp->cl_minorversion) {
942 		/*
943 		 * If the backchannel connection was shut down while this
944 		 * task was queued, we need to resubmit it after setting up
945 		 * a new backchannel connection.
946 		 *
947 		 * Note that if we lost our callback connection permanently
948 		 * the submission code will error out, so we don't need to
949 		 * handle that case here.
950 		 */
951 		if (task->tk_flags & RPC_TASK_KILLED)
952 			goto need_restart;
953 
954 		return true;
955 	}
956 
957 	switch (cb->cb_seq_status) {
958 	case 0:
959 		/*
960 		 * No need for lock, access serialized in nfsd4_cb_prepare
961 		 *
962 		 * RFC5661 20.9.3
963 		 * If CB_SEQUENCE returns an error, then the state of the slot
964 		 * (sequence ID, cached reply) MUST NOT change.
965 		 */
966 		++session->se_cb_seq_nr;
967 		break;
968 	case -ESERVERFAULT:
969 		++session->se_cb_seq_nr;
970 		/* Fall through */
971 	case 1:
972 	case -NFS4ERR_BADSESSION:
973 		nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status);
974 		ret = false;
975 		break;
976 	case -NFS4ERR_DELAY:
977 		if (!rpc_restart_call(task))
978 			goto out;
979 
980 		rpc_delay(task, 2 * HZ);
981 		return false;
982 	case -NFS4ERR_BADSLOT:
983 		goto retry_nowait;
984 	case -NFS4ERR_SEQ_MISORDERED:
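		/*
		 * Retry once with the sequence number reset to 1; if it
		 * was already 1, give up and release the slot below.
		 */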
985 		if (session->se_cb_seq_nr != 1) {
986 			session->se_cb_seq_nr = 1;
987 			goto retry_nowait;
988 		}
989 		break;
990 	default:
991 		dprintk("%s: unprocessed error %d\n", __func__,
992 			cb->cb_seq_status);
993 	}
994 
995 	clear_bit(0, &clp->cl_cb_slot_busy);
996 	rpc_wake_up_next(&clp->cl_cb_waitq);
997 	dprintk("%s: freed slot, new seqid=%d\n", __func__,
998 		clp->cl_cb_session->se_cb_seq_nr);
999 
1000 	if (task->tk_flags & RPC_TASK_KILLED)
1001 		goto need_restart;
1002 out:
1003 	return ret;
1004 retry_nowait:
1005 	if (rpc_restart_call_prepare(task))
1006 		ret = false;
1007 	goto out;
1008 need_restart:
1009 	task->tk_status = 0;
1010 	cb->cb_need_restart = true;
1011 	return false;
1012 }
1013 
1014 static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
1015 {
1016 	struct nfsd4_callback *cb = calldata;
1017 	struct nfs4_client *clp = cb->cb_clp;
1018 
1019 	dprintk("%s: minorversion=%d\n", __func__,
1020 		clp->cl_minorversion);
1021 
1022 	if (!nfsd4_cb_sequence_done(task, cb))
1023 		return;
1024 
1025 	if (cb->cb_status) {
1026 		WARN_ON_ONCE(task->tk_status);
1027 		task->tk_status = cb->cb_status;
1028 	}
1029 
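	/*
	 * The per-operation ->done() method returns 0 to request a
	 * retry, 1 when it is finished with this callback, and -1 on
	 * an unrecoverable error.
	 */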
1030 	switch (cb->cb_ops->done(cb, task)) {
1031 	case 0:
1032 		task->tk_status = 0;
1033 		rpc_restart_call_prepare(task);
1034 		return;
1035 	case 1:
1036 		break;
1037 	case -1:
1038 		/* Network partition? */
1039 		nfsd4_mark_cb_down(clp, task->tk_status);
1040 		break;
1041 	default:
1042 		BUG();
1043 	}
1044 }
1045 
1046 static void nfsd4_cb_release(void *calldata)
1047 {
1048 	struct nfsd4_callback *cb = calldata;
1049 
1050 	if (cb->cb_need_restart)
1051 		nfsd4_run_cb(cb);
1052 	else
1053 		cb->cb_ops->release(cb);
1054 
1055 }
1056 
1057 static const struct rpc_call_ops nfsd4_cb_ops = {
1058 	.rpc_call_prepare = nfsd4_cb_prepare,
1059 	.rpc_call_done = nfsd4_cb_done,
1060 	.rpc_release = nfsd4_cb_release,
1061 };
1062 
1063 int nfsd4_create_callback_queue(void)
1064 {
1065 	callback_wq = alloc_ordered_workqueue("nfsd4_callbacks", 0);
1066 	if (!callback_wq)
1067 		return -ENOMEM;
1068 	return 0;
1069 }
1070 
1071 void nfsd4_destroy_callback_queue(void)
1072 {
1073 	destroy_workqueue(callback_wq);
1074 }
1075 
1076 /* must be called under the state lock */
1077 void nfsd4_shutdown_callback(struct nfs4_client *clp)
1078 {
1079 	set_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags);
1080 	/*
1081 	 * Note this won't actually result in a null callback;
1082 	 * instead, nfsd4_run_cb_work() will detect the killed
1083 	 * client, destroy the rpc client, and stop:
1084 	 */
1085 	nfsd4_run_cb(&clp->cl_cb_null);
1086 	flush_workqueue(callback_wq);
1087 }
1088 
1089 /* requires cl_lock: */
1090 static struct nfsd4_conn * __nfsd4_find_backchannel(struct nfs4_client *clp)
1091 {
1092 	struct nfsd4_session *s;
1093 	struct nfsd4_conn *c;
1094 
1095 	list_for_each_entry(s, &clp->cl_sessions, se_perclnt) {
1096 		list_for_each_entry(c, &s->se_conns, cn_persession) {
1097 			if (c->cn_flags & NFS4_CDFC4_BACK)
1098 				return c;
1099 		}
1100 	}
1101 	return NULL;
1102 }
1103 
1104 static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
1105 {
1106 	struct nfs4_cb_conn conn;
1107 	struct nfs4_client *clp = cb->cb_clp;
1108 	struct nfsd4_session *ses = NULL;
1109 	struct nfsd4_conn *c;
1110 	int err;
1111 
1112 	/*
1113 	 * This is either an update, or the client dying; in either case,
1114 	 * kill the old client:
1115 	 */
1116 	if (clp->cl_cb_client) {
1117 		rpc_shutdown_client(clp->cl_cb_client);
1118 		clp->cl_cb_client = NULL;
1119 		put_rpccred(clp->cl_cb_cred);
1120 		clp->cl_cb_cred = NULL;
1121 	}
1122 	if (clp->cl_cb_conn.cb_xprt) {
1123 		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
1124 		clp->cl_cb_conn.cb_xprt = NULL;
1125 	}
1126 	if (test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags))
1127 		return;
1128 	spin_lock(&clp->cl_lock);
1129 	/*
1130 	 * Only serialized callback code is allowed to clear these
1131 	 * flags; main nfsd code can only set them:
1132 	 */
1133 	BUG_ON(!(clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK));
1134 	clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
1135 	memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
1136 	c = __nfsd4_find_backchannel(clp);
1137 	if (c) {
1138 		svc_xprt_get(c->cn_xprt);
1139 		conn.cb_xprt = c->cn_xprt;
1140 		ses = c->cn_session;
1141 	}
1142 	spin_unlock(&clp->cl_lock);
1143 
1144 	err = setup_callback_client(clp, &conn, ses);
1145 	if (err) {
1146 		nfsd4_mark_cb_down(clp, err);
1147 		return;
1148 	}
1149 }
1150 
1151 static void
1152 nfsd4_run_cb_work(struct work_struct *work)
1153 {
1154 	struct nfsd4_callback *cb =
1155 		container_of(work, struct nfsd4_callback, cb_work);
1156 	struct nfs4_client *clp = cb->cb_clp;
1157 	struct rpc_clnt *clnt;
1158 
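	/*
	 * A requeued callback already ran ->prepare() on its first
	 * pass; don't run it twice.
	 */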
1159 	if (cb->cb_need_restart) {
1160 		cb->cb_need_restart = false;
1161 	} else {
1162 		if (cb->cb_ops && cb->cb_ops->prepare)
1163 			cb->cb_ops->prepare(cb);
1164 	}
1165 
1166 	if (clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK)
1167 		nfsd4_process_cb_update(cb);
1168 
1169 	clnt = clp->cl_cb_client;
1170 	if (!clnt) {
1171 		/* Callback channel broken, or client killed; give up: */
1172 		if (cb->cb_ops && cb->cb_ops->release)
1173 			cb->cb_ops->release(cb);
1174 		return;
1175 	}
1176 
1177 	/*
1178 	 * Don't send probe messages for 4.1 or later.
1179 	 */
1180 	if (!cb->cb_ops && clp->cl_minorversion) {
1181 		clp->cl_cb_state = NFSD4_CB_UP;
1182 		return;
1183 	}
1184 
1185 	cb->cb_msg.rpc_cred = clp->cl_cb_cred;
1186 	rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
1187 			cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb);
1188 }
1189 
1190 void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
1191 		const struct nfsd4_callback_ops *ops, enum nfsd4_cb_op op)
1192 {
1193 	cb->cb_clp = clp;
1194 	cb->cb_msg.rpc_proc = &nfs4_cb_procedures[op];
1195 	cb->cb_msg.rpc_argp = cb;
1196 	cb->cb_msg.rpc_resp = cb;
1197 	cb->cb_ops = ops;
1198 	INIT_WORK(&cb->cb_work, nfsd4_run_cb_work);
1199 	cb->cb_seq_status = 1;
1200 	cb->cb_status = 0;
1201 	cb->cb_need_restart = false;
1202 }
1203 
1204 void nfsd4_run_cb(struct nfsd4_callback *cb)
1205 {
1206 	queue_work(callback_wq, &cb->cb_work);
1207 }
1208