1 /*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/string.h>
42 #include <linux/ratelimit.h>
43 #include <linux/printk.h>
44 #include <linux/slab.h>
45 #include <linux/sunrpc/clnt.h>
46 #include <linux/nfs.h>
47 #include <linux/nfs4.h>
48 #include <linux/nfs_fs.h>
49 #include <linux/nfs_page.h>
50 #include <linux/nfs_mount.h>
51 #include <linux/namei.h>
52 #include <linux/mount.h>
53 #include <linux/module.h>
54 #include <linux/xattr.h>
55 #include <linux/utsname.h>
56 #include <linux/freezer.h>
57 #include <linux/iversion.h>
58
59 #include "nfs4_fs.h"
60 #include "delegation.h"
61 #include "internal.h"
62 #include "iostat.h"
63 #include "callback.h"
64 #include "pnfs.h"
65 #include "netns.h"
66 #include "sysfs.h"
67 #include "nfs4idmap.h"
68 #include "nfs4session.h"
69 #include "fscache.h"
70 #include "nfs42.h"
71
72 #include "nfs4trace.h"
73
74 #define NFSDBG_FACILITY NFSDBG_PROC
75
76 #define NFS4_BITMASK_SZ 3
77
78 #define NFS4_POLL_RETRY_MIN (HZ/10)
79 #define NFS4_POLL_RETRY_MAX (15*HZ)
80
81 /* file attributes which can be mapped to nfs attributes */
82 #define NFS4_VALID_ATTRS (ATTR_MODE \
83 | ATTR_UID \
84 | ATTR_GID \
85 | ATTR_SIZE \
86 | ATTR_ATIME \
87 | ATTR_MTIME \
88 | ATTR_CTIME \
89 | ATTR_ATIME_SET \
90 | ATTR_MTIME_SET)
91
92 struct nfs4_opendata;
93 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
94 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
95 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
96 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
97 struct nfs_fattr *fattr, struct inode *inode);
98 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
99 struct nfs_fattr *fattr, struct iattr *sattr,
100 struct nfs_open_context *ctx, struct nfs4_label *ilabel);
101 #ifdef CONFIG_NFS_V4_1
102 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
103 const struct cred *cred,
104 struct nfs4_slot *slot,
105 bool is_privileged);
106 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
107 const struct cred *);
108 static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
109 const struct cred *, bool);
110 #endif
111
112 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
113 static inline struct nfs4_label *
114 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
115 struct iattr *sattr, struct nfs4_label *label)
116 {
117 int err;
118
119 if (label == NULL)
120 return NULL;
121
122 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
123 return NULL;
124
125 err = security_dentry_init_security(dentry, sattr->ia_mode,
126 &dentry->d_name, NULL,
127 (void **)&label->label, &label->len);
128 if (err == 0)
129 return label;
130
131 return NULL;
132 }
133 static inline void
134 nfs4_label_release_security(struct nfs4_label *label)
135 {
136 if (label)
137 security_release_secctx(label->label, label->len);
138 }
139 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
140 {
141 if (label)
142 return server->attr_bitmask;
143
144 return server->attr_bitmask_nl;
145 }
146 #else
147 static inline struct nfs4_label *
148 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
149 struct iattr *sattr, struct nfs4_label *l)
150 { return NULL; }
151 static inline void
152 nfs4_label_release_security(struct nfs4_label *label)
153 { return; }
154 static inline u32 *
155 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
156 { return server->attr_bitmask; }
157 #endif
158
159 /* Prevent leaks of NFSv4 errors into userland */
160 static int nfs4_map_errors(int err)
161 {
162 if (err >= -1000)
163 return err;
164 switch (err) {
165 case -NFS4ERR_RESOURCE:
166 case -NFS4ERR_LAYOUTTRYLATER:
167 case -NFS4ERR_RECALLCONFLICT:
168 return -EREMOTEIO;
169 case -NFS4ERR_WRONGSEC:
170 case -NFS4ERR_WRONG_CRED:
171 return -EPERM;
172 case -NFS4ERR_BADOWNER:
173 case -NFS4ERR_BADNAME:
174 return -EINVAL;
175 case -NFS4ERR_SHARE_DENIED:
176 return -EACCES;
177 case -NFS4ERR_MINOR_VERS_MISMATCH:
178 return -EPROTONOSUPPORT;
179 case -NFS4ERR_FILE_OPEN:
180 return -EBUSY;
181 case -NFS4ERR_NOT_SAME:
182 return -ENOTSYNC;
183 default:
184 dprintk("%s could not handle NFSv4 error %d\n",
185 __func__, -err);
186 break;
187 }
188 return -EIO;
189 }
190
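/*
 * Illustrative sketch, not part of the original source: callers pass any
 * status that may still carry a raw NFSv4 protocol error through
 * nfs4_map_errors() before it can reach userland, e.g.
 *
 *	status = _nfs4_do_some_rpc(server, ...);	// hypothetical worker
 *	if (status < 0)
 *		return nfs4_map_errors(status);
 *
 * Values >= -1000 are assumed to already be ordinary errnos (or success)
 * and are returned unchanged; unknown NFSv4 errors collapse to -EIO.
 */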
191 /*
192 * This is our standard bitmap for GETATTR requests.
193 */
194 const u32 nfs4_fattr_bitmap[3] = {
195 FATTR4_WORD0_TYPE
196 | FATTR4_WORD0_CHANGE
197 | FATTR4_WORD0_SIZE
198 | FATTR4_WORD0_FSID
199 | FATTR4_WORD0_FILEID,
200 FATTR4_WORD1_MODE
201 | FATTR4_WORD1_NUMLINKS
202 | FATTR4_WORD1_OWNER
203 | FATTR4_WORD1_OWNER_GROUP
204 | FATTR4_WORD1_RAWDEV
205 | FATTR4_WORD1_SPACE_USED
206 | FATTR4_WORD1_TIME_ACCESS
207 | FATTR4_WORD1_TIME_METADATA
208 | FATTR4_WORD1_TIME_MODIFY
209 | FATTR4_WORD1_MOUNTED_ON_FILEID,
210 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
211 FATTR4_WORD2_SECURITY_LABEL
212 #endif
213 };
214
215 static const u32 nfs4_pnfs_open_bitmap[3] = {
216 FATTR4_WORD0_TYPE
217 | FATTR4_WORD0_CHANGE
218 | FATTR4_WORD0_SIZE
219 | FATTR4_WORD0_FSID
220 | FATTR4_WORD0_FILEID,
221 FATTR4_WORD1_MODE
222 | FATTR4_WORD1_NUMLINKS
223 | FATTR4_WORD1_OWNER
224 | FATTR4_WORD1_OWNER_GROUP
225 | FATTR4_WORD1_RAWDEV
226 | FATTR4_WORD1_SPACE_USED
227 | FATTR4_WORD1_TIME_ACCESS
228 | FATTR4_WORD1_TIME_METADATA
229 | FATTR4_WORD1_TIME_MODIFY,
230 FATTR4_WORD2_MDSTHRESHOLD
231 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
232 | FATTR4_WORD2_SECURITY_LABEL
233 #endif
234 };
235
236 static const u32 nfs4_open_noattr_bitmap[3] = {
237 FATTR4_WORD0_TYPE
238 | FATTR4_WORD0_FILEID,
239 };
240
241 const u32 nfs4_statfs_bitmap[3] = {
242 FATTR4_WORD0_FILES_AVAIL
243 | FATTR4_WORD0_FILES_FREE
244 | FATTR4_WORD0_FILES_TOTAL,
245 FATTR4_WORD1_SPACE_AVAIL
246 | FATTR4_WORD1_SPACE_FREE
247 | FATTR4_WORD1_SPACE_TOTAL
248 };
249
250 const u32 nfs4_pathconf_bitmap[3] = {
251 FATTR4_WORD0_MAXLINK
252 | FATTR4_WORD0_MAXNAME,
253 0
254 };
255
256 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
257 | FATTR4_WORD0_MAXREAD
258 | FATTR4_WORD0_MAXWRITE
259 | FATTR4_WORD0_LEASE_TIME,
260 FATTR4_WORD1_TIME_DELTA
261 | FATTR4_WORD1_FS_LAYOUT_TYPES,
262 FATTR4_WORD2_LAYOUT_BLKSIZE
263 | FATTR4_WORD2_CLONE_BLKSIZE
264 | FATTR4_WORD2_CHANGE_ATTR_TYPE
265 | FATTR4_WORD2_XATTR_SUPPORT
266 };
267
268 const u32 nfs4_fs_locations_bitmap[3] = {
269 FATTR4_WORD0_CHANGE
270 | FATTR4_WORD0_SIZE
271 | FATTR4_WORD0_FSID
272 | FATTR4_WORD0_FILEID
273 | FATTR4_WORD0_FS_LOCATIONS,
274 FATTR4_WORD1_OWNER
275 | FATTR4_WORD1_OWNER_GROUP
276 | FATTR4_WORD1_RAWDEV
277 | FATTR4_WORD1_SPACE_USED
278 | FATTR4_WORD1_TIME_ACCESS
279 | FATTR4_WORD1_TIME_METADATA
280 | FATTR4_WORD1_TIME_MODIFY
281 | FATTR4_WORD1_MOUNTED_ON_FILEID,
282 };
283
284 static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
285 struct inode *inode, unsigned long flags)
286 {
287 unsigned long cache_validity;
288
289 memcpy(dst, src, NFS4_BITMASK_SZ*sizeof(*dst));
290 if (!inode || !nfs4_have_delegation(inode, FMODE_READ))
291 return;
292
293 cache_validity = READ_ONCE(NFS_I(inode)->cache_validity) | flags;
294
295 /* Remove the attributes over which we have full control */
296 dst[1] &= ~FATTR4_WORD1_RAWDEV;
297 if (!(cache_validity & NFS_INO_INVALID_SIZE))
298 dst[0] &= ~FATTR4_WORD0_SIZE;
299
300 if (!(cache_validity & NFS_INO_INVALID_CHANGE))
301 dst[0] &= ~FATTR4_WORD0_CHANGE;
302
303 if (!(cache_validity & NFS_INO_INVALID_MODE))
304 dst[1] &= ~FATTR4_WORD1_MODE;
305 if (!(cache_validity & NFS_INO_INVALID_OTHER))
306 dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP);
307 }
308
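/*
 * Illustrative example, not from the original source: with a read
 * delegation held and no attributes marked invalid, a request bitmask
 * built from nfs4_fattr_bitmap is trimmed roughly as follows:
 *
 *	__u32 bm[NFS4_BITMASK_SZ];
 *
 *	nfs4_bitmap_copy_adjust(bm, nfs4_fattr_bitmap, inode, 0);
 *	// bm[0] no longer asks for FATTR4_WORD0_SIZE or _CHANGE,
 *	// bm[1] no longer asks for MODE, OWNER, OWNER_GROUP or RAWDEV;
 *	// the client is authoritative for those while the delegation holds.
 */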
309 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
310 struct nfs4_readdir_arg *readdir)
311 {
312 unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE;
313 __be32 *start, *p;
314
315 if (cookie > 2) {
316 readdir->cookie = cookie;
317 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
318 return;
319 }
320
321 readdir->cookie = 0;
322 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
323 if (cookie == 2)
324 return;
325
326 /*
327 * NFSv4 servers do not return entries for '.' and '..'
328 * Therefore, we fake these entries here. We let '.'
329 * have cookie 0 and '..' have cookie 1. Note that
330 * when talking to the server, we always send cookie 0
331 * instead of 1 or 2.
332 */
333 start = p = kmap_atomic(*readdir->pages);
334
335 if (cookie == 0) {
336 *p++ = xdr_one; /* next */
337 *p++ = xdr_zero; /* cookie, first word */
338 *p++ = xdr_one; /* cookie, second word */
339 *p++ = xdr_one; /* entry len */
340 memcpy(p, ".\0\0\0", 4); /* entry */
341 p++;
342 *p++ = xdr_one; /* bitmap length */
343 *p++ = htonl(attrs); /* bitmap */
344 *p++ = htonl(12); /* attribute buffer length */
345 *p++ = htonl(NF4DIR);
346 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
347 }
348
349 *p++ = xdr_one; /* next */
350 *p++ = xdr_zero; /* cookie, first word */
351 *p++ = xdr_two; /* cookie, second word */
352 *p++ = xdr_two; /* entry len */
353 memcpy(p, "..\0\0", 4); /* entry */
354 p++;
355 *p++ = xdr_one; /* bitmap length */
356 *p++ = htonl(attrs); /* bitmap */
357 *p++ = htonl(12); /* attribute buffer length */
358 *p++ = htonl(NF4DIR);
359 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
360
361 readdir->pgbase = (char *)p - (char *)start;
362 readdir->count -= readdir->pgbase;
363 kunmap_atomic(start);
364 }
365
366 static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version)
367 {
368 if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) {
369 fattr->pre_change_attr = version;
370 fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
371 }
372 }
373
374 static void nfs4_test_and_free_stateid(struct nfs_server *server,
375 nfs4_stateid *stateid,
376 const struct cred *cred)
377 {
378 const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;
379
380 ops->test_and_free_expired(server, stateid, cred);
381 }
382
383 static void __nfs4_free_revoked_stateid(struct nfs_server *server,
384 nfs4_stateid *stateid,
385 const struct cred *cred)
386 {
387 stateid->type = NFS4_REVOKED_STATEID_TYPE;
388 nfs4_test_and_free_stateid(server, stateid, cred);
389 }
390
391 static void nfs4_free_revoked_stateid(struct nfs_server *server,
392 const nfs4_stateid *stateid,
393 const struct cred *cred)
394 {
395 nfs4_stateid tmp;
396
397 nfs4_stateid_copy(&tmp, stateid);
398 __nfs4_free_revoked_stateid(server, &tmp, cred);
399 }
400
401 static long nfs4_update_delay(long *timeout)
402 {
403 long ret;
404 if (!timeout)
405 return NFS4_POLL_RETRY_MAX;
406 if (*timeout <= 0)
407 *timeout = NFS4_POLL_RETRY_MIN;
408 if (*timeout > NFS4_POLL_RETRY_MAX)
409 *timeout = NFS4_POLL_RETRY_MAX;
410 ret = *timeout;
411 *timeout <<= 1;
412 return ret;
413 }
414
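/*
 * Illustrative example, not from the original source: repeated calls with
 * the same timeout pointer give an exponential backoff between
 * NFS4_POLL_RETRY_MIN and NFS4_POLL_RETRY_MAX, e.g. (assuming HZ == 1000):
 *
 *	long timeout = 0;
 *
 *	nfs4_update_delay(&timeout);	// returns HZ/10 (100ms), timeout = HZ/5
 *	nfs4_update_delay(&timeout);	// returns HZ/5  (200ms), timeout = 2*HZ/5
 *	...				// doubles on each call
 *	nfs4_update_delay(&timeout);	// clamps at NFS4_POLL_RETRY_MAX (15s)
 */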
415 static int nfs4_delay_killable(long *timeout)
416 {
417 might_sleep();
418
419 __set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
420 schedule_timeout(nfs4_update_delay(timeout));
421 if (!__fatal_signal_pending(current))
422 return 0;
423 return -EINTR;
424 }
425
426 static int nfs4_delay_interruptible(long *timeout)
427 {
428 might_sleep();
429
430 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE);
431 schedule_timeout(nfs4_update_delay(timeout));
432 if (!signal_pending(current))
433 return 0;
434 return __fatal_signal_pending(current) ? -EINTR :-ERESTARTSYS;
435 }
436
437 static int nfs4_delay(long *timeout, bool interruptible)
438 {
439 if (interruptible)
440 return nfs4_delay_interruptible(timeout);
441 return nfs4_delay_killable(timeout);
442 }
443
444 static const nfs4_stateid *
445 nfs4_recoverable_stateid(const nfs4_stateid *stateid)
446 {
447 if (!stateid)
448 return NULL;
449 switch (stateid->type) {
450 case NFS4_OPEN_STATEID_TYPE:
451 case NFS4_LOCK_STATEID_TYPE:
452 case NFS4_DELEGATION_STATEID_TYPE:
453 return stateid;
454 default:
455 break;
456 }
457 return NULL;
458 }
459
460 /* This is the error handling routine for processes that are allowed
461 * to sleep.
462 */
463 static int nfs4_do_handle_exception(struct nfs_server *server,
464 int errorcode, struct nfs4_exception *exception)
465 {
466 struct nfs_client *clp = server->nfs_client;
467 struct nfs4_state *state = exception->state;
468 const nfs4_stateid *stateid;
469 struct inode *inode = exception->inode;
470 int ret = errorcode;
471
472 exception->delay = 0;
473 exception->recovering = 0;
474 exception->retry = 0;
475
476 stateid = nfs4_recoverable_stateid(exception->stateid);
477 if (stateid == NULL && state != NULL)
478 stateid = nfs4_recoverable_stateid(&state->stateid);
479
480 switch(errorcode) {
481 case 0:
482 return 0;
483 case -NFS4ERR_BADHANDLE:
484 case -ESTALE:
485 if (inode != NULL && S_ISREG(inode->i_mode))
486 pnfs_destroy_layout(NFS_I(inode));
487 break;
488 case -NFS4ERR_DELEG_REVOKED:
489 case -NFS4ERR_ADMIN_REVOKED:
490 case -NFS4ERR_EXPIRED:
491 case -NFS4ERR_BAD_STATEID:
492 case -NFS4ERR_PARTNER_NO_AUTH:
493 if (inode != NULL && stateid != NULL) {
494 nfs_inode_find_state_and_recover(inode,
495 stateid);
496 goto wait_on_recovery;
497 }
498 fallthrough;
499 case -NFS4ERR_OPENMODE:
500 if (inode) {
501 int err;
502
503 err = nfs_async_inode_return_delegation(inode,
504 stateid);
505 if (err == 0)
506 goto wait_on_recovery;
507 if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
508 exception->retry = 1;
509 break;
510 }
511 }
512 if (state == NULL)
513 break;
514 ret = nfs4_schedule_stateid_recovery(server, state);
515 if (ret < 0)
516 break;
517 goto wait_on_recovery;
518 case -NFS4ERR_STALE_STATEID:
519 case -NFS4ERR_STALE_CLIENTID:
520 nfs4_schedule_lease_recovery(clp);
521 goto wait_on_recovery;
522 case -NFS4ERR_MOVED:
523 ret = nfs4_schedule_migration_recovery(server);
524 if (ret < 0)
525 break;
526 goto wait_on_recovery;
527 case -NFS4ERR_LEASE_MOVED:
528 nfs4_schedule_lease_moved_recovery(clp);
529 goto wait_on_recovery;
530 #if defined(CONFIG_NFS_V4_1)
531 case -NFS4ERR_BADSESSION:
532 case -NFS4ERR_BADSLOT:
533 case -NFS4ERR_BAD_HIGH_SLOT:
534 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
535 case -NFS4ERR_DEADSESSION:
536 case -NFS4ERR_SEQ_FALSE_RETRY:
537 case -NFS4ERR_SEQ_MISORDERED:
538 /* Handled in nfs41_sequence_process() */
539 goto wait_on_recovery;
540 #endif /* defined(CONFIG_NFS_V4_1) */
541 case -NFS4ERR_FILE_OPEN:
542 if (exception->timeout > HZ) {
543 /* We have retried a decent amount, time to
544 * fail
545 */
546 ret = -EBUSY;
547 break;
548 }
549 fallthrough;
550 case -NFS4ERR_DELAY:
551 nfs_inc_server_stats(server, NFSIOS_DELAY);
552 fallthrough;
553 case -NFS4ERR_GRACE:
554 case -NFS4ERR_LAYOUTTRYLATER:
555 case -NFS4ERR_RECALLCONFLICT:
556 exception->delay = 1;
557 return 0;
558
559 case -NFS4ERR_RETRY_UNCACHED_REP:
560 case -NFS4ERR_OLD_STATEID:
561 exception->retry = 1;
562 break;
563 case -NFS4ERR_BADOWNER:
564 /* The following works around a Linux server bug! */
565 case -NFS4ERR_BADNAME:
566 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
567 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
568 exception->retry = 1;
569 printk(KERN_WARNING "NFS: v4 server %s "
570 "does not accept raw "
571 "uid/gids. "
572 "Reenabling the idmapper.\n",
573 server->nfs_client->cl_hostname);
574 }
575 }
576 /* We failed to handle the error */
577 return nfs4_map_errors(ret);
578 wait_on_recovery:
579 exception->recovering = 1;
580 return 0;
581 }
582
583 /* This is the error handling routine for processes that are allowed
584 * to sleep.
585 */
586 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
587 {
588 struct nfs_client *clp = server->nfs_client;
589 int ret;
590
591 ret = nfs4_do_handle_exception(server, errorcode, exception);
592 if (exception->delay) {
593 ret = nfs4_delay(&exception->timeout,
594 exception->interruptible);
595 goto out_retry;
596 }
597 if (exception->recovering) {
598 if (exception->task_is_privileged)
599 return -EDEADLOCK;
600 ret = nfs4_wait_clnt_recover(clp);
601 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
602 return -EIO;
603 goto out_retry;
604 }
605 return ret;
606 out_retry:
607 if (ret == 0)
608 exception->retry = 1;
609 return ret;
610 }
611
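/*
 * Illustrative sketch of the typical calling pattern (the worker name is
 * hypothetical): synchronous proc routines in this file loop on their
 * "_nfs4_..." helper and let this handler decide whether to delay, wait
 * for state recovery, or give up:
 *
 *	struct nfs4_exception exception = { .interruptible = true };
 *	int err;
 *
 *	do {
 *		err = _nfs4_proc_something(server, ...);
 *		err = nfs4_handle_exception(server, err, &exception);
 *	} while (exception.retry);
 *	return err;
 */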
612 static int
613 nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
614 int errorcode, struct nfs4_exception *exception)
615 {
616 struct nfs_client *clp = server->nfs_client;
617 int ret;
618
619 ret = nfs4_do_handle_exception(server, errorcode, exception);
620 if (exception->delay) {
621 rpc_delay(task, nfs4_update_delay(&exception->timeout));
622 goto out_retry;
623 }
624 if (exception->recovering) {
625 if (exception->task_is_privileged)
626 return -EDEADLOCK;
627 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
628 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
629 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
630 goto out_retry;
631 }
632 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
633 ret = -EIO;
634 return ret;
635 out_retry:
636 if (ret == 0) {
637 exception->retry = 1;
638 /*
639 * For NFS4ERR_MOVED, the client transport will need to
640 * be recomputed after migration recovery has completed.
641 */
642 if (errorcode == -NFS4ERR_MOVED)
643 rpc_task_release_transport(task);
644 }
645 return ret;
646 }
647
648 int
649 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
650 struct nfs4_state *state, long *timeout)
651 {
652 struct nfs4_exception exception = {
653 .state = state,
654 };
655
656 if (task->tk_status >= 0)
657 return 0;
658 if (timeout)
659 exception.timeout = *timeout;
660 task->tk_status = nfs4_async_handle_exception(task, server,
661 task->tk_status,
662 &exception);
663 if (exception.delay && timeout)
664 *timeout = exception.timeout;
665 if (exception.retry)
666 return -EAGAIN;
667 return 0;
668 }
669
670 /*
671 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
672 * or 'false' otherwise.
673 */
674 static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
675 {
676 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
677 return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P);
678 }
679
680 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
681 {
682 spin_lock(&clp->cl_lock);
683 if (time_before(clp->cl_last_renewal,timestamp))
684 clp->cl_last_renewal = timestamp;
685 spin_unlock(&clp->cl_lock);
686 }
687
688 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
689 {
690 struct nfs_client *clp = server->nfs_client;
691
692 if (!nfs4_has_session(clp))
693 do_renew_lease(clp, timestamp);
694 }
695
696 struct nfs4_call_sync_data {
697 const struct nfs_server *seq_server;
698 struct nfs4_sequence_args *seq_args;
699 struct nfs4_sequence_res *seq_res;
700 };
701
702 void nfs4_init_sequence(struct nfs4_sequence_args *args,
703 struct nfs4_sequence_res *res, int cache_reply,
704 int privileged)
705 {
706 args->sa_slot = NULL;
707 args->sa_cache_this = cache_reply;
708 args->sa_privileged = privileged;
709
710 res->sr_slot = NULL;
711 }
712
713 static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
714 {
715 struct nfs4_slot *slot = res->sr_slot;
716 struct nfs4_slot_table *tbl;
717
718 tbl = slot->table;
719 spin_lock(&tbl->slot_tbl_lock);
720 if (!nfs41_wake_and_assign_slot(tbl, slot))
721 nfs4_free_slot(tbl, slot);
722 spin_unlock(&tbl->slot_tbl_lock);
723
724 res->sr_slot = NULL;
725 }
726
727 static int nfs40_sequence_done(struct rpc_task *task,
728 struct nfs4_sequence_res *res)
729 {
730 if (res->sr_slot != NULL)
731 nfs40_sequence_free_slot(res);
732 return 1;
733 }
734
735 #if defined(CONFIG_NFS_V4_1)
736
737 static void nfs41_release_slot(struct nfs4_slot *slot)
738 {
739 struct nfs4_session *session;
740 struct nfs4_slot_table *tbl;
741 bool send_new_highest_used_slotid = false;
742
743 if (!slot)
744 return;
745 tbl = slot->table;
746 session = tbl->session;
747
748 /* Bump the slot sequence number */
749 if (slot->seq_done)
750 slot->seq_nr++;
751 slot->seq_done = 0;
752
753 spin_lock(&tbl->slot_tbl_lock);
754 /* Be nice to the server: try to ensure that the last transmitted
755 * value for highest_used_slotid <= target_highest_slotid
756 */
757 if (tbl->highest_used_slotid > tbl->target_highest_slotid)
758 send_new_highest_used_slotid = true;
759
760 if (nfs41_wake_and_assign_slot(tbl, slot)) {
761 send_new_highest_used_slotid = false;
762 goto out_unlock;
763 }
764 nfs4_free_slot(tbl, slot);
765
766 if (tbl->highest_used_slotid != NFS4_NO_SLOT)
767 send_new_highest_used_slotid = false;
768 out_unlock:
769 spin_unlock(&tbl->slot_tbl_lock);
770 if (send_new_highest_used_slotid)
771 nfs41_notify_server(session->clp);
772 if (waitqueue_active(&tbl->slot_waitq))
773 wake_up_all(&tbl->slot_waitq);
774 }
775
776 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
777 {
778 nfs41_release_slot(res->sr_slot);
779 res->sr_slot = NULL;
780 }
781
782 static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
783 u32 seqnr)
784 {
785 if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
786 slot->seq_nr_highest_sent = seqnr;
787 }
788 static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, u32 seqnr)
789 {
790 nfs4_slot_sequence_record_sent(slot, seqnr);
791 slot->seq_nr_last_acked = seqnr;
792 }
793
794 static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred,
795 struct nfs4_slot *slot)
796 {
797 struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true);
798 if (!IS_ERR(task))
799 rpc_put_task_async(task);
800 }
801
802 static int nfs41_sequence_process(struct rpc_task *task,
803 struct nfs4_sequence_res *res)
804 {
805 struct nfs4_session *session;
806 struct nfs4_slot *slot = res->sr_slot;
807 struct nfs_client *clp;
808 int status;
809 int ret = 1;
810
811 if (slot == NULL)
812 goto out_noaction;
813 /* don't increment the sequence number if the task wasn't sent */
814 if (!RPC_WAS_SENT(task) || slot->seq_done)
815 goto out;
816
817 session = slot->table->session;
818 clp = session->clp;
819
820 trace_nfs4_sequence_done(session, res);
821
822 status = res->sr_status;
823 if (task->tk_status == -NFS4ERR_DEADSESSION)
824 status = -NFS4ERR_DEADSESSION;
825
826 /* Check the SEQUENCE operation status */
827 switch (status) {
828 case 0:
829 /* Mark this sequence number as having been acked */
830 nfs4_slot_sequence_acked(slot, slot->seq_nr);
831 /* Update the slot's sequence and clientid lease timer */
832 slot->seq_done = 1;
833 do_renew_lease(clp, res->sr_timestamp);
834 /* Check sequence flags */
835 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
836 !!slot->privileged);
837 nfs41_update_target_slotid(slot->table, slot, res);
838 break;
839 case 1:
840 /*
841 * sr_status remains 1 if an RPC level error occurred.
842 * The server may or may not have processed the sequence
843 * operation.
844 */
845 nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
846 slot->seq_done = 1;
847 goto out;
848 case -NFS4ERR_DELAY:
849 /* The server detected a resend of the RPC call and
850 * returned NFS4ERR_DELAY as per Section 2.10.6.2
851 * of RFC5661.
852 */
853 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
854 __func__,
855 slot->slot_nr,
856 slot->seq_nr);
857 goto out_retry;
858 case -NFS4ERR_RETRY_UNCACHED_REP:
859 case -NFS4ERR_SEQ_FALSE_RETRY:
860 /*
861 * The server thinks we tried to replay a request.
862 * Retry the call after bumping the sequence ID.
863 */
864 nfs4_slot_sequence_acked(slot, slot->seq_nr);
865 goto retry_new_seq;
866 case -NFS4ERR_BADSLOT:
867 /*
868 * The slot id we used was probably retired. Try again
869 * using a different slot id.
870 */
871 if (slot->slot_nr < slot->table->target_highest_slotid)
872 goto session_recover;
873 goto retry_nowait;
874 case -NFS4ERR_SEQ_MISORDERED:
875 nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
876 /*
877 * Were one or more calls using this slot interrupted?
878 * If the server never received the request, then our
879 * transmitted slot sequence number may be too high. However,
880 * if the server did receive the request then it might
881 * accidentally give us a reply with a mismatched operation.
882 * We can sort this out by sending a lone sequence operation
883 * to the server on the same slot.
884 */
885 if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) {
886 slot->seq_nr--;
887 if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) {
888 nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot);
889 res->sr_slot = NULL;
890 }
891 goto retry_nowait;
892 }
893 /*
894 * RFC5661:
895 * A retry might be sent while the original request is
896 * still in progress on the replier. The replier SHOULD
897 * deal with the issue by returning NFS4ERR_DELAY as the
898 * reply to SEQUENCE or CB_SEQUENCE operation, but
899 * implementations MAY return NFS4ERR_SEQ_MISORDERED.
900 *
901 * Restart the search after a delay.
902 */
903 slot->seq_nr = slot->seq_nr_highest_sent;
904 goto out_retry;
905 case -NFS4ERR_BADSESSION:
906 case -NFS4ERR_DEADSESSION:
907 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
908 goto session_recover;
909 default:
910 /* Just update the slot sequence no. */
911 slot->seq_done = 1;
912 }
913 out:
914 /* The session may be reset by one of the error handlers. */
915 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
916 out_noaction:
917 return ret;
918 session_recover:
919 nfs4_schedule_session_recovery(session, status);
920 dprintk("%s ERROR: %d Reset session\n", __func__, status);
921 nfs41_sequence_free_slot(res);
922 goto out;
923 retry_new_seq:
924 ++slot->seq_nr;
925 retry_nowait:
926 if (rpc_restart_call_prepare(task)) {
927 nfs41_sequence_free_slot(res);
928 task->tk_status = 0;
929 ret = 0;
930 }
931 goto out;
932 out_retry:
933 if (!rpc_restart_call(task))
934 goto out;
935 rpc_delay(task, NFS4_POLL_RETRY_MAX);
936 return 0;
937 }
938
939 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
940 {
941 if (!nfs41_sequence_process(task, res))
942 return 0;
943 if (res->sr_slot != NULL)
944 nfs41_sequence_free_slot(res);
945 return 1;
946
947 }
948 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
949
950 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
951 {
952 if (res->sr_slot == NULL)
953 return 1;
954 if (res->sr_slot->table->session != NULL)
955 return nfs41_sequence_process(task, res);
956 return nfs40_sequence_done(task, res);
957 }
958
959 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
960 {
961 if (res->sr_slot != NULL) {
962 if (res->sr_slot->table->session != NULL)
963 nfs41_sequence_free_slot(res);
964 else
965 nfs40_sequence_free_slot(res);
966 }
967 }
968
969 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
970 {
971 if (res->sr_slot == NULL)
972 return 1;
973 if (!res->sr_slot->table->session)
974 return nfs40_sequence_done(task, res);
975 return nfs41_sequence_done(task, res);
976 }
977 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
978
979 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
980 {
981 struct nfs4_call_sync_data *data = calldata;
982
983 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
984
985 nfs4_setup_sequence(data->seq_server->nfs_client,
986 data->seq_args, data->seq_res, task);
987 }
988
989 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
990 {
991 struct nfs4_call_sync_data *data = calldata;
992
993 nfs41_sequence_done(task, data->seq_res);
994 }
995
996 static const struct rpc_call_ops nfs41_call_sync_ops = {
997 .rpc_call_prepare = nfs41_call_sync_prepare,
998 .rpc_call_done = nfs41_call_sync_done,
999 };
1000
1001 #else /* !CONFIG_NFS_V4_1 */
1002
1003 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
1004 {
1005 return nfs40_sequence_done(task, res);
1006 }
1007
1008 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
1009 {
1010 if (res->sr_slot != NULL)
1011 nfs40_sequence_free_slot(res);
1012 }
1013
1014 int nfs4_sequence_done(struct rpc_task *task,
1015 struct nfs4_sequence_res *res)
1016 {
1017 return nfs40_sequence_done(task, res);
1018 }
1019 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
1020
1021 #endif /* !CONFIG_NFS_V4_1 */
1022
1023 static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
1024 {
1025 res->sr_timestamp = jiffies;
1026 res->sr_status_flags = 0;
1027 res->sr_status = 1;
1028 }
1029
1030 static
1031 void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
1032 struct nfs4_sequence_res *res,
1033 struct nfs4_slot *slot)
1034 {
1035 if (!slot)
1036 return;
1037 slot->privileged = args->sa_privileged ? 1 : 0;
1038 args->sa_slot = slot;
1039
1040 res->sr_slot = slot;
1041 }
1042
1043 int nfs4_setup_sequence(struct nfs_client *client,
1044 struct nfs4_sequence_args *args,
1045 struct nfs4_sequence_res *res,
1046 struct rpc_task *task)
1047 {
1048 struct nfs4_session *session = nfs4_get_session(client);
1049 struct nfs4_slot_table *tbl = client->cl_slot_tbl;
1050 struct nfs4_slot *slot;
1051
1052 /* slot already allocated? */
1053 if (res->sr_slot != NULL)
1054 goto out_start;
1055
1056 if (session)
1057 tbl = &session->fc_slot_table;
1058
1059 spin_lock(&tbl->slot_tbl_lock);
1060 /* The state manager will wait until the slot table is empty */
1061 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
1062 goto out_sleep;
1063
1064 slot = nfs4_alloc_slot(tbl);
1065 if (IS_ERR(slot)) {
1066 if (slot == ERR_PTR(-ENOMEM))
1067 goto out_sleep_timeout;
1068 goto out_sleep;
1069 }
1070 spin_unlock(&tbl->slot_tbl_lock);
1071
1072 nfs4_sequence_attach_slot(args, res, slot);
1073
1074 trace_nfs4_setup_sequence(session, args);
1075 out_start:
1076 nfs41_sequence_res_init(res);
1077 rpc_call_start(task);
1078 return 0;
1079 out_sleep_timeout:
1080 /* Try again in 1/4 second */
1081 if (args->sa_privileged)
1082 rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task,
1083 jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED);
1084 else
1085 rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task,
1086 NULL, jiffies + (HZ >> 2));
1087 spin_unlock(&tbl->slot_tbl_lock);
1088 return -EAGAIN;
1089 out_sleep:
1090 if (args->sa_privileged)
1091 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
1092 RPC_PRIORITY_PRIVILEGED);
1093 else
1094 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
1095 spin_unlock(&tbl->slot_tbl_lock);
1096 return -EAGAIN;
1097 }
1098 EXPORT_SYMBOL_GPL(nfs4_setup_sequence);
1099
1100 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
1101 {
1102 struct nfs4_call_sync_data *data = calldata;
1103 nfs4_setup_sequence(data->seq_server->nfs_client,
1104 data->seq_args, data->seq_res, task);
1105 }
1106
1107 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
1108 {
1109 struct nfs4_call_sync_data *data = calldata;
1110 nfs4_sequence_done(task, data->seq_res);
1111 }
1112
1113 static const struct rpc_call_ops nfs40_call_sync_ops = {
1114 .rpc_call_prepare = nfs40_call_sync_prepare,
1115 .rpc_call_done = nfs40_call_sync_done,
1116 };
1117
1118 static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
1119 {
1120 int ret;
1121 struct rpc_task *task;
1122
1123 task = rpc_run_task(task_setup);
1124 if (IS_ERR(task))
1125 return PTR_ERR(task);
1126
1127 ret = task->tk_status;
1128 rpc_put_task(task);
1129 return ret;
1130 }
1131
1132 static int nfs4_do_call_sync(struct rpc_clnt *clnt,
1133 struct nfs_server *server,
1134 struct rpc_message *msg,
1135 struct nfs4_sequence_args *args,
1136 struct nfs4_sequence_res *res,
1137 unsigned short task_flags)
1138 {
1139 struct nfs_client *clp = server->nfs_client;
1140 struct nfs4_call_sync_data data = {
1141 .seq_server = server,
1142 .seq_args = args,
1143 .seq_res = res,
1144 };
1145 struct rpc_task_setup task_setup = {
1146 .rpc_client = clnt,
1147 .rpc_message = msg,
1148 .callback_ops = clp->cl_mvops->call_sync_ops,
1149 .callback_data = &data,
1150 .flags = task_flags,
1151 };
1152
1153 return nfs4_call_sync_custom(&task_setup);
1154 }
1155
1156 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
1157 struct nfs_server *server,
1158 struct rpc_message *msg,
1159 struct nfs4_sequence_args *args,
1160 struct nfs4_sequence_res *res)
1161 {
1162 unsigned short task_flags = 0;
1163
1164 if (server->caps & NFS_CAP_MOVEABLE)
1165 task_flags = RPC_TASK_MOVEABLE;
1166 return nfs4_do_call_sync(clnt, server, msg, args, res, task_flags);
1167 }
1168
1169
1170 int nfs4_call_sync(struct rpc_clnt *clnt,
1171 struct nfs_server *server,
1172 struct rpc_message *msg,
1173 struct nfs4_sequence_args *args,
1174 struct nfs4_sequence_res *res,
1175 int cache_reply)
1176 {
1177 nfs4_init_sequence(args, res, cache_reply, 0);
1178 return nfs4_call_sync_sequence(clnt, server, msg, args, res);
1179 }
1180
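/*
 * Illustrative sketch with hypothetical argument/result structures (not
 * lifted from this file): a simple synchronous NFSv4 operation embeds the
 * sequence bookkeeping in its XDR structs and hands both to nfs4_call_sync():
 *
 *	struct rpc_message msg = {
 *		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
 *		.rpc_argp = &args,	// args.seq_args embedded in the XDR args
 *		.rpc_resp = &res,	// res.seq_res embedded in the XDR results
 *	};
 *
 *	status = nfs4_call_sync(server->client, server, &msg,
 *				&args.seq_args, &res.seq_res, 0);
 *
 * nfs4_call_sync() marks the call non-privileged, applies the requested
 * cache_reply setting and dispatches through the minor version's
 * call_sync_ops.
 */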
1181 static void
1182 nfs4_inc_nlink_locked(struct inode *inode)
1183 {
1184 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
1185 NFS_INO_INVALID_CTIME |
1186 NFS_INO_INVALID_NLINK);
1187 inc_nlink(inode);
1188 }
1189
1190 static void
1191 nfs4_inc_nlink(struct inode *inode)
1192 {
1193 spin_lock(&inode->i_lock);
1194 nfs4_inc_nlink_locked(inode);
1195 spin_unlock(&inode->i_lock);
1196 }
1197
1198 static void
1199 nfs4_dec_nlink_locked(struct inode *inode)
1200 {
1201 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
1202 NFS_INO_INVALID_CTIME |
1203 NFS_INO_INVALID_NLINK);
1204 drop_nlink(inode);
1205 }
1206
1207 static void
1208 nfs4_update_changeattr_locked(struct inode *inode,
1209 struct nfs4_change_info *cinfo,
1210 unsigned long timestamp, unsigned long cache_validity)
1211 {
1212 struct nfs_inode *nfsi = NFS_I(inode);
1213 u64 change_attr = inode_peek_iversion_raw(inode);
1214
1215 cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
1216 if (S_ISDIR(inode->i_mode))
1217 cache_validity |= NFS_INO_INVALID_DATA;
1218
1219 switch (NFS_SERVER(inode)->change_attr_type) {
1220 case NFS4_CHANGE_TYPE_IS_UNDEFINED:
1221 if (cinfo->after == change_attr)
1222 goto out;
1223 break;
1224 default:
1225 if ((s64)(change_attr - cinfo->after) >= 0)
1226 goto out;
1227 }
1228
1229 inode_set_iversion_raw(inode, cinfo->after);
1230 if (!cinfo->atomic || cinfo->before != change_attr) {
1231 if (S_ISDIR(inode->i_mode))
1232 nfs_force_lookup_revalidate(inode);
1233
1234 if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
1235 cache_validity |=
1236 NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
1237 NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
1238 NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
1239 NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR;
1240 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
1241 }
1242 nfsi->attrtimeo_timestamp = jiffies;
1243 nfsi->read_cache_jiffies = timestamp;
1244 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
1245 nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;
1246 out:
1247 nfs_set_cache_invalid(inode, cache_validity);
1248 }
1249
1250 void
1251 nfs4_update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
1252 unsigned long timestamp, unsigned long cache_validity)
1253 {
1254 spin_lock(&dir->i_lock);
1255 nfs4_update_changeattr_locked(dir, cinfo, timestamp, cache_validity);
1256 spin_unlock(&dir->i_lock);
1257 }
1258
1259 struct nfs4_open_createattrs {
1260 struct nfs4_label *label;
1261 struct iattr *sattr;
1262 const __u32 verf[2];
1263 };
1264
1265 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
1266 int err, struct nfs4_exception *exception)
1267 {
1268 if (err != -EINVAL)
1269 return false;
1270 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1271 return false;
1272 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
1273 exception->retry = 1;
1274 return true;
1275 }
1276
1277 static fmode_t _nfs4_ctx_to_accessmode(const struct nfs_open_context *ctx)
1278 {
1279 return ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
1280 }
1281
1282 static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx)
1283 {
1284 fmode_t ret = ctx->mode & (FMODE_READ|FMODE_WRITE);
1285
1286 return (ctx->mode & FMODE_EXEC) ? FMODE_READ | ret : ret;
1287 }
1288
1289 static u32
1290 nfs4_map_atomic_open_share(struct nfs_server *server,
1291 fmode_t fmode, int openflags)
1292 {
1293 u32 res = 0;
1294
1295 switch (fmode & (FMODE_READ | FMODE_WRITE)) {
1296 case FMODE_READ:
1297 res = NFS4_SHARE_ACCESS_READ;
1298 break;
1299 case FMODE_WRITE:
1300 res = NFS4_SHARE_ACCESS_WRITE;
1301 break;
1302 case FMODE_READ|FMODE_WRITE:
1303 res = NFS4_SHARE_ACCESS_BOTH;
1304 }
1305 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1306 goto out;
1307 /* Want no delegation if we're using O_DIRECT */
1308 if (openflags & O_DIRECT)
1309 res |= NFS4_SHARE_WANT_NO_DELEG;
1310 out:
1311 return res;
1312 }
1313
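/*
 * Worked example, not part of the original source: on a server advertising
 * NFS_CAP_ATOMIC_OPEN_V1,
 *
 *	nfs4_map_atomic_open_share(server, FMODE_READ | FMODE_WRITE, O_DIRECT)
 *		== NFS4_SHARE_ACCESS_BOTH | NFS4_SHARE_WANT_NO_DELEG
 *
 * i.e. the OPEN requests both access modes but tells the server that a
 * delegation is unwanted, since O_DIRECT I/O bypasses the page cache.
 */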
1314 static enum open_claim_type4
1315 nfs4_map_atomic_open_claim(struct nfs_server *server,
1316 enum open_claim_type4 claim)
1317 {
1318 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
1319 return claim;
1320 switch (claim) {
1321 default:
1322 return claim;
1323 case NFS4_OPEN_CLAIM_FH:
1324 return NFS4_OPEN_CLAIM_NULL;
1325 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1326 return NFS4_OPEN_CLAIM_DELEGATE_CUR;
1327 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1328 return NFS4_OPEN_CLAIM_DELEGATE_PREV;
1329 }
1330 }
1331
1332 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
1333 {
1334 p->o_res.f_attr = &p->f_attr;
1335 p->o_res.seqid = p->o_arg.seqid;
1336 p->c_res.seqid = p->c_arg.seqid;
1337 p->o_res.server = p->o_arg.server;
1338 p->o_res.access_request = p->o_arg.access;
1339 nfs_fattr_init(&p->f_attr);
1340 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
1341 }
1342
1343 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1344 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
1345 const struct nfs4_open_createattrs *c,
1346 enum open_claim_type4 claim,
1347 gfp_t gfp_mask)
1348 {
1349 struct dentry *parent = dget_parent(dentry);
1350 struct inode *dir = d_inode(parent);
1351 struct nfs_server *server = NFS_SERVER(dir);
1352 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
1353 struct nfs4_label *label = (c != NULL) ? c->label : NULL;
1354 struct nfs4_opendata *p;
1355
1356 p = kzalloc(sizeof(*p), gfp_mask);
1357 if (p == NULL)
1358 goto err;
1359
1360 p->f_attr.label = nfs4_label_alloc(server, gfp_mask);
1361 if (IS_ERR(p->f_attr.label))
1362 goto err_free_p;
1363
1364 p->a_label = nfs4_label_alloc(server, gfp_mask);
1365 if (IS_ERR(p->a_label))
1366 goto err_free_f;
1367
1368 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1369 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1370 if (IS_ERR(p->o_arg.seqid))
1371 goto err_free_label;
1372 nfs_sb_active(dentry->d_sb);
1373 p->dentry = dget(dentry);
1374 p->dir = parent;
1375 p->owner = sp;
1376 atomic_inc(&sp->so_count);
1377 p->o_arg.open_flags = flags;
1378 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
1379 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1380 p->o_arg.share_access = nfs4_map_atomic_open_share(server,
1381 fmode, flags);
1382 if (flags & O_CREAT) {
1383 p->o_arg.umask = current_umask();
1384 p->o_arg.label = nfs4_label_copy(p->a_label, label);
1385 if (c->sattr != NULL && c->sattr->ia_valid != 0) {
1386 p->o_arg.u.attrs = &p->attrs;
1387 memcpy(&p->attrs, c->sattr, sizeof(p->attrs));
1388
1389 memcpy(p->o_arg.u.verifier.data, c->verf,
1390 sizeof(p->o_arg.u.verifier.data));
1391 }
1392 }
1393 /* ask server to check for all possible rights as results
1394 * are cached */
1395 switch (p->o_arg.claim) {
1396 default:
1397 break;
1398 case NFS4_OPEN_CLAIM_NULL:
1399 case NFS4_OPEN_CLAIM_FH:
1400 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
1401 NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE |
1402 NFS4_ACCESS_EXECUTE |
1403 nfs_access_xattr_mask(server);
1404 }
1405 p->o_arg.clientid = server->nfs_client->cl_clientid;
1406 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
1407 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
1408 p->o_arg.name = &dentry->d_name;
1409 p->o_arg.server = server;
1410 p->o_arg.bitmask = nfs4_bitmask(server, label);
1411 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1412 switch (p->o_arg.claim) {
1413 case NFS4_OPEN_CLAIM_NULL:
1414 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1415 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1416 p->o_arg.fh = NFS_FH(dir);
1417 break;
1418 case NFS4_OPEN_CLAIM_PREVIOUS:
1419 case NFS4_OPEN_CLAIM_FH:
1420 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1421 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1422 p->o_arg.fh = NFS_FH(d_inode(dentry));
1423 }
1424 p->c_arg.fh = &p->o_res.fh;
1425 p->c_arg.stateid = &p->o_res.stateid;
1426 p->c_arg.seqid = p->o_arg.seqid;
1427 nfs4_init_opendata_res(p);
1428 kref_init(&p->kref);
1429 return p;
1430
1431 err_free_label:
1432 nfs4_label_free(p->a_label);
1433 err_free_f:
1434 nfs4_label_free(p->f_attr.label);
1435 err_free_p:
1436 kfree(p);
1437 err:
1438 dput(parent);
1439 return NULL;
1440 }
1441
1442 static void nfs4_opendata_free(struct kref *kref)
1443 {
1444 struct nfs4_opendata *p = container_of(kref,
1445 struct nfs4_opendata, kref);
1446 struct super_block *sb = p->dentry->d_sb;
1447
1448 nfs4_lgopen_release(p->lgp);
1449 nfs_free_seqid(p->o_arg.seqid);
1450 nfs4_sequence_free_slot(&p->o_res.seq_res);
1451 if (p->state != NULL)
1452 nfs4_put_open_state(p->state);
1453 nfs4_put_state_owner(p->owner);
1454
1455 nfs4_label_free(p->a_label);
1456 nfs4_label_free(p->f_attr.label);
1457
1458 dput(p->dir);
1459 dput(p->dentry);
1460 nfs_sb_deactive(sb);
1461 nfs_fattr_free_names(&p->f_attr);
1462 kfree(p->f_attr.mdsthreshold);
1463 kfree(p);
1464 }
1465
1466 static void nfs4_opendata_put(struct nfs4_opendata *p)
1467 {
1468 if (p != NULL)
1469 kref_put(&p->kref, nfs4_opendata_free);
1470 }
1471
1472 static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
1473 fmode_t fmode)
1474 {
1475 switch(fmode & (FMODE_READ|FMODE_WRITE)) {
1476 case FMODE_READ|FMODE_WRITE:
1477 return state->n_rdwr != 0;
1478 case FMODE_WRITE:
1479 return state->n_wronly != 0;
1480 case FMODE_READ:
1481 return state->n_rdonly != 0;
1482 }
1483 WARN_ON_ONCE(1);
1484 return false;
1485 }
1486
1487 static int can_open_cached(struct nfs4_state *state, fmode_t mode,
1488 int open_mode, enum open_claim_type4 claim)
1489 {
1490 int ret = 0;
1491
1492 if (open_mode & (O_EXCL|O_TRUNC))
1493 goto out;
1494 switch (claim) {
1495 case NFS4_OPEN_CLAIM_NULL:
1496 case NFS4_OPEN_CLAIM_FH:
1497 goto out;
1498 default:
1499 break;
1500 }
1501 switch (mode & (FMODE_READ|FMODE_WRITE)) {
1502 case FMODE_READ:
1503 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1504 && state->n_rdonly != 0;
1505 break;
1506 case FMODE_WRITE:
1507 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1508 && state->n_wronly != 0;
1509 break;
1510 case FMODE_READ|FMODE_WRITE:
1511 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1512 && state->n_rdwr != 0;
1513 }
1514 out:
1515 return ret;
1516 }
1517
1518 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
1519 enum open_claim_type4 claim)
1520 {
1521 if (delegation == NULL)
1522 return 0;
1523 if ((delegation->type & fmode) != fmode)
1524 return 0;
1525 switch (claim) {
1526 case NFS4_OPEN_CLAIM_NULL:
1527 case NFS4_OPEN_CLAIM_FH:
1528 break;
1529 case NFS4_OPEN_CLAIM_PREVIOUS:
1530 if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
1531 break;
1532 fallthrough;
1533 default:
1534 return 0;
1535 }
1536 nfs_mark_delegation_referenced(delegation);
1537 return 1;
1538 }
1539
1540 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1541 {
1542 switch (fmode) {
1543 case FMODE_WRITE:
1544 state->n_wronly++;
1545 break;
1546 case FMODE_READ:
1547 state->n_rdonly++;
1548 break;
1549 case FMODE_READ|FMODE_WRITE:
1550 state->n_rdwr++;
1551 }
1552 nfs4_state_set_mode_locked(state, state->state | fmode);
1553 }
1554
1555 #ifdef CONFIG_NFS_V4_1
1556 static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state)
1557 {
1558 if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags))
1559 return true;
1560 if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags))
1561 return true;
1562 if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags))
1563 return true;
1564 return false;
1565 }
1566 #endif /* CONFIG_NFS_V4_1 */
1567
1568 static void nfs_state_log_update_open_stateid(struct nfs4_state *state)
1569 {
1570 if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
1571 wake_up_all(&state->waitq);
1572 }
1573
1574 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
1575 {
1576 struct nfs_client *clp = state->owner->so_server->nfs_client;
1577 bool need_recover = false;
1578
1579 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
1580 need_recover = true;
1581 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
1582 need_recover = true;
1583 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
1584 need_recover = true;
1585 if (need_recover)
1586 nfs4_state_mark_reclaim_nograce(clp, state);
1587 }
1588
1589 /*
1590 * Check for whether or not the caller may update the open stateid
1591 * to the value passed in by stateid.
1592 *
1593 * Note: This function relies heavily on the server implementing
1594 * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2
1595 * correctly.
1596 * i.e. The stateid seqids have to be initialised to 1, and
1597 * are then incremented on every state transition.
1598 */
1599 static bool nfs_stateid_is_sequential(struct nfs4_state *state,
1600 const nfs4_stateid *stateid)
1601 {
1602 if (test_bit(NFS_OPEN_STATE, &state->flags)) {
1603 /* The common case - we're updating to a new sequence number */
1604 if (nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1605 if (nfs4_stateid_is_next(&state->open_stateid, stateid))
1606 return true;
1607 return false;
1608 }
1609 /* The server returned a new stateid */
1610 }
1611 /* This is the first OPEN in this generation */
1612 if (stateid->seqid == cpu_to_be32(1))
1613 return true;
1614 return false;
1615 }
1616
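/*
 * Worked example, assuming a server that follows the RFC rules cited
 * above: if state->open_stateid currently carries seqid 3 and an OPEN
 * reply returns the same "other" field with seqid 4, the update may be
 * applied at once; seqid 6 instead means a reply for seqid 5 is still in
 * flight, so nfs_set_open_stateid_locked() below waits before applying
 * it. A stateid with a new "other" field is only accepted immediately
 * when its seqid is 1.
 */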
1617 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1618 {
1619 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1620 return;
1621 if (state->n_wronly)
1622 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1623 if (state->n_rdonly)
1624 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1625 if (state->n_rdwr)
1626 set_bit(NFS_O_RDWR_STATE, &state->flags);
1627 set_bit(NFS_OPEN_STATE, &state->flags);
1628 }
1629
1630 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1631 nfs4_stateid *stateid, fmode_t fmode)
1632 {
1633 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1634 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1635 case FMODE_WRITE:
1636 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1637 break;
1638 case FMODE_READ:
1639 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1640 break;
1641 case 0:
1642 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1643 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1644 clear_bit(NFS_OPEN_STATE, &state->flags);
1645 }
1646 if (stateid == NULL)
1647 return;
1648 /* Handle OPEN+OPEN_DOWNGRADE races */
1649 if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
1650 !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
1651 nfs_resync_open_stateid_locked(state);
1652 goto out;
1653 }
1654 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1655 nfs4_stateid_copy(&state->stateid, stateid);
1656 nfs4_stateid_copy(&state->open_stateid, stateid);
1657 trace_nfs4_open_stateid_update(state->inode, stateid, 0);
1658 out:
1659 nfs_state_log_update_open_stateid(state);
1660 }
1661
1662 static void nfs_clear_open_stateid(struct nfs4_state *state,
1663 nfs4_stateid *arg_stateid,
1664 nfs4_stateid *stateid, fmode_t fmode)
1665 {
1666 write_seqlock(&state->seqlock);
1667 /* Ignore if the CLOSE argument doesn't match the current stateid */
1668 if (nfs4_state_match_open_stateid_other(state, arg_stateid))
1669 nfs_clear_open_stateid_locked(state, stateid, fmode);
1670 write_sequnlock(&state->seqlock);
1671 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1672 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1673 }
1674
1675 static void nfs_set_open_stateid_locked(struct nfs4_state *state,
1676 const nfs4_stateid *stateid, nfs4_stateid *freeme)
1677 __must_hold(&state->owner->so_lock)
1678 __must_hold(&state->seqlock)
1679 __must_hold(RCU)
1680
1681 {
1682 DEFINE_WAIT(wait);
1683 int status = 0;
1684 for (;;) {
1685
1686 if (nfs_stateid_is_sequential(state, stateid))
1687 break;
1688
1689 if (status)
1690 break;
1691 /* Rely on seqids for serialisation with NFSv4.0 */
1692 if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
1693 break;
1694
1695 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
1696 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
1697 /*
1698 * Ensure we process the state changes in the same order
1699 * in which the server processed them by delaying the
1700 * update of the stateid until we are in sequence.
1701 */
1702 write_sequnlock(&state->seqlock);
1703 spin_unlock(&state->owner->so_lock);
1704 rcu_read_unlock();
1705 trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
1706
1707 if (!fatal_signal_pending(current)) {
1708 if (schedule_timeout(5*HZ) == 0)
1709 status = -EAGAIN;
1710 else
1711 status = 0;
1712 } else
1713 status = -EINTR;
1714 finish_wait(&state->waitq, &wait);
1715 rcu_read_lock();
1716 spin_lock(&state->owner->so_lock);
1717 write_seqlock(&state->seqlock);
1718 }
1719
1720 if (test_bit(NFS_OPEN_STATE, &state->flags) &&
1721 !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1722 nfs4_stateid_copy(freeme, &state->open_stateid);
1723 nfs_test_and_clear_all_open_stateid(state);
1724 }
1725
1726 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1727 nfs4_stateid_copy(&state->stateid, stateid);
1728 nfs4_stateid_copy(&state->open_stateid, stateid);
1729 trace_nfs4_open_stateid_update(state->inode, stateid, status);
1730 nfs_state_log_update_open_stateid(state);
1731 }
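/*
 * Informal note on the wait loop in nfs_set_open_stateid_locked():
 * when a session is in use, an out-of-order stateid update is parked
 * on state->waitq for up to 5 seconds so that updates are applied in
 * the order the server issued them.  If the wait times out (-EAGAIN)
 * or a fatal signal is pending (-EINTR), the update is applied anyway
 * and the status is recorded by trace_nfs4_open_stateid_update().
 */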
1732
1733 static void nfs_state_set_open_stateid(struct nfs4_state *state,
1734 const nfs4_stateid *open_stateid,
1735 fmode_t fmode,
1736 nfs4_stateid *freeme)
1737 {
1738 /*
1739 * Protect the call to nfs4_state_set_mode_locked and
1740 * serialise the stateid update
1741 */
1742 write_seqlock(&state->seqlock);
1743 nfs_set_open_stateid_locked(state, open_stateid, freeme);
1744 switch (fmode) {
1745 case FMODE_READ:
1746 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1747 break;
1748 case FMODE_WRITE:
1749 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1750 break;
1751 case FMODE_READ|FMODE_WRITE:
1752 set_bit(NFS_O_RDWR_STATE, &state->flags);
1753 }
1754 set_bit(NFS_OPEN_STATE, &state->flags);
1755 write_sequnlock(&state->seqlock);
1756 }
1757
1758 static void nfs_state_clear_open_state_flags(struct nfs4_state *state)
1759 {
1760 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1761 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1762 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1763 clear_bit(NFS_OPEN_STATE, &state->flags);
1764 }
1765
1766 static void nfs_state_set_delegation(struct nfs4_state *state,
1767 const nfs4_stateid *deleg_stateid,
1768 fmode_t fmode)
1769 {
1770 /*
1771 * Protect the call to nfs4_state_set_mode_locked and
1772 * serialise the stateid update
1773 */
1774 write_seqlock(&state->seqlock);
1775 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1776 set_bit(NFS_DELEGATED_STATE, &state->flags);
1777 write_sequnlock(&state->seqlock);
1778 }
1779
1780 static void nfs_state_clear_delegation(struct nfs4_state *state)
1781 {
1782 write_seqlock(&state->seqlock);
1783 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1784 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1785 write_sequnlock(&state->seqlock);
1786 }
1787
1788 int update_open_stateid(struct nfs4_state *state,
1789 const nfs4_stateid *open_stateid,
1790 const nfs4_stateid *delegation,
1791 fmode_t fmode)
1792 {
1793 struct nfs_server *server = NFS_SERVER(state->inode);
1794 struct nfs_client *clp = server->nfs_client;
1795 struct nfs_inode *nfsi = NFS_I(state->inode);
1796 struct nfs_delegation *deleg_cur;
1797 nfs4_stateid freeme = { };
1798 int ret = 0;
1799
1800 fmode &= (FMODE_READ|FMODE_WRITE);
1801
1802 rcu_read_lock();
1803 spin_lock(&state->owner->so_lock);
1804 if (open_stateid != NULL) {
1805 nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme);
1806 ret = 1;
1807 }
1808
1809 deleg_cur = nfs4_get_valid_delegation(state->inode);
1810 if (deleg_cur == NULL)
1811 goto no_delegation;
1812
1813 spin_lock(&deleg_cur->lock);
1814 if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1815 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1816 (deleg_cur->type & fmode) != fmode)
1817 goto no_delegation_unlock;
1818
1819 if (delegation == NULL)
1820 delegation = &deleg_cur->stateid;
1821 else if (!nfs4_stateid_match_other(&deleg_cur->stateid, delegation))
1822 goto no_delegation_unlock;
1823
1824 nfs_mark_delegation_referenced(deleg_cur);
1825 nfs_state_set_delegation(state, &deleg_cur->stateid, fmode);
1826 ret = 1;
1827 no_delegation_unlock:
1828 spin_unlock(&deleg_cur->lock);
1829 no_delegation:
1830 if (ret)
1831 update_open_stateflags(state, fmode);
1832 spin_unlock(&state->owner->so_lock);
1833 rcu_read_unlock();
1834
1835 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1836 nfs4_schedule_state_manager(clp);
1837 if (freeme.type != 0)
1838 nfs4_test_and_free_stateid(server, &freeme,
1839 state->owner->so_cred);
1840
1841 return ret;
1842 }
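/*
 * Informal summary of update_open_stateid(): it returns 1 when either
 * an open stateid was installed or a still-valid delegation covering
 * the requested fmode was found, bumping the share-mode counters via
 * update_open_stateflags() in both cases, and 0 otherwise.
 */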
1843
1844 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1845 const nfs4_stateid *stateid)
1846 {
1847 struct nfs4_state *state = lsp->ls_state;
1848 bool ret = false;
1849
1850 spin_lock(&state->state_lock);
1851 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1852 goto out_noupdate;
1853 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1854 goto out_noupdate;
1855 nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1856 ret = true;
1857 out_noupdate:
1858 spin_unlock(&state->state_lock);
1859 return ret;
1860 }
1861
1862 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1863 {
1864 struct nfs_delegation *delegation;
1865
1866 fmode &= FMODE_READ|FMODE_WRITE;
1867 rcu_read_lock();
1868 delegation = nfs4_get_valid_delegation(inode);
1869 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1870 rcu_read_unlock();
1871 return;
1872 }
1873 rcu_read_unlock();
1874 nfs4_inode_return_delegation(inode);
1875 }
1876
1877 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1878 {
1879 struct nfs4_state *state = opendata->state;
1880 struct nfs_delegation *delegation;
1881 int open_mode = opendata->o_arg.open_flags;
1882 fmode_t fmode = opendata->o_arg.fmode;
1883 enum open_claim_type4 claim = opendata->o_arg.claim;
1884 nfs4_stateid stateid;
1885 int ret = -EAGAIN;
1886
1887 for (;;) {
1888 spin_lock(&state->owner->so_lock);
1889 if (can_open_cached(state, fmode, open_mode, claim)) {
1890 update_open_stateflags(state, fmode);
1891 spin_unlock(&state->owner->so_lock);
1892 goto out_return_state;
1893 }
1894 spin_unlock(&state->owner->so_lock);
1895 rcu_read_lock();
1896 delegation = nfs4_get_valid_delegation(state->inode);
1897 if (!can_open_delegated(delegation, fmode, claim)) {
1898 rcu_read_unlock();
1899 break;
1900 }
1901 /* Save the delegation */
1902 nfs4_stateid_copy(&stateid, &delegation->stateid);
1903 rcu_read_unlock();
1904 nfs_release_seqid(opendata->o_arg.seqid);
1905 if (!opendata->is_recover) {
1906 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1907 if (ret != 0)
1908 goto out;
1909 }
1910 ret = -EAGAIN;
1911
1912 /* Try to update the stateid using the delegation */
1913 if (update_open_stateid(state, NULL, &stateid, fmode))
1914 goto out_return_state;
1915 }
1916 out:
1917 return ERR_PTR(ret);
1918 out_return_state:
1919 refcount_inc(&state->count);
1920 return state;
1921 }
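/*
 * Informal note on the loop in nfs4_try_open_cached(): the fast path
 * reuses an already-open stateid when the share modes allow it;
 * otherwise, if a usable delegation exists, the loop retries with the
 * delegation stateid via update_open_stateid().  -EAGAIN tells the
 * caller to fall back to an on-the-wire OPEN.
 */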
1922
1923 static void
1924 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1925 {
1926 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1927 struct nfs_delegation *delegation;
1928 int delegation_flags = 0;
1929
1930 rcu_read_lock();
1931 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1932 if (delegation)
1933 delegation_flags = delegation->flags;
1934 rcu_read_unlock();
1935 switch (data->o_arg.claim) {
1936 default:
1937 break;
1938 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1939 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1940 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1941 "returning a delegation for "
1942 "OPEN(CLAIM_DELEGATE_CUR)\n",
1943 clp->cl_hostname);
1944 return;
1945 }
1946 if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1947 nfs_inode_set_delegation(state->inode,
1948 data->owner->so_cred,
1949 data->o_res.delegation_type,
1950 &data->o_res.delegation,
1951 data->o_res.pagemod_limit);
1952 else
1953 nfs_inode_reclaim_delegation(state->inode,
1954 data->owner->so_cred,
1955 data->o_res.delegation_type,
1956 &data->o_res.delegation,
1957 data->o_res.pagemod_limit);
1958
1959 if (data->o_res.do_recall)
1960 nfs_async_inode_return_delegation(state->inode,
1961 &data->o_res.delegation);
1962 }
1963
1964 /*
1965 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1966 * and update the nfs4_state.
1967 */
1968 static struct nfs4_state *
1969 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1970 {
1971 struct inode *inode = data->state->inode;
1972 struct nfs4_state *state = data->state;
1973 int ret;
1974
1975 if (!data->rpc_done) {
1976 if (data->rpc_status)
1977 return ERR_PTR(data->rpc_status);
1978 /* cached opens have already been processed */
1979 goto update;
1980 }
1981
1982 ret = nfs_refresh_inode(inode, &data->f_attr);
1983 if (ret)
1984 return ERR_PTR(ret);
1985
1986 if (data->o_res.delegation_type != 0)
1987 nfs4_opendata_check_deleg(data, state);
1988 update:
1989 if (!update_open_stateid(state, &data->o_res.stateid,
1990 NULL, data->o_arg.fmode))
1991 return ERR_PTR(-EAGAIN);
1992 refcount_inc(&state->count);
1993
1994 return state;
1995 }
1996
1997 static struct inode *
1998 nfs4_opendata_get_inode(struct nfs4_opendata *data)
1999 {
2000 struct inode *inode;
2001
2002 switch (data->o_arg.claim) {
2003 case NFS4_OPEN_CLAIM_NULL:
2004 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
2005 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
2006 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
2007 return ERR_PTR(-EAGAIN);
2008 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh,
2009 &data->f_attr);
2010 break;
2011 default:
2012 inode = d_inode(data->dentry);
2013 ihold(inode);
2014 nfs_refresh_inode(inode, &data->f_attr);
2015 }
2016 return inode;
2017 }
2018
2019 static struct nfs4_state *
2020 nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data)
2021 {
2022 struct nfs4_state *state;
2023 struct inode *inode;
2024
2025 inode = nfs4_opendata_get_inode(data);
2026 if (IS_ERR(inode))
2027 return ERR_CAST(inode);
2028 if (data->state != NULL && data->state->inode == inode) {
2029 state = data->state;
2030 refcount_inc(&state->count);
2031 } else
2032 state = nfs4_get_open_state(inode, data->owner);
2033 iput(inode);
2034 if (state == NULL)
2035 state = ERR_PTR(-ENOMEM);
2036 return state;
2037 }
2038
2039 static struct nfs4_state *
2040 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
2041 {
2042 struct nfs4_state *state;
2043
2044 if (!data->rpc_done) {
2045 state = nfs4_try_open_cached(data);
2046 trace_nfs4_cached_open(data->state);
2047 goto out;
2048 }
2049
2050 state = nfs4_opendata_find_nfs4_state(data);
2051 if (IS_ERR(state))
2052 goto out;
2053
2054 if (data->o_res.delegation_type != 0)
2055 nfs4_opendata_check_deleg(data, state);
2056 if (!update_open_stateid(state, &data->o_res.stateid,
2057 NULL, data->o_arg.fmode)) {
2058 nfs4_put_open_state(state);
2059 state = ERR_PTR(-EAGAIN);
2060 }
2061 out:
2062 nfs_release_seqid(data->o_arg.seqid);
2063 return state;
2064 }
2065
2066 static struct nfs4_state *
2067 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
2068 {
2069 struct nfs4_state *ret;
2070
2071 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
2072 ret = _nfs4_opendata_reclaim_to_nfs4_state(data);
2073 else
2074 ret = _nfs4_opendata_to_nfs4_state(data);
2075 nfs4_sequence_free_slot(&data->o_res.seq_res);
2076 return ret;
2077 }
2078
2079 static struct nfs_open_context *
2080 nfs4_state_find_open_context_mode(struct nfs4_state *state, fmode_t mode)
2081 {
2082 struct nfs_inode *nfsi = NFS_I(state->inode);
2083 struct nfs_open_context *ctx;
2084
2085 rcu_read_lock();
2086 list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
2087 if (ctx->state != state)
2088 continue;
2089 if ((ctx->mode & mode) != mode)
2090 continue;
2091 if (!get_nfs_open_context(ctx))
2092 continue;
2093 rcu_read_unlock();
2094 return ctx;
2095 }
2096 rcu_read_unlock();
2097 return ERR_PTR(-ENOENT);
2098 }
2099
2100 static struct nfs_open_context *
2101 nfs4_state_find_open_context(struct nfs4_state *state)
2102 {
2103 struct nfs_open_context *ctx;
2104
2105 ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE);
2106 if (!IS_ERR(ctx))
2107 return ctx;
2108 ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE);
2109 if (!IS_ERR(ctx))
2110 return ctx;
2111 return nfs4_state_find_open_context_mode(state, FMODE_READ);
2112 }
2113
2114 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
2115 struct nfs4_state *state, enum open_claim_type4 claim)
2116 {
2117 struct nfs4_opendata *opendata;
2118
2119 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
2120 NULL, claim, GFP_NOFS);
2121 if (opendata == NULL)
2122 return ERR_PTR(-ENOMEM);
2123 opendata->state = state;
2124 refcount_inc(&state->count);
2125 return opendata;
2126 }
2127
2128 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
2129 fmode_t fmode)
2130 {
2131 struct nfs4_state *newstate;
2132 int ret;
2133
2134 if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
2135 return 0;
2136 opendata->o_arg.open_flags = 0;
2137 opendata->o_arg.fmode = fmode;
2138 opendata->o_arg.share_access = nfs4_map_atomic_open_share(
2139 NFS_SB(opendata->dentry->d_sb),
2140 fmode, 0);
2141 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
2142 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
2143 nfs4_init_opendata_res(opendata);
2144 ret = _nfs4_recover_proc_open(opendata);
2145 if (ret != 0)
2146 return ret;
2147 newstate = nfs4_opendata_to_nfs4_state(opendata);
2148 if (IS_ERR(newstate))
2149 return PTR_ERR(newstate);
2150 if (newstate != opendata->state)
2151 ret = -ESTALE;
2152 nfs4_close_state(newstate, fmode);
2153 return ret;
2154 }
2155
2156 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
2157 {
2158 int ret;
2159
2160 /* memory barrier prior to reading state->n_* */
2161 smp_rmb();
2162 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
2163 if (ret != 0)
2164 return ret;
2165 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
2166 if (ret != 0)
2167 return ret;
2168 ret = nfs4_open_recover_helper(opendata, FMODE_READ);
2169 if (ret != 0)
2170 return ret;
2171 /*
2172 * We may have performed cached opens for all three recoveries.
2173 * Check if we need to update the current stateid.
2174 */
2175 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
2176 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
2177 write_seqlock(&state->seqlock);
2178 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
2179 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2180 write_sequnlock(&state->seqlock);
2181 }
2182 return 0;
2183 }
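/*
 * Informal note: nfs4_open_recover() re-establishes open state for
 * each share mode the client still holds (rdwr, then wronly, then
 * rdonly); nfs4_open_recover_helper() silently skips modes that are
 * not marked in the open state flags.
 */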
2184
2185 /*
2186 * OPEN_RECLAIM:
2187 * reclaim state on the server after a reboot.
2188 */
2189 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
2190 {
2191 struct nfs_delegation *delegation;
2192 struct nfs4_opendata *opendata;
2193 fmode_t delegation_type = 0;
2194 int status;
2195
2196 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2197 NFS4_OPEN_CLAIM_PREVIOUS);
2198 if (IS_ERR(opendata))
2199 return PTR_ERR(opendata);
2200 rcu_read_lock();
2201 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2202 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
2203 delegation_type = delegation->type;
2204 rcu_read_unlock();
2205 opendata->o_arg.u.delegation_type = delegation_type;
2206 status = nfs4_open_recover(opendata, state);
2207 nfs4_opendata_put(opendata);
2208 return status;
2209 }
2210
2211 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
2212 {
2213 struct nfs_server *server = NFS_SERVER(state->inode);
2214 struct nfs4_exception exception = { };
2215 int err;
2216 do {
2217 err = _nfs4_do_open_reclaim(ctx, state);
2218 trace_nfs4_open_reclaim(ctx, 0, err);
2219 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2220 continue;
2221 if (err != -NFS4ERR_DELAY)
2222 break;
2223 nfs4_handle_exception(server, err, &exception);
2224 } while (exception.retry);
2225 return err;
2226 }
2227
2228 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
2229 {
2230 struct nfs_open_context *ctx;
2231 int ret;
2232
2233 ctx = nfs4_state_find_open_context(state);
2234 if (IS_ERR(ctx))
2235 return -EAGAIN;
2236 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2237 nfs_state_clear_open_state_flags(state);
2238 ret = nfs4_do_open_reclaim(ctx, state);
2239 put_nfs_open_context(ctx);
2240 return ret;
2241 }
2242
2243 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err)
2244 {
2245 switch (err) {
2246 default:
2247 printk(KERN_ERR "NFS: %s: unhandled error "
2248 "%d.\n", __func__, err);
2249 fallthrough;
2250 case 0:
2251 case -ENOENT:
2252 case -EAGAIN:
2253 case -ESTALE:
2254 case -ETIMEDOUT:
2255 break;
2256 case -NFS4ERR_BADSESSION:
2257 case -NFS4ERR_BADSLOT:
2258 case -NFS4ERR_BAD_HIGH_SLOT:
2259 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
2260 case -NFS4ERR_DEADSESSION:
2261 return -EAGAIN;
2262 case -NFS4ERR_STALE_CLIENTID:
2263 case -NFS4ERR_STALE_STATEID:
2264 /* Don't recall a delegation if it was lost */
2265 nfs4_schedule_lease_recovery(server->nfs_client);
2266 return -EAGAIN;
2267 case -NFS4ERR_MOVED:
2268 nfs4_schedule_migration_recovery(server);
2269 return -EAGAIN;
2270 case -NFS4ERR_LEASE_MOVED:
2271 nfs4_schedule_lease_moved_recovery(server->nfs_client);
2272 return -EAGAIN;
2273 case -NFS4ERR_DELEG_REVOKED:
2274 case -NFS4ERR_ADMIN_REVOKED:
2275 case -NFS4ERR_EXPIRED:
2276 case -NFS4ERR_BAD_STATEID:
2277 case -NFS4ERR_OPENMODE:
2278 nfs_inode_find_state_and_recover(state->inode,
2279 stateid);
2280 nfs4_schedule_stateid_recovery(server, state);
2281 return -EAGAIN;
2282 case -NFS4ERR_DELAY:
2283 case -NFS4ERR_GRACE:
2284 ssleep(1);
2285 return -EAGAIN;
2286 case -ENOMEM:
2287 case -NFS4ERR_DENIED:
2288 if (fl) {
2289 struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner;
2290 if (lsp)
2291 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
2292 }
2293 return 0;
2294 }
2295 return err;
2296 }
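/*
 * Rough classification of the switch in
 * nfs4_handle_delegation_recall_error(): session and lease-level
 * failures map to -EAGAIN so the recall is retried once the relevant
 * recovery has run, revoked/bad stateids trigger stateid recovery,
 * -NFS4ERR_DELAY and -NFS4ERR_GRACE back off for one second, and a
 * denied or out-of-memory lock reclaim marks the lock as lost and is
 * reported as success (0).
 */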
2297
2298 int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
2299 struct nfs4_state *state, const nfs4_stateid *stateid)
2300 {
2301 struct nfs_server *server = NFS_SERVER(state->inode);
2302 struct nfs4_opendata *opendata;
2303 int err = 0;
2304
2305 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2306 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
2307 if (IS_ERR(opendata))
2308 return PTR_ERR(opendata);
2309 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
2310 if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) {
2311 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
2312 if (err)
2313 goto out;
2314 }
2315 if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) {
2316 err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
2317 if (err)
2318 goto out;
2319 }
2320 if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) {
2321 err = nfs4_open_recover_helper(opendata, FMODE_READ);
2322 if (err)
2323 goto out;
2324 }
2325 nfs_state_clear_delegation(state);
2326 out:
2327 nfs4_opendata_put(opendata);
2328 return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
2329 }
2330
2331 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
2332 {
2333 struct nfs4_opendata *data = calldata;
2334
2335 nfs4_setup_sequence(data->o_arg.server->nfs_client,
2336 &data->c_arg.seq_args, &data->c_res.seq_res, task);
2337 }
2338
2339 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
2340 {
2341 struct nfs4_opendata *data = calldata;
2342
2343 nfs40_sequence_done(task, &data->c_res.seq_res);
2344
2345 data->rpc_status = task->tk_status;
2346 if (data->rpc_status == 0) {
2347 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
2348 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2349 renew_lease(data->o_res.server, data->timestamp);
2350 data->rpc_done = true;
2351 }
2352 }
2353
2354 static void nfs4_open_confirm_release(void *calldata)
2355 {
2356 struct nfs4_opendata *data = calldata;
2357 struct nfs4_state *state = NULL;
2358
2359 /* If this request hasn't been cancelled, do nothing */
2360 if (!data->cancelled)
2361 goto out_free;
2362 /* In case of error, no cleanup! */
2363 if (!data->rpc_done)
2364 goto out_free;
2365 state = nfs4_opendata_to_nfs4_state(data);
2366 if (!IS_ERR(state))
2367 nfs4_close_state(state, data->o_arg.fmode);
2368 out_free:
2369 nfs4_opendata_put(data);
2370 }
2371
2372 static const struct rpc_call_ops nfs4_open_confirm_ops = {
2373 .rpc_call_prepare = nfs4_open_confirm_prepare,
2374 .rpc_call_done = nfs4_open_confirm_done,
2375 .rpc_release = nfs4_open_confirm_release,
2376 };
2377
2378 /*
2379 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
2380 */
2381 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
2382 {
2383 struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
2384 struct rpc_task *task;
2385 struct rpc_message msg = {
2386 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
2387 .rpc_argp = &data->c_arg,
2388 .rpc_resp = &data->c_res,
2389 .rpc_cred = data->owner->so_cred,
2390 };
2391 struct rpc_task_setup task_setup_data = {
2392 .rpc_client = server->client,
2393 .rpc_message = &msg,
2394 .callback_ops = &nfs4_open_confirm_ops,
2395 .callback_data = data,
2396 .workqueue = nfsiod_workqueue,
2397 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
2398 };
2399 int status;
2400
2401 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1,
2402 data->is_recover);
2403 kref_get(&data->kref);
2404 data->rpc_done = false;
2405 data->rpc_status = 0;
2406 data->timestamp = jiffies;
2407 task = rpc_run_task(&task_setup_data);
2408 if (IS_ERR(task))
2409 return PTR_ERR(task);
2410 status = rpc_wait_for_completion_task(task);
2411 if (status != 0) {
2412 data->cancelled = true;
2413 smp_wmb();
2414 } else
2415 status = data->rpc_status;
2416 rpc_put_task(task);
2417 return status;
2418 }
2419
2420 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
2421 {
2422 struct nfs4_opendata *data = calldata;
2423 struct nfs4_state_owner *sp = data->owner;
2424 struct nfs_client *clp = sp->so_server->nfs_client;
2425 enum open_claim_type4 claim = data->o_arg.claim;
2426
2427 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
2428 goto out_wait;
2429 /*
2430 * Check if we still need to send an OPEN call, or if we can use
2431 * a delegation instead.
2432 */
2433 if (data->state != NULL) {
2434 struct nfs_delegation *delegation;
2435
2436 if (can_open_cached(data->state, data->o_arg.fmode,
2437 data->o_arg.open_flags, claim))
2438 goto out_no_action;
2439 rcu_read_lock();
2440 delegation = nfs4_get_valid_delegation(data->state->inode);
2441 if (can_open_delegated(delegation, data->o_arg.fmode, claim))
2442 goto unlock_no_action;
2443 rcu_read_unlock();
2444 }
2445 /* Update client id. */
2446 data->o_arg.clientid = clp->cl_clientid;
2447 switch (claim) {
2448 default:
2449 break;
2450 case NFS4_OPEN_CLAIM_PREVIOUS:
2451 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
2452 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
2453 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
2454 fallthrough;
2455 case NFS4_OPEN_CLAIM_FH:
2456 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
2457 }
2458 data->timestamp = jiffies;
2459 if (nfs4_setup_sequence(data->o_arg.server->nfs_client,
2460 &data->o_arg.seq_args,
2461 &data->o_res.seq_res,
2462 task) != 0)
2463 nfs_release_seqid(data->o_arg.seqid);
2464
2465 /* Set the create mode (note dependency on the session type) */
2466 data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
2467 if (data->o_arg.open_flags & O_EXCL) {
2468 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
2469 if (clp->cl_mvops->minor_version == 0) {
2470 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
2471 /* don't put an ACCESS op in the OPEN compound if O_EXCL,
2472 * because ACCESS will return permission denied for
2473 * all bits until close */
2474 data->o_res.access_request = data->o_arg.access = 0;
2475 } else if (nfs4_has_persistent_session(clp))
2476 data->o_arg.createmode = NFS4_CREATE_GUARDED;
2477 }
2478 return;
2479 unlock_no_action:
2480 trace_nfs4_cached_open(data->state);
2481 rcu_read_unlock();
2482 out_no_action:
2483 task->tk_action = NULL;
2484 out_wait:
2485 nfs4_sequence_done(task, &data->o_res.seq_res);
2486 }
2487
2488 static void nfs4_open_done(struct rpc_task *task, void *calldata)
2489 {
2490 struct nfs4_opendata *data = calldata;
2491
2492 data->rpc_status = task->tk_status;
2493
2494 if (!nfs4_sequence_process(task, &data->o_res.seq_res))
2495 return;
2496
2497 if (task->tk_status == 0) {
2498 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
2499 switch (data->o_res.f_attr->mode & S_IFMT) {
2500 case S_IFREG:
2501 break;
2502 case S_IFLNK:
2503 data->rpc_status = -ELOOP;
2504 break;
2505 case S_IFDIR:
2506 data->rpc_status = -EISDIR;
2507 break;
2508 default:
2509 data->rpc_status = -ENOTDIR;
2510 }
2511 }
2512 renew_lease(data->o_res.server, data->timestamp);
2513 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
2514 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2515 }
2516 data->rpc_done = true;
2517 }
2518
2519 static void nfs4_open_release(void *calldata)
2520 {
2521 struct nfs4_opendata *data = calldata;
2522 struct nfs4_state *state = NULL;
2523
2524 /* If this request hasn't been cancelled, do nothing */
2525 if (!data->cancelled)
2526 goto out_free;
2527 /* In case of error, no cleanup! */
2528 if (data->rpc_status != 0 || !data->rpc_done)
2529 goto out_free;
2530 /* In case we need an open_confirm, no cleanup! */
2531 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
2532 goto out_free;
2533 state = nfs4_opendata_to_nfs4_state(data);
2534 if (!IS_ERR(state))
2535 nfs4_close_state(state, data->o_arg.fmode);
2536 out_free:
2537 nfs4_opendata_put(data);
2538 }
2539
2540 static const struct rpc_call_ops nfs4_open_ops = {
2541 .rpc_call_prepare = nfs4_open_prepare,
2542 .rpc_call_done = nfs4_open_done,
2543 .rpc_release = nfs4_open_release,
2544 };
2545
2546 static int nfs4_run_open_task(struct nfs4_opendata *data,
2547 struct nfs_open_context *ctx)
2548 {
2549 struct inode *dir = d_inode(data->dir);
2550 struct nfs_server *server = NFS_SERVER(dir);
2551 struct nfs_openargs *o_arg = &data->o_arg;
2552 struct nfs_openres *o_res = &data->o_res;
2553 struct rpc_task *task;
2554 struct rpc_message msg = {
2555 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
2556 .rpc_argp = o_arg,
2557 .rpc_resp = o_res,
2558 .rpc_cred = data->owner->so_cred,
2559 };
2560 struct rpc_task_setup task_setup_data = {
2561 .rpc_client = server->client,
2562 .rpc_message = &msg,
2563 .callback_ops = &nfs4_open_ops,
2564 .callback_data = data,
2565 .workqueue = nfsiod_workqueue,
2566 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
2567 };
2568 int status;
2569
2570 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE))
2571 task_setup_data.flags |= RPC_TASK_MOVEABLE;
2572
2573 kref_get(&data->kref);
2574 data->rpc_done = false;
2575 data->rpc_status = 0;
2576 data->cancelled = false;
2577 data->is_recover = false;
2578 if (!ctx) {
2579 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1);
2580 data->is_recover = true;
2581 task_setup_data.flags |= RPC_TASK_TIMEOUT;
2582 } else {
2583 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0);
2584 pnfs_lgopen_prepare(data, ctx);
2585 }
2586 task = rpc_run_task(&task_setup_data);
2587 if (IS_ERR(task))
2588 return PTR_ERR(task);
2589 status = rpc_wait_for_completion_task(task);
2590 if (status != 0) {
2591 data->cancelled = true;
2592 smp_wmb();
2593 } else
2594 status = data->rpc_status;
2595 rpc_put_task(task);
2596
2597 return status;
2598 }
2599
2600 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2601 {
2602 struct inode *dir = d_inode(data->dir);
2603 struct nfs_openres *o_res = &data->o_res;
2604 int status;
2605
2606 status = nfs4_run_open_task(data, NULL);
2607 if (status != 0 || !data->rpc_done)
2608 return status;
2609
2610 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2611
2612 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM)
2613 status = _nfs4_proc_open_confirm(data);
2614
2615 return status;
2616 }
2617
2618 /*
2619 * Additional permission checks in order to distinguish between an
2620 * open for read, and an open for execute. This works around the
2621 * fact that NFSv4 OPEN treats read and execute permissions as being
2622 * the same.
2623 * Note that in the non-execute case, we want to turn off permission
2624 * checking if we just created a new file (POSIX open() semantics).
2625 */
2626 static int nfs4_opendata_access(const struct cred *cred,
2627 struct nfs4_opendata *opendata,
2628 struct nfs4_state *state, fmode_t fmode,
2629 int openflags)
2630 {
2631 struct nfs_access_entry cache;
2632 u32 mask, flags;
2633
2634 /* access call failed or for some reason the server doesn't
2635 * support any access modes -- defer access call until later */
2636 if (opendata->o_res.access_supported == 0)
2637 return 0;
2638
2639 mask = 0;
2640 /*
2641 * Use openflags to check for exec, because fmode won't
2642 * always have FMODE_EXEC set when the file is opened for exec.
2643 */
2644 if (openflags & __FMODE_EXEC) {
2645 /* ONLY check for exec rights */
2646 if (S_ISDIR(state->inode->i_mode))
2647 mask = NFS4_ACCESS_LOOKUP;
2648 else
2649 mask = NFS4_ACCESS_EXECUTE;
2650 } else if ((fmode & FMODE_READ) && !opendata->file_created)
2651 mask = NFS4_ACCESS_READ;
2652
2653 nfs_access_set_mask(&cache, opendata->o_res.access_result);
2654 nfs_access_add_cache(state->inode, &cache, cred);
2655
2656 flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP;
2657 if ((mask & ~cache.mask & flags) == 0)
2658 return 0;
2659
2660 return -EACCES;
2661 }
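/*
 * Worked example for nfs4_opendata_access() (illustrative): an open
 * with __FMODE_EXEC set only requires NFS4_ACCESS_EXECUTE (or
 * NFS4_ACCESS_LOOKUP for a directory), while a plain read open of a
 * file we just created requires nothing, so the ACCESS result is
 * cached but cannot cause -EACCES here.
 */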
2662
2663 /*
2664 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2665 */
2666 static int _nfs4_proc_open(struct nfs4_opendata *data,
2667 struct nfs_open_context *ctx)
2668 {
2669 struct inode *dir = d_inode(data->dir);
2670 struct nfs_server *server = NFS_SERVER(dir);
2671 struct nfs_openargs *o_arg = &data->o_arg;
2672 struct nfs_openres *o_res = &data->o_res;
2673 int status;
2674
2675 status = nfs4_run_open_task(data, ctx);
2676 if (!data->rpc_done)
2677 return status;
2678 if (status != 0) {
2679 if (status == -NFS4ERR_BADNAME &&
2680 !(o_arg->open_flags & O_CREAT))
2681 return -ENOENT;
2682 return status;
2683 }
2684
2685 nfs_fattr_map_and_free_names(server, &data->f_attr);
2686
2687 if (o_arg->open_flags & O_CREAT) {
2688 if (o_arg->open_flags & O_EXCL)
2689 data->file_created = true;
2690 else if (o_res->cinfo.before != o_res->cinfo.after)
2691 data->file_created = true;
2692 if (data->file_created ||
2693 inode_peek_iversion_raw(dir) != o_res->cinfo.after)
2694 nfs4_update_changeattr(dir, &o_res->cinfo,
2695 o_res->f_attr->time_start,
2696 NFS_INO_INVALID_DATA);
2697 }
2698 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2699 server->caps &= ~NFS_CAP_POSIX_LOCK;
2700 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2701 status = _nfs4_proc_open_confirm(data);
2702 if (status != 0)
2703 return status;
2704 }
2705 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) {
2706 nfs4_sequence_free_slot(&o_res->seq_res);
2707 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, NULL);
2708 }
2709 return 0;
2710 }
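/*
 * Informal note on _nfs4_proc_open(): a failed OPEN without O_CREAT
 * maps -NFS4ERR_BADNAME to -ENOENT, a successful create updates the
 * directory's cached change attribute, servers that request it get an
 * OPEN_CONFIRM, and if the reply carried no usable attributes a
 * separate GETATTR fills them in.
 */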
2711
2712 /*
2713 * OPEN_EXPIRED:
2714 * reclaim state on the server after a network partition.
2715 * Assumes caller holds the appropriate lock
2716 */
2717 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2718 {
2719 struct nfs4_opendata *opendata;
2720 int ret;
2721
2722 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2723 NFS4_OPEN_CLAIM_FH);
2724 if (IS_ERR(opendata))
2725 return PTR_ERR(opendata);
2726 ret = nfs4_open_recover(opendata, state);
2727 if (ret == -ESTALE)
2728 d_drop(ctx->dentry);
2729 nfs4_opendata_put(opendata);
2730 return ret;
2731 }
2732
2733 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2734 {
2735 struct nfs_server *server = NFS_SERVER(state->inode);
2736 struct nfs4_exception exception = { };
2737 int err;
2738
2739 do {
2740 err = _nfs4_open_expired(ctx, state);
2741 trace_nfs4_open_expired(ctx, 0, err);
2742 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2743 continue;
2744 switch (err) {
2745 default:
2746 goto out;
2747 case -NFS4ERR_GRACE:
2748 case -NFS4ERR_DELAY:
2749 nfs4_handle_exception(server, err, &exception);
2750 err = 0;
2751 }
2752 } while (exception.retry);
2753 out:
2754 return err;
2755 }
2756
2757 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2758 {
2759 struct nfs_open_context *ctx;
2760 int ret;
2761
2762 ctx = nfs4_state_find_open_context(state);
2763 if (IS_ERR(ctx))
2764 return -EAGAIN;
2765 ret = nfs4_do_open_expired(ctx, state);
2766 put_nfs_open_context(ctx);
2767 return ret;
2768 }
2769
2770 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state,
2771 const nfs4_stateid *stateid)
2772 {
2773 nfs_remove_bad_delegation(state->inode, stateid);
2774 nfs_state_clear_delegation(state);
2775 }
2776
2777 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2778 {
2779 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2780 nfs_finish_clear_delegation_stateid(state, NULL);
2781 }
2782
2783 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2784 {
2785 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2786 nfs40_clear_delegation_stateid(state);
2787 nfs_state_clear_open_state_flags(state);
2788 return nfs4_open_expired(sp, state);
2789 }
2790
2791 static int nfs40_test_and_free_expired_stateid(struct nfs_server *server,
2792 nfs4_stateid *stateid,
2793 const struct cred *cred)
2794 {
2795 return -NFS4ERR_BAD_STATEID;
2796 }
2797
2798 #if defined(CONFIG_NFS_V4_1)
2799 static int nfs41_test_and_free_expired_stateid(struct nfs_server *server,
2800 nfs4_stateid *stateid,
2801 const struct cred *cred)
2802 {
2803 int status;
2804
2805 switch (stateid->type) {
2806 default:
2807 break;
2808 case NFS4_INVALID_STATEID_TYPE:
2809 case NFS4_SPECIAL_STATEID_TYPE:
2810 return -NFS4ERR_BAD_STATEID;
2811 case NFS4_REVOKED_STATEID_TYPE:
2812 goto out_free;
2813 }
2814
2815 status = nfs41_test_stateid(server, stateid, cred);
2816 switch (status) {
2817 case -NFS4ERR_EXPIRED:
2818 case -NFS4ERR_ADMIN_REVOKED:
2819 case -NFS4ERR_DELEG_REVOKED:
2820 break;
2821 default:
2822 return status;
2823 }
2824 out_free:
2825 /* Ack the revoked state to the server */
2826 nfs41_free_stateid(server, stateid, cred, true);
2827 return -NFS4ERR_EXPIRED;
2828 }
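/*
 * Informal summary of nfs41_test_and_free_expired_stateid(): special
 * and invalid stateid types are reported as -NFS4ERR_BAD_STATEID,
 * known-revoked ones are freed immediately, and everything else is
 * probed with TEST_STATEID; if the server reports it expired or
 * revoked, the stateid is acked with FREE_STATEID and
 * -NFS4ERR_EXPIRED is returned.
 */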
2829
2830 static int nfs41_check_delegation_stateid(struct nfs4_state *state)
2831 {
2832 struct nfs_server *server = NFS_SERVER(state->inode);
2833 nfs4_stateid stateid;
2834 struct nfs_delegation *delegation;
2835 const struct cred *cred = NULL;
2836 int status, ret = NFS_OK;
2837
2838 /* Get the delegation credential for use by test/free_stateid */
2839 rcu_read_lock();
2840 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2841 if (delegation == NULL) {
2842 rcu_read_unlock();
2843 nfs_state_clear_delegation(state);
2844 return NFS_OK;
2845 }
2846
2847 spin_lock(&delegation->lock);
2848 nfs4_stateid_copy(&stateid, &delegation->stateid);
2849
2850 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
2851 &delegation->flags)) {
2852 spin_unlock(&delegation->lock);
2853 rcu_read_unlock();
2854 return NFS_OK;
2855 }
2856
2857 if (delegation->cred)
2858 cred = get_cred(delegation->cred);
2859 spin_unlock(&delegation->lock);
2860 rcu_read_unlock();
2861 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
2862 trace_nfs4_test_delegation_stateid(state, NULL, status);
2863 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
2864 nfs_finish_clear_delegation_stateid(state, &stateid);
2865 else
2866 ret = status;
2867
2868 put_cred(cred);
2869 return ret;
2870 }
2871
2872 static void nfs41_delegation_recover_stateid(struct nfs4_state *state)
2873 {
2874 nfs4_stateid tmp;
2875
2876 if (test_bit(NFS_DELEGATED_STATE, &state->flags) &&
2877 nfs4_copy_delegation_stateid(state->inode, state->state,
2878 &tmp, NULL) &&
2879 nfs4_stateid_match_other(&state->stateid, &tmp))
2880 nfs_state_set_delegation(state, &tmp, state->state);
2881 else
2882 nfs_state_clear_delegation(state);
2883 }
2884
2885 /**
2886 * nfs41_check_expired_locks - possibly free a lock stateid
2887 *
2888 * @state: NFSv4 state for an inode
2889 *
2890 * Returns NFS_OK if recovery for this stateid is now finished.
2891 * Otherwise a negative NFS4ERR value is returned.
2892 */
2893 static int nfs41_check_expired_locks(struct nfs4_state *state)
2894 {
2895 int status, ret = NFS_OK;
2896 struct nfs4_lock_state *lsp, *prev = NULL;
2897 struct nfs_server *server = NFS_SERVER(state->inode);
2898
2899 if (!test_bit(LK_STATE_IN_USE, &state->flags))
2900 goto out;
2901
2902 spin_lock(&state->state_lock);
2903 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
2904 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
2905 const struct cred *cred = lsp->ls_state->owner->so_cred;
2906
2907 refcount_inc(&lsp->ls_count);
2908 spin_unlock(&state->state_lock);
2909
2910 nfs4_put_lock_state(prev);
2911 prev = lsp;
2912
2913 status = nfs41_test_and_free_expired_stateid(server,
2914 &lsp->ls_stateid,
2915 cred);
2916 trace_nfs4_test_lock_stateid(state, lsp, status);
2917 if (status == -NFS4ERR_EXPIRED ||
2918 status == -NFS4ERR_BAD_STATEID) {
2919 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
2920 lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE;
2921 if (!recover_lost_locks)
2922 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
2923 } else if (status != NFS_OK) {
2924 ret = status;
2925 nfs4_put_lock_state(prev);
2926 goto out;
2927 }
2928 spin_lock(&state->state_lock);
2929 }
2930 }
2931 spin_unlock(&state->state_lock);
2932 nfs4_put_lock_state(prev);
2933 out:
2934 return ret;
2935 }
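/*
 * Note on the locking pattern above (a sketch of the intent): the
 * state_lock cannot be held across the synchronous TEST/FREE_STATEID
 * calls, so each lock state is pinned with an extra reference while
 * the spinlock is dropped, and the previous entry is only released
 * after the next one has been pinned, keeping the list walk safe.
 */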
2936
2937 /**
2938 * nfs41_check_open_stateid - possibly free an open stateid
2939 *
2940 * @state: NFSv4 state for an inode
2941 *
2942 * Returns NFS_OK if recovery for this stateid is now finished.
2943 * Otherwise a negative NFS4ERR value is returned.
2944 */
2945 static int nfs41_check_open_stateid(struct nfs4_state *state)
2946 {
2947 struct nfs_server *server = NFS_SERVER(state->inode);
2948 nfs4_stateid *stateid = &state->open_stateid;
2949 const struct cred *cred = state->owner->so_cred;
2950 int status;
2951
2952 if (test_bit(NFS_OPEN_STATE, &state->flags) == 0)
2953 return -NFS4ERR_BAD_STATEID;
2954 status = nfs41_test_and_free_expired_stateid(server, stateid, cred);
2955 trace_nfs4_test_open_stateid(state, NULL, status);
2956 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) {
2957 nfs_state_clear_open_state_flags(state);
2958 stateid->type = NFS4_INVALID_STATEID_TYPE;
2959 return status;
2960 }
2961 if (nfs_open_stateid_recover_openmode(state))
2962 return -NFS4ERR_OPENMODE;
2963 return NFS_OK;
2964 }
2965
2966 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2967 {
2968 int status;
2969
2970 status = nfs41_check_delegation_stateid(state);
2971 if (status != NFS_OK)
2972 return status;
2973 nfs41_delegation_recover_stateid(state);
2974
2975 status = nfs41_check_expired_locks(state);
2976 if (status != NFS_OK)
2977 return status;
2978 status = nfs41_check_open_stateid(state);
2979 if (status != NFS_OK)
2980 status = nfs4_open_expired(sp, state);
2981 return status;
2982 }
2983 #endif
2984
2985 /*
2986 * On an EXCLUSIVE create, the server should send back a bitmask of the
2987 * FATTR4_* fields it used to store the verifier.
2988 * Make sure we clobber those fields in the later SETATTR call.
2989 */
2990 static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
2991 struct iattr *sattr, struct nfs4_label **label)
2992 {
2993 const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask;
2994 __u32 attrset[3];
2995 unsigned ret;
2996 unsigned i;
2997
2998 for (i = 0; i < ARRAY_SIZE(attrset); i++) {
2999 attrset[i] = opendata->o_res.attrset[i];
3000 if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1)
3001 attrset[i] &= ~bitmask[i];
3002 }
3003
3004 ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ?
3005 sattr->ia_valid : 0;
3006
3007 if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) {
3008 if (sattr->ia_valid & ATTR_ATIME_SET)
3009 ret |= ATTR_ATIME_SET;
3010 else
3011 ret |= ATTR_ATIME;
3012 }
3013
3014 if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) {
3015 if (sattr->ia_valid & ATTR_MTIME_SET)
3016 ret |= ATTR_MTIME_SET;
3017 else
3018 ret |= ATTR_MTIME;
3019 }
3020
3021 if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL))
3022 *label = NULL;
3023 return ret;
3024 }
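/*
 * Worked example for nfs4_exclusive_attrset() (illustrative): if the
 * server's attrset has FATTR4_WORD1_TIME_MODIFY set, the verifier was
 * stored in the mtime, so ATTR_MTIME (or ATTR_MTIME_SET if the caller
 * supplied one) is added to the returned mask and the follow-up
 * SETATTR overwrites it with the intended value.
 */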
3025
3026 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
3027 int flags, struct nfs_open_context *ctx)
3028 {
3029 struct nfs4_state_owner *sp = opendata->owner;
3030 struct nfs_server *server = sp->so_server;
3031 struct dentry *dentry;
3032 struct nfs4_state *state;
3033 fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx);
3034 struct inode *dir = d_inode(opendata->dir);
3035 unsigned long dir_verifier;
3036 unsigned int seq;
3037 int ret;
3038
3039 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
3040 dir_verifier = nfs_save_change_attribute(dir);
3041
3042 ret = _nfs4_proc_open(opendata, ctx);
3043 if (ret != 0)
3044 goto out;
3045
3046 state = _nfs4_opendata_to_nfs4_state(opendata);
3047 ret = PTR_ERR(state);
3048 if (IS_ERR(state))
3049 goto out;
3050 ctx->state = state;
3051 if (server->caps & NFS_CAP_POSIX_LOCK)
3052 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
3053 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK)
3054 set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags);
3055 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_PRESERVE_UNLINKED)
3056 set_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(state->inode)->flags);
3057
3058 dentry = opendata->dentry;
3059 if (d_really_is_negative(dentry)) {
3060 struct dentry *alias;
3061 d_drop(dentry);
3062 alias = d_exact_alias(dentry, state->inode);
3063 if (!alias)
3064 alias = d_splice_alias(igrab(state->inode), dentry);
3065 /* d_splice_alias() can't fail here - it's a non-directory */
3066 if (alias) {
3067 dput(ctx->dentry);
3068 ctx->dentry = dentry = alias;
3069 }
3070 }
3071
3072 switch(opendata->o_arg.claim) {
3073 default:
3074 break;
3075 case NFS4_OPEN_CLAIM_NULL:
3076 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
3077 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
3078 if (!opendata->rpc_done)
3079 break;
3080 if (opendata->o_res.delegation_type != 0)
3081 dir_verifier = nfs_save_change_attribute(dir);
3082 nfs_set_verifier(dentry, dir_verifier);
3083 }
3084
3085 /* Parse layoutget results before we check for access */
3086 pnfs_parse_lgopen(state->inode, opendata->lgp, ctx);
3087
3088 ret = nfs4_opendata_access(sp->so_cred, opendata, state,
3089 acc_mode, flags);
3090 if (ret != 0)
3091 goto out;
3092
3093 if (d_inode(dentry) == state->inode) {
3094 nfs_inode_attach_open_context(ctx);
3095 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
3096 nfs4_schedule_stateid_recovery(server, state);
3097 }
3098
3099 out:
3100 if (!opendata->cancelled) {
3101 if (opendata->lgp) {
3102 nfs4_lgopen_release(opendata->lgp);
3103 opendata->lgp = NULL;
3104 }
3105 nfs4_sequence_free_slot(&opendata->o_res.seq_res);
3106 }
3107 return ret;
3108 }
3109
3110 /*
3111 * Returns a referenced nfs4_state
3112 */
3113 static int _nfs4_do_open(struct inode *dir,
3114 struct nfs_open_context *ctx,
3115 int flags,
3116 const struct nfs4_open_createattrs *c,
3117 int *opened)
3118 {
3119 struct nfs4_state_owner *sp;
3120 struct nfs4_state *state = NULL;
3121 struct nfs_server *server = NFS_SERVER(dir);
3122 struct nfs4_opendata *opendata;
3123 struct dentry *dentry = ctx->dentry;
3124 const struct cred *cred = ctx->cred;
3125 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
3126 fmode_t fmode = _nfs4_ctx_to_openmode(ctx);
3127 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
3128 struct iattr *sattr = c->sattr;
3129 struct nfs4_label *label = c->label;
3130 int status;
3131
3132 /* Protect against reboot recovery conflicts */
3133 status = -ENOMEM;
3134 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
3135 if (sp == NULL) {
3136 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
3137 goto out_err;
3138 }
3139 status = nfs4_client_recover_expired_lease(server->nfs_client);
3140 if (status != 0)
3141 goto err_put_state_owner;
3142 if (d_really_is_positive(dentry))
3143 nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
3144 status = -ENOMEM;
3145 if (d_really_is_positive(dentry))
3146 claim = NFS4_OPEN_CLAIM_FH;
3147 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags,
3148 c, claim, GFP_KERNEL);
3149 if (opendata == NULL)
3150 goto err_put_state_owner;
3151
3152 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
3153 if (!opendata->f_attr.mdsthreshold) {
3154 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
3155 if (!opendata->f_attr.mdsthreshold)
3156 goto err_opendata_put;
3157 }
3158 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
3159 }
3160 if (d_really_is_positive(dentry))
3161 opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
3162
3163 status = _nfs4_open_and_get_state(opendata, flags, ctx);
3164 if (status != 0)
3165 goto err_opendata_put;
3166 state = ctx->state;
3167
3168 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
3169 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
3170 unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label);
3171 /*
3172 * Send any create attributes that were not set by the OPEN itself
3173 * with an extra SETATTR.
3174 */
3175 if (attrs || label) {
3176 unsigned ia_old = sattr->ia_valid;
3177
3178 sattr->ia_valid = attrs;
3179 nfs_fattr_init(opendata->o_res.f_attr);
3180 status = nfs4_do_setattr(state->inode, cred,
3181 opendata->o_res.f_attr, sattr,
3182 ctx, label);
3183 if (status == 0) {
3184 nfs_setattr_update_inode(state->inode, sattr,
3185 opendata->o_res.f_attr);
3186 nfs_setsecurity(state->inode, opendata->o_res.f_attr);
3187 }
3188 sattr->ia_valid = ia_old;
3189 }
3190 }
3191 if (opened && opendata->file_created)
3192 *opened = 1;
3193
3194 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
3195 *ctx_th = opendata->f_attr.mdsthreshold;
3196 opendata->f_attr.mdsthreshold = NULL;
3197 }
3198
3199 nfs4_opendata_put(opendata);
3200 nfs4_put_state_owner(sp);
3201 return 0;
3202 err_opendata_put:
3203 nfs4_opendata_put(opendata);
3204 err_put_state_owner:
3205 nfs4_put_state_owner(sp);
3206 out_err:
3207 return status;
3208 }
3209
3210
3211 static struct nfs4_state *nfs4_do_open(struct inode *dir,
3212 struct nfs_open_context *ctx,
3213 int flags,
3214 struct iattr *sattr,
3215 struct nfs4_label *label,
3216 int *opened)
3217 {
3218 struct nfs_server *server = NFS_SERVER(dir);
3219 struct nfs4_exception exception = {
3220 .interruptible = true,
3221 };
3222 struct nfs4_state *res;
3223 struct nfs4_open_createattrs c = {
3224 .label = label,
3225 .sattr = sattr,
3226 .verf = {
3227 [0] = (__u32)jiffies,
3228 [1] = (__u32)current->pid,
3229 },
3230 };
3231 int status;
3232
3233 do {
3234 status = _nfs4_do_open(dir, ctx, flags, &c, opened);
3235 res = ctx->state;
3236 trace_nfs4_open_file(ctx, flags, status);
3237 if (status == 0)
3238 break;
3239 /* NOTE: BAD_SEQID means the server and client disagree about the
3240 * book-keeping w.r.t. state-changing operations
3241 * (OPEN/CLOSE/LOCK/LOCKU...)
3242 * It is actually a sign of a bug on the client or on the server.
3243 *
3244 * If we receive a BAD_SEQID error in the particular case of
3245 * doing an OPEN, we assume that nfs_increment_open_seqid() will
3246 * have unhashed the old state_owner for us, and that we can
3247 * therefore safely retry using a new one. We should still warn
3248 * the user though...
3249 */
3250 if (status == -NFS4ERR_BAD_SEQID) {
3251 pr_warn_ratelimited("NFS: v4 server %s "
3252 "returned a bad sequence-id error!\n",
3253 NFS_SERVER(dir)->nfs_client->cl_hostname);
3254 exception.retry = 1;
3255 continue;
3256 }
3257 /*
3258 * BAD_STATEID on OPEN means that the server cancelled our
3259 * state before it received the OPEN_CONFIRM.
3260 * Recover by retrying the request as per the discussion
3261 * on Page 181 of RFC3530.
3262 */
3263 if (status == -NFS4ERR_BAD_STATEID) {
3264 exception.retry = 1;
3265 continue;
3266 }
3267 if (status == -NFS4ERR_EXPIRED) {
3268 nfs4_schedule_lease_recovery(server->nfs_client);
3269 exception.retry = 1;
3270 continue;
3271 }
3272 if (status == -EAGAIN) {
3273 /* We must have found a delegation */
3274 exception.retry = 1;
3275 continue;
3276 }
3277 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
3278 continue;
3279 res = ERR_PTR(nfs4_handle_exception(server,
3280 status, &exception));
3281 } while (exception.retry);
3282 return res;
3283 }
3284
3285 static int _nfs4_do_setattr(struct inode *inode,
3286 struct nfs_setattrargs *arg,
3287 struct nfs_setattrres *res,
3288 const struct cred *cred,
3289 struct nfs_open_context *ctx)
3290 {
3291 struct nfs_server *server = NFS_SERVER(inode);
3292 struct rpc_message msg = {
3293 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
3294 .rpc_argp = arg,
3295 .rpc_resp = res,
3296 .rpc_cred = cred,
3297 };
3298 const struct cred *delegation_cred = NULL;
3299 unsigned long timestamp = jiffies;
3300 bool truncate;
3301 int status;
3302
3303 nfs_fattr_init(res->fattr);
3304
3305 /* Servers should only apply open mode checks for file size changes */
3306 truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false;
3307 if (!truncate) {
3308 nfs4_inode_make_writeable(inode);
3309 goto zero_stateid;
3310 }
3311
3312 if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
3313 /* Use that stateid */
3314 } else if (ctx != NULL && ctx->state) {
3315 struct nfs_lock_context *l_ctx;
3316 if (!nfs4_valid_open_stateid(ctx->state))
3317 return -EBADF;
3318 l_ctx = nfs_get_lock_context(ctx);
3319 if (IS_ERR(l_ctx))
3320 return PTR_ERR(l_ctx);
3321 status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx,
3322 &arg->stateid, &delegation_cred);
3323 nfs_put_lock_context(l_ctx);
3324 if (status == -EIO)
3325 return -EBADF;
3326 else if (status == -EAGAIN)
3327 goto zero_stateid;
3328 } else {
3329 zero_stateid:
3330 nfs4_stateid_copy(&arg->stateid, &zero_stateid);
3331 }
3332 if (delegation_cred)
3333 msg.rpc_cred = delegation_cred;
3334
3335 status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1);
3336
3337 put_cred(delegation_cred);
3338 if (status == 0 && ctx != NULL)
3339 renew_lease(server, timestamp);
3340 trace_nfs4_setattr(inode, &arg->stateid, status);
3341 return status;
3342 }
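/*
 * Informal note on stateid selection in _nfs4_do_setattr(): a
 * size-changing SETATTR prefers a write delegation stateid, then a
 * lock/open stateid chosen by nfs4_select_rw_stateid(), and finally
 * falls back to the zero stateid; non-truncating SETATTRs call
 * nfs4_inode_make_writeable() and always use the zero stateid.
 */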
3343
3344 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
3345 struct nfs_fattr *fattr, struct iattr *sattr,
3346 struct nfs_open_context *ctx, struct nfs4_label *ilabel)
3347 {
3348 struct nfs_server *server = NFS_SERVER(inode);
3349 __u32 bitmask[NFS4_BITMASK_SZ];
3350 struct nfs4_state *state = ctx ? ctx->state : NULL;
3351 struct nfs_setattrargs arg = {
3352 .fh = NFS_FH(inode),
3353 .iap = sattr,
3354 .server = server,
3355 .bitmask = bitmask,
3356 .label = ilabel,
3357 };
3358 struct nfs_setattrres res = {
3359 .fattr = fattr,
3360 .server = server,
3361 };
3362 struct nfs4_exception exception = {
3363 .state = state,
3364 .inode = inode,
3365 .stateid = &arg.stateid,
3366 };
3367 unsigned long adjust_flags = NFS_INO_INVALID_CHANGE;
3368 int err;
3369
3370 if (sattr->ia_valid & (ATTR_MODE | ATTR_KILL_SUID | ATTR_KILL_SGID))
3371 adjust_flags |= NFS_INO_INVALID_MODE;
3372 if (sattr->ia_valid & (ATTR_UID | ATTR_GID))
3373 adjust_flags |= NFS_INO_INVALID_OTHER;
3374
3375 do {
3376 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label),
3377 inode, adjust_flags);
3378
3379 err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx);
3380 switch (err) {
3381 case -NFS4ERR_OPENMODE:
3382 if (!(sattr->ia_valid & ATTR_SIZE)) {
3383 pr_warn_once("NFSv4: server %s is incorrectly "
3384 "applying open mode checks to "
3385 "a SETATTR that is not "
3386 "changing file size.\n",
3387 server->nfs_client->cl_hostname);
3388 }
3389 if (state && !(state->state & FMODE_WRITE)) {
3390 err = -EBADF;
3391 if (sattr->ia_valid & ATTR_OPEN)
3392 err = -EACCES;
3393 goto out;
3394 }
3395 }
3396 err = nfs4_handle_exception(server, err, &exception);
3397 } while (exception.retry);
3398 out:
3399 return err;
3400 }
3401
3402 static bool
3403 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
3404 {
3405 if (inode == NULL || !nfs_have_layout(inode))
3406 return false;
3407
3408 return pnfs_wait_on_layoutreturn(inode, task);
3409 }
3410
3411 /*
3412 * Update the seqid of an open stateid
3413 */
3414 static void nfs4_sync_open_stateid(nfs4_stateid *dst,
3415 struct nfs4_state *state)
3416 {
3417 __be32 seqid_open;
3418 u32 dst_seqid;
3419 int seq;
3420
3421 for (;;) {
3422 if (!nfs4_valid_open_stateid(state))
3423 break;
3424 seq = read_seqbegin(&state->seqlock);
3425 if (!nfs4_state_match_open_stateid_other(state, dst)) {
3426 nfs4_stateid_copy(dst, &state->open_stateid);
3427 if (read_seqretry(&state->seqlock, seq))
3428 continue;
3429 break;
3430 }
3431 seqid_open = state->open_stateid.seqid;
3432 if (read_seqretry(&state->seqlock, seq))
3433 continue;
3434
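/* Only take the state's seqid if it is newer than ours (serial-number comparison) */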
3435 dst_seqid = be32_to_cpu(dst->seqid);
3436 if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0)
3437 dst->seqid = seqid_open;
3438 break;
3439 }
3440 }
3441
3442 /*
3443 * Update the seqid of an open stateid after receiving
3444 * NFS4ERR_OLD_STATEID
3445 */
3446 static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
3447 struct nfs4_state *state)
3448 {
3449 __be32 seqid_open;
3450 u32 dst_seqid;
3451 bool ret;
3452 int seq, status = -EAGAIN;
3453 DEFINE_WAIT(wait);
3454
3455 for (;;) {
3456 ret = false;
3457 if (!nfs4_valid_open_stateid(state))
3458 break;
3459 seq = read_seqbegin(&state->seqlock);
3460 if (!nfs4_state_match_open_stateid_other(state, dst)) {
3461 if (read_seqretry(&state->seqlock, seq))
3462 continue;
3463 break;
3464 }
3465
3466 write_seqlock(&state->seqlock);
3467 seqid_open = state->open_stateid.seqid;
3468
3469 dst_seqid = be32_to_cpu(dst->seqid);
3470
3471 /* Did another OPEN bump the state's seqid? try again: */
3472 if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) {
3473 dst->seqid = seqid_open;
3474 write_sequnlock(&state->seqlock);
3475 ret = true;
3476 break;
3477 }
3478
3479 /* server says we're behind but we haven't seen the update yet */
3480 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
3481 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
3482 write_sequnlock(&state->seqlock);
3483 trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
3484
3485 if (fatal_signal_pending(current))
3486 status = -EINTR;
3487 else
3488 if (schedule_timeout(5*HZ) != 0)
3489 status = 0;
3490
3491 finish_wait(&state->waitq, &wait);
3492
3493 if (!status)
3494 continue;
3495 if (status == -EINTR)
3496 break;
3497
3498 /* we slept the whole 5 seconds, we must have lost a seqid */
3499 dst->seqid = cpu_to_be32(dst_seqid + 1);
3500 ret = true;
3501 break;
3502 }
3503
3504 return ret;
3505 }
3506
3507 struct nfs4_closedata {
3508 struct inode *inode;
3509 struct nfs4_state *state;
3510 struct nfs_closeargs arg;
3511 struct nfs_closeres res;
3512 struct {
3513 struct nfs4_layoutreturn_args arg;
3514 struct nfs4_layoutreturn_res res;
3515 struct nfs4_xdr_opaque_data ld_private;
3516 u32 roc_barrier;
3517 bool roc;
3518 } lr;
3519 struct nfs_fattr fattr;
3520 unsigned long timestamp;
3521 };
3522
3523 static void nfs4_free_closedata(void *data)
3524 {
3525 struct nfs4_closedata *calldata = data;
3526 struct nfs4_state_owner *sp = calldata->state->owner;
3527 struct super_block *sb = calldata->state->inode->i_sb;
3528
3529 if (calldata->lr.roc)
3530 pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res,
3531 calldata->res.lr_ret);
3532 nfs4_put_open_state(calldata->state);
3533 nfs_free_seqid(calldata->arg.seqid);
3534 nfs4_put_state_owner(sp);
3535 nfs_sb_deactive(sb);
3536 kfree(calldata);
3537 }
3538
3539 static void nfs4_close_done(struct rpc_task *task, void *data)
3540 {
3541 struct nfs4_closedata *calldata = data;
3542 struct nfs4_state *state = calldata->state;
3543 struct nfs_server *server = NFS_SERVER(calldata->inode);
3544 nfs4_stateid *res_stateid = NULL;
3545 struct nfs4_exception exception = {
3546 .state = state,
3547 .inode = calldata->inode,
3548 .stateid = &calldata->arg.stateid,
3549 };
3550
3551 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
3552 return;
3553 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
3554
3555 /* Handle Layoutreturn errors */
3556 if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res,
3557 &calldata->res.lr_ret) == -EAGAIN)
3558 goto out_restart;
3559
3560 /* We are done with the inode, and in the process of freeing
3561 * the state_owner. Keep this around to process errors.
3562 */
3563 switch (task->tk_status) {
3564 case 0:
3565 res_stateid = &calldata->res.stateid;
3566 renew_lease(server, calldata->timestamp);
3567 break;
3568 case -NFS4ERR_ACCESS:
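/* The server rejected the piggybacked GETATTR; retry the CLOSE without requesting attributes */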
3569 if (calldata->arg.bitmask != NULL) {
3570 calldata->arg.bitmask = NULL;
3571 calldata->res.fattr = NULL;
3572 goto out_restart;
3573
3574 }
3575 break;
3576 case -NFS4ERR_OLD_STATEID:
3577 /* Did we race with OPEN? */
3578 if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid,
3579 state))
3580 goto out_restart;
3581 goto out_release;
3582 case -NFS4ERR_ADMIN_REVOKED:
3583 case -NFS4ERR_STALE_STATEID:
3584 case -NFS4ERR_EXPIRED:
3585 nfs4_free_revoked_stateid(server,
3586 &calldata->arg.stateid,
3587 task->tk_msg.rpc_cred);
3588 fallthrough;
3589 case -NFS4ERR_BAD_STATEID:
3590 if (calldata->arg.fmode == 0)
3591 break;
3592 fallthrough;
3593 default:
3594 task->tk_status = nfs4_async_handle_exception(task,
3595 server, task->tk_status, &exception);
3596 if (exception.retry)
3597 goto out_restart;
3598 }
3599 nfs_clear_open_stateid(state, &calldata->arg.stateid,
3600 res_stateid, calldata->arg.fmode);
3601 out_release:
3602 task->tk_status = 0;
3603 nfs_release_seqid(calldata->arg.seqid);
3604 nfs_refresh_inode(calldata->inode, &calldata->fattr);
3605 dprintk("%s: ret = %d\n", __func__, task->tk_status);
3606 return;
3607 out_restart:
3608 task->tk_status = 0;
3609 rpc_restart_call_prepare(task);
3610 goto out_release;
3611 }
3612
3613 static void nfs4_close_prepare(struct rpc_task *task, void *data)
3614 {
3615 struct nfs4_closedata *calldata = data;
3616 struct nfs4_state *state = calldata->state;
3617 struct inode *inode = calldata->inode;
3618 struct nfs_server *server = NFS_SERVER(inode);
3619 struct pnfs_layout_hdr *lo;
3620 bool is_rdonly, is_wronly, is_rdwr;
3621 int call_close = 0;
3622
3623 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
3624 goto out_wait;
3625
3626 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
3627 spin_lock(&state->owner->so_lock);
3628 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
3629 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
3630 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
3631 /* Calculate the change in open mode */
3632 calldata->arg.fmode = 0;
3633 if (state->n_rdwr == 0) {
3634 if (state->n_rdonly == 0)
3635 call_close |= is_rdonly;
3636 else if (is_rdonly)
3637 calldata->arg.fmode |= FMODE_READ;
3638 if (state->n_wronly == 0)
3639 call_close |= is_wronly;
3640 else if (is_wronly)
3641 calldata->arg.fmode |= FMODE_WRITE;
3642 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
3643 call_close |= is_rdwr;
3644 } else if (is_rdwr)
3645 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
3646
3647 nfs4_sync_open_stateid(&calldata->arg.stateid, state);
3648 if (!nfs4_valid_open_stateid(state))
3649 call_close = 0;
3650 spin_unlock(&state->owner->so_lock);
3651
3652 if (!call_close) {
3653 /* Note: exit _without_ calling nfs4_close_done */
3654 goto out_no_action;
3655 }
3656
3657 if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) {
3658 nfs_release_seqid(calldata->arg.seqid);
3659 goto out_wait;
3660 }
3661
3662 lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL;
3663 if (lo && !pnfs_layout_is_valid(lo)) {
3664 calldata->arg.lr_args = NULL;
3665 calldata->res.lr_res = NULL;
3666 }
3667
3668 if (calldata->arg.fmode == 0)
3669 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
3670
3671 if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
3672 /* Close-to-open cache consistency revalidation */
3673 if (!nfs4_have_delegation(inode, FMODE_READ)) {
3674 nfs4_bitmask_set(calldata->arg.bitmask_store,
3675 server->cache_consistency_bitmask,
3676 inode, 0);
3677 calldata->arg.bitmask = calldata->arg.bitmask_store;
3678 } else
3679 calldata->arg.bitmask = NULL;
3680 }
3681
3682 calldata->arg.share_access =
3683 nfs4_map_atomic_open_share(NFS_SERVER(inode),
3684 calldata->arg.fmode, 0);
3685
3686 if (calldata->res.fattr == NULL)
3687 calldata->arg.bitmask = NULL;
3688 else if (calldata->arg.bitmask == NULL)
3689 calldata->res.fattr = NULL;
3690 calldata->timestamp = jiffies;
3691 if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client,
3692 &calldata->arg.seq_args,
3693 &calldata->res.seq_res,
3694 task) != 0)
3695 nfs_release_seqid(calldata->arg.seqid);
3696 return;
3697 out_no_action:
3698 task->tk_action = NULL;
3699 out_wait:
3700 nfs4_sequence_done(task, &calldata->res.seq_res);
3701 }
3702
3703 static const struct rpc_call_ops nfs4_close_ops = {
3704 .rpc_call_prepare = nfs4_close_prepare,
3705 .rpc_call_done = nfs4_close_done,
3706 .rpc_release = nfs4_free_closedata,
3707 };
3708
3709 /*
3710 * It is possible for data to be read/written from a mem-mapped file
3711 * after the sys_close call (which hits the vfs layer as a flush).
3712 * This means that we can't safely call nfsv4 close on a file until
3713 * the inode is cleared. This in turn means that we are not good
3714 * NFSv4 citizens - we do not tell the server to update the file's
3715 * share state even when we are done with one of the three share
3716 * stateids in the inode.
3717 *
3718 * NOTE: Caller must be holding the sp->so_owner semaphore!
3719 */
3720 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
3721 {
3722 struct nfs_server *server = NFS_SERVER(state->inode);
3723 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
3724 struct nfs4_closedata *calldata;
3725 struct nfs4_state_owner *sp = state->owner;
3726 struct rpc_task *task;
3727 struct rpc_message msg = {
3728 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
3729 .rpc_cred = state->owner->so_cred,
3730 };
3731 struct rpc_task_setup task_setup_data = {
3732 .rpc_client = server->client,
3733 .rpc_message = &msg,
3734 .callback_ops = &nfs4_close_ops,
3735 .workqueue = nfsiod_workqueue,
3736 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
3737 };
3738 int status = -ENOMEM;
3739
3740 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE))
3741 task_setup_data.flags |= RPC_TASK_MOVEABLE;
3742
3743 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
3744 &task_setup_data.rpc_client, &msg);
3745
3746 calldata = kzalloc(sizeof(*calldata), gfp_mask);
3747 if (calldata == NULL)
3748 goto out;
3749 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1, 0);
3750 calldata->inode = state->inode;
3751 calldata->state = state;
3752 calldata->arg.fh = NFS_FH(state->inode);
3753 if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state))
3754 goto out_free_calldata;
3755 /* Serialization for the sequence id */
3756 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
3757 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
3758 if (IS_ERR(calldata->arg.seqid))
3759 goto out_free_calldata;
3760 nfs_fattr_init(&calldata->fattr);
3761 calldata->arg.fmode = 0;
3762 calldata->lr.arg.ld_private = &calldata->lr.ld_private;
3763 calldata->res.fattr = &calldata->fattr;
3764 calldata->res.seqid = calldata->arg.seqid;
3765 calldata->res.server = server;
3766 calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
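/* Check whether the layout can be returned as part of this CLOSE (pNFS return-on-close) */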
3767 calldata->lr.roc = pnfs_roc(state->inode,
3768 &calldata->lr.arg, &calldata->lr.res, msg.rpc_cred);
3769 if (calldata->lr.roc) {
3770 calldata->arg.lr_args = &calldata->lr.arg;
3771 calldata->res.lr_res = &calldata->lr.res;
3772 }
3773 nfs_sb_active(calldata->inode->i_sb);
3774
3775 msg.rpc_argp = &calldata->arg;
3776 msg.rpc_resp = &calldata->res;
3777 task_setup_data.callback_data = calldata;
3778 task = rpc_run_task(&task_setup_data);
3779 if (IS_ERR(task))
3780 return PTR_ERR(task);
3781 status = 0;
3782 if (wait)
3783 status = rpc_wait_for_completion_task(task);
3784 rpc_put_task(task);
3785 return status;
3786 out_free_calldata:
3787 kfree(calldata);
3788 out:
3789 nfs4_put_open_state(state);
3790 nfs4_put_state_owner(sp);
3791 return status;
3792 }
3793
3794 static struct inode *
3795 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
3796 int open_flags, struct iattr *attr, int *opened)
3797 {
3798 struct nfs4_state *state;
3799 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
3800
3801 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
3802
3803 /* Protect against concurrent sillydeletes */
3804 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
3805
3806 nfs4_label_release_security(label);
3807
3808 if (IS_ERR(state))
3809 return ERR_CAST(state);
3810 return state->inode;
3811 }
3812
3813 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
3814 {
3815 if (ctx->state == NULL)
3816 return;
3817 if (is_sync)
3818 nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx));
3819 else
3820 nfs4_close_state(ctx->state, _nfs4_ctx_to_openmode(ctx));
3821 }
3822
3823 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
3824 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
3825 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_XATTR_SUPPORT - 1UL)
3826
3827 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3828 {
3829 u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion;
3830 struct nfs4_server_caps_arg args = {
3831 .fhandle = fhandle,
3832 .bitmask = bitmask,
3833 };
3834 struct nfs4_server_caps_res res = {};
3835 struct rpc_message msg = {
3836 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
3837 .rpc_argp = &args,
3838 .rpc_resp = &res,
3839 };
3840 int status;
3841 int i;
3842
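/* Attributes we query in order to probe the server's capabilities */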
3843 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
3844 FATTR4_WORD0_FH_EXPIRE_TYPE |
3845 FATTR4_WORD0_LINK_SUPPORT |
3846 FATTR4_WORD0_SYMLINK_SUPPORT |
3847 FATTR4_WORD0_ACLSUPPORT |
3848 FATTR4_WORD0_CASE_INSENSITIVE |
3849 FATTR4_WORD0_CASE_PRESERVING;
3850 if (minorversion)
3851 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
3852
3853 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3854 if (status == 0) {
3855 /* Sanity check the server answers */
3856 switch (minorversion) {
3857 case 0:
3858 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
3859 res.attr_bitmask[2] = 0;
3860 break;
3861 case 1:
3862 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
3863 break;
3864 case 2:
3865 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
3866 }
3867 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
3868 server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS |
3869 NFS_CAP_SYMLINKS| NFS_CAP_SECURITY_LABEL);
3870 server->fattr_valid = NFS_ATTR_FATTR_V4;
3871 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
3872 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3873 server->caps |= NFS_CAP_ACLS;
3874 if (res.has_links != 0)
3875 server->caps |= NFS_CAP_HARDLINKS;
3876 if (res.has_symlinks != 0)
3877 server->caps |= NFS_CAP_SYMLINKS;
3878 if (res.case_insensitive)
3879 server->caps |= NFS_CAP_CASE_INSENSITIVE;
3880 if (res.case_preserving)
3881 server->caps |= NFS_CAP_CASE_PRESERVING;
3882 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
3883 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
3884 server->caps |= NFS_CAP_SECURITY_LABEL;
3885 #endif
3886 if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS)
3887 server->caps |= NFS_CAP_FS_LOCATIONS;
3888 if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID))
3889 server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID;
3890 if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE))
3891 server->fattr_valid &= ~NFS_ATTR_FATTR_MODE;
3892 if (!(res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS))
3893 server->fattr_valid &= ~NFS_ATTR_FATTR_NLINK;
3894 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER))
3895 server->fattr_valid &= ~(NFS_ATTR_FATTR_OWNER |
3896 NFS_ATTR_FATTR_OWNER_NAME);
3897 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP))
3898 server->fattr_valid &= ~(NFS_ATTR_FATTR_GROUP |
3899 NFS_ATTR_FATTR_GROUP_NAME);
3900 if (!(res.attr_bitmask[1] & FATTR4_WORD1_SPACE_USED))
3901 server->fattr_valid &= ~NFS_ATTR_FATTR_SPACE_USED;
3902 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS))
3903 server->fattr_valid &= ~NFS_ATTR_FATTR_ATIME;
3904 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA))
3905 server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME;
3906 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY))
3907 server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME;
3908 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
3909 sizeof(server->attr_bitmask));
3910 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
3911
3912 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
3913 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
3914 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
3915 server->cache_consistency_bitmask[2] = 0;
3916
3917 /* Avoid a regression due to buggy server */
3918 for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++)
3919 res.exclcreat_bitmask[i] &= res.attr_bitmask[i];
3920 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
3921 sizeof(server->exclcreat_bitmask));
3922
3923 server->acl_bitmask = res.acl_bitmask;
3924 server->fh_expire_type = res.fh_expire_type;
3925 }
3926
3927 return status;
3928 }
3929
3930 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3931 {
3932 struct nfs4_exception exception = {
3933 .interruptible = true,
3934 };
3935 int err;
3936
3937 nfs4_server_set_init_caps(server);
3938 do {
3939 err = nfs4_handle_exception(server,
3940 _nfs4_server_capabilities(server, fhandle),
3941 &exception);
3942 } while (exception.retry);
3943 return err;
3944 }
3945
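/*
 * Probe each server address in an fs_locations entry as a candidate for
 * session trunking: add it as a transport and let the trunking test
 * confirm whether it belongs to the same server instance.
 */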
3946 static void test_fs_location_for_trunking(struct nfs4_fs_location *location,
3947 struct nfs_client *clp,
3948 struct nfs_server *server)
3949 {
3950 int i;
3951
3952 for (i = 0; i < location->nservers; i++) {
3953 struct nfs4_string *srv_loc = &location->servers[i];
3954 struct sockaddr_storage addr;
3955 size_t addrlen;
3956 struct xprt_create xprt_args = {
3957 .ident = 0,
3958 .net = clp->cl_net,
3959 };
3960 struct nfs4_add_xprt_data xprtdata = {
3961 .clp = clp,
3962 };
3963 struct rpc_add_xprt_test rpcdata = {
3964 .add_xprt_test = clp->cl_mvops->session_trunk,
3965 .data = &xprtdata,
3966 };
3967 char *servername = NULL;
3968
3969 if (!srv_loc->len)
3970 continue;
3971
3972 addrlen = nfs_parse_server_name(srv_loc->data, srv_loc->len,
3973 &addr, sizeof(addr),
3974 clp->cl_net, server->port);
3975 if (!addrlen)
3976 return;
3977 xprt_args.dstaddr = (struct sockaddr *)&addr;
3978 xprt_args.addrlen = addrlen;
3979 servername = kmalloc(srv_loc->len + 1, GFP_KERNEL);
3980 if (!servername)
3981 return;
3982 memcpy(servername, srv_loc->data, srv_loc->len);
3983 servername[srv_loc->len] = '\0';
3984 xprt_args.servername = servername;
3985
3986 xprtdata.cred = nfs4_get_clid_cred(clp);
3987 rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
3988 rpc_clnt_setup_test_and_add_xprt,
3989 &rpcdata);
3990 if (xprtdata.cred)
3991 put_cred(xprtdata.cred);
3992 kfree(servername);
3993 }
3994 }
3995
3996 static int _nfs4_discover_trunking(struct nfs_server *server,
3997 struct nfs_fh *fhandle)
3998 {
3999 struct nfs4_fs_locations *locations = NULL;
4000 struct page *page;
4001 const struct cred *cred;
4002 struct nfs_client *clp = server->nfs_client;
4003 const struct nfs4_state_maintenance_ops *ops =
4004 clp->cl_mvops->state_renewal_ops;
4005 int status = -ENOMEM, i;
4006
4007 cred = ops->get_state_renewal_cred(clp);
4008 if (cred == NULL) {
4009 cred = nfs4_get_clid_cred(clp);
4010 if (cred == NULL)
4011 return -ENOKEY;
4012 }
4013
4014 page = alloc_page(GFP_KERNEL);
4015 if (!page)
4016 return -ENOMEM;
4017 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
4018 if (!locations)
4019 goto out_free;
4020 locations->fattr = nfs_alloc_fattr();
4021 if (!locations->fattr)
4022 goto out_free_2;
4023
4024 status = nfs4_proc_get_locations(server, fhandle, locations, page,
4025 cred);
4026 if (status)
4027 goto out_free_3;
4028
4029 for (i = 0; i < locations->nlocations; i++)
4030 test_fs_location_for_trunking(&locations->locations[i], clp,
4031 server);
4032 out_free_3:
4033 kfree(locations->fattr);
4034 out_free_2:
4035 kfree(locations);
4036 out_free:
4037 __free_page(page);
4038 return status;
4039 }
4040
4041 static int nfs4_discover_trunking(struct nfs_server *server,
4042 struct nfs_fh *fhandle)
4043 {
4044 struct nfs4_exception exception = {
4045 .interruptible = true,
4046 };
4047 struct nfs_client *clp = server->nfs_client;
4048 int err = 0;
4049
4050 if (!nfs4_has_session(clp))
4051 goto out;
4052 do {
4053 err = nfs4_handle_exception(server,
4054 _nfs4_discover_trunking(server, fhandle),
4055 &exception);
4056 } while (exception.retry);
4057 out:
4058 return err;
4059 }
4060
4061 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
4062 struct nfs_fsinfo *info)
4063 {
4064 u32 bitmask[3];
4065 struct nfs4_lookup_root_arg args = {
4066 .bitmask = bitmask,
4067 };
4068 struct nfs4_lookup_res res = {
4069 .server = server,
4070 .fattr = info->fattr,
4071 .fh = fhandle,
4072 };
4073 struct rpc_message msg = {
4074 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
4075 .rpc_argp = &args,
4076 .rpc_resp = &res,
4077 };
4078
4079 bitmask[0] = nfs4_fattr_bitmap[0];
4080 bitmask[1] = nfs4_fattr_bitmap[1];
4081 /*
4082 * The security label is processed by the upcoming getattr, so don't request it here
4083 */
4084 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
4085
4086 nfs_fattr_init(info->fattr);
4087 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4088 }
4089
4090 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
4091 struct nfs_fsinfo *info)
4092 {
4093 struct nfs4_exception exception = {
4094 .interruptible = true,
4095 };
4096 int err;
4097 do {
4098 err = _nfs4_lookup_root(server, fhandle, info);
4099 trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
4100 switch (err) {
4101 case 0:
4102 case -NFS4ERR_WRONGSEC:
4103 goto out;
4104 default:
4105 err = nfs4_handle_exception(server, err, &exception);
4106 }
4107 } while (exception.retry);
4108 out:
4109 return err;
4110 }
4111
4112 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
4113 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
4114 {
4115 struct rpc_auth_create_args auth_args = {
4116 .pseudoflavor = flavor,
4117 };
4118 struct rpc_auth *auth;
4119
4120 auth = rpcauth_create(&auth_args, server->client);
4121 if (IS_ERR(auth))
4122 return -EACCES;
4123 return nfs4_lookup_root(server, fhandle, info);
4124 }
4125
4126 /*
4127 * Retry pseudoroot lookup with various security flavors. We do this when:
4128 *
4129 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
4130 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
4131 *
4132 * Returns zero on success, or a negative NFS4ERR value, or a
4133 * negative errno value.
4134 */
4135 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
4136 struct nfs_fsinfo *info)
4137 {
4138 /* Per 3530bis 15.33.5 */
4139 static const rpc_authflavor_t flav_array[] = {
4140 RPC_AUTH_GSS_KRB5P,
4141 RPC_AUTH_GSS_KRB5I,
4142 RPC_AUTH_GSS_KRB5,
4143 RPC_AUTH_UNIX, /* courtesy */
4144 RPC_AUTH_NULL,
4145 };
4146 int status = -EPERM;
4147 size_t i;
4148
4149 if (server->auth_info.flavor_len > 0) {
4150 /* try each flavor specified by user */
4151 for (i = 0; i < server->auth_info.flavor_len; i++) {
4152 status = nfs4_lookup_root_sec(server, fhandle, info,
4153 server->auth_info.flavors[i]);
4154 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
4155 continue;
4156 break;
4157 }
4158 } else {
4159 /* no flavors specified by user, try default list */
4160 for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
4161 status = nfs4_lookup_root_sec(server, fhandle, info,
4162 flav_array[i]);
4163 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
4164 continue;
4165 break;
4166 }
4167 }
4168
4169 /*
4170 * -EACCES could mean that the user doesn't have correct permissions
4171 * to access the mount. It could also mean that we tried to mount
4172 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
4173 * existing mount programs don't handle -EACCES very well so it should
4174 * be mapped to -EPERM instead.
4175 */
4176 if (status == -EACCES)
4177 status = -EPERM;
4178 return status;
4179 }
4180
4181 /**
4182 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
4183 * @server: initialized nfs_server handle
4184 * @fhandle: we fill in the pseudo-fs root file handle
4185 * @info: we fill in an FSINFO struct
4186 * @auth_probe: probe the auth flavours
4187 *
4188 * Returns zero on success, or a negative errno.
4189 */
4190 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
4191 struct nfs_fsinfo *info,
4192 bool auth_probe)
4193 {
4194 int status = 0;
4195
4196 if (!auth_probe)
4197 status = nfs4_lookup_root(server, fhandle, info);
4198
4199 if (auth_probe || status == -NFS4ERR_WRONGSEC)
4200 status = server->nfs_client->cl_mvops->find_root_sec(server,
4201 fhandle, info);
4202
4203 if (status == 0)
4204 status = nfs4_server_capabilities(server, fhandle);
4205 if (status == 0)
4206 status = nfs4_do_fsinfo(server, fhandle, info);
4207
4208 return nfs4_map_errors(status);
4209 }
4210
4211 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
4212 struct nfs_fsinfo *info)
4213 {
4214 int error;
4215 struct nfs_fattr *fattr = info->fattr;
4216
4217 error = nfs4_server_capabilities(server, mntfh);
4218 if (error < 0) {
4219 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
4220 return error;
4221 }
4222
4223 error = nfs4_proc_getattr(server, mntfh, fattr, NULL);
4224 if (error < 0) {
4225 dprintk("nfs4_get_root: getattr error = %d\n", -error);
4226 goto out;
4227 }
4228
4229 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
4230 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
4231 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
4232
4233 out:
4234 return error;
4235 }
4236
4237 /*
4238 * Get locations and (maybe) other attributes of a referral.
4239 * Note that we'll actually follow the referral later when
4240 * we detect fsid mismatch in inode revalidation
4241 */
4242 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
4243 const struct qstr *name, struct nfs_fattr *fattr,
4244 struct nfs_fh *fhandle)
4245 {
4246 int status = -ENOMEM;
4247 struct page *page = NULL;
4248 struct nfs4_fs_locations *locations = NULL;
4249
4250 page = alloc_page(GFP_KERNEL);
4251 if (page == NULL)
4252 goto out;
4253 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
4254 if (locations == NULL)
4255 goto out;
4256
4257 locations->fattr = fattr;
4258
4259 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
4260 if (status != 0)
4261 goto out;
4262
4263 /*
4264 * If the fsid didn't change, this is a migration event, not a
4265 * referral. Cause us to drop into the exception handler, which
4266 * will kick off migration recovery.
4267 */
4268 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &fattr->fsid)) {
4269 dprintk("%s: server did not return a different fsid for"
4270 " a referral at %s\n", __func__, name->name);
4271 status = -NFS4ERR_MOVED;
4272 goto out;
4273 }
4274 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
4275 nfs_fixup_referral_attributes(fattr);
4276 memset(fhandle, 0, sizeof(struct nfs_fh));
4277 out:
4278 if (page)
4279 __free_page(page);
4280 kfree(locations);
4281 return status;
4282 }
4283
4284 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
4285 struct nfs_fattr *fattr, struct inode *inode)
4286 {
4287 __u32 bitmask[NFS4_BITMASK_SZ];
4288 struct nfs4_getattr_arg args = {
4289 .fh = fhandle,
4290 .bitmask = bitmask,
4291 };
4292 struct nfs4_getattr_res res = {
4293 .fattr = fattr,
4294 .server = server,
4295 };
4296 struct rpc_message msg = {
4297 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
4298 .rpc_argp = &args,
4299 .rpc_resp = &res,
4300 };
4301 unsigned short task_flags = 0;
4302
4303 if (nfs4_has_session(server->nfs_client))
4304 task_flags = RPC_TASK_MOVEABLE;
4305
4306 /* Is this an attribute revalidation, subject to softreval? */
4307 if (inode && (server->flags & NFS_MOUNT_SOFTREVAL))
4308 task_flags |= RPC_TASK_TIMEOUT;
4309
4310 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), inode, 0);
4311 nfs_fattr_init(fattr);
4312 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
4313 return nfs4_do_call_sync(server->client, server, &msg,
4314 &args.seq_args, &res.seq_res, task_flags);
4315 }
4316
4317 int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
4318 struct nfs_fattr *fattr, struct inode *inode)
4319 {
4320 struct nfs4_exception exception = {
4321 .interruptible = true,
4322 };
4323 int err;
4324 do {
4325 err = _nfs4_proc_getattr(server, fhandle, fattr, inode);
4326 trace_nfs4_getattr(server, fhandle, fattr, err);
4327 err = nfs4_handle_exception(server, err,
4328 &exception);
4329 } while (exception.retry);
4330 return err;
4331 }
4332
4333 /*
4334 * The file is not closed if it is opened due to a request to change
4335 * the size of the file. The open call will not be needed once the
4336 * VFS layer lookup-intents are implemented.
4337 *
4338 * Close is called when the inode is destroyed.
4339 * If we haven't opened the file for O_WRONLY, we
4340 * need to do so in the size_change case to obtain a stateid.
4341 *
4342 * Got race?
4343 * Because OPEN is always done by name in nfsv4, it is
4344 * possible that we opened a different file by the same
4345 * name. We can recognize this race condition, but we
4346 * can't do anything about it besides returning an error.
4347 *
4348 * This will be fixed with VFS changes (lookup-intent).
4349 */
4350 static int
4351 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
4352 struct iattr *sattr)
4353 {
4354 struct inode *inode = d_inode(dentry);
4355 const struct cred *cred = NULL;
4356 struct nfs_open_context *ctx = NULL;
4357 int status;
4358
4359 if (pnfs_ld_layoutret_on_setattr(inode) &&
4360 sattr->ia_valid & ATTR_SIZE &&
4361 sattr->ia_size < i_size_read(inode))
4362 pnfs_commit_and_return_layout(inode);
4363
4364 nfs_fattr_init(fattr);
4365
4366 /* Deal with open(O_TRUNC) */
4367 if (sattr->ia_valid & ATTR_OPEN)
4368 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
4369
4370 /* Optimization: if the end result is no change, don't RPC */
4371 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
4372 return 0;
4373
4374 /* Search for an existing open(O_WRITE) file */
4375 if (sattr->ia_valid & ATTR_FILE) {
4376
4377 ctx = nfs_file_open_context(sattr->ia_file);
4378 if (ctx)
4379 cred = ctx->cred;
4380 }
4381
4382 /* Return any delegations if we're going to change ACLs */
4383 if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
4384 nfs4_inode_make_writeable(inode);
4385
4386 status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL);
4387 if (status == 0) {
4388 nfs_setattr_update_inode(inode, sattr, fattr);
4389 nfs_setsecurity(inode, fattr);
4390 }
4391 return status;
4392 }
4393
4394 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
4395 struct dentry *dentry, struct nfs_fh *fhandle,
4396 struct nfs_fattr *fattr)
4397 {
4398 struct nfs_server *server = NFS_SERVER(dir);
4399 int status;
4400 struct nfs4_lookup_arg args = {
4401 .bitmask = server->attr_bitmask,
4402 .dir_fh = NFS_FH(dir),
4403 .name = &dentry->d_name,
4404 };
4405 struct nfs4_lookup_res res = {
4406 .server = server,
4407 .fattr = fattr,
4408 .fh = fhandle,
4409 };
4410 struct rpc_message msg = {
4411 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
4412 .rpc_argp = &args,
4413 .rpc_resp = &res,
4414 };
4415 unsigned short task_flags = 0;
4416
4417 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE))
4418 task_flags = RPC_TASK_MOVEABLE;
4419
4420 /* Is this an attribute revalidation, subject to softreval? */
4421 if (nfs_lookup_is_soft_revalidate(dentry))
4422 task_flags |= RPC_TASK_TIMEOUT;
4423
4424 args.bitmask = nfs4_bitmask(server, fattr->label);
4425
4426 nfs_fattr_init(fattr);
4427
4428 dprintk("NFS call lookup %pd2\n", dentry);
4429 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
4430 status = nfs4_do_call_sync(clnt, server, &msg,
4431 &args.seq_args, &res.seq_res, task_flags);
4432 dprintk("NFS reply lookup: %d\n", status);
4433 return status;
4434 }
4435
4436 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
4437 {
4438 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
4439 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
4440 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
4441 fattr->nlink = 2;
4442 }
4443
4444 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
4445 struct dentry *dentry, struct nfs_fh *fhandle,
4446 struct nfs_fattr *fattr)
4447 {
4448 struct nfs4_exception exception = {
4449 .interruptible = true,
4450 };
4451 struct rpc_clnt *client = *clnt;
4452 const struct qstr *name = &dentry->d_name;
4453 int err;
4454 do {
4455 err = _nfs4_proc_lookup(client, dir, dentry, fhandle, fattr);
4456 trace_nfs4_lookup(dir, name, err);
4457 switch (err) {
4458 case -NFS4ERR_BADNAME:
4459 err = -ENOENT;
4460 goto out;
4461 case -NFS4ERR_MOVED:
4462 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
4463 if (err == -NFS4ERR_MOVED)
4464 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
4465 goto out;
4466 case -NFS4ERR_WRONGSEC:
4467 err = -EPERM;
4468 if (client != *clnt)
4469 goto out;
4470 client = nfs4_negotiate_security(client, dir, name);
4471 if (IS_ERR(client))
4472 return PTR_ERR(client);
4473
4474 exception.retry = 1;
4475 break;
4476 default:
4477 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
4478 }
4479 } while (exception.retry);
4480
4481 out:
4482 if (err == 0)
4483 *clnt = client;
4484 else if (client != *clnt)
4485 rpc_shutdown_client(client);
4486
4487 return err;
4488 }
4489
4490 static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry,
4491 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4492 {
4493 int status;
4494 struct rpc_clnt *client = NFS_CLIENT(dir);
4495
4496 status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr);
4497 if (client != NFS_CLIENT(dir)) {
4498 rpc_shutdown_client(client);
4499 nfs_fixup_secinfo_attributes(fattr);
4500 }
4501 return status;
4502 }
4503
4504 struct rpc_clnt *
4505 nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry,
4506 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4507 {
4508 struct rpc_clnt *client = NFS_CLIENT(dir);
4509 int status;
4510
4511 status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr);
4512 if (status < 0)
4513 return ERR_PTR(status);
4514 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
4515 }
4516
4517 static int _nfs4_proc_lookupp(struct inode *inode,
4518 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4519 {
4520 struct rpc_clnt *clnt = NFS_CLIENT(inode);
4521 struct nfs_server *server = NFS_SERVER(inode);
4522 int status;
4523 struct nfs4_lookupp_arg args = {
4524 .bitmask = server->attr_bitmask,
4525 .fh = NFS_FH(inode),
4526 };
4527 struct nfs4_lookupp_res res = {
4528 .server = server,
4529 .fattr = fattr,
4530 .fh = fhandle,
4531 };
4532 struct rpc_message msg = {
4533 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP],
4534 .rpc_argp = &args,
4535 .rpc_resp = &res,
4536 };
4537 unsigned short task_flags = 0;
4538
4539 if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
4540 task_flags |= RPC_TASK_TIMEOUT;
4541
4542 args.bitmask = nfs4_bitmask(server, fattr->label);
4543
4544 nfs_fattr_init(fattr);
4545
4546 dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino);
4547 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
4548 &res.seq_res, task_flags);
4549 dprintk("NFS reply lookupp: %d\n", status);
4550 return status;
4551 }
4552
4553 static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle,
4554 struct nfs_fattr *fattr)
4555 {
4556 struct nfs4_exception exception = {
4557 .interruptible = true,
4558 };
4559 int err;
4560 do {
4561 err = _nfs4_proc_lookupp(inode, fhandle, fattr);
4562 trace_nfs4_lookupp(inode, err);
4563 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4564 &exception);
4565 } while (exception.retry);
4566 return err;
4567 }
4568
4569 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry,
4570 const struct cred *cred)
4571 {
4572 struct nfs_server *server = NFS_SERVER(inode);
4573 struct nfs4_accessargs args = {
4574 .fh = NFS_FH(inode),
4575 .access = entry->mask,
4576 };
4577 struct nfs4_accessres res = {
4578 .server = server,
4579 };
4580 struct rpc_message msg = {
4581 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
4582 .rpc_argp = &args,
4583 .rpc_resp = &res,
4584 .rpc_cred = cred,
4585 };
4586 int status = 0;
4587
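/* If we hold a delegation, cached attributes are already valid; otherwise ask for post-op attributes */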
4588 if (!nfs4_have_delegation(inode, FMODE_READ)) {
4589 res.fattr = nfs_alloc_fattr();
4590 if (res.fattr == NULL)
4591 return -ENOMEM;
4592 args.bitmask = server->cache_consistency_bitmask;
4593 }
4594 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4595 if (!status) {
4596 nfs_access_set_mask(entry, res.access);
4597 if (res.fattr)
4598 nfs_refresh_inode(inode, res.fattr);
4599 }
4600 nfs_free_fattr(res.fattr);
4601 return status;
4602 }
4603
4604 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry,
4605 const struct cred *cred)
4606 {
4607 struct nfs4_exception exception = {
4608 .interruptible = true,
4609 };
4610 int err;
4611 do {
4612 err = _nfs4_proc_access(inode, entry, cred);
4613 trace_nfs4_access(inode, err);
4614 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4615 &exception);
4616 } while (exception.retry);
4617 return err;
4618 }
4619
4620 /*
4621 * TODO: For the time being, we don't try to get any attributes
4622 * along with any of the zero-copy operations READ, READDIR,
4623 * READLINK, WRITE.
4624 *
4625 * In the case of the first three, we want to put the GETATTR
4626 * after the read-type operation -- this is because it is hard
4627 * to predict the length of a GETATTR response in v4, and thus
4628 * align the READ data correctly. This means that the GETATTR
4629 * may end up partially falling into the page cache, and we should
4630 * shift it into the 'tail' of the xdr_buf before processing.
4631 * To do this efficiently, we need to know the total length
4632 * of data received, which doesn't seem to be available outside
4633 * of the RPC layer.
4634 *
4635 * In the case of WRITE, we also want to put the GETATTR after
4636 * the operation -- in this case because we want to make sure
4637 * we get the post-operation mtime and size.
4638 *
4639 * Both of these changes to the XDR layer would in fact be quite
4640 * minor, but I decided to leave them for a subsequent patch.
4641 */
4642 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
4643 unsigned int pgbase, unsigned int pglen)
4644 {
4645 struct nfs4_readlink args = {
4646 .fh = NFS_FH(inode),
4647 .pgbase = pgbase,
4648 .pglen = pglen,
4649 .pages = &page,
4650 };
4651 struct nfs4_readlink_res res;
4652 struct rpc_message msg = {
4653 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
4654 .rpc_argp = &args,
4655 .rpc_resp = &res,
4656 };
4657
4658 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
4659 }
4660
4661 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
4662 unsigned int pgbase, unsigned int pglen)
4663 {
4664 struct nfs4_exception exception = {
4665 .interruptible = true,
4666 };
4667 int err;
4668 do {
4669 err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
4670 trace_nfs4_readlink(inode, err);
4671 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4672 &exception);
4673 } while (exception.retry);
4674 return err;
4675 }
4676
4677 /*
4678 * This is just for mknod. open(O_CREAT) will always do ->open_context().
4679 */
4680 static int
4681 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
4682 int flags)
4683 {
4684 struct nfs_server *server = NFS_SERVER(dir);
4685 struct nfs4_label l, *ilabel = NULL;
4686 struct nfs_open_context *ctx;
4687 struct nfs4_state *state;
4688 int status = 0;
4689
4690 ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL);
4691 if (IS_ERR(ctx))
4692 return PTR_ERR(ctx);
4693
4694 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
4695
4696 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
4697 sattr->ia_mode &= ~current_umask();
4698 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
4699 if (IS_ERR(state)) {
4700 status = PTR_ERR(state);
4701 goto out;
4702 }
4703 out:
4704 nfs4_label_release_security(ilabel);
4705 put_nfs_open_context(ctx);
4706 return status;
4707 }
4708
4709 static int
4710 _nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype)
4711 {
4712 struct nfs_server *server = NFS_SERVER(dir);
4713 struct nfs_removeargs args = {
4714 .fh = NFS_FH(dir),
4715 .name = *name,
4716 };
4717 struct nfs_removeres res = {
4718 .server = server,
4719 };
4720 struct rpc_message msg = {
4721 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
4722 .rpc_argp = &args,
4723 .rpc_resp = &res,
4724 };
4725 unsigned long timestamp = jiffies;
4726 int status;
4727
4728 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
4729 if (status == 0) {
4730 spin_lock(&dir->i_lock);
4731 /* Removing a directory decrements nlink in the parent */
4732 if (ftype == NF4DIR && dir->i_nlink > 2)
4733 nfs4_dec_nlink_locked(dir);
4734 nfs4_update_changeattr_locked(dir, &res.cinfo, timestamp,
4735 NFS_INO_INVALID_DATA);
4736 spin_unlock(&dir->i_lock);
4737 }
4738 return status;
4739 }
4740
4741 static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry)
4742 {
4743 struct nfs4_exception exception = {
4744 .interruptible = true,
4745 };
4746 struct inode *inode = d_inode(dentry);
4747 int err;
4748
4749 if (inode) {
4750 if (inode->i_nlink == 1)
4751 nfs4_inode_return_delegation(inode);
4752 else
4753 nfs4_inode_make_writeable(inode);
4754 }
4755 do {
4756 err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG);
4757 trace_nfs4_remove(dir, &dentry->d_name, err);
4758 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4759 &exception);
4760 } while (exception.retry);
4761 return err;
4762 }
4763
4764 static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name)
4765 {
4766 struct nfs4_exception exception = {
4767 .interruptible = true,
4768 };
4769 int err;
4770
4771 do {
4772 err = _nfs4_proc_remove(dir, name, NF4DIR);
4773 trace_nfs4_remove(dir, name, err);
4774 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4775 &exception);
4776 } while (exception.retry);
4777 return err;
4778 }
4779
4780 static void nfs4_proc_unlink_setup(struct rpc_message *msg,
4781 struct dentry *dentry,
4782 struct inode *inode)
4783 {
4784 struct nfs_removeargs *args = msg->rpc_argp;
4785 struct nfs_removeres *res = msg->rpc_resp;
4786
4787 res->server = NFS_SB(dentry->d_sb);
4788 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
4789 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1, 0);
4790
4791 nfs_fattr_init(res->dir_attr);
4792
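/* Return any delegation on the victim so the server need not recall it during the REMOVE */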
4793 if (inode) {
4794 nfs4_inode_return_delegation(inode);
4795 nfs_d_prune_case_insensitive_aliases(inode);
4796 }
4797 }
4798
4799 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
4800 {
4801 nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client,
4802 &data->args.seq_args,
4803 &data->res.seq_res,
4804 task);
4805 }
4806
4807 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
4808 {
4809 struct nfs_unlinkdata *data = task->tk_calldata;
4810 struct nfs_removeres *res = &data->res;
4811
4812 if (!nfs4_sequence_done(task, &res->seq_res))
4813 return 0;
4814 if (nfs4_async_handle_error(task, res->server, NULL,
4815 &data->timeout) == -EAGAIN)
4816 return 0;
4817 if (task->tk_status == 0)
4818 nfs4_update_changeattr(dir, &res->cinfo,
4819 res->dir_attr->time_start,
4820 NFS_INO_INVALID_DATA);
4821 return 1;
4822 }
4823
4824 static void nfs4_proc_rename_setup(struct rpc_message *msg,
4825 struct dentry *old_dentry,
4826 struct dentry *new_dentry)
4827 {
4828 struct nfs_renameargs *arg = msg->rpc_argp;
4829 struct nfs_renameres *res = msg->rpc_resp;
4830 struct inode *old_inode = d_inode(old_dentry);
4831 struct inode *new_inode = d_inode(new_dentry);
4832
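/* The rename may unlink the target, so return its delegation; the source only needs to stay writeable */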
4833 if (old_inode)
4834 nfs4_inode_make_writeable(old_inode);
4835 if (new_inode)
4836 nfs4_inode_return_delegation(new_inode);
4837 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
4838 res->server = NFS_SB(old_dentry->d_sb);
4839 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1, 0);
4840 }
4841
4842 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
4843 {
4844 nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client,
4845 &data->args.seq_args,
4846 &data->res.seq_res,
4847 task);
4848 }
4849
4850 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
4851 struct inode *new_dir)
4852 {
4853 struct nfs_renamedata *data = task->tk_calldata;
4854 struct nfs_renameres *res = &data->res;
4855
4856 if (!nfs4_sequence_done(task, &res->seq_res))
4857 return 0;
4858 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
4859 return 0;
4860
4861 if (task->tk_status == 0) {
4862 nfs_d_prune_case_insensitive_aliases(d_inode(data->old_dentry));
4863 if (new_dir != old_dir) {
4864 /* Note: If we moved a directory, nlink will change */
4865 nfs4_update_changeattr(old_dir, &res->old_cinfo,
4866 res->old_fattr->time_start,
4867 NFS_INO_INVALID_NLINK |
4868 NFS_INO_INVALID_DATA);
4869 nfs4_update_changeattr(new_dir, &res->new_cinfo,
4870 res->new_fattr->time_start,
4871 NFS_INO_INVALID_NLINK |
4872 NFS_INO_INVALID_DATA);
4873 } else
4874 nfs4_update_changeattr(old_dir, &res->old_cinfo,
4875 res->old_fattr->time_start,
4876 NFS_INO_INVALID_DATA);
4877 }
4878 return 1;
4879 }
4880
4881 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
4882 {
4883 struct nfs_server *server = NFS_SERVER(inode);
4884 __u32 bitmask[NFS4_BITMASK_SZ];
4885 struct nfs4_link_arg arg = {
4886 .fh = NFS_FH(inode),
4887 .dir_fh = NFS_FH(dir),
4888 .name = name,
4889 .bitmask = bitmask,
4890 };
4891 struct nfs4_link_res res = {
4892 .server = server,
4893 };
4894 struct rpc_message msg = {
4895 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
4896 .rpc_argp = &arg,
4897 .rpc_resp = &res,
4898 };
4899 int status = -ENOMEM;
4900
4901 res.fattr = nfs_alloc_fattr_with_label(server);
4902 if (res.fattr == NULL)
4903 goto out;
4904
4905 nfs4_inode_make_writeable(inode);
4906 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, res.fattr->label), inode,
4907 NFS_INO_INVALID_CHANGE);
4908 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4909 if (!status) {
4910 nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start,
4911 NFS_INO_INVALID_DATA);
4912 nfs4_inc_nlink(inode);
4913 status = nfs_post_op_update_inode(inode, res.fattr);
4914 if (!status)
4915 nfs_setsecurity(inode, res.fattr);
4916 }
4917
4918 out:
4919 nfs_free_fattr(res.fattr);
4920 return status;
4921 }
4922
4923 static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
4924 {
4925 struct nfs4_exception exception = {
4926 .interruptible = true,
4927 };
4928 int err;
4929 do {
4930 err = nfs4_handle_exception(NFS_SERVER(inode),
4931 _nfs4_proc_link(inode, dir, name),
4932 &exception);
4933 } while (exception.retry);
4934 return err;
4935 }
4936
4937 struct nfs4_createdata {
4938 struct rpc_message msg;
4939 struct nfs4_create_arg arg;
4940 struct nfs4_create_res res;
4941 struct nfs_fh fh;
4942 struct nfs_fattr fattr;
4943 };
4944
4945 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
4946 const struct qstr *name, struct iattr *sattr, u32 ftype)
4947 {
4948 struct nfs4_createdata *data;
4949
4950 data = kzalloc(sizeof(*data), GFP_KERNEL);
4951 if (data != NULL) {
4952 struct nfs_server *server = NFS_SERVER(dir);
4953
4954 data->fattr.label = nfs4_label_alloc(server, GFP_KERNEL);
4955 if (IS_ERR(data->fattr.label))
4956 goto out_free;
4957
4958 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
4959 data->msg.rpc_argp = &data->arg;
4960 data->msg.rpc_resp = &data->res;
4961 data->arg.dir_fh = NFS_FH(dir);
4962 data->arg.server = server;
4963 data->arg.name = name;
4964 data->arg.attrs = sattr;
4965 data->arg.ftype = ftype;
4966 data->arg.bitmask = nfs4_bitmask(server, data->fattr.label);
4967 data->arg.umask = current_umask();
4968 data->res.server = server;
4969 data->res.fh = &data->fh;
4970 data->res.fattr = &data->fattr;
4971 nfs_fattr_init(data->res.fattr);
4972 }
4973 return data;
4974 out_free:
4975 kfree(data);
4976 return NULL;
4977 }
4978
4979 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
4980 {
4981 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
4982 &data->arg.seq_args, &data->res.seq_res, 1);
4983 if (status == 0) {
4984 spin_lock(&dir->i_lock);
4985 /* Creating a directory bumps nlink in the parent */
4986 if (data->arg.ftype == NF4DIR)
4987 nfs4_inc_nlink_locked(dir);
4988 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo,
4989 data->res.fattr->time_start,
4990 NFS_INO_INVALID_DATA);
4991 spin_unlock(&dir->i_lock);
4992 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
4993 }
4994 return status;
4995 }
4996
4997 static void nfs4_free_createdata(struct nfs4_createdata *data)
4998 {
4999 nfs4_label_free(data->fattr.label);
5000 kfree(data);
5001 }
5002
5003 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
5004 struct page *page, unsigned int len, struct iattr *sattr,
5005 struct nfs4_label *label)
5006 {
5007 struct nfs4_createdata *data;
5008 int status = -ENAMETOOLONG;
5009
5010 if (len > NFS4_MAXPATHLEN)
5011 goto out;
5012
5013 status = -ENOMEM;
5014 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
5015 if (data == NULL)
5016 goto out;
5017
5018 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
5019 data->arg.u.symlink.pages = &page;
5020 data->arg.u.symlink.len = len;
5021 data->arg.label = label;
5022
5023 status = nfs4_do_create(dir, dentry, data);
5024
5025 nfs4_free_createdata(data);
5026 out:
5027 return status;
5028 }
5029
5030 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
5031 struct page *page, unsigned int len, struct iattr *sattr)
5032 {
5033 struct nfs4_exception exception = {
5034 .interruptible = true,
5035 };
5036 struct nfs4_label l, *label = NULL;
5037 int err;
5038
5039 label = nfs4_label_init_security(dir, dentry, sattr, &l);
5040
5041 do {
5042 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
5043 trace_nfs4_symlink(dir, &dentry->d_name, err);
5044 err = nfs4_handle_exception(NFS_SERVER(dir), err,
5045 &exception);
5046 } while (exception.retry);
5047
5048 nfs4_label_release_security(label);
5049 return err;
5050 }
5051
5052 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
5053 struct iattr *sattr, struct nfs4_label *label)
5054 {
5055 struct nfs4_createdata *data;
5056 int status = -ENOMEM;
5057
5058 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
5059 if (data == NULL)
5060 goto out;
5061
5062 data->arg.label = label;
5063 status = nfs4_do_create(dir, dentry, data);
5064
5065 nfs4_free_createdata(data);
5066 out:
5067 return status;
5068 }
5069
5070 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
5071 struct iattr *sattr)
5072 {
5073 struct nfs_server *server = NFS_SERVER(dir);
5074 struct nfs4_exception exception = {
5075 .interruptible = true,
5076 };
5077 struct nfs4_label l, *label = NULL;
5078 int err;
5079
5080 label = nfs4_label_init_security(dir, dentry, sattr, &l);
5081
5082 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
5083 sattr->ia_mode &= ~current_umask();
5084 do {
5085 err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
5086 trace_nfs4_mkdir(dir, &dentry->d_name, err);
5087 err = nfs4_handle_exception(NFS_SERVER(dir), err,
5088 &exception);
5089 } while (exception.retry);
5090 nfs4_label_release_security(label);
5091
5092 return err;
5093 }
5094
5095 static int _nfs4_proc_readdir(struct nfs_readdir_arg *nr_arg,
5096 struct nfs_readdir_res *nr_res)
5097 {
5098 struct inode *dir = d_inode(nr_arg->dentry);
5099 struct nfs_server *server = NFS_SERVER(dir);
5100 struct nfs4_readdir_arg args = {
5101 .fh = NFS_FH(dir),
5102 .pages = nr_arg->pages,
5103 .pgbase = 0,
5104 .count = nr_arg->page_len,
5105 .plus = nr_arg->plus,
5106 };
5107 struct nfs4_readdir_res res;
5108 struct rpc_message msg = {
5109 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
5110 .rpc_argp = &args,
5111 .rpc_resp = &res,
5112 .rpc_cred = nr_arg->cred,
5113 };
5114 int status;
5115
5116 dprintk("%s: dentry = %pd2, cookie = %llu\n", __func__,
5117 nr_arg->dentry, (unsigned long long)nr_arg->cookie);
5118 if (!(server->caps & NFS_CAP_SECURITY_LABEL))
5119 args.bitmask = server->attr_bitmask_nl;
5120 else
5121 args.bitmask = server->attr_bitmask;
5122
5123 nfs4_setup_readdir(nr_arg->cookie, nr_arg->verf, nr_arg->dentry, &args);
5124 res.pgbase = args.pgbase;
5125 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
5126 &res.seq_res, 0);
5127 if (status >= 0) {
5128 memcpy(nr_res->verf, res.verifier.data, NFS4_VERIFIER_SIZE);
5129 status += args.pgbase;
5130 }
5131
5132 nfs_invalidate_atime(dir);
5133
5134 dprintk("%s: returns %d\n", __func__, status);
5135 return status;
5136 }
5137
5138 static int nfs4_proc_readdir(struct nfs_readdir_arg *arg,
5139 struct nfs_readdir_res *res)
5140 {
5141 struct nfs4_exception exception = {
5142 .interruptible = true,
5143 };
5144 int err;
5145 do {
5146 err = _nfs4_proc_readdir(arg, res);
5147 trace_nfs4_readdir(d_inode(arg->dentry), err);
5148 err = nfs4_handle_exception(NFS_SERVER(d_inode(arg->dentry)),
5149 err, &exception);
5150 } while (exception.retry);
5151 return err;
5152 }
5153
5154 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
5155 struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
5156 {
5157 struct nfs4_createdata *data;
5158 int mode = sattr->ia_mode;
5159 int status = -ENOMEM;
5160
5161 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
5162 if (data == NULL)
5163 goto out;
5164
5165 if (S_ISFIFO(mode))
5166 data->arg.ftype = NF4FIFO;
5167 else if (S_ISBLK(mode)) {
5168 data->arg.ftype = NF4BLK;
5169 data->arg.u.device.specdata1 = MAJOR(rdev);
5170 data->arg.u.device.specdata2 = MINOR(rdev);
5171 }
5172 else if (S_ISCHR(mode)) {
5173 data->arg.ftype = NF4CHR;
5174 data->arg.u.device.specdata1 = MAJOR(rdev);
5175 data->arg.u.device.specdata2 = MINOR(rdev);
5176 } else if (!S_ISSOCK(mode)) {
5177 status = -EINVAL;
5178 goto out_free;
5179 }
5180
5181 data->arg.label = label;
5182 status = nfs4_do_create(dir, dentry, data);
5183 out_free:
5184 nfs4_free_createdata(data);
5185 out:
5186 return status;
5187 }
5188
5189 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
5190 struct iattr *sattr, dev_t rdev)
5191 {
5192 struct nfs_server *server = NFS_SERVER(dir);
5193 struct nfs4_exception exception = {
5194 .interruptible = true,
5195 };
5196 struct nfs4_label l, *label = NULL;
5197 int err;
5198
5199 label = nfs4_label_init_security(dir, dentry, sattr, &l);
5200
5201 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
5202 sattr->ia_mode &= ~current_umask();
5203 do {
5204 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
5205 trace_nfs4_mknod(dir, &dentry->d_name, err);
5206 err = nfs4_handle_exception(NFS_SERVER(dir), err,
5207 &exception);
5208 } while (exception.retry);
5209
5210 nfs4_label_release_security(label);
5211
5212 return err;
5213 }
5214
5215 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
5216 struct nfs_fsstat *fsstat)
5217 {
5218 struct nfs4_statfs_arg args = {
5219 .fh = fhandle,
5220 .bitmask = server->attr_bitmask,
5221 };
5222 struct nfs4_statfs_res res = {
5223 .fsstat = fsstat,
5224 };
5225 struct rpc_message msg = {
5226 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
5227 .rpc_argp = &args,
5228 .rpc_resp = &res,
5229 };
5230
5231 nfs_fattr_init(fsstat->fattr);
5232 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5233 }
5234
5235 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
5236 {
5237 struct nfs4_exception exception = {
5238 .interruptible = true,
5239 };
5240 int err;
5241 do {
5242 err = nfs4_handle_exception(server,
5243 _nfs4_proc_statfs(server, fhandle, fsstat),
5244 &exception);
5245 } while (exception.retry);
5246 return err;
5247 }
5248
5249 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
5250 struct nfs_fsinfo *fsinfo)
5251 {
5252 struct nfs4_fsinfo_arg args = {
5253 .fh = fhandle,
5254 .bitmask = server->attr_bitmask,
5255 };
5256 struct nfs4_fsinfo_res res = {
5257 .fsinfo = fsinfo,
5258 };
5259 struct rpc_message msg = {
5260 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
5261 .rpc_argp = &args,
5262 .rpc_resp = &res,
5263 };
5264
5265 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5266 }
5267
5268 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
5269 {
5270 struct nfs4_exception exception = {
5271 .interruptible = true,
5272 };
5273 int err;
5274
5275 do {
5276 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
5277 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
5278 if (err == 0) {
5279 nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ);
5280 break;
5281 }
5282 err = nfs4_handle_exception(server, err, &exception);
5283 } while (exception.retry);
5284 return err;
5285 }
5286
5287 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
5288 {
5289 int error;
5290
5291 nfs_fattr_init(fsinfo->fattr);
5292 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
5293 if (error == 0) {
5294 /* block layout checks this! */
5295 server->pnfs_blksize = fsinfo->blksize;
5296 set_pnfs_layoutdriver(server, fhandle, fsinfo);
5297 }
5298
5299 return error;
5300 }
5301
5302 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
5303 struct nfs_pathconf *pathconf)
5304 {
5305 struct nfs4_pathconf_arg args = {
5306 .fh = fhandle,
5307 .bitmask = server->attr_bitmask,
5308 };
5309 struct nfs4_pathconf_res res = {
5310 .pathconf = pathconf,
5311 };
5312 struct rpc_message msg = {
5313 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
5314 .rpc_argp = &args,
5315 .rpc_resp = &res,
5316 };
5317
5318 /* None of the pathconf attributes are mandatory to implement */
5319 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
5320 memset(pathconf, 0, sizeof(*pathconf));
5321 return 0;
5322 }
5323
5324 nfs_fattr_init(pathconf->fattr);
5325 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5326 }
5327
5328 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
5329 struct nfs_pathconf *pathconf)
5330 {
5331 struct nfs4_exception exception = {
5332 .interruptible = true,
5333 };
5334 int err;
5335
5336 do {
5337 err = nfs4_handle_exception(server,
5338 _nfs4_proc_pathconf(server, fhandle, pathconf),
5339 &exception);
5340 } while (exception.retry);
5341 return err;
5342 }
5343
5344 int nfs4_set_rw_stateid(nfs4_stateid *stateid,
5345 const struct nfs_open_context *ctx,
5346 const struct nfs_lock_context *l_ctx,
5347 fmode_t fmode)
5348 {
5349 return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL);
5350 }
5351 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
5352
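/*
 * Check whether @stateid still matches the stateid currently selected
 * for I/O on this open/lock context.  A lost lock (-EIO from
 * nfs4_set_rw_stateid()) is treated as "current" so that the caller
 * does not restart the call.
 */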
5353 static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
5354 const struct nfs_open_context *ctx,
5355 const struct nfs_lock_context *l_ctx,
5356 fmode_t fmode)
5357 {
5358 nfs4_stateid _current_stateid;
5359
5360 /* If the current stateid represents a lost lock, then exit */
5361 if (nfs4_set_rw_stateid(&_current_stateid, ctx, l_ctx, fmode) == -EIO)
5362 return true;
5363 return nfs4_stateid_match(stateid, &_current_stateid);
5364 }
5365
5366 static bool nfs4_error_stateid_expired(int err)
5367 {
5368 switch (err) {
5369 case -NFS4ERR_DELEG_REVOKED:
5370 case -NFS4ERR_ADMIN_REVOKED:
5371 case -NFS4ERR_BAD_STATEID:
5372 case -NFS4ERR_STALE_STATEID:
5373 case -NFS4ERR_OLD_STATEID:
5374 case -NFS4ERR_OPENMODE:
5375 case -NFS4ERR_EXPIRED:
5376 return true;
5377 }
5378 return false;
5379 }
5380
5381 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
5382 {
5383 struct nfs_server *server = NFS_SERVER(hdr->inode);
5384
5385 trace_nfs4_read(hdr, task->tk_status);
5386 if (task->tk_status < 0) {
5387 struct nfs4_exception exception = {
5388 .inode = hdr->inode,
5389 .state = hdr->args.context->state,
5390 .stateid = &hdr->args.stateid,
5391 };
5392 task->tk_status = nfs4_async_handle_exception(task,
5393 server, task->tk_status, &exception);
5394 if (exception.retry) {
5395 rpc_restart_call_prepare(task);
5396 return -EAGAIN;
5397 }
5398 }
5399
5400 if (task->tk_status > 0)
5401 renew_lease(server, hdr->timestamp);
5402 return 0;
5403 }
5404
5405 static bool nfs4_read_stateid_changed(struct rpc_task *task,
5406 struct nfs_pgio_args *args)
5407 {
5408
5409 if (!nfs4_error_stateid_expired(task->tk_status) ||
5410 nfs4_stateid_is_current(&args->stateid,
5411 args->context,
5412 args->lock_context,
5413 FMODE_READ))
5414 return false;
5415 rpc_restart_call_prepare(task);
5416 return true;
5417 }
5418
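/*
 * If the server rejected READ_PLUS with -ENOTSUPP, clear
 * NFS_CAP_READ_PLUS, rewrite the RPC message to use plain READ, and
 * restart the call.
 */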
5419 static bool nfs4_read_plus_not_supported(struct rpc_task *task,
5420 struct nfs_pgio_header *hdr)
5421 {
5422 struct nfs_server *server = NFS_SERVER(hdr->inode);
5423 struct rpc_message *msg = &task->tk_msg;
5424
5425 if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] &&
5426 server->caps & NFS_CAP_READ_PLUS && task->tk_status == -ENOTSUPP) {
5427 server->caps &= ~NFS_CAP_READ_PLUS;
5428 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
5429 rpc_restart_call_prepare(task);
5430 return true;
5431 }
5432 return false;
5433 }
5434
5435 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
5436 {
5437 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
5438 return -EAGAIN;
5439 if (nfs4_read_stateid_changed(task, &hdr->args))
5440 return -EAGAIN;
5441 if (nfs4_read_plus_not_supported(task, hdr))
5442 return -EAGAIN;
5443 if (task->tk_status > 0)
5444 nfs_invalidate_atime(hdr->inode);
5445 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
5446 nfs4_read_done_cb(task, hdr);
5447 }
5448
5449 #if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS
5450 static void nfs42_read_plus_support(struct nfs_pgio_header *hdr,
5451 struct rpc_message *msg)
5452 {
5453 /* Note: We don't use READ_PLUS with pNFS yet */
5454 if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp)
5455 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS];
5456 }
5457 #else
5458 static void nfs42_read_plus_support(struct nfs_pgio_header *hdr,
5459 struct rpc_message *msg)
5460 {
5461 }
5462 #endif /* CONFIG_NFS_V4_2 && CONFIG_NFS_V4_2_READ_PLUS */
5463
5464 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
5465 struct rpc_message *msg)
5466 {
5467 hdr->timestamp = jiffies;
5468 if (!hdr->pgio_done_cb)
5469 hdr->pgio_done_cb = nfs4_read_done_cb;
5470 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
5471 nfs42_read_plus_support(hdr, msg);
5472 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
5473 }
5474
5475 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
5476 struct nfs_pgio_header *hdr)
5477 {
5478 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client,
5479 &hdr->args.seq_args,
5480 &hdr->res.seq_res,
5481 task))
5482 return 0;
5483 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
5484 hdr->args.lock_context,
5485 hdr->rw_mode) == -EIO)
5486 return -EIO;
5487 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
5488 return -EIO;
5489 return 0;
5490 }
5491
5492 static int nfs4_write_done_cb(struct rpc_task *task,
5493 struct nfs_pgio_header *hdr)
5494 {
5495 struct inode *inode = hdr->inode;
5496
5497 trace_nfs4_write(hdr, task->tk_status);
5498 if (task->tk_status < 0) {
5499 struct nfs4_exception exception = {
5500 .inode = hdr->inode,
5501 .state = hdr->args.context->state,
5502 .stateid = &hdr->args.stateid,
5503 };
5504 task->tk_status = nfs4_async_handle_exception(task,
5505 NFS_SERVER(inode), task->tk_status,
5506 &exception);
5507 if (exception.retry) {
5508 rpc_restart_call_prepare(task);
5509 return -EAGAIN;
5510 }
5511 }
5512 if (task->tk_status >= 0) {
5513 renew_lease(NFS_SERVER(inode), hdr->timestamp);
5514 nfs_writeback_update_inode(hdr);
5515 }
5516 return 0;
5517 }
5518
5519 static bool nfs4_write_stateid_changed(struct rpc_task *task,
5520 struct nfs_pgio_args *args)
5521 {
5522
5523 if (!nfs4_error_stateid_expired(task->tk_status) ||
5524 nfs4_stateid_is_current(&args->stateid,
5525 args->context,
5526 args->lock_context,
5527 FMODE_WRITE))
5528 return false;
5529 rpc_restart_call_prepare(task);
5530 return true;
5531 }
5532
5533 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
5534 {
5535 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
5536 return -EAGAIN;
5537 if (nfs4_write_stateid_changed(task, &hdr->args))
5538 return -EAGAIN;
5539 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
5540 nfs4_write_done_cb(task, hdr);
5541 }
5542
5543 static
5544 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
5545 {
5546 /* Don't request attributes for pNFS or O_DIRECT writes */
5547 if (hdr->ds_clp != NULL || hdr->dreq != NULL)
5548 return false;
5549 /* Otherwise, request attributes if and only if we don't hold
5550 * a delegation
5551 */
5552 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
5553 }
5554
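/*
 * Build a GETATTR bitmask from @src plus whatever attributes the
 * inode's cache_validity flags say need revalidating, then clamp the
 * result to the attributes this server actually supports.
 */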
5555 void nfs4_bitmask_set(__u32 bitmask[], const __u32 src[],
5556 struct inode *inode, unsigned long cache_validity)
5557 {
5558 struct nfs_server *server = NFS_SERVER(inode);
5559 unsigned int i;
5560
5561 memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ);
5562 cache_validity |= READ_ONCE(NFS_I(inode)->cache_validity);
5563
5564 if (cache_validity & NFS_INO_INVALID_CHANGE)
5565 bitmask[0] |= FATTR4_WORD0_CHANGE;
5566 if (cache_validity & NFS_INO_INVALID_ATIME)
5567 bitmask[1] |= FATTR4_WORD1_TIME_ACCESS;
5568 if (cache_validity & NFS_INO_INVALID_MODE)
5569 bitmask[1] |= FATTR4_WORD1_MODE;
5570 if (cache_validity & NFS_INO_INVALID_OTHER)
5571 bitmask[1] |= FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP;
5572 if (cache_validity & NFS_INO_INVALID_NLINK)
5573 bitmask[1] |= FATTR4_WORD1_NUMLINKS;
5574 if (cache_validity & NFS_INO_INVALID_CTIME)
5575 bitmask[1] |= FATTR4_WORD1_TIME_METADATA;
5576 if (cache_validity & NFS_INO_INVALID_MTIME)
5577 bitmask[1] |= FATTR4_WORD1_TIME_MODIFY;
5578 if (cache_validity & NFS_INO_INVALID_BLOCKS)
5579 bitmask[1] |= FATTR4_WORD1_SPACE_USED;
5580
5581 if (cache_validity & NFS_INO_INVALID_SIZE)
5582 bitmask[0] |= FATTR4_WORD0_SIZE;
5583
5584 for (i = 0; i < NFS4_BITMASK_SZ; i++)
5585 bitmask[i] &= server->attr_bitmask[i];
5586 }
5587
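/*
 * Set up a WRITE call.  Post-write attributes are requested only when
 * cache consistency data is needed (no pNFS, no O_DIRECT, no
 * delegation held); otherwise the attribute bitmask and fattr pointer
 * are cleared.
 */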
5588 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
5589 struct rpc_message *msg,
5590 struct rpc_clnt **clnt)
5591 {
5592 struct nfs_server *server = NFS_SERVER(hdr->inode);
5593
5594 if (!nfs4_write_need_cache_consistency_data(hdr)) {
5595 hdr->args.bitmask = NULL;
5596 hdr->res.fattr = NULL;
5597 } else {
5598 nfs4_bitmask_set(hdr->args.bitmask_store,
5599 server->cache_consistency_bitmask,
5600 hdr->inode, NFS_INO_INVALID_BLOCKS);
5601 hdr->args.bitmask = hdr->args.bitmask_store;
5602 }
5603
5604 if (!hdr->pgio_done_cb)
5605 hdr->pgio_done_cb = nfs4_write_done_cb;
5606 hdr->res.server = server;
5607 hdr->timestamp = jiffies;
5608
5609 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
5610 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
5611 nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
5612 }
5613
5614 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
5615 {
5616 nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
5617 &data->args.seq_args,
5618 &data->res.seq_res,
5619 task);
5620 }
5621
5622 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
5623 {
5624 struct inode *inode = data->inode;
5625
5626 trace_nfs4_commit(data, task->tk_status);
5627 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
5628 NULL, NULL) == -EAGAIN) {
5629 rpc_restart_call_prepare(task);
5630 return -EAGAIN;
5631 }
5632 return 0;
5633 }
5634
5635 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
5636 {
5637 if (!nfs4_sequence_done(task, &data->res.seq_res))
5638 return -EAGAIN;
5639 return data->commit_done_cb(task, data);
5640 }
5641
5642 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg,
5643 struct rpc_clnt **clnt)
5644 {
5645 struct nfs_server *server = NFS_SERVER(data->inode);
5646
5647 if (data->commit_done_cb == NULL)
5648 data->commit_done_cb = nfs4_commit_done_cb;
5649 data->res.server = server;
5650 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
5651 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
5652 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
5653 }
5654
5655 static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args,
5656 struct nfs_commitres *res)
5657 {
5658 struct inode *dst_inode = file_inode(dst);
5659 struct nfs_server *server = NFS_SERVER(dst_inode);
5660 struct rpc_message msg = {
5661 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT],
5662 .rpc_argp = args,
5663 .rpc_resp = res,
5664 };
5665
5666 args->fh = NFS_FH(dst_inode);
5667 return nfs4_call_sync(server->client, server, &msg,
5668 &args->seq_args, &res->seq_res, 1);
5669 }
5670
5671 int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res)
5672 {
5673 struct nfs_commitargs args = {
5674 .offset = offset,
5675 .count = count,
5676 };
5677 struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
5678 struct nfs4_exception exception = { };
5679 int status;
5680
5681 do {
5682 status = _nfs4_proc_commit(dst, &args, res);
5683 status = nfs4_handle_exception(dst_server, status, &exception);
5684 } while (exception.retry);
5685
5686 return status;
5687 }
5688
5689 struct nfs4_renewdata {
5690 struct nfs_client *client;
5691 unsigned long timestamp;
5692 };
5693
5694 /*
5695 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
5696 * standalone procedure for queueing an asynchronous RENEW.
5697 */
5698 static void nfs4_renew_release(void *calldata)
5699 {
5700 struct nfs4_renewdata *data = calldata;
5701 struct nfs_client *clp = data->client;
5702
5703 if (refcount_read(&clp->cl_count) > 1)
5704 nfs4_schedule_state_renewal(clp);
5705 nfs_put_client(clp);
5706 kfree(data);
5707 }
5708
5709 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
5710 {
5711 struct nfs4_renewdata *data = calldata;
5712 struct nfs_client *clp = data->client;
5713 unsigned long timestamp = data->timestamp;
5714
5715 trace_nfs4_renew_async(clp, task->tk_status);
5716 switch (task->tk_status) {
5717 case 0:
5718 break;
5719 case -NFS4ERR_LEASE_MOVED:
5720 nfs4_schedule_lease_moved_recovery(clp);
5721 break;
5722 default:
5723 /* Unless we're shutting down, schedule state recovery! */
5724 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
5725 return;
5726 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
5727 nfs4_schedule_lease_recovery(clp);
5728 return;
5729 }
5730 nfs4_schedule_path_down_recovery(clp);
5731 }
5732 do_renew_lease(clp, timestamp);
5733 }
5734
5735 static const struct rpc_call_ops nfs4_renew_ops = {
5736 .rpc_call_done = nfs4_renew_done,
5737 .rpc_release = nfs4_renew_release,
5738 };
5739
5740 static int nfs4_proc_async_renew(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
5741 {
5742 struct rpc_message msg = {
5743 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
5744 .rpc_argp = clp,
5745 .rpc_cred = cred,
5746 };
5747 struct nfs4_renewdata *data;
5748
5749 if (renew_flags == 0)
5750 return 0;
5751 if (!refcount_inc_not_zero(&clp->cl_count))
5752 return -EIO;
5753 data = kmalloc(sizeof(*data), GFP_NOFS);
5754 if (data == NULL) {
5755 nfs_put_client(clp);
5756 return -ENOMEM;
5757 }
5758 data->client = clp;
5759 data->timestamp = jiffies;
5760 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
5761 &nfs4_renew_ops, data);
5762 }
5763
5764 static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred)
5765 {
5766 struct rpc_message msg = {
5767 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
5768 .rpc_argp = clp,
5769 .rpc_cred = cred,
5770 };
5771 unsigned long now = jiffies;
5772 int status;
5773
5774 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5775 if (status < 0)
5776 return status;
5777 do_renew_lease(clp, now);
5778 return 0;
5779 }
5780
5781 static bool nfs4_server_supports_acls(const struct nfs_server *server,
5782 enum nfs4_acl_type type)
5783 {
5784 switch (type) {
5785 default:
5786 return server->attr_bitmask[0] & FATTR4_WORD0_ACL;
5787 case NFS4ACL_DACL:
5788 return server->attr_bitmask[1] & FATTR4_WORD1_DACL;
5789 case NFS4ACL_SACL:
5790 return server->attr_bitmask[1] & FATTR4_WORD1_SACL;
5791 }
5792 }
5793
5794 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
5795 * it's OK to put sizeof(struct page *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
5796 * the stack.
5797 */
5798 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
5799
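/*
 * Copy a flat buffer into freshly allocated pages.  Returns the number
 * of pages filled, or -ENOMEM after freeing any pages already
 * allocated.
 */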
5800 int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen,
5801 struct page **pages)
5802 {
5803 struct page *newpage, **spages;
5804 int rc = 0;
5805 size_t len;
5806 spages = pages;
5807
5808 do {
5809 len = min_t(size_t, PAGE_SIZE, buflen);
5810 newpage = alloc_page(GFP_KERNEL);
5811
5812 if (newpage == NULL)
5813 goto unwind;
5814 memcpy(page_address(newpage), buf, len);
5815 buf += len;
5816 buflen -= len;
5817 *pages++ = newpage;
5818 rc++;
5819 } while (buflen != 0);
5820
5821 return rc;
5822
5823 unwind:
5824 for(; rc > 0; rc--)
5825 __free_page(spages[rc-1]);
5826 return -ENOMEM;
5827 }
5828
5829 struct nfs4_cached_acl {
5830 enum nfs4_acl_type type;
5831 int cached;
5832 size_t len;
5833 char data[];
5834 };
5835
5836 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
5837 {
5838 struct nfs_inode *nfsi = NFS_I(inode);
5839
5840 spin_lock(&inode->i_lock);
5841 kfree(nfsi->nfs4_acl);
5842 nfsi->nfs4_acl = acl;
5843 spin_unlock(&inode->i_lock);
5844 }
5845
5846 static void nfs4_zap_acl_attr(struct inode *inode)
5847 {
5848 nfs4_set_cached_acl(inode, NULL);
5849 }
5850
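/*
 * Read a cached ACL of the requested type.  A NULL @buf just returns
 * the cached length; otherwise returns -ERANGE if @buflen is too
 * small, or -ENOENT when no ACL (or only its length) is cached.
 */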
5851 static ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf,
5852 size_t buflen, enum nfs4_acl_type type)
5853 {
5854 struct nfs_inode *nfsi = NFS_I(inode);
5855 struct nfs4_cached_acl *acl;
5856 int ret = -ENOENT;
5857
5858 spin_lock(&inode->i_lock);
5859 acl = nfsi->nfs4_acl;
5860 if (acl == NULL)
5861 goto out;
5862 if (acl->type != type)
5863 goto out;
5864 if (buf == NULL) /* user is just asking for length */
5865 goto out_len;
5866 if (acl->cached == 0)
5867 goto out;
5868 ret = -ERANGE; /* see getxattr(2) man page */
5869 if (acl->len > buflen)
5870 goto out;
5871 memcpy(buf, acl->data, acl->len);
5872 out_len:
5873 ret = acl->len;
5874 out:
5875 spin_unlock(&inode->i_lock);
5876 return ret;
5877 }
5878
5879 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages,
5880 size_t pgbase, size_t acl_len,
5881 enum nfs4_acl_type type)
5882 {
5883 struct nfs4_cached_acl *acl;
5884 size_t buflen = sizeof(*acl) + acl_len;
5885
5886 if (buflen <= PAGE_SIZE) {
5887 acl = kmalloc(buflen, GFP_KERNEL);
5888 if (acl == NULL)
5889 goto out;
5890 acl->cached = 1;
5891 _copy_from_pages(acl->data, pages, pgbase, acl_len);
5892 } else {
5893 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
5894 if (acl == NULL)
5895 goto out;
5896 acl->cached = 0;
5897 }
5898 acl->type = type;
5899 acl->len = acl_len;
5900 out:
5901 nfs4_set_cached_acl(inode, acl);
5902 }
5903
5904 /*
5905 * The getxattr API returns the required buffer length when called with a
5906 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
5907 * the required buf. On a NULL buf, we send a page of data to the server
5908 * guessing that the ACL request can be serviced by a page. If so, we cache
5909 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
5910 * the cache. If not, we throw away the page and cache only the required
5911 * length. The next getxattr call will then produce another round trip to
5912 * the server, this time with the input buf of the required size.
5913 */
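/*
 * For illustration only (not part of this file): from user space the
 * two-call pattern described above looks roughly like the sketch
 * below, where "path" is a placeholder and "system.nfs4_acl" is the
 * xattr name handled by this code:
 *
 *	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	if (len > 0) {
 *		void *acl = malloc(len);
 *		if (acl)
 *			len = getxattr(path, "system.nfs4_acl", acl, len);
 *	}
 */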
5914 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf,
5915 size_t buflen, enum nfs4_acl_type type)
5916 {
5917 struct page **pages;
5918 struct nfs_getaclargs args = {
5919 .fh = NFS_FH(inode),
5920 .acl_type = type,
5921 .acl_len = buflen,
5922 };
5923 struct nfs_getaclres res = {
5924 .acl_type = type,
5925 .acl_len = buflen,
5926 };
5927 struct rpc_message msg = {
5928 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
5929 .rpc_argp = &args,
5930 .rpc_resp = &res,
5931 };
5932 unsigned int npages;
5933 int ret = -ENOMEM, i;
5934 struct nfs_server *server = NFS_SERVER(inode);
5935
5936 if (buflen == 0)
5937 buflen = server->rsize;
5938
5939 npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
5940 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
5941 if (!pages)
5942 return -ENOMEM;
5943
5944 args.acl_pages = pages;
5945
5946 for (i = 0; i < npages; i++) {
5947 pages[i] = alloc_page(GFP_KERNEL);
5948 if (!pages[i])
5949 goto out_free;
5950 }
5951
5952 /* for decoding across pages */
5953 res.acl_scratch = alloc_page(GFP_KERNEL);
5954 if (!res.acl_scratch)
5955 goto out_free;
5956
5957 args.acl_len = npages * PAGE_SIZE;
5958
5959 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
5960 __func__, buf, buflen, npages, args.acl_len);
5961 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
5962 &msg, &args.seq_args, &res.seq_res, 0);
5963 if (ret)
5964 goto out_free;
5965
5966 /* Handle the case where the passed-in buffer is too short */
5967 if (res.acl_flags & NFS4_ACL_TRUNC) {
5968 /* Did the user only issue a request for the acl length? */
5969 if (buf == NULL)
5970 goto out_ok;
5971 ret = -ERANGE;
5972 goto out_free;
5973 }
5974 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len,
5975 type);
5976 if (buf) {
5977 if (res.acl_len > buflen) {
5978 ret = -ERANGE;
5979 goto out_free;
5980 }
5981 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
5982 }
5983 out_ok:
5984 ret = res.acl_len;
5985 out_free:
5986 for (i = 0; i < npages; i++)
5987 if (pages[i])
5988 __free_page(pages[i]);
5989 if (res.acl_scratch)
5990 __free_page(res.acl_scratch);
5991 kfree(pages);
5992 return ret;
5993 }
5994
5995 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf,
5996 size_t buflen, enum nfs4_acl_type type)
5997 {
5998 struct nfs4_exception exception = {
5999 .interruptible = true,
6000 };
6001 ssize_t ret;
6002 do {
6003 ret = __nfs4_get_acl_uncached(inode, buf, buflen, type);
6004 trace_nfs4_get_acl(inode, ret);
6005 if (ret >= 0)
6006 break;
6007 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
6008 } while (exception.retry);
6009 return ret;
6010 }
6011
6012 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen,
6013 enum nfs4_acl_type type)
6014 {
6015 struct nfs_server *server = NFS_SERVER(inode);
6016 int ret;
6017
6018 if (!nfs4_server_supports_acls(server, type))
6019 return -EOPNOTSUPP;
6020 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
6021 if (ret < 0)
6022 return ret;
6023 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
6024 nfs_zap_acl_cache(inode);
6025 ret = nfs4_read_cached_acl(inode, buf, buflen, type);
6026 if (ret != -ENOENT)
6027 /* -ENOENT is returned if there is no ACL or if there is an ACL
6028 * but no cached acl data, just the acl length */
6029 return ret;
6030 return nfs4_get_acl_uncached(inode, buf, buflen, type);
6031 }
6032
6033 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf,
6034 size_t buflen, enum nfs4_acl_type type)
6035 {
6036 struct nfs_server *server = NFS_SERVER(inode);
6037 struct page *pages[NFS4ACL_MAXPAGES];
6038 struct nfs_setaclargs arg = {
6039 .fh = NFS_FH(inode),
6040 .acl_type = type,
6041 .acl_len = buflen,
6042 .acl_pages = pages,
6043 };
6044 struct nfs_setaclres res;
6045 struct rpc_message msg = {
6046 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
6047 .rpc_argp = &arg,
6048 .rpc_resp = &res,
6049 };
6050 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
6051 int ret, i;
6052
6053 /* You can't remove system.nfs4_acl: */
6054 if (buflen == 0)
6055 return -EINVAL;
6056 if (!nfs4_server_supports_acls(server, type))
6057 return -EOPNOTSUPP;
6058 if (npages > ARRAY_SIZE(pages))
6059 return -ERANGE;
6060 i = nfs4_buf_to_pages_noslab(buf, buflen, arg.acl_pages);
6061 if (i < 0)
6062 return i;
6063 nfs4_inode_make_writeable(inode);
6064 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6065
6066 /*
6067 * Free each page after tx, so the only ref left is
6068 * held by the network stack
6069 */
6070 for (; i > 0; i--)
6071 put_page(pages[i-1]);
6072
6073 /*
6074 * An ACL update can result in an inode attribute update,
6075 * so mark the attribute cache invalid.
6076 */
6077 spin_lock(&inode->i_lock);
6078 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
6079 NFS_INO_INVALID_CTIME |
6080 NFS_INO_REVAL_FORCED);
6081 spin_unlock(&inode->i_lock);
6082 nfs_access_zap_cache(inode);
6083 nfs_zap_acl_cache(inode);
6084 return ret;
6085 }
6086
6087 static int nfs4_proc_set_acl(struct inode *inode, const void *buf,
6088 size_t buflen, enum nfs4_acl_type type)
6089 {
6090 struct nfs4_exception exception = { };
6091 int err;
6092 do {
6093 err = __nfs4_proc_set_acl(inode, buf, buflen, type);
6094 trace_nfs4_set_acl(inode, err);
6095 if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) {
6096 /*
6097 * no need to retry since the kernel
6098 * isn't involved in encoding the ACEs.
6099 */
6100 err = -EINVAL;
6101 break;
6102 }
6103 err = nfs4_handle_exception(NFS_SERVER(inode), err,
6104 &exception);
6105 } while (exception.retry);
6106 return err;
6107 }
6108
6109 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
6110 static int _nfs4_get_security_label(struct inode *inode, void *buf,
6111 size_t buflen)
6112 {
6113 struct nfs_server *server = NFS_SERVER(inode);
6114 struct nfs4_label label = {0, 0, buflen, buf};
6115
6116 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
6117 struct nfs_fattr fattr = {
6118 .label = &label,
6119 };
6120 struct nfs4_getattr_arg arg = {
6121 .fh = NFS_FH(inode),
6122 .bitmask = bitmask,
6123 };
6124 struct nfs4_getattr_res res = {
6125 .fattr = &fattr,
6126 .server = server,
6127 };
6128 struct rpc_message msg = {
6129 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
6130 .rpc_argp = &arg,
6131 .rpc_resp = &res,
6132 };
6133 int ret;
6134
6135 nfs_fattr_init(&fattr);
6136
6137 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
6138 if (ret)
6139 return ret;
6140 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
6141 return -ENOENT;
6142 return label.len;
6143 }
6144
6145 static int nfs4_get_security_label(struct inode *inode, void *buf,
6146 size_t buflen)
6147 {
6148 struct nfs4_exception exception = {
6149 .interruptible = true,
6150 };
6151 int err;
6152
6153 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
6154 return -EOPNOTSUPP;
6155
6156 do {
6157 err = _nfs4_get_security_label(inode, buf, buflen);
6158 trace_nfs4_get_security_label(inode, err);
6159 err = nfs4_handle_exception(NFS_SERVER(inode), err,
6160 &exception);
6161 } while (exception.retry);
6162 return err;
6163 }
6164
6165 static int _nfs4_do_set_security_label(struct inode *inode,
6166 struct nfs4_label *ilabel,
6167 struct nfs_fattr *fattr)
6168 {
6169
6170 struct iattr sattr = {0};
6171 struct nfs_server *server = NFS_SERVER(inode);
6172 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
6173 struct nfs_setattrargs arg = {
6174 .fh = NFS_FH(inode),
6175 .iap = &sattr,
6176 .server = server,
6177 .bitmask = bitmask,
6178 .label = ilabel,
6179 };
6180 struct nfs_setattrres res = {
6181 .fattr = fattr,
6182 .server = server,
6183 };
6184 struct rpc_message msg = {
6185 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
6186 .rpc_argp = &arg,
6187 .rpc_resp = &res,
6188 };
6189 int status;
6190
6191 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
6192
6193 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6194 if (status)
6195 dprintk("%s failed: %d\n", __func__, status);
6196
6197 return status;
6198 }
6199
6200 static int nfs4_do_set_security_label(struct inode *inode,
6201 struct nfs4_label *ilabel,
6202 struct nfs_fattr *fattr)
6203 {
6204 struct nfs4_exception exception = { };
6205 int err;
6206
6207 do {
6208 err = _nfs4_do_set_security_label(inode, ilabel, fattr);
6209 trace_nfs4_set_security_label(inode, err);
6210 err = nfs4_handle_exception(NFS_SERVER(inode), err,
6211 &exception);
6212 } while (exception.retry);
6213 return err;
6214 }
6215
6216 static int
6217 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
6218 {
6219 struct nfs4_label ilabel = {0, 0, buflen, (char *)buf };
6220 struct nfs_fattr *fattr;
6221 int status;
6222
6223 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
6224 return -EOPNOTSUPP;
6225
6226 fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode));
6227 if (fattr == NULL)
6228 return -ENOMEM;
6229
6230 status = nfs4_do_set_security_label(inode, &ilabel, fattr);
6231 if (status == 0)
6232 nfs_setsecurity(inode, fattr);
6233
6234 return status;
6235 }
6236 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
6237
6238
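/*
 * Fill in the client's boot verifier from the per-net boot time.  When
 * client state is being purged, use an all-ones value instead, which
 * can never match a real boot time.
 */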
6239 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
6240 nfs4_verifier *bootverf)
6241 {
6242 __be32 verf[2];
6243
6244 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
6245 /* An impossible timestamp guarantees this value
6246 * will never match a generated boot time. */
6247 verf[0] = cpu_to_be32(U32_MAX);
6248 verf[1] = cpu_to_be32(U32_MAX);
6249 } else {
6250 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
6251 u64 ns = ktime_to_ns(nn->boot_time);
6252
6253 verf[0] = cpu_to_be32(ns >> 32);
6254 verf[1] = cpu_to_be32(ns);
6255 }
6256 memcpy(bootverf->data, verf, sizeof(bootverf->data));
6257 }
6258
6259 static size_t
6260 nfs4_get_uniquifier(struct nfs_client *clp, char *buf, size_t buflen)
6261 {
6262 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
6263 struct nfs_netns_client *nn_clp = nn->nfs_client;
6264 const char *id;
6265
6266 buf[0] = '\0';
6267
6268 if (nn_clp) {
6269 rcu_read_lock();
6270 id = rcu_dereference(nn_clp->identifier);
6271 if (id)
6272 strscpy(buf, id, buflen);
6273 rcu_read_unlock();
6274 }
6275
6276 if (nfs4_client_id_uniquifier[0] != '\0' && buf[0] == '\0')
6277 strscpy(buf, nfs4_client_id_uniquifier, buflen);
6278
6279 return strlen(buf);
6280 }
6281
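/*
 * Build the non-uniform nfs_client_id4 string:
 * "Linux NFSv4.0 <nodename>/<address>", with an optional uniquifier
 * inserted between nodename and address.  The length estimate below
 * counts 14 bytes for the "Linux NFSv4.0 " prefix, the '/'-separated
 * components, and a trailing NUL.
 */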
6282 static int
6283 nfs4_init_nonuniform_client_string(struct nfs_client *clp)
6284 {
6285 char buf[NFS4_CLIENT_ID_UNIQ_LEN];
6286 size_t buflen;
6287 size_t len;
6288 char *str;
6289
6290 if (clp->cl_owner_id != NULL)
6291 return 0;
6292
6293 rcu_read_lock();
6294 len = 14 +
6295 strlen(clp->cl_rpcclient->cl_nodename) +
6296 1 +
6297 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
6298 1;
6299 rcu_read_unlock();
6300
6301 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf));
6302 if (buflen)
6303 len += buflen + 1;
6304
6305 if (len > NFS4_OPAQUE_LIMIT + 1)
6306 return -EINVAL;
6307
6308 /*
6309 * Since this string is allocated at mount time, and held until the
6310 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
6311 * about a memory-reclaim deadlock.
6312 */
6313 str = kmalloc(len, GFP_KERNEL);
6314 if (!str)
6315 return -ENOMEM;
6316
6317 rcu_read_lock();
6318 if (buflen)
6319 scnprintf(str, len, "Linux NFSv4.0 %s/%s/%s",
6320 clp->cl_rpcclient->cl_nodename, buf,
6321 rpc_peeraddr2str(clp->cl_rpcclient,
6322 RPC_DISPLAY_ADDR));
6323 else
6324 scnprintf(str, len, "Linux NFSv4.0 %s/%s",
6325 clp->cl_rpcclient->cl_nodename,
6326 rpc_peeraddr2str(clp->cl_rpcclient,
6327 RPC_DISPLAY_ADDR));
6328 rcu_read_unlock();
6329
6330 clp->cl_owner_id = str;
6331 return 0;
6332 }
6333
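/*
 * Build the uniform nfs_client_id4 string:
 * "Linux NFSv<major>.<minor> <uniquifier>/<nodename>" (just the
 * nodename when no uniquifier is configured).  The length estimate
 * below allows 10 bytes for the "Linux NFSv" prefix, up to 10 digits
 * for each version number, plus separators and a trailing NUL.
 */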
6334 static int
6335 nfs4_init_uniform_client_string(struct nfs_client *clp)
6336 {
6337 char buf[NFS4_CLIENT_ID_UNIQ_LEN];
6338 size_t buflen;
6339 size_t len;
6340 char *str;
6341
6342 if (clp->cl_owner_id != NULL)
6343 return 0;
6344
6345 len = 10 + 10 + 1 + 10 + 1 +
6346 strlen(clp->cl_rpcclient->cl_nodename) + 1;
6347
6348 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf));
6349 if (buflen)
6350 len += buflen + 1;
6351
6352 if (len > NFS4_OPAQUE_LIMIT + 1)
6353 return -EINVAL;
6354
6355 /*
6356 * Since this string is allocated at mount time, and held until the
6357 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
6358 * about a memory-reclaim deadlock.
6359 */
6360 str = kmalloc(len, GFP_KERNEL);
6361 if (!str)
6362 return -ENOMEM;
6363
6364 if (buflen)
6365 scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
6366 clp->rpc_ops->version, clp->cl_minorversion,
6367 buf, clp->cl_rpcclient->cl_nodename);
6368 else
6369 scnprintf(str, len, "Linux NFSv%u.%u %s",
6370 clp->rpc_ops->version, clp->cl_minorversion,
6371 clp->cl_rpcclient->cl_nodename);
6372 clp->cl_owner_id = str;
6373 return 0;
6374 }
6375
6376 /*
6377 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
6378 * services. Advertise one based on the address family of the
6379 * clientaddr.
6380 */
6381 static unsigned int
6382 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
6383 {
6384 if (strchr(clp->cl_ipaddr, ':') != NULL)
6385 return scnprintf(buf, len, "tcp6");
6386 else
6387 return scnprintf(buf, len, "tcp");
6388 }
6389
6390 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
6391 {
6392 struct nfs4_setclientid *sc = calldata;
6393
6394 if (task->tk_status == 0)
6395 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
6396 }
6397
6398 static const struct rpc_call_ops nfs4_setclientid_ops = {
6399 .rpc_call_done = nfs4_setclientid_done,
6400 };
6401
6402 /**
6403 * nfs4_proc_setclientid - Negotiate client ID
6404 * @clp: state data structure
6405 * @program: RPC program for NFSv4 callback service
6406 * @port: IP port number for NFS4 callback service
6407 * @cred: credential to use for this call
6408 * @res: where to place the result
6409 *
6410 * Returns zero, a negative errno, or a negative NFS4ERR status code.
6411 */
6412 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
6413 unsigned short port, const struct cred *cred,
6414 struct nfs4_setclientid_res *res)
6415 {
6416 nfs4_verifier sc_verifier;
6417 struct nfs4_setclientid setclientid = {
6418 .sc_verifier = &sc_verifier,
6419 .sc_prog = program,
6420 .sc_clnt = clp,
6421 };
6422 struct rpc_message msg = {
6423 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
6424 .rpc_argp = &setclientid,
6425 .rpc_resp = res,
6426 .rpc_cred = cred,
6427 };
6428 struct rpc_task_setup task_setup_data = {
6429 .rpc_client = clp->cl_rpcclient,
6430 .rpc_message = &msg,
6431 .callback_ops = &nfs4_setclientid_ops,
6432 .callback_data = &setclientid,
6433 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
6434 };
6435 unsigned long now = jiffies;
6436 int status;
6437
6438 /* nfs_client_id4 */
6439 nfs4_init_boot_verifier(clp, &sc_verifier);
6440
6441 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
6442 status = nfs4_init_uniform_client_string(clp);
6443 else
6444 status = nfs4_init_nonuniform_client_string(clp);
6445
6446 if (status)
6447 goto out;
6448
6449 /* cb_client4 */
6450 setclientid.sc_netid_len =
6451 nfs4_init_callback_netid(clp,
6452 setclientid.sc_netid,
6453 sizeof(setclientid.sc_netid));
6454 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
6455 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
6456 clp->cl_ipaddr, port >> 8, port & 255);
6457
6458 dprintk("NFS call setclientid auth=%s, '%s'\n",
6459 clp->cl_rpcclient->cl_auth->au_ops->au_name,
6460 clp->cl_owner_id);
6461
6462 status = nfs4_call_sync_custom(&task_setup_data);
6463 if (setclientid.sc_cred) {
6464 kfree(clp->cl_acceptor);
6465 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
6466 put_rpccred(setclientid.sc_cred);
6467 }
6468
6469 if (status == 0)
6470 do_renew_lease(clp, now);
6471 out:
6472 trace_nfs4_setclientid(clp, status);
6473 dprintk("NFS reply setclientid: %d\n", status);
6474 return status;
6475 }
6476
6477 /**
6478 * nfs4_proc_setclientid_confirm - Confirm client ID
6479 * @clp: state data structure
6480 * @arg: result of a previous SETCLIENTID
6481 * @cred: credential to use for this call
6482 *
6483 * Returns zero, a negative errno, or a negative NFS4ERR status code.
6484 */
6485 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
6486 struct nfs4_setclientid_res *arg,
6487 const struct cred *cred)
6488 {
6489 struct rpc_message msg = {
6490 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
6491 .rpc_argp = arg,
6492 .rpc_cred = cred,
6493 };
6494 int status;
6495
6496 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
6497 clp->cl_rpcclient->cl_auth->au_ops->au_name,
6498 clp->cl_clientid);
6499 status = rpc_call_sync(clp->cl_rpcclient, &msg,
6500 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
6501 trace_nfs4_setclientid_confirm(clp, status);
6502 dprintk("NFS reply setclientid_confirm: %d\n", status);
6503 return status;
6504 }
6505
6506 struct nfs4_delegreturndata {
6507 struct nfs4_delegreturnargs args;
6508 struct nfs4_delegreturnres res;
6509 struct nfs_fh fh;
6510 nfs4_stateid stateid;
6511 unsigned long timestamp;
6512 struct {
6513 struct nfs4_layoutreturn_args arg;
6514 struct nfs4_layoutreturn_res res;
6515 struct nfs4_xdr_opaque_data ld_private;
6516 u32 roc_barrier;
6517 bool roc;
6518 } lr;
6519 struct nfs_fattr fattr;
6520 int rpc_status;
6521 struct inode *inode;
6522 };
6523
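/*
 * DELEGRETURN completion: handle any layoutreturn result first, renew
 * the lease on success, clear errors for revoked/bad/stale stateids,
 * and restart the call for NFS4ERR_OLD_STATEID, NFS4ERR_ACCESS (after
 * dropping the attribute request) and other retryable errors.
 */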
6524 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
6525 {
6526 struct nfs4_delegreturndata *data = calldata;
6527 struct nfs4_exception exception = {
6528 .inode = data->inode,
6529 .stateid = &data->stateid,
6530 .task_is_privileged = data->args.seq_args.sa_privileged,
6531 };
6532
6533 if (!nfs4_sequence_done(task, &data->res.seq_res))
6534 return;
6535
6536 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
6537
6538 /* Handle Layoutreturn errors */
6539 if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res,
6540 &data->res.lr_ret) == -EAGAIN)
6541 goto out_restart;
6542
6543 switch (task->tk_status) {
6544 case 0:
6545 renew_lease(data->res.server, data->timestamp);
6546 break;
6547 case -NFS4ERR_ADMIN_REVOKED:
6548 case -NFS4ERR_DELEG_REVOKED:
6549 case -NFS4ERR_EXPIRED:
6550 nfs4_free_revoked_stateid(data->res.server,
6551 data->args.stateid,
6552 task->tk_msg.rpc_cred);
6553 fallthrough;
6554 case -NFS4ERR_BAD_STATEID:
6555 case -NFS4ERR_STALE_STATEID:
6556 case -ETIMEDOUT:
6557 task->tk_status = 0;
6558 break;
6559 case -NFS4ERR_OLD_STATEID:
6560 if (!nfs4_refresh_delegation_stateid(&data->stateid, data->inode))
6561 nfs4_stateid_seqid_inc(&data->stateid);
6562 if (data->args.bitmask) {
6563 data->args.bitmask = NULL;
6564 data->res.fattr = NULL;
6565 }
6566 goto out_restart;
6567 case -NFS4ERR_ACCESS:
6568 if (data->args.bitmask) {
6569 data->args.bitmask = NULL;
6570 data->res.fattr = NULL;
6571 goto out_restart;
6572 }
6573 fallthrough;
6574 default:
6575 task->tk_status = nfs4_async_handle_exception(task,
6576 data->res.server, task->tk_status,
6577 &exception);
6578 if (exception.retry)
6579 goto out_restart;
6580 }
6581 nfs_delegation_mark_returned(data->inode, data->args.stateid);
6582 data->rpc_status = task->tk_status;
6583 return;
6584 out_restart:
6585 task->tk_status = 0;
6586 rpc_restart_call_prepare(task);
6587 }
6588
6589 static void nfs4_delegreturn_release(void *calldata)
6590 {
6591 struct nfs4_delegreturndata *data = calldata;
6592 struct inode *inode = data->inode;
6593
6594 if (data->lr.roc)
6595 pnfs_roc_release(&data->lr.arg, &data->lr.res,
6596 data->res.lr_ret);
6597 if (inode) {
6598 nfs4_fattr_set_prechange(&data->fattr,
6599 inode_peek_iversion_raw(inode));
6600 nfs_refresh_inode(inode, &data->fattr);
6601 nfs_iput_and_deactive(inode);
6602 }
6603 kfree(calldata);
6604 }
6605
6606 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
6607 {
6608 struct nfs4_delegreturndata *d_data;
6609 struct pnfs_layout_hdr *lo;
6610
6611 d_data = data;
6612
6613 if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) {
6614 nfs4_sequence_done(task, &d_data->res.seq_res);
6615 return;
6616 }
6617
6618 lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL;
6619 if (lo && !pnfs_layout_is_valid(lo)) {
6620 d_data->args.lr_args = NULL;
6621 d_data->res.lr_res = NULL;
6622 }
6623
6624 nfs4_setup_sequence(d_data->res.server->nfs_client,
6625 &d_data->args.seq_args,
6626 &d_data->res.seq_res,
6627 task);
6628 }
6629
6630 static const struct rpc_call_ops nfs4_delegreturn_ops = {
6631 .rpc_call_prepare = nfs4_delegreturn_prepare,
6632 .rpc_call_done = nfs4_delegreturn_done,
6633 .rpc_release = nfs4_delegreturn_release,
6634 };
6635
6636 static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync)
6637 {
6638 struct nfs4_delegreturndata *data;
6639 struct nfs_server *server = NFS_SERVER(inode);
6640 struct rpc_task *task;
6641 struct rpc_message msg = {
6642 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
6643 .rpc_cred = cred,
6644 };
6645 struct rpc_task_setup task_setup_data = {
6646 .rpc_client = server->client,
6647 .rpc_message = &msg,
6648 .callback_ops = &nfs4_delegreturn_ops,
6649 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
6650 };
6651 int status = 0;
6652
6653 if (nfs_server_capable(inode, NFS_CAP_MOVEABLE))
6654 task_setup_data.flags |= RPC_TASK_MOVEABLE;
6655
6656 data = kzalloc(sizeof(*data), GFP_KERNEL);
6657 if (data == NULL)
6658 return -ENOMEM;
6659
6660 nfs4_state_protect(server->nfs_client,
6661 NFS_SP4_MACH_CRED_CLEANUP,
6662 &task_setup_data.rpc_client, &msg);
6663
6664 data->args.fhandle = &data->fh;
6665 data->args.stateid = &data->stateid;
6666 nfs4_bitmask_set(data->args.bitmask_store,
6667 server->cache_consistency_bitmask, inode, 0);
6668 data->args.bitmask = data->args.bitmask_store;
6669 nfs_copy_fh(&data->fh, NFS_FH(inode));
6670 nfs4_stateid_copy(&data->stateid, stateid);
6671 data->res.fattr = &data->fattr;
6672 data->res.server = server;
6673 data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
6674 data->lr.arg.ld_private = &data->lr.ld_private;
6675 nfs_fattr_init(data->res.fattr);
6676 data->timestamp = jiffies;
6677 data->rpc_status = 0;
6678 data->inode = nfs_igrab_and_active(inode);
6679 if (data->inode || issync) {
6680 data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res,
6681 cred);
6682 if (data->lr.roc) {
6683 data->args.lr_args = &data->lr.arg;
6684 data->res.lr_res = &data->lr.res;
6685 }
6686 }
6687
6688 if (!data->inode)
6689 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
6690 1);
6691 else
6692 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
6693 0);
6694 task_setup_data.callback_data = data;
6695 msg.rpc_argp = &data->args;
6696 msg.rpc_resp = &data->res;
6697 task = rpc_run_task(&task_setup_data);
6698 if (IS_ERR(task))
6699 return PTR_ERR(task);
6700 if (!issync)
6701 goto out;
6702 status = rpc_wait_for_completion_task(task);
6703 if (status != 0)
6704 goto out;
6705 status = data->rpc_status;
6706 out:
6707 rpc_put_task(task);
6708 return status;
6709 }
6710
6711 int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync)
6712 {
6713 struct nfs_server *server = NFS_SERVER(inode);
6714 struct nfs4_exception exception = { };
6715 int err;
6716 do {
6717 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
6718 trace_nfs4_delegreturn(inode, stateid, err);
6719 switch (err) {
6720 case -NFS4ERR_STALE_STATEID:
6721 case -NFS4ERR_EXPIRED:
6722 case 0:
6723 return 0;
6724 }
6725 err = nfs4_handle_exception(server, err, &exception);
6726 } while (exception.retry);
6727 return err;
6728 }
6729
6730 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6731 {
6732 struct inode *inode = state->inode;
6733 struct nfs_server *server = NFS_SERVER(inode);
6734 struct nfs_client *clp = server->nfs_client;
6735 struct nfs_lockt_args arg = {
6736 .fh = NFS_FH(inode),
6737 .fl = request,
6738 };
6739 struct nfs_lockt_res res = {
6740 .denied = request,
6741 };
6742 struct rpc_message msg = {
6743 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
6744 .rpc_argp = &arg,
6745 .rpc_resp = &res,
6746 .rpc_cred = state->owner->so_cred,
6747 };
6748 struct nfs4_lock_state *lsp;
6749 int status;
6750
6751 arg.lock_owner.clientid = clp->cl_clientid;
6752 status = nfs4_set_lock_state(state, request);
6753 if (status != 0)
6754 goto out;
6755 lsp = request->fl_u.nfs4_fl.owner;
6756 arg.lock_owner.id = lsp->ls_seqid.owner_id;
6757 arg.lock_owner.s_dev = server->s_dev;
6758 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6759 switch (status) {
6760 case 0:
6761 request->fl_type = F_UNLCK;
6762 break;
6763 case -NFS4ERR_DENIED:
6764 status = 0;
6765 }
6766 request->fl_ops->fl_release_private(request);
6767 request->fl_ops = NULL;
6768 out:
6769 return status;
6770 }
6771
6772 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6773 {
6774 struct nfs4_exception exception = {
6775 .interruptible = true,
6776 };
6777 int err;
6778
6779 do {
6780 err = _nfs4_proc_getlk(state, cmd, request);
6781 trace_nfs4_get_lock(request, state, cmd, err);
6782 err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
6783 &exception);
6784 } while (exception.retry);
6785 return err;
6786 }
6787
6788 /*
6789 * Update the seqid of a lock stateid after receiving
6790 * NFS4ERR_OLD_STATEID
6791 */
6792 static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst,
6793 struct nfs4_lock_state *lsp)
6794 {
6795 struct nfs4_state *state = lsp->ls_state;
6796 bool ret = false;
6797
6798 spin_lock(&state->state_lock);
6799 if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid))
6800 goto out;
6801 if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst))
6802 nfs4_stateid_seqid_inc(dst);
6803 else
6804 dst->seqid = lsp->ls_stateid.seqid;
6805 ret = true;
6806 out:
6807 spin_unlock(&state->state_lock);
6808 return ret;
6809 }
6810
6811 static bool nfs4_sync_lock_stateid(nfs4_stateid *dst,
6812 struct nfs4_lock_state *lsp)
6813 {
6814 struct nfs4_state *state = lsp->ls_state;
6815 bool ret;
6816
6817 spin_lock(&state->state_lock);
6818 ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid);
6819 nfs4_stateid_copy(dst, &lsp->ls_stateid);
6820 spin_unlock(&state->state_lock);
6821 return ret;
6822 }
6823
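/*
 * Per-call data for an asynchronous LOCKU request. The open and lock
 * contexts are held to keep the file open until the unlock completes.
 */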
6824 struct nfs4_unlockdata {
6825 struct nfs_locku_args arg;
6826 struct nfs_locku_res res;
6827 struct nfs4_lock_state *lsp;
6828 struct nfs_open_context *ctx;
6829 struct nfs_lock_context *l_ctx;
6830 struct file_lock fl;
6831 struct nfs_server *server;
6832 unsigned long timestamp;
6833 };
6834
6835 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
6836 struct nfs_open_context *ctx,
6837 struct nfs4_lock_state *lsp,
6838 struct nfs_seqid *seqid)
6839 {
6840 struct nfs4_unlockdata *p;
6841 struct nfs4_state *state = lsp->ls_state;
6842 struct inode *inode = state->inode;
6843
6844 p = kzalloc(sizeof(*p), GFP_KERNEL);
6845 if (p == NULL)
6846 return NULL;
6847 p->arg.fh = NFS_FH(inode);
6848 p->arg.fl = &p->fl;
6849 p->arg.seqid = seqid;
6850 p->res.seqid = seqid;
6851 p->lsp = lsp;
6852 /* Ensure we don't close file until we're done freeing locks! */
6853 p->ctx = get_nfs_open_context(ctx);
6854 p->l_ctx = nfs_get_lock_context(ctx);
6855 locks_init_lock(&p->fl);
6856 locks_copy_lock(&p->fl, fl);
6857 p->server = NFS_SERVER(inode);
6858 spin_lock(&state->state_lock);
6859 nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid);
6860 spin_unlock(&state->state_lock);
6861 return p;
6862 }
6863
6864 static void nfs4_locku_release_calldata(void *data)
6865 {
6866 struct nfs4_unlockdata *calldata = data;
6867 nfs_free_seqid(calldata->arg.seqid);
6868 nfs4_put_lock_state(calldata->lsp);
6869 nfs_put_lock_context(calldata->l_ctx);
6870 put_nfs_open_context(calldata->ctx);
6871 kfree(calldata);
6872 }
6873
6874 static void nfs4_locku_done(struct rpc_task *task, void *data)
6875 {
6876 struct nfs4_unlockdata *calldata = data;
6877 struct nfs4_exception exception = {
6878 .inode = calldata->lsp->ls_state->inode,
6879 .stateid = &calldata->arg.stateid,
6880 };
6881
6882 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
6883 return;
6884 switch (task->tk_status) {
6885 case 0:
6886 renew_lease(calldata->server, calldata->timestamp);
6887 locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl);
6888 if (nfs4_update_lock_stateid(calldata->lsp,
6889 &calldata->res.stateid))
6890 break;
6891 fallthrough;
6892 case -NFS4ERR_ADMIN_REVOKED:
6893 case -NFS4ERR_EXPIRED:
6894 nfs4_free_revoked_stateid(calldata->server,
6895 &calldata->arg.stateid,
6896 task->tk_msg.rpc_cred);
6897 fallthrough;
6898 case -NFS4ERR_BAD_STATEID:
6899 case -NFS4ERR_STALE_STATEID:
6900 if (nfs4_sync_lock_stateid(&calldata->arg.stateid,
6901 calldata->lsp))
6902 rpc_restart_call_prepare(task);
6903 break;
6904 case -NFS4ERR_OLD_STATEID:
6905 if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid,
6906 calldata->lsp))
6907 rpc_restart_call_prepare(task);
6908 break;
6909 default:
6910 task->tk_status = nfs4_async_handle_exception(task,
6911 calldata->server, task->tk_status,
6912 &exception);
6913 if (exception.retry)
6914 rpc_restart_call_prepare(task);
6915 }
6916 nfs_release_seqid(calldata->arg.seqid);
6917 }
6918
6919 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
6920 {
6921 struct nfs4_unlockdata *calldata = data;
6922
6923 if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) &&
6924 nfs_async_iocounter_wait(task, calldata->l_ctx))
6925 return;
6926
6927 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
6928 goto out_wait;
6929 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
6930 /* Note: exit _without_ running nfs4_locku_done */
6931 goto out_no_action;
6932 }
6933 calldata->timestamp = jiffies;
6934 if (nfs4_setup_sequence(calldata->server->nfs_client,
6935 &calldata->arg.seq_args,
6936 &calldata->res.seq_res,
6937 task) != 0)
6938 nfs_release_seqid(calldata->arg.seqid);
6939 return;
6940 out_no_action:
6941 task->tk_action = NULL;
6942 out_wait:
6943 nfs4_sequence_done(task, &calldata->res.seq_res);
6944 }
6945
6946 static const struct rpc_call_ops nfs4_locku_ops = {
6947 .rpc_call_prepare = nfs4_locku_prepare,
6948 .rpc_call_done = nfs4_locku_done,
6949 .rpc_release = nfs4_locku_release_calldata,
6950 };
6951
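/*
 * Set up and launch an asynchronous LOCKU request. The passed-in lock is
 * forced to F_UNLCK first, since a cancelled LOCK request may land here.
 * Returns the running rpc_task, or an ERR_PTR() if allocation fails (in
 * which case the seqid is freed).
 */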
6952 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
6953 struct nfs_open_context *ctx,
6954 struct nfs4_lock_state *lsp,
6955 struct nfs_seqid *seqid)
6956 {
6957 struct nfs4_unlockdata *data;
6958 struct rpc_message msg = {
6959 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
6960 .rpc_cred = ctx->cred,
6961 };
6962 struct rpc_task_setup task_setup_data = {
6963 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
6964 .rpc_message = &msg,
6965 .callback_ops = &nfs4_locku_ops,
6966 .workqueue = nfsiod_workqueue,
6967 .flags = RPC_TASK_ASYNC,
6968 };
6969
6970 if (nfs_server_capable(lsp->ls_state->inode, NFS_CAP_MOVEABLE))
6971 task_setup_data.flags |= RPC_TASK_MOVEABLE;
6972
6973 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
6974 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
6975
6976 /* Ensure this is an unlock - when canceling a lock, the
6977 * canceled lock is passed in, and it won't be an unlock.
6978 */
6979 fl->fl_type = F_UNLCK;
6980 if (fl->fl_flags & FL_CLOSE)
6981 set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags);
6982
6983 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
6984 if (data == NULL) {
6985 nfs_free_seqid(seqid);
6986 return ERR_PTR(-ENOMEM);
6987 }
6988
6989 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 0);
6990 msg.rpc_argp = &data->arg;
6991 msg.rpc_resp = &data->res;
6992 task_setup_data.callback_data = data;
6993 return rpc_run_task(&task_setup_data);
6994 }
6995
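/*
 * Handle an unlock request: drop the lock locally first, then issue a
 * LOCKU only if lock state was actually established on the server
 * (i.e. the lock was not merely cached under a delegation).
 */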
6996 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
6997 {
6998 struct inode *inode = state->inode;
6999 struct nfs4_state_owner *sp = state->owner;
7000 struct nfs_inode *nfsi = NFS_I(inode);
7001 struct nfs_seqid *seqid;
7002 struct nfs4_lock_state *lsp;
7003 struct rpc_task *task;
7004 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
7005 int status = 0;
7006 unsigned char fl_flags = request->fl_flags;
7007
7008 status = nfs4_set_lock_state(state, request);
7009 /* Unlock _before_ we do the RPC call */
7010 request->fl_flags |= FL_EXISTS;
7011 /* Exclude nfs_delegation_claim_locks() */
7012 mutex_lock(&sp->so_delegreturn_mutex);
7013 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
7014 down_read(&nfsi->rwsem);
7015 if (locks_lock_inode_wait(inode, request) == -ENOENT) {
7016 up_read(&nfsi->rwsem);
7017 mutex_unlock(&sp->so_delegreturn_mutex);
7018 goto out;
7019 }
7020 up_read(&nfsi->rwsem);
7021 mutex_unlock(&sp->so_delegreturn_mutex);
7022 if (status != 0)
7023 goto out;
7024 /* Is this a delegated lock? */
7025 lsp = request->fl_u.nfs4_fl.owner;
7026 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
7027 goto out;
7028 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
7029 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
7030 status = -ENOMEM;
7031 if (IS_ERR(seqid))
7032 goto out;
7033 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
7034 status = PTR_ERR(task);
7035 if (IS_ERR(task))
7036 goto out;
7037 status = rpc_wait_for_completion_task(task);
7038 rpc_put_task(task);
7039 out:
7040 request->fl_flags = fl_flags;
7041 trace_nfs4_unlock(request, state, F_SETLK, status);
7042 return status;
7043 }
7044
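/* Per-call data for an asynchronous LOCK request */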
7045 struct nfs4_lockdata {
7046 struct nfs_lock_args arg;
7047 struct nfs_lock_res res;
7048 struct nfs4_lock_state *lsp;
7049 struct nfs_open_context *ctx;
7050 struct file_lock fl;
7051 unsigned long timestamp;
7052 int rpc_status;
7053 int cancelled;
7054 struct nfs_server *server;
7055 };
7056
7057 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
7058 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
7059 gfp_t gfp_mask)
7060 {
7061 struct nfs4_lockdata *p;
7062 struct inode *inode = lsp->ls_state->inode;
7063 struct nfs_server *server = NFS_SERVER(inode);
7064 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
7065
7066 p = kzalloc(sizeof(*p), gfp_mask);
7067 if (p == NULL)
7068 return NULL;
7069
7070 p->arg.fh = NFS_FH(inode);
7071 p->arg.fl = &p->fl;
7072 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
7073 if (IS_ERR(p->arg.open_seqid))
7074 goto out_free;
7075 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
7076 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
7077 if (IS_ERR(p->arg.lock_seqid))
7078 goto out_free_seqid;
7079 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
7080 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
7081 p->arg.lock_owner.s_dev = server->s_dev;
7082 p->res.lock_seqid = p->arg.lock_seqid;
7083 p->lsp = lsp;
7084 p->server = server;
7085 p->ctx = get_nfs_open_context(ctx);
7086 locks_init_lock(&p->fl);
7087 locks_copy_lock(&p->fl, fl);
7088 return p;
7089 out_free_seqid:
7090 nfs_free_seqid(p->arg.open_seqid);
7091 out_free:
7092 kfree(p);
7093 return NULL;
7094 }
7095
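/*
 * rpc_call_prepare for LOCK: wait on the lock seqid (and the open seqid
 * when a new lock owner must be established), pick between the open
 * stateid and the existing lock stateid, then set up the sequence slot.
 */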
7096 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
7097 {
7098 struct nfs4_lockdata *data = calldata;
7099 struct nfs4_state *state = data->lsp->ls_state;
7100
7101 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
7102 goto out_wait;
7103 /* Do we need to do an open_to_lock_owner? */
7104 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
7105 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
7106 goto out_release_lock_seqid;
7107 }
7108 nfs4_stateid_copy(&data->arg.open_stateid,
7109 &state->open_stateid);
7110 data->arg.new_lock_owner = 1;
7111 data->res.open_seqid = data->arg.open_seqid;
7112 } else {
7113 data->arg.new_lock_owner = 0;
7114 nfs4_stateid_copy(&data->arg.lock_stateid,
7115 &data->lsp->ls_stateid);
7116 }
7117 if (!nfs4_valid_open_stateid(state)) {
7118 data->rpc_status = -EBADF;
7119 task->tk_action = NULL;
7120 goto out_release_open_seqid;
7121 }
7122 data->timestamp = jiffies;
7123 if (nfs4_setup_sequence(data->server->nfs_client,
7124 &data->arg.seq_args,
7125 &data->res.seq_res,
7126 task) == 0)
7127 return;
7128 out_release_open_seqid:
7129 nfs_release_seqid(data->arg.open_seqid);
7130 out_release_lock_seqid:
7131 nfs_release_seqid(data->arg.lock_seqid);
7132 out_wait:
7133 nfs4_sequence_done(task, &data->res.seq_res);
7134 dprintk("%s: ret = %d\n", __func__, data->rpc_status);
7135 }
7136
7137 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
7138 {
7139 struct nfs4_lockdata *data = calldata;
7140 struct nfs4_lock_state *lsp = data->lsp;
7141 struct nfs_server *server = NFS_SERVER(d_inode(data->ctx->dentry));
7142
7143 if (!nfs4_sequence_done(task, &data->res.seq_res))
7144 return;
7145
7146 data->rpc_status = task->tk_status;
7147 switch (task->tk_status) {
7148 case 0:
7149 renew_lease(server, data->timestamp);
7150 if (data->arg.new_lock && !data->cancelled) {
7151 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
7152 if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
7153 goto out_restart;
7154 }
7155 if (data->arg.new_lock_owner != 0) {
7156 nfs_confirm_seqid(&lsp->ls_seqid, 0);
7157 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
7158 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
7159 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
7160 goto out_restart;
7161 break;
7162 case -NFS4ERR_BAD_STATEID:
7163 case -NFS4ERR_OLD_STATEID:
7164 case -NFS4ERR_STALE_STATEID:
7165 case -NFS4ERR_EXPIRED:
7166 if (data->arg.new_lock_owner != 0) {
7167 if (!nfs4_stateid_match(&data->arg.open_stateid,
7168 &lsp->ls_state->open_stateid))
7169 goto out_restart;
7170 else if (nfs4_async_handle_error(task, server, lsp->ls_state, NULL) == -EAGAIN)
7171 goto out_restart;
7172 } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
7173 &lsp->ls_stateid))
7174 goto out_restart;
7175 }
7176 out_done:
7177 dprintk("%s: ret = %d!\n", __func__, data->rpc_status);
7178 return;
7179 out_restart:
7180 if (!data->cancelled)
7181 rpc_restart_call_prepare(task);
7182 goto out_done;
7183 }
7184
7185 static void nfs4_lock_release(void *calldata)
7186 {
7187 struct nfs4_lockdata *data = calldata;
7188
7189 nfs_free_seqid(data->arg.open_seqid);
7190 if (data->cancelled && data->rpc_status == 0) {
7191 struct rpc_task *task;
7192 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
7193 data->arg.lock_seqid);
7194 if (!IS_ERR(task))
7195 rpc_put_task_async(task);
7196 dprintk("%s: cancelling lock!\n", __func__);
7197 } else
7198 nfs_free_seqid(data->arg.lock_seqid);
7199 nfs4_put_lock_state(data->lsp);
7200 put_nfs_open_context(data->ctx);
7201 kfree(data);
7202 }
7203
7204 static const struct rpc_call_ops nfs4_lock_ops = {
7205 .rpc_call_prepare = nfs4_lock_prepare,
7206 .rpc_call_done = nfs4_lock_done,
7207 .rpc_release = nfs4_lock_release,
7208 };
7209
7210 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
7211 {
7212 switch (error) {
7213 case -NFS4ERR_ADMIN_REVOKED:
7214 case -NFS4ERR_EXPIRED:
7215 case -NFS4ERR_BAD_STATEID:
7216 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
7217 if (new_lock_owner != 0 ||
7218 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
7219 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
7220 break;
7221 case -NFS4ERR_STALE_STATEID:
7222 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
7223 nfs4_schedule_lease_recovery(server->nfs_client);
7224 }
7225 }
7226
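/*
 * Send a LOCK request and wait for it to complete. @recovery_type
 * distinguishes a new lock (NFS_LOCK_NEW) from reclaim or expired-state
 * recovery. If the wait is interrupted, the request is marked cancelled
 * so that nfs4_lock_release() can undo a lock that was granted anyway.
 */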
7227 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
7228 {
7229 struct nfs4_lockdata *data;
7230 struct rpc_task *task;
7231 struct rpc_message msg = {
7232 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
7233 .rpc_cred = state->owner->so_cred,
7234 };
7235 struct rpc_task_setup task_setup_data = {
7236 .rpc_client = NFS_CLIENT(state->inode),
7237 .rpc_message = &msg,
7238 .callback_ops = &nfs4_lock_ops,
7239 .workqueue = nfsiod_workqueue,
7240 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
7241 };
7242 int ret;
7243
7244 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE))
7245 task_setup_data.flags |= RPC_TASK_MOVEABLE;
7246
7247 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
7248 fl->fl_u.nfs4_fl.owner, GFP_KERNEL);
7249 if (data == NULL)
7250 return -ENOMEM;
7251 if (IS_SETLKW(cmd))
7252 data->arg.block = 1;
7253 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1,
7254 recovery_type > NFS_LOCK_NEW);
7255 msg.rpc_argp = &data->arg;
7256 msg.rpc_resp = &data->res;
7257 task_setup_data.callback_data = data;
7258 if (recovery_type > NFS_LOCK_NEW) {
7259 if (recovery_type == NFS_LOCK_RECLAIM)
7260 data->arg.reclaim = NFS_LOCK_RECLAIM;
7261 } else
7262 data->arg.new_lock = 1;
7263 task = rpc_run_task(&task_setup_data);
7264 if (IS_ERR(task))
7265 return PTR_ERR(task);
7266 ret = rpc_wait_for_completion_task(task);
7267 if (ret == 0) {
7268 ret = data->rpc_status;
7269 if (ret)
7270 nfs4_handle_setlk_error(data->server, data->lsp,
7271 data->arg.new_lock_owner, ret);
7272 } else
7273 data->cancelled = true;
7274 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
7275 rpc_put_task(task);
7276 dprintk("%s: ret = %d\n", __func__, ret);
7277 return ret;
7278 }
7279
7280 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
7281 {
7282 struct nfs_server *server = NFS_SERVER(state->inode);
7283 struct nfs4_exception exception = {
7284 .inode = state->inode,
7285 };
7286 int err;
7287
7288 do {
7289 /* Cache the lock if possible... */
7290 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
7291 return 0;
7292 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
7293 if (err != -NFS4ERR_DELAY)
7294 break;
7295 nfs4_handle_exception(server, err, &exception);
7296 } while (exception.retry);
7297 return err;
7298 }
7299
7300 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
7301 {
7302 struct nfs_server *server = NFS_SERVER(state->inode);
7303 struct nfs4_exception exception = {
7304 .inode = state->inode,
7305 };
7306 int err;
7307
7308 err = nfs4_set_lock_state(state, request);
7309 if (err != 0)
7310 return err;
7311 if (!recover_lost_locks) {
7312 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
7313 return 0;
7314 }
7315 do {
7316 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
7317 return 0;
7318 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
7319 switch (err) {
7320 default:
7321 goto out;
7322 case -NFS4ERR_GRACE:
7323 case -NFS4ERR_DELAY:
7324 nfs4_handle_exception(server, err, &exception);
7325 err = 0;
7326 }
7327 } while (exception.retry);
7328 out:
7329 return err;
7330 }
7331
7332 #if defined(CONFIG_NFS_V4_1)
7333 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
7334 {
7335 struct nfs4_lock_state *lsp;
7336 int status;
7337
7338 status = nfs4_set_lock_state(state, request);
7339 if (status != 0)
7340 return status;
7341 lsp = request->fl_u.nfs4_fl.owner;
7342 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) ||
7343 test_bit(NFS_LOCK_LOST, &lsp->ls_flags))
7344 return 0;
7345 return nfs4_lock_expired(state, request);
7346 }
7347 #endif
7348
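/*
 * Set a lock: while the inode holds a delegation the lock is only
 * recorded locally; otherwise a LOCK request is sent to the server.
 * The delegreturn mutex and nfsi->rwsem exclude a concurrent
 * delegation recall while the choice is made.
 */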
7349 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7350 {
7351 struct nfs_inode *nfsi = NFS_I(state->inode);
7352 struct nfs4_state_owner *sp = state->owner;
7353 unsigned char fl_flags = request->fl_flags;
7354 int status;
7355
7356 request->fl_flags |= FL_ACCESS;
7357 status = locks_lock_inode_wait(state->inode, request);
7358 if (status < 0)
7359 goto out;
7360 mutex_lock(&sp->so_delegreturn_mutex);
7361 down_read(&nfsi->rwsem);
7362 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
7363 /* Yes: cache locks! */
7364 /* ...but avoid races with delegation recall... */
7365 request->fl_flags = fl_flags & ~FL_SLEEP;
7366 status = locks_lock_inode_wait(state->inode, request);
7367 up_read(&nfsi->rwsem);
7368 mutex_unlock(&sp->so_delegreturn_mutex);
7369 goto out;
7370 }
7371 up_read(&nfsi->rwsem);
7372 mutex_unlock(&sp->so_delegreturn_mutex);
7373 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
7374 out:
7375 request->fl_flags = fl_flags;
7376 return status;
7377 }
7378
7379 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7380 {
7381 struct nfs4_exception exception = {
7382 .state = state,
7383 .inode = state->inode,
7384 .interruptible = true,
7385 };
7386 int err;
7387
7388 do {
7389 err = _nfs4_proc_setlk(state, cmd, request);
7390 if (err == -NFS4ERR_DENIED)
7391 err = -EAGAIN;
7392 err = nfs4_handle_exception(NFS_SERVER(state->inode),
7393 err, &exception);
7394 } while (exception.retry);
7395 return err;
7396 }
7397
7398 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
7399 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
7400
7401 static int
7402 nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd,
7403 struct file_lock *request)
7404 {
7405 int status = -ERESTARTSYS;
7406 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
7407
7408 while (!signalled()) {
7409 status = nfs4_proc_setlk(state, cmd, request);
7410 if ((status != -EAGAIN) || IS_SETLK(cmd))
7411 break;
7412 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
7413 schedule_timeout(timeout);
7414 timeout *= 2;
7415 timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout);
7416 status = -ERESTARTSYS;
7417 }
7418 return status;
7419 }
7420
7421 #ifdef CONFIG_NFS_V4_1
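/*
 * Waiter for a CB_NOTIFY_LOCK callback: a blocked SETLKW sleeps on the
 * client's cl_lock_waitq and is woken early if the server reports that
 * the conflicting lock may have been released.
 */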
7422 struct nfs4_lock_waiter {
7423 struct inode *inode;
7424 struct nfs_lowner owner;
7425 wait_queue_entry_t wait;
7426 };
7427
7428 static int
7429 nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key)
7430 {
7431 struct nfs4_lock_waiter *waiter =
7432 container_of(wait, struct nfs4_lock_waiter, wait);
7433
7434 /* NULL key means to wake up everyone */
7435 if (key) {
7436 struct cb_notify_lock_args *cbnl = key;
7437 struct nfs_lowner *lowner = &cbnl->cbnl_owner,
7438 *wowner = &waiter->owner;
7439
7440 /* Only wake if the callback was for the same owner. */
7441 if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev)
7442 return 0;
7443
7444 /* Make sure it's for the right inode */
7445 if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
7446 return 0;
7447 }
7448
7449 return woken_wake_function(wait, mode, flags, key);
7450 }
7451
7452 static int
7453 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7454 {
7455 struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
7456 struct nfs_server *server = NFS_SERVER(state->inode);
7457 struct nfs_client *clp = server->nfs_client;
7458 wait_queue_head_t *q = &clp->cl_lock_waitq;
7459 struct nfs4_lock_waiter waiter = {
7460 .inode = state->inode,
7461 .owner = { .clientid = clp->cl_clientid,
7462 .id = lsp->ls_seqid.owner_id,
7463 .s_dev = server->s_dev },
7464 };
7465 int status;
7466
7467 /* Don't bother with waitqueue if we don't expect a callback */
7468 if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags))
7469 return nfs4_retry_setlk_simple(state, cmd, request);
7470
7471 init_wait(&waiter.wait);
7472 waiter.wait.func = nfs4_wake_lock_waiter;
7473 add_wait_queue(q, &waiter.wait);
7474
7475 do {
7476 status = nfs4_proc_setlk(state, cmd, request);
7477 if (status != -EAGAIN || IS_SETLK(cmd))
7478 break;
7479
7480 status = -ERESTARTSYS;
7481 wait_woken(&waiter.wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE,
7482 NFS4_LOCK_MAXTIMEOUT);
7483 } while (!signalled());
7484
7485 remove_wait_queue(q, &waiter.wait);
7486
7487 return status;
7488 }
7489 #else /* !CONFIG_NFS_V4_1 */
7490 static inline int
7491 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7492 {
7493 return nfs4_retry_setlk_simple(state, cmd, request);
7494 }
7495 #endif
7496
7497 static int
7498 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
7499 {
7500 struct nfs_open_context *ctx;
7501 struct nfs4_state *state;
7502 int status;
7503
7504 /* verify open state */
7505 ctx = nfs_file_open_context(filp);
7506 state = ctx->state;
7507
7508 if (IS_GETLK(cmd)) {
7509 if (state != NULL)
7510 return nfs4_proc_getlk(state, F_GETLK, request);
7511 return 0;
7512 }
7513
7514 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
7515 return -EINVAL;
7516
7517 if (request->fl_type == F_UNLCK) {
7518 if (state != NULL)
7519 return nfs4_proc_unlck(state, cmd, request);
7520 return 0;
7521 }
7522
7523 if (state == NULL)
7524 return -ENOLCK;
7525
7526 if ((request->fl_flags & FL_POSIX) &&
7527 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
7528 return -ENOLCK;
7529
7530 /*
7531 * Don't rely on the VFS having checked the file open mode,
7532 * since it won't do this for flock() locks.
7533 */
7534 switch (request->fl_type) {
7535 case F_RDLCK:
7536 if (!(filp->f_mode & FMODE_READ))
7537 return -EBADF;
7538 break;
7539 case F_WRLCK:
7540 if (!(filp->f_mode & FMODE_WRITE))
7541 return -EBADF;
7542 }
7543
7544 status = nfs4_set_lock_state(state, request);
7545 if (status != 0)
7546 return status;
7547
7548 return nfs4_retry_setlk(state, cmd, request);
7549 }
7550
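/*
 * setlease support: nfs4_add_lease() only grants a lease while the inode
 * holds a matching delegation, and tears the lease down again if it
 * raced with a delegation return.
 */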
7551 static int nfs4_delete_lease(struct file *file, void **priv)
7552 {
7553 return generic_setlease(file, F_UNLCK, NULL, priv);
7554 }
7555
7556 static int nfs4_add_lease(struct file *file, long arg, struct file_lock **lease,
7557 void **priv)
7558 {
7559 struct inode *inode = file_inode(file);
7560 fmode_t type = arg == F_RDLCK ? FMODE_READ : FMODE_WRITE;
7561 int ret;
7562
7563 /* No delegation, no lease */
7564 if (!nfs4_have_delegation(inode, type))
7565 return -EAGAIN;
7566 ret = generic_setlease(file, arg, lease, priv);
7567 if (ret || nfs4_have_delegation(inode, type))
7568 return ret;
7569 /* We raced with a delegation return */
7570 nfs4_delete_lease(file, priv);
7571 return -EAGAIN;
7572 }
7573
7574 int nfs4_proc_setlease(struct file *file, long arg, struct file_lock **lease,
7575 void **priv)
7576 {
7577 switch (arg) {
7578 case F_RDLCK:
7579 case F_WRLCK:
7580 return nfs4_add_lease(file, arg, lease, priv);
7581 case F_UNLCK:
7582 return nfs4_delete_lease(file, priv);
7583 default:
7584 return -EINVAL;
7585 }
7586 }
7587
7588 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
7589 {
7590 struct nfs_server *server = NFS_SERVER(state->inode);
7591 int err;
7592
7593 err = nfs4_set_lock_state(state, fl);
7594 if (err != 0)
7595 return err;
7596 do {
7597 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
7598 if (err != -NFS4ERR_DELAY)
7599 break;
7600 ssleep(1);
7601 } while (err == -NFS4ERR_DELAY);
7602 return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
7603 }
7604
7605 struct nfs_release_lockowner_data {
7606 struct nfs4_lock_state *lsp;
7607 struct nfs_server *server;
7608 struct nfs_release_lockowner_args args;
7609 struct nfs_release_lockowner_res res;
7610 unsigned long timestamp;
7611 };
7612
7613 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
7614 {
7615 struct nfs_release_lockowner_data *data = calldata;
7616 struct nfs_server *server = data->server;
7617 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
7618 &data->res.seq_res, task);
7619 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
7620 data->timestamp = jiffies;
7621 }
7622
7623 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
7624 {
7625 struct nfs_release_lockowner_data *data = calldata;
7626 struct nfs_server *server = data->server;
7627
7628 nfs40_sequence_done(task, &data->res.seq_res);
7629
7630 switch (task->tk_status) {
7631 case 0:
7632 renew_lease(server, data->timestamp);
7633 break;
7634 case -NFS4ERR_STALE_CLIENTID:
7635 case -NFS4ERR_EXPIRED:
7636 nfs4_schedule_lease_recovery(server->nfs_client);
7637 break;
7638 case -NFS4ERR_LEASE_MOVED:
7639 case -NFS4ERR_DELAY:
7640 if (nfs4_async_handle_error(task, server,
7641 NULL, NULL) == -EAGAIN)
7642 rpc_restart_call_prepare(task);
7643 }
7644 }
7645
7646 static void nfs4_release_lockowner_release(void *calldata)
7647 {
7648 struct nfs_release_lockowner_data *data = calldata;
7649 nfs4_free_lock_state(data->server, data->lsp);
7650 kfree(calldata);
7651 }
7652
7653 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
7654 .rpc_call_prepare = nfs4_release_lockowner_prepare,
7655 .rpc_call_done = nfs4_release_lockowner_done,
7656 .rpc_release = nfs4_release_lockowner_release,
7657 };
7658
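/*
 * RELEASE_LOCKOWNER exists only in minor version 0, so this is a no-op
 * on NFSv4.1+. The call is asynchronous; the lock state is freed when
 * the RPC completes.
 */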
7659 static void
7660 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
7661 {
7662 struct nfs_release_lockowner_data *data;
7663 struct rpc_message msg = {
7664 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
7665 };
7666
7667 if (server->nfs_client->cl_mvops->minor_version != 0)
7668 return;
7669
7670 data = kmalloc(sizeof(*data), GFP_KERNEL);
7671 if (!data)
7672 return;
7673 data->lsp = lsp;
7674 data->server = server;
7675 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
7676 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
7677 data->args.lock_owner.s_dev = server->s_dev;
7678
7679 msg.rpc_argp = &data->args;
7680 msg.rpc_resp = &data->res;
7681 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
7682 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
7683 }
7684
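/*
 * xattr handlers: "system.nfs4_acl" (and "system.nfs4_dacl"/"system.nfs4_sacl"
 * on v4.1+) expose the NFSv4 ACL attributes through
 * nfs4_proc_get_acl()/nfs4_proc_set_acl().
 */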
7685 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
7686
7687 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
7688 struct user_namespace *mnt_userns,
7689 struct dentry *unused, struct inode *inode,
7690 const char *key, const void *buf,
7691 size_t buflen, int flags)
7692 {
7693 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_ACL);
7694 }
7695
7696 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
7697 struct dentry *unused, struct inode *inode,
7698 const char *key, void *buf, size_t buflen)
7699 {
7700 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_ACL);
7701 }
7702
7703 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
7704 {
7705 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_ACL);
7706 }
7707
7708 #if defined(CONFIG_NFS_V4_1)
7709 #define XATTR_NAME_NFSV4_DACL "system.nfs4_dacl"
7710
7711 static int nfs4_xattr_set_nfs4_dacl(const struct xattr_handler *handler,
7712 struct user_namespace *mnt_userns,
7713 struct dentry *unused, struct inode *inode,
7714 const char *key, const void *buf,
7715 size_t buflen, int flags)
7716 {
7717 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_DACL);
7718 }
7719
7720 static int nfs4_xattr_get_nfs4_dacl(const struct xattr_handler *handler,
7721 struct dentry *unused, struct inode *inode,
7722 const char *key, void *buf, size_t buflen)
7723 {
7724 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_DACL);
7725 }
7726
7727 static bool nfs4_xattr_list_nfs4_dacl(struct dentry *dentry)
7728 {
7729 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_DACL);
7730 }
7731
7732 #define XATTR_NAME_NFSV4_SACL "system.nfs4_sacl"
7733
7734 static int nfs4_xattr_set_nfs4_sacl(const struct xattr_handler *handler,
7735 struct user_namespace *mnt_userns,
7736 struct dentry *unused, struct inode *inode,
7737 const char *key, const void *buf,
7738 size_t buflen, int flags)
7739 {
7740 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_SACL);
7741 }
7742
7743 static int nfs4_xattr_get_nfs4_sacl(const struct xattr_handler *handler,
7744 struct dentry *unused, struct inode *inode,
7745 const char *key, void *buf, size_t buflen)
7746 {
7747 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_SACL);
7748 }
7749
7750 static bool nfs4_xattr_list_nfs4_sacl(struct dentry *dentry)
7751 {
7752 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_SACL);
7753 }
7754
7755 #endif
7756
7757 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
7758
7759 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
7760 struct user_namespace *mnt_userns,
7761 struct dentry *unused, struct inode *inode,
7762 const char *key, const void *buf,
7763 size_t buflen, int flags)
7764 {
7765 if (security_ismaclabel(key))
7766 return nfs4_set_security_label(inode, buf, buflen);
7767
7768 return -EOPNOTSUPP;
7769 }
7770
7771 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler,
7772 struct dentry *unused, struct inode *inode,
7773 const char *key, void *buf, size_t buflen)
7774 {
7775 if (security_ismaclabel(key))
7776 return nfs4_get_security_label(inode, buf, buflen);
7777 return -EOPNOTSUPP;
7778 }
7779
7780 static ssize_t
7781 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
7782 {
7783 int len = 0;
7784
7785 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) {
7786 len = security_inode_listsecurity(inode, list, list_len);
7787 if (len >= 0 && list_len && len > list_len)
7788 return -ERANGE;
7789 }
7790 return len;
7791 }
7792
7793 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
7794 .prefix = XATTR_SECURITY_PREFIX,
7795 .get = nfs4_xattr_get_nfs4_label,
7796 .set = nfs4_xattr_set_nfs4_label,
7797 };
7798
7799 #else
7800
7801 static ssize_t
7802 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
7803 {
7804 return 0;
7805 }
7806
7807 #endif
7808
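/*
 * "user." extended attributes (NFSv4.2): requests are answered from the
 * client-side xattr cache when possible, and fall back to the
 * nfs42_proc_*xattr() RPCs otherwise.
 */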
7809 #ifdef CONFIG_NFS_V4_2
7810 static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler,
7811 struct user_namespace *mnt_userns,
7812 struct dentry *unused, struct inode *inode,
7813 const char *key, const void *buf,
7814 size_t buflen, int flags)
7815 {
7816 u32 mask;
7817 int ret;
7818
7819 if (!nfs_server_capable(inode, NFS_CAP_XATTR))
7820 return -EOPNOTSUPP;
7821
7822 /*
7823 * There is no mapping from the MAY_* flags to the NFS_ACCESS_XA*
7824 * flags right now. Handling of xattr operations uses the normal
7825 * file read/write permissions.
7826 *
7827 * Just in case the server has other ideas (which RFC 8276 allows),
7828 * do a cached access check for the XA* flags to possibly avoid
7829 * doing an RPC and getting EACCES back.
7830 */
7831 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
7832 if (!(mask & NFS_ACCESS_XAWRITE))
7833 return -EACCES;
7834 }
7835
7836 if (buf == NULL) {
7837 ret = nfs42_proc_removexattr(inode, key);
7838 if (!ret)
7839 nfs4_xattr_cache_remove(inode, key);
7840 } else {
7841 ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags);
7842 if (!ret)
7843 nfs4_xattr_cache_add(inode, key, buf, NULL, buflen);
7844 }
7845
7846 return ret;
7847 }
7848
7849 static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler,
7850 struct dentry *unused, struct inode *inode,
7851 const char *key, void *buf, size_t buflen)
7852 {
7853 u32 mask;
7854 ssize_t ret;
7855
7856 if (!nfs_server_capable(inode, NFS_CAP_XATTR))
7857 return -EOPNOTSUPP;
7858
7859 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
7860 if (!(mask & NFS_ACCESS_XAREAD))
7861 return -EACCES;
7862 }
7863
7864 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
7865 if (ret)
7866 return ret;
7867
7868 ret = nfs4_xattr_cache_get(inode, key, buf, buflen);
7869 if (ret >= 0 || (ret < 0 && ret != -ENOENT))
7870 return ret;
7871
7872 ret = nfs42_proc_getxattr(inode, key, buf, buflen);
7873
7874 return ret;
7875 }
7876
7877 static ssize_t
7878 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
7879 {
7880 u64 cookie;
7881 bool eof;
7882 ssize_t ret, size;
7883 char *buf;
7884 size_t buflen;
7885 u32 mask;
7886
7887 if (!nfs_server_capable(inode, NFS_CAP_XATTR))
7888 return 0;
7889
7890 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
7891 if (!(mask & NFS_ACCESS_XALIST))
7892 return 0;
7893 }
7894
7895 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
7896 if (ret)
7897 return ret;
7898
7899 ret = nfs4_xattr_cache_list(inode, list, list_len);
7900 if (ret >= 0 || (ret < 0 && ret != -ENOENT))
7901 return ret;
7902
7903 cookie = 0;
7904 eof = false;
7905 buflen = list_len ? list_len : XATTR_LIST_MAX;
7906 buf = list_len ? list : NULL;
7907 size = 0;
7908
7909 while (!eof) {
7910 ret = nfs42_proc_listxattrs(inode, buf, buflen,
7911 &cookie, &eof);
7912 if (ret < 0)
7913 return ret;
7914
7915 if (list_len) {
7916 buf += ret;
7917 buflen -= ret;
7918 }
7919 size += ret;
7920 }
7921
7922 if (list_len)
7923 nfs4_xattr_cache_set_list(inode, list, size);
7924
7925 return size;
7926 }
7927
7928 #else
7929
7930 static ssize_t
7931 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
7932 {
7933 return 0;
7934 }
7935 #endif /* CONFIG_NFS_V4_2 */
7936
7937 /*
7938 * nfs_fhget will use either the mounted_on_fileid or the fileid
7939 */
7940 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
7941 {
7942 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
7943 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
7944 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
7945 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
7946 return;
7947
7948 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
7949 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
7950 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
7951 fattr->nlink = 2;
7952 }
7953
7954 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
7955 const struct qstr *name,
7956 struct nfs4_fs_locations *fs_locations,
7957 struct page *page)
7958 {
7959 struct nfs_server *server = NFS_SERVER(dir);
7960 u32 bitmask[3];
7961 struct nfs4_fs_locations_arg args = {
7962 .dir_fh = NFS_FH(dir),
7963 .name = name,
7964 .page = page,
7965 .bitmask = bitmask,
7966 };
7967 struct nfs4_fs_locations_res res = {
7968 .fs_locations = fs_locations,
7969 };
7970 struct rpc_message msg = {
7971 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
7972 .rpc_argp = &args,
7973 .rpc_resp = &res,
7974 };
7975 int status;
7976
7977 dprintk("%s: start\n", __func__);
7978
7979 bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS;
7980 bitmask[1] = nfs4_fattr_bitmap[1];
7981
7982 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
7983 * is not supported */
7984 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
7985 bitmask[0] &= ~FATTR4_WORD0_FILEID;
7986 else
7987 bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
7988
7989 nfs_fattr_init(fs_locations->fattr);
7990 fs_locations->server = server;
7991 fs_locations->nlocations = 0;
7992 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
7993 dprintk("%s: returned status = %d\n", __func__, status);
7994 return status;
7995 }
7996
7997 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
7998 const struct qstr *name,
7999 struct nfs4_fs_locations *fs_locations,
8000 struct page *page)
8001 {
8002 struct nfs4_exception exception = {
8003 .interruptible = true,
8004 };
8005 int err;
8006 do {
8007 err = _nfs4_proc_fs_locations(client, dir, name,
8008 fs_locations, page);
8009 trace_nfs4_get_fs_locations(dir, name, err);
8010 err = nfs4_handle_exception(NFS_SERVER(dir), err,
8011 &exception);
8012 } while (exception.retry);
8013 return err;
8014 }
8015
8016 /*
8017 * This operation also signals the server that this client is
8018 * performing migration recovery. The server can stop returning
8019 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
8020 * appended to this compound to identify the client ID which is
8021 * performing recovery.
8022 */
8023 static int _nfs40_proc_get_locations(struct nfs_server *server,
8024 struct nfs_fh *fhandle,
8025 struct nfs4_fs_locations *locations,
8026 struct page *page, const struct cred *cred)
8027 {
8028 struct rpc_clnt *clnt = server->client;
8029 u32 bitmask[2] = {
8030 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
8031 };
8032 struct nfs4_fs_locations_arg args = {
8033 .clientid = server->nfs_client->cl_clientid,
8034 .fh = fhandle,
8035 .page = page,
8036 .bitmask = bitmask,
8037 .migration = 1, /* skip LOOKUP */
8038 .renew = 1, /* append RENEW */
8039 };
8040 struct nfs4_fs_locations_res res = {
8041 .fs_locations = locations,
8042 .migration = 1,
8043 .renew = 1,
8044 };
8045 struct rpc_message msg = {
8046 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
8047 .rpc_argp = &args,
8048 .rpc_resp = &res,
8049 .rpc_cred = cred,
8050 };
8051 unsigned long now = jiffies;
8052 int status;
8053
8054 nfs_fattr_init(locations->fattr);
8055 locations->server = server;
8056 locations->nlocations = 0;
8057
8058 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8059 status = nfs4_call_sync_sequence(clnt, server, &msg,
8060 &args.seq_args, &res.seq_res);
8061 if (status)
8062 return status;
8063
8064 renew_lease(server, now);
8065 return 0;
8066 }
8067
8068 #ifdef CONFIG_NFS_V4_1
8069
8070 /*
8071 * This operation also signals the server that this client is
8072 * performing migration recovery. The server can stop asserting
8073 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
8074 * performing this operation is identified in the SEQUENCE
8075 * operation in this compound.
8076 *
8077 * When the client supports GETATTR(fs_locations_info), it can
8078 * be plumbed in here.
8079 */
8080 static int _nfs41_proc_get_locations(struct nfs_server *server,
8081 struct nfs_fh *fhandle,
8082 struct nfs4_fs_locations *locations,
8083 struct page *page, const struct cred *cred)
8084 {
8085 struct rpc_clnt *clnt = server->client;
8086 u32 bitmask[2] = {
8087 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
8088 };
8089 struct nfs4_fs_locations_arg args = {
8090 .fh = fhandle,
8091 .page = page,
8092 .bitmask = bitmask,
8093 .migration = 1, /* skip LOOKUP */
8094 };
8095 struct nfs4_fs_locations_res res = {
8096 .fs_locations = locations,
8097 .migration = 1,
8098 };
8099 struct rpc_message msg = {
8100 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
8101 .rpc_argp = &args,
8102 .rpc_resp = &res,
8103 .rpc_cred = cred,
8104 };
8105 struct nfs4_call_sync_data data = {
8106 .seq_server = server,
8107 .seq_args = &args.seq_args,
8108 .seq_res = &res.seq_res,
8109 };
8110 struct rpc_task_setup task_setup_data = {
8111 .rpc_client = clnt,
8112 .rpc_message = &msg,
8113 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops,
8114 .callback_data = &data,
8115 .flags = RPC_TASK_NO_ROUND_ROBIN,
8116 };
8117 int status;
8118
8119 nfs_fattr_init(locations->fattr);
8120 locations->server = server;
8121 locations->nlocations = 0;
8122
8123 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8124 status = nfs4_call_sync_custom(&task_setup_data);
8125 if (status == NFS4_OK &&
8126 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
8127 status = -NFS4ERR_LEASE_MOVED;
8128 return status;
8129 }
8130
8131 #endif /* CONFIG_NFS_V4_1 */
8132
8133 /**
8134 * nfs4_proc_get_locations - discover locations for a migrated FSID
8135 * @server: pointer to nfs_server to process
8136 * @fhandle: pointer to the kernel NFS client file handle
8137 * @locations: result of query
8138 * @page: buffer
8139 * @cred: credential to use for this operation
8140 *
8141 * Returns NFS4_OK on success, a negative NFS4ERR status code if the
8142 * operation failed, or a negative errno if a local error occurred.
8143 *
8144 * On success, "locations" is filled in, but if the server has
8145 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
8146 * asserted.
8147 *
8148 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
8149 * from this client that require migration recovery.
8150 */
8151 int nfs4_proc_get_locations(struct nfs_server *server,
8152 struct nfs_fh *fhandle,
8153 struct nfs4_fs_locations *locations,
8154 struct page *page, const struct cred *cred)
8155 {
8156 struct nfs_client *clp = server->nfs_client;
8157 const struct nfs4_mig_recovery_ops *ops =
8158 clp->cl_mvops->mig_recovery_ops;
8159 struct nfs4_exception exception = {
8160 .interruptible = true,
8161 };
8162 int status;
8163
8164 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
8165 (unsigned long long)server->fsid.major,
8166 (unsigned long long)server->fsid.minor,
8167 clp->cl_hostname);
8168 nfs_display_fhandle(fhandle, __func__);
8169
8170 do {
8171 status = ops->get_locations(server, fhandle, locations, page,
8172 cred);
8173 if (status != -NFS4ERR_DELAY)
8174 break;
8175 nfs4_handle_exception(server, status, &exception);
8176 } while (exception.retry);
8177 return status;
8178 }
8179
8180 /*
8181 * This operation also signals the server that this client is
8182 * performing "lease moved" recovery. The server can stop
8183 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
8184 * is appended to this compound to identify the client ID which is
8185 * performing recovery.
8186 */
8187 static int _nfs40_proc_fsid_present(struct inode *inode, const struct cred *cred)
8188 {
8189 struct nfs_server *server = NFS_SERVER(inode);
8190 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
8191 struct rpc_clnt *clnt = server->client;
8192 struct nfs4_fsid_present_arg args = {
8193 .fh = NFS_FH(inode),
8194 .clientid = clp->cl_clientid,
8195 .renew = 1, /* append RENEW */
8196 };
8197 struct nfs4_fsid_present_res res = {
8198 .renew = 1,
8199 };
8200 struct rpc_message msg = {
8201 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
8202 .rpc_argp = &args,
8203 .rpc_resp = &res,
8204 .rpc_cred = cred,
8205 };
8206 unsigned long now = jiffies;
8207 int status;
8208
8209 res.fh = nfs_alloc_fhandle();
8210 if (res.fh == NULL)
8211 return -ENOMEM;
8212
8213 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8214 status = nfs4_call_sync_sequence(clnt, server, &msg,
8215 &args.seq_args, &res.seq_res);
8216 nfs_free_fhandle(res.fh);
8217 if (status)
8218 return status;
8219
8220 do_renew_lease(clp, now);
8221 return 0;
8222 }
8223
8224 #ifdef CONFIG_NFS_V4_1
8225
8226 /*
8227 * This operation also signals the server that this client is
8228 * performing "lease moved" recovery. The server can stop asserting
8229 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
8230 * this operation is identified in the SEQUENCE operation in this
8231 * compound.
8232 */
8233 static int _nfs41_proc_fsid_present(struct inode *inode, const struct cred *cred)
8234 {
8235 struct nfs_server *server = NFS_SERVER(inode);
8236 struct rpc_clnt *clnt = server->client;
8237 struct nfs4_fsid_present_arg args = {
8238 .fh = NFS_FH(inode),
8239 };
8240 struct nfs4_fsid_present_res res = {
8241 };
8242 struct rpc_message msg = {
8243 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
8244 .rpc_argp = &args,
8245 .rpc_resp = &res,
8246 .rpc_cred = cred,
8247 };
8248 int status;
8249
8250 res.fh = nfs_alloc_fhandle();
8251 if (res.fh == NULL)
8252 return -ENOMEM;
8253
8254 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8255 status = nfs4_call_sync_sequence(clnt, server, &msg,
8256 &args.seq_args, &res.seq_res);
8257 nfs_free_fhandle(res.fh);
8258 if (status == NFS4_OK &&
8259 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
8260 status = -NFS4ERR_LEASE_MOVED;
8261 return status;
8262 }
8263
8264 #endif /* CONFIG_NFS_V4_1 */
8265
8266 /**
8267 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
8268 * @inode: inode on FSID to check
8269 * @cred: credential to use for this operation
8270 *
8271 * Server indicates whether the FSID is present, moved, or not
8272 * recognized. This operation is necessary to clear a LEASE_MOVED
8273 * condition for this client ID.
8274 *
8275 * Returns NFS4_OK if the FSID is present on this server,
8276 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
8277 * NFS4ERR code if some error occurred on the server, or a
8278 * negative errno if a local failure occurred.
8279 */
8280 int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred)
8281 {
8282 struct nfs_server *server = NFS_SERVER(inode);
8283 struct nfs_client *clp = server->nfs_client;
8284 const struct nfs4_mig_recovery_ops *ops =
8285 clp->cl_mvops->mig_recovery_ops;
8286 struct nfs4_exception exception = {
8287 .interruptible = true,
8288 };
8289 int status;
8290
8291 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
8292 (unsigned long long)server->fsid.major,
8293 (unsigned long long)server->fsid.minor,
8294 clp->cl_hostname);
8295 nfs_display_fhandle(NFS_FH(inode), __func__);
8296
8297 do {
8298 status = ops->fsid_present(inode, cred);
8299 if (status != -NFS4ERR_DELAY)
8300 break;
8301 nfs4_handle_exception(server, status, &exception);
8302 } while (exception.retry);
8303 return status;
8304 }
8305
8306 /*
8307 * If 'use_integrity' is true and the state management nfs_client
8308 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
8309 * and the machine credential as per RFC3530bis and RFC5661 Security
8310 * Considerations sections. Otherwise, just use the user cred with the
8311 * filesystem's rpc_client.
8312 */
8313 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
8314 {
8315 int status;
8316 struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
8317 struct nfs_client *clp = NFS_SERVER(dir)->nfs_client;
8318 struct nfs4_secinfo_arg args = {
8319 .dir_fh = NFS_FH(dir),
8320 .name = name,
8321 };
8322 struct nfs4_secinfo_res res = {
8323 .flavors = flavors,
8324 };
8325 struct rpc_message msg = {
8326 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
8327 .rpc_argp = &args,
8328 .rpc_resp = &res,
8329 };
8330 struct nfs4_call_sync_data data = {
8331 .seq_server = NFS_SERVER(dir),
8332 .seq_args = &args.seq_args,
8333 .seq_res = &res.seq_res,
8334 };
8335 struct rpc_task_setup task_setup = {
8336 .rpc_client = clnt,
8337 .rpc_message = &msg,
8338 .callback_ops = clp->cl_mvops->call_sync_ops,
8339 .callback_data = &data,
8340 .flags = RPC_TASK_NO_ROUND_ROBIN,
8341 };
8342 const struct cred *cred = NULL;
8343
8344 if (use_integrity) {
8345 clnt = clp->cl_rpcclient;
8346 task_setup.rpc_client = clnt;
8347
8348 cred = nfs4_get_clid_cred(clp);
8349 msg.rpc_cred = cred;
8350 }
8351
8352 dprintk("NFS call secinfo %s\n", name->name);
8353
8354 nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
8355 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
8356 status = nfs4_call_sync_custom(&task_setup);
8357
8358 dprintk("NFS reply secinfo: %d\n", status);
8359
8360 put_cred(cred);
8361 return status;
8362 }
8363
8364 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
8365 struct nfs4_secinfo_flavors *flavors)
8366 {
8367 struct nfs4_exception exception = {
8368 .interruptible = true,
8369 };
8370 int err;
8371 do {
8372 err = -NFS4ERR_WRONGSEC;
8373
8374 /* try to use integrity protection with machine cred */
8375 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
8376 err = _nfs4_proc_secinfo(dir, name, flavors, true);
8377
8378 /*
8379 * if unable to use integrity protection, or SECINFO with
8380 * integrity protection returns NFS4ERR_WRONGSEC (which is
8381 * disallowed by spec, but exists in deployed servers) use
8382 * the current filesystem's rpc_client and the user cred.
8383 */
8384 if (err == -NFS4ERR_WRONGSEC)
8385 err = _nfs4_proc_secinfo(dir, name, flavors, false);
8386
8387 trace_nfs4_secinfo(dir, name, err);
8388 err = nfs4_handle_exception(NFS_SERVER(dir), err,
8389 &exception);
8390 } while (exception.retry);
8391 return err;
8392 }
8393
8394 #ifdef CONFIG_NFS_V4_1
8395 /*
8396 * Check the exchange flags returned by the server: reject unknown flags,
8397 * reject having both the PNFS and NON_PNFS flags set, and reject having
8398 * none of the NON_PNFS, PNFS, or DS flags set.
8399 */
8400 static int nfs4_check_cl_exchange_flags(u32 flags, u32 version)
8401 {
8402 if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R))
8403 goto out_inval;
8404 else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R))
8405 goto out_inval;
8406 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
8407 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
8408 goto out_inval;
8409 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
8410 goto out_inval;
8411 return NFS_OK;
8412 out_inval:
8413 return -NFS4ERR_INVAL;
8414 }
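/*
 * For example, a reply carrying only EXCHGID4_FLAG_USE_PNFS_MDS, only
 * EXCHGID4_FLAG_USE_NON_PNFS, or only EXCHGID4_FLAG_USE_PNFS_DS passes the
 * checks above (assuming EXCHGID4_FLAG_MASK_PNFS covers exactly those three
 * role bits), while a reply setting both USE_PNFS_MDS and USE_NON_PNFS, or
 * none of the pNFS role flags, is rejected with -NFS4ERR_INVAL.
 */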
8415
8416 static bool
8417 nfs41_same_server_scope(struct nfs41_server_scope *a,
8418 struct nfs41_server_scope *b)
8419 {
8420 if (a->server_scope_sz != b->server_scope_sz)
8421 return false;
8422 return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0;
8423 }
8424
8425 static void
8426 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
8427 {
8428 struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp;
8429 struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp;
8430 struct nfs_client *clp = args->client;
8431
8432 switch (task->tk_status) {
8433 case -NFS4ERR_BADSESSION:
8434 case -NFS4ERR_DEADSESSION:
8435 nfs4_schedule_session_recovery(clp->cl_session,
8436 task->tk_status);
8437 return;
8438 }
8439 if (args->dir == NFS4_CDFC4_FORE_OR_BOTH &&
8440 res->dir != NFS4_CDFS4_BOTH) {
8441 rpc_task_close_connection(task);
8442 if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES)
8443 rpc_restart_call(task);
8444 }
8445 }
8446
8447 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
8448 .rpc_call_done = nfs4_bind_one_conn_to_session_done,
8449 };
8450
8451 /*
8452 * nfs4_proc_bind_one_conn_to_session()
8453 *
8454 * The 4.1 client currently uses the same TCP connection for the
8455 * fore and backchannel.
8456 */
8457 static
8458 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt,
8459 struct rpc_xprt *xprt,
8460 struct nfs_client *clp,
8461 const struct cred *cred)
8462 {
8463 int status;
8464 struct nfs41_bind_conn_to_session_args args = {
8465 .client = clp,
8466 .dir = NFS4_CDFC4_FORE_OR_BOTH,
8467 .retries = 0,
8468 };
8469 struct nfs41_bind_conn_to_session_res res;
8470 struct rpc_message msg = {
8471 .rpc_proc =
8472 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
8473 .rpc_argp = &args,
8474 .rpc_resp = &res,
8475 .rpc_cred = cred,
8476 };
8477 struct rpc_task_setup task_setup_data = {
8478 .rpc_client = clnt,
8479 .rpc_xprt = xprt,
8480 .callback_ops = &nfs4_bind_one_conn_to_session_ops,
8481 .rpc_message = &msg,
8482 .flags = RPC_TASK_TIMEOUT,
8483 };
8484 struct rpc_task *task;
8485
8486 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
8487 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
8488 args.dir = NFS4_CDFC4_FORE;
8489
8490 /* Do not set the backchannel flag unless this is clnt->cl_xprt */
8491 if (xprt != rcu_access_pointer(clnt->cl_xprt))
8492 args.dir = NFS4_CDFC4_FORE;
8493
8494 task = rpc_run_task(&task_setup_data);
8495 if (!IS_ERR(task)) {
8496 status = task->tk_status;
8497 rpc_put_task(task);
8498 } else
8499 status = PTR_ERR(task);
8500 trace_nfs4_bind_conn_to_session(clp, status);
8501 if (status == 0) {
8502 if (memcmp(res.sessionid.data,
8503 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
8504 dprintk("NFS: %s: Session ID mismatch\n", __func__);
8505 return -EIO;
8506 }
8507 if ((res.dir & args.dir) != res.dir || res.dir == 0) {
8508 dprintk("NFS: %s: Unexpected direction from server\n",
8509 __func__);
8510 return -EIO;
8511 }
8512 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
8513 dprintk("NFS: %s: Server returned RDMA mode = true\n",
8514 __func__);
8515 return -EIO;
8516 }
8517 }
8518
8519 return status;
8520 }
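/*
 * Note: a successful reply is sanity-checked above: the returned session ID
 * must match the current session, the granted direction must be a non-empty
 * subset of what was requested, and the RDMA mode flag must match what the
 * client sent; any mismatch is treated as -EIO.
 */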
8521
8522 struct rpc_bind_conn_calldata {
8523 struct nfs_client *clp;
8524 const struct cred *cred;
8525 };
8526
8527 static int
8528 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt,
8529 struct rpc_xprt *xprt,
8530 void *calldata)
8531 {
8532 struct rpc_bind_conn_calldata *p = calldata;
8533
8534 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred);
8535 }
8536
8537 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, const struct cred *cred)
8538 {
8539 struct rpc_bind_conn_calldata data = {
8540 .clp = clp,
8541 .cred = cred,
8542 };
8543 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient,
8544 nfs4_proc_bind_conn_to_session_callback, &data);
8545 }
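/*
 * nfs4_proc_bind_conn_to_session() runs the bind for every transport
 * currently attached to the client's rpc_clnt, so each connection in a
 * multi-transport setup (e.g. nconnect or session trunking) gets bound to
 * the session individually.
 */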
8546
8547 /*
8548 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map,
8549 * and operations we'd like to see to enable certain features in the allow map.
8550 */
8551 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
8552 .how = SP4_MACH_CRED,
8553 .enforce.u.words = {
8554 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
8555 1 << (OP_EXCHANGE_ID - 32) |
8556 1 << (OP_CREATE_SESSION - 32) |
8557 1 << (OP_DESTROY_SESSION - 32) |
8558 1 << (OP_DESTROY_CLIENTID - 32)
8559 },
8560 .allow.u.words = {
8561 [0] = 1 << (OP_CLOSE) |
8562 1 << (OP_OPEN_DOWNGRADE) |
8563 1 << (OP_LOCKU) |
8564 1 << (OP_DELEGRETURN) |
8565 1 << (OP_COMMIT),
8566 [1] = 1 << (OP_SECINFO - 32) |
8567 1 << (OP_SECINFO_NO_NAME - 32) |
8568 1 << (OP_LAYOUTRETURN - 32) |
8569 1 << (OP_TEST_STATEID - 32) |
8570 1 << (OP_FREE_STATEID - 32) |
8571 1 << (OP_WRITE - 32)
8572 }
8573 };
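/*
 * The op maps above are bitmaps indexed by NFSv4 operation number: ops below
 * 32 live in word [0] at bit OP_FOO, ops at or above 32 live in word [1] at
 * bit (OP_FOO - 32). For example (RFC 5661 numbering), OP_CLOSE is 4, so its
 * allow bit is
 *
 *	allow.u.words[0] & (1 << OP_CLOSE)
 *
 * while OP_SECINFO is 33, so its bit is
 *
 *	allow.u.words[1] & (1 << (OP_SECINFO - 32))
 */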
8574
8575 /*
8576 * Select the state protection mode for client `clp' given the server results
8577 * from exchange_id in `sp'.
8578 *
8579 * Returns 0 on success, negative errno otherwise.
8580 */
8581 static int nfs4_sp4_select_mode(struct nfs_client *clp,
8582 struct nfs41_state_protection *sp)
8583 {
8584 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
8585 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
8586 1 << (OP_EXCHANGE_ID - 32) |
8587 1 << (OP_CREATE_SESSION - 32) |
8588 1 << (OP_DESTROY_SESSION - 32) |
8589 1 << (OP_DESTROY_CLIENTID - 32)
8590 };
8591 unsigned long flags = 0;
8592 unsigned int i;
8593 int ret = 0;
8594
8595 if (sp->how == SP4_MACH_CRED) {
8596 /* Print state protect result */
8597 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
8598 for (i = 0; i <= LAST_NFS4_OP; i++) {
8599 if (test_bit(i, sp->enforce.u.longs))
8600 dfprintk(MOUNT, " enforce op %d\n", i);
8601 if (test_bit(i, sp->allow.u.longs))
8602 dfprintk(MOUNT, " allow op %d\n", i);
8603 }
8604
8605 /* make sure nothing is on enforce list that isn't supported */
8606 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
8607 if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
8608 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
8609 ret = -EINVAL;
8610 goto out;
8611 }
8612 }
8613
8614 /*
8615 * Minimal mode - state operations are allowed to use machine
8616 * credential. Note this already happens by default, so the
8617 * client doesn't have to do anything more than the negotiation.
8618 *
8619 * NOTE: we don't care if EXCHANGE_ID is in the list -
8620 * we're already using the machine cred for exchange_id
8621 * and will never use a different cred.
8622 */
8623 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
8624 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
8625 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
8626 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
8627 dfprintk(MOUNT, "sp4_mach_cred:\n");
8628 dfprintk(MOUNT, " minimal mode enabled\n");
8629 __set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags);
8630 } else {
8631 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
8632 ret = -EINVAL;
8633 goto out;
8634 }
8635
8636 if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
8637 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) &&
8638 test_bit(OP_DELEGRETURN, sp->allow.u.longs) &&
8639 test_bit(OP_LOCKU, sp->allow.u.longs)) {
8640 dfprintk(MOUNT, " cleanup mode enabled\n");
8641 __set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags);
8642 }
8643
8644 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) {
8645 dfprintk(MOUNT, " pnfs cleanup mode enabled\n");
8646 __set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags);
8647 }
8648
8649 if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
8650 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
8651 dfprintk(MOUNT, " secinfo mode enabled\n");
8652 __set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags);
8653 }
8654
8655 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
8656 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
8657 dfprintk(MOUNT, " stateid mode enabled\n");
8658 __set_bit(NFS_SP4_MACH_CRED_STATEID, &flags);
8659 }
8660
8661 if (test_bit(OP_WRITE, sp->allow.u.longs)) {
8662 dfprintk(MOUNT, " write mode enabled\n");
8663 __set_bit(NFS_SP4_MACH_CRED_WRITE, &flags);
8664 }
8665
8666 if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
8667 dfprintk(MOUNT, " commit mode enabled\n");
8668 __set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags);
8669 }
8670 }
8671 out:
8672 clp->cl_sp4_flags = flags;
8673 return ret;
8674 }
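/*
 * The NFS_SP4_MACH_CRED_* bits collected in cl_sp4_flags above are later
 * consumed by nfs4_state_protect() and friends, which swap in the machine
 * credential and the state-management rpc_clnt for the corresponding
 * operations (see for instance _nfs4_proc_secinfo() above).
 */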
8675
8676 struct nfs41_exchange_id_data {
8677 struct nfs41_exchange_id_res res;
8678 struct nfs41_exchange_id_args args;
8679 };
8680
8681 static void nfs4_exchange_id_release(void *data)
8682 {
8683 struct nfs41_exchange_id_data *cdata =
8684 (struct nfs41_exchange_id_data *)data;
8685
8686 nfs_put_client(cdata->args.client);
8687 kfree(cdata->res.impl_id);
8688 kfree(cdata->res.server_scope);
8689 kfree(cdata->res.server_owner);
8690 kfree(cdata);
8691 }
8692
8693 static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
8694 .rpc_release = nfs4_exchange_id_release,
8695 };
8696
8697 /*
8698 * nfs4_run_exchange_id()
8699 *
8700 * Set up and start an asynchronous EXCHANGE_ID RPC task.
8701 */
8702 static struct rpc_task *
8703 nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred,
8704 u32 sp4_how, struct rpc_xprt *xprt)
8705 {
8706 struct rpc_message msg = {
8707 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
8708 .rpc_cred = cred,
8709 };
8710 struct rpc_task_setup task_setup_data = {
8711 .rpc_client = clp->cl_rpcclient,
8712 .callback_ops = &nfs4_exchange_id_call_ops,
8713 .rpc_message = &msg,
8714 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
8715 };
8716 struct nfs41_exchange_id_data *calldata;
8717 int status;
8718
8719 if (!refcount_inc_not_zero(&clp->cl_count))
8720 return ERR_PTR(-EIO);
8721
8722 status = -ENOMEM;
8723 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
8724 if (!calldata)
8725 goto out;
8726
8727 nfs4_init_boot_verifier(clp, &calldata->args.verifier);
8728
8729 status = nfs4_init_uniform_client_string(clp);
8730 if (status)
8731 goto out_calldata;
8732
8733 calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
8734 GFP_NOFS);
8735 status = -ENOMEM;
8736 if (unlikely(calldata->res.server_owner == NULL))
8737 goto out_calldata;
8738
8739 calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
8740 GFP_NOFS);
8741 if (unlikely(calldata->res.server_scope == NULL))
8742 goto out_server_owner;
8743
8744 calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
8745 if (unlikely(calldata->res.impl_id == NULL))
8746 goto out_server_scope;
8747
8748 switch (sp4_how) {
8749 case SP4_NONE:
8750 calldata->args.state_protect.how = SP4_NONE;
8751 break;
8752
8753 case SP4_MACH_CRED:
8754 calldata->args.state_protect = nfs4_sp4_mach_cred_request;
8755 break;
8756
8757 default:
8758 /* unsupported! */
8759 WARN_ON_ONCE(1);
8760 status = -EINVAL;
8761 goto out_impl_id;
8762 }
8763 if (xprt) {
8764 task_setup_data.rpc_xprt = xprt;
8765 task_setup_data.flags |= RPC_TASK_SOFTCONN;
8766 memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
8767 sizeof(calldata->args.verifier.data));
8768 }
8769 calldata->args.client = clp;
8770 calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
8771 EXCHGID4_FLAG_BIND_PRINC_STATEID;
8772 #ifdef CONFIG_NFS_V4_1_MIGRATION
8773 calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR;
8774 #endif
8775 msg.rpc_argp = &calldata->args;
8776 msg.rpc_resp = &calldata->res;
8777 task_setup_data.callback_data = calldata;
8778
8779 return rpc_run_task(&task_setup_data);
8780
8781 out_impl_id:
8782 kfree(calldata->res.impl_id);
8783 out_server_scope:
8784 kfree(calldata->res.server_scope);
8785 out_server_owner:
8786 kfree(calldata->res.server_owner);
8787 out_calldata:
8788 kfree(calldata);
8789 out:
8790 nfs_put_client(clp);
8791 return ERR_PTR(status);
8792 }
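/*
 * Note: the task returned by nfs4_run_exchange_id() owns a reference to the
 * nfs_client and to the calldata; both are dropped in
 * nfs4_exchange_id_release() when the last task reference is put, so callers
 * only need to rpc_put_task() the task they were handed.
 */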
8793
8794 /*
8795 * _nfs4_proc_exchange_id()
8796 *
8797 * Wrapper for EXCHANGE_ID operation.
8798 */
8799 static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred,
8800 u32 sp4_how)
8801 {
8802 struct rpc_task *task;
8803 struct nfs41_exchange_id_args *argp;
8804 struct nfs41_exchange_id_res *resp;
8805 unsigned long now = jiffies;
8806 int status;
8807
8808 task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL);
8809 if (IS_ERR(task))
8810 return PTR_ERR(task);
8811
8812 argp = task->tk_msg.rpc_argp;
8813 resp = task->tk_msg.rpc_resp;
8814 status = task->tk_status;
8815 if (status != 0)
8816 goto out;
8817
8818 status = nfs4_check_cl_exchange_flags(resp->flags,
8819 clp->cl_mvops->minor_version);
8820 if (status != 0)
8821 goto out;
8822
8823 status = nfs4_sp4_select_mode(clp, &resp->state_protect);
8824 if (status != 0)
8825 goto out;
8826
8827 do_renew_lease(clp, now);
8828
8829 clp->cl_clientid = resp->clientid;
8830 clp->cl_exchange_flags = resp->flags;
8831 clp->cl_seqid = resp->seqid;
8832 /* Client ID is not confirmed */
8833 if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R))
8834 clear_bit(NFS4_SESSION_ESTABLISHED,
8835 &clp->cl_session->session_state);
8836
8837 if (clp->cl_serverscope != NULL &&
8838 !nfs41_same_server_scope(clp->cl_serverscope,
8839 resp->server_scope)) {
8840 dprintk("%s: server_scope mismatch detected\n",
8841 __func__);
8842 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
8843 }
8844
8845 swap(clp->cl_serverowner, resp->server_owner);
8846 swap(clp->cl_serverscope, resp->server_scope);
8847 swap(clp->cl_implid, resp->impl_id);
8848
8849 	/* Save the EXCHANGE_ID verifier for session trunking tests */
8850 memcpy(clp->cl_confirm.data, argp->verifier.data,
8851 sizeof(clp->cl_confirm.data));
8852 out:
8853 trace_nfs4_exchange_id(clp, status);
8854 rpc_put_task(task);
8855 return status;
8856 }
8857
8858 /*
8859 * nfs4_proc_exchange_id()
8860 *
8861 * Returns zero, a negative errno, or a negative NFS4ERR status code.
8862 *
8863 * Since the clientid has expired, all compounds using sessions
8864 * associated with the stale clientid will be returning
8865 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
8866 * be in some phase of session reset.
8867 *
8868 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
8869 */
8870 int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred)
8871 {
8872 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
8873 int status;
8874
8875 /* try SP4_MACH_CRED if krb5i/p */
8876 if (authflavor == RPC_AUTH_GSS_KRB5I ||
8877 authflavor == RPC_AUTH_GSS_KRB5P) {
8878 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
8879 if (!status)
8880 return 0;
8881 }
8882
8883 /* try SP4_NONE */
8884 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
8885 }
8886
8887 /**
8888 * nfs4_test_session_trunk
8889 *
8890 * This is an add_xprt_test() test function called from
8891 * rpc_clnt_setup_test_and_add_xprt.
8892 *
8893 * The rpc_xprt_switch is referenced by rpc_clnt_setup_test_and_add_xprt
8894 * and is dereferenced in nfs4_exchange_id_release.
8895 *
8896 * Upon success, add the new transport to the rpc_clnt
8897 *
8898 * @clnt: struct rpc_clnt to get new transport
8899 * @xprt: the rpc_xprt to test
8900 * @data: call data for _nfs4_proc_exchange_id.
8901 */
8902 void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
8903 void *data)
8904 {
8905 struct nfs4_add_xprt_data *adata = data;
8906 struct rpc_task *task;
8907 int status;
8908
8909 u32 sp4_how;
8910
8911 dprintk("--> %s try %s\n", __func__,
8912 xprt->address_strings[RPC_DISPLAY_ADDR]);
8913
8914 sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);
8915
8916 /* Test connection for session trunking. Async exchange_id call */
8917 task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
8918 if (IS_ERR(task))
8919 return;
8920
8921 status = task->tk_status;
8922 if (status == 0)
8923 status = nfs4_detect_session_trunking(adata->clp,
8924 task->tk_msg.rpc_resp, xprt);
8925
8926 if (status == 0)
8927 rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
8928 else if (rpc_clnt_xprt_switch_has_addr(clnt,
8929 (struct sockaddr *)&xprt->addr))
8930 rpc_clnt_xprt_switch_remove_xprt(clnt, xprt);
8931
8932 rpc_put_task(task);
8933 }
8934 EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
8935
8936 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
8937 const struct cred *cred)
8938 {
8939 struct rpc_message msg = {
8940 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
8941 .rpc_argp = clp,
8942 .rpc_cred = cred,
8943 };
8944 int status;
8945
8946 status = rpc_call_sync(clp->cl_rpcclient, &msg,
8947 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
8948 trace_nfs4_destroy_clientid(clp, status);
8949 if (status)
8950 dprintk("NFS: Got error %d from the server %s on "
8951 "DESTROY_CLIENTID.", status, clp->cl_hostname);
8952 return status;
8953 }
8954
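/*
 * Retry DESTROY_CLIENTID while the server reports NFS4ERR_DELAY or
 * NFS4ERR_CLIENTID_BUSY, sleeping one second between attempts, for at most
 * NFS4_MAX_LOOP_ON_RECOVER iterations; note that exhausting the retries
 * falls through and reports success.
 */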
8955 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
8956 const struct cred *cred)
8957 {
8958 unsigned int loop;
8959 int ret;
8960
8961 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
8962 ret = _nfs4_proc_destroy_clientid(clp, cred);
8963 switch (ret) {
8964 case -NFS4ERR_DELAY:
8965 case -NFS4ERR_CLIENTID_BUSY:
8966 ssleep(1);
8967 break;
8968 default:
8969 return ret;
8970 }
8971 }
8972 return 0;
8973 }
8974
8975 int nfs4_destroy_clientid(struct nfs_client *clp)
8976 {
8977 const struct cred *cred;
8978 int ret = 0;
8979
8980 if (clp->cl_mvops->minor_version < 1)
8981 goto out;
8982 if (clp->cl_exchange_flags == 0)
8983 goto out;
8984 if (clp->cl_preserve_clid)
8985 goto out;
8986 cred = nfs4_get_clid_cred(clp);
8987 ret = nfs4_proc_destroy_clientid(clp, cred);
8988 put_cred(cred);
8989 switch (ret) {
8990 case 0:
8991 case -NFS4ERR_STALE_CLIENTID:
8992 clp->cl_exchange_flags = 0;
8993 }
8994 out:
8995 return ret;
8996 }
8997
8998 #endif /* CONFIG_NFS_V4_1 */
8999
9000 struct nfs4_get_lease_time_data {
9001 struct nfs4_get_lease_time_args *args;
9002 struct nfs4_get_lease_time_res *res;
9003 struct nfs_client *clp;
9004 };
9005
9006 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
9007 void *calldata)
9008 {
9009 struct nfs4_get_lease_time_data *data =
9010 (struct nfs4_get_lease_time_data *)calldata;
9011
9012 	/* just set up the sequence; do not trigger session recovery
9013 	   since we're invoked from within one */
9014 nfs4_setup_sequence(data->clp,
9015 &data->args->la_seq_args,
9016 &data->res->lr_seq_res,
9017 task);
9018 }
9019
9020 /*
9021 * Called from nfs4_state_manager thread for session setup, so don't recover
9022 * from sequence operation or clientid errors.
9023 */
9024 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
9025 {
9026 struct nfs4_get_lease_time_data *data =
9027 (struct nfs4_get_lease_time_data *)calldata;
9028
9029 if (!nfs4_sequence_done(task, &data->res->lr_seq_res))
9030 return;
9031 switch (task->tk_status) {
9032 case -NFS4ERR_DELAY:
9033 case -NFS4ERR_GRACE:
9034 rpc_delay(task, NFS4_POLL_RETRY_MIN);
9035 task->tk_status = 0;
9036 fallthrough;
9037 case -NFS4ERR_RETRY_UNCACHED_REP:
9038 rpc_restart_call_prepare(task);
9039 return;
9040 }
9041 }
9042
9043 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
9044 .rpc_call_prepare = nfs4_get_lease_time_prepare,
9045 .rpc_call_done = nfs4_get_lease_time_done,
9046 };
9047
9048 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
9049 {
9050 struct nfs4_get_lease_time_args args;
9051 struct nfs4_get_lease_time_res res = {
9052 .lr_fsinfo = fsinfo,
9053 };
9054 struct nfs4_get_lease_time_data data = {
9055 .args = &args,
9056 .res = &res,
9057 .clp = clp,
9058 };
9059 struct rpc_message msg = {
9060 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
9061 .rpc_argp = &args,
9062 .rpc_resp = &res,
9063 };
9064 struct rpc_task_setup task_setup = {
9065 .rpc_client = clp->cl_rpcclient,
9066 .rpc_message = &msg,
9067 .callback_ops = &nfs4_get_lease_time_ops,
9068 .callback_data = &data,
9069 .flags = RPC_TASK_TIMEOUT,
9070 };
9071
9072 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1);
9073 return nfs4_call_sync_custom(&task_setup);
9074 }
9075
9076 #ifdef CONFIG_NFS_V4_1
9077
9078 /*
9079 * Initialize the values to be used by the client in CREATE_SESSION.
9080 * If nfs4_init_session set the fore channel request and response sizes,
9081 * use them.
9082 *
9083 * Set the back channel max_resp_sz_cached to zero to force the client to
9084 * always set csa_cachethis to FALSE because the current implementation
9085 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
9086 */
9087 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
9088 struct rpc_clnt *clnt)
9089 {
9090 unsigned int max_rqst_sz, max_resp_sz;
9091 unsigned int max_bc_payload = rpc_max_bc_payload(clnt);
9092 unsigned int max_bc_slots = rpc_num_bc_slots(clnt);
9093
9094 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
9095 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
9096
9097 /* Fore channel attributes */
9098 args->fc_attrs.max_rqst_sz = max_rqst_sz;
9099 args->fc_attrs.max_resp_sz = max_resp_sz;
9100 args->fc_attrs.max_ops = NFS4_MAX_OPS;
9101 args->fc_attrs.max_reqs = max_session_slots;
9102
9103 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
9104 "max_ops=%u max_reqs=%u\n",
9105 __func__,
9106 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
9107 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
9108
9109 /* Back channel attributes */
9110 args->bc_attrs.max_rqst_sz = max_bc_payload;
9111 args->bc_attrs.max_resp_sz = max_bc_payload;
9112 args->bc_attrs.max_resp_sz_cached = 0;
9113 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
9114 args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1);
9115 if (args->bc_attrs.max_reqs > max_bc_slots)
9116 args->bc_attrs.max_reqs = max_bc_slots;
9117
9118 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
9119 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
9120 __func__,
9121 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
9122 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
9123 args->bc_attrs.max_reqs);
9124 }
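/*
 * Rough sizing sketch (the exact numbers depend on NFS_MAX_FILE_IO_SIZE and
 * the XDR overhead constants): with a 1 MiB maximum I/O size, the fore
 * channel advertises roughly 1 MiB plus the WRITE compound overhead for
 * requests and 1 MiB plus the READ compound overhead for replies, while the
 * back channel is capped at the transport's rpc_max_bc_payload().
 */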
9125
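/*
 * The verification helpers below are deliberately asymmetric: for the fore
 * channel the server may lower our requested limits (but not raise
 * max_resp_sz or shrink max_ops below what we need), whereas for the back
 * channel the server must not hand back limits larger than the ones we
 * offered.
 */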
9126 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
9127 struct nfs41_create_session_res *res)
9128 {
9129 struct nfs4_channel_attrs *sent = &args->fc_attrs;
9130 struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
9131
9132 if (rcvd->max_resp_sz > sent->max_resp_sz)
9133 return -EINVAL;
9134 /*
9135 * Our requested max_ops is the minimum we need; we're not
9136 * prepared to break up compounds into smaller pieces than that.
9137 * So, no point even trying to continue if the server won't
9138 * cooperate:
9139 */
9140 if (rcvd->max_ops < sent->max_ops)
9141 return -EINVAL;
9142 if (rcvd->max_reqs == 0)
9143 return -EINVAL;
9144 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
9145 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
9146 return 0;
9147 }
9148
9149 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
9150 struct nfs41_create_session_res *res)
9151 {
9152 struct nfs4_channel_attrs *sent = &args->bc_attrs;
9153 struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
9154
9155 if (!(res->flags & SESSION4_BACK_CHAN))
9156 goto out;
9157 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
9158 return -EINVAL;
9159 if (rcvd->max_resp_sz < sent->max_resp_sz)
9160 return -EINVAL;
9161 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
9162 return -EINVAL;
9163 if (rcvd->max_ops > sent->max_ops)
9164 return -EINVAL;
9165 if (rcvd->max_reqs > sent->max_reqs)
9166 return -EINVAL;
9167 out:
9168 return 0;
9169 }
9170
9171 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
9172 struct nfs41_create_session_res *res)
9173 {
9174 int ret;
9175
9176 ret = nfs4_verify_fore_channel_attrs(args, res);
9177 if (ret)
9178 return ret;
9179 return nfs4_verify_back_channel_attrs(args, res);
9180 }
9181
9182 static void nfs4_update_session(struct nfs4_session *session,
9183 struct nfs41_create_session_res *res)
9184 {
9185 nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
9186 /* Mark client id and session as being confirmed */
9187 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
9188 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
9189 session->flags = res->flags;
9190 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
9191 if (res->flags & SESSION4_BACK_CHAN)
9192 memcpy(&session->bc_attrs, &res->bc_attrs,
9193 sizeof(session->bc_attrs));
9194 }
9195
9196 static int _nfs4_proc_create_session(struct nfs_client *clp,
9197 const struct cred *cred)
9198 {
9199 struct nfs4_session *session = clp->cl_session;
9200 struct nfs41_create_session_args args = {
9201 .client = clp,
9202 .clientid = clp->cl_clientid,
9203 .seqid = clp->cl_seqid,
9204 .cb_program = NFS4_CALLBACK,
9205 };
9206 struct nfs41_create_session_res res;
9207
9208 struct rpc_message msg = {
9209 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
9210 .rpc_argp = &args,
9211 .rpc_resp = &res,
9212 .rpc_cred = cred,
9213 };
9214 int status;
9215
9216 nfs4_init_channel_attrs(&args, clp->cl_rpcclient);
9217 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
9218
9219 status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
9220 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
9221 trace_nfs4_create_session(clp, status);
9222
9223 switch (status) {
9224 case -NFS4ERR_STALE_CLIENTID:
9225 case -NFS4ERR_DELAY:
9226 case -ETIMEDOUT:
9227 case -EACCES:
9228 case -EAGAIN:
9229 goto out;
9230 }
9231
9232 	/* Increment the clientid slot sequence id */
9233 	clp->cl_seqid++;
9234 	if (!status) {
9235 		/* Verify the session's negotiated channel_attrs values */
9236 		status = nfs4_verify_channel_attrs(&args, &res);
9237 if (status)
9238 goto out;
9239 nfs4_update_session(session, &res);
9240 }
9241 out:
9242 return status;
9243 }
9244
9245 /*
9246 * Issues a CREATE_SESSION operation to the server.
9247 * It is the responsibility of the caller to verify the session is
9248 * expired before calling this routine.
9249 */
9250 int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred)
9251 {
9252 int status;
9253 unsigned *ptr;
9254 struct nfs4_session *session = clp->cl_session;
9255 struct nfs4_add_xprt_data xprtdata = {
9256 .clp = clp,
9257 };
9258 struct rpc_add_xprt_test rpcdata = {
9259 .add_xprt_test = clp->cl_mvops->session_trunk,
9260 .data = &xprtdata,
9261 };
9262
9263 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
9264
9265 status = _nfs4_proc_create_session(clp, cred);
9266 if (status)
9267 goto out;
9268
9269 /* Init or reset the session slot tables */
9270 status = nfs4_setup_session_slot_tables(session);
9271 dprintk("slot table setup returned %d\n", status);
9272 if (status)
9273 goto out;
9274
9275 ptr = (unsigned *)&session->sess_id.data[0];
9276 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
9277 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
9278 rpc_clnt_probe_trunked_xprts(clp->cl_rpcclient, &rpcdata);
9279 out:
9280 return status;
9281 }
9282
9283 /*
9284 * Issue the over-the-wire RPC DESTROY_SESSION.
9285 * The caller must serialize access to this routine.
9286 */
9287 int nfs4_proc_destroy_session(struct nfs4_session *session,
9288 const struct cred *cred)
9289 {
9290 struct rpc_message msg = {
9291 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
9292 .rpc_argp = session,
9293 .rpc_cred = cred,
9294 };
9295 int status = 0;
9296
9297 	/* session is still being set up */
9298 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
9299 return 0;
9300
9301 status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
9302 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
9303 trace_nfs4_destroy_session(session->clp, status);
9304
9305 if (status)
9306 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
9307 "Session has been destroyed regardless...\n", status);
9308 rpc_clnt_manage_trunked_xprts(session->clp->cl_rpcclient);
9309 return status;
9310 }
9311
9312 /*
9313 * Renew the cl_session lease.
9314 */
9315 struct nfs4_sequence_data {
9316 struct nfs_client *clp;
9317 struct nfs4_sequence_args args;
9318 struct nfs4_sequence_res res;
9319 };
9320
9321 static void nfs41_sequence_release(void *data)
9322 {
9323 struct nfs4_sequence_data *calldata = data;
9324 struct nfs_client *clp = calldata->clp;
9325
9326 if (refcount_read(&clp->cl_count) > 1)
9327 nfs4_schedule_state_renewal(clp);
9328 nfs_put_client(clp);
9329 kfree(calldata);
9330 }
9331
9332 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
9333 {
9334 switch(task->tk_status) {
9335 case -NFS4ERR_DELAY:
9336 rpc_delay(task, NFS4_POLL_RETRY_MAX);
9337 return -EAGAIN;
9338 default:
9339 nfs4_schedule_lease_recovery(clp);
9340 }
9341 return 0;
9342 }
9343
9344 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
9345 {
9346 struct nfs4_sequence_data *calldata = data;
9347 struct nfs_client *clp = calldata->clp;
9348
9349 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
9350 return;
9351
9352 trace_nfs4_sequence(clp, task->tk_status);
9353 if (task->tk_status < 0) {
9354 dprintk("%s ERROR %d\n", __func__, task->tk_status);
9355 if (refcount_read(&clp->cl_count) == 1)
9356 return;
9357
9358 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
9359 rpc_restart_call_prepare(task);
9360 return;
9361 }
9362 }
9363 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
9364 }
9365
9366 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
9367 {
9368 struct nfs4_sequence_data *calldata = data;
9369 struct nfs_client *clp = calldata->clp;
9370 struct nfs4_sequence_args *args;
9371 struct nfs4_sequence_res *res;
9372
9373 args = task->tk_msg.rpc_argp;
9374 res = task->tk_msg.rpc_resp;
9375
9376 nfs4_setup_sequence(clp, args, res, task);
9377 }
9378
9379 static const struct rpc_call_ops nfs41_sequence_ops = {
9380 .rpc_call_done = nfs41_sequence_call_done,
9381 .rpc_call_prepare = nfs41_sequence_prepare,
9382 .rpc_release = nfs41_sequence_release,
9383 };
9384
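/*
 * _nfs41_proc_sequence() starts a bare SEQUENCE RPC, which is what keeps the
 * NFSv4.1 lease alive. nfs41_proc_async_sequence() below is the
 * fire-and-forget variant driven by the renewal timer, while
 * nfs4_proc_sequence() is the synchronous, privileged variant used during
 * state recovery.
 */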
9385 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
9386 const struct cred *cred,
9387 struct nfs4_slot *slot,
9388 bool is_privileged)
9389 {
9390 struct nfs4_sequence_data *calldata;
9391 struct rpc_message msg = {
9392 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
9393 .rpc_cred = cred,
9394 };
9395 struct rpc_task_setup task_setup_data = {
9396 .rpc_client = clp->cl_rpcclient,
9397 .rpc_message = &msg,
9398 .callback_ops = &nfs41_sequence_ops,
9399 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE,
9400 };
9401 struct rpc_task *ret;
9402
9403 ret = ERR_PTR(-EIO);
9404 if (!refcount_inc_not_zero(&clp->cl_count))
9405 goto out_err;
9406
9407 ret = ERR_PTR(-ENOMEM);
9408 calldata = kzalloc(sizeof(*calldata), GFP_KERNEL);
9409 if (calldata == NULL)
9410 goto out_put_clp;
9411 nfs4_init_sequence(&calldata->args, &calldata->res, 0, is_privileged);
9412 nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot);
9413 msg.rpc_argp = &calldata->args;
9414 msg.rpc_resp = &calldata->res;
9415 calldata->clp = clp;
9416 task_setup_data.callback_data = calldata;
9417
9418 ret = rpc_run_task(&task_setup_data);
9419 if (IS_ERR(ret))
9420 goto out_err;
9421 return ret;
9422 out_put_clp:
9423 nfs_put_client(clp);
9424 out_err:
9425 nfs41_release_slot(slot);
9426 return ret;
9427 }
9428
9429 static int nfs41_proc_async_sequence(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
9430 {
9431 struct rpc_task *task;
9432 int ret = 0;
9433
9434 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
9435 return -EAGAIN;
9436 task = _nfs41_proc_sequence(clp, cred, NULL, false);
9437 if (IS_ERR(task))
9438 ret = PTR_ERR(task);
9439 else
9440 rpc_put_task_async(task);
9441 dprintk("<-- %s status=%d\n", __func__, ret);
9442 return ret;
9443 }
9444
9445 static int nfs4_proc_sequence(struct nfs_client *clp, const struct cred *cred)
9446 {
9447 struct rpc_task *task;
9448 int ret;
9449
9450 task = _nfs41_proc_sequence(clp, cred, NULL, true);
9451 if (IS_ERR(task)) {
9452 ret = PTR_ERR(task);
9453 goto out;
9454 }
9455 ret = rpc_wait_for_completion_task(task);
9456 if (!ret)
9457 ret = task->tk_status;
9458 rpc_put_task(task);
9459 out:
9460 dprintk("<-- %s status=%d\n", __func__, ret);
9461 return ret;
9462 }
9463
9464 struct nfs4_reclaim_complete_data {
9465 struct nfs_client *clp;
9466 struct nfs41_reclaim_complete_args arg;
9467 struct nfs41_reclaim_complete_res res;
9468 };
9469
9470 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
9471 {
9472 struct nfs4_reclaim_complete_data *calldata = data;
9473
9474 nfs4_setup_sequence(calldata->clp,
9475 &calldata->arg.seq_args,
9476 &calldata->res.seq_res,
9477 task);
9478 }
9479
9480 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
9481 {
9482 switch(task->tk_status) {
9483 case 0:
9484 wake_up_all(&clp->cl_lock_waitq);
9485 fallthrough;
9486 case -NFS4ERR_COMPLETE_ALREADY:
9487 case -NFS4ERR_WRONG_CRED: /* What to do here? */
9488 break;
9489 case -NFS4ERR_DELAY:
9490 rpc_delay(task, NFS4_POLL_RETRY_MAX);
9491 fallthrough;
9492 case -NFS4ERR_RETRY_UNCACHED_REP:
9493 case -EACCES:
9494 dprintk("%s: failed to reclaim complete error %d for server %s, retrying\n",
9495 __func__, task->tk_status, clp->cl_hostname);
9496 return -EAGAIN;
9497 case -NFS4ERR_BADSESSION:
9498 case -NFS4ERR_DEADSESSION:
9499 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
9500 break;
9501 default:
9502 nfs4_schedule_lease_recovery(clp);
9503 }
9504 return 0;
9505 }
9506
9507 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
9508 {
9509 struct nfs4_reclaim_complete_data *calldata = data;
9510 struct nfs_client *clp = calldata->clp;
9511 struct nfs4_sequence_res *res = &calldata->res.seq_res;
9512
9513 if (!nfs41_sequence_done(task, res))
9514 return;
9515
9516 trace_nfs4_reclaim_complete(clp, task->tk_status);
9517 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
9518 rpc_restart_call_prepare(task);
9519 return;
9520 }
9521 }
9522
9523 static void nfs4_free_reclaim_complete_data(void *data)
9524 {
9525 struct nfs4_reclaim_complete_data *calldata = data;
9526
9527 kfree(calldata);
9528 }
9529
9530 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
9531 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
9532 .rpc_call_done = nfs4_reclaim_complete_done,
9533 .rpc_release = nfs4_free_reclaim_complete_data,
9534 };
9535
9536 /*
9537 * Issue a global reclaim complete.
9538 */
9539 static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
9540 const struct cred *cred)
9541 {
9542 struct nfs4_reclaim_complete_data *calldata;
9543 struct rpc_message msg = {
9544 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
9545 .rpc_cred = cred,
9546 };
9547 struct rpc_task_setup task_setup_data = {
9548 .rpc_client = clp->cl_rpcclient,
9549 .rpc_message = &msg,
9550 .callback_ops = &nfs4_reclaim_complete_call_ops,
9551 .flags = RPC_TASK_NO_ROUND_ROBIN,
9552 };
9553 int status = -ENOMEM;
9554
9555 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
9556 if (calldata == NULL)
9557 goto out;
9558 calldata->clp = clp;
9559 calldata->arg.one_fs = 0;
9560
9561 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0, 1);
9562 msg.rpc_argp = &calldata->arg;
9563 msg.rpc_resp = &calldata->res;
9564 task_setup_data.callback_data = calldata;
9565 status = nfs4_call_sync_custom(&task_setup_data);
9566 out:
9567 dprintk("<-- %s status=%d\n", __func__, status);
9568 return status;
9569 }
9570
9571 static void
9572 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
9573 {
9574 struct nfs4_layoutget *lgp = calldata;
9575 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
9576
9577 nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args,
9578 &lgp->res.seq_res, task);
9579 }
9580
9581 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
9582 {
9583 struct nfs4_layoutget *lgp = calldata;
9584
9585 nfs41_sequence_process(task, &lgp->res.seq_res);
9586 }
9587
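/*
 * Map LAYOUTGET errors onto local errnos: LAYOUTUNAVAILABLE becomes -ENODATA
 * (fall back to I/O through the MDS), BADLAYOUT becomes -EOVERFLOW,
 * LAYOUTTRYLATER becomes -EBUSY (-EOVERFLOW when minlength is 0) and
 * RECALLCONFLICT becomes -ERECALLCONFLICT so the caller backs off and
 * retries, while revoked/expired/bad stateids invalidate the layout and
 * return -EAGAIN; anything else goes through the generic
 * nfs4_handle_exception() machinery.
 */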
9588 static int
9589 nfs4_layoutget_handle_exception(struct rpc_task *task,
9590 struct nfs4_layoutget *lgp, struct nfs4_exception *exception)
9591 {
9592 struct inode *inode = lgp->args.inode;
9593 struct nfs_server *server = NFS_SERVER(inode);
9594 struct pnfs_layout_hdr *lo = lgp->lo;
9595 int nfs4err = task->tk_status;
9596 int err, status = 0;
9597 LIST_HEAD(head);
9598
9599 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
9600
9601 nfs4_sequence_free_slot(&lgp->res.seq_res);
9602
9603 switch (nfs4err) {
9604 case 0:
9605 goto out;
9606
9607 /*
9608 	 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs
9609 	 * on the file. Set tk_status to -ENODATA to tell the upper layer to
9610 	 * retry the I/O in-band through the MDS.
9611 */
9612 case -NFS4ERR_LAYOUTUNAVAILABLE:
9613 status = -ENODATA;
9614 goto out;
9615 /*
9616 	 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of at
9617 	 * least lgp->args.minlength bytes (see RFC 5661, section 18.43.3).
9618 */
9619 case -NFS4ERR_BADLAYOUT:
9620 status = -EOVERFLOW;
9621 goto out;
9622 /*
9623 	 * NFS4ERR_LAYOUTTRYLATER indicates a conflict with another client
9624 	 * (or clients) writing to the same RAID stripe, except when
9625 	 * the minlength argument is 0 (see RFC 5661, section 18.43.3).
9626 *
9627 * Treat it like we would RECALLCONFLICT -- we retry for a little
9628 * while, and then eventually give up.
9629 */
9630 case -NFS4ERR_LAYOUTTRYLATER:
9631 if (lgp->args.minlength == 0) {
9632 status = -EOVERFLOW;
9633 goto out;
9634 }
9635 status = -EBUSY;
9636 break;
9637 case -NFS4ERR_RECALLCONFLICT:
9638 status = -ERECALLCONFLICT;
9639 break;
9640 case -NFS4ERR_DELEG_REVOKED:
9641 case -NFS4ERR_ADMIN_REVOKED:
9642 case -NFS4ERR_EXPIRED:
9643 case -NFS4ERR_BAD_STATEID:
9644 exception->timeout = 0;
9645 spin_lock(&inode->i_lock);
9646 /* If the open stateid was bad, then recover it. */
9647 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
9648 !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
9649 spin_unlock(&inode->i_lock);
9650 exception->state = lgp->args.ctx->state;
9651 exception->stateid = &lgp->args.stateid;
9652 break;
9653 }
9654
9655 /*
9656 * Mark the bad layout state as invalid, then retry
9657 */
9658 pnfs_mark_layout_stateid_invalid(lo, &head);
9659 spin_unlock(&inode->i_lock);
9660 nfs_commit_inode(inode, 0);
9661 pnfs_free_lseg_list(&head);
9662 status = -EAGAIN;
9663 goto out;
9664 }
9665
9666 err = nfs4_handle_exception(server, nfs4err, exception);
9667 if (!status) {
9668 if (exception->retry)
9669 status = -EAGAIN;
9670 else
9671 status = err;
9672 }
9673 out:
9674 return status;
9675 }
9676
9677 size_t max_response_pages(struct nfs_server *server)
9678 {
9679 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
9680 return nfs_page_array_len(0, max_resp_sz);
9681 }
9682
9683 static void nfs4_layoutget_release(void *calldata)
9684 {
9685 struct nfs4_layoutget *lgp = calldata;
9686
9687 nfs4_sequence_free_slot(&lgp->res.seq_res);
9688 pnfs_layoutget_free(lgp);
9689 }
9690
9691 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
9692 .rpc_call_prepare = nfs4_layoutget_prepare,
9693 .rpc_call_done = nfs4_layoutget_done,
9694 .rpc_release = nfs4_layoutget_release,
9695 };
9696
9697 struct pnfs_layout_segment *
9698 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
9699 {
9700 struct inode *inode = lgp->args.inode;
9701 struct nfs_server *server = NFS_SERVER(inode);
9702 struct rpc_task *task;
9703 struct rpc_message msg = {
9704 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
9705 .rpc_argp = &lgp->args,
9706 .rpc_resp = &lgp->res,
9707 .rpc_cred = lgp->cred,
9708 };
9709 struct rpc_task_setup task_setup_data = {
9710 .rpc_client = server->client,
9711 .rpc_message = &msg,
9712 .callback_ops = &nfs4_layoutget_call_ops,
9713 .callback_data = lgp,
9714 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF |
9715 RPC_TASK_MOVEABLE,
9716 };
9717 struct pnfs_layout_segment *lseg = NULL;
9718 struct nfs4_exception exception = {
9719 .inode = inode,
9720 .timeout = *timeout,
9721 };
9722 int status = 0;
9723
9724 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0);
9725
9726 task = rpc_run_task(&task_setup_data);
9727 if (IS_ERR(task))
9728 return ERR_CAST(task);
9729
9730 status = rpc_wait_for_completion_task(task);
9731 if (status != 0)
9732 goto out;
9733
9734 if (task->tk_status < 0) {
9735 status = nfs4_layoutget_handle_exception(task, lgp, &exception);
9736 *timeout = exception.timeout;
9737 } else if (lgp->res.layoutp->len == 0) {
9738 status = -EAGAIN;
9739 *timeout = nfs4_update_delay(&exception.timeout);
9740 } else
9741 lseg = pnfs_layout_process(lgp);
9742 out:
9743 trace_nfs4_layoutget(lgp->args.ctx,
9744 &lgp->args.range,
9745 &lgp->res.range,
9746 &lgp->res.stateid,
9747 status);
9748
9749 rpc_put_task(task);
9750 dprintk("<-- %s status=%d\n", __func__, status);
9751 if (status)
9752 return ERR_PTR(status);
9753 return lseg;
9754 }
9755
9756 static void
9757 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
9758 {
9759 struct nfs4_layoutreturn *lrp = calldata;
9760
9761 nfs4_setup_sequence(lrp->clp,
9762 &lrp->args.seq_args,
9763 &lrp->res.seq_res,
9764 task);
9765 if (!pnfs_layout_is_valid(lrp->args.layout))
9766 rpc_exit(task, 0);
9767 }
9768
9769 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
9770 {
9771 struct nfs4_layoutreturn *lrp = calldata;
9772 struct nfs_server *server;
9773
9774 if (!nfs41_sequence_process(task, &lrp->res.seq_res))
9775 return;
9776
9777 /*
9778 	 * Was there an RPC-level error? If so, assume the call succeeded,
9779 	 * and that we need to release the layout.
9780 */
9781 if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) {
9782 lrp->res.lrs_present = 0;
9783 return;
9784 }
9785
9786 server = NFS_SERVER(lrp->args.inode);
9787 switch (task->tk_status) {
9788 case -NFS4ERR_OLD_STATEID:
9789 if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid,
9790 &lrp->args.range,
9791 lrp->args.inode))
9792 goto out_restart;
9793 fallthrough;
9794 default:
9795 task->tk_status = 0;
9796 fallthrough;
9797 case 0:
9798 break;
9799 case -NFS4ERR_DELAY:
9800 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
9801 break;
9802 goto out_restart;
9803 }
9804 return;
9805 out_restart:
9806 task->tk_status = 0;
9807 nfs4_sequence_free_slot(&lrp->res.seq_res);
9808 rpc_restart_call_prepare(task);
9809 }
9810
9811 static void nfs4_layoutreturn_release(void *calldata)
9812 {
9813 struct nfs4_layoutreturn *lrp = calldata;
9814 struct pnfs_layout_hdr *lo = lrp->args.layout;
9815
9816 pnfs_layoutreturn_free_lsegs(lo, &lrp->args.stateid, &lrp->args.range,
9817 lrp->res.lrs_present ? &lrp->res.stateid : NULL);
9818 nfs4_sequence_free_slot(&lrp->res.seq_res);
9819 if (lrp->ld_private.ops && lrp->ld_private.ops->free)
9820 lrp->ld_private.ops->free(&lrp->ld_private);
9821 pnfs_put_layout_hdr(lrp->args.layout);
9822 nfs_iput_and_deactive(lrp->inode);
9823 put_cred(lrp->cred);
9824 kfree(calldata);
9825 }
9826
9827 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
9828 .rpc_call_prepare = nfs4_layoutreturn_prepare,
9829 .rpc_call_done = nfs4_layoutreturn_done,
9830 .rpc_release = nfs4_layoutreturn_release,
9831 };
9832
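/*
 * In the async case nfs4_proc_layoutreturn() must be able to pin the inode;
 * if nfs_igrab_and_active() fails, the request is released and -EAGAIN is
 * returned so the caller can retry later. In the sync case the task's
 * tk_status is reported back to the caller.
 */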
9833 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
9834 {
9835 struct rpc_task *task;
9836 struct rpc_message msg = {
9837 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
9838 .rpc_argp = &lrp->args,
9839 .rpc_resp = &lrp->res,
9840 .rpc_cred = lrp->cred,
9841 };
9842 struct rpc_task_setup task_setup_data = {
9843 .rpc_client = NFS_SERVER(lrp->args.inode)->client,
9844 .rpc_message = &msg,
9845 .callback_ops = &nfs4_layoutreturn_call_ops,
9846 .callback_data = lrp,
9847 .flags = RPC_TASK_MOVEABLE,
9848 };
9849 int status = 0;
9850
9851 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client,
9852 NFS_SP4_MACH_CRED_PNFS_CLEANUP,
9853 &task_setup_data.rpc_client, &msg);
9854
9855 lrp->inode = nfs_igrab_and_active(lrp->args.inode);
9856 if (!sync) {
9857 if (!lrp->inode) {
9858 nfs4_layoutreturn_release(lrp);
9859 return -EAGAIN;
9860 }
9861 task_setup_data.flags |= RPC_TASK_ASYNC;
9862 }
9863 if (!lrp->inode)
9864 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
9865 1);
9866 else
9867 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
9868 0);
9869 task = rpc_run_task(&task_setup_data);
9870 if (IS_ERR(task))
9871 return PTR_ERR(task);
9872 if (sync)
9873 status = task->tk_status;
9874 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status);
9875 dprintk("<-- %s status=%d\n", __func__, status);
9876 rpc_put_task(task);
9877 return status;
9878 }
9879
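/*
 * GETDEVICEINFO asks the server to notify us about device ID changes and
 * deletions; if the server declines any of the requested notification types,
 * pdev->nocache is set so the resulting device ID node is treated as
 * non-cacheable.
 */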
9880 static int
9881 _nfs4_proc_getdeviceinfo(struct nfs_server *server,
9882 struct pnfs_device *pdev,
9883 const struct cred *cred)
9884 {
9885 struct nfs4_getdeviceinfo_args args = {
9886 .pdev = pdev,
9887 .notify_types = NOTIFY_DEVICEID4_CHANGE |
9888 NOTIFY_DEVICEID4_DELETE,
9889 };
9890 struct nfs4_getdeviceinfo_res res = {
9891 .pdev = pdev,
9892 };
9893 struct rpc_message msg = {
9894 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
9895 .rpc_argp = &args,
9896 .rpc_resp = &res,
9897 .rpc_cred = cred,
9898 };
9899 int status;
9900
9901 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
9902 if (res.notification & ~args.notify_types)
9903 dprintk("%s: unsupported notification\n", __func__);
9904 if (res.notification != args.notify_types)
9905 pdev->nocache = 1;
9906
9907 trace_nfs4_getdeviceinfo(server, &pdev->dev_id, status);
9908
9909 dprintk("<-- %s status=%d\n", __func__, status);
9910
9911 return status;
9912 }
9913
9914 int nfs4_proc_getdeviceinfo(struct nfs_server *server,
9915 struct pnfs_device *pdev,
9916 const struct cred *cred)
9917 {
9918 struct nfs4_exception exception = { };
9919 int err;
9920
9921 do {
9922 err = nfs4_handle_exception(server,
9923 _nfs4_proc_getdeviceinfo(server, pdev, cred),
9924 &exception);
9925 } while (exception.retry);
9926 return err;
9927 }
9928 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
9929
9930 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
9931 {
9932 struct nfs4_layoutcommit_data *data = calldata;
9933 struct nfs_server *server = NFS_SERVER(data->args.inode);
9934
9935 nfs4_setup_sequence(server->nfs_client,
9936 &data->args.seq_args,
9937 &data->res.seq_res,
9938 task);
9939 }
9940
9941 static void
9942 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
9943 {
9944 struct nfs4_layoutcommit_data *data = calldata;
9945 struct nfs_server *server = NFS_SERVER(data->args.inode);
9946
9947 if (!nfs41_sequence_done(task, &data->res.seq_res))
9948 return;
9949
9950 switch (task->tk_status) { /* Just ignore these failures */
9951 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
9952 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
9953 case -NFS4ERR_BADLAYOUT: /* no layout */
9954 	case -NFS4ERR_GRACE:		/* loca_reclaim is always false */
9955 task->tk_status = 0;
9956 break;
9957 case 0:
9958 break;
9959 default:
9960 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
9961 rpc_restart_call_prepare(task);
9962 return;
9963 }
9964 }
9965 }
9966
9967 static void nfs4_layoutcommit_release(void *calldata)
9968 {
9969 struct nfs4_layoutcommit_data *data = calldata;
9970
9971 pnfs_cleanup_layoutcommit(data);
9972 nfs_post_op_update_inode_force_wcc(data->args.inode,
9973 data->res.fattr);
9974 put_cred(data->cred);
9975 nfs_iput_and_deactive(data->inode);
9976 kfree(data);
9977 }
9978
9979 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
9980 .rpc_call_prepare = nfs4_layoutcommit_prepare,
9981 .rpc_call_done = nfs4_layoutcommit_done,
9982 .rpc_release = nfs4_layoutcommit_release,
9983 };
9984
9985 int
9986 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
9987 {
9988 struct rpc_message msg = {
9989 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
9990 .rpc_argp = &data->args,
9991 .rpc_resp = &data->res,
9992 .rpc_cred = data->cred,
9993 };
9994 struct rpc_task_setup task_setup_data = {
9995 .task = &data->task,
9996 .rpc_client = NFS_CLIENT(data->args.inode),
9997 .rpc_message = &msg,
9998 .callback_ops = &nfs4_layoutcommit_ops,
9999 .callback_data = data,
10000 .flags = RPC_TASK_MOVEABLE,
10001 };
10002 struct rpc_task *task;
10003 int status = 0;
10004
10005 dprintk("NFS: initiating layoutcommit call. sync %d "
10006 "lbw: %llu inode %lu\n", sync,
10007 data->args.lastbytewritten,
10008 data->args.inode->i_ino);
10009
10010 if (!sync) {
10011 data->inode = nfs_igrab_and_active(data->args.inode);
10012 if (data->inode == NULL) {
10013 nfs4_layoutcommit_release(data);
10014 return -EAGAIN;
10015 }
10016 task_setup_data.flags = RPC_TASK_ASYNC;
10017 }
10018 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
10019 task = rpc_run_task(&task_setup_data);
10020 if (IS_ERR(task))
10021 return PTR_ERR(task);
10022 if (sync)
10023 status = task->tk_status;
10024 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status);
10025 dprintk("%s: status %d\n", __func__, status);
10026 rpc_put_task(task);
10027 return status;
10028 }
10029
10030 /*
10031 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
10032 * possible) as per RFC3530bis and RFC5661 Security Considerations sections.
10033 */
10034 static int
10035 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
10036 struct nfs_fsinfo *info,
10037 struct nfs4_secinfo_flavors *flavors, bool use_integrity)
10038 {
10039 struct nfs41_secinfo_no_name_args args = {
10040 .style = SECINFO_STYLE_CURRENT_FH,
10041 };
10042 struct nfs4_secinfo_res res = {
10043 .flavors = flavors,
10044 };
10045 struct rpc_message msg = {
10046 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
10047 .rpc_argp = &args,
10048 .rpc_resp = &res,
10049 };
10050 struct nfs4_call_sync_data data = {
10051 .seq_server = server,
10052 .seq_args = &args.seq_args,
10053 .seq_res = &res.seq_res,
10054 };
10055 struct rpc_task_setup task_setup = {
10056 .rpc_client = server->client,
10057 .rpc_message = &msg,
10058 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops,
10059 .callback_data = &data,
10060 .flags = RPC_TASK_NO_ROUND_ROBIN,
10061 };
10062 const struct cred *cred = NULL;
10063 int status;
10064
10065 if (use_integrity) {
10066 task_setup.rpc_client = server->nfs_client->cl_rpcclient;
10067
10068 cred = nfs4_get_clid_cred(server->nfs_client);
10069 msg.rpc_cred = cred;
10070 }
10071
10072 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
10073 status = nfs4_call_sync_custom(&task_setup);
10074 dprintk("<-- %s status=%d\n", __func__, status);
10075
10076 put_cred(cred);
10077
10078 return status;
10079 }
10080
10081 static int
10082 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
10083 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
10084 {
10085 struct nfs4_exception exception = {
10086 .interruptible = true,
10087 };
10088 int err;
10089 do {
10090 /* first try using integrity protection */
10091 err = -NFS4ERR_WRONGSEC;
10092
10093 /* try to use integrity protection with machine cred */
10094 if (_nfs4_is_integrity_protected(server->nfs_client))
10095 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
10096 flavors, true);
10097
10098 /*
10099 * If we are unable to use integrity protection, or if SECINFO with
10100 * integrity protection returns NFS4ERR_WRONGSEC (which is disallowed
10101 * by the spec but exists in deployed servers), fall back to the
10102 * current filesystem's rpc_client and the user cred.
10103 */
10104 if (err == -NFS4ERR_WRONGSEC)
10105 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
10106 flavors, false);
10107
10108 switch (err) {
10109 case 0:
10110 case -NFS4ERR_WRONGSEC:
10111 case -ENOTSUPP:
10112 goto out;
10113 default:
10114 err = nfs4_handle_exception(server, err, &exception);
10115 }
10116 } while (exception.retry);
10117 out:
10118 return err;
10119 }
10120
10121 static int
10122 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
10123 struct nfs_fsinfo *info)
10124 {
10125 int err;
10126 struct page *page;
10127 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
10128 struct nfs4_secinfo_flavors *flavors;
10129 struct nfs4_secinfo4 *secinfo;
10130 int i;
10131
10132 page = alloc_page(GFP_KERNEL);
10133 if (!page) {
10134 err = -ENOMEM;
10135 goto out;
10136 }
10137
10138 flavors = page_address(page);
10139 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
10140
10141 /*
10142 * Fall back on the "guess and check" method if
10143 * the server doesn't support SECINFO_NO_NAME.
10144 */
10145 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
10146 err = nfs4_find_root_sec(server, fhandle, info);
10147 goto out_freepage;
10148 }
10149 if (err)
10150 goto out_freepage;
10151
10152 for (i = 0; i < flavors->num_flavors; i++) {
10153 secinfo = &flavors->flavors[i];
10154
10155 switch (secinfo->flavor) {
10156 case RPC_AUTH_NULL:
10157 case RPC_AUTH_UNIX:
10158 case RPC_AUTH_GSS:
10159 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
10160 &secinfo->flavor_info);
10161 break;
10162 default:
10163 flavor = RPC_AUTH_MAXFLAVOR;
10164 break;
10165 }
10166
10167 if (!nfs_auth_info_match(&server->auth_info, flavor))
10168 flavor = RPC_AUTH_MAXFLAVOR;
10169
10170 if (flavor != RPC_AUTH_MAXFLAVOR) {
10171 err = nfs4_lookup_root_sec(server, fhandle,
10172 info, flavor);
10173 if (!err)
10174 break;
10175 }
10176 }
10177
10178 if (flavor == RPC_AUTH_MAXFLAVOR)
10179 err = -EPERM;
10180
10181 out_freepage:
10182 put_page(page);
10183 if (err == -EACCES)
10184 return -EPERM;
10185 out:
10186 return err;
10187 }
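
/*
 * Illustrative walk-through of the selection loop above (a made-up server
 * reply, not taken from any particular deployment): if SECINFO_NO_NAME
 * returns the flavors { AUTH_GSS(krb5i), AUTH_UNIX }, the GSS entry is first
 * mapped by rpcauth_get_pseudoflavor() to a pseudoflavor such as
 * RPC_AUTH_GSS_KRB5I.  If that pseudoflavor is allowed by the mount's
 * auth_info (for example no "sec=" restriction was given),
 * nfs4_lookup_root_sec() is retried with it; on failure or mismatch the loop
 * moves on to AUTH_UNIX.  If no listed flavor is usable, -EPERM is returned.
 */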
10188
10189 static int _nfs41_test_stateid(struct nfs_server *server,
10190 nfs4_stateid *stateid,
10191 const struct cred *cred)
10192 {
10193 int status;
10194 struct nfs41_test_stateid_args args = {
10195 .stateid = stateid,
10196 };
10197 struct nfs41_test_stateid_res res;
10198 struct rpc_message msg = {
10199 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
10200 .rpc_argp = &args,
10201 .rpc_resp = &res,
10202 .rpc_cred = cred,
10203 };
10204 struct rpc_clnt *rpc_client = server->client;
10205
10206 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
10207 &rpc_client, &msg);
10208
10209 dprintk("NFS call test_stateid %p\n", stateid);
10210 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
10211 status = nfs4_call_sync_sequence(rpc_client, server, &msg,
10212 &args.seq_args, &res.seq_res);
10213 if (status != NFS_OK) {
10214 dprintk("NFS reply test_stateid: failed, %d\n", status);
10215 return status;
10216 }
10217 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
10218 return -res.status;
10219 }
10220
10221 static void nfs4_handle_delay_or_session_error(struct nfs_server *server,
10222 int err, struct nfs4_exception *exception)
10223 {
10224 exception->retry = 0;
10225 switch (err) {
10226 case -NFS4ERR_DELAY:
10227 case -NFS4ERR_RETRY_UNCACHED_REP:
10228 nfs4_handle_exception(server, err, exception);
10229 break;
10230 case -NFS4ERR_BADSESSION:
10231 case -NFS4ERR_BADSLOT:
10232 case -NFS4ERR_BAD_HIGH_SLOT:
10233 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
10234 case -NFS4ERR_DEADSESSION:
10235 nfs4_do_handle_exception(server, err, exception);
10236 }
10237 }
10238
10239 /**
10240 * nfs41_test_stateid - perform a TEST_STATEID operation
10241 *
10242 * @server: server / transport on which to perform the operation
10243 * @stateid: state ID to test
10244 * @cred: credential
10245 *
10246 * Returns NFS_OK if the server recognizes that "stateid" is valid.
10247 * Otherwise a negative NFS4ERR value is returned, indicating either that
10248 * the operation failed or that the state ID is not currently valid.
10249 */
10250 static int nfs41_test_stateid(struct nfs_server *server,
10251 nfs4_stateid *stateid,
10252 const struct cred *cred)
10253 {
10254 struct nfs4_exception exception = {
10255 .interruptible = true,
10256 };
10257 int err;
10258 do {
10259 err = _nfs41_test_stateid(server, stateid, cred);
10260 nfs4_handle_delay_or_session_error(server, err, &exception);
10261 } while (exception.retry);
10262 return err;
10263 }
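
/*
 * Simplified usage sketch: a stale state ID reported by TEST_STATEID is
 * normally handed back to the server with FREE_STATEID.  The full policy
 * used by this client (see nfs41_test_and_free_expired_stateid, referenced
 * in the minor-version ops below) also distinguishes the individual
 * NFS4ERR_* results, but the shape is roughly:
 *
 *	if (nfs41_test_stateid(server, stateid, cred) != NFS_OK)
 *		nfs41_free_stateid(server, stateid, cred, true);
 */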
10264
10265 struct nfs_free_stateid_data {
10266 struct nfs_server *server;
10267 struct nfs41_free_stateid_args args;
10268 struct nfs41_free_stateid_res res;
10269 };
10270
10271 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
10272 {
10273 struct nfs_free_stateid_data *data = calldata;
10274 nfs4_setup_sequence(data->server->nfs_client,
10275 &data->args.seq_args,
10276 &data->res.seq_res,
10277 task);
10278 }
10279
10280 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
10281 {
10282 struct nfs_free_stateid_data *data = calldata;
10283
10284 nfs41_sequence_done(task, &data->res.seq_res);
10285
10286 switch (task->tk_status) {
10287 case -NFS4ERR_DELAY:
10288 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
10289 rpc_restart_call_prepare(task);
10290 }
10291 }
10292
10293 static void nfs41_free_stateid_release(void *calldata)
10294 {
10295 struct nfs_free_stateid_data *data = calldata;
10296 struct nfs_client *clp = data->server->nfs_client;
10297
10298 nfs_put_client(clp);
10299 kfree(calldata);
10300 }
10301
10302 static const struct rpc_call_ops nfs41_free_stateid_ops = {
10303 .rpc_call_prepare = nfs41_free_stateid_prepare,
10304 .rpc_call_done = nfs41_free_stateid_done,
10305 .rpc_release = nfs41_free_stateid_release,
10306 };
10307
10308 /**
10309 * nfs41_free_stateid - perform a FREE_STATEID operation
10310 *
10311 * @server: server / transport on which to perform the operation
10312 * @stateid: state ID to release
10313 * @cred: credential
10314 * @privileged: set to true if this call needs to be privileged
10315 *
10316 * Note: this function is always asynchronous.
10317 */
10318 static int nfs41_free_stateid(struct nfs_server *server,
10319 const nfs4_stateid *stateid,
10320 const struct cred *cred,
10321 bool privileged)
10322 {
10323 struct rpc_message msg = {
10324 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
10325 .rpc_cred = cred,
10326 };
10327 struct rpc_task_setup task_setup = {
10328 .rpc_client = server->client,
10329 .rpc_message = &msg,
10330 .callback_ops = &nfs41_free_stateid_ops,
10331 .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
10332 };
10333 struct nfs_free_stateid_data *data;
10334 struct rpc_task *task;
10335 struct nfs_client *clp = server->nfs_client;
10336
10337 if (!refcount_inc_not_zero(&clp->cl_count))
10338 return -EIO;
10339
10340 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
10341 &task_setup.rpc_client, &msg);
10342
10343 dprintk("NFS call free_stateid %p\n", stateid);
10344 data = kmalloc(sizeof(*data), GFP_KERNEL);
10345 if (!data) {
/* drop the nfs_client reference taken above before bailing out */
nfs_put_client(clp);
10346 return -ENOMEM;
}
10347 data->server = server;
10348 nfs4_stateid_copy(&data->args.stateid, stateid);
10349
10350 task_setup.callback_data = data;
10351
10352 msg.rpc_argp = &data->args;
10353 msg.rpc_resp = &data->res;
10354 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, privileged);
10355 task = rpc_run_task(&task_setup);
10356 if (IS_ERR(task))
10357 return PTR_ERR(task);
10358 rpc_put_task(task);
10359 return 0;
10360 }
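
/*
 * Note on the asynchronous model above: rpc_run_task() takes over ownership
 * of "data", and rpc_put_task() only drops this function's reference to the
 * task.  The allocation and the nfs_client reference are released later from
 * nfs41_free_stateid_release(), so a return value of 0 means the
 * FREE_STATEID call has been queued, not that it has completed.
 */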
10361
10362 static void
10363 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
10364 {
10365 const struct cred *cred = lsp->ls_state->owner->so_cred;
10366
10367 nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
10368 nfs4_free_lock_state(server, lsp);
10369 }
10370
10371 static bool nfs41_match_stateid(const nfs4_stateid *s1,
10372 const nfs4_stateid *s2)
10373 {
10374 if (s1->type != s2->type)
10375 return false;
10376
10377 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
10378 return false;
10379
10380 if (s1->seqid == s2->seqid)
10381 return true;
10382
10383 return s1->seqid == 0 || s2->seqid == 0;
10384 }
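
/*
 * Illustrative example of the seqid rule above: two stateids that agree on
 * "type" and "other" match if their seqids are equal, and also if either
 * seqid is zero, since RFC 5661 treats a zero seqid as "the current version
 * of this stateid".  So seqids 3 and 0 match, while 3 and 4 do not.
 */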
10385
10386 #endif /* CONFIG_NFS_V4_1 */
10387
10388 static bool nfs4_match_stateid(const nfs4_stateid *s1,
10389 const nfs4_stateid *s2)
10390 {
10391 return nfs4_stateid_match(s1, s2);
10392 }
10393
10394
10395 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
10396 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
10397 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
10398 .recover_open = nfs4_open_reclaim,
10399 .recover_lock = nfs4_lock_reclaim,
10400 .establish_clid = nfs4_init_clientid,
10401 .detect_trunking = nfs40_discover_server_trunking,
10402 };
10403
10404 #if defined(CONFIG_NFS_V4_1)
10405 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
10406 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
10407 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
10408 .recover_open = nfs4_open_reclaim,
10409 .recover_lock = nfs4_lock_reclaim,
10410 .establish_clid = nfs41_init_clientid,
10411 .reclaim_complete = nfs41_proc_reclaim_complete,
10412 .detect_trunking = nfs41_discover_server_trunking,
10413 };
10414 #endif /* CONFIG_NFS_V4_1 */
10415
10416 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
10417 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
10418 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
10419 .recover_open = nfs40_open_expired,
10420 .recover_lock = nfs4_lock_expired,
10421 .establish_clid = nfs4_init_clientid,
10422 };
10423
10424 #if defined(CONFIG_NFS_V4_1)
10425 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
10426 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
10427 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
10428 .recover_open = nfs41_open_expired,
10429 .recover_lock = nfs41_lock_expired,
10430 .establish_clid = nfs41_init_clientid,
10431 };
10432 #endif /* CONFIG_NFS_V4_1 */
10433
10434 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
10435 .sched_state_renewal = nfs4_proc_async_renew,
10436 .get_state_renewal_cred = nfs4_get_renew_cred,
10437 .renew_lease = nfs4_proc_renew,
10438 };
10439
10440 #if defined(CONFIG_NFS_V4_1)
10441 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
10442 .sched_state_renewal = nfs41_proc_async_sequence,
10443 .get_state_renewal_cred = nfs4_get_machine_cred,
10444 .renew_lease = nfs4_proc_sequence,
10445 };
10446 #endif
10447
10448 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
10449 .get_locations = _nfs40_proc_get_locations,
10450 .fsid_present = _nfs40_proc_fsid_present,
10451 };
10452
10453 #if defined(CONFIG_NFS_V4_1)
10454 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
10455 .get_locations = _nfs41_proc_get_locations,
10456 .fsid_present = _nfs41_proc_fsid_present,
10457 };
10458 #endif /* CONFIG_NFS_V4_1 */
10459
10460 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
10461 .minor_version = 0,
10462 .init_caps = NFS_CAP_READDIRPLUS
10463 | NFS_CAP_ATOMIC_OPEN
10464 | NFS_CAP_POSIX_LOCK,
10465 .init_client = nfs40_init_client,
10466 .shutdown_client = nfs40_shutdown_client,
10467 .match_stateid = nfs4_match_stateid,
10468 .find_root_sec = nfs4_find_root_sec,
10469 .free_lock_state = nfs4_release_lockowner,
10470 .test_and_free_expired = nfs40_test_and_free_expired_stateid,
10471 .alloc_seqid = nfs_alloc_seqid,
10472 .call_sync_ops = &nfs40_call_sync_ops,
10473 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
10474 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
10475 .state_renewal_ops = &nfs40_state_renewal_ops,
10476 .mig_recovery_ops = &nfs40_mig_recovery_ops,
10477 };
10478
10479 #if defined(CONFIG_NFS_V4_1)
10480 static struct nfs_seqid *
10481 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
10482 {
10483 return NULL;
10484 }
10485
10486 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
10487 .minor_version = 1,
10488 .init_caps = NFS_CAP_READDIRPLUS
10489 | NFS_CAP_ATOMIC_OPEN
10490 | NFS_CAP_POSIX_LOCK
10491 | NFS_CAP_STATEID_NFSV41
10492 | NFS_CAP_ATOMIC_OPEN_V1
10493 | NFS_CAP_LGOPEN
10494 | NFS_CAP_MOVEABLE,
10495 .init_client = nfs41_init_client,
10496 .shutdown_client = nfs41_shutdown_client,
10497 .match_stateid = nfs41_match_stateid,
10498 .find_root_sec = nfs41_find_root_sec,
10499 .free_lock_state = nfs41_free_lock_state,
10500 .test_and_free_expired = nfs41_test_and_free_expired_stateid,
10501 .alloc_seqid = nfs_alloc_no_seqid,
10502 .session_trunk = nfs4_test_session_trunk,
10503 .call_sync_ops = &nfs41_call_sync_ops,
10504 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
10505 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
10506 .state_renewal_ops = &nfs41_state_renewal_ops,
10507 .mig_recovery_ops = &nfs41_mig_recovery_ops,
10508 };
10509 #endif
10510
10511 #if defined(CONFIG_NFS_V4_2)
10512 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
10513 .minor_version = 2,
10514 .init_caps = NFS_CAP_READDIRPLUS
10515 | NFS_CAP_ATOMIC_OPEN
10516 | NFS_CAP_POSIX_LOCK
10517 | NFS_CAP_STATEID_NFSV41
10518 | NFS_CAP_ATOMIC_OPEN_V1
10519 | NFS_CAP_LGOPEN
10520 | NFS_CAP_ALLOCATE
10521 | NFS_CAP_COPY
10522 | NFS_CAP_OFFLOAD_CANCEL
10523 | NFS_CAP_COPY_NOTIFY
10524 | NFS_CAP_DEALLOCATE
10525 | NFS_CAP_SEEK
10526 | NFS_CAP_LAYOUTSTATS
10527 | NFS_CAP_CLONE
10528 | NFS_CAP_LAYOUTERROR
10529 | NFS_CAP_READ_PLUS
10530 | NFS_CAP_MOVEABLE,
10531 .init_client = nfs41_init_client,
10532 .shutdown_client = nfs41_shutdown_client,
10533 .match_stateid = nfs41_match_stateid,
10534 .find_root_sec = nfs41_find_root_sec,
10535 .free_lock_state = nfs41_free_lock_state,
10536 .call_sync_ops = &nfs41_call_sync_ops,
10537 .test_and_free_expired = nfs41_test_and_free_expired_stateid,
10538 .alloc_seqid = nfs_alloc_no_seqid,
10539 .session_trunk = nfs4_test_session_trunk,
10540 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
10541 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
10542 .state_renewal_ops = &nfs41_state_renewal_ops,
10543 .mig_recovery_ops = &nfs41_mig_recovery_ops,
10544 };
10545 #endif
10546
10547 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
10548 [0] = &nfs_v4_0_minor_ops,
10549 #if defined(CONFIG_NFS_V4_1)
10550 [1] = &nfs_v4_1_minor_ops,
10551 #endif
10552 #if defined(CONFIG_NFS_V4_2)
10553 [2] = &nfs_v4_2_minor_ops,
10554 #endif
10555 };
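
/*
 * Usage note (a simplified sketch; the actual selection happens in the
 * client setup code, and cl_minorversion is an assumed field name here):
 * the table above is indexed by the negotiated NFSv4 minor version, roughly
 *
 *	clp->cl_mvops = nfs_v4_minor_ops[clp->cl_minorversion];
 *
 * with the requested minor version validated beforehand, since slots for
 * minor versions that were compiled out (or that lie beyond the array) are
 * not usable.
 */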
10556
10557 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
10558 {
10559 ssize_t error, error2, error3;
10560
10561 error = generic_listxattr(dentry, list, size);
10562 if (error < 0)
10563 return error;
10564 if (list) {
10565 list += error;
10566 size -= error;
10567 }
10568
10569 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size);
10570 if (error2 < 0)
10571 return error2;
10572
10573 if (list) {
10574 list += error2;
10575 size -= error2;
10576 }
10577
10578 error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, size);
10579 if (error3 < 0)
10580 return error3;
10581
10582 return error + error2 + error3;
10583 }
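
/*
 * Usage note: userspace normally drives listxattr twice, first with a NULL
 * buffer to learn the required size, then with a buffer of that size
 * (illustrative, from the caller's point of view):
 *
 *	len = listxattr(path, NULL, 0);      // total bytes needed
 *	buf = malloc(len);
 *	len = listxattr(path, buf, len);     // concatenated NUL-terminated names
 *
 * Each of the three helpers above therefore reports the size it would
 * consume even when "list" is NULL, and the results are summed.
 */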
10584
10585 static void nfs4_enable_swap(struct inode *inode)
10586 {
10587 /* The state manager thread must always be running.
10588 * It will notice the client is a swapper, and stay put.
10589 */
10590 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
10591
10592 nfs4_schedule_state_manager(clp);
10593 }
10594
10595 static void nfs4_disable_swap(struct inode *inode)
10596 {
10597 /* The state manager thread will now exit once it is
10598 * woken.
10599 */
10600 wake_up_var(&NFS_SERVER(inode)->nfs_client->cl_state);
10601 }
10602
10603 static const struct inode_operations nfs4_dir_inode_operations = {
10604 .create = nfs_create,
10605 .lookup = nfs_lookup,
10606 .atomic_open = nfs_atomic_open,
10607 .link = nfs_link,
10608 .unlink = nfs_unlink,
10609 .symlink = nfs_symlink,
10610 .mkdir = nfs_mkdir,
10611 .rmdir = nfs_rmdir,
10612 .mknod = nfs_mknod,
10613 .rename = nfs_rename,
10614 .permission = nfs_permission,
10615 .getattr = nfs_getattr,
10616 .setattr = nfs_setattr,
10617 .listxattr = nfs4_listxattr,
10618 };
10619
10620 static const struct inode_operations nfs4_file_inode_operations = {
10621 .permission = nfs_permission,
10622 .getattr = nfs_getattr,
10623 .setattr = nfs_setattr,
10624 .listxattr = nfs4_listxattr,
10625 };
10626
10627 const struct nfs_rpc_ops nfs_v4_clientops = {
10628 .version = 4, /* protocol version */
10629 .dentry_ops = &nfs4_dentry_operations,
10630 .dir_inode_ops = &nfs4_dir_inode_operations,
10631 .file_inode_ops = &nfs4_file_inode_operations,
10632 .file_ops = &nfs4_file_operations,
10633 .getroot = nfs4_proc_get_root,
10634 .submount = nfs4_submount,
10635 .try_get_tree = nfs4_try_get_tree,
10636 .getattr = nfs4_proc_getattr,
10637 .setattr = nfs4_proc_setattr,
10638 .lookup = nfs4_proc_lookup,
10639 .lookupp = nfs4_proc_lookupp,
10640 .access = nfs4_proc_access,
10641 .readlink = nfs4_proc_readlink,
10642 .create = nfs4_proc_create,
10643 .remove = nfs4_proc_remove,
10644 .unlink_setup = nfs4_proc_unlink_setup,
10645 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
10646 .unlink_done = nfs4_proc_unlink_done,
10647 .rename_setup = nfs4_proc_rename_setup,
10648 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
10649 .rename_done = nfs4_proc_rename_done,
10650 .link = nfs4_proc_link,
10651 .symlink = nfs4_proc_symlink,
10652 .mkdir = nfs4_proc_mkdir,
10653 .rmdir = nfs4_proc_rmdir,
10654 .readdir = nfs4_proc_readdir,
10655 .mknod = nfs4_proc_mknod,
10656 .statfs = nfs4_proc_statfs,
10657 .fsinfo = nfs4_proc_fsinfo,
10658 .pathconf = nfs4_proc_pathconf,
10659 .set_capabilities = nfs4_server_capabilities,
10660 .decode_dirent = nfs4_decode_dirent,
10661 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
10662 .read_setup = nfs4_proc_read_setup,
10663 .read_done = nfs4_read_done,
10664 .write_setup = nfs4_proc_write_setup,
10665 .write_done = nfs4_write_done,
10666 .commit_setup = nfs4_proc_commit_setup,
10667 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
10668 .commit_done = nfs4_commit_done,
10669 .lock = nfs4_proc_lock,
10670 .clear_acl_cache = nfs4_zap_acl_attr,
10671 .close_context = nfs4_close_context,
10672 .open_context = nfs4_atomic_open,
10673 .have_delegation = nfs4_have_delegation,
10674 .alloc_client = nfs4_alloc_client,
10675 .init_client = nfs4_init_client,
10676 .free_client = nfs4_free_client,
10677 .create_server = nfs4_create_server,
10678 .clone_server = nfs_clone_server,
10679 .discover_trunking = nfs4_discover_trunking,
10680 .enable_swap = nfs4_enable_swap,
10681 .disable_swap = nfs4_disable_swap,
10682 };
10683
10684 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
10685 .name = XATTR_NAME_NFSV4_ACL,
10686 .list = nfs4_xattr_list_nfs4_acl,
10687 .get = nfs4_xattr_get_nfs4_acl,
10688 .set = nfs4_xattr_set_nfs4_acl,
10689 };
10690
10691 #if defined(CONFIG_NFS_V4_1)
10692 static const struct xattr_handler nfs4_xattr_nfs4_dacl_handler = {
10693 .name = XATTR_NAME_NFSV4_DACL,
10694 .list = nfs4_xattr_list_nfs4_dacl,
10695 .get = nfs4_xattr_get_nfs4_dacl,
10696 .set = nfs4_xattr_set_nfs4_dacl,
10697 };
10698
10699 static const struct xattr_handler nfs4_xattr_nfs4_sacl_handler = {
10700 .name = XATTR_NAME_NFSV4_SACL,
10701 .list = nfs4_xattr_list_nfs4_sacl,
10702 .get = nfs4_xattr_get_nfs4_sacl,
10703 .set = nfs4_xattr_set_nfs4_sacl,
10704 };
10705 #endif
10706
10707 #ifdef CONFIG_NFS_V4_2
10708 static const struct xattr_handler nfs4_xattr_nfs4_user_handler = {
10709 .prefix = XATTR_USER_PREFIX,
10710 .get = nfs4_xattr_get_nfs4_user,
10711 .set = nfs4_xattr_set_nfs4_user,
10712 };
10713 #endif
10714
10715 const struct xattr_handler *nfs4_xattr_handlers[] = {
10716 &nfs4_xattr_nfs4_acl_handler,
10717 #if defined(CONFIG_NFS_V4_1)
10718 &nfs4_xattr_nfs4_dacl_handler,
10719 &nfs4_xattr_nfs4_sacl_handler,
10720 #endif
10721 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
10722 &nfs4_xattr_nfs4_label_handler,
10723 #endif
10724 #ifdef CONFIG_NFS_V4_2
10725 &nfs4_xattr_nfs4_user_handler,
10726 #endif
10727 NULL
10728 };
10729