1 /*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
5 *
6 * Copyright (C) International Business Machines Corp., 2002,2010
7 * Author(s): Steve French (sfrench@us.ibm.com)
8 * Jeremy Allison (jra@samba.org)
9 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24 #include <linux/fs.h>
25 #include <linux/backing-dev.h>
26 #include <linux/stat.h>
27 #include <linux/fcntl.h>
28 #include <linux/pagemap.h>
29 #include <linux/pagevec.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/delay.h>
33 #include <linux/mount.h>
34 #include <linux/slab.h>
35 #include <linux/swap.h>
36 #include <linux/mm.h>
37 #include <asm/div64.h>
38 #include "cifsfs.h"
39 #include "cifspdu.h"
40 #include "cifsglob.h"
41 #include "cifsproto.h"
42 #include "cifs_unicode.h"
43 #include "cifs_debug.h"
44 #include "cifs_fs_sb.h"
45 #include "fscache.h"
46 #include "smbdirect.h"
47
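/*
 * Convert the POSIX access mode bits in the open flags (O_RDONLY, O_WRONLY,
 * O_RDWR) into the SMB desired-access bits requested in the NT-style open.
 */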
48 static inline int cifs_convert_flags(unsigned int flags)
49 {
50 if ((flags & O_ACCMODE) == O_RDONLY)
51 return GENERIC_READ;
52 else if ((flags & O_ACCMODE) == O_WRONLY)
53 return GENERIC_WRITE;
54 else if ((flags & O_ACCMODE) == O_RDWR) {
55 /* GENERIC_ALL is too much permission to request and
56 can cause an unnecessary access-denied error on create */
57 /* return GENERIC_ALL; */
58 return (GENERIC_READ | GENERIC_WRITE);
59 }
60
61 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
62 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
63 FILE_READ_DATA);
64 }
65
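/*
 * Map VFS open flags to the SMB_O_* flags used by the CIFS POSIX
 * extensions for posix open/create.
 */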
66 static u32 cifs_posix_convert_flags(unsigned int flags)
67 {
68 u32 posix_flags = 0;
69
70 if ((flags & O_ACCMODE) == O_RDONLY)
71 posix_flags = SMB_O_RDONLY;
72 else if ((flags & O_ACCMODE) == O_WRONLY)
73 posix_flags = SMB_O_WRONLY;
74 else if ((flags & O_ACCMODE) == O_RDWR)
75 posix_flags = SMB_O_RDWR;
76
77 if (flags & O_CREAT) {
78 posix_flags |= SMB_O_CREAT;
79 if (flags & O_EXCL)
80 posix_flags |= SMB_O_EXCL;
81 } else if (flags & O_EXCL)
82 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
83 current->comm, current->tgid);
84
85 if (flags & O_TRUNC)
86 posix_flags |= SMB_O_TRUNC;
87 /* be safe and imply O_SYNC for O_DSYNC */
88 if (flags & O_DSYNC)
89 posix_flags |= SMB_O_SYNC;
90 if (flags & O_DIRECTORY)
91 posix_flags |= SMB_O_DIRECTORY;
92 if (flags & O_NOFOLLOW)
93 posix_flags |= SMB_O_NOFOLLOW;
94 if (flags & O_DIRECT)
95 posix_flags |= SMB_O_DIRECT;
96
97 return posix_flags;
98 }
99
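/*
 * Derive the SMB create disposition from the O_CREAT/O_EXCL/O_TRUNC
 * combination in the open flags (see the mapping table further below).
 */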
100 static inline int cifs_get_disposition(unsigned int flags)
101 {
102 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
103 return FILE_CREATE;
104 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
105 return FILE_OVERWRITE_IF;
106 else if ((flags & O_CREAT) == O_CREAT)
107 return FILE_OPEN_IF;
108 else if ((flags & O_TRUNC) == O_TRUNC)
109 return FILE_OVERWRITE;
110 else
111 return FILE_OPEN;
112 }
113
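/*
 * Open (or create) a file using the CIFS POSIX extensions and, when the
 * server returns valid metadata and the caller asked for it, instantiate
 * or update the corresponding inode.
 */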
114 int cifs_posix_open(char *full_path, struct inode **pinode,
115 struct super_block *sb, int mode, unsigned int f_flags,
116 __u32 *poplock, __u16 *pnetfid, unsigned int xid)
117 {
118 int rc;
119 FILE_UNIX_BASIC_INFO *presp_data;
120 __u32 posix_flags = 0;
121 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
122 struct cifs_fattr fattr;
123 struct tcon_link *tlink;
124 struct cifs_tcon *tcon;
125
126 cifs_dbg(FYI, "posix open %s\n", full_path);
127
128 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
129 if (presp_data == NULL)
130 return -ENOMEM;
131
132 tlink = cifs_sb_tlink(cifs_sb);
133 if (IS_ERR(tlink)) {
134 rc = PTR_ERR(tlink);
135 goto posix_open_ret;
136 }
137
138 tcon = tlink_tcon(tlink);
139 mode &= ~current_umask();
140
141 posix_flags = cifs_posix_convert_flags(f_flags);
142 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
143 poplock, full_path, cifs_sb->local_nls,
144 cifs_remap(cifs_sb));
145 cifs_put_tlink(tlink);
146
147 if (rc)
148 goto posix_open_ret;
149
150 if (presp_data->Type == cpu_to_le32(-1))
151 goto posix_open_ret; /* open ok, caller does qpathinfo */
152
153 if (!pinode)
154 goto posix_open_ret; /* caller does not need info */
155
156 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
157
158 /* get new inode and set it up */
159 if (*pinode == NULL) {
160 cifs_fill_uniqueid(sb, &fattr);
161 *pinode = cifs_iget(sb, &fattr);
162 if (!*pinode) {
163 rc = -ENOMEM;
164 goto posix_open_ret;
165 }
166 } else {
167 cifs_fattr_to_inode(*pinode, &fattr);
168 }
169
170 posix_open_ret:
171 kfree(presp_data);
172 return rc;
173 }
174
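/*
 * Open a file with a regular NT-style create call and refresh the inode
 * from the metadata returned by the server.
 */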
175 static int
176 cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
177 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
178 struct cifs_fid *fid, unsigned int xid)
179 {
180 int rc;
181 int desired_access;
182 int disposition;
183 int create_options = CREATE_NOT_DIR;
184 FILE_ALL_INFO *buf;
185 struct TCP_Server_Info *server = tcon->ses->server;
186 struct cifs_open_parms oparms;
187
188 if (!server->ops->open)
189 return -ENOSYS;
190
191 desired_access = cifs_convert_flags(f_flags);
192
193 /*********************************************************************
194 * open flag mapping table:
195 *
196 * POSIX Flag CIFS Disposition
197 * ---------- ----------------
198 * O_CREAT FILE_OPEN_IF
199 * O_CREAT | O_EXCL FILE_CREATE
200 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
201 * O_TRUNC FILE_OVERWRITE
202 * none of the above FILE_OPEN
203 *
204 * Note that there is no direct match for the FILE_SUPERSEDE
205 * disposition (i.e. create whether or not the file exists).
206 * O_CREAT | O_TRUNC is similar, but it truncates an existing
207 * file rather than replacing it as FILE_SUPERSEDE does
208 * (FILE_SUPERSEDE applies the attributes / metadata passed in on the open call).
209 *
210 * O_SYNC is a reasonable match to the CIFS writethrough flag
211 * and the read/write flags match reasonably. O_LARGEFILE
212 * is irrelevant because large file support is always used
213 * by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
214 * O_FASYNC, O_NOFOLLOW and O_NONBLOCK need further investigation.
215 *********************************************************************/
216
217 disposition = cifs_get_disposition(f_flags);
218
219 /* BB pass O_SYNC flag through on file attributes .. BB */
220
221 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
222 if (!buf)
223 return -ENOMEM;
224
225 if (backup_cred(cifs_sb))
226 create_options |= CREATE_OPEN_BACKUP_INTENT;
227
228 /* O_SYNC also has bit for O_DSYNC so following check picks up either */
229 if (f_flags & O_SYNC)
230 create_options |= CREATE_WRITE_THROUGH;
231
232 if (f_flags & O_DIRECT)
233 create_options |= CREATE_NO_BUFFER;
234
235 oparms.tcon = tcon;
236 oparms.cifs_sb = cifs_sb;
237 oparms.desired_access = desired_access;
238 oparms.create_options = create_options;
239 oparms.disposition = disposition;
240 oparms.path = full_path;
241 oparms.fid = fid;
242 oparms.reconnect = false;
243
244 rc = server->ops->open(xid, &oparms, oplock, buf);
245
246 if (rc)
247 goto out;
248
249 if (tcon->unix_ext)
250 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
251 xid);
252 else
253 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
254 xid, fid);
255
256 if (rc) {
257 server->ops->close(xid, tcon, fid);
258 if (rc == -ESTALE)
259 rc = -EOPENSTALE;
260 }
261
262 out:
263 kfree(buf);
264 return rc;
265 }
266
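/*
 * Return true if any open instance of this inode currently holds
 * mandatory byte-range locks.
 */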
267 static bool
268 cifs_has_mand_locks(struct cifsInodeInfo *cinode)
269 {
270 struct cifs_fid_locks *cur;
271 bool has_locks = false;
272
273 down_read(&cinode->lock_sem);
274 list_for_each_entry(cur, &cinode->llist, llist) {
275 if (!list_empty(&cur->locks)) {
276 has_locks = true;
277 break;
278 }
279 }
280 up_read(&cinode->lock_sem);
281 return has_locks;
282 }
283
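/*
 * Acquire a rw_semaphore for writing by polling with down_write_trylock()
 * and sleeping briefly between attempts.
 */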
284 void
285 cifs_down_write(struct rw_semaphore *sem)
286 {
287 while (!down_write_trylock(sem))
288 msleep(10);
289 }
290
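/*
 * Allocate and initialize a cifsFileInfo for a newly opened file: set up its
 * per-fid lock list, take references on the dentry, tlink and superblock,
 * apply the oplock/lease state returned by the server, and link the new
 * handle into the tcon and inode open-file lists.
 */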
291 struct cifsFileInfo *
292 cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
293 struct tcon_link *tlink, __u32 oplock)
294 {
295 struct dentry *dentry = file_dentry(file);
296 struct inode *inode = d_inode(dentry);
297 struct cifsInodeInfo *cinode = CIFS_I(inode);
298 struct cifsFileInfo *cfile;
299 struct cifs_fid_locks *fdlocks;
300 struct cifs_tcon *tcon = tlink_tcon(tlink);
301 struct TCP_Server_Info *server = tcon->ses->server;
302
303 cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
304 if (cfile == NULL)
305 return cfile;
306
307 fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
308 if (!fdlocks) {
309 kfree(cfile);
310 return NULL;
311 }
312
313 INIT_LIST_HEAD(&fdlocks->locks);
314 fdlocks->cfile = cfile;
315 cfile->llist = fdlocks;
316 cifs_down_write(&cinode->lock_sem);
317 list_add(&fdlocks->llist, &cinode->llist);
318 up_write(&cinode->lock_sem);
319
320 cfile->count = 1;
321 cfile->pid = current->tgid;
322 cfile->uid = current_fsuid();
323 cfile->dentry = dget(dentry);
324 cfile->f_flags = file->f_flags;
325 cfile->invalidHandle = false;
326 cfile->tlink = cifs_get_tlink(tlink);
327 INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
328 mutex_init(&cfile->fh_mutex);
329 spin_lock_init(&cfile->file_info_lock);
330
331 cifs_sb_active(inode->i_sb);
332
333 /*
334 * If the server returned a read oplock and we have mandatory brlocks,
335 * set oplock level to None.
336 */
337 if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
338 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
339 oplock = 0;
340 }
341
342 spin_lock(&tcon->open_file_lock);
343 if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
344 oplock = fid->pending_open->oplock;
345 list_del(&fid->pending_open->olist);
346
347 fid->purge_cache = false;
348 server->ops->set_fid(cfile, fid, oplock);
349
350 list_add(&cfile->tlist, &tcon->openFileList);
351 atomic_inc(&tcon->num_local_opens);
352
353 /* if readable file instance, put it first in the list */
354 spin_lock(&cinode->open_file_lock);
355 if (file->f_mode & FMODE_READ)
356 list_add(&cfile->flist, &cinode->openFileList);
357 else
358 list_add_tail(&cfile->flist, &cinode->openFileList);
359 spin_unlock(&cinode->open_file_lock);
360 spin_unlock(&tcon->open_file_lock);
361
362 if (fid->purge_cache)
363 cifs_zap_mapping(inode);
364
365 file->private_data = cfile;
366 return cfile;
367 }
368
369 struct cifsFileInfo *
370 cifsFileInfo_get(struct cifsFileInfo *cifs_file)
371 {
372 spin_lock(&cifs_file->file_info_lock);
373 cifsFileInfo_get_locked(cifs_file);
374 spin_unlock(&cifs_file->file_info_lock);
375 return cifs_file;
376 }
377
378 /**
379 * cifsFileInfo_put - release a reference of file priv data
380 *
381 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
382 */
383 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
384 {
385 _cifsFileInfo_put(cifs_file, true);
386 }
387
388 /**
389 * _cifsFileInfo_put - release a reference of file priv data
390 *
391 * This may involve closing the filehandle @cifs_file out on the
392 * server. Must be called without holding tcon->open_file_lock and
393 * cifs_file->file_info_lock.
394 *
395 * If @wait_for_oplock_handler is true and we are releasing the last
396 * reference, wait for any running oplock break handler of the file
397 * and cancel any pending one. If calling this function from the
398 * oplock break handler, you need to pass false.
399 *
400 */
401 void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
402 {
403 struct inode *inode = d_inode(cifs_file->dentry);
404 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
405 struct TCP_Server_Info *server = tcon->ses->server;
406 struct cifsInodeInfo *cifsi = CIFS_I(inode);
407 struct super_block *sb = inode->i_sb;
408 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
409 struct cifsLockInfo *li, *tmp;
410 struct cifs_fid fid;
411 struct cifs_pending_open open;
412 bool oplock_break_cancelled;
413
414 spin_lock(&tcon->open_file_lock);
415 spin_lock(&cifsi->open_file_lock);
416 spin_lock(&cifs_file->file_info_lock);
417 if (--cifs_file->count > 0) {
418 spin_unlock(&cifs_file->file_info_lock);
419 spin_unlock(&cifsi->open_file_lock);
420 spin_unlock(&tcon->open_file_lock);
421 return;
422 }
423 spin_unlock(&cifs_file->file_info_lock);
424
425 if (server->ops->get_lease_key)
426 server->ops->get_lease_key(inode, &fid);
427
428 /* store open in pending opens to make sure we don't miss lease break */
429 cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
430
431 /* remove it from the lists */
432 list_del(&cifs_file->flist);
433 list_del(&cifs_file->tlist);
434 atomic_dec(&tcon->num_local_opens);
435
436 if (list_empty(&cifsi->openFileList)) {
437 cifs_dbg(FYI, "closing last open instance for inode %p\n",
438 d_inode(cifs_file->dentry));
439 /*
440 * In strict cache mode we need to invalidate the mapping on the last
441 * close because it may cause an error when we open this file
442 * again and get at least a level II oplock.
443 */
444 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
445 set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
446 cifs_set_oplock_level(cifsi, 0);
447 }
448
449 spin_unlock(&cifsi->open_file_lock);
450 spin_unlock(&tcon->open_file_lock);
451
452 oplock_break_cancelled = wait_oplock_handler ?
453 cancel_work_sync(&cifs_file->oplock_break) : false;
454
455 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
456 struct TCP_Server_Info *server = tcon->ses->server;
457 unsigned int xid;
458
459 xid = get_xid();
460 if (server->ops->close)
461 server->ops->close(xid, tcon, &cifs_file->fid);
462 _free_xid(xid);
463 }
464
465 if (oplock_break_cancelled)
466 cifs_done_oplock_break(cifsi);
467
468 cifs_del_pending_open(&open);
469
470 /*
471 * Delete any outstanding lock records. We'll lose them when the file
472 * is closed anyway.
473 */
474 cifs_down_write(&cifsi->lock_sem);
475 list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
476 list_del(&li->llist);
477 cifs_del_lock_waiters(li);
478 kfree(li);
479 }
480 list_del(&cifs_file->llist->llist);
481 kfree(cifs_file->llist);
482 up_write(&cifsi->lock_sem);
483
484 cifs_put_tlink(cifs_file->tlink);
485 dput(cifs_file->dentry);
486 cifs_sb_deactive(sb);
487 kfree(cifs_file);
488 }
489
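/*
 * VFS ->open() for regular files: try a POSIX-extension open when the
 * server supports it, otherwise fall back to an NT-style open, then attach
 * the resulting cifsFileInfo to the struct file.
 */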
490 int cifs_open(struct inode *inode, struct file *file)
491
492 {
493 int rc = -EACCES;
494 unsigned int xid;
495 __u32 oplock;
496 struct cifs_sb_info *cifs_sb;
497 struct TCP_Server_Info *server;
498 struct cifs_tcon *tcon;
499 struct tcon_link *tlink;
500 struct cifsFileInfo *cfile = NULL;
501 char *full_path = NULL;
502 bool posix_open_ok = false;
503 struct cifs_fid fid;
504 struct cifs_pending_open open;
505
506 xid = get_xid();
507
508 cifs_sb = CIFS_SB(inode->i_sb);
509 tlink = cifs_sb_tlink(cifs_sb);
510 if (IS_ERR(tlink)) {
511 free_xid(xid);
512 return PTR_ERR(tlink);
513 }
514 tcon = tlink_tcon(tlink);
515 server = tcon->ses->server;
516
517 full_path = build_path_from_dentry(file_dentry(file));
518 if (full_path == NULL) {
519 rc = -ENOMEM;
520 goto out;
521 }
522
523 cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
524 inode, file->f_flags, full_path);
525
526 if (file->f_flags & O_DIRECT &&
527 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
528 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
529 file->f_op = &cifs_file_direct_nobrl_ops;
530 else
531 file->f_op = &cifs_file_direct_ops;
532 }
533
534 if (server->oplocks)
535 oplock = REQ_OPLOCK;
536 else
537 oplock = 0;
538
539 if (!tcon->broken_posix_open && tcon->unix_ext &&
540 cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
541 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
542 /* can not refresh inode info since size could be stale */
543 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
544 cifs_sb->mnt_file_mode /* ignored */,
545 file->f_flags, &oplock, &fid.netfid, xid);
546 if (rc == 0) {
547 cifs_dbg(FYI, "posix open succeeded\n");
548 posix_open_ok = true;
549 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
550 if (tcon->ses->serverNOS)
551 cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
552 tcon->ses->serverName,
553 tcon->ses->serverNOS);
554 tcon->broken_posix_open = true;
555 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
556 (rc != -EOPNOTSUPP)) /* path not found or net err */
557 goto out;
558 /*
559 * Else fallthrough to retry open the old way on network i/o
560 * or DFS errors.
561 */
562 }
563
564 if (server->ops->get_lease_key)
565 server->ops->get_lease_key(inode, &fid);
566
567 cifs_add_pending_open(&fid, tlink, &open);
568
569 if (!posix_open_ok) {
570 if (server->ops->get_lease_key)
571 server->ops->get_lease_key(inode, &fid);
572
573 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
574 file->f_flags, &oplock, &fid, xid);
575 if (rc) {
576 cifs_del_pending_open(&open);
577 goto out;
578 }
579 }
580
581 cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
582 if (cfile == NULL) {
583 if (server->ops->close)
584 server->ops->close(xid, tcon, &fid);
585 cifs_del_pending_open(&open);
586 rc = -ENOMEM;
587 goto out;
588 }
589
590 cifs_fscache_set_inode_cookie(inode, file);
591
592 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
593 /*
594 * Time to set mode which we can not set earlier due to
595 * problems creating new read-only files.
596 */
597 struct cifs_unix_set_info_args args = {
598 .mode = inode->i_mode,
599 .uid = INVALID_UID, /* no change */
600 .gid = INVALID_GID, /* no change */
601 .ctime = NO_CHANGE_64,
602 .atime = NO_CHANGE_64,
603 .mtime = NO_CHANGE_64,
604 .device = 0,
605 };
606 CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
607 cfile->pid);
608 }
609
610 out:
611 kfree(full_path);
612 free_xid(xid);
613 cifs_put_tlink(tlink);
614 return rc;
615 }
616
617 static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
618
619 /*
620 * Try to reacquire byte range locks that were released when session
621 * to server was lost.
622 */
623 static int
624 cifs_relock_file(struct cifsFileInfo *cfile)
625 {
626 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
627 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
628 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
629 int rc = 0;
630
631 down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
632 if (cinode->can_cache_brlcks) {
633 /* can cache locks - no need to relock */
634 up_read(&cinode->lock_sem);
635 return rc;
636 }
637
638 if (cap_unix(tcon->ses) &&
639 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
640 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
641 rc = cifs_push_posix_locks(cfile);
642 else
643 rc = tcon->ses->server->ops->push_mand_locks(cfile);
644
645 up_read(&cinode->lock_sem);
646 return rc;
647 }
648
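/*
 * Re-open a file whose handle was invalidated (e.g. after a reconnect).
 * If @can_flush is true, write back dirty pages and refresh the inode;
 * otherwise skip the flush to avoid deadlocking with writeback.
 */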
649 static int
650 cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
651 {
652 int rc = -EACCES;
653 unsigned int xid;
654 __u32 oplock;
655 struct cifs_sb_info *cifs_sb;
656 struct cifs_tcon *tcon;
657 struct TCP_Server_Info *server;
658 struct cifsInodeInfo *cinode;
659 struct inode *inode;
660 char *full_path = NULL;
661 int desired_access;
662 int disposition = FILE_OPEN;
663 int create_options = CREATE_NOT_DIR;
664 struct cifs_open_parms oparms;
665
666 xid = get_xid();
667 mutex_lock(&cfile->fh_mutex);
668 if (!cfile->invalidHandle) {
669 mutex_unlock(&cfile->fh_mutex);
670 rc = 0;
671 free_xid(xid);
672 return rc;
673 }
674
675 inode = d_inode(cfile->dentry);
676 cifs_sb = CIFS_SB(inode->i_sb);
677 tcon = tlink_tcon(cfile->tlink);
678 server = tcon->ses->server;
679
680 /*
681 * Can not grab the rename sem here because various ops, including those
682 * that already hold the rename sem, can end up causing writepage to get
683 * called, and if the server was down that means we end up here and we
684 * can never tell if the caller already holds the rename_sem.
685 */
686 full_path = build_path_from_dentry(cfile->dentry);
687 if (full_path == NULL) {
688 rc = -ENOMEM;
689 mutex_unlock(&cfile->fh_mutex);
690 free_xid(xid);
691 return rc;
692 }
693
694 cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
695 inode, cfile->f_flags, full_path);
696
697 if (tcon->ses->server->oplocks)
698 oplock = REQ_OPLOCK;
699 else
700 oplock = 0;
701
702 if (tcon->unix_ext && cap_unix(tcon->ses) &&
703 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
704 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
705 /*
706 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
707 * original open. Must mask them off for a reopen.
708 */
709 unsigned int oflags = cfile->f_flags &
710 ~(O_CREAT | O_EXCL | O_TRUNC);
711
712 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
713 cifs_sb->mnt_file_mode /* ignored */,
714 oflags, &oplock, &cfile->fid.netfid, xid);
715 if (rc == 0) {
716 cifs_dbg(FYI, "posix reopen succeeded\n");
717 oparms.reconnect = true;
718 goto reopen_success;
719 }
720 /*
721 * Fall through to retry the open the old way on errors; especially
722 * in the reconnect path it is important to retry hard.
723 */
724 }
725
726 desired_access = cifs_convert_flags(cfile->f_flags);
727
728 if (backup_cred(cifs_sb))
729 create_options |= CREATE_OPEN_BACKUP_INTENT;
730
731 if (server->ops->get_lease_key)
732 server->ops->get_lease_key(inode, &cfile->fid);
733
734 oparms.tcon = tcon;
735 oparms.cifs_sb = cifs_sb;
736 oparms.desired_access = desired_access;
737 oparms.create_options = create_options;
738 oparms.disposition = disposition;
739 oparms.path = full_path;
740 oparms.fid = &cfile->fid;
741 oparms.reconnect = true;
742
743 /*
744 * Can not refresh the inode by passing in a file_info buf to be returned by
745 * ops->open and then calling get_inode_info with the returned buf, since
746 * the file might have write-behind data that needs to be flushed and the server
747 * version of the file size can be stale. If we knew for sure that the inode was
748 * not dirty locally we could do this.
749 */
750 rc = server->ops->open(xid, &oparms, &oplock, NULL);
751 if (rc == -ENOENT && oparms.reconnect == false) {
752 /* durable handle timeout is expired - open the file again */
753 rc = server->ops->open(xid, &oparms, &oplock, NULL);
754 /* indicate that we need to relock the file */
755 oparms.reconnect = true;
756 }
757
758 if (rc) {
759 mutex_unlock(&cfile->fh_mutex);
760 cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
761 cifs_dbg(FYI, "oplock: %d\n", oplock);
762 goto reopen_error_exit;
763 }
764
765 reopen_success:
766 cfile->invalidHandle = false;
767 mutex_unlock(&cfile->fh_mutex);
768 cinode = CIFS_I(inode);
769
770 if (can_flush) {
771 rc = filemap_write_and_wait(inode->i_mapping);
772 if (!is_interrupt_error(rc))
773 mapping_set_error(inode->i_mapping, rc);
774
775 if (tcon->unix_ext)
776 rc = cifs_get_inode_info_unix(&inode, full_path,
777 inode->i_sb, xid);
778 else
779 rc = cifs_get_inode_info(&inode, full_path, NULL,
780 inode->i_sb, xid, NULL);
781 }
782 /*
783 * Else we are writing out data to server already and could deadlock if
784 * we tried to flush data, and since we do not know if we have data that
785 * would invalidate the current end of file on the server we can not go
786 * to the server to get the new inode info.
787 */
788
789 /*
790 * If the server returned a read oplock and we have mandatory brlocks,
791 * set oplock level to None.
792 */
793 if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
794 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
795 oplock = 0;
796 }
797
798 server->ops->set_fid(cfile, &cfile->fid, oplock);
799 if (oparms.reconnect)
800 cifs_relock_file(cfile);
801
802 reopen_error_exit:
803 kfree(full_path);
804 free_xid(xid);
805 return rc;
806 }
807
808 int cifs_close(struct inode *inode, struct file *file)
809 {
810 if (file->private_data != NULL) {
811 cifsFileInfo_put(file->private_data);
812 file->private_data = NULL;
813 }
814
815 /* return code from the ->release op is always ignored */
816 return 0;
817 }
818
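/*
 * Walk the tree connection's open file list and re-open any invalidated
 * persistent handles, e.g. after a reconnect.
 */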
819 void
820 cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
821 {
822 struct cifsFileInfo *open_file;
823 struct list_head *tmp;
824 struct list_head *tmp1;
825 struct list_head tmp_list;
826
827 if (!tcon->use_persistent || !tcon->need_reopen_files)
828 return;
829
830 tcon->need_reopen_files = false;
831
832 cifs_dbg(FYI, "Reopen persistent handles");
833 INIT_LIST_HEAD(&tmp_list);
834
835 /* list all files open on tree connection, reopen resilient handles */
836 spin_lock(&tcon->open_file_lock);
837 list_for_each(tmp, &tcon->openFileList) {
838 open_file = list_entry(tmp, struct cifsFileInfo, tlist);
839 if (!open_file->invalidHandle)
840 continue;
841 cifsFileInfo_get(open_file);
842 list_add_tail(&open_file->rlist, &tmp_list);
843 }
844 spin_unlock(&tcon->open_file_lock);
845
846 list_for_each_safe(tmp, tmp1, &tmp_list) {
847 open_file = list_entry(tmp, struct cifsFileInfo, rlist);
848 if (cifs_reopen_file(open_file, false /* do not flush */))
849 tcon->need_reopen_files = true;
850 list_del_init(&open_file->rlist);
851 cifsFileInfo_put(open_file);
852 }
853 }
854
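/*
 * VFS ->release() for directories: close an in-progress search handle on
 * the server if needed and free the buffered search results.
 */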
855 int cifs_closedir(struct inode *inode, struct file *file)
856 {
857 int rc = 0;
858 unsigned int xid;
859 struct cifsFileInfo *cfile = file->private_data;
860 struct cifs_tcon *tcon;
861 struct TCP_Server_Info *server;
862 char *buf;
863
864 cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
865
866 if (cfile == NULL)
867 return rc;
868
869 xid = get_xid();
870 tcon = tlink_tcon(cfile->tlink);
871 server = tcon->ses->server;
872
873 cifs_dbg(FYI, "Freeing private data in close dir\n");
874 spin_lock(&cfile->file_info_lock);
875 if (server->ops->dir_needs_close(cfile)) {
876 cfile->invalidHandle = true;
877 spin_unlock(&cfile->file_info_lock);
878 if (server->ops->close_dir)
879 rc = server->ops->close_dir(xid, tcon, &cfile->fid);
880 else
881 rc = -ENOSYS;
882 cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
883 /* not much we can do if it fails anyway, ignore rc */
884 rc = 0;
885 } else
886 spin_unlock(&cfile->file_info_lock);
887
888 buf = cfile->srch_inf.ntwrk_buf_start;
889 if (buf) {
890 cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
891 cfile->srch_inf.ntwrk_buf_start = NULL;
892 if (cfile->srch_inf.smallBuf)
893 cifs_small_buf_release(buf);
894 else
895 cifs_buf_release(buf);
896 }
897
898 cifs_put_tlink(cfile->tlink);
899 kfree(file->private_data);
900 file->private_data = NULL;
901 /* BB can we lock the filestruct while this is going on? */
902 free_xid(xid);
903 return rc;
904 }
905
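/*
 * Allocate and initialize a cifsLockInfo describing one byte-range lock
 * owned by the current thread group.
 */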
906 static struct cifsLockInfo *
907 cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
908 {
909 struct cifsLockInfo *lock =
910 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
911 if (!lock)
912 return lock;
913 lock->offset = offset;
914 lock->length = length;
915 lock->type = type;
916 lock->pid = current->tgid;
917 lock->flags = flags;
918 INIT_LIST_HEAD(&lock->blist);
919 init_waitqueue_head(&lock->block_q);
920 return lock;
921 }
922
923 void
924 cifs_del_lock_waiters(struct cifsLockInfo *lock)
925 {
926 struct cifsLockInfo *li, *tmp;
927 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
928 list_del_init(&li->blist);
929 wake_up(&li->block_q);
930 }
931 }
932
933 #define CIFS_LOCK_OP 0
934 #define CIFS_READ_OP 1
935 #define CIFS_WRITE_OP 2
936
937 /* @rw_check : 0 - no op, 1 - read, 2 - write */
938 static bool
939 cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
940 __u64 length, __u8 type, __u16 flags,
941 struct cifsFileInfo *cfile,
942 struct cifsLockInfo **conf_lock, int rw_check)
943 {
944 struct cifsLockInfo *li;
945 struct cifsFileInfo *cur_cfile = fdlocks->cfile;
946 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
947
948 list_for_each_entry(li, &fdlocks->locks, llist) {
949 if (offset + length <= li->offset ||
950 offset >= li->offset + li->length)
951 continue;
952 if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
953 server->ops->compare_fids(cfile, cur_cfile)) {
954 /* shared lock prevents write op through the same fid */
955 if (!(li->type & server->vals->shared_lock_type) ||
956 rw_check != CIFS_WRITE_OP)
957 continue;
958 }
959 if ((type & server->vals->shared_lock_type) &&
960 ((server->ops->compare_fids(cfile, cur_cfile) &&
961 current->tgid == li->pid) || type == li->type))
962 continue;
963 if (rw_check == CIFS_LOCK_OP &&
964 (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
965 server->ops->compare_fids(cfile, cur_cfile))
966 continue;
967 if (conf_lock)
968 *conf_lock = li;
969 return true;
970 }
971 return false;
972 }
973
974 bool
975 cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
976 __u8 type, __u16 flags,
977 struct cifsLockInfo **conf_lock, int rw_check)
978 {
979 bool rc = false;
980 struct cifs_fid_locks *cur;
981 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
982
983 list_for_each_entry(cur, &cinode->llist, llist) {
984 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
985 flags, cfile, conf_lock,
986 rw_check);
987 if (rc)
988 break;
989 }
990
991 return rc;
992 }
993
994 /*
995 * Check if there is another lock that prevents us from setting the lock
996 * (mandatory style). If such a lock exists, update the flock structure with its
997 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
998 * or leave it the same if we can't. Returns 0 if we don't need to send a request
999 * to the server or 1 otherwise.
1000 */
1001 static int
1002 cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1003 __u8 type, struct file_lock *flock)
1004 {
1005 int rc = 0;
1006 struct cifsLockInfo *conf_lock;
1007 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1008 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1009 bool exist;
1010
1011 down_read(&cinode->lock_sem);
1012
1013 exist = cifs_find_lock_conflict(cfile, offset, length, type,
1014 flock->fl_flags, &conf_lock,
1015 CIFS_LOCK_OP);
1016 if (exist) {
1017 flock->fl_start = conf_lock->offset;
1018 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
1019 flock->fl_pid = conf_lock->pid;
1020 if (conf_lock->type & server->vals->shared_lock_type)
1021 flock->fl_type = F_RDLCK;
1022 else
1023 flock->fl_type = F_WRLCK;
1024 } else if (!cinode->can_cache_brlcks)
1025 rc = 1;
1026 else
1027 flock->fl_type = F_UNLCK;
1028
1029 up_read(&cinode->lock_sem);
1030 return rc;
1031 }
1032
1033 static void
1034 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
1035 {
1036 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1037 cifs_down_write(&cinode->lock_sem);
1038 list_add_tail(&lock->llist, &cfile->llist->locks);
1039 up_write(&cinode->lock_sem);
1040 }
1041
1042 /*
1043 * Set the byte-range lock (mandatory style). Returns:
1044 * 1) 0, if we set the lock and don't need to send a request to the server;
1045 * 2) 1, if no locks prevent us but we need to send a request to the server;
1046 * 3) -EACCES, if there is a lock that prevents us and wait is false.
1047 */
1048 static int
1049 cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
1050 bool wait)
1051 {
1052 struct cifsLockInfo *conf_lock;
1053 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1054 bool exist;
1055 int rc = 0;
1056
1057 try_again:
1058 exist = false;
1059 cifs_down_write(&cinode->lock_sem);
1060
1061 exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
1062 lock->type, lock->flags, &conf_lock,
1063 CIFS_LOCK_OP);
1064 if (!exist && cinode->can_cache_brlcks) {
1065 list_add_tail(&lock->llist, &cfile->llist->locks);
1066 up_write(&cinode->lock_sem);
1067 return rc;
1068 }
1069
1070 if (!exist)
1071 rc = 1;
1072 else if (!wait)
1073 rc = -EACCES;
1074 else {
1075 list_add_tail(&lock->blist, &conf_lock->blist);
1076 up_write(&cinode->lock_sem);
1077 rc = wait_event_interruptible(lock->block_q,
1078 (lock->blist.prev == &lock->blist) &&
1079 (lock->blist.next == &lock->blist));
1080 if (!rc)
1081 goto try_again;
1082 cifs_down_write(&cinode->lock_sem);
1083 list_del_init(&lock->blist);
1084 }
1085
1086 up_write(&cinode->lock_sem);
1087 return rc;
1088 }
1089
1090 /*
1091 * Check if there is another lock that prevents us from setting the lock
1092 * (posix style). If such a lock exists, update the flock structure with its
1093 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1094 * or leave it the same if we can't. Returns 0 if we don't need to send a request
1095 * to the server or 1 otherwise.
1096 */
1097 static int
1098 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1099 {
1100 int rc = 0;
1101 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1102 unsigned char saved_type = flock->fl_type;
1103
1104 if ((flock->fl_flags & FL_POSIX) == 0)
1105 return 1;
1106
1107 down_read(&cinode->lock_sem);
1108 posix_test_lock(file, flock);
1109
1110 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
1111 flock->fl_type = saved_type;
1112 rc = 1;
1113 }
1114
1115 up_read(&cinode->lock_sem);
1116 return rc;
1117 }
1118
1119 /*
1120 * Set the byte-range lock (posix style). Returns:
1121 * 1) 0, if we set the lock and don't need to send a request to the server;
1122 * 2) 1, if we need to send a request to the server;
1123 * 3) <0, if an error occurs while setting the lock.
1124 */
1125 static int
1126 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1127 {
1128 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1129 int rc = 1;
1130
1131 if ((flock->fl_flags & FL_POSIX) == 0)
1132 return rc;
1133
1134 try_again:
1135 cifs_down_write(&cinode->lock_sem);
1136 if (!cinode->can_cache_brlcks) {
1137 up_write(&cinode->lock_sem);
1138 return rc;
1139 }
1140
1141 rc = posix_lock_file(file, flock, NULL);
1142 up_write(&cinode->lock_sem);
1143 if (rc == FILE_LOCK_DEFERRED) {
1144 rc = wait_event_interruptible(flock->fl_wait, !flock->fl_blocker);
1145 if (!rc)
1146 goto try_again;
1147 locks_delete_block(flock);
1148 }
1149 return rc;
1150 }
1151
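/*
 * Send all cached byte-range locks for this open file to the server using
 * LOCKING_ANDX requests, batching as many ranges per call as the server's
 * maximum buffer size allows.
 */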
1152 int
1153 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1154 {
1155 unsigned int xid;
1156 int rc = 0, stored_rc;
1157 struct cifsLockInfo *li, *tmp;
1158 struct cifs_tcon *tcon;
1159 unsigned int num, max_num, max_buf;
1160 LOCKING_ANDX_RANGE *buf, *cur;
1161 static const int types[] = {
1162 LOCKING_ANDX_LARGE_FILES,
1163 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1164 };
1165 int i;
1166
1167 xid = get_xid();
1168 tcon = tlink_tcon(cfile->tlink);
1169
1170 /*
1171 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1172 * and check it before using.
1173 */
1174 max_buf = tcon->ses->server->maxBuf;
1175 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1176 free_xid(xid);
1177 return -EINVAL;
1178 }
1179
1180 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1181 PAGE_SIZE);
1182 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1183 PAGE_SIZE);
1184 max_num = (max_buf - sizeof(struct smb_hdr)) /
1185 sizeof(LOCKING_ANDX_RANGE);
1186 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1187 if (!buf) {
1188 free_xid(xid);
1189 return -ENOMEM;
1190 }
1191
1192 for (i = 0; i < 2; i++) {
1193 cur = buf;
1194 num = 0;
1195 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1196 if (li->type != types[i])
1197 continue;
1198 cur->Pid = cpu_to_le16(li->pid);
1199 cur->LengthLow = cpu_to_le32((u32)li->length);
1200 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1201 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1202 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1203 if (++num == max_num) {
1204 stored_rc = cifs_lockv(xid, tcon,
1205 cfile->fid.netfid,
1206 (__u8)li->type, 0, num,
1207 buf);
1208 if (stored_rc)
1209 rc = stored_rc;
1210 cur = buf;
1211 num = 0;
1212 } else
1213 cur++;
1214 }
1215
1216 if (num) {
1217 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1218 (__u8)types[i], 0, num, buf);
1219 if (stored_rc)
1220 rc = stored_rc;
1221 }
1222 }
1223
1224 kfree(buf);
1225 free_xid(xid);
1226 return rc;
1227 }
1228
1229 static __u32
1230 hash_lockowner(fl_owner_t owner)
1231 {
1232 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1233 }
1234
1235 struct lock_to_push {
1236 struct list_head llist;
1237 __u64 offset;
1238 __u64 length;
1239 __u32 pid;
1240 __u16 netfid;
1241 __u8 type;
1242 };
1243
1244 static int
1245 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1246 {
1247 struct inode *inode = d_inode(cfile->dentry);
1248 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1249 struct file_lock *flock;
1250 struct file_lock_context *flctx = inode->i_flctx;
1251 unsigned int count = 0, i;
1252 int rc = 0, xid, type;
1253 struct list_head locks_to_send, *el;
1254 struct lock_to_push *lck, *tmp;
1255 __u64 length;
1256
1257 xid = get_xid();
1258
1259 if (!flctx)
1260 goto out;
1261
1262 spin_lock(&flctx->flc_lock);
1263 list_for_each(el, &flctx->flc_posix) {
1264 count++;
1265 }
1266 spin_unlock(&flctx->flc_lock);
1267
1268 INIT_LIST_HEAD(&locks_to_send);
1269
1270 /*
1271 * Allocating count locks is enough because no FL_POSIX locks can be
1272 * added to the list while we are holding cinode->lock_sem that
1273 * protects locking operations of this inode.
1274 */
1275 for (i = 0; i < count; i++) {
1276 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1277 if (!lck) {
1278 rc = -ENOMEM;
1279 goto err_out;
1280 }
1281 list_add_tail(&lck->llist, &locks_to_send);
1282 }
1283
1284 el = locks_to_send.next;
1285 spin_lock(&flctx->flc_lock);
1286 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
1287 if (el == &locks_to_send) {
1288 /*
1289 * The list ended. We don't have enough allocated
1290 * structures - something is really wrong.
1291 */
1292 cifs_dbg(VFS, "Can't push all brlocks!\n");
1293 break;
1294 }
1295 length = 1 + flock->fl_end - flock->fl_start;
1296 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1297 type = CIFS_RDLCK;
1298 else
1299 type = CIFS_WRLCK;
1300 lck = list_entry(el, struct lock_to_push, llist);
1301 lck->pid = hash_lockowner(flock->fl_owner);
1302 lck->netfid = cfile->fid.netfid;
1303 lck->length = length;
1304 lck->type = type;
1305 lck->offset = flock->fl_start;
1306 }
1307 spin_unlock(&flctx->flc_lock);
1308
1309 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1310 int stored_rc;
1311
1312 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1313 lck->offset, lck->length, NULL,
1314 lck->type, 0);
1315 if (stored_rc)
1316 rc = stored_rc;
1317 list_del(&lck->llist);
1318 kfree(lck);
1319 }
1320
1321 out:
1322 free_xid(xid);
1323 return rc;
1324 err_out:
1325 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1326 list_del(&lck->llist);
1327 kfree(lck);
1328 }
1329 goto out;
1330 }
1331
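/*
 * Push all locks cached while we held an oplock/lease to the server, using
 * POSIX locks when the unix extensions allow it and mandatory locks
 * otherwise, then mark the inode as no longer able to cache brlocks.
 */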
1332 static int
1333 cifs_push_locks(struct cifsFileInfo *cfile)
1334 {
1335 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1336 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1337 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1338 int rc = 0;
1339
1340 /* we are going to update can_cache_brlcks here - need a write access */
1341 cifs_down_write(&cinode->lock_sem);
1342 if (!cinode->can_cache_brlcks) {
1343 up_write(&cinode->lock_sem);
1344 return rc;
1345 }
1346
1347 if (cap_unix(tcon->ses) &&
1348 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1349 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1350 rc = cifs_push_posix_locks(cfile);
1351 else
1352 rc = tcon->ses->server->ops->push_mand_locks(cfile);
1353
1354 cinode->can_cache_brlcks = false;
1355 up_write(&cinode->lock_sem);
1356 return rc;
1357 }
1358
1359 static void
1360 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1361 bool *wait_flag, struct TCP_Server_Info *server)
1362 {
1363 if (flock->fl_flags & FL_POSIX)
1364 cifs_dbg(FYI, "Posix\n");
1365 if (flock->fl_flags & FL_FLOCK)
1366 cifs_dbg(FYI, "Flock\n");
1367 if (flock->fl_flags & FL_SLEEP) {
1368 cifs_dbg(FYI, "Blocking lock\n");
1369 *wait_flag = true;
1370 }
1371 if (flock->fl_flags & FL_ACCESS)
1372 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1373 if (flock->fl_flags & FL_LEASE)
1374 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1375 if (flock->fl_flags &
1376 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1377 FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
1378 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
1379
1380 *type = server->vals->large_lock_type;
1381 if (flock->fl_type == F_WRLCK) {
1382 cifs_dbg(FYI, "F_WRLCK\n");
1383 *type |= server->vals->exclusive_lock_type;
1384 *lock = 1;
1385 } else if (flock->fl_type == F_UNLCK) {
1386 cifs_dbg(FYI, "F_UNLCK\n");
1387 *type |= server->vals->unlock_lock_type;
1388 *unlock = 1;
1389 /* Check if unlock includes more than one lock range */
1390 } else if (flock->fl_type == F_RDLCK) {
1391 cifs_dbg(FYI, "F_RDLCK\n");
1392 *type |= server->vals->shared_lock_type;
1393 *lock = 1;
1394 } else if (flock->fl_type == F_EXLCK) {
1395 cifs_dbg(FYI, "F_EXLCK\n");
1396 *type |= server->vals->exclusive_lock_type;
1397 *lock = 1;
1398 } else if (flock->fl_type == F_SHLCK) {
1399 cifs_dbg(FYI, "F_SHLCK\n");
1400 *type |= server->vals->shared_lock_type;
1401 *lock = 1;
1402 } else
1403 cifs_dbg(FYI, "Unknown type of lock\n");
1404 }
1405
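/*
 * Handle F_GETLK: test for a conflicting lock locally and, if that is not
 * conclusive, probe the server by temporarily acquiring and releasing the
 * requested range.
 */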
1406 static int
1407 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
1408 bool wait_flag, bool posix_lck, unsigned int xid)
1409 {
1410 int rc = 0;
1411 __u64 length = 1 + flock->fl_end - flock->fl_start;
1412 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1413 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1414 struct TCP_Server_Info *server = tcon->ses->server;
1415 __u16 netfid = cfile->fid.netfid;
1416
1417 if (posix_lck) {
1418 int posix_lock_type;
1419
1420 rc = cifs_posix_lock_test(file, flock);
1421 if (!rc)
1422 return rc;
1423
1424 if (type & server->vals->shared_lock_type)
1425 posix_lock_type = CIFS_RDLCK;
1426 else
1427 posix_lock_type = CIFS_WRLCK;
1428 rc = CIFSSMBPosixLock(xid, tcon, netfid,
1429 hash_lockowner(flock->fl_owner),
1430 flock->fl_start, length, flock,
1431 posix_lock_type, wait_flag);
1432 return rc;
1433 }
1434
1435 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
1436 if (!rc)
1437 return rc;
1438
1439 /* BB we could chain these into one lock request BB */
1440 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1441 1, 0, false);
1442 if (rc == 0) {
1443 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1444 type, 0, 1, false);
1445 flock->fl_type = F_UNLCK;
1446 if (rc != 0)
1447 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1448 rc);
1449 return 0;
1450 }
1451
1452 if (type & server->vals->shared_lock_type) {
1453 flock->fl_type = F_WRLCK;
1454 return 0;
1455 }
1456
1457 type &= ~server->vals->exclusive_lock_type;
1458
1459 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1460 type | server->vals->shared_lock_type,
1461 1, 0, false);
1462 if (rc == 0) {
1463 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1464 type | server->vals->shared_lock_type, 0, 1, false);
1465 flock->fl_type = F_RDLCK;
1466 if (rc != 0)
1467 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1468 rc);
1469 } else
1470 flock->fl_type = F_WRLCK;
1471
1472 return 0;
1473 }
1474
1475 void
1476 cifs_move_llist(struct list_head *source, struct list_head *dest)
1477 {
1478 struct list_head *li, *tmp;
1479 list_for_each_safe(li, tmp, source)
1480 list_move(li, dest);
1481 }
1482
1483 void
1484 cifs_free_llist(struct list_head *llist)
1485 {
1486 struct cifsLockInfo *li, *tmp;
1487 list_for_each_entry_safe(li, tmp, llist, llist) {
1488 cifs_del_lock_waiters(li);
1489 list_del(&li->llist);
1490 kfree(li);
1491 }
1492 }
1493
1494 int
1495 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1496 unsigned int xid)
1497 {
1498 int rc = 0, stored_rc;
1499 static const int types[] = {
1500 LOCKING_ANDX_LARGE_FILES,
1501 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1502 };
1503 unsigned int i;
1504 unsigned int max_num, num, max_buf;
1505 LOCKING_ANDX_RANGE *buf, *cur;
1506 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1507 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1508 struct cifsLockInfo *li, *tmp;
1509 __u64 length = 1 + flock->fl_end - flock->fl_start;
1510 struct list_head tmp_llist;
1511
1512 INIT_LIST_HEAD(&tmp_llist);
1513
1514 /*
1515 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1516 * and check it before using.
1517 */
1518 max_buf = tcon->ses->server->maxBuf;
1519 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
1520 return -EINVAL;
1521
1522 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1523 PAGE_SIZE);
1524 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1525 PAGE_SIZE);
1526 max_num = (max_buf - sizeof(struct smb_hdr)) /
1527 sizeof(LOCKING_ANDX_RANGE);
1528 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1529 if (!buf)
1530 return -ENOMEM;
1531
1532 cifs_down_write(&cinode->lock_sem);
1533 for (i = 0; i < 2; i++) {
1534 cur = buf;
1535 num = 0;
1536 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1537 if (flock->fl_start > li->offset ||
1538 (flock->fl_start + length) <
1539 (li->offset + li->length))
1540 continue;
1541 if (current->tgid != li->pid)
1542 continue;
1543 if (types[i] != li->type)
1544 continue;
1545 if (cinode->can_cache_brlcks) {
1546 /*
1547 * We can cache brlock requests - simply remove
1548 * a lock from the file's list.
1549 */
1550 list_del(&li->llist);
1551 cifs_del_lock_waiters(li);
1552 kfree(li);
1553 continue;
1554 }
1555 cur->Pid = cpu_to_le16(li->pid);
1556 cur->LengthLow = cpu_to_le32((u32)li->length);
1557 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1558 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1559 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1560 /*
1561 * We need to save a lock here to let us add it again to
1562 * the file's list if the unlock range request fails on
1563 * the server.
1564 */
1565 list_move(&li->llist, &tmp_llist);
1566 if (++num == max_num) {
1567 stored_rc = cifs_lockv(xid, tcon,
1568 cfile->fid.netfid,
1569 li->type, num, 0, buf);
1570 if (stored_rc) {
1571 /*
1572 * We failed on the unlock range
1573 * request - add all locks from the tmp
1574 * list to the head of the file's list.
1575 */
1576 cifs_move_llist(&tmp_llist,
1577 &cfile->llist->locks);
1578 rc = stored_rc;
1579 } else
1580 /*
1581 * The unlock range request succeeded -
1582 * free the tmp list.
1583 */
1584 cifs_free_llist(&tmp_llist);
1585 cur = buf;
1586 num = 0;
1587 } else
1588 cur++;
1589 }
1590 if (num) {
1591 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1592 types[i], num, 0, buf);
1593 if (stored_rc) {
1594 cifs_move_llist(&tmp_llist,
1595 &cfile->llist->locks);
1596 rc = stored_rc;
1597 } else
1598 cifs_free_llist(&tmp_llist);
1599 }
1600 }
1601
1602 up_write(&cinode->lock_sem);
1603 kfree(buf);
1604 return rc;
1605 }
1606
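/*
 * Handle F_SETLK/F_SETLKW: set or clear a byte-range lock, going to the
 * server when the lock cannot be satisfied from the local cache.
 */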
1607 static int
1608 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
1609 bool wait_flag, bool posix_lck, int lock, int unlock,
1610 unsigned int xid)
1611 {
1612 int rc = 0;
1613 __u64 length = 1 + flock->fl_end - flock->fl_start;
1614 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1615 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1616 struct TCP_Server_Info *server = tcon->ses->server;
1617 struct inode *inode = d_inode(cfile->dentry);
1618
1619 if (posix_lck) {
1620 int posix_lock_type;
1621
1622 rc = cifs_posix_lock_set(file, flock);
1623 if (!rc || rc < 0)
1624 return rc;
1625
1626 if (type & server->vals->shared_lock_type)
1627 posix_lock_type = CIFS_RDLCK;
1628 else
1629 posix_lock_type = CIFS_WRLCK;
1630
1631 if (unlock == 1)
1632 posix_lock_type = CIFS_UNLCK;
1633
1634 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1635 hash_lockowner(flock->fl_owner),
1636 flock->fl_start, length,
1637 NULL, posix_lock_type, wait_flag);
1638 goto out;
1639 }
1640
1641 if (lock) {
1642 struct cifsLockInfo *lock;
1643
1644 lock = cifs_lock_init(flock->fl_start, length, type,
1645 flock->fl_flags);
1646 if (!lock)
1647 return -ENOMEM;
1648
1649 rc = cifs_lock_add_if(cfile, lock, wait_flag);
1650 if (rc < 0) {
1651 kfree(lock);
1652 return rc;
1653 }
1654 if (!rc)
1655 goto out;
1656
1657 /*
1658 * A Windows 7 server can delay breaking a lease from read to None
1659 * if we set a byte-range lock on a file - break it explicitly
1660 * before sending the lock to the server to be sure the next
1661 * read won't conflict with non-overlapping locks due to
1662 * page reading.
1663 */
1664 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
1665 CIFS_CACHE_READ(CIFS_I(inode))) {
1666 cifs_zap_mapping(inode);
1667 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
1668 inode);
1669 CIFS_I(inode)->oplock = 0;
1670 }
1671
1672 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1673 type, 1, 0, wait_flag);
1674 if (rc) {
1675 kfree(lock);
1676 return rc;
1677 }
1678
1679 cifs_lock_add(cfile, lock);
1680 } else if (unlock)
1681 rc = server->ops->mand_unlock_range(cfile, flock, xid);
1682
1683 out:
1684 if (flock->fl_flags & FL_POSIX) {
1685 /*
1686 * If this is a request to remove all locks because we
1687 * are closing the file, it doesn't matter if the
1688 * unlocking failed as both cifs.ko and the SMB server
1689 * remove the lock on file close
1690 */
1691 if (rc) {
1692 cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
1693 if (!(flock->fl_flags & FL_CLOSE))
1694 return rc;
1695 }
1696 rc = locks_lock_file_wait(file, flock);
1697 }
1698 return rc;
1699 }
1700
1701 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1702 {
1703 int rc, xid;
1704 int lock = 0, unlock = 0;
1705 bool wait_flag = false;
1706 bool posix_lck = false;
1707 struct cifs_sb_info *cifs_sb;
1708 struct cifs_tcon *tcon;
1709 struct cifsFileInfo *cfile;
1710 __u32 type;
1711
1712 rc = -EACCES;
1713 xid = get_xid();
1714
1715 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1716 cmd, flock->fl_flags, flock->fl_type,
1717 flock->fl_start, flock->fl_end);
1718
1719 cfile = (struct cifsFileInfo *)file->private_data;
1720 tcon = tlink_tcon(cfile->tlink);
1721
1722 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1723 tcon->ses->server);
1724 cifs_sb = CIFS_FILE_SB(file);
1725
1726 if (cap_unix(tcon->ses) &&
1727 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1728 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1729 posix_lck = true;
1730 /*
1731 * BB add code here to normalize offset and length to account for
1732 * negative length which we can not accept over the wire.
1733 */
1734 if (IS_GETLK(cmd)) {
1735 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
1736 free_xid(xid);
1737 return rc;
1738 }
1739
1740 if (!lock && !unlock) {
1741 /*
1742 * if no lock or unlock then nothing to do since we do not
1743 * know what it is
1744 */
1745 free_xid(xid);
1746 return -EOPNOTSUPP;
1747 }
1748
1749 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1750 xid);
1751 free_xid(xid);
1752 return rc;
1753 }
1754
1755 /*
1756 * update the file size (if needed) after a write. Should be called with
1757 * the inode->i_lock held
1758 */
1759 void
1760 cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1761 unsigned int bytes_written)
1762 {
1763 loff_t end_of_write = offset + bytes_written;
1764
1765 if (end_of_write > cifsi->server_eof)
1766 cifsi->server_eof = end_of_write;
1767 }
1768
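/*
 * Synchronously write @write_size bytes at *@offset using the server's
 * sync_write operation, retrying on -EAGAIN and reopening an invalidated
 * handle as needed; updates the cached server EOF and i_size on success.
 */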
1769 static ssize_t
1770 cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1771 size_t write_size, loff_t *offset)
1772 {
1773 int rc = 0;
1774 unsigned int bytes_written = 0;
1775 unsigned int total_written;
1776 struct cifs_tcon *tcon;
1777 struct TCP_Server_Info *server;
1778 unsigned int xid;
1779 struct dentry *dentry = open_file->dentry;
1780 struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
1781 struct cifs_io_parms io_parms;
1782
1783 cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
1784 write_size, *offset, dentry);
1785
1786 tcon = tlink_tcon(open_file->tlink);
1787 server = tcon->ses->server;
1788
1789 if (!server->ops->sync_write)
1790 return -ENOSYS;
1791
1792 xid = get_xid();
1793
1794 for (total_written = 0; write_size > total_written;
1795 total_written += bytes_written) {
1796 rc = -EAGAIN;
1797 while (rc == -EAGAIN) {
1798 struct kvec iov[2];
1799 unsigned int len;
1800
1801 if (open_file->invalidHandle) {
1802 /* we could deadlock if we called
1803 filemap_fdatawait from here so tell
1804 reopen_file not to flush data to
1805 server now */
1806 rc = cifs_reopen_file(open_file, false);
1807 if (rc != 0)
1808 break;
1809 }
1810
1811 len = min(server->ops->wp_retry_size(d_inode(dentry)),
1812 (unsigned int)write_size - total_written);
1813 /* iov[0] is reserved for smb header */
1814 iov[1].iov_base = (char *)write_data + total_written;
1815 iov[1].iov_len = len;
1816 io_parms.pid = pid;
1817 io_parms.tcon = tcon;
1818 io_parms.offset = *offset;
1819 io_parms.length = len;
1820 rc = server->ops->sync_write(xid, &open_file->fid,
1821 &io_parms, &bytes_written, iov, 1);
1822 }
1823 if (rc || (bytes_written == 0)) {
1824 if (total_written)
1825 break;
1826 else {
1827 free_xid(xid);
1828 return rc;
1829 }
1830 } else {
1831 spin_lock(&d_inode(dentry)->i_lock);
1832 cifs_update_eof(cifsi, *offset, bytes_written);
1833 spin_unlock(&d_inode(dentry)->i_lock);
1834 *offset += bytes_written;
1835 }
1836 }
1837
1838 cifs_stats_bytes_written(tcon, total_written);
1839
1840 if (total_written > 0) {
1841 spin_lock(&d_inode(dentry)->i_lock);
1842 if (*offset > d_inode(dentry)->i_size)
1843 i_size_write(d_inode(dentry), *offset);
1844 spin_unlock(&d_inode(dentry)->i_lock);
1845 }
1846 mark_inode_dirty_sync(d_inode(dentry));
1847 free_xid(xid);
1848 return total_written;
1849 }
1850
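/*
 * Find a readable handle for this inode that does not have a close
 * pending, optionally restricted to the current fsuid on multiuser
 * mounts. Returns a referenced cifsFileInfo or NULL if none is found.
 */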
1851 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1852 bool fsuid_only)
1853 {
1854 struct cifsFileInfo *open_file = NULL;
1855 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1856
1857 /* only filter by fsuid on multiuser mounts */
1858 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1859 fsuid_only = false;
1860
1861 spin_lock(&cifs_inode->open_file_lock);
1862 	/* we could simply take the first list entry since write-only entries
1863 	   are always at the end of the list, but the first entry might
1864 	   have a close pending, so we go through the whole list */
1865 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1866 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
1867 continue;
1868 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
1869 if (!open_file->invalidHandle) {
1870 /* found a good file */
1871 /* lock it so it will not be closed on us */
1872 cifsFileInfo_get(open_file);
1873 spin_unlock(&cifs_inode->open_file_lock);
1874 return open_file;
1875 } /* else might as well continue, and look for
1876 another, or simply have the caller reopen it
1877 again rather than trying to fix this handle */
1878 } else /* write only file */
1879 break; /* write only files are last so must be done */
1880 }
1881 spin_unlock(&cifs_inode->open_file_lock);
1882 return NULL;
1883 }
1884
1885 /* Return -EBADF if no handle is found and general rc otherwise */
1886 int
1887 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
1888 struct cifsFileInfo **ret_file)
1889 {
1890 struct cifsFileInfo *open_file, *inv_file = NULL;
1891 struct cifs_sb_info *cifs_sb;
1892 bool any_available = false;
1893 int rc = -EBADF;
1894 unsigned int refind = 0;
1895
1896 *ret_file = NULL;
1897
1898 /*
1899 * Having a null inode here (because mapping->host was set to zero by
1900 	 * the VFS or MM) should not happen but we had reports of an oops (due
1901 * to it being zero) during stress testcases so we need to check for it
1902 */
1903
1904 if (cifs_inode == NULL) {
1905 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
1906 dump_stack();
1907 return rc;
1908 }
1909
1910 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1911
1912 /* only filter by fsuid on multiuser mounts */
1913 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1914 fsuid_only = false;
1915
1916 spin_lock(&cifs_inode->open_file_lock);
1917 refind_writable:
1918 if (refind > MAX_REOPEN_ATT) {
1919 spin_unlock(&cifs_inode->open_file_lock);
1920 return rc;
1921 }
1922 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1923 if (!any_available && open_file->pid != current->tgid)
1924 continue;
1925 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
1926 continue;
1927 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
1928 if (!open_file->invalidHandle) {
1929 /* found a good writable file */
1930 cifsFileInfo_get(open_file);
1931 spin_unlock(&cifs_inode->open_file_lock);
1932 *ret_file = open_file;
1933 return 0;
1934 } else {
1935 if (!inv_file)
1936 inv_file = open_file;
1937 }
1938 }
1939 }
1940 	/* couldn't find usable FH with same pid, try any available */
1941 if (!any_available) {
1942 any_available = true;
1943 goto refind_writable;
1944 }
1945
1946 if (inv_file) {
1947 any_available = false;
1948 cifsFileInfo_get(inv_file);
1949 }
1950
1951 spin_unlock(&cifs_inode->open_file_lock);
1952
1953 if (inv_file) {
1954 rc = cifs_reopen_file(inv_file, false);
1955 if (!rc) {
1956 *ret_file = inv_file;
1957 return 0;
1958 }
1959
1960 spin_lock(&cifs_inode->open_file_lock);
1961 list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
1962 spin_unlock(&cifs_inode->open_file_lock);
1963 cifsFileInfo_put(inv_file);
1964 ++refind;
1965 inv_file = NULL;
1966 spin_lock(&cifs_inode->open_file_lock);
1967 goto refind_writable;
1968 }
1969
1970 return rc;
1971 }
1972
1973 struct cifsFileInfo *
1974 find_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only)
1975 {
1976 struct cifsFileInfo *cfile;
1977 int rc;
1978
1979 rc = cifs_get_writable_file(cifs_inode, fsuid_only, &cfile);
1980 if (rc)
1981 cifs_dbg(FYI, "couldn't find writable handle rc=%d", rc);
1982
1983 return cfile;
1984 }
1985
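/*
 * Look up an open writable handle on this tcon by full path name. Returns 0
 * and a referenced handle in *ret_file on success, -ENOENT if no open file
 * matches the path, or -ENOMEM if the path could not be built.
 */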
1986 int
1987 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
1988 struct cifsFileInfo **ret_file)
1989 {
1990 struct list_head *tmp;
1991 struct cifsFileInfo *cfile;
1992 struct cifsInodeInfo *cinode;
1993 char *full_path;
1994
1995 *ret_file = NULL;
1996
1997 spin_lock(&tcon->open_file_lock);
1998 list_for_each(tmp, &tcon->openFileList) {
1999 cfile = list_entry(tmp, struct cifsFileInfo,
2000 tlist);
2001 full_path = build_path_from_dentry(cfile->dentry);
2002 if (full_path == NULL) {
2003 spin_unlock(&tcon->open_file_lock);
2004 return -ENOMEM;
2005 }
2006 if (strcmp(full_path, name)) {
2007 kfree(full_path);
2008 continue;
2009 }
2010
2011 kfree(full_path);
2012 cinode = CIFS_I(d_inode(cfile->dentry));
2013 spin_unlock(&tcon->open_file_lock);
2014 return cifs_get_writable_file(cinode, 0, ret_file);
2015 }
2016
2017 spin_unlock(&tcon->open_file_lock);
2018 return -ENOENT;
2019 }
2020
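/*
 * Look up an open readable handle on this tcon by full path name. Returns 0
 * and a referenced handle in *ret_file on success, -ENOENT if no open file
 * matches the path, or -ENOMEM if the path could not be built.
 */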
2021 int
2022 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2023 struct cifsFileInfo **ret_file)
2024 {
2025 struct list_head *tmp;
2026 struct cifsFileInfo *cfile;
2027 struct cifsInodeInfo *cinode;
2028 char *full_path;
2029
2030 *ret_file = NULL;
2031
2032 spin_lock(&tcon->open_file_lock);
2033 list_for_each(tmp, &tcon->openFileList) {
2034 cfile = list_entry(tmp, struct cifsFileInfo,
2035 tlist);
2036 full_path = build_path_from_dentry(cfile->dentry);
2037 if (full_path == NULL) {
2038 spin_unlock(&tcon->open_file_lock);
2039 return -ENOMEM;
2040 }
2041 if (strcmp(full_path, name)) {
2042 kfree(full_path);
2043 continue;
2044 }
2045
2046 kfree(full_path);
2047 cinode = CIFS_I(d_inode(cfile->dentry));
2048 spin_unlock(&tcon->open_file_lock);
2049 *ret_file = find_readable_file(cinode, 0);
2050 return *ret_file ? 0 : -ENOENT;
2051 }
2052
2053 spin_unlock(&tcon->open_file_lock);
2054 return -ENOENT;
2055 }
2056
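/*
 * Write the dirty [from, to) range of a page cache page back to the server
 * using any writable handle for the inode. The range is clamped to the
 * current i_size so that writeback never extends the file.
 */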
2057 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
2058 {
2059 struct address_space *mapping = page->mapping;
2060 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
2061 char *write_data;
2062 int rc = -EFAULT;
2063 int bytes_written = 0;
2064 struct inode *inode;
2065 struct cifsFileInfo *open_file;
2066
2067 if (!mapping || !mapping->host)
2068 return -EFAULT;
2069
2070 inode = page->mapping->host;
2071
2072 offset += (loff_t)from;
2073 write_data = kmap(page);
2074 write_data += from;
2075
2076 if ((to > PAGE_SIZE) || (from > to)) {
2077 kunmap(page);
2078 return -EIO;
2079 }
2080
2081 /* racing with truncate? */
2082 if (offset > mapping->host->i_size) {
2083 kunmap(page);
2084 return 0; /* don't care */
2085 }
2086
2087 /* check to make sure that we are not extending the file */
2088 if (mapping->host->i_size - offset < (loff_t)to)
2089 to = (unsigned)(mapping->host->i_size - offset);
2090
2091 rc = cifs_get_writable_file(CIFS_I(mapping->host), false, &open_file);
2092 if (!rc) {
2093 bytes_written = cifs_write(open_file, open_file->pid,
2094 write_data, to - from, &offset);
2095 cifsFileInfo_put(open_file);
2096 /* Does mm or vfs already set times? */
2097 inode->i_atime = inode->i_mtime = current_time(inode);
2098 if ((bytes_written > 0) && (offset))
2099 rc = 0;
2100 else if (bytes_written < 0)
2101 rc = bytes_written;
2102 else
2103 rc = -EFAULT;
2104 } else {
2105 cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
2106 if (!is_retryable_error(rc))
2107 rc = -EIO;
2108 }
2109
2110 kunmap(page);
2111 return rc;
2112 }
2113
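/*
 * Allocate a writedata descriptor and gather up to @tofind dirty-tagged
 * pages from the mapping starting at *index, returning how many were
 * found in *found_pages.
 */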
2114 static struct cifs_writedata *
2115 wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
2116 pgoff_t end, pgoff_t *index,
2117 unsigned int *found_pages)
2118 {
2119 struct cifs_writedata *wdata;
2120
2121 wdata = cifs_writedata_alloc((unsigned int)tofind,
2122 cifs_writev_complete);
2123 if (!wdata)
2124 return NULL;
2125
2126 *found_pages = find_get_pages_range_tag(mapping, index, end,
2127 PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
2128 return wdata;
2129 }
2130
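/*
 * Lock and mark for writeback the leading run of consecutive dirty pages
 * collected in @wdata, stopping at the first gap, page beyond the requested
 * range or EOF, or page already under writeback. Pages that will not be
 * sent are released. Returns the number of pages kept.
 */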
2131 static unsigned int
2132 wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
2133 struct address_space *mapping,
2134 struct writeback_control *wbc,
2135 pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
2136 {
2137 unsigned int nr_pages = 0, i;
2138 struct page *page;
2139
2140 for (i = 0; i < found_pages; i++) {
2141 page = wdata->pages[i];
2142 /*
2143 * At this point we hold neither the i_pages lock nor the
2144 * page lock: the page may be truncated or invalidated
2145 * (changing page->mapping to NULL), or even swizzled
2146 * back from swapper_space to tmpfs file mapping
2147 */
2148
2149 if (nr_pages == 0)
2150 lock_page(page);
2151 else if (!trylock_page(page))
2152 break;
2153
2154 if (unlikely(page->mapping != mapping)) {
2155 unlock_page(page);
2156 break;
2157 }
2158
2159 if (!wbc->range_cyclic && page->index > end) {
2160 *done = true;
2161 unlock_page(page);
2162 break;
2163 }
2164
2165 if (*next && (page->index != *next)) {
2166 /* Not next consecutive page */
2167 unlock_page(page);
2168 break;
2169 }
2170
2171 if (wbc->sync_mode != WB_SYNC_NONE)
2172 wait_on_page_writeback(page);
2173
2174 if (PageWriteback(page) ||
2175 !clear_page_dirty_for_io(page)) {
2176 unlock_page(page);
2177 break;
2178 }
2179
2180 /*
2181 * This actually clears the dirty bit in the radix tree.
2182 * See cifs_writepage() for more commentary.
2183 */
2184 set_page_writeback(page);
2185 if (page_offset(page) >= i_size_read(mapping->host)) {
2186 *done = true;
2187 unlock_page(page);
2188 end_page_writeback(page);
2189 break;
2190 }
2191
2192 wdata->pages[i] = page;
2193 *next = page->index + 1;
2194 ++nr_pages;
2195 }
2196
2197 /* reset index to refind any pages skipped */
2198 if (nr_pages == 0)
2199 *index = wdata->pages[0]->index + 1;
2200
2201 /* put any pages we aren't going to use */
2202 for (i = nr_pages; i < found_pages; i++) {
2203 put_page(wdata->pages[i]);
2204 wdata->pages[i] = NULL;
2205 }
2206
2207 return nr_pages;
2208 }
2209
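/*
 * Fill in the remaining writedata fields (offset, byte count, tail size,
 * pid) for the prepared pages, adjust the reserved credits and issue the
 * asynchronous write to the server.
 */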
2210 static int
2211 wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
2212 struct address_space *mapping, struct writeback_control *wbc)
2213 {
2214 int rc;
2215 struct TCP_Server_Info *server =
2216 tlink_tcon(wdata->cfile->tlink)->ses->server;
2217
2218 wdata->sync_mode = wbc->sync_mode;
2219 wdata->nr_pages = nr_pages;
2220 wdata->offset = page_offset(wdata->pages[0]);
2221 wdata->pagesz = PAGE_SIZE;
2222 wdata->tailsz = min(i_size_read(mapping->host) -
2223 page_offset(wdata->pages[nr_pages - 1]),
2224 (loff_t)PAGE_SIZE);
2225 wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
2226 wdata->pid = wdata->cfile->pid;
2227
2228 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
2229 if (rc)
2230 return rc;
2231
2232 if (wdata->cfile->invalidHandle)
2233 rc = -EAGAIN;
2234 else
2235 rc = server->ops->async_writev(wdata, cifs_writedata_release);
2236
2237 return rc;
2238 }
2239
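/*
 * Write back dirty pages for this mapping in wsize-sized batches, acquiring
 * write credits and a writable handle for each batch, and retrying -EAGAIN
 * failures when WB_SYNC_ALL writeback is requested.
 */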
2240 static int cifs_writepages(struct address_space *mapping,
2241 struct writeback_control *wbc)
2242 {
2243 struct inode *inode = mapping->host;
2244 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2245 struct TCP_Server_Info *server;
2246 bool done = false, scanned = false, range_whole = false;
2247 pgoff_t end, index;
2248 struct cifs_writedata *wdata;
2249 struct cifsFileInfo *cfile = NULL;
2250 int rc = 0;
2251 int saved_rc = 0;
2252 unsigned int xid;
2253
2254 /*
2255 * If wsize is smaller than the page cache size, default to writing
2256 * one page at a time via cifs_writepage
2257 */
2258 if (cifs_sb->wsize < PAGE_SIZE)
2259 return generic_writepages(mapping, wbc);
2260
2261 xid = get_xid();
2262 if (wbc->range_cyclic) {
2263 index = mapping->writeback_index; /* Start from prev offset */
2264 end = -1;
2265 } else {
2266 index = wbc->range_start >> PAGE_SHIFT;
2267 end = wbc->range_end >> PAGE_SHIFT;
2268 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2269 range_whole = true;
2270 scanned = true;
2271 }
2272 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
2273 retry:
2274 while (!done && index <= end) {
2275 unsigned int i, nr_pages, found_pages, wsize;
2276 pgoff_t next = 0, tofind, saved_index = index;
2277 struct cifs_credits credits_on_stack;
2278 struct cifs_credits *credits = &credits_on_stack;
2279 int get_file_rc = 0;
2280
2281 if (cfile)
2282 cifsFileInfo_put(cfile);
2283
2284 rc = cifs_get_writable_file(CIFS_I(inode), false, &cfile);
2285
2286 /* in case of an error store it to return later */
2287 if (rc)
2288 get_file_rc = rc;
2289
2290 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2291 &wsize, credits);
2292 if (rc != 0) {
2293 done = true;
2294 break;
2295 }
2296
2297 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
2298
2299 wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
2300 &found_pages);
2301 if (!wdata) {
2302 rc = -ENOMEM;
2303 done = true;
2304 add_credits_and_wake_if(server, credits, 0);
2305 break;
2306 }
2307
2308 if (found_pages == 0) {
2309 kref_put(&wdata->refcount, cifs_writedata_release);
2310 add_credits_and_wake_if(server, credits, 0);
2311 break;
2312 }
2313
2314 nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
2315 end, &index, &next, &done);
2316
2317 /* nothing to write? */
2318 if (nr_pages == 0) {
2319 kref_put(&wdata->refcount, cifs_writedata_release);
2320 add_credits_and_wake_if(server, credits, 0);
2321 continue;
2322 }
2323
2324 wdata->credits = credits_on_stack;
2325 wdata->cfile = cfile;
2326 cfile = NULL;
2327
2328 if (!wdata->cfile) {
2329 cifs_dbg(VFS, "No writable handle in writepages rc=%d\n",
2330 get_file_rc);
2331 if (is_retryable_error(get_file_rc))
2332 rc = get_file_rc;
2333 else
2334 rc = -EBADF;
2335 } else
2336 rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
2337
2338 for (i = 0; i < nr_pages; ++i)
2339 unlock_page(wdata->pages[i]);
2340
2341 /* send failure -- clean up the mess */
2342 if (rc != 0) {
2343 add_credits_and_wake_if(server, &wdata->credits, 0);
2344 for (i = 0; i < nr_pages; ++i) {
2345 if (is_retryable_error(rc))
2346 redirty_page_for_writepage(wbc,
2347 wdata->pages[i]);
2348 else
2349 SetPageError(wdata->pages[i]);
2350 end_page_writeback(wdata->pages[i]);
2351 put_page(wdata->pages[i]);
2352 }
2353 if (!is_retryable_error(rc))
2354 mapping_set_error(mapping, rc);
2355 }
2356 kref_put(&wdata->refcount, cifs_writedata_release);
2357
2358 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
2359 index = saved_index;
2360 continue;
2361 }
2362
2363 /* Return immediately if we received a signal during writing */
2364 if (is_interrupt_error(rc)) {
2365 done = true;
2366 break;
2367 }
2368
2369 if (rc != 0 && saved_rc == 0)
2370 saved_rc = rc;
2371
2372 wbc->nr_to_write -= nr_pages;
2373 if (wbc->nr_to_write <= 0)
2374 done = true;
2375
2376 index = next;
2377 }
2378
2379 if (!scanned && !done) {
2380 /*
2381 * We hit the last page and there is more work to be done: wrap
2382 * back to the start of the file
2383 */
2384 scanned = true;
2385 index = 0;
2386 goto retry;
2387 }
2388
2389 if (saved_rc != 0)
2390 rc = saved_rc;
2391
2392 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2393 mapping->writeback_index = index;
2394
2395 if (cfile)
2396 cifsFileInfo_put(cfile);
2397 free_xid(xid);
2398 return rc;
2399 }
2400
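/*
 * Write a single locked page back to the server. Retryable errors are
 * retried for WB_SYNC_ALL writeback and otherwise redirty the page; other
 * errors mark the page and mapping in error.
 */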
2401 static int
2402 cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
2403 {
2404 int rc;
2405 unsigned int xid;
2406
2407 xid = get_xid();
2408 /* BB add check for wbc flags */
2409 get_page(page);
2410 if (!PageUptodate(page))
2411 cifs_dbg(FYI, "ppw - page not up to date\n");
2412
2413 /*
2414 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2415 *
2416 * A writepage() implementation always needs to do either this,
2417 * or re-dirty the page with "redirty_page_for_writepage()" in
2418 * the case of a failure.
2419 *
2420 * Just unlocking the page will cause the radix tree tag-bits
2421 * to fail to update with the state of the page correctly.
2422 */
2423 set_page_writeback(page);
2424 retry_write:
2425 rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
2426 if (is_retryable_error(rc)) {
2427 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
2428 goto retry_write;
2429 redirty_page_for_writepage(wbc, page);
2430 } else if (rc != 0) {
2431 SetPageError(page);
2432 mapping_set_error(page->mapping, rc);
2433 } else {
2434 SetPageUptodate(page);
2435 }
2436 end_page_writeback(page);
2437 put_page(page);
2438 free_xid(xid);
2439 return rc;
2440 }
2441
2442 static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2443 {
2444 int rc = cifs_writepage_locked(page, wbc);
2445 unlock_page(page);
2446 return rc;
2447 }
2448
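/*
 * Complete a buffered write started by write_begin: mark the page up to
 * date when possible, otherwise push the copied range to the server
 * immediately, then update i_size if the write extended the file.
 */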
2449 static int cifs_write_end(struct file *file, struct address_space *mapping,
2450 loff_t pos, unsigned len, unsigned copied,
2451 struct page *page, void *fsdata)
2452 {
2453 int rc;
2454 struct inode *inode = mapping->host;
2455 struct cifsFileInfo *cfile = file->private_data;
2456 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2457 __u32 pid;
2458
2459 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2460 pid = cfile->pid;
2461 else
2462 pid = current->tgid;
2463
2464 cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
2465 page, pos, copied);
2466
2467 if (PageChecked(page)) {
2468 if (copied == len)
2469 SetPageUptodate(page);
2470 ClearPageChecked(page);
2471 } else if (!PageUptodate(page) && copied == PAGE_SIZE)
2472 SetPageUptodate(page);
2473
2474 if (!PageUptodate(page)) {
2475 char *page_data;
2476 unsigned offset = pos & (PAGE_SIZE - 1);
2477 unsigned int xid;
2478
2479 xid = get_xid();
2480 /* this is probably better than directly calling
2481 partialpage_write since in this function the file handle is
2482 		   known, which we might as well leverage */
2483 /* BB check if anything else missing out of ppw
2484 such as updating last write time */
2485 page_data = kmap(page);
2486 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
2487 /* if (rc < 0) should we set writebehind rc? */
2488 kunmap(page);
2489
2490 free_xid(xid);
2491 } else {
2492 rc = copied;
2493 pos += copied;
2494 set_page_dirty(page);
2495 }
2496
2497 if (rc > 0) {
2498 spin_lock(&inode->i_lock);
2499 if (pos > inode->i_size)
2500 i_size_write(inode, pos);
2501 spin_unlock(&inode->i_lock);
2502 }
2503
2504 unlock_page(page);
2505 put_page(page);
2506
2507 return rc;
2508 }
2509
2510 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2511 int datasync)
2512 {
2513 unsigned int xid;
2514 int rc = 0;
2515 struct cifs_tcon *tcon;
2516 struct TCP_Server_Info *server;
2517 struct cifsFileInfo *smbfile = file->private_data;
2518 struct inode *inode = file_inode(file);
2519 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2520
2521 rc = file_write_and_wait_range(file, start, end);
2522 if (rc)
2523 return rc;
2524
2525 xid = get_xid();
2526
2527 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2528 file, datasync);
2529
2530 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2531 rc = cifs_zap_mapping(inode);
2532 if (rc) {
2533 cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2534 rc = 0; /* don't care about it in fsync */
2535 }
2536 }
2537
2538 tcon = tlink_tcon(smbfile->tlink);
2539 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2540 server = tcon->ses->server;
2541 if (server->ops->flush)
2542 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2543 else
2544 rc = -ENOSYS;
2545 }
2546
2547 free_xid(xid);
2548 return rc;
2549 }
2550
2551 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2552 {
2553 unsigned int xid;
2554 int rc = 0;
2555 struct cifs_tcon *tcon;
2556 struct TCP_Server_Info *server;
2557 struct cifsFileInfo *smbfile = file->private_data;
2558 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2559
2560 rc = file_write_and_wait_range(file, start, end);
2561 if (rc)
2562 return rc;
2563
2564 xid = get_xid();
2565
2566 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2567 file, datasync);
2568
2569 tcon = tlink_tcon(smbfile->tlink);
2570 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2571 server = tcon->ses->server;
2572 if (server->ops->flush)
2573 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2574 else
2575 rc = -ENOSYS;
2576 }
2577
2578 free_xid(xid);
2579 return rc;
2580 }
2581
2582 /*
2583 * As file closes, flush all cached write data for this inode checking
2584 * for write behind errors.
2585 */
2586 int cifs_flush(struct file *file, fl_owner_t id)
2587 {
2588 struct inode *inode = file_inode(file);
2589 int rc = 0;
2590
2591 if (file->f_mode & FMODE_WRITE)
2592 rc = filemap_write_and_wait(inode->i_mapping);
2593
2594 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2595
2596 return rc;
2597 }
2598
2599 static int
2600 cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2601 {
2602 int rc = 0;
2603 unsigned long i;
2604
2605 for (i = 0; i < num_pages; i++) {
2606 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2607 if (!pages[i]) {
2608 /*
2609 * save number of pages we have already allocated and
2610 * return with ENOMEM error
2611 */
2612 num_pages = i;
2613 rc = -ENOMEM;
2614 break;
2615 }
2616 }
2617
2618 if (rc) {
2619 for (i = 0; i < num_pages; i++)
2620 put_page(pages[i]);
2621 }
2622 return rc;
2623 }
2624
2625 static inline
2626 size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2627 {
2628 size_t num_pages;
2629 size_t clen;
2630
2631 clen = min_t(const size_t, len, wsize);
2632 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
2633
2634 if (cur_len)
2635 *cur_len = clen;
2636
2637 return num_pages;
2638 }
2639
2640 static void
2641 cifs_uncached_writedata_release(struct kref *refcount)
2642 {
2643 int i;
2644 struct cifs_writedata *wdata = container_of(refcount,
2645 struct cifs_writedata, refcount);
2646
2647 kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
2648 for (i = 0; i < wdata->nr_pages; i++)
2649 put_page(wdata->pages[i]);
2650 cifs_writedata_release(refcount);
2651 }
2652
2653 static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);
2654
2655 static void
2656 cifs_uncached_writev_complete(struct work_struct *work)
2657 {
2658 struct cifs_writedata *wdata = container_of(work,
2659 struct cifs_writedata, work);
2660 struct inode *inode = d_inode(wdata->cfile->dentry);
2661 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2662
2663 spin_lock(&inode->i_lock);
2664 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2665 if (cifsi->server_eof > inode->i_size)
2666 i_size_write(inode, cifsi->server_eof);
2667 spin_unlock(&inode->i_lock);
2668
2669 complete(&wdata->done);
2670 collect_uncached_write_data(wdata->ctx);
2671 /* the below call can possibly free the last ref to aio ctx */
2672 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2673 }
2674
2675 static int
2676 wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
2677 size_t *len, unsigned long *num_pages)
2678 {
2679 size_t save_len, copied, bytes, cur_len = *len;
2680 unsigned long i, nr_pages = *num_pages;
2681
2682 save_len = cur_len;
2683 for (i = 0; i < nr_pages; i++) {
2684 bytes = min_t(const size_t, cur_len, PAGE_SIZE);
2685 copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
2686 cur_len -= copied;
2687 /*
2688 * If we didn't copy as much as we expected, then that
2689 * may mean we trod into an unmapped area. Stop copying
2690 * at that point. On the next pass through the big
2691 * loop, we'll likely end up getting a zero-length
2692 * write and bailing out of it.
2693 */
2694 if (copied < bytes)
2695 break;
2696 }
2697 cur_len = save_len - cur_len;
2698 *len = cur_len;
2699
2700 /*
2701 * If we have no data to send, then that probably means that
2702 * the copy above failed altogether. That's most likely because
2703 * the address in the iovec was bogus. Return -EFAULT and let
2704 * the caller free anything we allocated and bail out.
2705 */
2706 if (!cur_len)
2707 return -EFAULT;
2708
2709 /*
2710 * i + 1 now represents the number of pages we actually used in
2711 * the copy phase above.
2712 */
2713 *num_pages = i + 1;
2714 return 0;
2715 }
2716
2717 static int
2718 cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
2719 struct cifs_aio_ctx *ctx)
2720 {
2721 unsigned int wsize;
2722 struct cifs_credits credits;
2723 int rc;
2724 struct TCP_Server_Info *server =
2725 tlink_tcon(wdata->cfile->tlink)->ses->server;
2726
2727 do {
2728 if (wdata->cfile->invalidHandle) {
2729 rc = cifs_reopen_file(wdata->cfile, false);
2730 if (rc == -EAGAIN)
2731 continue;
2732 else if (rc)
2733 break;
2734 }
2735
2736
2737 /*
2738 * Wait for credits to resend this wdata.
2739 * Note: we are attempting to resend the whole wdata not in
2740 * segments
2741 */
2742 do {
2743 rc = server->ops->wait_mtu_credits(server, wdata->bytes,
2744 &wsize, &credits);
2745 if (rc)
2746 goto fail;
2747
2748 if (wsize < wdata->bytes) {
2749 add_credits_and_wake_if(server, &credits, 0);
2750 msleep(1000);
2751 }
2752 } while (wsize < wdata->bytes);
2753 wdata->credits = credits;
2754
2755 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
2756
2757 if (!rc) {
2758 if (wdata->cfile->invalidHandle)
2759 rc = -EAGAIN;
2760 else
2761 rc = server->ops->async_writev(wdata,
2762 cifs_uncached_writedata_release);
2763 }
2764
2765 /* If the write was successfully sent, we are done */
2766 if (!rc) {
2767 list_add_tail(&wdata->list, wdata_list);
2768 return 0;
2769 }
2770
2771 /* Roll back credits and retry if needed */
2772 add_credits_and_wake_if(server, &wdata->credits, 0);
2773 } while (rc == -EAGAIN);
2774
2775 fail:
2776 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2777 return rc;
2778 }
2779
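/*
 * Split an uncached or direct write into wsize-sized chunks, build a
 * writedata descriptor for each chunk (pinning the user pages for direct
 * I/O, or copying from the iterator into freshly allocated pages
 * otherwise), issue the chunks asynchronously and queue each successful
 * send on @wdata_list.
 */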
2780 static int
2781 cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
2782 struct cifsFileInfo *open_file,
2783 struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
2784 struct cifs_aio_ctx *ctx)
2785 {
2786 int rc = 0;
2787 size_t cur_len;
2788 unsigned long nr_pages, num_pages, i;
2789 struct cifs_writedata *wdata;
2790 struct iov_iter saved_from = *from;
2791 loff_t saved_offset = offset;
2792 pid_t pid;
2793 struct TCP_Server_Info *server;
2794 struct page **pagevec;
2795 size_t start;
2796 unsigned int xid;
2797
2798 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2799 pid = open_file->pid;
2800 else
2801 pid = current->tgid;
2802
2803 server = tlink_tcon(open_file->tlink)->ses->server;
2804 xid = get_xid();
2805
2806 do {
2807 unsigned int wsize;
2808 struct cifs_credits credits_on_stack;
2809 struct cifs_credits *credits = &credits_on_stack;
2810
2811 if (open_file->invalidHandle) {
2812 rc = cifs_reopen_file(open_file, false);
2813 if (rc == -EAGAIN)
2814 continue;
2815 else if (rc)
2816 break;
2817 }
2818
2819 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2820 &wsize, credits);
2821 if (rc)
2822 break;
2823
2824 cur_len = min_t(const size_t, len, wsize);
2825
2826 if (ctx->direct_io) {
2827 ssize_t result;
2828
2829 result = iov_iter_get_pages_alloc(
2830 from, &pagevec, cur_len, &start);
2831 if (result < 0) {
2832 cifs_dbg(VFS,
2833 "direct_writev couldn't get user pages "
2834 "(rc=%zd) iter type %d iov_offset %zd "
2835 "count %zd\n",
2836 result, from->type,
2837 from->iov_offset, from->count);
2838 dump_stack();
2839
2840 rc = result;
2841 add_credits_and_wake_if(server, credits, 0);
2842 break;
2843 }
2844 cur_len = (size_t)result;
2845 iov_iter_advance(from, cur_len);
2846
2847 nr_pages =
2848 (cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;
2849
2850 wdata = cifs_writedata_direct_alloc(pagevec,
2851 cifs_uncached_writev_complete);
2852 if (!wdata) {
2853 rc = -ENOMEM;
2854 add_credits_and_wake_if(server, credits, 0);
2855 break;
2856 }
2857
2858
2859 wdata->page_offset = start;
2860 wdata->tailsz =
2861 nr_pages > 1 ?
2862 cur_len - (PAGE_SIZE - start) -
2863 (nr_pages - 2) * PAGE_SIZE :
2864 cur_len;
2865 } else {
2866 nr_pages = get_numpages(wsize, len, &cur_len);
2867 wdata = cifs_writedata_alloc(nr_pages,
2868 cifs_uncached_writev_complete);
2869 if (!wdata) {
2870 rc = -ENOMEM;
2871 add_credits_and_wake_if(server, credits, 0);
2872 break;
2873 }
2874
2875 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2876 if (rc) {
2877 kvfree(wdata->pages);
2878 kfree(wdata);
2879 add_credits_and_wake_if(server, credits, 0);
2880 break;
2881 }
2882
2883 num_pages = nr_pages;
2884 rc = wdata_fill_from_iovec(
2885 wdata, from, &cur_len, &num_pages);
2886 if (rc) {
2887 for (i = 0; i < nr_pages; i++)
2888 put_page(wdata->pages[i]);
2889 kvfree(wdata->pages);
2890 kfree(wdata);
2891 add_credits_and_wake_if(server, credits, 0);
2892 break;
2893 }
2894
2895 /*
2896 * Bring nr_pages down to the number of pages we
2897 * actually used, and free any pages that we didn't use.
2898 */
2899 for ( ; nr_pages > num_pages; nr_pages--)
2900 put_page(wdata->pages[nr_pages - 1]);
2901
2902 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
2903 }
2904
2905 wdata->sync_mode = WB_SYNC_ALL;
2906 wdata->nr_pages = nr_pages;
2907 wdata->offset = (__u64)offset;
2908 wdata->cfile = cifsFileInfo_get(open_file);
2909 wdata->pid = pid;
2910 wdata->bytes = cur_len;
2911 wdata->pagesz = PAGE_SIZE;
2912 wdata->credits = credits_on_stack;
2913 wdata->ctx = ctx;
2914 kref_get(&ctx->refcount);
2915
2916 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
2917
2918 if (!rc) {
2919 if (wdata->cfile->invalidHandle)
2920 rc = -EAGAIN;
2921 else
2922 rc = server->ops->async_writev(wdata,
2923 cifs_uncached_writedata_release);
2924 }
2925
2926 if (rc) {
2927 add_credits_and_wake_if(server, &wdata->credits, 0);
2928 kref_put(&wdata->refcount,
2929 cifs_uncached_writedata_release);
2930 if (rc == -EAGAIN) {
2931 *from = saved_from;
2932 iov_iter_advance(from, offset - saved_offset);
2933 continue;
2934 }
2935 break;
2936 }
2937
2938 list_add_tail(&wdata->list, wdata_list);
2939 offset += cur_len;
2940 len -= cur_len;
2941 } while (len > 0);
2942
2943 free_xid(xid);
2944 return rc;
2945 }
2946
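/*
 * Collect completion results for the uncached write requests attached to
 * the aio context, resending any that failed with -EAGAIN, and report the
 * total bytes written (or the first error) to the waiter or the async
 * completion.
 */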
2947 static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
2948 {
2949 struct cifs_writedata *wdata, *tmp;
2950 struct cifs_tcon *tcon;
2951 struct cifs_sb_info *cifs_sb;
2952 struct dentry *dentry = ctx->cfile->dentry;
2953 int rc;
2954
2955 tcon = tlink_tcon(ctx->cfile->tlink);
2956 cifs_sb = CIFS_SB(dentry->d_sb);
2957
2958 mutex_lock(&ctx->aio_mutex);
2959
2960 if (list_empty(&ctx->list)) {
2961 mutex_unlock(&ctx->aio_mutex);
2962 return;
2963 }
2964
2965 rc = ctx->rc;
2966 /*
2967 * Wait for and collect replies for any successful sends in order of
2968 * increasing offset. Once an error is hit, then return without waiting
2969 * for any more replies.
2970 */
2971 restart_loop:
2972 list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
2973 if (!rc) {
2974 if (!try_wait_for_completion(&wdata->done)) {
2975 mutex_unlock(&ctx->aio_mutex);
2976 return;
2977 }
2978
2979 if (wdata->result)
2980 rc = wdata->result;
2981 else
2982 ctx->total_len += wdata->bytes;
2983
2984 /* resend call if it's a retryable error */
2985 if (rc == -EAGAIN) {
2986 struct list_head tmp_list;
2987 struct iov_iter tmp_from = ctx->iter;
2988
2989 INIT_LIST_HEAD(&tmp_list);
2990 list_del_init(&wdata->list);
2991
2992 if (ctx->direct_io)
2993 rc = cifs_resend_wdata(
2994 wdata, &tmp_list, ctx);
2995 else {
2996 iov_iter_advance(&tmp_from,
2997 wdata->offset - ctx->pos);
2998
2999 rc = cifs_write_from_iter(wdata->offset,
3000 wdata->bytes, &tmp_from,
3001 ctx->cfile, cifs_sb, &tmp_list,
3002 ctx);
3003
3004 kref_put(&wdata->refcount,
3005 cifs_uncached_writedata_release);
3006 }
3007
3008 list_splice(&tmp_list, &ctx->list);
3009 goto restart_loop;
3010 }
3011 }
3012 list_del_init(&wdata->list);
3013 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
3014 }
3015
3016 cifs_stats_bytes_written(tcon, ctx->total_len);
3017 set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
3018
3019 ctx->rc = (rc == 0) ? ctx->total_len : rc;
3020
3021 mutex_unlock(&ctx->aio_mutex);
3022
3023 if (ctx->iocb && ctx->iocb->ki_complete)
3024 ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
3025 else
3026 complete(&ctx->done);
3027 }
3028
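/*
 * Common entry point for uncached and direct writes: set up an aio context,
 * dispatch the I/O via cifs_write_from_iter() and either return
 * -EIOCBQUEUED for async requests or wait for all sends to complete.
 */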
3029 static ssize_t __cifs_writev(
3030 struct kiocb *iocb, struct iov_iter *from, bool direct)
3031 {
3032 struct file *file = iocb->ki_filp;
3033 ssize_t total_written = 0;
3034 struct cifsFileInfo *cfile;
3035 struct cifs_tcon *tcon;
3036 struct cifs_sb_info *cifs_sb;
3037 struct cifs_aio_ctx *ctx;
3038 struct iov_iter saved_from = *from;
3039 size_t len = iov_iter_count(from);
3040 int rc;
3041
3042 /*
3043 * iov_iter_get_pages_alloc doesn't work with ITER_KVEC.
3044 * In this case, fall back to non-direct write function.
3045 	 * This could be improved by getting pages directly in ITER_KVEC
3046 */
3047 if (direct && from->type & ITER_KVEC) {
3048 cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
3049 direct = false;
3050 }
3051
3052 rc = generic_write_checks(iocb, from);
3053 if (rc <= 0)
3054 return rc;
3055
3056 cifs_sb = CIFS_FILE_SB(file);
3057 cfile = file->private_data;
3058 tcon = tlink_tcon(cfile->tlink);
3059
3060 if (!tcon->ses->server->ops->async_writev)
3061 return -ENOSYS;
3062
3063 ctx = cifs_aio_ctx_alloc();
3064 if (!ctx)
3065 return -ENOMEM;
3066
3067 ctx->cfile = cifsFileInfo_get(cfile);
3068
3069 if (!is_sync_kiocb(iocb))
3070 ctx->iocb = iocb;
3071
3072 ctx->pos = iocb->ki_pos;
3073
3074 if (direct) {
3075 ctx->direct_io = true;
3076 ctx->iter = *from;
3077 ctx->len = len;
3078 } else {
3079 rc = setup_aio_ctx_iter(ctx, from, WRITE);
3080 if (rc) {
3081 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3082 return rc;
3083 }
3084 }
3085
3086 	/* grab a lock here because the write response handlers can access ctx */
3087 mutex_lock(&ctx->aio_mutex);
3088
3089 rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
3090 cfile, cifs_sb, &ctx->list, ctx);
3091
3092 /*
3093 * If at least one write was successfully sent, then discard any rc
3094 * value from the later writes. If the other write succeeds, then
3095 * we'll end up returning whatever was written. If it fails, then
3096 * we'll get a new rc value from that.
3097 */
3098 if (!list_empty(&ctx->list))
3099 rc = 0;
3100
3101 mutex_unlock(&ctx->aio_mutex);
3102
3103 if (rc) {
3104 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3105 return rc;
3106 }
3107
3108 if (!is_sync_kiocb(iocb)) {
3109 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3110 return -EIOCBQUEUED;
3111 }
3112
3113 rc = wait_for_completion_killable(&ctx->done);
3114 if (rc) {
3115 mutex_lock(&ctx->aio_mutex);
3116 ctx->rc = rc = -EINTR;
3117 total_written = ctx->total_len;
3118 mutex_unlock(&ctx->aio_mutex);
3119 } else {
3120 rc = ctx->rc;
3121 total_written = ctx->total_len;
3122 }
3123
3124 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3125
3126 if (unlikely(!total_written))
3127 return rc;
3128
3129 iocb->ki_pos += total_written;
3130 return total_written;
3131 }
3132
3133 ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
3134 {
3135 return __cifs_writev(iocb, from, true);
3136 }
3137
3138 ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
3139 {
3140 return __cifs_writev(iocb, from, false);
3141 }
3142
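/*
 * Write through the page cache while holding lock_sem so that the range
 * cannot be covered by a conflicting mandatory brlock while the write is
 * in progress.
 */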
3143 static ssize_t
3144 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
3145 {
3146 struct file *file = iocb->ki_filp;
3147 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
3148 struct inode *inode = file->f_mapping->host;
3149 struct cifsInodeInfo *cinode = CIFS_I(inode);
3150 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
3151 ssize_t rc;
3152
3153 inode_lock(inode);
3154 /*
3155 * We need to hold the sem to be sure nobody modifies lock list
3156 * with a brlock that prevents writing.
3157 */
3158 down_read(&cinode->lock_sem);
3159
3160 rc = generic_write_checks(iocb, from);
3161 if (rc <= 0)
3162 goto out;
3163
3164 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
3165 server->vals->exclusive_lock_type, 0,
3166 NULL, CIFS_WRITE_OP))
3167 rc = __generic_file_write_iter(iocb, from);
3168 else
3169 rc = -EACCES;
3170 out:
3171 up_read(&cinode->lock_sem);
3172 inode_unlock(inode);
3173
3174 if (rc > 0)
3175 rc = generic_write_sync(iocb, rc);
3176 return rc;
3177 }
3178
3179 ssize_t
3180 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
3181 {
3182 struct inode *inode = file_inode(iocb->ki_filp);
3183 struct cifsInodeInfo *cinode = CIFS_I(inode);
3184 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3185 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3186 iocb->ki_filp->private_data;
3187 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3188 ssize_t written;
3189
3190 written = cifs_get_writer(cinode);
3191 if (written)
3192 return written;
3193
3194 if (CIFS_CACHE_WRITE(cinode)) {
3195 if (cap_unix(tcon->ses) &&
3196 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
3197 && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
3198 written = generic_file_write_iter(iocb, from);
3199 goto out;
3200 }
3201 written = cifs_writev(iocb, from);
3202 goto out;
3203 }
3204 /*
3205 * For non-oplocked files in strict cache mode we need to write the data
3206 * to the server exactly from the pos to pos+len-1 rather than flush all
3207 	 * affected pages because it may cause an error with mandatory locks on
3208 	 * these pages but not on the region from pos to pos+len-1.
3209 */
3210 written = cifs_user_writev(iocb, from);
3211 if (CIFS_CACHE_READ(cinode)) {
3212 /*
3213 * We have read level caching and we have just sent a write
3214 * request to the server thus making data in the cache stale.
3215 * Zap the cache and set oplock/lease level to NONE to avoid
3216 * reading stale data from the cache. All subsequent read
3217 * operations will read new data from the server.
3218 */
3219 cifs_zap_mapping(inode);
3220 cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
3221 inode);
3222 cinode->oplock = 0;
3223 }
3224 out:
3225 cifs_put_writer(cinode);
3226 return written;
3227 }
3228
3229 static struct cifs_readdata *
3230 cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
3231 {
3232 struct cifs_readdata *rdata;
3233
3234 rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
3235 if (rdata != NULL) {
3236 rdata->pages = pages;
3237 kref_init(&rdata->refcount);
3238 INIT_LIST_HEAD(&rdata->list);
3239 init_completion(&rdata->done);
3240 INIT_WORK(&rdata->work, complete);
3241 }
3242
3243 return rdata;
3244 }
3245
3246 static struct cifs_readdata *
3247 cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
3248 {
3249 struct page **pages =
3250 kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
3251 struct cifs_readdata *ret = NULL;
3252
3253 if (pages) {
3254 ret = cifs_readdata_direct_alloc(pages, complete);
3255 if (!ret)
3256 kfree(pages);
3257 }
3258
3259 return ret;
3260 }
3261
3262 void
3263 cifs_readdata_release(struct kref *refcount)
3264 {
3265 struct cifs_readdata *rdata = container_of(refcount,
3266 struct cifs_readdata, refcount);
3267 #ifdef CONFIG_CIFS_SMB_DIRECT
3268 if (rdata->mr) {
3269 smbd_deregister_mr(rdata->mr);
3270 rdata->mr = NULL;
3271 }
3272 #endif
3273 if (rdata->cfile)
3274 cifsFileInfo_put(rdata->cfile);
3275
3276 kvfree(rdata->pages);
3277 kfree(rdata);
3278 }
3279
3280 static int
3281 cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
3282 {
3283 int rc = 0;
3284 struct page *page;
3285 unsigned int i;
3286
3287 for (i = 0; i < nr_pages; i++) {
3288 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3289 if (!page) {
3290 rc = -ENOMEM;
3291 break;
3292 }
3293 rdata->pages[i] = page;
3294 }
3295
3296 if (rc) {
3297 unsigned int nr_page_failed = i;
3298
3299 for (i = 0; i < nr_page_failed; i++) {
3300 put_page(rdata->pages[i]);
3301 rdata->pages[i] = NULL;
3302 }
3303 }
3304 return rc;
3305 }
3306
3307 static void
3308 cifs_uncached_readdata_release(struct kref *refcount)
3309 {
3310 struct cifs_readdata *rdata = container_of(refcount,
3311 struct cifs_readdata, refcount);
3312 unsigned int i;
3313
3314 kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
3315 for (i = 0; i < rdata->nr_pages; i++) {
3316 put_page(rdata->pages[i]);
3317 }
3318 cifs_readdata_release(refcount);
3319 }
3320
3321 /**
3322 * cifs_readdata_to_iov - copy data from pages in response to an iovec
3323 * @rdata: the readdata response with list of pages holding data
3324 * @iter: destination for our data
3325 *
3326 * This function copies data from a list of pages in a readdata response into
3327 * an array of iovecs. It will first calculate where the data should go
3328 * based on the info in the readdata and then copy the data into that spot.
3329 */
3330 static int
3331 cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
3332 {
3333 size_t remaining = rdata->got_bytes;
3334 unsigned int i;
3335
3336 for (i = 0; i < rdata->nr_pages; i++) {
3337 struct page *page = rdata->pages[i];
3338 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
3339 size_t written;
3340
3341 if (unlikely(iov_iter_is_pipe(iter))) {
3342 void *addr = kmap_atomic(page);
3343
3344 written = copy_to_iter(addr, copy, iter);
3345 kunmap_atomic(addr);
3346 } else
3347 written = copy_page_to_iter(page, 0, copy, iter);
3348 remaining -= written;
3349 if (written < copy && iov_iter_count(iter) > 0)
3350 break;
3351 }
3352 return remaining ? -EFAULT : 0;
3353 }
3354
3355 static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
3356
3357 static void
3358 cifs_uncached_readv_complete(struct work_struct *work)
3359 {
3360 struct cifs_readdata *rdata = container_of(work,
3361 struct cifs_readdata, work);
3362
3363 complete(&rdata->done);
3364 collect_uncached_read_data(rdata->ctx);
3365 /* the below call can possibly free the last ref to aio ctx */
3366 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3367 }
3368
3369 static int
3370 uncached_fill_pages(struct TCP_Server_Info *server,
3371 struct cifs_readdata *rdata, struct iov_iter *iter,
3372 unsigned int len)
3373 {
3374 int result = 0;
3375 unsigned int i;
3376 unsigned int nr_pages = rdata->nr_pages;
3377 unsigned int page_offset = rdata->page_offset;
3378
3379 rdata->got_bytes = 0;
3380 rdata->tailsz = PAGE_SIZE;
3381 for (i = 0; i < nr_pages; i++) {
3382 struct page *page = rdata->pages[i];
3383 size_t n;
3384 unsigned int segment_size = rdata->pagesz;
3385
3386 if (i == 0)
3387 segment_size -= page_offset;
3388 else
3389 page_offset = 0;
3390
3391
3392 if (len <= 0) {
3393 /* no need to hold page hostage */
3394 rdata->pages[i] = NULL;
3395 rdata->nr_pages--;
3396 put_page(page);
3397 continue;
3398 }
3399
3400 n = len;
3401 if (len >= segment_size)
3402 /* enough data to fill the page */
3403 n = segment_size;
3404 else
3405 rdata->tailsz = len;
3406 len -= n;
3407
3408 if (iter)
3409 result = copy_page_from_iter(
3410 page, page_offset, n, iter);
3411 #ifdef CONFIG_CIFS_SMB_DIRECT
3412 else if (rdata->mr)
3413 result = n;
3414 #endif
3415 else
3416 result = cifs_read_page_from_socket(
3417 server, page, page_offset, n);
3418 if (result < 0)
3419 break;
3420
3421 rdata->got_bytes += result;
3422 }
3423
3424 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3425 rdata->got_bytes : result;
3426 }
3427
3428 static int
3429 cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
3430 struct cifs_readdata *rdata, unsigned int len)
3431 {
3432 return uncached_fill_pages(server, rdata, NULL, len);
3433 }
3434
3435 static int
3436 cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
3437 struct cifs_readdata *rdata,
3438 struct iov_iter *iter)
3439 {
3440 return uncached_fill_pages(server, rdata, iter, iter->count);
3441 }
3442
3443 static int cifs_resend_rdata(struct cifs_readdata *rdata,
3444 struct list_head *rdata_list,
3445 struct cifs_aio_ctx *ctx)
3446 {
3447 unsigned int rsize;
3448 struct cifs_credits credits;
3449 int rc;
3450 struct TCP_Server_Info *server =
3451 tlink_tcon(rdata->cfile->tlink)->ses->server;
3452
3453 do {
3454 if (rdata->cfile->invalidHandle) {
3455 rc = cifs_reopen_file(rdata->cfile, true);
3456 if (rc == -EAGAIN)
3457 continue;
3458 else if (rc)
3459 break;
3460 }
3461
3462 /*
3463 * Wait for credits to resend this rdata.
3464 * Note: we are attempting to resend the whole rdata not in
3465 * segments
3466 */
3467 do {
3468 rc = server->ops->wait_mtu_credits(server, rdata->bytes,
3469 &rsize, &credits);
3470
3471 if (rc)
3472 goto fail;
3473
3474 if (rsize < rdata->bytes) {
3475 add_credits_and_wake_if(server, &credits, 0);
3476 msleep(1000);
3477 }
3478 } while (rsize < rdata->bytes);
3479 rdata->credits = credits;
3480
3481 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
3482 if (!rc) {
3483 if (rdata->cfile->invalidHandle)
3484 rc = -EAGAIN;
3485 else
3486 rc = server->ops->async_readv(rdata);
3487 }
3488
3489 /* If the read was successfully sent, we are done */
3490 if (!rc) {
3491 /* Add to aio pending list */
3492 list_add_tail(&rdata->list, rdata_list);
3493 return 0;
3494 }
3495
3496 /* Roll back credits and retry if needed */
3497 add_credits_and_wake_if(server, &rdata->credits, 0);
3498 } while (rc == -EAGAIN);
3499
3500 fail:
3501 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3502 return rc;
3503 }
3504
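/*
 * Split an uncached or direct read into rsize-sized chunks, build a
 * readdata descriptor for each chunk (pinning the user pages for direct
 * I/O, or allocating pages otherwise), issue the asynchronous reads and
 * queue each successful send on @rdata_list.
 */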
3505 static int
3506 cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
3507 struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
3508 struct cifs_aio_ctx *ctx)
3509 {
3510 struct cifs_readdata *rdata;
3511 unsigned int npages, rsize;
3512 struct cifs_credits credits_on_stack;
3513 struct cifs_credits *credits = &credits_on_stack;
3514 size_t cur_len;
3515 int rc;
3516 pid_t pid;
3517 struct TCP_Server_Info *server;
3518 struct page **pagevec;
3519 size_t start;
3520 struct iov_iter direct_iov = ctx->iter;
3521
3522 server = tlink_tcon(open_file->tlink)->ses->server;
3523
3524 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3525 pid = open_file->pid;
3526 else
3527 pid = current->tgid;
3528
3529 if (ctx->direct_io)
3530 iov_iter_advance(&direct_iov, offset - ctx->pos);
3531
3532 do {
3533 if (open_file->invalidHandle) {
3534 rc = cifs_reopen_file(open_file, true);
3535 if (rc == -EAGAIN)
3536 continue;
3537 else if (rc)
3538 break;
3539 }
3540
3541 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
3542 &rsize, credits);
3543 if (rc)
3544 break;
3545
3546 cur_len = min_t(const size_t, len, rsize);
3547
3548 if (ctx->direct_io) {
3549 ssize_t result;
3550
3551 result = iov_iter_get_pages_alloc(
3552 &direct_iov, &pagevec,
3553 cur_len, &start);
3554 if (result < 0) {
3555 cifs_dbg(VFS,
3556 "couldn't get user pages (rc=%zd)"
3557 " iter type %d"
3558 " iov_offset %zd count %zd\n",
3559 result, direct_iov.type,
3560 direct_iov.iov_offset,
3561 direct_iov.count);
3562 dump_stack();
3563
3564 rc = result;
3565 add_credits_and_wake_if(server, credits, 0);
3566 break;
3567 }
3568 cur_len = (size_t)result;
3569 iov_iter_advance(&direct_iov, cur_len);
3570
3571 rdata = cifs_readdata_direct_alloc(
3572 pagevec, cifs_uncached_readv_complete);
3573 if (!rdata) {
3574 add_credits_and_wake_if(server, credits, 0);
3575 rc = -ENOMEM;
3576 break;
3577 }
3578
3579 npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE;
3580 rdata->page_offset = start;
3581 rdata->tailsz = npages > 1 ?
3582 cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE :
3583 cur_len;
3584
3585 } else {
3586
3587 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
3588 /* allocate a readdata struct */
3589 rdata = cifs_readdata_alloc(npages,
3590 cifs_uncached_readv_complete);
3591 if (!rdata) {
3592 add_credits_and_wake_if(server, credits, 0);
3593 rc = -ENOMEM;
3594 break;
3595 }
3596
3597 rc = cifs_read_allocate_pages(rdata, npages);
3598 if (rc) {
3599 kvfree(rdata->pages);
3600 kfree(rdata);
3601 add_credits_and_wake_if(server, credits, 0);
3602 break;
3603 }
3604
3605 rdata->tailsz = PAGE_SIZE;
3606 }
3607
3608 rdata->cfile = cifsFileInfo_get(open_file);
3609 rdata->nr_pages = npages;
3610 rdata->offset = offset;
3611 rdata->bytes = cur_len;
3612 rdata->pid = pid;
3613 rdata->pagesz = PAGE_SIZE;
3614 rdata->read_into_pages = cifs_uncached_read_into_pages;
3615 rdata->copy_into_pages = cifs_uncached_copy_into_pages;
3616 rdata->credits = credits_on_stack;
3617 rdata->ctx = ctx;
3618 kref_get(&ctx->refcount);
3619
3620 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
3621
3622 if (!rc) {
3623 if (rdata->cfile->invalidHandle)
3624 rc = -EAGAIN;
3625 else
3626 rc = server->ops->async_readv(rdata);
3627 }
3628
3629 if (rc) {
3630 add_credits_and_wake_if(server, &rdata->credits, 0);
3631 kref_put(&rdata->refcount,
3632 cifs_uncached_readdata_release);
3633 if (rc == -EAGAIN) {
3634 iov_iter_revert(&direct_iov, cur_len);
3635 continue;
3636 }
3637 break;
3638 }
3639
3640 list_add_tail(&rdata->list, rdata_list);
3641 offset += cur_len;
3642 len -= cur_len;
3643 } while (len > 0);
3644
3645 return rc;
3646 }
3647
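/*
 * Collect completion results for the uncached read requests attached to
 * the aio context, resending short or -EAGAIN reads, copying received data
 * into the caller's iterator for the non-direct case, and reporting the
 * total bytes read (or the first error).
 */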
3648 static void
3649 collect_uncached_read_data(struct cifs_aio_ctx *ctx)
3650 {
3651 struct cifs_readdata *rdata, *tmp;
3652 struct iov_iter *to = &ctx->iter;
3653 struct cifs_sb_info *cifs_sb;
3654 int rc;
3655
3656 cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
3657
3658 mutex_lock(&ctx->aio_mutex);
3659
3660 if (list_empty(&ctx->list)) {
3661 mutex_unlock(&ctx->aio_mutex);
3662 return;
3663 }
3664
3665 rc = ctx->rc;
3666 /* the loop below should proceed in the order of increasing offsets */
3667 again:
3668 list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
3669 if (!rc) {
3670 if (!try_wait_for_completion(&rdata->done)) {
3671 mutex_unlock(&ctx->aio_mutex);
3672 return;
3673 }
3674
3675 if (rdata->result == -EAGAIN) {
3676 /* resend call if it's a retryable error */
3677 struct list_head tmp_list;
3678 unsigned int got_bytes = rdata->got_bytes;
3679
3680 list_del_init(&rdata->list);
3681 INIT_LIST_HEAD(&tmp_list);
3682
3683 /*
3684 * Got a part of data and then reconnect has
3685 * happened -- fill the buffer and continue
3686 * reading.
3687 */
3688 if (got_bytes && got_bytes < rdata->bytes) {
3689 rc = 0;
3690 if (!ctx->direct_io)
3691 rc = cifs_readdata_to_iov(rdata, to);
3692 if (rc) {
3693 kref_put(&rdata->refcount,
3694 cifs_uncached_readdata_release);
3695 continue;
3696 }
3697 }
3698
3699 if (ctx->direct_io) {
3700 /*
3701 * Re-use rdata as this is a
3702 * direct I/O
3703 */
3704 rc = cifs_resend_rdata(
3705 rdata,
3706 &tmp_list, ctx);
3707 } else {
3708 rc = cifs_send_async_read(
3709 rdata->offset + got_bytes,
3710 rdata->bytes - got_bytes,
3711 rdata->cfile, cifs_sb,
3712 &tmp_list, ctx);
3713
3714 kref_put(&rdata->refcount,
3715 cifs_uncached_readdata_release);
3716 }
3717
3718 list_splice(&tmp_list, &ctx->list);
3719
3720 goto again;
3721 } else if (rdata->result)
3722 rc = rdata->result;
3723 else if (!ctx->direct_io)
3724 rc = cifs_readdata_to_iov(rdata, to);
3725
3726 /* if there was a short read -- discard anything left */
3727 if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
3728 rc = -ENODATA;
3729
3730 ctx->total_len += rdata->got_bytes;
3731 }
3732 list_del_init(&rdata->list);
3733 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3734 }
3735
3736 if (!ctx->direct_io)
3737 ctx->total_len = ctx->len - iov_iter_count(to);
3738
3739 /* mask nodata case */
3740 if (rc == -ENODATA)
3741 rc = 0;
3742
3743 ctx->rc = (rc == 0) ? ctx->total_len : rc;
3744
3745 mutex_unlock(&ctx->aio_mutex);
3746
3747 if (ctx->iocb && ctx->iocb->ki_complete)
3748 ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
3749 else
3750 complete(&ctx->done);
3751 }
3752
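/*
 * Common entry point for uncached and direct reads: set up an aio context,
 * dispatch the I/O via cifs_send_async_read() and either return
 * -EIOCBQUEUED for async requests or wait for the reads to complete.
 */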
3753 static ssize_t __cifs_readv(
3754 struct kiocb *iocb, struct iov_iter *to, bool direct)
3755 {
3756 size_t len;
3757 struct file *file = iocb->ki_filp;
3758 struct cifs_sb_info *cifs_sb;
3759 struct cifsFileInfo *cfile;
3760 struct cifs_tcon *tcon;
3761 ssize_t rc, total_read = 0;
3762 loff_t offset = iocb->ki_pos;
3763 struct cifs_aio_ctx *ctx;
3764
3765 /*
3766 * iov_iter_get_pages_alloc() doesn't work with ITER_KVEC, so
3767 * fall back to the data-copy read path. This could be improved
3768 * by getting pages directly in ITER_KVEC.
3769 */
3770 if (direct && to->type & ITER_KVEC) {
3771 cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
3772 direct = false;
3773 }
3774
3775 len = iov_iter_count(to);
3776 if (!len)
3777 return 0;
3778
3779 cifs_sb = CIFS_FILE_SB(file);
3780 cfile = file->private_data;
3781 tcon = tlink_tcon(cfile->tlink);
3782
3783 if (!tcon->ses->server->ops->async_readv)
3784 return -ENOSYS;
3785
3786 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3787 cifs_dbg(FYI, "attempting read on write only file instance\n");
3788
3789 ctx = cifs_aio_ctx_alloc();
3790 if (!ctx)
3791 return -ENOMEM;
3792
3793 ctx->cfile = cifsFileInfo_get(cfile);
3794
3795 if (!is_sync_kiocb(iocb))
3796 ctx->iocb = iocb;
3797
3798 if (iter_is_iovec(to))
3799 ctx->should_dirty = true;
3800
3801 if (direct) {
3802 ctx->pos = offset;
3803 ctx->direct_io = true;
3804 ctx->iter = *to;
3805 ctx->len = len;
3806 } else {
3807 rc = setup_aio_ctx_iter(ctx, to, READ);
3808 if (rc) {
3809 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3810 return rc;
3811 }
3812 len = ctx->len;
3813 }
3814
3815 /* grab the lock here because read response handlers can access ctx */
3816 mutex_lock(&ctx->aio_mutex);
3817
3818 rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);
3819
3820 /* if at least one read request was sent successfully, reset rc */
3821 if (!list_empty(&ctx->list))
3822 rc = 0;
3823
3824 mutex_unlock(&ctx->aio_mutex);
3825
3826 if (rc) {
3827 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3828 return rc;
3829 }
3830
3831 if (!is_sync_kiocb(iocb)) {
3832 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3833 return -EIOCBQUEUED;
3834 }
3835
3836 rc = wait_for_completion_killable(&ctx->done);
3837 if (rc) {
3838 mutex_lock(&ctx->aio_mutex);
3839 ctx->rc = rc = -EINTR;
3840 total_read = ctx->total_len;
3841 mutex_unlock(&ctx->aio_mutex);
3842 } else {
3843 rc = ctx->rc;
3844 total_read = ctx->total_len;
3845 }
3846
3847 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3848
3849 if (total_read) {
3850 iocb->ki_pos += total_read;
3851 return total_read;
3852 }
3853 return rc;
3854 }
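/*
 * __cifs_readv() completion: a synchronous kiocb blocks above in
 * wait_for_completion_killable() until collect_uncached_read_data()
 * signals ctx->done, and ki_pos is advanced here by the bytes actually
 * read; an asynchronous kiocb gets -EIOCBQUEUED and the result is
 * reported later through ->ki_complete().
 */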
3855
3856 ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
3857 {
3858 return __cifs_readv(iocb, to, true);
3859 }
3860
3861 ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
3862 {
3863 return __cifs_readv(iocb, to, false);
3864 }
3865
3866 ssize_t
3867 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
3868 {
3869 struct inode *inode = file_inode(iocb->ki_filp);
3870 struct cifsInodeInfo *cinode = CIFS_I(inode);
3871 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3872 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3873 iocb->ki_filp->private_data;
3874 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3875 int rc = -EACCES;
3876
3877 /*
3878 * In strict cache mode we need to read from the server all the time
3879 * if we don't have a level II oplock because the server can delay mtime
3880 * changes - so we can't decide whether to invalidate the inode cache.
3881 * We can also fail reading pages if there are mandatory locks
3882 * on pages affected by this read but not on the region from pos to
3883 * pos+len-1.
3884 */
3885 if (!CIFS_CACHE_READ(cinode))
3886 return cifs_user_readv(iocb, to);
3887
3888 if (cap_unix(tcon->ses) &&
3889 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
3890 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
3891 return generic_file_read_iter(iocb, to);
3892
3893 /*
3894 * We need to hold the sem to be sure nobody modifies lock list
3895 * with a brlock that prevents reading.
3896 */
3897 down_read(&cinode->lock_sem);
3898 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
3899 tcon->ses->server->vals->shared_lock_type,
3900 0, NULL, CIFS_READ_OP))
3901 rc = generic_file_read_iter(iocb, to);
3902 up_read(&cinode->lock_sem);
3903 return rc;
3904 }
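/*
 * cifs_strict_readv() in short: without a level II (read) oplock we go
 * to the server via cifs_user_readv(); with POSIX byte-range lock
 * support (and the mount not disabling POSIX brlocks) the cached
 * generic path is safe; otherwise the page cache is used only after
 * verifying, under lock_sem, that no mandatory lock conflicts with the
 * requested range.
 */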
3905
3906 static ssize_t
3907 cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
3908 {
3909 int rc = -EACCES;
3910 unsigned int bytes_read = 0;
3911 unsigned int total_read;
3912 unsigned int current_read_size;
3913 unsigned int rsize;
3914 struct cifs_sb_info *cifs_sb;
3915 struct cifs_tcon *tcon;
3916 struct TCP_Server_Info *server;
3917 unsigned int xid;
3918 char *cur_offset;
3919 struct cifsFileInfo *open_file;
3920 struct cifs_io_parms io_parms;
3921 int buf_type = CIFS_NO_BUFFER;
3922 __u32 pid;
3923
3924 xid = get_xid();
3925 cifs_sb = CIFS_FILE_SB(file);
3926
3927 /* FIXME: set up handlers for larger reads and/or convert to async */
3928 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
3929
3930 if (file->private_data == NULL) {
3931 rc = -EBADF;
3932 free_xid(xid);
3933 return rc;
3934 }
3935 open_file = file->private_data;
3936 tcon = tlink_tcon(open_file->tlink);
3937 server = tcon->ses->server;
3938
3939 if (!server->ops->sync_read) {
3940 free_xid(xid);
3941 return -ENOSYS;
3942 }
3943
3944 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3945 pid = open_file->pid;
3946 else
3947 pid = current->tgid;
3948
3949 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3950 cifs_dbg(FYI, "attempting read on write only file instance\n");
3951
3952 for (total_read = 0, cur_offset = read_data; read_size > total_read;
3953 total_read += bytes_read, cur_offset += bytes_read) {
3954 do {
3955 current_read_size = min_t(uint, read_size - total_read,
3956 rsize);
3957 /*
3958 * For Windows ME and 9x we do not want to request more
3959 * than the server negotiated, since it will refuse the
3960 * read otherwise.
3961 */
3962 if ((tcon->ses) && !(tcon->ses->capabilities &
3963 tcon->ses->server->vals->cap_large_files)) {
3964 current_read_size = min_t(uint,
3965 current_read_size, CIFSMaxBufSize);
3966 }
3967 if (open_file->invalidHandle) {
3968 rc = cifs_reopen_file(open_file, true);
3969 if (rc != 0)
3970 break;
3971 }
3972 io_parms.pid = pid;
3973 io_parms.tcon = tcon;
3974 io_parms.offset = *offset;
3975 io_parms.length = current_read_size;
3976 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
3977 &bytes_read, &cur_offset,
3978 &buf_type);
3979 } while (rc == -EAGAIN);
3980
3981 if (rc || (bytes_read == 0)) {
3982 if (total_read) {
3983 break;
3984 } else {
3985 free_xid(xid);
3986 return rc;
3987 }
3988 } else {
3989 cifs_stats_bytes_read(tcon, total_read);
3990 *offset += bytes_read;
3991 }
3992 }
3993 free_xid(xid);
3994 return total_read;
3995 }
3996
3997 /*
3998 * If the page is mmap'ed into a process' page tables, then we need to make
3999 * sure that it doesn't change while being written back.
4000 */
4001 static vm_fault_t
4002 cifs_page_mkwrite(struct vm_fault *vmf)
4003 {
4004 struct page *page = vmf->page;
4005
4006 lock_page(page);
4007 return VM_FAULT_LOCKED;
4008 }
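/*
 * Holding the page lock and returning VM_FAULT_LOCKED appears to be all
 * that is needed here: writeback locks pages before writing them out, so
 * the fault path cannot make the page writable while it is under I/O.
 */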
4009
4010 static const struct vm_operations_struct cifs_file_vm_ops = {
4011 .fault = filemap_fault,
4012 .map_pages = filemap_map_pages,
4013 .page_mkwrite = cifs_page_mkwrite,
4014 };
4015
4016 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
4017 {
4018 int xid, rc = 0;
4019 struct inode *inode = file_inode(file);
4020
4021 xid = get_xid();
4022
4023 if (!CIFS_CACHE_READ(CIFS_I(inode)))
4024 rc = cifs_zap_mapping(inode);
4025 if (!rc)
4026 rc = generic_file_mmap(file, vma);
4027 if (!rc)
4028 vma->vm_ops = &cifs_file_vm_ops;
4029
4030 free_xid(xid);
4031 return rc;
4032 }
4033
4034 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
4035 {
4036 int rc, xid;
4037
4038 xid = get_xid();
4039
4040 rc = cifs_revalidate_file(file);
4041 if (rc)
4042 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
4043 rc);
4044 if (!rc)
4045 rc = generic_file_mmap(file, vma);
4046 if (!rc)
4047 vma->vm_ops = &cifs_file_vm_ops;
4048
4049 free_xid(xid);
4050 return rc;
4051 }
4052
4053 static void
4054 cifs_readv_complete(struct work_struct *work)
4055 {
4056 unsigned int i, got_bytes;
4057 struct cifs_readdata *rdata = container_of(work,
4058 struct cifs_readdata, work);
4059
4060 got_bytes = rdata->got_bytes;
4061 for (i = 0; i < rdata->nr_pages; i++) {
4062 struct page *page = rdata->pages[i];
4063
4064 lru_cache_add_file(page);
4065
4066 if (rdata->result == 0 ||
4067 (rdata->result == -EAGAIN && got_bytes)) {
4068 flush_dcache_page(page);
4069 SetPageUptodate(page);
4070 }
4071
4072 unlock_page(page);
4073
4074 if (rdata->result == 0 ||
4075 (rdata->result == -EAGAIN && got_bytes))
4076 cifs_readpage_to_fscache(rdata->mapping->host, page);
4077
4078 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
4079
4080 put_page(page);
4081 rdata->pages[i] = NULL;
4082 }
4083 kref_put(&rdata->refcount, cifs_readdata_release);
4084 }
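/*
 * cifs_readv_complete() runs from the work queue once an async readpages
 * request finishes: each page goes back on the LRU, is marked uptodate
 * (and handed to fscache) only if the read succeeded or data arrived
 * before an -EAGAIN, and is then unlocked and released along with the
 * rdata reference.
 */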
4085
4086 static int
4087 readpages_fill_pages(struct TCP_Server_Info *server,
4088 struct cifs_readdata *rdata, struct iov_iter *iter,
4089 unsigned int len)
4090 {
4091 int result = 0;
4092 unsigned int i;
4093 u64 eof;
4094 pgoff_t eof_index;
4095 unsigned int nr_pages = rdata->nr_pages;
4096 unsigned int page_offset = rdata->page_offset;
4097
4098 /* determine the eof that the server (probably) has */
4099 eof = CIFS_I(rdata->mapping->host)->server_eof;
4100 eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
4101 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
4102
4103 rdata->got_bytes = 0;
4104 rdata->tailsz = PAGE_SIZE;
4105 for (i = 0; i < nr_pages; i++) {
4106 struct page *page = rdata->pages[i];
4107 unsigned int to_read = rdata->pagesz;
4108 size_t n;
4109
4110 if (i == 0)
4111 to_read -= page_offset;
4112 else
4113 page_offset = 0;
4114
4115 n = to_read;
4116
4117 if (len >= to_read) {
4118 len -= to_read;
4119 } else if (len > 0) {
4120 /* enough for partial page, fill and zero the rest */
4121 zero_user(page, len + page_offset, to_read - len);
4122 n = rdata->tailsz = len;
4123 len = 0;
4124 } else if (page->index > eof_index) {
4125 /*
4126 * The VFS will not try to do readahead past the
4127 * i_size, but it's possible that we have outstanding
4128 * writes with gaps in the middle and the i_size hasn't
4129 * caught up yet. Populate those with zeroed out pages
4130 * to prevent the VFS from repeatedly attempting to
4131 * fill them until the writes are flushed.
4132 */
4133 zero_user(page, 0, PAGE_SIZE);
4134 lru_cache_add_file(page);
4135 flush_dcache_page(page);
4136 SetPageUptodate(page);
4137 unlock_page(page);
4138 put_page(page);
4139 rdata->pages[i] = NULL;
4140 rdata->nr_pages--;
4141 continue;
4142 } else {
4143 /* no need to hold page hostage */
4144 lru_cache_add_file(page);
4145 unlock_page(page);
4146 put_page(page);
4147 rdata->pages[i] = NULL;
4148 rdata->nr_pages--;
4149 continue;
4150 }
4151
4152 if (iter)
4153 result = copy_page_from_iter(
4154 page, page_offset, n, iter);
4155 #ifdef CONFIG_CIFS_SMB_DIRECT
4156 else if (rdata->mr)
4157 result = n;
4158 #endif
4159 else
4160 result = cifs_read_page_from_socket(
4161 server, page, page_offset, n);
4162 if (result < 0)
4163 break;
4164
4165 rdata->got_bytes += result;
4166 }
4167
4168 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
4169 rdata->got_bytes : result;
4170 }
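/*
 * Example, assuming 4K pages and page_offset == 0: for a three-page
 * rdata where the server returned len = 10240 bytes, pages 0 and 1 are
 * filled completely, and page 2 receives the remaining 2048 bytes with
 * the rest of the page zeroed and rdata->tailsz set to 2048.  Pages for
 * which no data arrived are either zero-filled and marked uptodate
 * (when they lie beyond the server's EOF) or simply released for the
 * VFS to retry later.
 */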
4171
4172 static int
4173 cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
4174 struct cifs_readdata *rdata, unsigned int len)
4175 {
4176 return readpages_fill_pages(server, rdata, NULL, len);
4177 }
4178
4179 static int
4180 cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
4181 struct cifs_readdata *rdata,
4182 struct iov_iter *iter)
4183 {
4184 return readpages_fill_pages(server, rdata, iter, iter->count);
4185 }
4186
4187 static int
4188 readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
4189 unsigned int rsize, struct list_head *tmplist,
4190 unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
4191 {
4192 struct page *page, *tpage;
4193 unsigned int expected_index;
4194 int rc;
4195 gfp_t gfp = readahead_gfp_mask(mapping);
4196
4197 INIT_LIST_HEAD(tmplist);
4198
4199 page = lru_to_page(page_list);
4200
4201 /*
4202 * Lock the page and put it in the cache. Since no one else
4203 * should have access to this page, we're safe to simply set
4204 * PG_locked without checking it first.
4205 */
4206 __SetPageLocked(page);
4207 rc = add_to_page_cache_locked(page, mapping,
4208 page->index, gfp);
4209
4210 /* give up if we can't stick it in the cache */
4211 if (rc) {
4212 __ClearPageLocked(page);
4213 return rc;
4214 }
4215
4216 /* move first page to the tmplist */
4217 *offset = (loff_t)page->index << PAGE_SHIFT;
4218 *bytes = PAGE_SIZE;
4219 *nr_pages = 1;
4220 list_move_tail(&page->lru, tmplist);
4221
4222 /* now try and add more pages onto the request */
4223 expected_index = page->index + 1;
4224 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
4225 /* discontinuity ? */
4226 if (page->index != expected_index)
4227 break;
4228
4229 /* would this page push the read over the rsize? */
4230 if (*bytes + PAGE_SIZE > rsize)
4231 break;
4232
4233 __SetPageLocked(page);
4234 if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
4235 __ClearPageLocked(page);
4236 break;
4237 }
4238 list_move_tail(&page->lru, tmplist);
4239 (*bytes) += PAGE_SIZE;
4240 expected_index++;
4241 (*nr_pages)++;
4242 }
4243 return rc;
4244 }
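/*
 * Example, assuming 4K pages: if the first page taken off page_list has
 * index 7, the request starts at offset 7 << PAGE_SHIFT = 28672 with
 * bytes = 4096; each directly following page (index 8, 9, ...) adds
 * another PAGE_SIZE until an index gap is hit or another page would
 * push the request past rsize.
 */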
4245
4246 static int cifs_readpages(struct file *file, struct address_space *mapping,
4247 struct list_head *page_list, unsigned num_pages)
4248 {
4249 int rc;
4250 struct list_head tmplist;
4251 struct cifsFileInfo *open_file = file->private_data;
4252 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
4253 struct TCP_Server_Info *server;
4254 pid_t pid;
4255 unsigned int xid;
4256
4257 xid = get_xid();
4258 /*
4259 * Reads as many pages as possible from fscache. Returns -ENOBUFS
4260 * immediately if the cookie is negative
4261 *
4262 * After this point, every page in the list might have PG_fscache set,
4263 * so we will need to clean that up off of every page we don't use.
4264 */
4265 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
4266 &num_pages);
4267 if (rc == 0) {
4268 free_xid(xid);
4269 return rc;
4270 }
4271
4272 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
4273 pid = open_file->pid;
4274 else
4275 pid = current->tgid;
4276
4277 rc = 0;
4278 server = tlink_tcon(open_file->tlink)->ses->server;
4279
4280 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
4281 __func__, file, mapping, num_pages);
4282
4283 /*
4284 * Start with the page at end of list and move it to private
4285 * list. Do the same with any following pages until we hit
4286 * the rsize limit, hit an index discontinuity, or run out of
4287 * pages. Issue the async read and then start the loop again
4288 * until the list is empty.
4289 *
4290 * Note that list order is important. The page_list is in
4291 * the order of declining indexes. When we put the pages in
4292 * the rdata->pages, then we want them in increasing order.
4293 */
4294 while (!list_empty(page_list)) {
4295 unsigned int i, nr_pages, bytes, rsize;
4296 loff_t offset;
4297 struct page *page, *tpage;
4298 struct cifs_readdata *rdata;
4299 struct cifs_credits credits_on_stack;
4300 struct cifs_credits *credits = &credits_on_stack;
4301
4302 if (open_file->invalidHandle) {
4303 rc = cifs_reopen_file(open_file, true);
4304 if (rc == -EAGAIN)
4305 continue;
4306 else if (rc)
4307 break;
4308 }
4309
4310 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
4311 &rsize, credits);
4312 if (rc)
4313 break;
4314
4315 /*
4316 * Give up immediately if rsize is too small to read an entire
4317 * page. The VFS will fall back to readpage. We should never
4318 * reach this point however since we set ra_pages to 0 when the
4319 * rsize is smaller than a cache page.
4320 */
4321 if (unlikely(rsize < PAGE_SIZE)) {
4322 add_credits_and_wake_if(server, credits, 0);
4323 free_xid(xid);
4324 return 0;
4325 }
4326
4327 rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
4328 &nr_pages, &offset, &bytes);
4329 if (rc) {
4330 add_credits_and_wake_if(server, credits, 0);
4331 break;
4332 }
4333
4334 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
4335 if (!rdata) {
4336 /* best to give up if we're out of mem */
4337 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
4338 list_del(&page->lru);
4339 lru_cache_add_file(page);
4340 unlock_page(page);
4341 put_page(page);
4342 }
4343 rc = -ENOMEM;
4344 add_credits_and_wake_if(server, credits, 0);
4345 break;
4346 }
4347
4348 rdata->cfile = cifsFileInfo_get(open_file);
4349 rdata->mapping = mapping;
4350 rdata->offset = offset;
4351 rdata->bytes = bytes;
4352 rdata->pid = pid;
4353 rdata->pagesz = PAGE_SIZE;
4354 rdata->tailsz = PAGE_SIZE;
4355 rdata->read_into_pages = cifs_readpages_read_into_pages;
4356 rdata->copy_into_pages = cifs_readpages_copy_into_pages;
4357 rdata->credits = credits_on_stack;
4358
4359 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
4360 list_del(&page->lru);
4361 rdata->pages[rdata->nr_pages++] = page;
4362 }
4363
4364 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
4365
4366 if (!rc) {
4367 if (rdata->cfile->invalidHandle)
4368 rc = -EAGAIN;
4369 else
4370 rc = server->ops->async_readv(rdata);
4371 }
4372
4373 if (rc) {
4374 add_credits_and_wake_if(server, &rdata->credits, 0);
4375 for (i = 0; i < rdata->nr_pages; i++) {
4376 page = rdata->pages[i];
4377 lru_cache_add_file(page);
4378 unlock_page(page);
4379 put_page(page);
4380 }
4381 /* Fallback to the readpage in error/reconnect cases */
4382 kref_put(&rdata->refcount, cifs_readdata_release);
4383 break;
4384 }
4385
4386 kref_put(&rdata->refcount, cifs_readdata_release);
4387 }
4388
4389 /* Any pages that have been shown to fscache but didn't get added to
4390 * the pagecache must be uncached before they get returned to the
4391 * allocator.
4392 */
4393 cifs_fscache_readpages_cancel(mapping->host, page_list);
4394 free_xid(xid);
4395 return rc;
4396 }
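/*
 * Each pass of the loop in cifs_readpages() obtains credits capped at
 * the negotiated rsize, slices one contiguous run of pages off page_list
 * via readpages_get_pages(), wraps it in an rdata and issues
 * async_readv(); completion is handled by cifs_readv_complete().  On a
 * send failure the pages of that rdata are put back on the LRU and left
 * for ->readpage to retry.
 */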
4397
4398 /*
4399 * cifs_readpage_worker must be called with the page pinned
4400 */
4401 static int cifs_readpage_worker(struct file *file, struct page *page,
4402 loff_t *poffset)
4403 {
4404 char *read_data;
4405 int rc;
4406
4407 /* Is the page cached? */
4408 rc = cifs_readpage_from_fscache(file_inode(file), page);
4409 if (rc == 0)
4410 goto read_complete;
4411
4412 read_data = kmap(page);
4413 /* for reads over a certain size we could initiate async read ahead */
4414
4415 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
4416
4417 if (rc < 0)
4418 goto io_error;
4419 else
4420 cifs_dbg(FYI, "Bytes read %d\n", rc);
4421
4422 /* we do not want atime to be less than mtime, it broke some apps */
4423 file_inode(file)->i_atime = current_time(file_inode(file));
4424 if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)) < 0)
4425 file_inode(file)->i_atime = file_inode(file)->i_mtime;
4426 else
4427 file_inode(file)->i_atime = current_time(file_inode(file));
4428
4429 if (PAGE_SIZE > rc)
4430 memset(read_data + rc, 0, PAGE_SIZE - rc);
4431
4432 flush_dcache_page(page);
4433 SetPageUptodate(page);
4434
4435 /* send this page to the cache */
4436 cifs_readpage_to_fscache(file_inode(file), page);
4437
4438 rc = 0;
4439
4440 io_error:
4441 kunmap(page);
4442 unlock_page(page);
4443
4444 read_complete:
4445 return rc;
4446 }
4447
4448 static int cifs_readpage(struct file *file, struct page *page)
4449 {
4450 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
4451 int rc = -EACCES;
4452 unsigned int xid;
4453
4454 xid = get_xid();
4455
4456 if (file->private_data == NULL) {
4457 rc = -EBADF;
4458 free_xid(xid);
4459 return rc;
4460 }
4461
4462 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
4463 page, (int)offset, (int)offset);
4464
4465 rc = cifs_readpage_worker(file, page, &offset);
4466
4467 free_xid(xid);
4468 return rc;
4469 }
4470
4471 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
4472 {
4473 struct cifsFileInfo *open_file;
4474
4475 spin_lock(&cifs_inode->open_file_lock);
4476 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
4477 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
4478 spin_unlock(&cifs_inode->open_file_lock);
4479 return 1;
4480 }
4481 }
4482 spin_unlock(&cifs_inode->open_file_lock);
4483 return 0;
4484 }
4485
4486 /* We do not want to update the file size from the server for inodes
4487 open for write - to avoid races with writepage extending
4488 the file. In the future we could consider allowing
4489 refreshing the inode only on increases in the file size,
4490 but this is tricky to do without racing with writebehind
4491 page caching in the current Linux kernel design */
4492 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
4493 {
4494 if (!cifsInode)
4495 return true;
4496
4497 if (is_inode_writable(cifsInode)) {
4498 /* This inode is open for write at least once */
4499 struct cifs_sb_info *cifs_sb;
4500
4501 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
4502 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
4503 /* since there is no page cache to corrupt on direct I/O
4504 we can change size safely */
4505 return true;
4506 }
4507
4508 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4509 return true;
4510
4511 return false;
4512 } else
4513 return true;
4514 }
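/*
 * In short: a server-reported size is accepted unless the file is open
 * for write on a non-direct-I/O mount and the new end of file would not
 * grow the locally cached i_size; in that case the local size wins so
 * that data not yet pushed out by writeback is not truncated away.
 */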
4515
4516 static int cifs_write_begin(struct file *file, struct address_space *mapping,
4517 loff_t pos, unsigned len, unsigned flags,
4518 struct page **pagep, void **fsdata)
4519 {
4520 int oncethru = 0;
4521 pgoff_t index = pos >> PAGE_SHIFT;
4522 loff_t offset = pos & (PAGE_SIZE - 1);
4523 loff_t page_start = pos & PAGE_MASK;
4524 loff_t i_size;
4525 struct page *page;
4526 int rc = 0;
4527
4528 cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
4529
4530 start:
4531 page = grab_cache_page_write_begin(mapping, index, flags);
4532 if (!page) {
4533 rc = -ENOMEM;
4534 goto out;
4535 }
4536
4537 if (PageUptodate(page))
4538 goto out;
4539
4540 /*
4541 * If we write a full page it will be up to date, no need to read from
4542 * the server. If the write is short, we'll end up doing a sync write
4543 * instead.
4544 */
4545 if (len == PAGE_SIZE)
4546 goto out;
4547
4548 /*
4549 * optimize away the read when we have an oplock, and we're not
4550 * expecting to use any of the data we'd be reading in. That
4551 * is, when the page lies beyond the EOF, or straddles the EOF
4552 * and the write will cover all of the existing data.
4553 */
4554 if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
4555 i_size = i_size_read(mapping->host);
4556 if (page_start >= i_size ||
4557 (offset == 0 && (pos + len) >= i_size)) {
4558 zero_user_segments(page, 0, offset,
4559 offset + len,
4560 PAGE_SIZE);
4561 /*
4562 * PageChecked means that the parts of the page
4563 * to which we're not writing are considered up
4564 * to date. Once the data is copied to the
4565 * page, it can be set uptodate.
4566 */
4567 SetPageChecked(page);
4568 goto out;
4569 }
4570 }
4571
4572 if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
4573 /*
4574 * might as well read a page, it is fast enough. If we get
4575 * an error, we don't need to return it. cifs_write_end will
4576 * do a sync write instead since PG_uptodate isn't set.
4577 */
4578 cifs_readpage_worker(file, page, &page_start);
4579 put_page(page);
4580 oncethru = 1;
4581 goto start;
4582 } else {
4583 /* we could try using another file handle if there is one -
4584 but how would we lock it to prevent close of that handle
4585 racing with this read? In any case
4586 this will be written out by write_end so is fine */
4587 }
4588 out:
4589 *pagep = page;
4590 return rc;
4591 }
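/*
 * Read-modify-write avoidance in cifs_write_begin(): a full-page write
 * skips reading the old contents; with a read oplock, so does a page
 * lying entirely beyond EOF or a write that starts at the page boundary
 * and reaches at least to EOF -- the untouched parts are zeroed and
 * PG_checked records that they count as up to date.  Otherwise the page
 * is read in once via cifs_readpage_worker() before the copy.
 */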
4592
4593 static int cifs_release_page(struct page *page, gfp_t gfp)
4594 {
4595 if (PagePrivate(page))
4596 return 0;
4597
4598 return cifs_fscache_release_page(page, gfp);
4599 }
4600
4601 static void cifs_invalidate_page(struct page *page, unsigned int offset,
4602 unsigned int length)
4603 {
4604 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
4605
4606 if (offset == 0 && length == PAGE_SIZE)
4607 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
4608 }
4609
4610 static int cifs_launder_page(struct page *page)
4611 {
4612 int rc = 0;
4613 loff_t range_start = page_offset(page);
4614 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
4615 struct writeback_control wbc = {
4616 .sync_mode = WB_SYNC_ALL,
4617 .nr_to_write = 0,
4618 .range_start = range_start,
4619 .range_end = range_end,
4620 };
4621
4622 cifs_dbg(FYI, "Launder page: %p\n", page);
4623
4624 if (clear_page_dirty_for_io(page))
4625 rc = cifs_writepage_locked(page, &wbc);
4626
4627 cifs_fscache_invalidate_page(page, page->mapping->host);
4628 return rc;
4629 }
4630
4631 void cifs_oplock_break(struct work_struct *work)
4632 {
4633 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
4634 oplock_break);
4635 struct inode *inode = d_inode(cfile->dentry);
4636 struct cifsInodeInfo *cinode = CIFS_I(inode);
4637 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
4638 struct TCP_Server_Info *server = tcon->ses->server;
4639 int rc = 0;
4640
4641 wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
4642 TASK_UNINTERRUPTIBLE);
4643
4644 server->ops->downgrade_oplock(server, cinode,
4645 test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
4646
4647 if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
4648 cifs_has_mand_locks(cinode)) {
4649 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
4650 inode);
4651 cinode->oplock = 0;
4652 }
4653
4654 if (inode && S_ISREG(inode->i_mode)) {
4655 if (CIFS_CACHE_READ(cinode))
4656 break_lease(inode, O_RDONLY);
4657 else
4658 break_lease(inode, O_WRONLY);
4659 rc = filemap_fdatawrite(inode->i_mapping);
4660 if (!CIFS_CACHE_READ(cinode)) {
4661 rc = filemap_fdatawait(inode->i_mapping);
4662 mapping_set_error(inode->i_mapping, rc);
4663 cifs_zap_mapping(inode);
4664 }
4665 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
4666 }
4667
4668 rc = cifs_push_locks(cfile);
4669 if (rc)
4670 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
4671
4672 /*
4673 * Releasing a stale oplock after a recent reconnect of the SMB session,
4674 * using a now incorrect file handle, is not a data integrity issue. But
4675 * do not bother sending an oplock release if the session to the server
4676 * is still disconnected, since the oplock was already released by
4677 * the server.
4678 if (!cfile->oplock_break_cancelled) {
4679 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
4680 cinode);
4681 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
4682 }
4683 _cifsFileInfo_put(cfile, false /* do not wait for ourself */);
4684 cifs_done_oplock_break(cinode);
4685 }
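/*
 * cifs_oplock_break() sequence: wait for in-flight writers, downgrade
 * the cached oplock state, break any conflicting local lease via
 * break_lease(), flush dirty pages (zapping the page cache entirely if
 * read caching is being lost), re-push byte-range locks, and finally
 * acknowledge the break to the server unless it was cancelled by a
 * reconnect.
 */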
4686
4687 /*
4688 * The presence of cifs_direct_io() in the address space ops vector
4689 * allows open() with the O_DIRECT flag, which would have failed otherwise.
4690 *
4691 * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
4692 * so this method should never be called.
4693 *
4694 * Direct IO is not yet supported in the cached mode.
4695 */
4696 static ssize_t
4697 cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
4698 {
4699 /*
4700 * FIXME
4701 * Eventually need to support direct IO for non forcedirectio mounts
4702 */
4703 return -EINVAL;
4704 }
4705
4706
4707 const struct address_space_operations cifs_addr_ops = {
4708 .readpage = cifs_readpage,
4709 .readpages = cifs_readpages,
4710 .writepage = cifs_writepage,
4711 .writepages = cifs_writepages,
4712 .write_begin = cifs_write_begin,
4713 .write_end = cifs_write_end,
4714 .set_page_dirty = __set_page_dirty_nobuffers,
4715 .releasepage = cifs_release_page,
4716 .direct_IO = cifs_direct_io,
4717 .invalidatepage = cifs_invalidate_page,
4718 .launder_page = cifs_launder_page,
4719 };
4720
4721 /*
4722 * cifs_readpages requires the server to support a buffer large enough to
4723 * contain the header plus one complete page of data. Otherwise, we need
4724 * to leave cifs_readpages out of the address space operations.
4725 */
4726 const struct address_space_operations cifs_addr_ops_smallbuf = {
4727 .readpage = cifs_readpage,
4728 .writepage = cifs_writepage,
4729 .writepages = cifs_writepages,
4730 .write_begin = cifs_write_begin,
4731 .write_end = cifs_write_end,
4732 .set_page_dirty = __set_page_dirty_nobuffers,
4733 .releasepage = cifs_release_page,
4734 .invalidatepage = cifs_invalidate_page,
4735 .launder_page = cifs_launder_page,
4736 };
4737