Lines Matching +full:tcon +full:- +full:channel

1 // SPDX-License-Identifier: LGPL-2.1
12 #include <linux/backing-dev.h>
44 cifs_mark_open_files_invalid(struct cifs_tcon *tcon) in cifs_mark_open_files_invalid() argument
51 spin_lock(&tcon->ses->ses_lock); in cifs_mark_open_files_invalid()
52 if ((tcon->ses->ses_status != SES_GOOD) || (tcon->status != TID_NEED_RECON)) { in cifs_mark_open_files_invalid()
53 spin_unlock(&tcon->ses->ses_lock); in cifs_mark_open_files_invalid()
56 tcon->status = TID_IN_FILES_INVALIDATE; in cifs_mark_open_files_invalid()
57 spin_unlock(&tcon->ses->ses_lock); in cifs_mark_open_files_invalid()
60 spin_lock(&tcon->open_file_lock); in cifs_mark_open_files_invalid()
61 list_for_each_safe(tmp, tmp1, &tcon->openFileList) { in cifs_mark_open_files_invalid()
63 open_file->invalidHandle = true; in cifs_mark_open_files_invalid()
64 open_file->oplock_break_cancelled = true; in cifs_mark_open_files_invalid()
66 spin_unlock(&tcon->open_file_lock); in cifs_mark_open_files_invalid()
68 invalidate_all_cached_dirs(tcon); in cifs_mark_open_files_invalid()
69 spin_lock(&tcon->tc_lock); in cifs_mark_open_files_invalid()
70 if (tcon->status == TID_IN_FILES_INVALIDATE) in cifs_mark_open_files_invalid()
71 tcon->status = TID_NEED_TCON; in cifs_mark_open_files_invalid()
72 spin_unlock(&tcon->tc_lock); in cifs_mark_open_files_invalid()
76 * to this tcon. in cifs_mark_open_files_invalid()
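
Note on the excerpt above: cifs_mark_open_files_invalid() (lines 44-76) works in three steps - flip the tree connection's status under the session/tcon locks, walk the open-file list under open_file_lock marking every handle stale, then complete the transition to TID_NEED_TCON so the reconnect path can redo the tree connect. Below is a minimal userspace sketch of that shape, with pthread mutexes standing in for the kernel spinlocks; demo_tcon, demo_file, the enum values and mark_open_files_invalid() here are illustrative stand-ins, not the CIFS structures.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum tid_status { TID_GOOD, TID_NEED_RECON, TID_IN_FILES_INVALIDATE, TID_NEED_TCON };

struct demo_file {
        bool invalid_handle;
        struct demo_file *next;
};

struct demo_tcon {
        pthread_mutex_t state_lock;     /* plays the role of ses_lock/tc_lock */
        pthread_mutex_t open_file_lock; /* guards the open-file list          */
        enum tid_status status;
        struct demo_file *open_files;
};

static void mark_open_files_invalid(struct demo_tcon *tcon)
{
        /* only start invalidation if a reconnect is actually pending */
        pthread_mutex_lock(&tcon->state_lock);
        if (tcon->status != TID_NEED_RECON) {
                pthread_mutex_unlock(&tcon->state_lock);
                return;
        }
        tcon->status = TID_IN_FILES_INVALIDATE;
        pthread_mutex_unlock(&tcon->state_lock);

        /* mark every open handle stale under the list lock */
        pthread_mutex_lock(&tcon->open_file_lock);
        for (struct demo_file *f = tcon->open_files; f; f = f->next)
                f->invalid_handle = true;
        pthread_mutex_unlock(&tcon->open_file_lock);

        /* finish the transition so the caller can redo the tree connect */
        pthread_mutex_lock(&tcon->state_lock);
        if (tcon->status == TID_IN_FILES_INVALIDATE)
                tcon->status = TID_NEED_TCON;
        pthread_mutex_unlock(&tcon->state_lock);
}

int main(void)
{
        struct demo_file f = { false, NULL };
        struct demo_tcon tcon = {
                PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
                TID_NEED_RECON, &f
        };

        mark_open_files_invalid(&tcon);
        printf("status=%d, handle invalid=%d\n", tcon.status, f.invalid_handle);
        return 0;
}

(Build the sketches in this listing with: cc demo.c -pthread.)
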
116 current->comm, current->tgid); in cifs_posix_convert_flags()
159 struct cifs_tcon *tcon; in cifs_posix_open() local
165 return -ENOMEM; in cifs_posix_open()
173 tcon = tlink_tcon(tlink); in cifs_posix_open()
177 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data, in cifs_posix_open()
178 poplock, full_path, cifs_sb->local_nls, in cifs_posix_open()
185 if (presp_data->Type == cpu_to_le32(-1)) in cifs_posix_open()
198 rc = -ENOMEM; in cifs_posix_open()
213 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock, in cifs_nt_open() argument
220 struct TCP_Server_Info *server = tcon->ses->server; in cifs_nt_open()
223 if (!server->ops->open) in cifs_nt_open()
224 return -ENOSYS; in cifs_nt_open()
232 * ---------- ---------------- in cifs_nt_open()
263 oparms.tcon = tcon; in cifs_nt_open()
272 rc = server->ops->open(xid, &oparms, oplock, buf); in cifs_nt_open()
277 if (tcon->unix_ext) in cifs_nt_open()
278 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb, in cifs_nt_open()
281 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb, in cifs_nt_open()
285 server->ops->close(xid, tcon, fid); in cifs_nt_open()
286 if (rc == -ESTALE) in cifs_nt_open()
287 rc = -EOPENSTALE; in cifs_nt_open()
299 down_read(&cinode->lock_sem); in cifs_has_mand_locks()
300 list_for_each_entry(cur, &cinode->llist, llist) { in cifs_has_mand_locks()
301 if (!list_empty(&cur->locks)) { in cifs_has_mand_locks()
306 up_read(&cinode->lock_sem); in cifs_has_mand_locks()
328 struct cifs_tcon *tcon = tlink_tcon(tlink); in cifs_new_fileinfo() local
329 struct TCP_Server_Info *server = tcon->ses->server; in cifs_new_fileinfo()
342 cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL); in cifs_new_fileinfo()
343 if (!cfile->symlink_target) { in cifs_new_fileinfo()
350 INIT_LIST_HEAD(&fdlocks->locks); in cifs_new_fileinfo()
351 fdlocks->cfile = cfile; in cifs_new_fileinfo()
352 cfile->llist = fdlocks; in cifs_new_fileinfo()
354 cfile->count = 1; in cifs_new_fileinfo()
355 cfile->pid = current->tgid; in cifs_new_fileinfo()
356 cfile->uid = current_fsuid(); in cifs_new_fileinfo()
357 cfile->dentry = dget(dentry); in cifs_new_fileinfo()
358 cfile->f_flags = file->f_flags; in cifs_new_fileinfo()
359 cfile->invalidHandle = false; in cifs_new_fileinfo()
360 cfile->deferred_close_scheduled = false; in cifs_new_fileinfo()
361 cfile->tlink = cifs_get_tlink(tlink); in cifs_new_fileinfo()
362 INIT_WORK(&cfile->oplock_break, cifs_oplock_break); in cifs_new_fileinfo()
363 INIT_WORK(&cfile->put, cifsFileInfo_put_work); in cifs_new_fileinfo()
364 INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close); in cifs_new_fileinfo()
365 mutex_init(&cfile->fh_mutex); in cifs_new_fileinfo()
366 spin_lock_init(&cfile->file_info_lock); in cifs_new_fileinfo()
368 cifs_sb_active(inode->i_sb); in cifs_new_fileinfo()
374 if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) { in cifs_new_fileinfo()
379 cifs_down_write(&cinode->lock_sem); in cifs_new_fileinfo()
380 list_add(&fdlocks->llist, &cinode->llist); in cifs_new_fileinfo()
381 up_write(&cinode->lock_sem); in cifs_new_fileinfo()
383 spin_lock(&tcon->open_file_lock); in cifs_new_fileinfo()
384 if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock) in cifs_new_fileinfo()
385 oplock = fid->pending_open->oplock; in cifs_new_fileinfo()
386 list_del(&fid->pending_open->olist); in cifs_new_fileinfo()
388 fid->purge_cache = false; in cifs_new_fileinfo()
389 server->ops->set_fid(cfile, fid, oplock); in cifs_new_fileinfo()
391 list_add(&cfile->tlist, &tcon->openFileList); in cifs_new_fileinfo()
392 atomic_inc(&tcon->num_local_opens); in cifs_new_fileinfo()
395 spin_lock(&cinode->open_file_lock); in cifs_new_fileinfo()
396 if (file->f_mode & FMODE_READ) in cifs_new_fileinfo()
397 list_add(&cfile->flist, &cinode->openFileList); in cifs_new_fileinfo()
399 list_add_tail(&cfile->flist, &cinode->openFileList); in cifs_new_fileinfo()
400 spin_unlock(&cinode->open_file_lock); in cifs_new_fileinfo()
401 spin_unlock(&tcon->open_file_lock); in cifs_new_fileinfo()
403 if (fid->purge_cache) in cifs_new_fileinfo()
406 file->private_data = cfile; in cifs_new_fileinfo()
413 spin_lock(&cifs_file->file_info_lock); in cifsFileInfo_get()
415 spin_unlock(&cifs_file->file_info_lock); in cifsFileInfo_get()
421 struct inode *inode = d_inode(cifs_file->dentry); in cifsFileInfo_put_final()
424 struct super_block *sb = inode->i_sb; in cifsFileInfo_put_final()
430 cifs_down_write(&cifsi->lock_sem); in cifsFileInfo_put_final()
431 list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) { in cifsFileInfo_put_final()
432 list_del(&li->llist); in cifsFileInfo_put_final()
436 list_del(&cifs_file->llist->llist); in cifsFileInfo_put_final()
437 kfree(cifs_file->llist); in cifsFileInfo_put_final()
438 up_write(&cifsi->lock_sem); in cifsFileInfo_put_final()
440 cifs_put_tlink(cifs_file->tlink); in cifsFileInfo_put_final()
441 dput(cifs_file->dentry); in cifsFileInfo_put_final()
443 kfree(cifs_file->symlink_target); in cifsFileInfo_put_final()
456 * cifsFileInfo_put - release a reference of file priv data
468 * _cifsFileInfo_put - release a reference of file priv data
471 * server. Must be called without holding tcon->open_file_lock,
472 * cinode->open_file_lock and cifs_file->file_info_lock.
486 struct inode *inode = d_inode(cifs_file->dentry); in _cifsFileInfo_put()
487 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink); in _cifsFileInfo_put() local
488 struct TCP_Server_Info *server = tcon->ses->server; in _cifsFileInfo_put()
490 struct super_block *sb = inode->i_sb; in _cifsFileInfo_put()
496 spin_lock(&tcon->open_file_lock); in _cifsFileInfo_put()
497 spin_lock(&cifsi->open_file_lock); in _cifsFileInfo_put()
498 spin_lock(&cifs_file->file_info_lock); in _cifsFileInfo_put()
499 if (--cifs_file->count > 0) { in _cifsFileInfo_put()
500 spin_unlock(&cifs_file->file_info_lock); in _cifsFileInfo_put()
501 spin_unlock(&cifsi->open_file_lock); in _cifsFileInfo_put()
502 spin_unlock(&tcon->open_file_lock); in _cifsFileInfo_put()
505 spin_unlock(&cifs_file->file_info_lock); in _cifsFileInfo_put()
507 if (server->ops->get_lease_key) in _cifsFileInfo_put()
508 server->ops->get_lease_key(inode, &fid); in _cifsFileInfo_put()
511 cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open); in _cifsFileInfo_put()
514 list_del(&cifs_file->flist); in _cifsFileInfo_put()
515 list_del(&cifs_file->tlist); in _cifsFileInfo_put()
516 atomic_dec(&tcon->num_local_opens); in _cifsFileInfo_put()
518 if (list_empty(&cifsi->openFileList)) { in _cifsFileInfo_put()
520 d_inode(cifs_file->dentry)); in _cifsFileInfo_put()
526 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) in _cifsFileInfo_put()
527 set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags); in _cifsFileInfo_put()
531 spin_unlock(&cifsi->open_file_lock); in _cifsFileInfo_put()
532 spin_unlock(&tcon->open_file_lock); in _cifsFileInfo_put()
535 cancel_work_sync(&cifs_file->oplock_break) : false; in _cifsFileInfo_put()
537 if (!tcon->need_reconnect && !cifs_file->invalidHandle) { in _cifsFileInfo_put()
538 struct TCP_Server_Info *server = tcon->ses->server; in _cifsFileInfo_put()
542 if (server->ops->close_getattr) in _cifsFileInfo_put()
543 server->ops->close_getattr(xid, tcon, cifs_file); in _cifsFileInfo_put()
544 else if (server->ops->close) in _cifsFileInfo_put()
545 server->ops->close(xid, tcon, &cifs_file->fid); in _cifsFileInfo_put()
555 queue_work(fileinfo_put_wq, &cifs_file->put); in _cifsFileInfo_put()
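
The kernel-doc fragments at lines 456-472 describe cifsFileInfo_put()/_cifsFileInfo_put(): dropping a reference, with the last drop tearing the handle down, and with the requirement that the caller not hold tcon->open_file_lock, cinode->open_file_lock or cifs_file->file_info_lock when that final teardown can run. Here is a minimal userspace sketch of that get/put discipline using a C11 atomic counter; demo_handle and its fields are hypothetical stand-ins, not the real cifsFileInfo.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_handle {
        atomic_int count;          /* reference count (cfile->count analogue) */
        pthread_mutex_t list_lock; /* must NOT be held across the final put   */
        int fid;                   /* pretend server file handle              */
};

static struct demo_handle *handle_get(struct demo_handle *h)
{
        atomic_fetch_add(&h->count, 1);
        return h;
}

static void handle_put(struct demo_handle *h)
{
        /* The final put closes the server handle and frees the object, so
         * callers must not reach here while holding h->list_lock. */
        if (atomic_fetch_sub(&h->count, 1) == 1) {
                printf("last reference dropped: closing fid %d\n", h->fid);
                pthread_mutex_destroy(&h->list_lock);
                free(h);
        }
}

int main(void)
{
        struct demo_handle *h = malloc(sizeof(*h));

        if (!h)
                return 1;
        atomic_init(&h->count, 1);              /* opener's reference */
        pthread_mutex_init(&h->list_lock, NULL);
        h->fid = 42;

        struct demo_handle *worker = handle_get(h); /* e.g. for deferred work */

        /* list bookkeeping happens under the lock ...                       */
        pthread_mutex_lock(&h->list_lock);
        /* ... unlink from per-inode / per-tcon lists here ...               */
        pthread_mutex_unlock(&h->list_lock);
        /* ... but the lock is dropped before any put that may be the last.  */

        handle_put(worker);     /* count 2 -> 1, no teardown   */
        handle_put(h);          /* count 1 -> 0, teardown runs */
        return 0;
}

The kernel version additionally can hand the final teardown to a workqueue (the queue_work() call at line 555 above) instead of doing the blocking close in the caller's context.
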
563 int rc = -EACCES; in cifs_open()
568 struct cifs_tcon *tcon; in cifs_open() local
580 cifs_sb = CIFS_SB(inode->i_sb); in cifs_open()
583 return -EIO; in cifs_open()
591 tcon = tlink_tcon(tlink); in cifs_open()
592 server = tcon->ses->server; in cifs_open()
602 inode, file->f_flags, full_path); in cifs_open()
604 if (file->f_flags & O_DIRECT && in cifs_open()
605 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) { in cifs_open()
606 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) in cifs_open()
607 file->f_op = &cifs_file_direct_nobrl_ops; in cifs_open()
609 file->f_op = &cifs_file_direct_ops; in cifs_open()
613 rc = cifs_get_readable_path(tcon, full_path, &cfile); in cifs_open()
615 if (file->f_flags == cfile->f_flags) { in cifs_open()
616 file->private_data = cfile; in cifs_open()
617 spin_lock(&CIFS_I(inode)->deferred_lock); in cifs_open()
619 spin_unlock(&CIFS_I(inode)->deferred_lock); in cifs_open()
626 if (server->oplocks) in cifs_open()
632 if (!tcon->broken_posix_open && tcon->unix_ext && in cifs_open()
633 cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & in cifs_open()
634 le64_to_cpu(tcon->fsUnixInfo.Capability))) { in cifs_open()
636 rc = cifs_posix_open(full_path, &inode, inode->i_sb, in cifs_open()
637 cifs_sb->ctx->file_mode /* ignored */, in cifs_open()
638 file->f_flags, &oplock, &fid.netfid, xid); in cifs_open()
642 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { in cifs_open()
643 if (tcon->ses->serverNOS) in cifs_open()
645 tcon->ses->ip_addr, in cifs_open()
646 tcon->ses->serverNOS); in cifs_open()
647 tcon->broken_posix_open = true; in cifs_open()
648 } else if ((rc != -EIO) && (rc != -EREMOTE) && in cifs_open()
649 (rc != -EOPNOTSUPP)) /* path not found or net err */ in cifs_open()
658 if (server->ops->get_lease_key) in cifs_open()
659 server->ops->get_lease_key(inode, &fid); in cifs_open()
664 if (server->ops->get_lease_key) in cifs_open()
665 server->ops->get_lease_key(inode, &fid); in cifs_open()
667 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid, in cifs_open()
677 if (server->ops->close) in cifs_open()
678 server->ops->close(xid, tcon, &fid); in cifs_open()
680 rc = -ENOMEM; in cifs_open()
685 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) { in cifs_open()
688 * problems creating new read-only files. in cifs_open()
691 .mode = inode->i_mode, in cifs_open()
699 CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid, in cifs_open()
700 cfile->pid); in cifs_open()
706 file->f_mode & FMODE_WRITE); in cifs_open()
707 if (file->f_flags & O_DIRECT && in cifs_open()
708 (!((file->f_flags & O_ACCMODE) != O_RDONLY) || in cifs_open()
709 file->f_flags & O_APPEND)) in cifs_open()
732 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); in cifs_relock_file()
733 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); in cifs_relock_file() local
736 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); in cifs_relock_file()
739 down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING); in cifs_relock_file()
740 if (cinode->can_cache_brlcks) { in cifs_relock_file()
741 /* can cache locks - no need to relock */ in cifs_relock_file()
742 up_read(&cinode->lock_sem); in cifs_relock_file()
747 if (cap_unix(tcon->ses) && in cifs_relock_file()
748 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && in cifs_relock_file()
749 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) in cifs_relock_file()
753 rc = tcon->ses->server->ops->push_mand_locks(cfile); in cifs_relock_file()
755 up_read(&cinode->lock_sem); in cifs_relock_file()
762 int rc = -EACCES; in cifs_reopen_file()
766 struct cifs_tcon *tcon; in cifs_reopen_file() local
778 mutex_lock(&cfile->fh_mutex); in cifs_reopen_file()
779 if (!cfile->invalidHandle) { in cifs_reopen_file()
780 mutex_unlock(&cfile->fh_mutex); in cifs_reopen_file()
785 inode = d_inode(cfile->dentry); in cifs_reopen_file()
786 cifs_sb = CIFS_SB(inode->i_sb); in cifs_reopen_file()
787 tcon = tlink_tcon(cfile->tlink); in cifs_reopen_file()
788 server = tcon->ses->server; in cifs_reopen_file()
797 full_path = build_path_from_dentry(cfile->dentry, page); in cifs_reopen_file()
799 mutex_unlock(&cfile->fh_mutex); in cifs_reopen_file()
806 inode, cfile->f_flags, full_path); in cifs_reopen_file()
808 if (tcon->ses->server->oplocks) in cifs_reopen_file()
814 if (tcon->unix_ext && cap_unix(tcon->ses) && in cifs_reopen_file()
816 le64_to_cpu(tcon->fsUnixInfo.Capability))) { in cifs_reopen_file()
821 unsigned int oflags = cfile->f_flags & in cifs_reopen_file()
824 rc = cifs_posix_open(full_path, NULL, inode->i_sb, in cifs_reopen_file()
825 cifs_sb->ctx->file_mode /* ignored */, in cifs_reopen_file()
826 oflags, &oplock, &cfile->fid.netfid, xid); in cifs_reopen_file()
839 desired_access = cifs_convert_flags(cfile->f_flags); in cifs_reopen_file()
842 if (cfile->f_flags & O_SYNC) in cifs_reopen_file()
845 if (cfile->f_flags & O_DIRECT) in cifs_reopen_file()
848 if (server->ops->get_lease_key) in cifs_reopen_file()
849 server->ops->get_lease_key(inode, &cfile->fid); in cifs_reopen_file()
851 oparms.tcon = tcon; in cifs_reopen_file()
857 oparms.fid = &cfile->fid; in cifs_reopen_file()
862 * ops->open and then calling get_inode_info with returned buf since in cifs_reopen_file()
867 rc = server->ops->open(xid, &oparms, &oplock, NULL); in cifs_reopen_file()
868 if (rc == -ENOENT && oparms.reconnect == false) { in cifs_reopen_file()
869 /* durable handle timeout has expired - open the file again */ in cifs_reopen_file()

870 rc = server->ops->open(xid, &oparms, &oplock, NULL); in cifs_reopen_file()
876 mutex_unlock(&cfile->fh_mutex); in cifs_reopen_file()
885 cfile->invalidHandle = false; in cifs_reopen_file()
886 mutex_unlock(&cfile->fh_mutex); in cifs_reopen_file()
890 rc = filemap_write_and_wait(inode->i_mapping); in cifs_reopen_file()
892 mapping_set_error(inode->i_mapping, rc); in cifs_reopen_file()
894 if (tcon->posix_extensions) in cifs_reopen_file()
895 rc = smb311_posix_get_inode_info(&inode, full_path, inode->i_sb, xid); in cifs_reopen_file()
896 else if (tcon->unix_ext) in cifs_reopen_file()
898 inode->i_sb, xid); in cifs_reopen_file()
901 inode->i_sb, xid, NULL); in cifs_reopen_file()
914 if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) { in cifs_reopen_file()
919 server->ops->set_fid(cfile, &cfile->fid, oplock); in cifs_reopen_file()
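
In the cifs_reopen_file() excerpt, lines 867-870 issue the open once and, if it fails with -ENOENT because the durable handle timed out on the server, immediately issue a second, fresh open. A tiny sketch of that retry-once shape follows; try_open() and its reconnect flag are hypothetical and do not model the real oparms semantics.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical open attempt: pretend resuming the old (durable) handle
 * fails with -ENOENT, while a fresh open succeeds. */
static int try_open(bool reconnect)
{
        return reconnect ? -ENOENT : 0;
}

static int reopen(void)
{
        int rc = try_open(true);        /* first, try to resume the old handle */

        if (rc == -ENOENT) {
                /* the handle is gone on the server - open the file again */
                rc = try_open(false);
        }
        return rc;
}

int main(void)
{
        printf("reopen rc=%d\n", reopen());
        return 0;
}
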
934 spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); in smb2_deferred_work_close()
936 cfile->deferred_close_scheduled = false; in smb2_deferred_work_close()
937 spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); in smb2_deferred_work_close()
945 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); in cifs_close()
948 cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE); in cifs_close()
950 if (file->private_data != NULL) { in cifs_close()
951 cfile = file->private_data; in cifs_close()
952 file->private_data = NULL; in cifs_close()
954 if ((cinode->oplock == CIFS_CACHE_RHW_FLG) && in cifs_close()
955 cinode->lease_granted && in cifs_close()
956 !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) && in cifs_close()
958 if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) { in cifs_close()
959 inode->i_ctime = inode->i_mtime = current_time(inode); in cifs_close()
961 spin_lock(&cinode->deferred_lock); in cifs_close()
963 if (cfile->deferred_close_scheduled && in cifs_close()
964 delayed_work_pending(&cfile->deferred)) { in cifs_close()
967 * So, increase the ref count to avoid use-after-free. in cifs_close()
970 &cfile->deferred, cifs_sb->ctx->closetimeo)) in cifs_close()
975 &cfile->deferred, cifs_sb->ctx->closetimeo); in cifs_close()
976 cfile->deferred_close_scheduled = true; in cifs_close()
977 spin_unlock(&cinode->deferred_lock); in cifs_close()
980 spin_unlock(&cinode->deferred_lock); in cifs_close()
988 /* return code from the ->release op is always ignored */ in cifs_close()
993 cifs_reopen_persistent_handles(struct cifs_tcon *tcon) in cifs_reopen_persistent_handles() argument
998 if (!tcon->use_persistent || !tcon->need_reopen_files) in cifs_reopen_persistent_handles()
1001 tcon->need_reopen_files = false; in cifs_reopen_persistent_handles()
1007 spin_lock(&tcon->open_file_lock); in cifs_reopen_persistent_handles()
1008 list_for_each_entry(open_file, &tcon->openFileList, tlist) { in cifs_reopen_persistent_handles()
1009 if (!open_file->invalidHandle) in cifs_reopen_persistent_handles()
1012 list_add_tail(&open_file->rlist, &tmp_list); in cifs_reopen_persistent_handles()
1014 spin_unlock(&tcon->open_file_lock); in cifs_reopen_persistent_handles()
1018 tcon->need_reopen_files = true; in cifs_reopen_persistent_handles()
1019 list_del_init(&open_file->rlist); in cifs_reopen_persistent_handles()
1028 struct cifsFileInfo *cfile = file->private_data; in cifs_closedir()
1029 struct cifs_tcon *tcon; in cifs_closedir() local
1039 tcon = tlink_tcon(cfile->tlink); in cifs_closedir()
1040 server = tcon->ses->server; in cifs_closedir()
1043 spin_lock(&cfile->file_info_lock); in cifs_closedir()
1044 if (server->ops->dir_needs_close(cfile)) { in cifs_closedir()
1045 cfile->invalidHandle = true; in cifs_closedir()
1046 spin_unlock(&cfile->file_info_lock); in cifs_closedir()
1047 if (server->ops->close_dir) in cifs_closedir()
1048 rc = server->ops->close_dir(xid, tcon, &cfile->fid); in cifs_closedir()
1050 rc = -ENOSYS; in cifs_closedir()
1055 spin_unlock(&cfile->file_info_lock); in cifs_closedir()
1057 buf = cfile->srch_inf.ntwrk_buf_start; in cifs_closedir()
1060 cfile->srch_inf.ntwrk_buf_start = NULL; in cifs_closedir()
1061 if (cfile->srch_inf.smallBuf) in cifs_closedir()
1067 cifs_put_tlink(cfile->tlink); in cifs_closedir()
1068 kfree(file->private_data); in cifs_closedir()
1069 file->private_data = NULL; in cifs_closedir()
1082 lock->offset = offset; in cifs_lock_init()
1083 lock->length = length; in cifs_lock_init()
1084 lock->type = type; in cifs_lock_init()
1085 lock->pid = current->tgid; in cifs_lock_init()
1086 lock->flags = flags; in cifs_lock_init()
1087 INIT_LIST_HEAD(&lock->blist); in cifs_lock_init()
1088 init_waitqueue_head(&lock->block_q); in cifs_lock_init()
1096 list_for_each_entry_safe(li, tmp, &lock->blist, blist) { in cifs_del_lock_waiters()
1097 list_del_init(&li->blist); in cifs_del_lock_waiters()
1098 wake_up(&li->block_q); in cifs_del_lock_waiters()
1106 /* @rw_check : 0 - no op, 1 - read, 2 - write */
1114 struct cifsFileInfo *cur_cfile = fdlocks->cfile; in cifs_find_fid_lock_conflict()
1115 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; in cifs_find_fid_lock_conflict()
1117 list_for_each_entry(li, &fdlocks->locks, llist) { in cifs_find_fid_lock_conflict()
1118 if (offset + length <= li->offset || in cifs_find_fid_lock_conflict()
1119 offset >= li->offset + li->length) in cifs_find_fid_lock_conflict()
1121 if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid && in cifs_find_fid_lock_conflict()
1122 server->ops->compare_fids(cfile, cur_cfile)) { in cifs_find_fid_lock_conflict()
1124 if (!(li->type & server->vals->shared_lock_type) || in cifs_find_fid_lock_conflict()
1128 if ((type & server->vals->shared_lock_type) && in cifs_find_fid_lock_conflict()
1129 ((server->ops->compare_fids(cfile, cur_cfile) && in cifs_find_fid_lock_conflict()
1130 current->tgid == li->pid) || type == li->type)) in cifs_find_fid_lock_conflict()
1133 (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) && in cifs_find_fid_lock_conflict()
1134 server->ops->compare_fids(cfile, cur_cfile)) in cifs_find_fid_lock_conflict()
1150 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); in cifs_find_lock_conflict()
1152 list_for_each_entry(cur, &cinode->llist, llist) { in cifs_find_lock_conflict()
1176 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); in cifs_lock_test()
1177 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; in cifs_lock_test()
1180 down_read(&cinode->lock_sem); in cifs_lock_test()
1183 flock->fl_flags, &conf_lock, in cifs_lock_test()
1186 flock->fl_start = conf_lock->offset; in cifs_lock_test()
1187 flock->fl_end = conf_lock->offset + conf_lock->length - 1; in cifs_lock_test()
1188 flock->fl_pid = conf_lock->pid; in cifs_lock_test()
1189 if (conf_lock->type & server->vals->shared_lock_type) in cifs_lock_test()
1190 flock->fl_type = F_RDLCK; in cifs_lock_test()
1192 flock->fl_type = F_WRLCK; in cifs_lock_test()
1193 } else if (!cinode->can_cache_brlcks) in cifs_lock_test()
1196 flock->fl_type = F_UNLCK; in cifs_lock_test()
1198 up_read(&cinode->lock_sem); in cifs_lock_test()
1205 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); in cifs_lock_add()
1206 cifs_down_write(&cinode->lock_sem); in cifs_lock_add()
1207 list_add_tail(&lock->llist, &cfile->llist->locks); in cifs_lock_add()
1208 up_write(&cinode->lock_sem); in cifs_lock_add()
1212 * Set the byte-range lock (mandatory style). Returns:
1215 * 3) -EACCES, if there is a lock that prevents us and wait is false.
1222 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); in cifs_lock_add_if()
1228 cifs_down_write(&cinode->lock_sem); in cifs_lock_add_if()
1230 exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length, in cifs_lock_add_if()
1231 lock->type, lock->flags, &conf_lock, in cifs_lock_add_if()
1233 if (!exist && cinode->can_cache_brlcks) { in cifs_lock_add_if()
1234 list_add_tail(&lock->llist, &cfile->llist->locks); in cifs_lock_add_if()
1235 up_write(&cinode->lock_sem); in cifs_lock_add_if()
1242 rc = -EACCES; in cifs_lock_add_if()
1244 list_add_tail(&lock->blist, &conf_lock->blist); in cifs_lock_add_if()
1245 up_write(&cinode->lock_sem); in cifs_lock_add_if()
1246 rc = wait_event_interruptible(lock->block_q, in cifs_lock_add_if()
1247 (lock->blist.prev == &lock->blist) && in cifs_lock_add_if()
1248 (lock->blist.next == &lock->blist)); in cifs_lock_add_if()
1251 cifs_down_write(&cinode->lock_sem); in cifs_lock_add_if()
1252 list_del_init(&lock->blist); in cifs_lock_add_if()
1255 up_write(&cinode->lock_sem); in cifs_lock_add_if()
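
The comment at lines 1212-1215 gives the mandatory byte-range lock contract, including -EACCES when a conflicting lock exists and the caller will not wait; the cifs_lock_add_if() lines above show the conflict check and a wait path built on the conflicting lock's block queue. Below is a compact userspace sketch of just the interval conflict check (the kernel check also considers lock type, owner and fid, which the sketch omits); brlock and lock_add_if() are illustrative names, and the blocking path is only noted in a comment.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct brlock {
        uint64_t offset, length;
        struct brlock *next;
};

/* Two byte ranges conflict unless one ends at or before the other starts. */
static bool ranges_conflict(const struct brlock *a, uint64_t off, uint64_t len)
{
        return !(off + len <= a->offset || off >= a->offset + a->length);
}

/*
 * Try to add [off, off + len) to the list.  Returns 0 on success, -EACCES
 * if a conflicting range exists and the caller will not wait, and -EAGAIN
 * where the kernel would instead sleep on the conflicting lock's block
 * queue and retry once that lock is released.
 */
static int lock_add_if(struct brlock **head, struct brlock *lock,
                       uint64_t off, uint64_t len, bool wait)
{
        for (struct brlock *cur = *head; cur; cur = cur->next) {
                if (ranges_conflict(cur, off, len))
                        return wait ? -EAGAIN : -EACCES;
        }
        lock->offset = off;
        lock->length = len;
        lock->next = *head;
        *head = lock;
        return 0;
}

int main(void)
{
        struct brlock *head = NULL, a, b;

        printf("first lock:  rc=%d\n", lock_add_if(&head, &a, 0, 100, false));
        printf("conflicting: rc=%d\n", lock_add_if(&head, &b, 50, 10, false));
        return 0;
}
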
1272 unsigned char saved_type = flock->fl_type; in cifs_posix_lock_test()
1274 if ((flock->fl_flags & FL_POSIX) == 0) in cifs_posix_lock_test()
1277 down_read(&cinode->lock_sem); in cifs_posix_lock_test()
1280 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) { in cifs_posix_lock_test()
1281 flock->fl_type = saved_type; in cifs_posix_lock_test()
1285 up_read(&cinode->lock_sem); in cifs_posix_lock_test()
1290 * Set the byte-range lock (posix style). Returns:
1302 if ((flock->fl_flags & FL_POSIX) == 0) in cifs_posix_lock_set()
1305 cifs_down_write(&cinode->lock_sem); in cifs_posix_lock_set()
1306 if (!cinode->can_cache_brlcks) { in cifs_posix_lock_set()
1307 up_write(&cinode->lock_sem); in cifs_posix_lock_set()
1312 up_write(&cinode->lock_sem); in cifs_posix_lock_set()
1322 struct cifs_tcon *tcon; in cifs_push_mandatory_locks() local
1332 tcon = tlink_tcon(cfile->tlink); in cifs_push_mandatory_locks()
1335 * Accessing maxBuf is racy with cifs_reconnect - need to store value in cifs_push_mandatory_locks()
1338 max_buf = tcon->ses->server->maxBuf; in cifs_push_mandatory_locks()
1341 return -EINVAL; in cifs_push_mandatory_locks()
1346 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr), in cifs_push_mandatory_locks()
1348 max_num = (max_buf - sizeof(struct smb_hdr)) / in cifs_push_mandatory_locks()
1353 return -ENOMEM; in cifs_push_mandatory_locks()
1359 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) { in cifs_push_mandatory_locks()
1360 if (li->type != types[i]) in cifs_push_mandatory_locks()
1362 cur->Pid = cpu_to_le16(li->pid); in cifs_push_mandatory_locks()
1363 cur->LengthLow = cpu_to_le32((u32)li->length); in cifs_push_mandatory_locks()
1364 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32)); in cifs_push_mandatory_locks()
1365 cur->OffsetLow = cpu_to_le32((u32)li->offset); in cifs_push_mandatory_locks()
1366 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32)); in cifs_push_mandatory_locks()
1368 stored_rc = cifs_lockv(xid, tcon, in cifs_push_mandatory_locks()
1369 cfile->fid.netfid, in cifs_push_mandatory_locks()
1370 (__u8)li->type, 0, num, in cifs_push_mandatory_locks()
1381 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid, in cifs_push_mandatory_locks()
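
cifs_push_mandatory_locks() (lines 1322-1381) snapshots maxBuf once, because it can change across a reconnect, derives how many LOCKING_ANDX_RANGE elements fit after the SMB header, and flushes a batch whenever that many ranges have been collected. The sketch below reproduces only that batching arithmetic with made-up sizes; DEMO_HDR_SIZE, DEMO_RANGE_SIZE, send_batch() and push_locks() are hypothetical.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_HDR_SIZE   64u   /* stand-in for sizeof(struct smb_hdr)     */
#define DEMO_RANGE_SIZE 16u   /* stand-in for sizeof(LOCKING_ANDX_RANGE) */

struct demo_range { unsigned long long offset, length; };

/* Pretend to send one batched lock request covering num ranges. */
static void send_batch(const struct demo_range *buf, unsigned int num)
{
        (void)buf;
        printf("sending batch of %u range(s)\n", num);
}

static int push_locks(const struct demo_range *locks, unsigned int nlocks,
                      unsigned int max_buf /* snapshot taken once */)
{
        if (max_buf < DEMO_HDR_SIZE + DEMO_RANGE_SIZE)
                return -EINVAL;         /* not even one range fits */

        unsigned int max_num = (max_buf - DEMO_HDR_SIZE) / DEMO_RANGE_SIZE;
        struct demo_range *buf = calloc(max_num, sizeof(*buf));
        unsigned int num = 0;

        if (!buf)
                return -ENOMEM;

        for (unsigned int i = 0; i < nlocks; i++) {
                buf[num++] = locks[i];
                if (num == max_num) {   /* buffer full - flush and refill */
                        send_batch(buf, num);
                        num = 0;
                }
        }
        if (num)
                send_batch(buf, num);   /* trailing, partially filled batch */

        free(buf);
        return 0;
}

int main(void)
{
        struct demo_range locks[7] = {
                { 0, 1 }, { 10, 1 }, { 20, 1 }, { 30, 1 },
                { 40, 1 }, { 50, 1 }, { 60, 1 },
        };

        /* 64 + 3 * 16 = 112 bytes: exactly three ranges per request */
        return push_locks(locks, 7, 112);
}
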
1413 struct inode *inode = d_inode(cfile->dentry); in cifs_push_posix_locks()
1414 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); in cifs_push_posix_locks() local
1416 struct file_lock_context *flctx = inode->i_flctx; in cifs_push_posix_locks()
1428 spin_lock(&flctx->flc_lock); in cifs_push_posix_locks()
1429 list_for_each(el, &flctx->flc_posix) { in cifs_push_posix_locks()
1432 spin_unlock(&flctx->flc_lock); in cifs_push_posix_locks()
1438 * added to the list while we are holding cinode->lock_sem that in cifs_push_posix_locks()
1444 rc = -ENOMEM; in cifs_push_posix_locks()
1447 list_add_tail(&lck->llist, &locks_to_send); in cifs_push_posix_locks()
1451 spin_lock(&flctx->flc_lock); in cifs_push_posix_locks()
1452 list_for_each_entry(flock, &flctx->flc_posix, fl_list) { in cifs_push_posix_locks()
1456 * structures - something is really wrong. in cifs_push_posix_locks()
1462 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK) in cifs_push_posix_locks()
1467 lck->pid = hash_lockowner(flock->fl_owner); in cifs_push_posix_locks()
1468 lck->netfid = cfile->fid.netfid; in cifs_push_posix_locks()
1469 lck->length = length; in cifs_push_posix_locks()
1470 lck->type = type; in cifs_push_posix_locks()
1471 lck->offset = flock->fl_start; in cifs_push_posix_locks()
1473 spin_unlock(&flctx->flc_lock); in cifs_push_posix_locks()
1478 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid, in cifs_push_posix_locks()
1479 lck->offset, lck->length, NULL, in cifs_push_posix_locks()
1480 lck->type, 0); in cifs_push_posix_locks()
1483 list_del(&lck->llist); in cifs_push_posix_locks()
1492 list_del(&lck->llist); in cifs_push_posix_locks()
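
cifs_push_posix_locks() (lines 1413-1492) cannot allocate with GFP_KERNEL or issue network requests while the flc_lock spinlock is held, so it counts the posix locks under the lock, drops it to allocate that many entries (the comment at line 1438 explains why the count stays valid while lock_sem is held), copies the lock descriptions into a private list under the lock again, and only then sends them. The userspace sketch below keeps only the copy-then-send shape and simplifies by allocating inline under a pthread mutex, which would not be legal under the kernel spinlock; lock_el, send_lock() and push_posix_locks() are hypothetical names.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct lock_el { long start, end; struct lock_el *next; };

static pthread_mutex_t flc_lock = PTHREAD_MUTEX_INITIALIZER;
static struct lock_el *posix_locks;     /* guarded by flc_lock */

/* Pretend network send - must never run with flc_lock held. */
static void send_lock(const struct lock_el *el)
{
        printf("pushing lock [%ld, %ld]\n", el->start, el->end);
}

static void push_posix_locks(void)
{
        struct lock_el *copy = NULL, *el, *tmp;

        /* 1. snapshot the lock descriptions while holding the lock */
        pthread_mutex_lock(&flc_lock);
        for (el = posix_locks; el; el = el->next) {
                struct lock_el *c = malloc(sizeof(*c));

                if (!c)
                        break;          /* the kernel fails the whole push */
                *c = *el;
                c->next = copy;
                copy = c;
        }
        pthread_mutex_unlock(&flc_lock);

        /* 2. send (and free) the private copies with the lock dropped */
        for (el = copy; el; el = tmp) {
                tmp = el->next;
                send_lock(el);
                free(el);
        }
}

int main(void)
{
        struct lock_el b = { 200, 299, NULL }, a = { 0, 99, &b };

        posix_locks = &a;
        push_posix_locks();
        return 0;
}
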
1502 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); in cifs_push_locks()
1503 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); in cifs_push_locks() local
1506 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); in cifs_push_locks()
1509 /* we are going to update can_cache_brlcks here - need a write access */ in cifs_push_locks()
1510 cifs_down_write(&cinode->lock_sem); in cifs_push_locks()
1511 if (!cinode->can_cache_brlcks) { in cifs_push_locks()
1512 up_write(&cinode->lock_sem); in cifs_push_locks()
1517 if (cap_unix(tcon->ses) && in cifs_push_locks()
1518 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && in cifs_push_locks()
1519 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) in cifs_push_locks()
1523 rc = tcon->ses->server->ops->push_mand_locks(cfile); in cifs_push_locks()
1525 cinode->can_cache_brlcks = false; in cifs_push_locks()
1526 up_write(&cinode->lock_sem); in cifs_push_locks()
1534 if (flock->fl_flags & FL_POSIX) in cifs_read_flock()
1536 if (flock->fl_flags & FL_FLOCK) in cifs_read_flock()
1538 if (flock->fl_flags & FL_SLEEP) { in cifs_read_flock()
1542 if (flock->fl_flags & FL_ACCESS) in cifs_read_flock()
1543 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n"); in cifs_read_flock()
1544 if (flock->fl_flags & FL_LEASE) in cifs_read_flock()
1545 cifs_dbg(FYI, "Lease on file - not implemented yet\n"); in cifs_read_flock()
1546 if (flock->fl_flags & in cifs_read_flock()
1549 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags); in cifs_read_flock()
1551 *type = server->vals->large_lock_type; in cifs_read_flock()
1552 if (flock->fl_type == F_WRLCK) { in cifs_read_flock()
1554 *type |= server->vals->exclusive_lock_type; in cifs_read_flock()
1556 } else if (flock->fl_type == F_UNLCK) { in cifs_read_flock()
1558 *type |= server->vals->unlock_lock_type; in cifs_read_flock()
1561 } else if (flock->fl_type == F_RDLCK) { in cifs_read_flock()
1563 *type |= server->vals->shared_lock_type; in cifs_read_flock()
1565 } else if (flock->fl_type == F_EXLCK) { in cifs_read_flock()
1567 *type |= server->vals->exclusive_lock_type; in cifs_read_flock()
1569 } else if (flock->fl_type == F_SHLCK) { in cifs_read_flock()
1571 *type |= server->vals->shared_lock_type; in cifs_read_flock()
1583 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data; in cifs_getlk()
1584 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); in cifs_getlk() local
1585 struct TCP_Server_Info *server = tcon->ses->server; in cifs_getlk()
1587 __u16 netfid = cfile->fid.netfid; in cifs_getlk()
1596 if (type & server->vals->shared_lock_type) in cifs_getlk()
1600 rc = CIFSSMBPosixLock(xid, tcon, netfid, in cifs_getlk()
1601 hash_lockowner(flock->fl_owner), in cifs_getlk()
1602 flock->fl_start, length, flock, in cifs_getlk()
1608 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock); in cifs_getlk()
1613 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type, in cifs_getlk()
1616 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, in cifs_getlk()
1618 flock->fl_type = F_UNLCK; in cifs_getlk()
1625 if (type & server->vals->shared_lock_type) { in cifs_getlk()
1626 flock->fl_type = F_WRLCK; in cifs_getlk()
1630 type &= ~server->vals->exclusive_lock_type; in cifs_getlk()
1632 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, in cifs_getlk()
1633 type | server->vals->shared_lock_type, in cifs_getlk()
1636 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, in cifs_getlk()
1637 type | server->vals->shared_lock_type, 0, 1, false); in cifs_getlk()
1638 flock->fl_type = F_RDLCK; in cifs_getlk()
1643 flock->fl_type = F_WRLCK; in cifs_getlk()
1662 list_del(&li->llist); in cifs_free_llist()
1680 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); in cifs_unlock_range() local
1681 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); in cifs_unlock_range()
1689 * Accessing maxBuf is racy with cifs_reconnect - need to store value in cifs_unlock_range()
1692 max_buf = tcon->ses->server->maxBuf; in cifs_unlock_range()
1694 return -EINVAL; in cifs_unlock_range()
1698 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr), in cifs_unlock_range()
1700 max_num = (max_buf - sizeof(struct smb_hdr)) / in cifs_unlock_range()
1704 return -ENOMEM; in cifs_unlock_range()
1706 cifs_down_write(&cinode->lock_sem); in cifs_unlock_range()
1710 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) { in cifs_unlock_range()
1711 if (flock->fl_start > li->offset || in cifs_unlock_range()
1712 (flock->fl_start + length) < in cifs_unlock_range()
1713 (li->offset + li->length)) in cifs_unlock_range()
1715 if (current->tgid != li->pid) in cifs_unlock_range()
1717 if (types[i] != li->type) in cifs_unlock_range()
1719 if (cinode->can_cache_brlcks) { in cifs_unlock_range()
1721 * We can cache brlock requests - simply remove in cifs_unlock_range()
1724 list_del(&li->llist); in cifs_unlock_range()
1729 cur->Pid = cpu_to_le16(li->pid); in cifs_unlock_range()
1730 cur->LengthLow = cpu_to_le32((u32)li->length); in cifs_unlock_range()
1731 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32)); in cifs_unlock_range()
1732 cur->OffsetLow = cpu_to_le32((u32)li->offset); in cifs_unlock_range()
1733 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32)); in cifs_unlock_range()
1739 list_move(&li->llist, &tmp_llist); in cifs_unlock_range()
1741 stored_rc = cifs_lockv(xid, tcon, in cifs_unlock_range()
1742 cfile->fid.netfid, in cifs_unlock_range()
1743 li->type, num, 0, buf); in cifs_unlock_range()
1747 * request - add all locks from the tmp in cifs_unlock_range()
1751 &cfile->llist->locks); in cifs_unlock_range()
1755 * The unlock range request succeeded - in cifs_unlock_range()
1765 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid, in cifs_unlock_range()
1769 &cfile->llist->locks); in cifs_unlock_range()
1776 up_write(&cinode->lock_sem); in cifs_unlock_range()
1789 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data; in cifs_setlk()
1790 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); in cifs_setlk() local
1791 struct TCP_Server_Info *server = tcon->ses->server; in cifs_setlk()
1792 struct inode *inode = d_inode(cfile->dentry); in cifs_setlk()
1802 if (type & server->vals->shared_lock_type) in cifs_setlk()
1810 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid, in cifs_setlk()
1811 hash_lockowner(flock->fl_owner), in cifs_setlk()
1812 flock->fl_start, length, in cifs_setlk()
1820 lock = cifs_lock_init(flock->fl_start, length, type, in cifs_setlk()
1821 flock->fl_flags); in cifs_setlk()
1823 return -ENOMEM; in cifs_setlk()
1835 * if we set a byte-range lock on a file - break it explicitly in cifs_setlk()
1837 * read won't conflict with non-overlapping locks due to in cifs_setlk()
1845 CIFS_I(inode)->oplock = 0; in cifs_setlk()
1848 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, in cifs_setlk()
1857 rc = server->ops->mand_unlock_range(cfile, flock, xid); in cifs_setlk()
1860 if ((flock->fl_flags & FL_POSIX) || (flock->fl_flags & FL_FLOCK)) { in cifs_setlk()
1869 if (!(flock->fl_flags & FL_CLOSE)) in cifs_setlk()
1884 struct cifs_tcon *tcon; in cifs_flock() local
1890 if (!(fl->fl_flags & FL_FLOCK)) { in cifs_flock()
1891 rc = -ENOLCK; in cifs_flock()
1896 cfile = (struct cifsFileInfo *)file->private_data; in cifs_flock()
1897 tcon = tlink_tcon(cfile->tlink); in cifs_flock()
1900 tcon->ses->server); in cifs_flock()
1903 if (cap_unix(tcon->ses) && in cifs_flock()
1904 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && in cifs_flock()
1905 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) in cifs_flock()
1913 rc = -EOPNOTSUPP; in cifs_flock()
1933 struct cifs_tcon *tcon; in cifs_lock() local
1937 rc = -EACCES; in cifs_lock()
1941 flock->fl_flags, flock->fl_type, (long long)flock->fl_start, in cifs_lock()
1942 (long long)flock->fl_end); in cifs_lock()
1944 cfile = (struct cifsFileInfo *)file->private_data; in cifs_lock()
1945 tcon = tlink_tcon(cfile->tlink); in cifs_lock()
1948 tcon->ses->server); in cifs_lock()
1950 set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags); in cifs_lock()
1952 if (cap_unix(tcon->ses) && in cifs_lock()
1953 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && in cifs_lock()
1954 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) in cifs_lock()
1972 return -EOPNOTSUPP; in cifs_lock()
1983 * the inode->i_lock held
1991 if (end_of_write > cifsi->server_eof) in cifs_update_eof()
1992 cifsi->server_eof = end_of_write; in cifs_update_eof()
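
cifs_update_eof() (lines 1991-1992, called with inode->i_lock held per the comment at line 1983) only ever moves the cached server end-of-file forward. A one-screen userspace sketch of the same monotonic update under a mutex; demo_inode is a hypothetical stand-in.

#include <pthread.h>
#include <stdio.h>

struct demo_inode {
        pthread_mutex_t i_lock;
        long long server_eof;   /* highest offset known to be on the server */
};

/* Caller must hold inode->i_lock, mirroring the kernel helper's contract. */
static void update_eof(struct demo_inode *inode, long long offset,
                       unsigned int written)
{
        long long end_of_write = offset + written;

        if (end_of_write > inode->server_eof)
                inode->server_eof = end_of_write;
}

int main(void)
{
        struct demo_inode ino = { PTHREAD_MUTEX_INITIALIZER, 0 };

        pthread_mutex_lock(&ino.i_lock);
        update_eof(&ino, 0, 4096);      /* grows to 4096                     */
        update_eof(&ino, 100, 10);      /* stale/short write: stays at 4096  */
        pthread_mutex_unlock(&ino.i_lock);

        printf("server_eof=%lld\n", ino.server_eof);
        return 0;
}
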
2002 struct cifs_tcon *tcon; in cifs_write() local
2005 struct dentry *dentry = open_file->dentry; in cifs_write()
2012 tcon = tlink_tcon(open_file->tlink); in cifs_write()
2013 server = tcon->ses->server; in cifs_write()
2015 if (!server->ops->sync_write) in cifs_write()
2016 return -ENOSYS; in cifs_write()
2022 rc = -EAGAIN; in cifs_write()
2023 while (rc == -EAGAIN) { in cifs_write()
2027 if (open_file->invalidHandle) { in cifs_write()
2037 len = min(server->ops->wp_retry_size(d_inode(dentry)), in cifs_write()
2038 (unsigned int)write_size - total_written); in cifs_write()
2043 io_parms.tcon = tcon; in cifs_write()
2046 rc = server->ops->sync_write(xid, &open_file->fid, in cifs_write()
2057 spin_lock(&d_inode(dentry)->i_lock); in cifs_write()
2059 spin_unlock(&d_inode(dentry)->i_lock); in cifs_write()
2064 cifs_stats_bytes_written(tcon, total_written); in cifs_write()
2067 spin_lock(&d_inode(dentry)->i_lock); in cifs_write()
2068 if (*offset > d_inode(dentry)->i_size) { in cifs_write()
2070 d_inode(dentry)->i_blocks = (512 - 1 + *offset) >> 9; in cifs_write()
2072 spin_unlock(&d_inode(dentry)->i_lock); in cifs_write()
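
cifs_write() (lines 2002-2072) loops until the whole buffer has been sent: each pass caps the chunk at the server's retry size, reopens an invalidated handle, retries on -EAGAIN, and advances the offset as bytes are accepted. A minimal userspace sketch of that loop, with a hypothetical sync_write() that fails transiently once and otherwise writes at most RETRY_SIZE bytes.

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define RETRY_SIZE 8u   /* stand-in for server->ops->wp_retry_size() */

/* Hypothetical transport write: returns 0 and the number of bytes accepted,
 * or -EAGAIN to ask the caller to retry. */
static int sync_write(const char *data, unsigned int len, unsigned int *written)
{
        static int hiccups = 1;

        (void)data;
        if (hiccups > 0) {
                hiccups--;
                return -EAGAIN;         /* transient failure, caller retries */
        }
        *written = len < RETRY_SIZE ? len : RETRY_SIZE;
        return 0;
}

static long demo_write(const char *data, unsigned int size, long long *offset)
{
        unsigned int total = 0;

        while (total < size) {
                unsigned int len = size - total;
                unsigned int written = 0;
                int rc = -EAGAIN;

                if (len > RETRY_SIZE)
                        len = RETRY_SIZE;       /* cap each request */

                while (rc == -EAGAIN)           /* retry transient failures */
                        rc = sync_write(data + total, len, &written);

                if (rc)
                        return total ? (long)total : rc;
                if (written == 0)
                        break;

                total += written;               /* advance by what was accepted */
                *offset += written;
        }
        return total;
}

int main(void)
{
        const char buf[] = "hello, cifs partial write loop";
        long long off = 0;

        printf("wrote %ld bytes, offset now %lld\n",
               demo_write(buf, (unsigned int)strlen(buf), &off), off);
        return 0;
}
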
2083 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb); in find_readable_file()
2086 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) in find_readable_file()
2089 spin_lock(&cifs_inode->open_file_lock); in find_readable_file()
2090 /* we could simply get the first_list_entry since write-only entries in find_readable_file()
2093 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { in find_readable_file()
2094 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid())) in find_readable_file()
2096 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) { in find_readable_file()
2097 if ((!open_file->invalidHandle)) { in find_readable_file()
2101 spin_unlock(&cifs_inode->open_file_lock); in find_readable_file()
2109 spin_unlock(&cifs_inode->open_file_lock); in find_readable_file()
2113 /* Return -EBADF if no handle is found and general rc otherwise */
2121 int rc = -EBADF; in cifs_get_writable_file()
2128 * Having a null inode here (because mapping->host was set to zero by in cifs_get_writable_file()
2139 cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb); in cifs_get_writable_file()
2142 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) in cifs_get_writable_file()
2145 spin_lock(&cifs_inode->open_file_lock); in cifs_get_writable_file()
2148 spin_unlock(&cifs_inode->open_file_lock); in cifs_get_writable_file()
2151 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { in cifs_get_writable_file()
2152 if (!any_available && open_file->pid != current->tgid) in cifs_get_writable_file()
2154 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid())) in cifs_get_writable_file()
2156 if (with_delete && !(open_file->fid.access & DELETE)) in cifs_get_writable_file()
2158 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) { in cifs_get_writable_file()
2159 if (!open_file->invalidHandle) { in cifs_get_writable_file()
2162 spin_unlock(&cifs_inode->open_file_lock); in cifs_get_writable_file()
2182 spin_unlock(&cifs_inode->open_file_lock); in cifs_get_writable_file()
2191 spin_lock(&cifs_inode->open_file_lock); in cifs_get_writable_file()
2192 list_move_tail(&inv_file->flist, &cifs_inode->openFileList); in cifs_get_writable_file()
2193 spin_unlock(&cifs_inode->open_file_lock); in cifs_get_writable_file()
2197 spin_lock(&cifs_inode->open_file_lock); in cifs_get_writable_file()
2218 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name, in cifs_get_writable_path() argument
2227 spin_lock(&tcon->open_file_lock); in cifs_get_writable_path()
2228 list_for_each_entry(cfile, &tcon->openFileList, tlist) { in cifs_get_writable_path()
2230 const char *full_path = build_path_from_dentry(cfile->dentry, page); in cifs_get_writable_path()
2232 spin_unlock(&tcon->open_file_lock); in cifs_get_writable_path()
2239 cinode = CIFS_I(d_inode(cfile->dentry)); in cifs_get_writable_path()
2240 spin_unlock(&tcon->open_file_lock); in cifs_get_writable_path()
2245 spin_unlock(&tcon->open_file_lock); in cifs_get_writable_path()
2247 return -ENOENT; in cifs_get_writable_path()
2251 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name, in cifs_get_readable_path() argument
2259 spin_lock(&tcon->open_file_lock); in cifs_get_readable_path()
2260 list_for_each_entry(cfile, &tcon->openFileList, tlist) { in cifs_get_readable_path()
2262 const char *full_path = build_path_from_dentry(cfile->dentry, page); in cifs_get_readable_path()
2264 spin_unlock(&tcon->open_file_lock); in cifs_get_readable_path()
2271 cinode = CIFS_I(d_inode(cfile->dentry)); in cifs_get_readable_path()
2272 spin_unlock(&tcon->open_file_lock); in cifs_get_readable_path()
2275 return *ret_file ? 0 : -ENOENT; in cifs_get_readable_path()
2278 spin_unlock(&tcon->open_file_lock); in cifs_get_readable_path()
2280 return -ENOENT; in cifs_get_readable_path()
2289 if (wdata->mr) { in cifs_writedata_release()
2290 smbd_deregister_mr(wdata->mr); in cifs_writedata_release()
2291 wdata->mr = NULL; in cifs_writedata_release()
2295 if (wdata->cfile) in cifs_writedata_release()
2296 cifsFileInfo_put(wdata->cfile); in cifs_writedata_release()
2298 kvfree(wdata->pages); in cifs_writedata_release()
2304 * possible that the page was redirtied so re-clean the page.
2310 struct inode *inode = d_inode(wdata->cfile->dentry); in cifs_writev_requeue()
2314 server = tlink_tcon(wdata->cfile->tlink)->ses->server; in cifs_writev_requeue()
2316 rest_len = wdata->bytes; in cifs_writev_requeue()
2321 wsize = server->ops->wp_retry_size(inode); in cifs_writev_requeue()
2325 rc = -EOPNOTSUPP; in cifs_writev_requeue()
2333 tailsz = rest_len - (nr_pages - 1) * PAGE_SIZE; in cifs_writev_requeue()
2338 rc = -ENOMEM; in cifs_writev_requeue()
2343 wdata2->pages[j] = wdata->pages[i + j]; in cifs_writev_requeue()
2344 lock_page(wdata2->pages[j]); in cifs_writev_requeue()
2345 clear_page_dirty_for_io(wdata2->pages[j]); in cifs_writev_requeue()
2348 wdata2->sync_mode = wdata->sync_mode; in cifs_writev_requeue()
2349 wdata2->nr_pages = nr_pages; in cifs_writev_requeue()
2350 wdata2->offset = page_offset(wdata2->pages[0]); in cifs_writev_requeue()
2351 wdata2->pagesz = PAGE_SIZE; in cifs_writev_requeue()
2352 wdata2->tailsz = tailsz; in cifs_writev_requeue()
2353 wdata2->bytes = cur_len; in cifs_writev_requeue()
2356 &wdata2->cfile); in cifs_writev_requeue()
2357 if (!wdata2->cfile) { in cifs_writev_requeue()
2361 rc = -EBADF; in cifs_writev_requeue()
2363 wdata2->pid = wdata2->cfile->pid; in cifs_writev_requeue()
2364 rc = server->ops->async_writev(wdata2, in cifs_writev_requeue()
2369 unlock_page(wdata2->pages[j]); in cifs_writev_requeue()
2371 SetPageError(wdata2->pages[j]); in cifs_writev_requeue()
2372 end_page_writeback(wdata2->pages[j]); in cifs_writev_requeue()
2373 put_page(wdata2->pages[j]); in cifs_writev_requeue()
2377 kref_put(&wdata2->refcount, cifs_writedata_release); in cifs_writev_requeue()
2385 rest_len -= cur_len; in cifs_writev_requeue()
2387 } while (i < wdata->nr_pages); in cifs_writev_requeue()
2390 for (; i < wdata->nr_pages; i++) { in cifs_writev_requeue()
2391 SetPageError(wdata->pages[i]); in cifs_writev_requeue()
2392 end_page_writeback(wdata->pages[i]); in cifs_writev_requeue()
2393 put_page(wdata->pages[i]); in cifs_writev_requeue()
2397 mapping_set_error(inode->i_mapping, rc); in cifs_writev_requeue()
2398 kref_put(&wdata->refcount, cifs_writedata_release); in cifs_writev_requeue()
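
cifs_writev_requeue() (lines 2310-2398) re-sends a failed writeback request in smaller pieces: whole pages up to the retry size per piece, with the final page of the last piece possibly partial, which is the tailsz arithmetic at line 2333. A short standalone sketch of just that chunking arithmetic; DEMO_PAGE_SIZE and split_into_chunks() are hypothetical stand-ins.

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u

/*
 * Split rest_len bytes into chunks of at most wsize bytes: full pages
 * except for the last page of the final chunk, whose byte count is the
 * "tailsz" computed the same way as at line 2333 above.
 */
static void split_into_chunks(unsigned int rest_len, unsigned int wsize)
{
        while (rest_len) {
                unsigned int nr_pages, cur_len, tailsz;

                if (wsize < rest_len) {
                        nr_pages = wsize / DEMO_PAGE_SIZE;
                        if (nr_pages == 0)
                                return;         /* retry size too small */
                        cur_len = nr_pages * DEMO_PAGE_SIZE;
                        tailsz = DEMO_PAGE_SIZE;
                } else {
                        nr_pages = (rest_len + DEMO_PAGE_SIZE - 1) / DEMO_PAGE_SIZE;
                        cur_len = rest_len;
                        tailsz = rest_len - (nr_pages - 1) * DEMO_PAGE_SIZE;
                }

                printf("chunk: %u bytes over %u page(s), tail page holds %u bytes\n",
                       cur_len, nr_pages, tailsz);
                rest_len -= cur_len;
        }
}

int main(void)
{
        /* e.g. resend 10 pages plus 100 bytes, at most 4 pages per request */
        split_into_chunks(10 * DEMO_PAGE_SIZE + 100, 4 * DEMO_PAGE_SIZE);
        return 0;
}
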
2406 struct inode *inode = d_inode(wdata->cfile->dentry); in cifs_writev_complete()
2409 if (wdata->result == 0) { in cifs_writev_complete()
2410 spin_lock(&inode->i_lock); in cifs_writev_complete()
2411 cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes); in cifs_writev_complete()
2412 spin_unlock(&inode->i_lock); in cifs_writev_complete()
2413 cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink), in cifs_writev_complete()
2414 wdata->bytes); in cifs_writev_complete()
2415 } else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN) in cifs_writev_complete()
2418 for (i = 0; i < wdata->nr_pages; i++) { in cifs_writev_complete()
2419 struct page *page = wdata->pages[i]; in cifs_writev_complete()
2421 if (wdata->result == -EAGAIN) in cifs_writev_complete()
2423 else if (wdata->result < 0) in cifs_writev_complete()
2429 if (wdata->result != -EAGAIN) in cifs_writev_complete()
2430 mapping_set_error(inode->i_mapping, wdata->result); in cifs_writev_complete()
2431 kref_put(&wdata->refcount, cifs_writedata_release); in cifs_writev_complete()
2456 wdata->pages = pages; in cifs_writedata_direct_alloc()
2457 kref_init(&wdata->refcount); in cifs_writedata_direct_alloc()
2458 INIT_LIST_HEAD(&wdata->list); in cifs_writedata_direct_alloc()
2459 init_completion(&wdata->done); in cifs_writedata_direct_alloc()
2460 INIT_WORK(&wdata->work, complete); in cifs_writedata_direct_alloc()
2468 struct address_space *mapping = page->mapping; in cifs_partialpagewrite()
2469 loff_t offset = (loff_t)page->index << PAGE_SHIFT; in cifs_partialpagewrite()
2471 int rc = -EFAULT; in cifs_partialpagewrite()
2476 if (!mapping || !mapping->host) in cifs_partialpagewrite()
2477 return -EFAULT; in cifs_partialpagewrite()
2479 inode = page->mapping->host; in cifs_partialpagewrite()
2487 return -EIO; in cifs_partialpagewrite()
2491 if (offset > mapping->host->i_size) { in cifs_partialpagewrite()
2497 if (mapping->host->i_size - offset < (loff_t)to) in cifs_partialpagewrite()
2498 to = (unsigned)(mapping->host->i_size - offset); in cifs_partialpagewrite()
2500 rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY, in cifs_partialpagewrite()
2503 bytes_written = cifs_write(open_file, open_file->pid, in cifs_partialpagewrite()
2504 write_data, to - from, &offset); in cifs_partialpagewrite()
2507 inode->i_atime = inode->i_mtime = current_time(inode); in cifs_partialpagewrite()
2513 rc = -EFAULT; in cifs_partialpagewrite()
2517 rc = -EIO; in cifs_partialpagewrite()
2537 PAGECACHE_TAG_DIRTY, tofind, wdata->pages); in wdata_alloc_and_fillpages()
2551 page = wdata->pages[i]; in wdata_prepare_pages()
2555 * (changing page->mapping to NULL), or even swizzled in wdata_prepare_pages()
2564 if (unlikely(page->mapping != mapping)) { in wdata_prepare_pages()
2569 if (!wbc->range_cyclic && page->index > end) { in wdata_prepare_pages()
2575 if (*next && (page->index != *next)) { in wdata_prepare_pages()
2581 if (wbc->sync_mode != WB_SYNC_NONE) in wdata_prepare_pages()
2595 if (page_offset(page) >= i_size_read(mapping->host)) { in wdata_prepare_pages()
2602 wdata->pages[i] = page; in wdata_prepare_pages()
2603 *next = page->index + 1; in wdata_prepare_pages()
2609 *index = wdata->pages[0]->index + 1; in wdata_prepare_pages()
2613 put_page(wdata->pages[i]); in wdata_prepare_pages()
2614 wdata->pages[i] = NULL; in wdata_prepare_pages()
2626 wdata->sync_mode = wbc->sync_mode; in wdata_send_pages()
2627 wdata->nr_pages = nr_pages; in wdata_send_pages()
2628 wdata->offset = page_offset(wdata->pages[0]); in wdata_send_pages()
2629 wdata->pagesz = PAGE_SIZE; in wdata_send_pages()
2630 wdata->tailsz = min(i_size_read(mapping->host) - in wdata_send_pages()
2631 page_offset(wdata->pages[nr_pages - 1]), in wdata_send_pages()
2633 wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz; in wdata_send_pages()
2634 wdata->pid = wdata->cfile->pid; in wdata_send_pages()
2636 rc = adjust_credits(wdata->server, &wdata->credits, wdata->bytes); in wdata_send_pages()
2640 if (wdata->cfile->invalidHandle) in wdata_send_pages()
2641 rc = -EAGAIN; in wdata_send_pages()
2643 rc = wdata->server->ops->async_writev(wdata, in wdata_send_pages()
2652 struct inode *inode = mapping->host; in cifs_writepages()
2653 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); in cifs_writepages()
2667 if (cifs_sb->ctx->wsize < PAGE_SIZE) in cifs_writepages()
2671 if (wbc->range_cyclic) { in cifs_writepages()
2672 index = mapping->writeback_index; /* Start from prev offset */ in cifs_writepages()
2673 end = -1; in cifs_writepages()
2675 index = wbc->range_start >> PAGE_SHIFT; in cifs_writepages()
2676 end = wbc->range_end >> PAGE_SHIFT; in cifs_writepages()
2677 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) in cifs_writepages()
2681 server = cifs_pick_channel(cifs_sb_master_tcon(cifs_sb)->ses); in cifs_writepages()
2700 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize, in cifs_writepages()
2707 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1; in cifs_writepages()
2712 rc = -ENOMEM; in cifs_writepages()
2719 kref_put(&wdata->refcount, cifs_writedata_release); in cifs_writepages()
2729 kref_put(&wdata->refcount, cifs_writedata_release); in cifs_writepages()
2734 wdata->credits = credits_on_stack; in cifs_writepages()
2735 wdata->cfile = cfile; in cifs_writepages()
2736 wdata->server = server; in cifs_writepages()
2739 if (!wdata->cfile) { in cifs_writepages()
2745 rc = -EBADF; in cifs_writepages()
2750 unlock_page(wdata->pages[i]); in cifs_writepages()
2752 /* send failure -- clean up the mess */ in cifs_writepages()
2754 add_credits_and_wake_if(server, &wdata->credits, 0); in cifs_writepages()
2758 wdata->pages[i]); in cifs_writepages()
2760 SetPageError(wdata->pages[i]); in cifs_writepages()
2761 end_page_writeback(wdata->pages[i]); in cifs_writepages()
2762 put_page(wdata->pages[i]); in cifs_writepages()
2767 kref_put(&wdata->refcount, cifs_writedata_release); in cifs_writepages()
2769 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) { in cifs_writepages()
2783 wbc->nr_to_write -= nr_pages; in cifs_writepages()
2784 if (wbc->nr_to_write <= 0) in cifs_writepages()
2803 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) in cifs_writepages()
2804 mapping->writeback_index = index; in cifs_writepages()
2810 set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags); in cifs_writepages()
2824 cifs_dbg(FYI, "ppw - page not up to date\n"); in cifs_writepage_locked()
2830 * or re-dirty the page with "redirty_page_for_writepage()" in in cifs_writepage_locked()
2833 * Just unlocking the page will cause the radix tree tag-bits in cifs_writepage_locked()
2840 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) in cifs_writepage_locked()
2845 mapping_set_error(page->mapping, rc); in cifs_writepage_locked()
2867 struct inode *inode = mapping->host; in cifs_write_end()
2868 struct cifsFileInfo *cfile = file->private_data; in cifs_write_end()
2869 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); in cifs_write_end()
2872 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) in cifs_write_end()
2873 pid = cfile->pid; in cifs_write_end()
2875 pid = current->tgid; in cifs_write_end()
2889 unsigned offset = pos & (PAGE_SIZE - 1); in cifs_write_end()
2911 spin_lock(&inode->i_lock); in cifs_write_end()
2912 if (pos > inode->i_size) { in cifs_write_end()
2914 inode->i_blocks = (512 - 1 + pos) >> 9; in cifs_write_end()
2916 spin_unlock(&inode->i_lock); in cifs_write_end()
2922 set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags); in cifs_write_end()
2932 struct cifs_tcon *tcon; in cifs_strict_fsync() local
2934 struct cifsFileInfo *smbfile = file->private_data; in cifs_strict_fsync()
2936 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); in cifs_strict_fsync()
2940 trace_cifs_fsync_err(inode->i_ino, rc); in cifs_strict_fsync()
2946 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n", in cifs_strict_fsync()
2957 tcon = tlink_tcon(smbfile->tlink); in cifs_strict_fsync()
2958 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) { in cifs_strict_fsync()
2959 server = tcon->ses->server; in cifs_strict_fsync()
2960 if (server->ops->flush == NULL) { in cifs_strict_fsync()
2961 rc = -ENOSYS; in cifs_strict_fsync()
2965 if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) { in cifs_strict_fsync()
2968 rc = server->ops->flush(xid, tcon, &smbfile->fid); in cifs_strict_fsync()
2973 rc = server->ops->flush(xid, tcon, &smbfile->fid); in cifs_strict_fsync()
2985 struct cifs_tcon *tcon; in cifs_fsync() local
2987 struct cifsFileInfo *smbfile = file->private_data; in cifs_fsync()
2993 trace_cifs_fsync_err(file_inode(file)->i_ino, rc); in cifs_fsync()
2999 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n", in cifs_fsync()
3002 tcon = tlink_tcon(smbfile->tlink); in cifs_fsync()
3003 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) { in cifs_fsync()
3004 server = tcon->ses->server; in cifs_fsync()
3005 if (server->ops->flush == NULL) { in cifs_fsync()
3006 rc = -ENOSYS; in cifs_fsync()
3010 if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) { in cifs_fsync()
3013 rc = server->ops->flush(xid, tcon, &smbfile->fid); in cifs_fsync()
3018 rc = server->ops->flush(xid, tcon, &smbfile->fid); in cifs_fsync()
3035 if (file->f_mode & FMODE_WRITE) in cifs_flush()
3036 rc = filemap_write_and_wait(inode->i_mapping); in cifs_flush()
3041 rc = filemap_check_wb_err(file->f_mapping, 0); in cifs_flush()
3042 trace_cifs_flush_err(inode->i_ino, rc); in cifs_flush()
3061 rc = -ENOMEM; in cifs_write_allocate_pages()
3095 kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release); in cifs_uncached_writedata_release()
3096 for (i = 0; i < wdata->nr_pages; i++) in cifs_uncached_writedata_release()
3097 put_page(wdata->pages[i]); in cifs_uncached_writedata_release()
3108 struct inode *inode = d_inode(wdata->cfile->dentry); in cifs_uncached_writev_complete()
3111 spin_lock(&inode->i_lock); in cifs_uncached_writev_complete()
3112 cifs_update_eof(cifsi, wdata->offset, wdata->bytes); in cifs_uncached_writev_complete()
3113 if (cifsi->server_eof > inode->i_size) in cifs_uncached_writev_complete()
3114 i_size_write(inode, cifsi->server_eof); in cifs_uncached_writev_complete()
3115 spin_unlock(&inode->i_lock); in cifs_uncached_writev_complete()
3117 complete(&wdata->done); in cifs_uncached_writev_complete()
3118 collect_uncached_write_data(wdata->ctx); in cifs_uncached_writev_complete()
3120 kref_put(&wdata->refcount, cifs_uncached_writedata_release); in cifs_uncached_writev_complete()
3133 copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from); in wdata_fill_from_iovec()
3134 cur_len -= copied; in wdata_fill_from_iovec()
3139 * loop, we'll likely end up getting a zero-length in wdata_fill_from_iovec()
3145 cur_len = save_len - cur_len; in wdata_fill_from_iovec()
3151 * the address in the iovec was bogus. Return -EFAULT and let in wdata_fill_from_iovec()
3155 return -EFAULT; in wdata_fill_from_iovec()
3172 struct TCP_Server_Info *server = wdata->server; in cifs_resend_wdata()
3175 if (wdata->cfile->invalidHandle) { in cifs_resend_wdata()
3176 rc = cifs_reopen_file(wdata->cfile, false); in cifs_resend_wdata()
3177 if (rc == -EAGAIN) in cifs_resend_wdata()
3190 rc = server->ops->wait_mtu_credits(server, wdata->bytes, in cifs_resend_wdata()
3195 if (wsize < wdata->bytes) { in cifs_resend_wdata()
3199 } while (wsize < wdata->bytes); in cifs_resend_wdata()
3200 wdata->credits = credits; in cifs_resend_wdata()
3202 rc = adjust_credits(server, &wdata->credits, wdata->bytes); in cifs_resend_wdata()
3205 if (wdata->cfile->invalidHandle) in cifs_resend_wdata()
3206 rc = -EAGAIN; in cifs_resend_wdata()
3209 if (wdata->mr) { in cifs_resend_wdata()
3210 wdata->mr->need_invalidate = true; in cifs_resend_wdata()
3211 smbd_deregister_mr(wdata->mr); in cifs_resend_wdata()
3212 wdata->mr = NULL; in cifs_resend_wdata()
3215 rc = server->ops->async_writev(wdata, in cifs_resend_wdata()
3222 list_add_tail(&wdata->list, wdata_list); in cifs_resend_wdata()
3227 add_credits_and_wake_if(server, &wdata->credits, 0); in cifs_resend_wdata()
3228 } while (rc == -EAGAIN); in cifs_resend_wdata()
3231 kref_put(&wdata->refcount, cifs_uncached_writedata_release); in cifs_resend_wdata()
3253 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) in cifs_write_from_iter()
3254 pid = open_file->pid; in cifs_write_from_iter()
3256 pid = current->tgid; in cifs_write_from_iter()
3258 server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses); in cifs_write_from_iter()
3266 if (open_file->invalidHandle) { in cifs_write_from_iter()
3268 if (rc == -EAGAIN) in cifs_write_from_iter()
3274 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize, in cifs_write_from_iter()
3281 if (ctx->direct_io) { in cifs_write_from_iter()
3290 from->iov_offset, from->count); in cifs_write_from_iter()
3300 (cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE; in cifs_write_from_iter()
3305 rc = -ENOMEM; in cifs_write_from_iter()
3314 wdata->page_offset = start; in cifs_write_from_iter()
3315 wdata->tailsz = in cifs_write_from_iter()
3317 cur_len - (PAGE_SIZE - start) - in cifs_write_from_iter()
3318 (nr_pages - 2) * PAGE_SIZE : in cifs_write_from_iter()
3325 rc = -ENOMEM; in cifs_write_from_iter()
3330 rc = cifs_write_allocate_pages(wdata->pages, nr_pages); in cifs_write_from_iter()
3332 kvfree(wdata->pages); in cifs_write_from_iter()
3343 put_page(wdata->pages[i]); in cifs_write_from_iter()
3344 kvfree(wdata->pages); in cifs_write_from_iter()
3354 for ( ; nr_pages > num_pages; nr_pages--) in cifs_write_from_iter()
3355 put_page(wdata->pages[nr_pages - 1]); in cifs_write_from_iter()
3357 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE); in cifs_write_from_iter()
3360 wdata->sync_mode = WB_SYNC_ALL; in cifs_write_from_iter()
3361 wdata->nr_pages = nr_pages; in cifs_write_from_iter()
3362 wdata->offset = (__u64)offset; in cifs_write_from_iter()
3363 wdata->cfile = cifsFileInfo_get(open_file); in cifs_write_from_iter()
3364 wdata->server = server; in cifs_write_from_iter()
3365 wdata->pid = pid; in cifs_write_from_iter()
3366 wdata->bytes = cur_len; in cifs_write_from_iter()
3367 wdata->pagesz = PAGE_SIZE; in cifs_write_from_iter()
3368 wdata->credits = credits_on_stack; in cifs_write_from_iter()
3369 wdata->ctx = ctx; in cifs_write_from_iter()
3370 kref_get(&ctx->refcount); in cifs_write_from_iter()
3372 rc = adjust_credits(server, &wdata->credits, wdata->bytes); in cifs_write_from_iter()
3375 if (wdata->cfile->invalidHandle) in cifs_write_from_iter()
3376 rc = -EAGAIN; in cifs_write_from_iter()
3378 rc = server->ops->async_writev(wdata, in cifs_write_from_iter()
3383 add_credits_and_wake_if(server, &wdata->credits, 0); in cifs_write_from_iter()
3384 kref_put(&wdata->refcount, in cifs_write_from_iter()
3386 if (rc == -EAGAIN) { in cifs_write_from_iter()
3388 iov_iter_advance(from, offset - saved_offset); in cifs_write_from_iter()
3394 list_add_tail(&wdata->list, wdata_list); in cifs_write_from_iter()
3396 len -= cur_len; in cifs_write_from_iter()
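
The cifs_write_from_iter() lines above carve the source iterator into chunks of at most wsize bytes and, for each chunk, derive the page count and tail size from the chunk length and the offset of the data within its first page: nr_pages = (cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE, and when more than one page is used the tail holds cur_len - (PAGE_SIZE - start) - (nr_pages - 2) * PAGE_SIZE bytes. A small self-contained sketch of that arithmetic (PAGE_SIZE assumed 4096 for the example):

#include <stdio.h>

#define PAGE_SIZE 4096UL

struct chunk_layout {
	unsigned long nr_pages;	/* pages needed for the chunk  */
	unsigned long tailsz;	/* bytes used in the last page */
};

/* start = offset of the data within the first page, cur_len = chunk bytes */
static struct chunk_layout layout_chunk(unsigned long start, unsigned long cur_len)
{
	struct chunk_layout l;

	l.nr_pages = (cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;
	l.tailsz = l.nr_pages > 1 ?
		cur_len - (PAGE_SIZE - start) - (l.nr_pages - 2) * PAGE_SIZE :
		cur_len;
	return l;
}

int main(void)
{
	/* e.g. 10000 bytes starting 100 bytes into the first page:
	 * 3 pages, with 1908 bytes landing in the tail page. */
	struct chunk_layout l = layout_chunk(100, 10000);

	printf("nr_pages=%lu tailsz=%lu\n", l.nr_pages, l.tailsz);
	return 0;
}
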
3406 struct cifs_tcon *tcon; in collect_uncached_write_data() local
3408 struct dentry *dentry = ctx->cfile->dentry; in collect_uncached_write_data()
3411 tcon = tlink_tcon(ctx->cfile->tlink); in collect_uncached_write_data()
3412 cifs_sb = CIFS_SB(dentry->d_sb); in collect_uncached_write_data()
3414 mutex_lock(&ctx->aio_mutex); in collect_uncached_write_data()
3416 if (list_empty(&ctx->list)) { in collect_uncached_write_data()
3417 mutex_unlock(&ctx->aio_mutex); in collect_uncached_write_data()
3421 rc = ctx->rc; in collect_uncached_write_data()
3428 list_for_each_entry_safe(wdata, tmp, &ctx->list, list) { in collect_uncached_write_data()
3430 if (!try_wait_for_completion(&wdata->done)) { in collect_uncached_write_data()
3431 mutex_unlock(&ctx->aio_mutex); in collect_uncached_write_data()
3435 if (wdata->result) in collect_uncached_write_data()
3436 rc = wdata->result; in collect_uncached_write_data()
3438 ctx->total_len += wdata->bytes; in collect_uncached_write_data()
3441 if (rc == -EAGAIN) { in collect_uncached_write_data()
3443 struct iov_iter tmp_from = ctx->iter; in collect_uncached_write_data()
3446 list_del_init(&wdata->list); in collect_uncached_write_data()
3448 if (ctx->direct_io) in collect_uncached_write_data()
3453 wdata->offset - ctx->pos); in collect_uncached_write_data()
3455 rc = cifs_write_from_iter(wdata->offset, in collect_uncached_write_data()
3456 wdata->bytes, &tmp_from, in collect_uncached_write_data()
3457 ctx->cfile, cifs_sb, &tmp_list, in collect_uncached_write_data()
3460 kref_put(&wdata->refcount, in collect_uncached_write_data()
3464 list_splice(&tmp_list, &ctx->list); in collect_uncached_write_data()
3468 list_del_init(&wdata->list); in collect_uncached_write_data()
3469 kref_put(&wdata->refcount, cifs_uncached_writedata_release); in collect_uncached_write_data()
3472 cifs_stats_bytes_written(tcon, ctx->total_len); in collect_uncached_write_data()
3473 set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags); in collect_uncached_write_data()
3475 ctx->rc = (rc == 0) ? ctx->total_len : rc; in collect_uncached_write_data()
3477 mutex_unlock(&ctx->aio_mutex); in collect_uncached_write_data()
3479 if (ctx->iocb && ctx->iocb->ki_complete) in collect_uncached_write_data()
3480 ctx->iocb->ki_complete(ctx->iocb, ctx->rc); in collect_uncached_write_data()
3482 complete(&ctx->done); in collect_uncached_write_data()
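
collect_uncached_write_data() handles an -EAGAIN chunk by rewinding a private copy of the source iterator: the copy starts at ctx->pos, so advancing it by wdata->offset - ctx->pos positions it exactly at the failed chunk before cifs_write_from_iter() re-issues that range. A tiny sketch of the offset arithmetic, with a flat buffer standing in for the iov_iter:

#include <stddef.h>

/* Minimal stand-in for an iov_iter over one flat buffer. */
struct flat_iter {
	const char *base;	/* data for the whole request        */
	size_t pos;		/* current offset within the request */
	size_t count;		/* bytes remaining                   */
};

static void iter_advance(struct flat_iter *it, size_t n)
{
	it->pos += n;
	it->count -= n;
}

/* Rewind a copy of the original iterator to the failed chunk, the way
 * the collector advances tmp_from by wdata->offset - ctx->pos before
 * resending that chunk. */
struct flat_iter iter_for_retry(const struct flat_iter *orig,
				size_t ctx_pos, size_t chunk_offset)
{
	struct flat_iter it = *orig;		/* private copy */

	iter_advance(&it, chunk_offset - ctx_pos);
	return it;
}
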
3488 struct file *file = iocb->ki_filp; in __cifs_writev()
3491 struct cifs_tcon *tcon; in __cifs_writev() local
3500 * In this case, fall back to non-direct write function. in __cifs_writev()
3504 cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n"); in __cifs_writev()
3513 cfile = file->private_data; in __cifs_writev()
3514 tcon = tlink_tcon(cfile->tlink); in __cifs_writev()
3516 if (!tcon->ses->server->ops->async_writev) in __cifs_writev()
3517 return -ENOSYS; in __cifs_writev()
3521 return -ENOMEM; in __cifs_writev()
3523 ctx->cfile = cifsFileInfo_get(cfile); in __cifs_writev()
3526 ctx->iocb = iocb; in __cifs_writev()
3528 ctx->pos = iocb->ki_pos; in __cifs_writev()
3531 ctx->direct_io = true; in __cifs_writev()
3532 ctx->iter = *from; in __cifs_writev()
3533 ctx->len = len; in __cifs_writev()
3537 kref_put(&ctx->refcount, cifs_aio_ctx_release); in __cifs_writev()
3543 mutex_lock(&ctx->aio_mutex); in __cifs_writev()
3545 rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from, in __cifs_writev()
3546 cfile, cifs_sb, &ctx->list, ctx); in __cifs_writev()
3554 if (!list_empty(&ctx->list)) in __cifs_writev()
3557 mutex_unlock(&ctx->aio_mutex); in __cifs_writev()
3560 kref_put(&ctx->refcount, cifs_aio_ctx_release); in __cifs_writev()
3565 kref_put(&ctx->refcount, cifs_aio_ctx_release); in __cifs_writev()
3566 return -EIOCBQUEUED; in __cifs_writev()
3569 rc = wait_for_completion_killable(&ctx->done); in __cifs_writev()
3571 mutex_lock(&ctx->aio_mutex); in __cifs_writev()
3572 ctx->rc = rc = -EINTR; in __cifs_writev()
3573 total_written = ctx->total_len; in __cifs_writev()
3574 mutex_unlock(&ctx->aio_mutex); in __cifs_writev()
3576 rc = ctx->rc; in __cifs_writev()
3577 total_written = ctx->total_len; in __cifs_writev()
3580 kref_put(&ctx->refcount, cifs_aio_ctx_release); in __cifs_writev()
3585 iocb->ki_pos += total_written; in __cifs_writev()
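
__cifs_writev() finishes both flavours of the request through the same context: once chunks are queued on ctx->list, an iocb that carries a ki_complete callback makes the syscall return -EIOCBQUEUED and leaves completion to collect_uncached_write_data(), while a synchronous caller waits killably and, if a fatal signal arrives, records -EINTR in the context but still reports the bytes already accounted in ctx->total_len. A threadless, hedged sketch of only that completion decision; the submit and wait helpers here are hypothetical stubs, not CIFS functions:

#include <errno.h>
#include <stddef.h>
#include <sys/types.h>

#ifndef EIOCBQUEUED
#define EIOCBQUEUED 529	/* stand-in for the kernel-internal status code */
#endif

struct write_ctx {
	void (*complete)(struct write_ctx *ctx, ssize_t res); /* like ki_complete */
	ssize_t total_len;	/* bytes the collector accounted so far   */
	int rc;			/* final status recorded by the collector */
};

/* Hypothetical stub: pretend one 4096-byte chunk completed synchronously. */
static int submit_chunks(struct write_ctx *ctx)
{
	ctx->total_len = 4096;
	ctx->rc = 0;
	return 0;
}

/* Hypothetical stub: would return -EINTR on a fatal signal. */
static int wait_killable(struct write_ctx *ctx)
{
	(void)ctx;
	return 0;
}

ssize_t writev_like(struct write_ctx *ctx)
{
	int rc = submit_chunks(ctx);

	if (rc && ctx->total_len == 0)
		return rc;		/* nothing was queued at all */

	if (ctx->complete)
		return -EIOCBQUEUED;	/* async: completion runs later */

	rc = wait_killable(ctx);
	if (rc == -EINTR)
		ctx->rc = rc;		/* interrupted: keep partial count */
	else
		rc = ctx->rc;

	return ctx->total_len ? ctx->total_len : rc;
}
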
3591 struct file *file = iocb->ki_filp; in cifs_direct_writev()
3593 cifs_revalidate_mapping(file->f_inode); in cifs_direct_writev()
3605 struct file *file = iocb->ki_filp; in cifs_writev()
3606 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data; in cifs_writev()
3607 struct inode *inode = file->f_mapping->host; in cifs_writev()
3609 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; in cifs_writev()
3617 down_read(&cinode->lock_sem); in cifs_writev()
3623 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from), in cifs_writev()
3624 server->vals->exclusive_lock_type, 0, in cifs_writev()
3628 rc = -EACCES; in cifs_writev()
3630 up_read(&cinode->lock_sem); in cifs_writev()
3641 struct inode *inode = file_inode(iocb->ki_filp); in cifs_strict_writev()
3643 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); in cifs_strict_writev()
3645 iocb->ki_filp->private_data; in cifs_strict_writev()
3646 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); in cifs_strict_writev() local
3654 if (cap_unix(tcon->ses) && in cifs_strict_writev()
3655 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) in cifs_strict_writev()
3656 && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) { in cifs_strict_writev()
3664 * For non-oplocked files in strict cache mode we need to write the data in cifs_strict_writev()
3665 * to the server exactly from the pos to pos+len-1 rather than flush all in cifs_strict_writev()
3667 * these pages but not on the region from pos to pos+len-1. in cifs_strict_writev()
3681 cinode->oplock = 0; in cifs_strict_writev()
3695 rdata->pages = pages; in cifs_readdata_direct_alloc()
3696 kref_init(&rdata->refcount); in cifs_readdata_direct_alloc()
3697 INIT_LIST_HEAD(&rdata->list); in cifs_readdata_direct_alloc()
3698 init_completion(&rdata->done); in cifs_readdata_direct_alloc()
3699 INIT_WORK(&rdata->work, complete); in cifs_readdata_direct_alloc()
3727 if (rdata->mr) { in cifs_readdata_release()
3728 smbd_deregister_mr(rdata->mr); in cifs_readdata_release()
3729 rdata->mr = NULL; in cifs_readdata_release()
3732 if (rdata->cfile) in cifs_readdata_release()
3733 cifsFileInfo_put(rdata->cfile); in cifs_readdata_release()
3735 kvfree(rdata->pages); in cifs_readdata_release()
3749 rc = -ENOMEM; in cifs_read_allocate_pages()
3752 rdata->pages[i] = page; in cifs_read_allocate_pages()
3759 put_page(rdata->pages[i]); in cifs_read_allocate_pages()
3760 rdata->pages[i] = NULL; in cifs_read_allocate_pages()
3773 kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release); in cifs_uncached_readdata_release()
3774 for (i = 0; i < rdata->nr_pages; i++) { in cifs_uncached_readdata_release()
3775 put_page(rdata->pages[i]); in cifs_uncached_readdata_release()
3781 * cifs_readdata_to_iov - copy data from pages in response to an iovec
3792 size_t remaining = rdata->got_bytes; in cifs_readdata_to_iov()
3795 for (i = 0; i < rdata->nr_pages; i++) { in cifs_readdata_to_iov()
3796 struct page *page = rdata->pages[i]; in cifs_readdata_to_iov()
3807 remaining -= written; in cifs_readdata_to_iov()
3811 return remaining ? -EFAULT : 0; in cifs_readdata_to_iov()
3822 complete(&rdata->done); in cifs_uncached_readv_complete()
3823 collect_uncached_read_data(rdata->ctx); in cifs_uncached_readv_complete()
3825 kref_put(&rdata->refcount, cifs_uncached_readdata_release); in cifs_uncached_readv_complete()
3835 unsigned int nr_pages = rdata->nr_pages; in uncached_fill_pages()
3836 unsigned int page_offset = rdata->page_offset; in uncached_fill_pages()
3838 rdata->got_bytes = 0; in uncached_fill_pages()
3839 rdata->tailsz = PAGE_SIZE; in uncached_fill_pages()
3841 struct page *page = rdata->pages[i]; in uncached_fill_pages()
3843 unsigned int segment_size = rdata->pagesz; in uncached_fill_pages()
3846 segment_size -= page_offset; in uncached_fill_pages()
3853 rdata->pages[i] = NULL; in uncached_fill_pages()
3854 rdata->nr_pages--; in uncached_fill_pages()
3864 rdata->tailsz = len; in uncached_fill_pages()
3865 len -= n; in uncached_fill_pages()
3871 else if (rdata->mr) in uncached_fill_pages()
3880 rdata->got_bytes += result; in uncached_fill_pages()
3883 return rdata->got_bytes > 0 && result != -ECONNABORTED ? in uncached_fill_pages()
3884 rdata->got_bytes : result; in uncached_fill_pages()
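
uncached_fill_pages() accumulates into rdata->got_bytes and ends with a deliberate rule: if any bytes arrived and the failure is not a dropped connection (-ECONNABORTED), report the byte count so the caller can consume the partial read; otherwise propagate the error. A one-function sketch of that rule:

#include <errno.h>
#include <sys/types.h>

/* got_bytes: bytes already landed; result: outcome of the last receive
 * (>= 0 on success, negative errno on failure). Mirrors the final
 * return statement above. */
ssize_t fill_result(size_t got_bytes, ssize_t result)
{
	return got_bytes > 0 && result != -ECONNABORTED
		? (ssize_t)got_bytes : result;
}
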
3899 return uncached_fill_pages(server, rdata, iter, iter->count); in cifs_uncached_copy_into_pages()
3911 /* XXX: should we pick a new channel here? */ in cifs_resend_rdata()
3912 server = rdata->server; in cifs_resend_rdata()
3915 if (rdata->cfile->invalidHandle) { in cifs_resend_rdata()
3916 rc = cifs_reopen_file(rdata->cfile, true); in cifs_resend_rdata()
3917 if (rc == -EAGAIN) in cifs_resend_rdata()
3929 rc = server->ops->wait_mtu_credits(server, rdata->bytes, in cifs_resend_rdata()
3935 if (rsize < rdata->bytes) { in cifs_resend_rdata()
3939 } while (rsize < rdata->bytes); in cifs_resend_rdata()
3940 rdata->credits = credits; in cifs_resend_rdata()
3942 rc = adjust_credits(server, &rdata->credits, rdata->bytes); in cifs_resend_rdata()
3944 if (rdata->cfile->invalidHandle) in cifs_resend_rdata()
3945 rc = -EAGAIN; in cifs_resend_rdata()
3948 if (rdata->mr) { in cifs_resend_rdata()
3949 rdata->mr->need_invalidate = true; in cifs_resend_rdata()
3950 smbd_deregister_mr(rdata->mr); in cifs_resend_rdata()
3951 rdata->mr = NULL; in cifs_resend_rdata()
3954 rc = server->ops->async_readv(rdata); in cifs_resend_rdata()
3961 list_add_tail(&rdata->list, rdata_list); in cifs_resend_rdata()
3966 add_credits_and_wake_if(server, &rdata->credits, 0); in cifs_resend_rdata()
3967 } while (rc == -EAGAIN); in cifs_resend_rdata()
3970 kref_put(&rdata->refcount, cifs_uncached_readdata_release); in cifs_resend_rdata()
3989 struct iov_iter direct_iov = ctx->iter; in cifs_send_async_read()
3991 server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses); in cifs_send_async_read()
3993 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) in cifs_send_async_read()
3994 pid = open_file->pid; in cifs_send_async_read()
3996 pid = current->tgid; in cifs_send_async_read()
3998 if (ctx->direct_io) in cifs_send_async_read()
3999 iov_iter_advance(&direct_iov, offset - ctx->pos); in cifs_send_async_read()
4002 if (open_file->invalidHandle) { in cifs_send_async_read()
4004 if (rc == -EAGAIN) in cifs_send_async_read()
4010 if (cifs_sb->ctx->rsize == 0) in cifs_send_async_read()
4011 cifs_sb->ctx->rsize = in cifs_send_async_read()
4012 server->ops->negotiate_rsize(tlink_tcon(open_file->tlink), in cifs_send_async_read()
4013 cifs_sb->ctx); in cifs_send_async_read()
4015 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, in cifs_send_async_read()
4022 if (ctx->direct_io) { in cifs_send_async_read()
4046 rc = -ENOMEM; in cifs_send_async_read()
4050 npages = (cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE; in cifs_send_async_read()
4051 rdata->page_offset = start; in cifs_send_async_read()
4052 rdata->tailsz = npages > 1 ? in cifs_send_async_read()
4053 cur_len - (PAGE_SIZE - start) - (npages - 2) * PAGE_SIZE : in cifs_send_async_read()
4064 rc = -ENOMEM; in cifs_send_async_read()
4070 kvfree(rdata->pages); in cifs_send_async_read()
4076 rdata->tailsz = PAGE_SIZE; in cifs_send_async_read()
4079 rdata->server = server; in cifs_send_async_read()
4080 rdata->cfile = cifsFileInfo_get(open_file); in cifs_send_async_read()
4081 rdata->nr_pages = npages; in cifs_send_async_read()
4082 rdata->offset = offset; in cifs_send_async_read()
4083 rdata->bytes = cur_len; in cifs_send_async_read()
4084 rdata->pid = pid; in cifs_send_async_read()
4085 rdata->pagesz = PAGE_SIZE; in cifs_send_async_read()
4086 rdata->read_into_pages = cifs_uncached_read_into_pages; in cifs_send_async_read()
4087 rdata->copy_into_pages = cifs_uncached_copy_into_pages; in cifs_send_async_read()
4088 rdata->credits = credits_on_stack; in cifs_send_async_read()
4089 rdata->ctx = ctx; in cifs_send_async_read()
4090 kref_get(&ctx->refcount); in cifs_send_async_read()
4092 rc = adjust_credits(server, &rdata->credits, rdata->bytes); in cifs_send_async_read()
4095 if (rdata->cfile->invalidHandle) in cifs_send_async_read()
4096 rc = -EAGAIN; in cifs_send_async_read()
4098 rc = server->ops->async_readv(rdata); in cifs_send_async_read()
4102 add_credits_and_wake_if(server, &rdata->credits, 0); in cifs_send_async_read()
4103 kref_put(&rdata->refcount, in cifs_send_async_read()
4105 if (rc == -EAGAIN) { in cifs_send_async_read()
4112 list_add_tail(&rdata->list, rdata_list); in cifs_send_async_read()
4114 len -= cur_len; in cifs_send_async_read()
4124 struct iov_iter *to = &ctx->iter; in collect_uncached_read_data()
4128 cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb); in collect_uncached_read_data()
4130 mutex_lock(&ctx->aio_mutex); in collect_uncached_read_data()
4132 if (list_empty(&ctx->list)) { in collect_uncached_read_data()
4133 mutex_unlock(&ctx->aio_mutex); in collect_uncached_read_data()
4137 rc = ctx->rc; in collect_uncached_read_data()
4140 list_for_each_entry_safe(rdata, tmp, &ctx->list, list) { in collect_uncached_read_data()
4142 if (!try_wait_for_completion(&rdata->done)) { in collect_uncached_read_data()
4143 mutex_unlock(&ctx->aio_mutex); in collect_uncached_read_data()
4147 if (rdata->result == -EAGAIN) { in collect_uncached_read_data()
4150 unsigned int got_bytes = rdata->got_bytes; in collect_uncached_read_data()
4152 list_del_init(&rdata->list); in collect_uncached_read_data()
4157 * happened -- fill the buffer and continue in collect_uncached_read_data()
4160 if (got_bytes && got_bytes < rdata->bytes) { in collect_uncached_read_data()
4162 if (!ctx->direct_io) in collect_uncached_read_data()
4165 kref_put(&rdata->refcount, in collect_uncached_read_data()
4171 if (ctx->direct_io) { in collect_uncached_read_data()
4173 * Re-use rdata as this is a in collect_uncached_read_data()
4181 rdata->offset + got_bytes, in collect_uncached_read_data()
4182 rdata->bytes - got_bytes, in collect_uncached_read_data()
4183 rdata->cfile, cifs_sb, in collect_uncached_read_data()
4186 kref_put(&rdata->refcount, in collect_uncached_read_data()
4190 list_splice(&tmp_list, &ctx->list); in collect_uncached_read_data()
4193 } else if (rdata->result) in collect_uncached_read_data()
4194 rc = rdata->result; in collect_uncached_read_data()
4195 else if (!ctx->direct_io) in collect_uncached_read_data()
4198 /* if there was a short read -- discard anything left */ in collect_uncached_read_data()
4199 if (rdata->got_bytes && rdata->got_bytes < rdata->bytes) in collect_uncached_read_data()
4200 rc = -ENODATA; in collect_uncached_read_data()
4202 ctx->total_len += rdata->got_bytes; in collect_uncached_read_data()
4204 list_del_init(&rdata->list); in collect_uncached_read_data()
4205 kref_put(&rdata->refcount, cifs_uncached_readdata_release); in collect_uncached_read_data()
4208 if (!ctx->direct_io) in collect_uncached_read_data()
4209 ctx->total_len = ctx->len - iov_iter_count(to); in collect_uncached_read_data()
4212 if (rc == -ENODATA) in collect_uncached_read_data()
4215 ctx->rc = (rc == 0) ? (ssize_t)ctx->total_len : rc; in collect_uncached_read_data()
4217 mutex_unlock(&ctx->aio_mutex); in collect_uncached_read_data()
4219 if (ctx->iocb && ctx->iocb->ki_complete) in collect_uncached_read_data()
4220 ctx->iocb->ki_complete(ctx->iocb, ctx->rc); in collect_uncached_read_data()
4222 complete(&ctx->done); in collect_uncached_read_data()
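
collect_uncached_read_data() treats a chunk that returned fewer bytes than requested as the end of useful data: rc becomes -ENODATA (later masked back to success), and for the non-direct path the total is recomputed as ctx->len minus whatever is still left in the destination iterator, so ctx->rc reports exactly what reached the user. A hedged sketch of that accounting, with the per-chunk and whole-request bookkeeping collapsed into one helper:

#include <errno.h>
#include <sys/types.h>

/* requested/received describe the last chunk; len is the whole request
 * and remaining is what is still unfilled in the destination buffer.
 * Returns the value the read should complete with. */
ssize_t finish_read(size_t requested, size_t received,
		    size_t len, size_t remaining)
{
	int rc = 0;

	if (received && received < requested)
		rc = -ENODATA;		/* short read: stop here */

	/* report only the bytes that actually reached the caller */
	size_t total = len - remaining;

	if (rc == -ENODATA)		/* a short read is not an error */
		rc = 0;

	return rc == 0 ? (ssize_t)total : rc;
}
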
4229 struct file *file = iocb->ki_filp; in __cifs_readv()
4232 struct cifs_tcon *tcon; in __cifs_readv() local
4234 loff_t offset = iocb->ki_pos; in __cifs_readv()
4243 cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n"); in __cifs_readv()
4252 cfile = file->private_data; in __cifs_readv()
4253 tcon = tlink_tcon(cfile->tlink); in __cifs_readv()
4255 if (!tcon->ses->server->ops->async_readv) in __cifs_readv()
4256 return -ENOSYS; in __cifs_readv()
4258 if ((file->f_flags & O_ACCMODE) == O_WRONLY) in __cifs_readv()
4263 return -ENOMEM; in __cifs_readv()
4265 ctx->cfile = cifsFileInfo_get(cfile); in __cifs_readv()
4268 ctx->iocb = iocb; in __cifs_readv()
4271 ctx->should_dirty = true; in __cifs_readv()
4274 ctx->pos = offset; in __cifs_readv()
4275 ctx->direct_io = true; in __cifs_readv()
4276 ctx->iter = *to; in __cifs_readv()
4277 ctx->len = len; in __cifs_readv()
4281 kref_put(&ctx->refcount, cifs_aio_ctx_release); in __cifs_readv()
4284 len = ctx->len; in __cifs_readv()
4288 rc = filemap_write_and_wait_range(file->f_inode->i_mapping, in __cifs_readv()
4289 offset, offset + len - 1); in __cifs_readv()
4291 kref_put(&ctx->refcount, cifs_aio_ctx_release); in __cifs_readv()
4292 return -EAGAIN; in __cifs_readv()
4297 mutex_lock(&ctx->aio_mutex); in __cifs_readv()
4299 rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx); in __cifs_readv()
4302 if (!list_empty(&ctx->list)) in __cifs_readv()
4305 mutex_unlock(&ctx->aio_mutex); in __cifs_readv()
4308 kref_put(&ctx->refcount, cifs_aio_ctx_release); in __cifs_readv()
4313 kref_put(&ctx->refcount, cifs_aio_ctx_release); in __cifs_readv()
4314 return -EIOCBQUEUED; in __cifs_readv()
4317 rc = wait_for_completion_killable(&ctx->done); in __cifs_readv()
4319 mutex_lock(&ctx->aio_mutex); in __cifs_readv()
4320 ctx->rc = rc = -EINTR; in __cifs_readv()
4321 total_read = ctx->total_len; in __cifs_readv()
4322 mutex_unlock(&ctx->aio_mutex); in __cifs_readv()
4324 rc = ctx->rc; in __cifs_readv()
4325 total_read = ctx->total_len; in __cifs_readv()
4328 kref_put(&ctx->refcount, cifs_aio_ctx_release); in __cifs_readv()
4331 iocb->ki_pos += total_read; in __cifs_readv()
4350 struct inode *inode = file_inode(iocb->ki_filp); in cifs_strict_readv()
4352 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); in cifs_strict_readv()
4354 iocb->ki_filp->private_data; in cifs_strict_readv()
4355 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); in cifs_strict_readv() local
4356 int rc = -EACCES; in cifs_strict_readv()
4361 * change - so we can't make a decision about inode invalidating. in cifs_strict_readv()
4364 * pos+len-1. in cifs_strict_readv()
4369 if (cap_unix(tcon->ses) && in cifs_strict_readv()
4370 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && in cifs_strict_readv()
4371 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) in cifs_strict_readv()
4378 down_read(&cinode->lock_sem); in cifs_strict_readv()
4379 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to), in cifs_strict_readv()
4380 tcon->ses->server->vals->shared_lock_type, in cifs_strict_readv()
4383 up_read(&cinode->lock_sem); in cifs_strict_readv()
4390 int rc = -EACCES; in cifs_read()
4396 struct cifs_tcon *tcon; in cifs_read() local
4409 rsize = min_t(unsigned int, cifs_sb->ctx->rsize, CIFSMaxBufSize); in cifs_read()
4411 if (file->private_data == NULL) { in cifs_read()
4412 rc = -EBADF; in cifs_read()
4416 open_file = file->private_data; in cifs_read()
4417 tcon = tlink_tcon(open_file->tlink); in cifs_read()
4418 server = cifs_pick_channel(tcon->ses); in cifs_read()
4420 if (!server->ops->sync_read) { in cifs_read()
4422 return -ENOSYS; in cifs_read()
4425 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) in cifs_read()
4426 pid = open_file->pid; in cifs_read()
4428 pid = current->tgid; in cifs_read()
4430 if ((file->f_flags & O_ACCMODE) == O_WRONLY) in cifs_read()
4436 current_read_size = min_t(uint, read_size - total_read, in cifs_read()
4443 if (!(tcon->ses->capabilities & in cifs_read()
4444 tcon->ses->server->vals->cap_large_files)) { in cifs_read()
4448 if (open_file->invalidHandle) { in cifs_read()
4454 io_parms.tcon = tcon; in cifs_read()
4458 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms, in cifs_read()
4461 } while (rc == -EAGAIN); in cifs_read()
4471 cifs_stats_bytes_read(tcon, total_read); in cifs_read()
4486 struct page *page = vmf->page; in cifs_page_mkwrite()
4522 vma->vm_ops = &cifs_file_vm_ops; in cifs_file_strict_mmap()
4541 vma->vm_ops = &cifs_file_vm_ops; in cifs_file_mmap()
4554 got_bytes = rdata->got_bytes; in cifs_readv_complete()
4555 for (i = 0; i < rdata->nr_pages; i++) { in cifs_readv_complete()
4556 struct page *page = rdata->pages[i]; in cifs_readv_complete()
4558 if (rdata->result == 0 || in cifs_readv_complete()
4559 (rdata->result == -EAGAIN && got_bytes)) { in cifs_readv_complete()
4565 if (rdata->result == 0 || in cifs_readv_complete()
4566 (rdata->result == -EAGAIN && got_bytes)) in cifs_readv_complete()
4567 cifs_readpage_to_fscache(rdata->mapping->host, page); in cifs_readv_complete()
4571 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes); in cifs_readv_complete()
4574 rdata->pages[i] = NULL; in cifs_readv_complete()
4576 kref_put(&rdata->refcount, cifs_readdata_release); in cifs_readv_complete()
4588 unsigned int nr_pages = rdata->nr_pages; in readpages_fill_pages()
4589 unsigned int page_offset = rdata->page_offset; in readpages_fill_pages()
4592 eof = CIFS_I(rdata->mapping->host)->server_eof; in readpages_fill_pages()
4593 eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0; in readpages_fill_pages()
4596 rdata->got_bytes = 0; in readpages_fill_pages()
4597 rdata->tailsz = PAGE_SIZE; in readpages_fill_pages()
4599 struct page *page = rdata->pages[i]; in readpages_fill_pages()
4600 unsigned int to_read = rdata->pagesz; in readpages_fill_pages()
4604 to_read -= page_offset; in readpages_fill_pages()
4611 len -= to_read; in readpages_fill_pages()
4614 zero_user(page, len + page_offset, to_read - len); in readpages_fill_pages()
4615 n = rdata->tailsz = len; in readpages_fill_pages()
4617 } else if (page->index > eof_index) { in readpages_fill_pages()
4631 rdata->pages[i] = NULL; in readpages_fill_pages()
4632 rdata->nr_pages--; in readpages_fill_pages()
4638 rdata->pages[i] = NULL; in readpages_fill_pages()
4639 rdata->nr_pages--; in readpages_fill_pages()
4647 else if (rdata->mr) in readpages_fill_pages()
4656 rdata->got_bytes += result; in readpages_fill_pages()
4659 return rdata->got_bytes > 0 && result != -ECONNABORTED ? in readpages_fill_pages()
4660 rdata->got_bytes : result; in readpages_fill_pages()
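
readpages_fill_pages() clamps the receive against the cached end-of-file: pages wholly past eof_index ((eof - 1) >> PAGE_SHIFT) are dropped from the rdata, and the page that straddles EOF is filled only up to the valid length with the remainder zeroed via zero_user(). A simplified sketch of that per-page decision, assuming 4 KiB pages and ignoring the partial page_offset case:

#include <stddef.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12

enum page_action { PAGE_READ_FULL, PAGE_READ_PARTIAL, PAGE_DROP };

/* index: page index within the file, remaining: bytes of real data left
 * in this receive, eof: file size. *fill is set to the bytes to read
 * into the page; anything after it would be zeroed. */
enum page_action classify_page(unsigned long index, size_t remaining,
			       unsigned long long eof, size_t *fill)
{
	unsigned long eof_index = eof ? (unsigned long)((eof - 1) >> PAGE_SHIFT) : 0;

	if (!eof || index > eof_index || remaining == 0) {
		*fill = 0;
		return PAGE_DROP;		/* nothing valid for this page */
	}
	if (remaining >= PAGE_SIZE) {
		*fill = PAGE_SIZE;
		return PAGE_READ_FULL;
	}
	*fill = remaining;			/* tail page: zero the rest */
	return PAGE_READ_PARTIAL;
}
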
4675 return readpages_fill_pages(server, rdata, iter, iter->count); in cifs_readpages_copy_into_pages()
4681 struct cifsFileInfo *open_file = ractl->file->private_data; in cifs_readahead()
4682 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(ractl->file); in cifs_readahead()
4687 bool caching = fscache_cookie_enabled(cifs_inode_cookie(ractl->mapping->host)) && in cifs_readahead()
4688 cifs_inode_cookie(ractl->mapping->host)->cache_priv; in cifs_readahead()
4693 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) in cifs_readahead()
4694 pid = open_file->pid; in cifs_readahead()
4696 pid = current->tgid; in cifs_readahead()
4699 server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses); in cifs_readahead()
4702 __func__, ractl->file, ractl->mapping, readahead_count(ractl)); in cifs_readahead()
4705 * Chop the readahead request up into rsize-sized read requests. in cifs_readahead()
4707 while ((nr_pages = readahead_count(ractl) - last_batch_size)) { in cifs_readahead()
4722 ractl->mapping->host, index, nr_pages, in cifs_readahead()
4737 if (cifs_readpage_from_fscache(ractl->mapping->host, in cifs_readahead()
4738 &folio->page) < 0) { in cifs_readahead()
4748 cache_nr_pages--; in cifs_readahead()
4755 if (open_file->invalidHandle) { in cifs_readahead()
4758 if (rc == -EAGAIN) in cifs_readahead()
4764 if (cifs_sb->ctx->rsize == 0) in cifs_readahead()
4765 cifs_sb->ctx->rsize = in cifs_readahead()
4766 server->ops->negotiate_rsize(tlink_tcon(open_file->tlink), in cifs_readahead()
4767 cifs_sb->ctx); in cifs_readahead()
4769 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, in cifs_readahead()
4774 nr_pages = min_t(size_t, nr_pages, next_cached - index); in cifs_readahead()
4794 got = __readahead_batch(ractl, rdata->pages, nr_pages); in cifs_readahead()
4801 rdata->nr_pages = nr_pages; in cifs_readahead()
4802 rdata->bytes = readahead_batch_length(ractl); in cifs_readahead()
4803 rdata->cfile = cifsFileInfo_get(open_file); in cifs_readahead()
4804 rdata->server = server; in cifs_readahead()
4805 rdata->mapping = ractl->mapping; in cifs_readahead()
4806 rdata->offset = readahead_pos(ractl); in cifs_readahead()
4807 rdata->pid = pid; in cifs_readahead()
4808 rdata->pagesz = PAGE_SIZE; in cifs_readahead()
4809 rdata->tailsz = PAGE_SIZE; in cifs_readahead()
4810 rdata->read_into_pages = cifs_readpages_read_into_pages; in cifs_readahead()
4811 rdata->copy_into_pages = cifs_readpages_copy_into_pages; in cifs_readahead()
4812 rdata->credits = credits_on_stack; in cifs_readahead()
4814 rc = adjust_credits(server, &rdata->credits, rdata->bytes); in cifs_readahead()
4816 if (rdata->cfile->invalidHandle) in cifs_readahead()
4817 rc = -EAGAIN; in cifs_readahead()
4819 rc = server->ops->async_readv(rdata); in cifs_readahead()
4823 add_credits_and_wake_if(server, &rdata->credits, 0); in cifs_readahead()
4824 for (i = 0; i < rdata->nr_pages; i++) { in cifs_readahead()
4825 page = rdata->pages[i]; in cifs_readahead()
4830 kref_put(&rdata->refcount, cifs_readdata_release); in cifs_readahead()
4834 kref_put(&rdata->refcount, cifs_readdata_release); in cifs_readahead()
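
cifs_readahead() walks the readahead window in rsize-sized batches, and when part of the window is already held in the local fscache it satisfies those folios from the cache and caps the next network batch at the boundary (nr_pages = min(nr_pages, next_cached - index)). A hedged sketch of just that batch-size clamp, with the cache lookup result passed in rather than queried:

#include <stddef.h>

#define NO_CACHED ((unsigned long)-1)

/* Cap a network readahead batch so it stops where locally cached pages
 * begin; next_cached is the index of the first cached page at or after
 * index, or NO_CACHED if none. */
size_t batch_pages(unsigned long index, unsigned long next_cached,
		   size_t want, size_t rsize_pages)
{
	size_t n = want < rsize_pages ? want : rsize_pages;

	if (next_cached != NO_CACHED && next_cached > index &&
	    next_cached - index < n)
		n = next_cached - index;

	return n;
}
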
4866 file_inode(file)->i_atime = current_time(file_inode(file)); in cifs_readpage_worker()
4867 if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime))) in cifs_readpage_worker()
4868 file_inode(file)->i_atime = file_inode(file)->i_mtime; in cifs_readpage_worker()
4870 file_inode(file)->i_atime = current_time(file_inode(file)); in cifs_readpage_worker()
4873 memset(read_data + rc, 0, PAGE_SIZE - rc); in cifs_readpage_worker()
4893 struct page *page = &folio->page; in cifs_read_folio()
4895 int rc = -EACCES; in cifs_read_folio()
4900 if (file->private_data == NULL) { in cifs_read_folio()
4901 rc = -EBADF; in cifs_read_folio()
4919 spin_lock(&cifs_inode->open_file_lock); in is_inode_writable()
4920 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { in is_inode_writable()
4921 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) { in is_inode_writable()
4922 spin_unlock(&cifs_inode->open_file_lock); in is_inode_writable()
4926 spin_unlock(&cifs_inode->open_file_lock); in is_inode_writable()
4931 open for write - to avoid races with writepage extending
4932 the file - in the future we could consider allowing
4945 cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb); in is_size_safe_to_change()
4946 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) { in is_size_safe_to_change()
4952 if (i_size_read(&cifsInode->netfs.inode) < end_of_file) in is_size_safe_to_change()
4966 loff_t offset = pos & (PAGE_SIZE - 1); in cifs_write_begin()
4977 rc = -ENOMEM; in cifs_write_begin()
4998 if (CIFS_CACHE_READ(CIFS_I(mapping->host))) { in cifs_write_begin()
4999 i_size = i_size_read(mapping->host); in cifs_write_begin()
5016 if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) { in cifs_write_begin()
5027 /* we could try using another file handle if there is one - in cifs_write_begin()
5046 fscache_note_page_release(cifs_inode_cookie(folio->mapping->host)); in cifs_release_folio()
5068 cifs_dbg(FYI, "Launder page: %lu\n", folio->index); in cifs_launder_folio()
5071 rc = cifs_writepage_locked(&folio->page, &wbc); in cifs_launder_folio()
5081 struct inode *inode = d_inode(cfile->dentry); in cifs_oplock_break()
5083 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); in cifs_oplock_break() local
5084 struct TCP_Server_Info *server = tcon->ses->server; in cifs_oplock_break()
5088 wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS, in cifs_oplock_break()
5091 server->ops->downgrade_oplock(server, cinode, cfile->oplock_level, in cifs_oplock_break()
5092 cfile->oplock_epoch, &purge_cache); in cifs_oplock_break()
5098 cinode->oplock = 0; in cifs_oplock_break()
5101 if (inode && S_ISREG(inode->i_mode)) { in cifs_oplock_break()
5106 rc = filemap_fdatawrite(inode->i_mapping); in cifs_oplock_break()
5108 rc = filemap_fdatawait(inode->i_mapping); in cifs_oplock_break()
5109 mapping_set_error(inode->i_mapping, rc); in cifs_oplock_break()
5128 if (!cfile->oplock_break_cancelled) { in cifs_oplock_break()
5129 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid, in cifs_oplock_break()
5142 * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
5154 return -EINVAL; in cifs_direct_io()
5160 struct cifsFileInfo *cfile = swap_file->private_data; in cifs_swap_activate()
5161 struct inode *inode = swap_file->f_mapping->host; in cifs_swap_activate()
5167 if (!swap_file->f_mapping->a_ops->swap_rw) in cifs_swap_activate()
5169 return -EINVAL; in cifs_swap_activate()
5171 spin_lock(&inode->i_lock); in cifs_swap_activate()
5172 blocks = inode->i_blocks; in cifs_swap_activate()
5173 isize = inode->i_size; in cifs_swap_activate()
5174 spin_unlock(&inode->i_lock); in cifs_swap_activate()
5177 return -EINVAL; in cifs_swap_activate()
5179 *span = sis->pages; in cifs_swap_activate()
5192 cfile->swapfile = true; in cifs_swap_activate()
5199 sis->flags |= SWP_FS_OPS; in cifs_swap_activate()
5200 return add_swap_extent(sis, 0, sis->max, 0); in cifs_swap_activate()
5205 struct cifsFileInfo *cfile = file->private_data; in cifs_swap_deactivate()
5212 cfile->swapfile = false; in cifs_swap_deactivate()
5225 cifs_inode_cookie(mapping->host)); in cifs_dirty_folio()