1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * Common Internet FileSystem (CIFS) client
8 *
9 */
10
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/mount.h>
16 #include <linux/slab.h>
17 #include <linux/init.h>
18 #include <linux/list.h>
19 #include <linux/seq_file.h>
20 #include <linux/vfs.h>
21 #include <linux/mempool.h>
22 #include <linux/delay.h>
23 #include <linux/kthread.h>
24 #include <linux/freezer.h>
25 #include <linux/namei.h>
26 #include <linux/random.h>
27 #include <linux/uuid.h>
28 #include <linux/xattr.h>
29 #include <uapi/linux/magic.h>
30 #include <net/ipv6.h>
31 #include "cifsfs.h"
32 #include "cifspdu.h"
33 #define DECLARE_GLOBALS_HERE
34 #include "cifsglob.h"
35 #include "cifsproto.h"
36 #include "cifs_debug.h"
37 #include "cifs_fs_sb.h"
38 #include <linux/mm.h>
39 #include <linux/key-type.h>
40 #include "cifs_spnego.h"
41 #include "fscache.h"
42 #ifdef CONFIG_CIFS_DFS_UPCALL
43 #include "dfs_cache.h"
44 #endif
45 #ifdef CONFIG_CIFS_SWN_UPCALL
46 #include "netlink.h"
47 #endif
48 #include "fs_context.h"
49 #include "cached_dir.h"
50
51 /*
52 * DOS dates from 1980/1/1 through 2107/12/31
53 * Protocol specifications indicate the range should be to 119, which
54 * limits maximum year to 2099. But this range has not been checked.
55 */
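/*
 * These values are packed in the classic DOS/FAT layout, i.e.
 * date = (years since 1980) << 9 | month << 5 | day and
 * time = hours << 11 | minutes << 5 | (seconds / 2),
 * so SMB_TIME_MAX below encodes 23:59:58.
 */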
56 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
57 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
58 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
59
60 int cifsFYI = 0;
61 bool traceSMB;
62 bool enable_oplocks = true;
63 bool linuxExtEnabled = true;
64 bool lookupCacheEnabled = true;
65 bool disable_legacy_dialects; /* false by default */
66 bool enable_gcm_256 = true;
67 bool require_gcm_256; /* false by default */
68 bool enable_negotiate_signing; /* false by default */
69 unsigned int global_secflags = CIFSSEC_DEF;
70 /* unsigned int ntlmv2_support = 0; */
71 unsigned int sign_CIFS_PDUs = 1;
72
73 /*
74 * Global transaction id (XID) information
75 */
76 unsigned int GlobalCurrentXid; /* protected by GlobalMid_Lock */
77 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
78 unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Lock */
79 spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
80
81 /*
82 * Global counters, updated atomically
83 */
84 atomic_t sesInfoAllocCount;
85 atomic_t tconInfoAllocCount;
86 atomic_t tcpSesNextId;
87 atomic_t tcpSesAllocCount;
88 atomic_t tcpSesReconnectCount;
89 atomic_t tconInfoReconnectCount;
90
91 atomic_t mid_count;
92 atomic_t buf_alloc_count;
93 atomic_t small_buf_alloc_count;
94 #ifdef CONFIG_CIFS_STATS2
95 atomic_t total_buf_alloc_count;
96 atomic_t total_small_buf_alloc_count;
97 #endif /* STATS2 */
98 struct list_head cifs_tcp_ses_list;
99 spinlock_t cifs_tcp_ses_lock;
100 static const struct super_operations cifs_super_ops;
101 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
102 module_param(CIFSMaxBufSize, uint, 0444);
103 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
104 "for CIFS requests. "
105 "Default: 16384 Range: 8192 to 130048");
106 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
107 module_param(cifs_min_rcv, uint, 0444);
108 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
109 "1 to 64");
110 unsigned int cifs_min_small = 30;
111 module_param(cifs_min_small, uint, 0444);
112 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
113 "Range: 2 to 256");
114 unsigned int cifs_max_pending = CIFS_MAX_REQ;
115 module_param(cifs_max_pending, uint, 0444);
116 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
117 "CIFS/SMB1 dialect (N/A for SMB3) "
118 "Default: 32767 Range: 2 to 32767.");
119 #ifdef CONFIG_CIFS_STATS2
120 unsigned int slow_rsp_threshold = 1;
121 module_param(slow_rsp_threshold, uint, 0644);
122 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
123 "before logging that a response is delayed. "
124 "Default: 1 (if set to 0 disables msg).");
125 #endif /* STATS2 */
126
127 module_param(enable_oplocks, bool, 0644);
128 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
129
130 module_param(enable_gcm_256, bool, 0644);
131 MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");
132
133 module_param(require_gcm_256, bool, 0644);
134 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
135
136 module_param(enable_negotiate_signing, bool, 0644);
137 MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
138
139 module_param(disable_legacy_dialects, bool, 0644);
140 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
141 "helpful to restrict the ability to "
142 "override the default dialects (SMB2.1, "
143 "SMB3 and SMB3.02) on mount with old "
144 "dialects (CIFS/SMB1 and SMB2) since "
145 "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
146 " and less secure. Default: n/N/0");
147
148 extern mempool_t *cifs_sm_req_poolp;
149 extern mempool_t *cifs_req_poolp;
150 extern mempool_t *cifs_mid_poolp;
151
152 struct workqueue_struct *cifsiod_wq;
153 struct workqueue_struct *decrypt_wq;
154 struct workqueue_struct *fileinfo_put_wq;
155 struct workqueue_struct *cifsoplockd_wq;
156 struct workqueue_struct *deferredclose_wq;
157 __u32 cifs_lock_secret;
158
159 /*
160 * Bumps refcount for cifs super block.
161 * Note that it should only be called if a reference to the VFS super block is
162 * already held, e.g. in open-type syscalls context. Otherwise it can race with
163 * atomic_dec_and_test in deactivate_locked_super.
164 */
165 void
166 cifs_sb_active(struct super_block *sb)
167 {
168 struct cifs_sb_info *server = CIFS_SB(sb);
169
170 if (atomic_inc_return(&server->active) == 1)
171 atomic_inc(&sb->s_active);
172 }
173
174 void
175 cifs_sb_deactive(struct super_block *sb)
176 {
177 struct cifs_sb_info *server = CIFS_SB(sb);
178
179 if (atomic_dec_and_test(&server->active))
180 deactivate_super(sb);
181 }
182
183 static int
184 cifs_read_super(struct super_block *sb)
185 {
186 struct inode *inode;
187 struct cifs_sb_info *cifs_sb;
188 struct cifs_tcon *tcon;
189 struct timespec64 ts;
190 int rc = 0;
191
192 cifs_sb = CIFS_SB(sb);
193 tcon = cifs_sb_master_tcon(cifs_sb);
194
195 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
196 sb->s_flags |= SB_POSIXACL;
197
198 if (tcon->snapshot_time)
199 sb->s_flags |= SB_RDONLY;
200
201 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
202 sb->s_maxbytes = MAX_LFS_FILESIZE;
203 else
204 sb->s_maxbytes = MAX_NON_LFS;
205
206 /*
207 * Some very old servers like DOS and OS/2 used 2 second granularity
208 * (while all current servers use 100ns granularity - see MS-DTYP)
209 * but 1 second is the maximum allowed granularity for the VFS
210 * so for old servers set time granularity to 1 second while for
211 * everything else (current servers) set it to 100ns.
212 */
213 if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
214 ((tcon->ses->capabilities &
215 tcon->ses->server->vals->cap_nt_find) == 0) &&
216 !tcon->unix_ext) {
217 sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
218 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
219 sb->s_time_min = ts.tv_sec;
220 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
221 cpu_to_le16(SMB_TIME_MAX), 0);
222 sb->s_time_max = ts.tv_sec;
223 } else {
224 /*
225 * Almost every server, including all SMB2+, uses DCE TIME
226 * ie 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC
227 */
228 sb->s_time_gran = 100;
229 ts = cifs_NTtimeToUnix(0);
230 sb->s_time_min = ts.tv_sec;
231 ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
232 sb->s_time_max = ts.tv_sec;
233 }
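/*
 * Net effect (for illustration): with DOS dates timestamps are limited to
 * 1980-01-01 through 2107-12-31, while NT time (100ns units since 1601)
 * extends the representable range to roughly the year 30828.
 */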
234
235 sb->s_magic = CIFS_SUPER_MAGIC;
236 sb->s_op = &cifs_super_ops;
237 sb->s_xattr = cifs_xattr_handlers;
238 rc = super_setup_bdi(sb);
239 if (rc)
240 goto out_no_root;
241 /* tune readahead according to rsize if readahead size not set on mount */
242 if (cifs_sb->ctx->rsize == 0)
243 cifs_sb->ctx->rsize =
244 tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
245 if (cifs_sb->ctx->rasize)
246 sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
247 else
248 sb->s_bdi->ra_pages = cifs_sb->ctx->rsize / PAGE_SIZE;
249
250 sb->s_blocksize = CIFS_MAX_MSGSIZE;
251 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
252 inode = cifs_root_iget(sb);
253
254 if (IS_ERR(inode)) {
255 rc = PTR_ERR(inode);
256 goto out_no_root;
257 }
258
259 if (tcon->nocase)
260 sb->s_d_op = &cifs_ci_dentry_ops;
261 else
262 sb->s_d_op = &cifs_dentry_ops;
263
264 sb->s_root = d_make_root(inode);
265 if (!sb->s_root) {
266 rc = -ENOMEM;
267 goto out_no_root;
268 }
269
270 #ifdef CONFIG_CIFS_NFSD_EXPORT
271 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
272 cifs_dbg(FYI, "export ops supported\n");
273 sb->s_export_op = &cifs_export_ops;
274 }
275 #endif /* CONFIG_CIFS_NFSD_EXPORT */
276
277 return 0;
278
279 out_no_root:
280 cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
281 return rc;
282 }
283
284 static void cifs_kill_sb(struct super_block *sb)
285 {
286 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
287
288 /*
289 * We need to release all dentries for the cached directories
290 * before we kill the sb.
291 */
292 if (cifs_sb->root) {
293 close_all_cached_dirs(cifs_sb);
294
295 /* finally release root dentry */
296 dput(cifs_sb->root);
297 cifs_sb->root = NULL;
298 }
299
300 kill_anon_super(sb);
301 cifs_umount(cifs_sb);
302 }
303
304 static int
305 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
306 {
307 struct super_block *sb = dentry->d_sb;
308 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
309 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
310 struct TCP_Server_Info *server = tcon->ses->server;
311 unsigned int xid;
312 int rc = 0;
313
314 xid = get_xid();
315
316 if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
317 buf->f_namelen =
318 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
319 else
320 buf->f_namelen = PATH_MAX;
321
322 buf->f_fsid.val[0] = tcon->vol_serial_number;
323 /* use part of the create time for more randomness (see man statfs) */
324 buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
325
326 buf->f_files = 0; /* undefined */
327 buf->f_ffree = 0; /* unlimited */
328
329 if (server->ops->queryfs)
330 rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
331
332 free_xid(xid);
333 return rc;
334 }
335
336 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
337 {
338 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
339 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
340 struct TCP_Server_Info *server = tcon->ses->server;
341
342 if (server->ops->fallocate)
343 return server->ops->fallocate(file, tcon, mode, off, len);
344
345 return -EOPNOTSUPP;
346 }
347
348 static int cifs_permission(struct user_namespace *mnt_userns,
349 struct inode *inode, int mask)
350 {
351 struct cifs_sb_info *cifs_sb;
352
353 cifs_sb = CIFS_SB(inode->i_sb);
354
355 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
356 if ((mask & MAY_EXEC) && !execute_ok(inode))
357 return -EACCES;
358 else
359 return 0;
360 } else /* file mode might have been restricted at mount time
361 on the client (above and beyond ACL on servers) for
362 servers which do not support setting and viewing mode bits,
363 so allowing client to check permissions is useful */
364 return generic_permission(&init_user_ns, inode, mask);
365 }
366
367 static struct kmem_cache *cifs_inode_cachep;
368 static struct kmem_cache *cifs_req_cachep;
369 static struct kmem_cache *cifs_mid_cachep;
370 static struct kmem_cache *cifs_sm_req_cachep;
371 mempool_t *cifs_sm_req_poolp;
372 mempool_t *cifs_req_poolp;
373 mempool_t *cifs_mid_poolp;
374
375 static struct inode *
376 cifs_alloc_inode(struct super_block *sb)
377 {
378 struct cifsInodeInfo *cifs_inode;
379 cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
380 if (!cifs_inode)
381 return NULL;
382 cifs_inode->cifsAttrs = 0x20; /* default */
383 cifs_inode->time = 0;
384 /*
385 * Until the file is open and we have gotten oplock info back from the
386 * server, can not assume caching of file data or metadata.
387 */
388 cifs_set_oplock_level(cifs_inode, 0);
389 cifs_inode->flags = 0;
390 spin_lock_init(&cifs_inode->writers_lock);
391 cifs_inode->writers = 0;
392 cifs_inode->netfs.inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
393 cifs_inode->server_eof = 0;
394 cifs_inode->uniqueid = 0;
395 cifs_inode->createtime = 0;
396 cifs_inode->epoch = 0;
397 spin_lock_init(&cifs_inode->open_file_lock);
398 generate_random_uuid(cifs_inode->lease_key);
399 cifs_inode->symlink_target = NULL;
400
401 /*
402 * Can not set i_flags here - they get immediately overwritten to zero
403 * by the VFS.
404 */
405 /* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
406 INIT_LIST_HEAD(&cifs_inode->openFileList);
407 INIT_LIST_HEAD(&cifs_inode->llist);
408 INIT_LIST_HEAD(&cifs_inode->deferred_closes);
409 spin_lock_init(&cifs_inode->deferred_lock);
410 return &cifs_inode->netfs.inode;
411 }
412
413 static void
414 cifs_free_inode(struct inode *inode)
415 {
416 struct cifsInodeInfo *cinode = CIFS_I(inode);
417
418 if (S_ISLNK(inode->i_mode))
419 kfree(cinode->symlink_target);
420 kmem_cache_free(cifs_inode_cachep, cinode);
421 }
422
423 static void
424 cifs_evict_inode(struct inode *inode)
425 {
426 truncate_inode_pages_final(&inode->i_data);
427 if (inode->i_state & I_PINNING_FSCACHE_WB)
428 cifs_fscache_unuse_inode_cookie(inode, true);
429 cifs_fscache_release_inode_cookie(inode);
430 clear_inode(inode);
431 }
432
433 static void
434 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
435 {
436 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
437 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
438
439 seq_puts(s, ",addr=");
440
441 switch (server->dstaddr.ss_family) {
442 case AF_INET:
443 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
444 break;
445 case AF_INET6:
446 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
447 if (sa6->sin6_scope_id)
448 seq_printf(s, "%%%u", sa6->sin6_scope_id);
449 break;
450 default:
451 seq_puts(s, "(unknown)");
452 }
453 if (server->rdma)
454 seq_puts(s, ",rdma");
455 }
456
457 static void
458 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
459 {
460 if (ses->sectype == Unspecified) {
461 if (ses->user_name == NULL)
462 seq_puts(s, ",sec=none");
463 return;
464 }
465
466 seq_puts(s, ",sec=");
467
468 switch (ses->sectype) {
469 case NTLMv2:
470 seq_puts(s, "ntlmv2");
471 break;
472 case Kerberos:
473 seq_puts(s, "krb5");
474 break;
475 case RawNTLMSSP:
476 seq_puts(s, "ntlmssp");
477 break;
478 default:
479 /* shouldn't ever happen */
480 seq_puts(s, "unknown");
481 break;
482 }
483
484 if (ses->sign)
485 seq_puts(s, "i");
486
487 if (ses->sectype == Kerberos)
488 seq_printf(s, ",cruid=%u",
489 from_kuid_munged(&init_user_ns, ses->cred_uid));
490 }
491
492 static void
493 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
494 {
495 seq_puts(s, ",cache=");
496
497 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
498 seq_puts(s, "strict");
499 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
500 seq_puts(s, "none");
501 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
502 seq_puts(s, "singleclient"); /* assume only one client access */
503 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
504 seq_puts(s, "ro"); /* read only caching assumed */
505 else
506 seq_puts(s, "loose");
507 }
508
509 /*
510 * cifs_show_devname() is used so we show the mount device name with correct
511 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
512 */
513 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
514 {
515 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
516 char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
517
518 if (devname == NULL)
519 seq_puts(m, "none");
520 else {
521 convert_delimiter(devname, '/');
522 /* escape all spaces in share names */
523 seq_escape(m, devname, " \t");
524 kfree(devname);
525 }
526 return 0;
527 }
528
529 /*
530 * cifs_show_options() is for displaying mount options in /proc/mounts.
531 * Not all settable options are displayed but most of the important
532 * ones are.
533 */
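/*
 * An illustrative /proc/mounts entry produced by this function:
 *   //server/share /mnt cifs rw,vers=3.1.1,sec=ntlmssp,cache=strict,
 *   username=user,uid=0,forceuid,gid=0,noforcegid,addr=192.0.2.1,...
 */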
534 static int
535 cifs_show_options(struct seq_file *s, struct dentry *root)
536 {
537 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
538 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
539 struct sockaddr *srcaddr;
540 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
541
542 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
543 cifs_show_security(s, tcon->ses);
544 cifs_show_cache_flavor(s, cifs_sb);
545
546 if (tcon->no_lease)
547 seq_puts(s, ",nolease");
548 if (cifs_sb->ctx->multiuser)
549 seq_puts(s, ",multiuser");
550 else if (tcon->ses->user_name)
551 seq_show_option(s, "username", tcon->ses->user_name);
552
553 if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
554 seq_show_option(s, "domain", tcon->ses->domainName);
555
556 if (srcaddr->sa_family != AF_UNSPEC) {
557 struct sockaddr_in *saddr4;
558 struct sockaddr_in6 *saddr6;
559 saddr4 = (struct sockaddr_in *)srcaddr;
560 saddr6 = (struct sockaddr_in6 *)srcaddr;
561 if (srcaddr->sa_family == AF_INET6)
562 seq_printf(s, ",srcaddr=%pI6c",
563 &saddr6->sin6_addr);
564 else if (srcaddr->sa_family == AF_INET)
565 seq_printf(s, ",srcaddr=%pI4",
566 &saddr4->sin_addr.s_addr);
567 else
568 seq_printf(s, ",srcaddr=BAD-AF:%i",
569 (int)(srcaddr->sa_family));
570 }
571
572 seq_printf(s, ",uid=%u",
573 from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
574 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
575 seq_puts(s, ",forceuid");
576 else
577 seq_puts(s, ",noforceuid");
578
579 seq_printf(s, ",gid=%u",
580 from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
581 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
582 seq_puts(s, ",forcegid");
583 else
584 seq_puts(s, ",noforcegid");
585
586 cifs_show_address(s, tcon->ses->server);
587
588 if (!tcon->unix_ext)
589 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
590 cifs_sb->ctx->file_mode,
591 cifs_sb->ctx->dir_mode);
592 if (cifs_sb->ctx->iocharset)
593 seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
594 if (tcon->seal)
595 seq_puts(s, ",seal");
596 else if (tcon->ses->server->ignore_signature)
597 seq_puts(s, ",signloosely");
598 if (tcon->nocase)
599 seq_puts(s, ",nocase");
600 if (tcon->nodelete)
601 seq_puts(s, ",nodelete");
602 if (cifs_sb->ctx->no_sparse)
603 seq_puts(s, ",nosparse");
604 if (tcon->local_lease)
605 seq_puts(s, ",locallease");
606 if (tcon->retry)
607 seq_puts(s, ",hard");
608 else
609 seq_puts(s, ",soft");
610 if (tcon->use_persistent)
611 seq_puts(s, ",persistenthandles");
612 else if (tcon->use_resilient)
613 seq_puts(s, ",resilienthandles");
614 if (tcon->posix_extensions)
615 seq_puts(s, ",posix");
616 else if (tcon->unix_ext)
617 seq_puts(s, ",unix");
618 else
619 seq_puts(s, ",nounix");
620 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
621 seq_puts(s, ",nodfs");
622 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
623 seq_puts(s, ",posixpaths");
624 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
625 seq_puts(s, ",setuids");
626 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
627 seq_puts(s, ",idsfromsid");
628 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
629 seq_puts(s, ",serverino");
630 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
631 seq_puts(s, ",rwpidforward");
632 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
633 seq_puts(s, ",forcemand");
634 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
635 seq_puts(s, ",nouser_xattr");
636 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
637 seq_puts(s, ",mapchars");
638 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
639 seq_puts(s, ",mapposix");
640 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
641 seq_puts(s, ",sfu");
642 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
643 seq_puts(s, ",nobrl");
644 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
645 seq_puts(s, ",nohandlecache");
646 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
647 seq_puts(s, ",modefromsid");
648 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
649 seq_puts(s, ",cifsacl");
650 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
651 seq_puts(s, ",dynperm");
652 if (root->d_sb->s_flags & SB_POSIXACL)
653 seq_puts(s, ",acl");
654 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
655 seq_puts(s, ",mfsymlinks");
656 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
657 seq_puts(s, ",fsc");
658 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
659 seq_puts(s, ",nostrictsync");
660 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
661 seq_puts(s, ",noperm");
662 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
663 seq_printf(s, ",backupuid=%u",
664 from_kuid_munged(&init_user_ns,
665 cifs_sb->ctx->backupuid));
666 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
667 seq_printf(s, ",backupgid=%u",
668 from_kgid_munged(&init_user_ns,
669 cifs_sb->ctx->backupgid));
670
671 seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
672 seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
673 seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
674 if (cifs_sb->ctx->rasize)
675 seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
676 if (tcon->ses->server->min_offload)
677 seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
678 seq_printf(s, ",echo_interval=%lu",
679 tcon->ses->server->echo_interval / HZ);
680
681 /* Only display max_credits if it was overridden on mount */
682 if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
683 seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
684
685 if (tcon->snapshot_time)
686 seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
687 if (tcon->handle_timeout)
688 seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
689
690 /*
691 * Display file and directory attribute timeout in seconds.
692 * If the file and directory attribute timeouts are the same then actimeo
693 * was likely specified on mount
694 */
695 if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
696 seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
697 else {
698 seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
699 seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
700 }
701 seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
702
703 if (tcon->ses->chan_max > 1)
704 seq_printf(s, ",multichannel,max_channels=%zu",
705 tcon->ses->chan_max);
706
707 if (tcon->use_witness)
708 seq_puts(s, ",witness");
709
710 return 0;
711 }
712
713 static void cifs_umount_begin(struct super_block *sb)
714 {
715 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
716 struct cifs_tcon *tcon;
717
718 if (cifs_sb == NULL)
719 return;
720
721 tcon = cifs_sb_master_tcon(cifs_sb);
722
723 spin_lock(&cifs_tcp_ses_lock);
724 spin_lock(&tcon->tc_lock);
725 if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
726 /* we have other mounts to same share or we have
727 already tried to force umount this and woken up
728 all waiting network requests, nothing to do */
729 spin_unlock(&tcon->tc_lock);
730 spin_unlock(&cifs_tcp_ses_lock);
731 return;
732 } else if (tcon->tc_count == 1)
733 tcon->status = TID_EXITING;
734 spin_unlock(&tcon->tc_lock);
735 spin_unlock(&cifs_tcp_ses_lock);
736
737 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
738 /* cancel_notify_requests(tcon); */
739 if (tcon->ses && tcon->ses->server) {
740 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
741 wake_up_all(&tcon->ses->server->request_q);
742 wake_up_all(&tcon->ses->server->response_q);
743 msleep(1); /* yield */
744 /* we have to kick the requests once more */
745 wake_up_all(&tcon->ses->server->response_q);
746 msleep(1);
747 }
748
749 return;
750 }
751
752 #ifdef CONFIG_CIFS_STATS2
753 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
754 {
755 /* BB FIXME */
756 return 0;
757 }
758 #endif
759
760 static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
761 {
762 fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));
763 return 0;
764 }
765
766 static int cifs_drop_inode(struct inode *inode)
767 {
768 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
769
770 /* no serverino => unconditional eviction */
771 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
772 generic_drop_inode(inode);
773 }
774
775 static const struct super_operations cifs_super_ops = {
776 .statfs = cifs_statfs,
777 .alloc_inode = cifs_alloc_inode,
778 .write_inode = cifs_write_inode,
779 .free_inode = cifs_free_inode,
780 .drop_inode = cifs_drop_inode,
781 .evict_inode = cifs_evict_inode,
782 /* .show_path = cifs_show_path, */ /* Would we ever need show path? */
783 .show_devname = cifs_show_devname,
784 /* .delete_inode = cifs_delete_inode, */ /* Do not need above
785 function unless later we add lazy close of inodes or unless the
786 kernel forgets to call us with the same number of releases (closes)
787 as opens */
788 .show_options = cifs_show_options,
789 .umount_begin = cifs_umount_begin,
790 #ifdef CONFIG_CIFS_STATS2
791 .show_stats = cifs_show_stats,
792 #endif
793 };
794
795 /*
796 * Get root dentry from superblock according to prefix path mount option.
797 * Return dentry with refcount + 1 on success and NULL otherwise.
798 */
799 static struct dentry *
800 cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
801 {
802 struct dentry *dentry;
803 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
804 char *full_path = NULL;
805 char *s, *p;
806 char sep;
807
808 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
809 return dget(sb->s_root);
810
811 full_path = cifs_build_path_to_root(ctx, cifs_sb,
812 cifs_sb_master_tcon(cifs_sb), 0);
813 if (full_path == NULL)
814 return ERR_PTR(-ENOMEM);
815
816 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
817
818 sep = CIFS_DIR_SEP(cifs_sb);
819 dentry = dget(sb->s_root);
820 s = full_path;
821
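/*
 * Illustrative example: with a prefix path of dir1/dir2 the string built
 * above is typically "\dir1\dir2" (separator chosen by CIFS_DIR_SEP), so
 * the loop below skips separators and looks up "dir1" then "dir2",
 * dropping the reference to each parent dentry as it descends.
 */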
822 do {
823 struct inode *dir = d_inode(dentry);
824 struct dentry *child;
825
826 if (!S_ISDIR(dir->i_mode)) {
827 dput(dentry);
828 dentry = ERR_PTR(-ENOTDIR);
829 break;
830 }
831
832 /* skip separators */
833 while (*s == sep)
834 s++;
835 if (!*s)
836 break;
837 p = s++;
838 /* next separator */
839 while (*s && *s != sep)
840 s++;
841
842 child = lookup_positive_unlocked(p, dentry, s - p);
843 dput(dentry);
844 dentry = child;
845 } while (!IS_ERR(dentry));
846 kfree(full_path);
847 return dentry;
848 }
849
850 static int cifs_set_super(struct super_block *sb, void *data)
851 {
852 struct cifs_mnt_data *mnt_data = data;
853 sb->s_fs_info = mnt_data->cifs_sb;
854 return set_anon_super(sb, NULL);
855 }
856
857 struct dentry *
858 cifs_smb3_do_mount(struct file_system_type *fs_type,
859 int flags, struct smb3_fs_context *old_ctx)
860 {
861 int rc;
862 struct super_block *sb = NULL;
863 struct cifs_sb_info *cifs_sb = NULL;
864 struct cifs_mnt_data mnt_data;
865 struct dentry *root;
866
867 /*
868 * Print the attempted mount operation to the kernel / CIFS log:
869 * at FYI level if cifsFYI debugging is enabled, otherwise as an info message
870 */
871 if (cifsFYI)
872 cifs_dbg(FYI, "Devname: %s flags: %d\n", old_ctx->UNC, flags);
873 else
874 cifs_info("Attempting to mount %s\n", old_ctx->UNC);
875
876 cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
877 if (cifs_sb == NULL) {
878 root = ERR_PTR(-ENOMEM);
879 goto out;
880 }
881
882 cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
883 if (!cifs_sb->ctx) {
884 root = ERR_PTR(-ENOMEM);
885 goto out;
886 }
887 rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
888 if (rc) {
889 root = ERR_PTR(rc);
890 goto out;
891 }
892
893 rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, NULL);
894 if (rc) {
895 root = ERR_PTR(rc);
896 goto out;
897 }
898
899 rc = cifs_setup_cifs_sb(cifs_sb);
900 if (rc) {
901 root = ERR_PTR(rc);
902 goto out;
903 }
904
905 rc = cifs_mount(cifs_sb, cifs_sb->ctx);
906 if (rc) {
907 if (!(flags & SB_SILENT))
908 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
909 rc);
910 root = ERR_PTR(rc);
911 goto out;
912 }
913
914 mnt_data.ctx = cifs_sb->ctx;
915 mnt_data.cifs_sb = cifs_sb;
916 mnt_data.flags = flags;
917
918 /* BB should we make this contingent on mount parm? */
919 flags |= SB_NODIRATIME | SB_NOATIME;
920
921 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
922 if (IS_ERR(sb)) {
923 root = ERR_CAST(sb);
924 cifs_umount(cifs_sb);
925 cifs_sb = NULL;
926 goto out;
927 }
928
929 if (sb->s_root) {
930 cifs_dbg(FYI, "Use existing superblock\n");
931 cifs_umount(cifs_sb);
932 cifs_sb = NULL;
933 } else {
934 rc = cifs_read_super(sb);
935 if (rc) {
936 root = ERR_PTR(rc);
937 goto out_super;
938 }
939
940 sb->s_flags |= SB_ACTIVE;
941 }
942
943 root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
944 if (IS_ERR(root))
945 goto out_super;
946
947 if (cifs_sb)
948 cifs_sb->root = dget(root);
949
950 cifs_dbg(FYI, "dentry root is: %p\n", root);
951 return root;
952
953 out_super:
954 deactivate_locked_super(sb);
955 return root;
956 out:
957 if (cifs_sb) {
958 if (!sb || IS_ERR(sb)) { /* otherwise kill_sb will handle */
959 kfree(cifs_sb->prepath);
960 smb3_cleanup_fs_context(cifs_sb->ctx);
961 kfree(cifs_sb);
962 }
963 }
964 return root;
965 }
966
967
968 static ssize_t
969 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
970 {
971 ssize_t rc;
972 struct inode *inode = file_inode(iocb->ki_filp);
973
974 if (iocb->ki_flags & IOCB_DIRECT)
975 return cifs_user_readv(iocb, iter);
976
977 rc = cifs_revalidate_mapping(inode);
978 if (rc)
979 return rc;
980
981 return generic_file_read_iter(iocb, iter);
982 }
983
984 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
985 {
986 struct inode *inode = file_inode(iocb->ki_filp);
987 struct cifsInodeInfo *cinode = CIFS_I(inode);
988 ssize_t written;
989 int rc;
990
991 if (iocb->ki_filp->f_flags & O_DIRECT) {
992 written = cifs_user_writev(iocb, from);
993 if (written > 0 && CIFS_CACHE_READ(cinode)) {
994 cifs_zap_mapping(inode);
995 cifs_dbg(FYI,
996 "Set no oplock for inode=%p after a write operation\n",
997 inode);
998 cinode->oplock = 0;
999 }
1000 return written;
1001 }
1002
1003 written = cifs_get_writer(cinode);
1004 if (written)
1005 return written;
1006
1007 written = generic_file_write_iter(iocb, from);
1008
1009 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
1010 goto out;
1011
1012 rc = filemap_fdatawrite(inode->i_mapping);
1013 if (rc)
1014 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
1015 rc, inode);
1016
1017 out:
1018 cifs_put_writer(cinode);
1019 return written;
1020 }
1021
1022 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
1023 {
1024 struct cifsFileInfo *cfile = file->private_data;
1025 struct cifs_tcon *tcon;
1026
1027 /*
1028 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
1029 * the cached file length
1030 */
1031 if (whence != SEEK_SET && whence != SEEK_CUR) {
1032 int rc;
1033 struct inode *inode = file_inode(file);
1034
1035 /*
1036 * We need to be sure that all dirty pages are written and the
1037 * server has the newest file length.
1038 */
1039 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
1040 inode->i_mapping->nrpages != 0) {
1041 rc = filemap_fdatawait(inode->i_mapping);
1042 if (rc) {
1043 mapping_set_error(inode->i_mapping, rc);
1044 return rc;
1045 }
1046 }
1047 /*
1048 * Some applications poll for the file length in this strange
1049 * way so we must seek to end on non-oplocked files by
1050 * setting the revalidate time to zero.
1051 */
1052 CIFS_I(inode)->time = 0;
1053
1054 rc = cifs_revalidate_file_attr(file);
1055 if (rc < 0)
1056 return (loff_t)rc;
1057 }
1058 if (cfile && cfile->tlink) {
1059 tcon = tlink_tcon(cfile->tlink);
1060 if (tcon->ses->server->ops->llseek)
1061 return tcon->ses->server->ops->llseek(file, tcon,
1062 offset, whence);
1063 }
1064 return generic_file_llseek(file, offset, whence);
1065 }
1066
1067 static int
1068 cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
1069 {
1070 /*
1071 * Note that this is called by vfs setlease with i_lock held to
1072 * protect *lease from going away.
1073 */
1074 struct inode *inode = file_inode(file);
1075 struct cifsFileInfo *cfile = file->private_data;
1076
1077 if (!(S_ISREG(inode->i_mode)))
1078 return -EINVAL;
1079
1080 /* Check if file is oplocked if this is request for new lease */
1081 if (arg == F_UNLCK ||
1082 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1083 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1084 return generic_setlease(file, arg, lease, priv);
1085 else if (tlink_tcon(cfile->tlink)->local_lease &&
1086 !CIFS_CACHE_READ(CIFS_I(inode)))
1087 /*
1088 * If the server claims to support oplock on this file, then we
1089 * still need to check oplock even if the local_lease mount
1090 * option is set, but there are servers which do not support
1091 * oplock for which this mount option may be useful if the user
1092 * knows that the file won't be changed on the server by anyone
1093 * else.
1094 */
1095 return generic_setlease(file, arg, lease, priv);
1096 else
1097 return -EAGAIN;
1098 }
1099
1100 struct file_system_type cifs_fs_type = {
1101 .owner = THIS_MODULE,
1102 .name = "cifs",
1103 .init_fs_context = smb3_init_fs_context,
1104 .parameters = smb3_fs_parameters,
1105 .kill_sb = cifs_kill_sb,
1106 .fs_flags = FS_RENAME_DOES_D_MOVE,
1107 };
1108 MODULE_ALIAS_FS("cifs");
1109
1110 struct file_system_type smb3_fs_type = {
1111 .owner = THIS_MODULE,
1112 .name = "smb3",
1113 .init_fs_context = smb3_init_fs_context,
1114 .parameters = smb3_fs_parameters,
1115 .kill_sb = cifs_kill_sb,
1116 .fs_flags = FS_RENAME_DOES_D_MOVE,
1117 };
1118 MODULE_ALIAS_FS("smb3");
1119 MODULE_ALIAS("smb3");
1120
1121 const struct inode_operations cifs_dir_inode_ops = {
1122 .create = cifs_create,
1123 .atomic_open = cifs_atomic_open,
1124 .lookup = cifs_lookup,
1125 .getattr = cifs_getattr,
1126 .unlink = cifs_unlink,
1127 .link = cifs_hardlink,
1128 .mkdir = cifs_mkdir,
1129 .rmdir = cifs_rmdir,
1130 .rename = cifs_rename2,
1131 .permission = cifs_permission,
1132 .setattr = cifs_setattr,
1133 .symlink = cifs_symlink,
1134 .mknod = cifs_mknod,
1135 .listxattr = cifs_listxattr,
1136 };
1137
1138 const struct inode_operations cifs_file_inode_ops = {
1139 .setattr = cifs_setattr,
1140 .getattr = cifs_getattr,
1141 .permission = cifs_permission,
1142 .listxattr = cifs_listxattr,
1143 .fiemap = cifs_fiemap,
1144 };
1145
1146 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1147 struct delayed_call *done)
1148 {
1149 char *target_path;
1150
1151 target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1152 if (!target_path)
1153 return ERR_PTR(-ENOMEM);
1154
1155 spin_lock(&inode->i_lock);
1156 if (likely(CIFS_I(inode)->symlink_target)) {
1157 strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1158 } else {
1159 kfree(target_path);
1160 target_path = ERR_PTR(-EOPNOTSUPP);
1161 }
1162 spin_unlock(&inode->i_lock);
1163
1164 if (!IS_ERR(target_path))
1165 set_delayed_call(done, kfree_link, target_path);
1166
1167 return target_path;
1168 }
1169
1170 const struct inode_operations cifs_symlink_inode_ops = {
1171 .get_link = cifs_get_link,
1172 .permission = cifs_permission,
1173 .listxattr = cifs_listxattr,
1174 };
1175
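/*
 * ->remap_file_range entry point (reached e.g. via the FICLONE and
 * FICLONERANGE ioctls); only server-side extent duplication is attempted.
 */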
1176 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1177 struct file *dst_file, loff_t destoff, loff_t len,
1178 unsigned int remap_flags)
1179 {
1180 struct inode *src_inode = file_inode(src_file);
1181 struct inode *target_inode = file_inode(dst_file);
1182 struct cifsFileInfo *smb_file_src = src_file->private_data;
1183 struct cifsFileInfo *smb_file_target;
1184 struct cifs_tcon *target_tcon;
1185 unsigned int xid;
1186 int rc;
1187
1188 if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
1189 return -EINVAL;
1190
1191 cifs_dbg(FYI, "clone range\n");
1192
1193 xid = get_xid();
1194
1195 if (!src_file->private_data || !dst_file->private_data) {
1196 rc = -EBADF;
1197 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1198 goto out;
1199 }
1200
1201 smb_file_target = dst_file->private_data;
1202 target_tcon = tlink_tcon(smb_file_target->tlink);
1203
1204 /*
1205 * Note: the cifs case is easier than btrfs since the server is responsible
1206 * for checking proper open modes and file types, and if it wants, the
1207 * server could even support a copy of a range where source == target
1208 */
1209 lock_two_nondirectories(target_inode, src_inode);
1210
1211 if (len == 0)
1212 len = src_inode->i_size - off;
1213
1214 cifs_dbg(FYI, "about to flush pages\n");
1215 /* should we flush the first and last pages first? */
1216 truncate_inode_pages_range(&target_inode->i_data, destoff,
1217 PAGE_ALIGN(destoff + len)-1);
1218
1219 if (target_tcon->ses->server->ops->duplicate_extents)
1220 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1221 smb_file_src, smb_file_target, off, len, destoff);
1222 else
1223 rc = -EOPNOTSUPP;
1224
1225 /* force revalidate of size and timestamps of target file now
1226 that target is updated on the server */
1227 CIFS_I(target_inode)->time = 0;
1228 /* although unlocking in the reverse order from locking is not
1229 strictly necessary here it is a little cleaner to be consistent */
1230 unlock_two_nondirectories(src_inode, target_inode);
1231 out:
1232 free_xid(xid);
1233 return rc < 0 ? rc : len;
1234 }
1235
1236 ssize_t cifs_file_copychunk_range(unsigned int xid,
1237 struct file *src_file, loff_t off,
1238 struct file *dst_file, loff_t destoff,
1239 size_t len, unsigned int flags)
1240 {
1241 struct inode *src_inode = file_inode(src_file);
1242 struct inode *target_inode = file_inode(dst_file);
1243 struct cifsFileInfo *smb_file_src;
1244 struct cifsFileInfo *smb_file_target;
1245 struct cifs_tcon *src_tcon;
1246 struct cifs_tcon *target_tcon;
1247 ssize_t rc;
1248
1249 cifs_dbg(FYI, "copychunk range\n");
1250
1251 if (!src_file->private_data || !dst_file->private_data) {
1252 rc = -EBADF;
1253 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1254 goto out;
1255 }
1256
1257 rc = -EXDEV;
1258 smb_file_target = dst_file->private_data;
1259 smb_file_src = src_file->private_data;
1260 src_tcon = tlink_tcon(smb_file_src->tlink);
1261 target_tcon = tlink_tcon(smb_file_target->tlink);
1262
1263 if (src_tcon->ses != target_tcon->ses) {
1264 cifs_dbg(VFS, "source and target of copy not on same server\n");
1265 goto out;
1266 }
1267
1268 rc = -EOPNOTSUPP;
1269 if (!target_tcon->ses->server->ops->copychunk_range)
1270 goto out;
1271
1272 /*
1273 * Note: the cifs case is easier than btrfs since the server is responsible
1274 * for checking proper open modes and file types, and if it wants, the
1275 * server could even support a copy of a range where source == target
1276 */
1277 lock_two_nondirectories(target_inode, src_inode);
1278
1279 cifs_dbg(FYI, "about to flush pages\n");
1280
1281 rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1282 off + len - 1);
1283 if (rc)
1284 goto unlock;
1285
1286 /* should we flush the first and last pages first? */
1287 truncate_inode_pages(&target_inode->i_data, 0);
1288
1289 rc = file_modified(dst_file);
1290 if (!rc)
1291 rc = target_tcon->ses->server->ops->copychunk_range(xid,
1292 smb_file_src, smb_file_target, off, len, destoff);
1293
1294 file_accessed(src_file);
1295
1296 /* force revalidate of size and timestamps of target file now
1297 * that target is updated on the server
1298 */
1299 CIFS_I(target_inode)->time = 0;
1300
1301 unlock:
1302 /* although unlocking in the reverse order from locking is not
1303 * strictly necessary here it is a little cleaner to be consistent
1304 */
1305 unlock_two_nondirectories(src_inode, target_inode);
1306
1307 out:
1308 return rc;
1309 }
1310
1311 /*
1312 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1313 * is a dummy operation.
1314 */
1315 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1316 {
1317 cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1318 file, datasync);
1319
1320 return 0;
1321 }
1322
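/*
 * ->copy_file_range entry point for copy_file_range(2); falls back to the
 * generic page-cache based copy when the server cannot copy server-side.
 */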
1323 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1324 struct file *dst_file, loff_t destoff,
1325 size_t len, unsigned int flags)
1326 {
1327 unsigned int xid = get_xid();
1328 ssize_t rc;
1329 struct cifsFileInfo *cfile = dst_file->private_data;
1330
1331 if (cfile->swapfile) {
1332 rc = -EOPNOTSUPP;
1333 free_xid(xid);
1334 return rc;
1335 }
1336
1337 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1338 len, flags);
1339 free_xid(xid);
1340
1341 if (rc == -EOPNOTSUPP || rc == -EXDEV)
1342 rc = generic_copy_file_range(src_file, off, dst_file,
1343 destoff, len, flags);
1344 return rc;
1345 }
1346
1347 const struct file_operations cifs_file_ops = {
1348 .read_iter = cifs_loose_read_iter,
1349 .write_iter = cifs_file_write_iter,
1350 .open = cifs_open,
1351 .release = cifs_close,
1352 .lock = cifs_lock,
1353 .flock = cifs_flock,
1354 .fsync = cifs_fsync,
1355 .flush = cifs_flush,
1356 .mmap = cifs_file_mmap,
1357 .splice_read = generic_file_splice_read,
1358 .splice_write = iter_file_splice_write,
1359 .llseek = cifs_llseek,
1360 .unlocked_ioctl = cifs_ioctl,
1361 .copy_file_range = cifs_copy_file_range,
1362 .remap_file_range = cifs_remap_file_range,
1363 .setlease = cifs_setlease,
1364 .fallocate = cifs_fallocate,
1365 };
1366
1367 const struct file_operations cifs_file_strict_ops = {
1368 .read_iter = cifs_strict_readv,
1369 .write_iter = cifs_strict_writev,
1370 .open = cifs_open,
1371 .release = cifs_close,
1372 .lock = cifs_lock,
1373 .flock = cifs_flock,
1374 .fsync = cifs_strict_fsync,
1375 .flush = cifs_flush,
1376 .mmap = cifs_file_strict_mmap,
1377 .splice_read = generic_file_splice_read,
1378 .splice_write = iter_file_splice_write,
1379 .llseek = cifs_llseek,
1380 .unlocked_ioctl = cifs_ioctl,
1381 .copy_file_range = cifs_copy_file_range,
1382 .remap_file_range = cifs_remap_file_range,
1383 .setlease = cifs_setlease,
1384 .fallocate = cifs_fallocate,
1385 };
1386
1387 const struct file_operations cifs_file_direct_ops = {
1388 .read_iter = cifs_direct_readv,
1389 .write_iter = cifs_direct_writev,
1390 .open = cifs_open,
1391 .release = cifs_close,
1392 .lock = cifs_lock,
1393 .flock = cifs_flock,
1394 .fsync = cifs_fsync,
1395 .flush = cifs_flush,
1396 .mmap = cifs_file_mmap,
1397 .splice_read = generic_file_splice_read,
1398 .splice_write = iter_file_splice_write,
1399 .unlocked_ioctl = cifs_ioctl,
1400 .copy_file_range = cifs_copy_file_range,
1401 .remap_file_range = cifs_remap_file_range,
1402 .llseek = cifs_llseek,
1403 .setlease = cifs_setlease,
1404 .fallocate = cifs_fallocate,
1405 };
1406
1407 const struct file_operations cifs_file_nobrl_ops = {
1408 .read_iter = cifs_loose_read_iter,
1409 .write_iter = cifs_file_write_iter,
1410 .open = cifs_open,
1411 .release = cifs_close,
1412 .fsync = cifs_fsync,
1413 .flush = cifs_flush,
1414 .mmap = cifs_file_mmap,
1415 .splice_read = generic_file_splice_read,
1416 .splice_write = iter_file_splice_write,
1417 .llseek = cifs_llseek,
1418 .unlocked_ioctl = cifs_ioctl,
1419 .copy_file_range = cifs_copy_file_range,
1420 .remap_file_range = cifs_remap_file_range,
1421 .setlease = cifs_setlease,
1422 .fallocate = cifs_fallocate,
1423 };
1424
1425 const struct file_operations cifs_file_strict_nobrl_ops = {
1426 .read_iter = cifs_strict_readv,
1427 .write_iter = cifs_strict_writev,
1428 .open = cifs_open,
1429 .release = cifs_close,
1430 .fsync = cifs_strict_fsync,
1431 .flush = cifs_flush,
1432 .mmap = cifs_file_strict_mmap,
1433 .splice_read = generic_file_splice_read,
1434 .splice_write = iter_file_splice_write,
1435 .llseek = cifs_llseek,
1436 .unlocked_ioctl = cifs_ioctl,
1437 .copy_file_range = cifs_copy_file_range,
1438 .remap_file_range = cifs_remap_file_range,
1439 .setlease = cifs_setlease,
1440 .fallocate = cifs_fallocate,
1441 };
1442
1443 const struct file_operations cifs_file_direct_nobrl_ops = {
1444 .read_iter = cifs_direct_readv,
1445 .write_iter = cifs_direct_writev,
1446 .open = cifs_open,
1447 .release = cifs_close,
1448 .fsync = cifs_fsync,
1449 .flush = cifs_flush,
1450 .mmap = cifs_file_mmap,
1451 .splice_read = generic_file_splice_read,
1452 .splice_write = iter_file_splice_write,
1453 .unlocked_ioctl = cifs_ioctl,
1454 .copy_file_range = cifs_copy_file_range,
1455 .remap_file_range = cifs_remap_file_range,
1456 .llseek = cifs_llseek,
1457 .setlease = cifs_setlease,
1458 .fallocate = cifs_fallocate,
1459 };
1460
1461 const struct file_operations cifs_dir_ops = {
1462 .iterate_shared = cifs_readdir,
1463 .release = cifs_closedir,
1464 .read = generic_read_dir,
1465 .unlocked_ioctl = cifs_ioctl,
1466 .copy_file_range = cifs_copy_file_range,
1467 .remap_file_range = cifs_remap_file_range,
1468 .llseek = generic_file_llseek,
1469 .fsync = cifs_dir_fsync,
1470 };
1471
1472 static void
1473 cifs_init_once(void *inode)
1474 {
1475 struct cifsInodeInfo *cifsi = inode;
1476
1477 inode_init_once(&cifsi->netfs.inode);
1478 init_rwsem(&cifsi->lock_sem);
1479 }
1480
1481 static int __init
1482 cifs_init_inodecache(void)
1483 {
1484 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1485 sizeof(struct cifsInodeInfo),
1486 0, (SLAB_RECLAIM_ACCOUNT|
1487 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1488 cifs_init_once);
1489 if (cifs_inode_cachep == NULL)
1490 return -ENOMEM;
1491
1492 return 0;
1493 }
1494
1495 static void
1496 cifs_destroy_inodecache(void)
1497 {
1498 /*
1499 * Make sure all delayed rcu free inodes are flushed before we
1500 * destroy cache.
1501 */
1502 rcu_barrier();
1503 kmem_cache_destroy(cifs_inode_cachep);
1504 }
1505
1506 static int
1507 cifs_init_request_bufs(void)
1508 {
1509 /*
1510 * SMB2 maximum header size is bigger than CIFS one - no problems to
1511 * allocate some more bytes for CIFS.
1512 */
1513 size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1514
1515 if (CIFSMaxBufSize < 8192) {
1516 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1517 Unicode path name has to fit in any SMB/CIFS path based frames */
1518 CIFSMaxBufSize = 8192;
1519 } else if (CIFSMaxBufSize > 1024*127) {
1520 CIFSMaxBufSize = 1024 * 127;
1521 } else {
1522 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
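/* e.g. a requested value of 16500 is rounded down to 16384 */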
1523 }
1524 /*
1525 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1526 CIFSMaxBufSize, CIFSMaxBufSize);
1527 */
1528 cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1529 CIFSMaxBufSize + max_hdr_size, 0,
1530 SLAB_HWCACHE_ALIGN, 0,
1531 CIFSMaxBufSize + max_hdr_size,
1532 NULL);
1533 if (cifs_req_cachep == NULL)
1534 return -ENOMEM;
1535
1536 if (cifs_min_rcv < 1)
1537 cifs_min_rcv = 1;
1538 else if (cifs_min_rcv > 64) {
1539 cifs_min_rcv = 64;
1540 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1541 }
1542
1543 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1544 cifs_req_cachep);
1545
1546 if (cifs_req_poolp == NULL) {
1547 kmem_cache_destroy(cifs_req_cachep);
1548 return -ENOMEM;
1549 }
1550 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1551 almost all handle based requests (but not write response, nor is it
1552 sufficient for path based requests). A smaller size would have
1553 been more efficient (compacting multiple slab items on one 4k page)
1554 for the case in which debug was on, but this larger size allows
1555 more SMBs to use small buffer alloc and is still much more
1556 efficient to alloc 1 per page off the slab compared to 17K (5page)
1557 alloc of large cifs buffers even when page debugging is on */
1558 cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1559 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1560 0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1561 if (cifs_sm_req_cachep == NULL) {
1562 mempool_destroy(cifs_req_poolp);
1563 kmem_cache_destroy(cifs_req_cachep);
1564 return -ENOMEM;
1565 }
1566
1567 if (cifs_min_small < 2)
1568 cifs_min_small = 2;
1569 else if (cifs_min_small > 256) {
1570 cifs_min_small = 256;
1571 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1572 }
1573
1574 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1575 cifs_sm_req_cachep);
1576
1577 if (cifs_sm_req_poolp == NULL) {
1578 mempool_destroy(cifs_req_poolp);
1579 kmem_cache_destroy(cifs_req_cachep);
1580 kmem_cache_destroy(cifs_sm_req_cachep);
1581 return -ENOMEM;
1582 }
1583
1584 return 0;
1585 }
1586
1587 static void
1588 cifs_destroy_request_bufs(void)
1589 {
1590 mempool_destroy(cifs_req_poolp);
1591 kmem_cache_destroy(cifs_req_cachep);
1592 mempool_destroy(cifs_sm_req_poolp);
1593 kmem_cache_destroy(cifs_sm_req_cachep);
1594 }
1595
1596 static int init_mids(void)
1597 {
1598 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1599 sizeof(struct mid_q_entry), 0,
1600 SLAB_HWCACHE_ALIGN, NULL);
1601 if (cifs_mid_cachep == NULL)
1602 return -ENOMEM;
1603
1604 /* 3 is a reasonable minimum number of simultaneous operations */
1605 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1606 if (cifs_mid_poolp == NULL) {
1607 kmem_cache_destroy(cifs_mid_cachep);
1608 return -ENOMEM;
1609 }
1610
1611 return 0;
1612 }
1613
1614 static void destroy_mids(void)
1615 {
1616 mempool_destroy(cifs_mid_poolp);
1617 kmem_cache_destroy(cifs_mid_cachep);
1618 }
1619
1620 static int __init
1621 init_cifs(void)
1622 {
1623 int rc = 0;
1624 cifs_proc_init();
1625 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1626 /*
1627 * Initialize Global counters
1628 */
1629 atomic_set(&sesInfoAllocCount, 0);
1630 atomic_set(&tconInfoAllocCount, 0);
1631 atomic_set(&tcpSesNextId, 0);
1632 atomic_set(&tcpSesAllocCount, 0);
1633 atomic_set(&tcpSesReconnectCount, 0);
1634 atomic_set(&tconInfoReconnectCount, 0);
1635
1636 atomic_set(&buf_alloc_count, 0);
1637 atomic_set(&small_buf_alloc_count, 0);
1638 #ifdef CONFIG_CIFS_STATS2
1639 atomic_set(&total_buf_alloc_count, 0);
1640 atomic_set(&total_small_buf_alloc_count, 0);
1641 if (slow_rsp_threshold < 1)
1642 cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1643 else if (slow_rsp_threshold > 32767)
1644 cifs_dbg(VFS,
1645 "slow response threshold set higher than recommended (0 to 32767)\n");
1646 #endif /* CONFIG_CIFS_STATS2 */
1647
1648 atomic_set(&mid_count, 0);
1649 GlobalCurrentXid = 0;
1650 GlobalTotalActiveXid = 0;
1651 GlobalMaxActiveXid = 0;
1652 spin_lock_init(&cifs_tcp_ses_lock);
1653 spin_lock_init(&GlobalMid_Lock);
1654
1655 cifs_lock_secret = get_random_u32();
1656
1657 if (cifs_max_pending < 2) {
1658 cifs_max_pending = 2;
1659 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1660 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1661 cifs_max_pending = CIFS_MAX_REQ;
1662 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1663 CIFS_MAX_REQ);
1664 }
1665
1666 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1667 if (!cifsiod_wq) {
1668 rc = -ENOMEM;
1669 goto out_clean_proc;
1670 }
1671
1672 /*
1673 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
1674 * so that we don't launch too many worker threads but
1675 * Documentation/core-api/workqueue.rst recommends setting it to 0
1676 */
1677
1678 /* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1679 decrypt_wq = alloc_workqueue("smb3decryptd",
1680 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1681 if (!decrypt_wq) {
1682 rc = -ENOMEM;
1683 goto out_destroy_cifsiod_wq;
1684 }
1685
1686 fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1687 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1688 if (!fileinfo_put_wq) {
1689 rc = -ENOMEM;
1690 goto out_destroy_decrypt_wq;
1691 }
1692
1693 cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1694 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1695 if (!cifsoplockd_wq) {
1696 rc = -ENOMEM;
1697 goto out_destroy_fileinfo_put_wq;
1698 }
1699
1700 deferredclose_wq = alloc_workqueue("deferredclose",
1701 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1702 if (!deferredclose_wq) {
1703 rc = -ENOMEM;
1704 goto out_destroy_cifsoplockd_wq;
1705 }
1706
1707 rc = cifs_init_inodecache();
1708 if (rc)
1709 goto out_destroy_deferredclose_wq;
1710
1711 rc = init_mids();
1712 if (rc)
1713 goto out_destroy_inodecache;
1714
1715 rc = cifs_init_request_bufs();
1716 if (rc)
1717 goto out_destroy_mids;
1718
1719 #ifdef CONFIG_CIFS_DFS_UPCALL
1720 rc = dfs_cache_init();
1721 if (rc)
1722 goto out_destroy_request_bufs;
1723 #endif /* CONFIG_CIFS_DFS_UPCALL */
1724 #ifdef CONFIG_CIFS_UPCALL
1725 rc = init_cifs_spnego();
1726 if (rc)
1727 goto out_destroy_dfs_cache;
1728 #endif /* CONFIG_CIFS_UPCALL */
1729 #ifdef CONFIG_CIFS_SWN_UPCALL
1730 rc = cifs_genl_init();
1731 if (rc)
1732 goto out_register_key_type;
1733 #endif /* CONFIG_CIFS_SWN_UPCALL */
1734
1735 rc = init_cifs_idmap();
1736 if (rc)
1737 goto out_cifs_swn_init;
1738
1739 rc = register_filesystem(&cifs_fs_type);
1740 if (rc)
1741 goto out_init_cifs_idmap;
1742
1743 rc = register_filesystem(&smb3_fs_type);
1744 if (rc) {
1745 unregister_filesystem(&cifs_fs_type);
1746 goto out_init_cifs_idmap;
1747 }
1748
1749 return 0;
1750
1751 out_init_cifs_idmap:
1752 exit_cifs_idmap();
1753 out_cifs_swn_init:
1754 #ifdef CONFIG_CIFS_SWN_UPCALL
1755 cifs_genl_exit();
1756 out_register_key_type:
1757 #endif
1758 #ifdef CONFIG_CIFS_UPCALL
1759 exit_cifs_spnego();
1760 out_destroy_dfs_cache:
1761 #endif
1762 #ifdef CONFIG_CIFS_DFS_UPCALL
1763 dfs_cache_destroy();
1764 out_destroy_request_bufs:
1765 #endif
1766 cifs_destroy_request_bufs();
1767 out_destroy_mids:
1768 destroy_mids();
1769 out_destroy_inodecache:
1770 cifs_destroy_inodecache();
1771 out_destroy_deferredclose_wq:
1772 destroy_workqueue(deferredclose_wq);
1773 out_destroy_cifsoplockd_wq:
1774 destroy_workqueue(cifsoplockd_wq);
1775 out_destroy_fileinfo_put_wq:
1776 destroy_workqueue(fileinfo_put_wq);
1777 out_destroy_decrypt_wq:
1778 destroy_workqueue(decrypt_wq);
1779 out_destroy_cifsiod_wq:
1780 destroy_workqueue(cifsiod_wq);
1781 out_clean_proc:
1782 cifs_proc_clean();
1783 return rc;
1784 }
1785
1786 static void __exit
1787 exit_cifs(void)
1788 {
1789 cifs_dbg(NOISY, "exit_smb3\n");
1790 unregister_filesystem(&cifs_fs_type);
1791 unregister_filesystem(&smb3_fs_type);
1792 cifs_dfs_release_automount_timer();
1793 exit_cifs_idmap();
1794 #ifdef CONFIG_CIFS_SWN_UPCALL
1795 cifs_genl_exit();
1796 #endif
1797 #ifdef CONFIG_CIFS_UPCALL
1798 exit_cifs_spnego();
1799 #endif
1800 #ifdef CONFIG_CIFS_DFS_UPCALL
1801 dfs_cache_destroy();
1802 #endif
1803 cifs_destroy_request_bufs();
1804 destroy_mids();
1805 cifs_destroy_inodecache();
1806 destroy_workqueue(deferredclose_wq);
1807 destroy_workqueue(cifsoplockd_wq);
1808 destroy_workqueue(decrypt_wq);
1809 destroy_workqueue(fileinfo_put_wq);
1810 destroy_workqueue(cifsiod_wq);
1811 cifs_proc_clean();
1812 }
1813
1814 MODULE_AUTHOR("Steve French");
1815 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1816 MODULE_DESCRIPTION
1817 ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
1818 "also older servers complying with the SNIA CIFS Specification)");
1819 MODULE_VERSION(CIFS_VERSION);
1820 MODULE_SOFTDEP("ecb");
1821 MODULE_SOFTDEP("hmac");
1822 MODULE_SOFTDEP("md5");
1823 MODULE_SOFTDEP("nls");
1824 MODULE_SOFTDEP("aes");
1825 MODULE_SOFTDEP("cmac");
1826 MODULE_SOFTDEP("sha256");
1827 MODULE_SOFTDEP("sha512");
1828 MODULE_SOFTDEP("aead2");
1829 MODULE_SOFTDEP("ccm");
1830 MODULE_SOFTDEP("gcm");
1831 module_init(init_cifs)
1832 module_exit(exit_cifs)
1833