1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2011
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  */
8 #include <linux/fs.h>
9 #include <linux/net.h>
10 #include <linux/string.h>
11 #include <linux/sched/mm.h>
12 #include <linux/sched/signal.h>
13 #include <linux/list.h>
14 #include <linux/wait.h>
15 #include <linux/slab.h>
16 #include <linux/pagemap.h>
17 #include <linux/ctype.h>
18 #include <linux/utsname.h>
19 #include <linux/mempool.h>
20 #include <linux/delay.h>
21 #include <linux/completion.h>
22 #include <linux/kthread.h>
23 #include <linux/pagevec.h>
24 #include <linux/freezer.h>
25 #include <linux/namei.h>
26 #include <linux/uuid.h>
27 #include <linux/uaccess.h>
28 #include <asm/processor.h>
29 #include <linux/inet.h>
30 #include <linux/module.h>
31 #include <keys/user-type.h>
32 #include <net/ipv6.h>
33 #include <linux/parser.h>
34 #include <linux/bvec.h>
35 #include "cifspdu.h"
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_unicode.h"
39 #include "cifs_debug.h"
40 #include "cifs_fs_sb.h"
41 #include "ntlmssp.h"
42 #include "nterr.h"
43 #include "rfc1002pdu.h"
44 #include "fscache.h"
45 #include "smb2proto.h"
46 #include "smbdirect.h"
47 #include "dns_resolve.h"
48 #ifdef CONFIG_CIFS_DFS_UPCALL
49 #include "dfs.h"
50 #include "dfs_cache.h"
51 #endif
52 #include "fs_context.h"
53 #include "cifs_swn.h"
54 
55 extern mempool_t *cifs_req_poolp;
56 extern bool disable_legacy_dialects;
57 
58 /* FIXME: should these be tunable? */
59 #define TLINK_ERROR_EXPIRE	(1 * HZ)
60 #define TLINK_IDLE_EXPIRE	(600 * HZ)
61 
62 /* Drop the connection to not overload the server */
63 #define MAX_STATUS_IO_TIMEOUT   5
64 
65 static int ip_connect(struct TCP_Server_Info *server);
66 static int generic_ip_connect(struct TCP_Server_Info *server);
67 static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
68 static void cifs_prune_tlinks(struct work_struct *work);
69 
70 /*
71  * Resolve hostname and set ip addr in tcp ses. Useful for hostnames whose
72  * ip addresses may change at some point.
73  *
74  * This should be called with server->srv_mutex held.
75  */
76 static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
77 {
78 	int rc;
79 	int len;
80 	char *unc;
81 	struct sockaddr_storage ss;
82 
83 	if (!server->hostname)
84 		return -EINVAL;
85 
86 	/* if server hostname isn't populated, there's nothing to do here */
87 	if (server->hostname[0] == '\0')
88 		return 0;
89 
90 	len = strlen(server->hostname) + 3;
91 
92 	unc = kmalloc(len, GFP_KERNEL);
93 	if (!unc) {
94 		cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
95 		return -ENOMEM;
96 	}
97 	scnprintf(unc, len, "\\\\%s", server->hostname);
98 
99 	spin_lock(&server->srv_lock);
100 	ss = server->dstaddr;
101 	spin_unlock(&server->srv_lock);
102 
103 	rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL);
104 	kfree(unc);
105 
106 	if (rc < 0) {
107 		cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
108 			 __func__, server->hostname, rc);
109 	} else {
110 		spin_lock(&server->srv_lock);
111 		memcpy(&server->dstaddr, &ss, sizeof(server->dstaddr));
112 		spin_unlock(&server->srv_lock);
113 		rc = 0;
114 	}
115 
116 	return rc;
117 }
118 
119 static void smb2_query_server_interfaces(struct work_struct *work)
120 {
121 	int rc;
122 	struct cifs_tcon *tcon = container_of(work,
123 					struct cifs_tcon,
124 					query_interfaces.work);
125 
126 	/*
127 	 * query server network interfaces, in case they change
128 	 */
129 	rc = SMB3_request_interfaces(0, tcon, false);
130 	if (rc) {
131 		cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
132 				__func__, rc);
133 	}
134 
135 	queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
136 			   (SMB_INTERFACE_POLL_INTERVAL * HZ));
137 }
138 
139 /*
140  * Update the tcpStatus for the server.
141  * This is used to signal the cifsd thread to call cifs_reconnect.
142  * ONLY the cifsd thread should call cifs_reconnect; for any other
143  * thread, use this function instead.
144  *
145  * @server: the tcp ses for which reconnect is needed
146  * @all_channels: if this needs to be done for all channels
147  */
148 void
149 cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
150 				bool all_channels)
151 {
152 	struct TCP_Server_Info *pserver;
153 	struct cifs_ses *ses;
154 	int i;
155 
156 	/* If server is a channel, select the primary channel */
157 	pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
158 
159 	spin_lock(&pserver->srv_lock);
160 	if (!all_channels) {
161 		pserver->tcpStatus = CifsNeedReconnect;
162 		spin_unlock(&pserver->srv_lock);
163 		return;
164 	}
165 	spin_unlock(&pserver->srv_lock);
166 
167 	spin_lock(&cifs_tcp_ses_lock);
168 	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
169 		spin_lock(&ses->chan_lock);
170 		for (i = 0; i < ses->chan_count; i++) {
171 			spin_lock(&ses->chans[i].server->srv_lock);
172 			ses->chans[i].server->tcpStatus = CifsNeedReconnect;
173 			spin_unlock(&ses->chans[i].server->srv_lock);
174 		}
175 		spin_unlock(&ses->chan_lock);
176 	}
177 	spin_unlock(&cifs_tcp_ses_lock);
178 }
179 
180 /*
181  * Mark all sessions and tcons for reconnect.
182  * IMPORTANT: make sure that this gets called only from
183  * cifsd thread. For any other thread, use
184  * cifs_signal_cifsd_for_reconnect
185  *
186  * @server: the tcp ses for which reconnect is needed
187  * @server needs to be previously set to CifsNeedReconnect.
188  * @mark_smb_session: whether even sessions need to be marked
189  */
190 void
191 cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
192 				      bool mark_smb_session)
193 {
194 	struct TCP_Server_Info *pserver;
195 	struct cifs_ses *ses, *nses;
196 	struct cifs_tcon *tcon;
197 
198 	/*
199 	 * before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they
200 	 * are not used until reconnected.
201 	 */
202 	cifs_dbg(FYI, "%s: marking necessary sessions and tcons for reconnect\n", __func__);
203 
204 	/* If server is a channel, select the primary channel */
205 	pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
206 
207 
208 	spin_lock(&cifs_tcp_ses_lock);
209 	list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
210 		/* check if iface is still active */
211 		if (!cifs_chan_is_iface_active(ses, server))
212 			cifs_chan_update_iface(ses, server);
213 
214 		spin_lock(&ses->chan_lock);
215 		if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server)) {
216 			spin_unlock(&ses->chan_lock);
217 			continue;
218 		}
219 
220 		if (mark_smb_session)
221 			CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses);
222 		else
223 			cifs_chan_set_need_reconnect(ses, server);
224 
225 		cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
226 			 __func__, ses->chans_need_reconnect);
227 
228 		/* If all channels need reconnect, then tcon needs reconnect */
229 		if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
230 			spin_unlock(&ses->chan_lock);
231 			continue;
232 		}
233 		spin_unlock(&ses->chan_lock);
234 
235 		spin_lock(&ses->ses_lock);
236 		ses->ses_status = SES_NEED_RECON;
237 		spin_unlock(&ses->ses_lock);
238 
239 		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
240 			tcon->need_reconnect = true;
241 			spin_lock(&tcon->tc_lock);
242 			tcon->status = TID_NEED_RECON;
243 			spin_unlock(&tcon->tc_lock);
244 		}
245 		if (ses->tcon_ipc) {
246 			ses->tcon_ipc->need_reconnect = true;
247 			spin_lock(&ses->tcon_ipc->tc_lock);
248 			ses->tcon_ipc->status = TID_NEED_RECON;
249 			spin_unlock(&ses->tcon_ipc->tc_lock);
250 		}
251 	}
252 	spin_unlock(&cifs_tcp_ses_lock);
253 }
254 
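/*
 * Tear down the existing socket (or SMBDirect connection), reset the
 * per-connection state such as the sequence number and session key, and move
 * all pending mids to a private list so that their callbacks can be issued
 * with MID_RETRY_NEEDED.
 */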
255 static void
256 cifs_abort_connection(struct TCP_Server_Info *server)
257 {
258 	struct mid_q_entry *mid, *nmid;
259 	struct list_head retry_list;
260 
261 	server->maxBuf = 0;
262 	server->max_read = 0;
263 
264 	/* do not want to be sending data on a socket we are freeing */
265 	cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
266 	cifs_server_lock(server);
267 	if (server->ssocket) {
268 		cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n", server->ssocket->state,
269 			 server->ssocket->flags);
270 		kernel_sock_shutdown(server->ssocket, SHUT_WR);
271 		cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n", server->ssocket->state,
272 			 server->ssocket->flags);
273 		sock_release(server->ssocket);
274 		server->ssocket = NULL;
275 	}
276 	server->sequence_number = 0;
277 	server->session_estab = false;
278 	kfree_sensitive(server->session_key.response);
279 	server->session_key.response = NULL;
280 	server->session_key.len = 0;
281 	server->lstrp = jiffies;
282 
283 	/* mark submitted MIDs for retry and issue callback */
284 	INIT_LIST_HEAD(&retry_list);
285 	cifs_dbg(FYI, "%s: moving mids to private list\n", __func__);
286 	spin_lock(&server->mid_lock);
287 	list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
288 		kref_get(&mid->refcount);
289 		if (mid->mid_state == MID_REQUEST_SUBMITTED)
290 			mid->mid_state = MID_RETRY_NEEDED;
291 		list_move(&mid->qhead, &retry_list);
292 		mid->mid_flags |= MID_DELETED;
293 	}
294 	spin_unlock(&server->mid_lock);
295 	cifs_server_unlock(server);
296 
297 	cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
298 	list_for_each_entry_safe(mid, nmid, &retry_list, qhead) {
299 		list_del_init(&mid->qhead);
300 		mid->callback(mid);
301 		release_mid(mid);
302 	}
303 
304 	if (cifs_rdma_enabled(server)) {
305 		cifs_server_lock(server);
306 		smbd_destroy(server);
307 		cifs_server_unlock(server);
308 	}
309 }
310 
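/*
 * Decide whether the tcp session should be reconnected: returns false if the
 * demultiplex thread is already exiting, otherwise records @num_targets and
 * marks the session as CifsNeedReconnect.
 */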
311 static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets)
312 {
313 	spin_lock(&server->srv_lock);
314 	server->nr_targets = num_targets;
315 	if (server->tcpStatus == CifsExiting) {
316 		/* the demux thread will exit normally next time through the loop */
317 		spin_unlock(&server->srv_lock);
318 		wake_up(&server->response_q);
319 		return false;
320 	}
321 
322 	cifs_dbg(FYI, "Mark tcp session as need reconnect\n");
323 	trace_smb3_reconnect(server->CurrentMid, server->conn_id,
324 			     server->hostname);
325 	server->tcpStatus = CifsNeedReconnect;
326 
327 	spin_unlock(&server->srv_lock);
328 	return true;
329 }
330 
331 /*
332  * cifs tcp session reconnection
333  *
334  * mark tcp session as reconnecting so it is temporarily locked
335  * mark all smb sessions as reconnecting for tcp session
336  * reconnect tcp session
337  * wake up waiters on reconnection? - (not needed currently)
338  *
339  * if mark_smb_session is passed as true, unconditionally mark
340  * the smb session (and tcon) for reconnect as well. This value
341  * doesn't really matter for non-multichannel scenario.
342  *
343  */
344 static int __cifs_reconnect(struct TCP_Server_Info *server,
345 			    bool mark_smb_session)
346 {
347 	int rc = 0;
348 
349 	if (!cifs_tcp_ses_needs_reconnect(server, 1))
350 		return 0;
351 
352 	cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);
353 
354 	cifs_abort_connection(server);
355 
356 	do {
357 		try_to_freeze();
358 		cifs_server_lock(server);
359 
360 		if (!cifs_swn_set_server_dstaddr(server)) {
361 			/* resolve the hostname again to make sure that IP address is up-to-date */
362 			rc = reconn_set_ipaddr_from_hostname(server);
363 			cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
364 		}
365 
366 		if (cifs_rdma_enabled(server))
367 			rc = smbd_reconnect(server);
368 		else
369 			rc = generic_ip_connect(server);
370 		if (rc) {
371 			cifs_server_unlock(server);
372 			cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
373 			msleep(3000);
374 		} else {
375 			atomic_inc(&tcpSesReconnectCount);
376 			set_credits(server, 1);
377 			spin_lock(&server->srv_lock);
378 			if (server->tcpStatus != CifsExiting)
379 				server->tcpStatus = CifsNeedNegotiate;
380 			spin_unlock(&server->srv_lock);
381 			cifs_swn_reset_server_dstaddr(server);
382 			cifs_server_unlock(server);
383 			mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
384 		}
385 	} while (server->tcpStatus == CifsNeedReconnect);
386 
387 	spin_lock(&server->srv_lock);
388 	if (server->tcpStatus == CifsNeedNegotiate)
389 		mod_delayed_work(cifsiod_wq, &server->echo, 0);
390 	spin_unlock(&server->srv_lock);
391 
392 	wake_up(&server->response_q);
393 	return rc;
394 }
395 
396 #ifdef CONFIG_CIFS_DFS_UPCALL
397 static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const char *target)
398 {
399 	int rc;
400 	char *hostname;
401 
402 	if (!cifs_swn_set_server_dstaddr(server)) {
403 		if (server->hostname != target) {
404 			hostname = extract_hostname(target);
405 			if (!IS_ERR(hostname)) {
406 				spin_lock(&server->srv_lock);
407 				kfree(server->hostname);
408 				server->hostname = hostname;
409 				spin_unlock(&server->srv_lock);
410 			} else {
411 				cifs_dbg(FYI, "%s: couldn't extract hostname or address from dfs target: %ld\n",
412 					 __func__, PTR_ERR(hostname));
413 				cifs_dbg(FYI, "%s: default to last target server: %s\n", __func__,
414 					 server->hostname);
415 			}
416 		}
417 		/* resolve the hostname again to make sure that IP address is up-to-date. */
418 		rc = reconn_set_ipaddr_from_hostname(server);
419 		cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
420 	}
421 	/* Reconnect the socket */
422 	if (cifs_rdma_enabled(server))
423 		rc = smbd_reconnect(server);
424 	else
425 		rc = generic_ip_connect(server);
426 
427 	return rc;
428 }
429 
430 static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_cache_tgt_list *tl,
431 				     struct dfs_cache_tgt_iterator **target_hint)
432 {
433 	int rc;
434 	struct dfs_cache_tgt_iterator *tit;
435 
436 	*target_hint = NULL;
437 
438 	/* If dfs target list is empty, then reconnect to last server */
439 	tit = dfs_cache_get_tgt_iterator(tl);
440 	if (!tit)
441 		return __reconnect_target_unlocked(server, server->hostname);
442 
443 	/* Otherwise, try every dfs target in @tl */
444 	for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
445 		rc = __reconnect_target_unlocked(server, dfs_cache_get_tgt_name(tit));
446 		if (!rc) {
447 			*target_hint = tit;
448 			break;
449 		}
450 	}
451 	return rc;
452 }
453 
454 static int reconnect_dfs_server(struct TCP_Server_Info *server)
455 {
456 	struct dfs_cache_tgt_iterator *target_hint = NULL;
457 	DFS_CACHE_TGT_LIST(tl);
458 	int num_targets = 0;
459 	int rc = 0;
460 
461 	/*
462 	 * Determine the number of dfs targets the referral path in @cifs_sb resolves to.
463 	 *
464 	 * smb2_reconnect() needs to know how long it should wait based upon the number of dfs
465 	 * targets (server->nr_targets).  It's also possible that the cached referral was cleared
466 	 * through /proc/fs/cifs/dfscache or the target list is empty due to server settings after
467 	 * refreshing the referral, so, in this case, default it to 1.
468 	 */
469 	mutex_lock(&server->refpath_lock);
470 	if (!dfs_cache_noreq_find(server->leaf_fullpath + 1, NULL, &tl))
471 		num_targets = dfs_cache_get_nr_tgts(&tl);
472 	mutex_unlock(&server->refpath_lock);
473 	if (!num_targets)
474 		num_targets = 1;
475 
476 	if (!cifs_tcp_ses_needs_reconnect(server, num_targets))
477 		return 0;
478 
479 	/*
480 	 * Unconditionally mark all sessions & tcons for reconnect as we might be connecting to a
481 	 * different server or share during failover.  It could be improved by adding some logic to
482 	 * only do that in case it connects to a different server or share, though.
483 	 */
484 	cifs_mark_tcp_ses_conns_for_reconnect(server, true);
485 
486 	cifs_abort_connection(server);
487 
488 	do {
489 		try_to_freeze();
490 		cifs_server_lock(server);
491 
492 		rc = reconnect_target_unlocked(server, &tl, &target_hint);
493 		if (rc) {
494 			/* Failed to reconnect socket */
495 			cifs_server_unlock(server);
496 			cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
497 			msleep(3000);
498 			continue;
499 		}
500 		/*
501 		 * Socket was created.  Update tcp session status to CifsNeedNegotiate so that a
502 		 * process waiting for reconnect will know it needs to re-establish session and tcon
503 		 * through the reconnected target server.
504 		 */
505 		atomic_inc(&tcpSesReconnectCount);
506 		set_credits(server, 1);
507 		spin_lock(&server->srv_lock);
508 		if (server->tcpStatus != CifsExiting)
509 			server->tcpStatus = CifsNeedNegotiate;
510 		spin_unlock(&server->srv_lock);
511 		cifs_swn_reset_server_dstaddr(server);
512 		cifs_server_unlock(server);
513 		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
514 	} while (server->tcpStatus == CifsNeedReconnect);
515 
516 	mutex_lock(&server->refpath_lock);
517 	dfs_cache_noreq_update_tgthint(server->leaf_fullpath + 1, target_hint);
518 	mutex_unlock(&server->refpath_lock);
519 	dfs_cache_free_tgts(&tl);
520 
521 	/* Need to set up echo worker again once connection has been established */
522 	spin_lock(&server->srv_lock);
523 	if (server->tcpStatus == CifsNeedNegotiate)
524 		mod_delayed_work(cifsiod_wq, &server->echo, 0);
525 	spin_unlock(&server->srv_lock);
526 
527 	wake_up(&server->response_q);
528 	return rc;
529 }
530 
531 int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
532 {
533 	mutex_lock(&server->refpath_lock);
534 	if (!server->leaf_fullpath) {
535 		mutex_unlock(&server->refpath_lock);
536 		return __cifs_reconnect(server, mark_smb_session);
537 	}
538 	mutex_unlock(&server->refpath_lock);
539 
540 	return reconnect_dfs_server(server);
541 }
542 #else
543 int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
544 {
545 	return __cifs_reconnect(server, mark_smb_session);
546 }
547 #endif
548 
549 static void
550 cifs_echo_request(struct work_struct *work)
551 {
552 	int rc;
553 	struct TCP_Server_Info *server = container_of(work,
554 					struct TCP_Server_Info, echo.work);
555 
556 	/*
557 	 * We cannot send an echo if it is disabled.
558 	 * Also, no need to ping if we got a response recently.
559 	 */
560 
561 	if (server->tcpStatus == CifsNeedReconnect ||
562 	    server->tcpStatus == CifsExiting ||
563 	    server->tcpStatus == CifsNew ||
564 	    (server->ops->can_echo && !server->ops->can_echo(server)) ||
565 	    time_before(jiffies, server->lstrp + server->echo_interval - HZ))
566 		goto requeue_echo;
567 
568 	rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
569 	cifs_server_dbg(FYI, "send echo request: rc = %d\n", rc);
570 
571 	/* Check witness registrations */
572 	cifs_swn_check();
573 
574 requeue_echo:
575 	queue_delayed_work(cifsiod_wq, &server->echo, server->echo_interval);
576 }
577 
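/*
 * Make sure the demultiplex thread has both a large and a small receive
 * buffer available, (re)allocating them from the buffer pools if needed.
 * Returns false after a short sleep on allocation failure so the caller
 * can retry.
 */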
578 static bool
579 allocate_buffers(struct TCP_Server_Info *server)
580 {
581 	if (!server->bigbuf) {
582 		server->bigbuf = (char *)cifs_buf_get();
583 		if (!server->bigbuf) {
584 			cifs_server_dbg(VFS, "No memory for large SMB response\n");
585 			msleep(3000);
586 			/* retry will check if exiting */
587 			return false;
588 		}
589 	} else if (server->large_buf) {
590 		/* we are reusing a dirty large buf, clear its start */
591 		memset(server->bigbuf, 0, HEADER_SIZE(server));
592 	}
593 
594 	if (!server->smallbuf) {
595 		server->smallbuf = (char *)cifs_small_buf_get();
596 		if (!server->smallbuf) {
597 			cifs_server_dbg(VFS, "No memory for SMB response\n");
598 			msleep(1000);
599 			/* retry will check if exiting */
600 			return false;
601 		}
602 		/* beginning of smb buffer is cleared in our buf_get */
603 	} else {
604 		/* if existing small buf clear beginning */
605 		memset(server->smallbuf, 0, HEADER_SIZE(server));
606 	}
607 
608 	return true;
609 }
610 
611 static bool
612 server_unresponsive(struct TCP_Server_Info *server)
613 {
614 	/*
615 	 * We need to wait 3 echo intervals to make sure we handle such
616 	 * situations right:
617 	 * 1s  client sends a normal SMB request
618 	 * 2s  client gets a response
619 	 * 30s echo workqueue job pops, and decides we got a response recently
620 	 *     and don't need to send another
621 	 * ...
622 	 * 65s kernel_recvmsg times out, and we see that we haven't gotten
623 	 *     a response in >60s.
624 	 */
625 	spin_lock(&server->srv_lock);
626 	if ((server->tcpStatus == CifsGood ||
627 	    server->tcpStatus == CifsNeedNegotiate) &&
628 	    (!server->ops->can_echo || server->ops->can_echo(server)) &&
629 	    time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
630 		spin_unlock(&server->srv_lock);
631 		cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
632 			 (3 * server->echo_interval) / HZ);
633 		cifs_reconnect(server, false);
634 		return true;
635 	}
636 	spin_unlock(&server->srv_lock);
637 
638 	return false;
639 }
640 
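/* true if there are no requests in flight and no credits of any kind left */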
641 static inline bool
642 zero_credits(struct TCP_Server_Info *server)
643 {
644 	int val;
645 
646 	spin_lock(&server->req_lock);
647 	val = server->credits + server->echo_credits + server->oplock_credits;
648 	if (server->in_flight == 0 && val == 0) {
649 		spin_unlock(&server->req_lock);
650 		return true;
651 	}
652 	spin_unlock(&server->req_lock);
653 	return false;
654 }
655 
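/*
 * Read from the socket (or SMBDirect connection) into @smb_msg until its
 * iterator is exhausted, triggering a reconnect on unresponsiveness, lack of
 * credits, or fatal socket errors. Returns the number of bytes read or a
 * negative error code.
 */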
656 static int
657 cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
658 {
659 	int length = 0;
660 	int total_read;
661 
662 	for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
663 		try_to_freeze();
664 
665 		/* reconnect if no credits and no requests in flight */
666 		if (zero_credits(server)) {
667 			cifs_reconnect(server, false);
668 			return -ECONNABORTED;
669 		}
670 
671 		if (server_unresponsive(server))
672 			return -ECONNABORTED;
673 		if (cifs_rdma_enabled(server) && server->smbd_conn)
674 			length = smbd_recv(server->smbd_conn, smb_msg);
675 		else
676 			length = sock_recvmsg(server->ssocket, smb_msg, 0);
677 
678 		spin_lock(&server->srv_lock);
679 		if (server->tcpStatus == CifsExiting) {
680 			spin_unlock(&server->srv_lock);
681 			return -ESHUTDOWN;
682 		}
683 
684 		if (server->tcpStatus == CifsNeedReconnect) {
685 			spin_unlock(&server->srv_lock);
686 			cifs_reconnect(server, false);
687 			return -ECONNABORTED;
688 		}
689 		spin_unlock(&server->srv_lock);
690 
691 		if (length == -ERESTARTSYS ||
692 		    length == -EAGAIN ||
693 		    length == -EINTR) {
694 			/*
695 			 * Minimum sleep to prevent looping, allowing socket
696 			 * to clear and app threads to set tcpStatus
697 			 * CifsNeedReconnect if server hung.
698 			 */
699 			usleep_range(1000, 2000);
700 			length = 0;
701 			continue;
702 		}
703 
704 		if (length <= 0) {
705 			cifs_dbg(FYI, "Received no data or error: %d\n", length);
706 			cifs_reconnect(server, false);
707 			return -ECONNABORTED;
708 		}
709 	}
710 	return total_read;
711 }
712 
713 int
714 cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
715 		      unsigned int to_read)
716 {
717 	struct msghdr smb_msg = {};
718 	struct kvec iov = {.iov_base = buf, .iov_len = to_read};
719 	iov_iter_kvec(&smb_msg.msg_iter, ITER_DEST, &iov, 1, to_read);
720 
721 	return cifs_readv_from_socket(server, &smb_msg);
722 }
723 
724 ssize_t
725 cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read)
726 {
727 	struct msghdr smb_msg = {};
728 
729 	/*
730 	 *  iov_iter_discard already sets smb_msg.type, count and iov_offset,
731 	 *  and cifs_readv_from_socket sets msg_control and msg_controllen,
732 	 *  so there is little to initialize in struct msghdr
733 	 */
734 	iov_iter_discard(&smb_msg.msg_iter, ITER_DEST, to_read);
735 
736 	return cifs_readv_from_socket(server, &smb_msg);
737 }
738 
739 int
740 cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
741 	unsigned int page_offset, unsigned int to_read)
742 {
743 	struct msghdr smb_msg = {};
744 	struct bio_vec bv;
745 
746 	bvec_set_page(&bv, page, to_read, page_offset);
747 	iov_iter_bvec(&smb_msg.msg_iter, ITER_DEST, &bv, 1, to_read);
748 	return cifs_readv_from_socket(server, &smb_msg);
749 }
750 
751 int
752 cifs_read_iter_from_socket(struct TCP_Server_Info *server, struct iov_iter *iter,
753 			   unsigned int to_read)
754 {
755 	struct msghdr smb_msg = { .msg_iter = *iter };
756 	int ret;
757 
758 	iov_iter_truncate(&smb_msg.msg_iter, to_read);
759 	ret = cifs_readv_from_socket(server, &smb_msg);
760 	if (ret > 0)
761 		iov_iter_advance(iter, ret);
762 	return ret;
763 }
764 
765 static bool
766 is_smb_response(struct TCP_Server_Info *server, unsigned char type)
767 {
768 	/*
769 	 * The first byte of the length field (big endian) is actually
770 	 * not part of the length but the RFC 1002 message type, with
771 	 * the most common value, zero, denoting regular session data.
772 	 */
773 	switch (type) {
774 	case RFC1002_SESSION_MESSAGE:
775 		/* Regular SMB response */
776 		return true;
777 	case RFC1002_SESSION_KEEP_ALIVE:
778 		cifs_dbg(FYI, "RFC 1002 session keep alive\n");
779 		break;
780 	case RFC1002_POSITIVE_SESSION_RESPONSE:
781 		cifs_dbg(FYI, "RFC 1002 positive session response\n");
782 		break;
783 	case RFC1002_NEGATIVE_SESSION_RESPONSE:
784 		/*
785 		 * We get this from Windows 98 instead of an error on
786 		 * SMB negprot response.
787 		 */
788 		cifs_dbg(FYI, "RFC 1002 negative session response\n");
789 		/* give server a second to clean up */
790 		msleep(1000);
791 		/*
792 		 * Always try 445 first on reconnect since we get a NACK
793 		 * on some servers if we ever connected to port 139 (the
794 		 * NACK is because we do not begin with an RFC1001 session
795 		 * initialize frame).
796 		 */
797 		cifs_set_port((struct sockaddr *)&server->dstaddr, CIFS_PORT);
798 		cifs_reconnect(server, true);
799 		break;
800 	default:
801 		cifs_server_dbg(VFS, "RFC 1002 unknown response type 0x%x\n", type);
802 		cifs_reconnect(server, true);
803 	}
804 
805 	return false;
806 }
807 
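/*
 * Record whether the response was received intact or was malformed and remove
 * the mid from the pending queue, warning if it was already deleted.
 */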
808 void
809 dequeue_mid(struct mid_q_entry *mid, bool malformed)
810 {
811 #ifdef CONFIG_CIFS_STATS2
812 	mid->when_received = jiffies;
813 #endif
814 	spin_lock(&mid->server->mid_lock);
815 	if (!malformed)
816 		mid->mid_state = MID_RESPONSE_RECEIVED;
817 	else
818 		mid->mid_state = MID_RESPONSE_MALFORMED;
819 	/*
820 	 * Trying to handle/dequeue a mid after the send_recv()
821 	 * function has finished processing it is a bug.
822 	 */
823 	if (mid->mid_flags & MID_DELETED) {
824 		spin_unlock(&mid->server->mid_lock);
825 		pr_warn_once("trying to dequeue a deleted mid\n");
826 	} else {
827 		list_del_init(&mid->qhead);
828 		mid->mid_flags |= MID_DELETED;
829 		spin_unlock(&mid->server->mid_lock);
830 	}
831 }
832 
833 static unsigned int
834 smb2_get_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
835 {
836 	struct smb2_hdr *shdr = (struct smb2_hdr *)buffer;
837 
838 	/*
839 	 * SMB1 does not use credits.
840 	 */
841 	if (is_smb1(server))
842 		return 0;
843 
844 	return le16_to_cpu(shdr->CreditRequest);
845 }
846 
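/*
 * Attach the response buffer to the matched mid (taking ownership of the
 * server receive buffer) and dequeue it, unless the trans2 check already
 * consumed the response.
 */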
847 static void
848 handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
849 	   char *buf, int malformed)
850 {
851 	if (server->ops->check_trans2 &&
852 	    server->ops->check_trans2(mid, server, buf, malformed))
853 		return;
854 	mid->credits_received = smb2_get_credits_from_hdr(buf, server);
855 	mid->resp_buf = buf;
856 	mid->large_buf = server->large_buf;
857 	/* Was previous buf put in mpx struct for multi-rsp? */
858 	if (!mid->multiRsp) {
859 		/* smb buffer will be freed by user thread */
860 		if (server->large_buf)
861 			server->bigbuf = NULL;
862 		else
863 			server->smallbuf = NULL;
864 	}
865 	dequeue_mid(mid, malformed);
866 }
867 
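/*
 * Decide whether SMB signing should be enabled on this connection based on
 * what the server requires or allows and what the mount options and global
 * security flags request. Returns -EOPNOTSUPP on an irreconcilable mismatch.
 */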
868 int
869 cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
870 {
871 	bool srv_sign_required = server->sec_mode & server->vals->signing_required;
872 	bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled;
873 	bool mnt_sign_enabled;
874 
875 	/*
876 	 * Is signing required by mnt options? If not then check
877 	 * global_secflags to see if it is there.
878 	 */
879 	if (!mnt_sign_required)
880 		mnt_sign_required = ((global_secflags & CIFSSEC_MUST_SIGN) ==
881 						CIFSSEC_MUST_SIGN);
882 
883 	/*
884 	 * If signing is required then it's automatically enabled too,
885 	 * otherwise, check to see if the secflags allow it.
886 	 */
887 	mnt_sign_enabled = mnt_sign_required ? mnt_sign_required :
888 				(global_secflags & CIFSSEC_MAY_SIGN);
889 
890 	/* If server requires signing, does client allow it? */
891 	if (srv_sign_required) {
892 		if (!mnt_sign_enabled) {
893 			cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!\n");
894 			return -EOPNOTSUPP;
895 		}
896 		server->sign = true;
897 	}
898 
899 	/* If client requires signing, does server allow it? */
900 	if (mnt_sign_required) {
901 		if (!srv_sign_enabled) {
902 			cifs_dbg(VFS, "Server does not support signing!\n");
903 			return -EOPNOTSUPP;
904 		}
905 		server->sign = true;
906 	}
907 
908 	if (cifs_rdma_enabled(server) && server->sign)
909 		cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled\n");
910 
911 	return 0;
912 }
913 
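/*
 * Final teardown of a TCP_Server_Info as the demultiplex thread exits: unlink
 * it from the global list, mark it CifsExiting, release the socket, fail any
 * remaining mids with MID_SHUTDOWN, and free the structure.
 */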
914 static noinline_for_stack void
915 clean_demultiplex_info(struct TCP_Server_Info *server)
916 {
917 	int length;
918 
919 	/* take it off the list, if it's not already */
920 	spin_lock(&server->srv_lock);
921 	list_del_init(&server->tcp_ses_list);
922 	spin_unlock(&server->srv_lock);
923 
924 	cancel_delayed_work_sync(&server->echo);
925 
926 	spin_lock(&server->srv_lock);
927 	server->tcpStatus = CifsExiting;
928 	spin_unlock(&server->srv_lock);
929 	wake_up_all(&server->response_q);
930 
931 	/* check if we have blocked requests that need to free */
932 	spin_lock(&server->req_lock);
933 	if (server->credits <= 0)
934 		server->credits = 1;
935 	spin_unlock(&server->req_lock);
936 	/*
937 	 * Although there should not be any requests blocked on this queue, it
938 	 * cannot hurt to be paranoid and try to wake up requests that may
939 	 * have been blocked when more than 50 at a time were on the wire to the
940 	 * same server - they will now see the session is in exit state and get
941 	 * out of SendReceive.
942 	 */
943 	wake_up_all(&server->request_q);
944 	/* give those requests time to exit */
945 	msleep(125);
946 	if (cifs_rdma_enabled(server))
947 		smbd_destroy(server);
948 	if (server->ssocket) {
949 		sock_release(server->ssocket);
950 		server->ssocket = NULL;
951 	}
952 
953 	if (!list_empty(&server->pending_mid_q)) {
954 		struct list_head dispose_list;
955 		struct mid_q_entry *mid_entry;
956 		struct list_head *tmp, *tmp2;
957 
958 		INIT_LIST_HEAD(&dispose_list);
959 		spin_lock(&server->mid_lock);
960 		list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
961 			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
962 			cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid);
963 			kref_get(&mid_entry->refcount);
964 			mid_entry->mid_state = MID_SHUTDOWN;
965 			list_move(&mid_entry->qhead, &dispose_list);
966 			mid_entry->mid_flags |= MID_DELETED;
967 		}
968 		spin_unlock(&server->mid_lock);
969 
970 		/* now walk dispose list and issue callbacks */
971 		list_for_each_safe(tmp, tmp2, &dispose_list) {
972 			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
973 			cifs_dbg(FYI, "Callback mid %llu\n", mid_entry->mid);
974 			list_del_init(&mid_entry->qhead);
975 			mid_entry->callback(mid_entry);
976 			release_mid(mid_entry);
977 		}
978 		/* 1/8th of sec is more than enough time for them to exit */
979 		msleep(125);
980 	}
981 
982 	if (!list_empty(&server->pending_mid_q)) {
983 		/*
984 		 * mpx threads have not exited yet; give them at least the smb
985 		 * send timeout time for long ops.
986 		 *
987 		 * Due to delays on oplock break requests, we need to wait at
988 		 * least 45 seconds before giving up on a request getting a
989 		 * response and going ahead and killing cifsd.
990 		 */
991 		cifs_dbg(FYI, "Wait for exit from demultiplex thread\n");
992 		msleep(46000);
993 		/*
994 		 * If threads still have not exited, they are probably never
995 		 * coming home; not much else we can do but free the memory.
996 		 */
997 	}
998 
999 	kfree(server->leaf_fullpath);
1000 	kfree(server);
1001 
1002 	length = atomic_dec_return(&tcpSesAllocCount);
1003 	if (length > 0)
1004 		mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
1005 }
1006 
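/*
 * Default receive path: read the remainder of the PDU from the socket,
 * switching to the large buffer if it will not fit in a small one, and hand
 * it to cifs_handle_standard().
 */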
1007 static int
1008 standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1009 {
1010 	int length;
1011 	char *buf = server->smallbuf;
1012 	unsigned int pdu_length = server->pdu_size;
1013 
1014 	/* make sure this will fit in a large buffer */
1015 	if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) -
1016 	    HEADER_PREAMBLE_SIZE(server)) {
1017 		cifs_server_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length);
1018 		cifs_reconnect(server, true);
1019 		return -ECONNABORTED;
1020 	}
1021 
1022 	/* switch to large buffer if too big for a small one */
1023 	if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
1024 		server->large_buf = true;
1025 		memcpy(server->bigbuf, buf, server->total_read);
1026 		buf = server->bigbuf;
1027 	}
1028 
1029 	/* now read the rest */
1030 	length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
1031 				       pdu_length - MID_HEADER_SIZE(server));
1032 
1033 	if (length < 0)
1034 		return length;
1035 	server->total_read += length;
1036 
1037 	dump_smb(buf, server->total_read);
1038 
1039 	return cifs_handle_standard(server, mid);
1040 }
1041 
1042 int
1043 cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1044 {
1045 	char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
1046 	int rc;
1047 
1048 	/*
1049 	 * We know that we received enough to get to the MID as we
1050 	 * checked the pdu_length earlier. Now check to see
1051 	 * if the rest of the header is OK.
1052 	 *
1053 	 * 48 bytes is enough to display the header and a little bit
1054 	 * into the payload for debugging purposes.
1055 	 */
1056 	rc = server->ops->check_message(buf, server->total_read, server);
1057 	if (rc)
1058 		cifs_dump_mem("Bad SMB: ", buf,
1059 			min_t(unsigned int, server->total_read, 48));
1060 
1061 	if (server->ops->is_session_expired &&
1062 	    server->ops->is_session_expired(buf)) {
1063 		cifs_reconnect(server, true);
1064 		return -1;
1065 	}
1066 
1067 	if (server->ops->is_status_pending &&
1068 	    server->ops->is_status_pending(buf, server))
1069 		return -1;
1070 
1071 	if (!mid)
1072 		return rc;
1073 
1074 	handle_mid(mid, server, buf, rc);
1075 	return 0;
1076 }
1077 
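/*
 * Add any credits granted in this SMB2+ header to the server's credit count
 * and wake up waiters; used for frames that are not dispatched through a mid
 * callback (e.g. oplock breaks and unmatched responses).
 */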
1078 static void
1079 smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
1080 {
1081 	struct smb2_hdr *shdr = (struct smb2_hdr *)buffer;
1082 	int scredits, in_flight;
1083 
1084 	/*
1085 	 * SMB1 does not use credits.
1086 	 */
1087 	if (is_smb1(server))
1088 		return;
1089 
1090 	if (shdr->CreditRequest) {
1091 		spin_lock(&server->req_lock);
1092 		server->credits += le16_to_cpu(shdr->CreditRequest);
1093 		scredits = server->credits;
1094 		in_flight = server->in_flight;
1095 		spin_unlock(&server->req_lock);
1096 		wake_up(&server->request_q);
1097 
1098 		trace_smb3_hdr_credits(server->CurrentMid,
1099 				server->conn_id, server->hostname, scredits,
1100 				le16_to_cpu(shdr->CreditRequest), in_flight);
1101 		cifs_server_dbg(FYI, "%s: added %u credits total=%d\n",
1102 				__func__, le16_to_cpu(shdr->CreditRequest),
1103 				scredits);
1104 	}
1105 }
1106 
1107 
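/*
 * Main loop of the cifsd receive thread: read each PDU header off the socket,
 * match it to a pending mid (or handle oplock breaks and unknown frames), and
 * invoke the mid callbacks. Runs until tcpStatus becomes CifsExiting, then
 * cleans up the connection state.
 */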
1108 static int
1109 cifs_demultiplex_thread(void *p)
1110 {
1111 	int i, num_mids, length;
1112 	struct TCP_Server_Info *server = p;
1113 	unsigned int pdu_length;
1114 	unsigned int next_offset;
1115 	char *buf = NULL;
1116 	struct task_struct *task_to_wake = NULL;
1117 	struct mid_q_entry *mids[MAX_COMPOUND];
1118 	char *bufs[MAX_COMPOUND];
1119 	unsigned int noreclaim_flag, num_io_timeout = 0;
1120 	bool pending_reconnect = false;
1121 
1122 	noreclaim_flag = memalloc_noreclaim_save();
1123 	cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current));
1124 
1125 	length = atomic_inc_return(&tcpSesAllocCount);
1126 	if (length > 1)
1127 		mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
1128 
1129 	set_freezable();
1130 	allow_kernel_signal(SIGKILL);
1131 	while (server->tcpStatus != CifsExiting) {
1132 		if (try_to_freeze())
1133 			continue;
1134 
1135 		if (!allocate_buffers(server))
1136 			continue;
1137 
1138 		server->large_buf = false;
1139 		buf = server->smallbuf;
1140 		pdu_length = 4; /* enough to get RFC1001 header */
1141 
1142 		length = cifs_read_from_socket(server, buf, pdu_length);
1143 		if (length < 0)
1144 			continue;
1145 
1146 		if (is_smb1(server))
1147 			server->total_read = length;
1148 		else
1149 			server->total_read = 0;
1150 
1151 		/*
1152 		 * The right amount was read from socket - 4 bytes,
1153 		 * so we can now interpret the length field.
1154 		 */
1155 		pdu_length = get_rfc1002_length(buf);
1156 
1157 		cifs_dbg(FYI, "RFC1002 header 0x%x\n", pdu_length);
1158 		if (!is_smb_response(server, buf[0]))
1159 			continue;
1160 
1161 		pending_reconnect = false;
1162 next_pdu:
1163 		server->pdu_size = pdu_length;
1164 
1165 		/* make sure we have enough to get to the MID */
1166 		if (server->pdu_size < MID_HEADER_SIZE(server)) {
1167 			cifs_server_dbg(VFS, "SMB response too short (%u bytes)\n",
1168 				 server->pdu_size);
1169 			cifs_reconnect(server, true);
1170 			continue;
1171 		}
1172 
1173 		/* read down to the MID */
1174 		length = cifs_read_from_socket(server,
1175 			     buf + HEADER_PREAMBLE_SIZE(server),
1176 			     MID_HEADER_SIZE(server));
1177 		if (length < 0)
1178 			continue;
1179 		server->total_read += length;
1180 
1181 		if (server->ops->next_header) {
1182 			next_offset = server->ops->next_header(buf);
1183 			if (next_offset)
1184 				server->pdu_size = next_offset;
1185 		}
1186 
1187 		memset(mids, 0, sizeof(mids));
1188 		memset(bufs, 0, sizeof(bufs));
1189 		num_mids = 0;
1190 
1191 		if (server->ops->is_transform_hdr &&
1192 		    server->ops->receive_transform &&
1193 		    server->ops->is_transform_hdr(buf)) {
1194 			length = server->ops->receive_transform(server,
1195 								mids,
1196 								bufs,
1197 								&num_mids);
1198 		} else {
1199 			mids[0] = server->ops->find_mid(server, buf);
1200 			bufs[0] = buf;
1201 			num_mids = 1;
1202 
1203 			if (!mids[0] || !mids[0]->receive)
1204 				length = standard_receive3(server, mids[0]);
1205 			else
1206 				length = mids[0]->receive(server, mids[0]);
1207 		}
1208 
1209 		if (length < 0) {
1210 			for (i = 0; i < num_mids; i++)
1211 				if (mids[i])
1212 					release_mid(mids[i]);
1213 			continue;
1214 		}
1215 
1216 		if (server->ops->is_status_io_timeout &&
1217 		    server->ops->is_status_io_timeout(buf)) {
1218 			num_io_timeout++;
1219 			if (num_io_timeout > MAX_STATUS_IO_TIMEOUT) {
1220 				cifs_server_dbg(VFS,
1221 						"Number of request timeouts exceeded %d. Reconnecting",
1222 						MAX_STATUS_IO_TIMEOUT);
1223 
1224 				pending_reconnect = true;
1225 				num_io_timeout = 0;
1226 			}
1227 		}
1228 
1229 		server->lstrp = jiffies;
1230 
1231 		for (i = 0; i < num_mids; i++) {
1232 			if (mids[i] != NULL) {
1233 				mids[i]->resp_buf_size = server->pdu_size;
1234 
1235 				if (bufs[i] != NULL) {
1236 					if (server->ops->is_network_name_deleted &&
1237 					    server->ops->is_network_name_deleted(bufs[i],
1238 										 server)) {
1239 						cifs_server_dbg(FYI,
1240 								"Share deleted. Reconnect needed");
1241 					}
1242 				}
1243 
1244 				if (!mids[i]->multiRsp || mids[i]->multiEnd)
1245 					mids[i]->callback(mids[i]);
1246 
1247 				release_mid(mids[i]);
1248 			} else if (server->ops->is_oplock_break &&
1249 				   server->ops->is_oplock_break(bufs[i],
1250 								server)) {
1251 				smb2_add_credits_from_hdr(bufs[i], server);
1252 				cifs_dbg(FYI, "Received oplock break\n");
1253 			} else {
1254 				cifs_server_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
1255 						atomic_read(&mid_count));
1256 				cifs_dump_mem("Received Data is: ", bufs[i],
1257 					      HEADER_SIZE(server));
1258 				smb2_add_credits_from_hdr(bufs[i], server);
1259 #ifdef CONFIG_CIFS_DEBUG2
1260 				if (server->ops->dump_detail)
1261 					server->ops->dump_detail(bufs[i],
1262 								 server);
1263 				cifs_dump_mids(server);
1264 #endif /* CIFS_DEBUG2 */
1265 			}
1266 		}
1267 
1268 		if (pdu_length > server->pdu_size) {
1269 			if (!allocate_buffers(server))
1270 				continue;
1271 			pdu_length -= server->pdu_size;
1272 			server->total_read = 0;
1273 			server->large_buf = false;
1274 			buf = server->smallbuf;
1275 			goto next_pdu;
1276 		}
1277 
1278 		/* do this reconnect at the very end after processing all MIDs */
1279 		if (pending_reconnect)
1280 			cifs_reconnect(server, true);
1281 
1282 	} /* end while !EXITING */
1283 
1284 	/* buffer usually freed in free_mid - need to free it here on exit */
1285 	cifs_buf_release(server->bigbuf);
1286 	if (server->smallbuf) /* no sense logging a debug message if NULL */
1287 		cifs_small_buf_release(server->smallbuf);
1288 
1289 	task_to_wake = xchg(&server->tsk, NULL);
1290 	clean_demultiplex_info(server);
1291 
1292 	/* if server->tsk was NULL then wait for a signal before exiting */
1293 	if (!task_to_wake) {
1294 		set_current_state(TASK_INTERRUPTIBLE);
1295 		while (!signal_pending(current)) {
1296 			schedule();
1297 			set_current_state(TASK_INTERRUPTIBLE);
1298 		}
1299 		set_current_state(TASK_RUNNING);
1300 	}
1301 
1302 	memalloc_noreclaim_restore(noreclaim_flag);
1303 	module_put_and_kthread_exit(0);
1304 }
1305 
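/*
 * Compare two socket addresses, returning 0 when they are equal and a
 * non-zero value that imposes an ordering across address families otherwise.
 */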
1306 int
1307 cifs_ipaddr_cmp(struct sockaddr *srcaddr, struct sockaddr *rhs)
1308 {
1309 	struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
1310 	struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;
1311 	struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
1312 	struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;
1313 
1314 	switch (srcaddr->sa_family) {
1315 	case AF_UNSPEC:
1316 		switch (rhs->sa_family) {
1317 		case AF_UNSPEC:
1318 			return 0;
1319 		case AF_INET:
1320 		case AF_INET6:
1321 			return 1;
1322 		default:
1323 			return -1;
1324 		}
1325 	case AF_INET: {
1326 		switch (rhs->sa_family) {
1327 		case AF_UNSPEC:
1328 			return -1;
1329 		case AF_INET:
1330 			return memcmp(saddr4, vaddr4,
1331 				      sizeof(struct sockaddr_in));
1332 		case AF_INET6:
1333 			return 1;
1334 		default:
1335 			return -1;
1336 		}
1337 	}
1338 	case AF_INET6: {
1339 		switch (rhs->sa_family) {
1340 		case AF_UNSPEC:
1341 		case AF_INET:
1342 			return -1;
1343 		case AF_INET6:
1344 			return memcmp(saddr6,
1345 				      vaddr6,
1346 				      sizeof(struct sockaddr_in6));
1347 		default:
1348 			return -1;
1349 		}
1350 	}
1351 	default:
1352 		return -1; /* don't expect to be here */
1353 	}
1354 }
1355 
1356 /*
1357  * Returns true if srcaddr isn't specified and rhs isn't specified, or
1358  * if srcaddr is specified and matches the IP address of the rhs argument
1359  */
1360 bool
1361 cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs)
1362 {
1363 	switch (srcaddr->sa_family) {
1364 	case AF_UNSPEC:
1365 		return (rhs->sa_family == AF_UNSPEC);
1366 	case AF_INET: {
1367 		struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
1368 		struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;
1369 		return (saddr4->sin_addr.s_addr == vaddr4->sin_addr.s_addr);
1370 	}
1371 	case AF_INET6: {
1372 		struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
1373 		struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;
1374 		return (ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr)
1375 			&& saddr6->sin6_scope_id == vaddr6->sin6_scope_id);
1376 	}
1377 	default:
1378 		WARN_ON(1);
1379 		return false; /* don't expect to be here */
1380 	}
1381 }
1382 
1383 /*
1384  * If no port is specified in addr structure, we try to match with 445 port
1385  * and if it fails - with 139 ports. It should be called only if address
1386  * families of server and addr are equal.
1387  */
1388 static bool
1389 match_port(struct TCP_Server_Info *server, struct sockaddr *addr)
1390 {
1391 	__be16 port, *sport;
1392 
1393 	/* SMBDirect manages its own ports, don't match it here */
1394 	if (server->rdma)
1395 		return true;
1396 
1397 	switch (addr->sa_family) {
1398 	case AF_INET:
1399 		sport = &((struct sockaddr_in *) &server->dstaddr)->sin_port;
1400 		port = ((struct sockaddr_in *) addr)->sin_port;
1401 		break;
1402 	case AF_INET6:
1403 		sport = &((struct sockaddr_in6 *) &server->dstaddr)->sin6_port;
1404 		port = ((struct sockaddr_in6 *) addr)->sin6_port;
1405 		break;
1406 	default:
1407 		WARN_ON(1);
1408 		return false;
1409 	}
1410 
1411 	if (!port) {
1412 		port = htons(CIFS_PORT);
1413 		if (port == *sport)
1414 			return true;
1415 
1416 		port = htons(RFC1001_PORT);
1417 	}
1418 
1419 	return port == *sport;
1420 }
1421 
1422 static bool match_server_address(struct TCP_Server_Info *server, struct sockaddr *addr)
1423 {
1424 	if (!cifs_match_ipaddr(addr, (struct sockaddr *)&server->dstaddr))
1425 		return false;
1426 
1427 	return true;
1428 }
1429 
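/*
 * Check whether the security settings negotiated on @server (sectype and
 * signing) are compatible with what the new mount context requests.
 */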
1430 static bool
1431 match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
1432 {
1433 	/*
1434 	 * The select_sectype function should either return the ctx->sectype
1435 	 * that was specified, or "Unspecified" if that sectype was not
1436 	 * compatible with the given NEGOTIATE request.
1437 	 */
1438 	if (server->ops->select_sectype(server, ctx->sectype)
1439 	     == Unspecified)
1440 		return false;
1441 
1442 	/*
1443 	 * Now check if signing mode is acceptable. No need to check
1444 	 * global_secflags at this point since if MUST_SIGN is set then
1445 	 * the server->sign had better be too.
1446 	 */
1447 	if (ctx->sign && !server->sign)
1448 		return false;
1449 
1450 	return true;
1451 }
1452 
1453 /* this function must be called with srv_lock held */
1454 static int match_server(struct TCP_Server_Info *server,
1455 			struct smb3_fs_context *ctx,
1456 			bool match_super)
1457 {
1458 	struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
1459 
1460 	lockdep_assert_held(&server->srv_lock);
1461 
1462 	if (ctx->nosharesock)
1463 		return 0;
1464 
1465 	/* this server does not share socket */
1466 	if (server->nosharesock)
1467 		return 0;
1468 
1469 	/* If multidialect negotiation see if existing sessions match one */
1470 	if (strcmp(ctx->vals->version_string, SMB3ANY_VERSION_STRING) == 0) {
1471 		if (server->vals->protocol_id < SMB30_PROT_ID)
1472 			return 0;
1473 	} else if (strcmp(ctx->vals->version_string,
1474 		   SMBDEFAULT_VERSION_STRING) == 0) {
1475 		if (server->vals->protocol_id < SMB21_PROT_ID)
1476 			return 0;
1477 	} else if ((server->vals != ctx->vals) || (server->ops != ctx->ops))
1478 		return 0;
1479 
1480 	if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
1481 		return 0;
1482 
1483 	if (!cifs_match_ipaddr((struct sockaddr *)&ctx->srcaddr,
1484 			       (struct sockaddr *)&server->srcaddr))
1485 		return 0;
1486 	/*
1487 	 * When matching cifs.ko superblocks (@match_super == true), we can't
1488 	 * really match either @server->leaf_fullpath or @server->dstaddr
1489 	 * directly since this @server might belong to a completely different
1490 	 * server -- in case of domain-based DFS referrals or DFS links -- as
1491 	 * provided earlier by mount(2) through 'source' and 'ip' options.
1492 	 *
1493 	 * Otherwise, match the DFS referral in @server->leaf_fullpath or the
1494 	 * destination address in @server->dstaddr.
1495 	 *
1496 	 * When using 'nodfs' mount option, we avoid sharing it with DFS
1497 	 * connections as they might failover.
1498 	 */
1499 	if (!match_super) {
1500 		if (!ctx->nodfs) {
1501 			if (server->leaf_fullpath) {
1502 				if (!ctx->leaf_fullpath ||
1503 				    strcasecmp(server->leaf_fullpath,
1504 					       ctx->leaf_fullpath))
1505 					return 0;
1506 			} else if (ctx->leaf_fullpath) {
1507 				return 0;
1508 			}
1509 		} else if (server->leaf_fullpath) {
1510 			return 0;
1511 		}
1512 	}
1513 
1514 	/*
1515 	 * Match for a regular connection (address/hostname/port) which has no
1516 	 * DFS referrals set.
1517 	 */
1518 	if (!server->leaf_fullpath &&
1519 	    (strcasecmp(server->hostname, ctx->server_hostname) ||
1520 	     !match_server_address(server, addr) ||
1521 	     !match_port(server, addr)))
1522 		return 0;
1523 
1524 	if (!match_security(server, ctx))
1525 		return 0;
1526 
1527 	if (server->echo_interval != ctx->echo_interval * HZ)
1528 		return 0;
1529 
1530 	if (server->rdma != ctx->rdma)
1531 		return 0;
1532 
1533 	if (server->ignore_signature != ctx->ignore_signature)
1534 		return 0;
1535 
1536 	if (server->min_offload != ctx->min_offload)
1537 		return 0;
1538 
1539 	return 1;
1540 }
1541 
1542 struct TCP_Server_Info *
1543 cifs_find_tcp_session(struct smb3_fs_context *ctx)
1544 {
1545 	struct TCP_Server_Info *server;
1546 
1547 	spin_lock(&cifs_tcp_ses_lock);
1548 	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
1549 		spin_lock(&server->srv_lock);
1550 		/*
1551 		 * Skip ses channels since they're only handled in lower layers
1552 		 * (e.g. cifs_send_recv).
1553 		 */
1554 		if (SERVER_IS_CHAN(server) ||
1555 		    !match_server(server, ctx, false)) {
1556 			spin_unlock(&server->srv_lock);
1557 			continue;
1558 		}
1559 		spin_unlock(&server->srv_lock);
1560 
1561 		++server->srv_count;
1562 		spin_unlock(&cifs_tcp_ses_lock);
1563 		cifs_dbg(FYI, "Existing tcp session with server found\n");
1564 		return server;
1565 	}
1566 	spin_unlock(&cifs_tcp_ses_lock);
1567 	return NULL;
1568 }
1569 
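/*
 * Drop a reference to the tcp session. When the last reference goes away,
 * unlink it from the global list, cancel its delayed work, mark it
 * CifsExiting and signal the demultiplex thread, which performs the final
 * cleanup.
 */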
1570 void
1571 cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
1572 {
1573 	struct task_struct *task;
1574 
1575 	spin_lock(&cifs_tcp_ses_lock);
1576 	if (--server->srv_count > 0) {
1577 		spin_unlock(&cifs_tcp_ses_lock);
1578 		return;
1579 	}
1580 
1581 	/* srv_count can never go negative */
1582 	WARN_ON(server->srv_count < 0);
1583 
1584 	put_net(cifs_net_ns(server));
1585 
1586 	list_del_init(&server->tcp_ses_list);
1587 	spin_unlock(&cifs_tcp_ses_lock);
1588 
1589 	/* For secondary channels, we pick up ref-count on the primary server */
1590 	if (SERVER_IS_CHAN(server))
1591 		cifs_put_tcp_session(server->primary_server, from_reconnect);
1592 
1593 	cancel_delayed_work_sync(&server->echo);
1594 
1595 	if (from_reconnect)
1596 		/*
1597 		 * Avoid deadlock here: reconnect work calls
1598 		 * cifs_put_tcp_session() at its end. Need to be sure
1599 		 * that reconnect work does nothing with server pointer after
1600 		 * that step.
1601 		 */
1602 		cancel_delayed_work(&server->reconnect);
1603 	else
1604 		cancel_delayed_work_sync(&server->reconnect);
1605 
1606 	spin_lock(&server->srv_lock);
1607 	server->tcpStatus = CifsExiting;
1608 	spin_unlock(&server->srv_lock);
1609 
1610 	cifs_crypto_secmech_release(server);
1611 
1612 	kfree_sensitive(server->session_key.response);
1613 	server->session_key.response = NULL;
1614 	server->session_key.len = 0;
1615 	kfree(server->hostname);
1616 	server->hostname = NULL;
1617 
1618 	task = xchg(&server->tsk, NULL);
1619 	if (task)
1620 		send_sig(SIGKILL, task, 1);
1621 }
1622 
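/*
 * Find an existing tcp session matching @ctx or create a new one: allocate
 * and initialize the TCP_Server_Info, connect the socket (or SMBDirect),
 * and spawn the cifsd demultiplex thread for it.
 */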
1623 struct TCP_Server_Info *
1624 cifs_get_tcp_session(struct smb3_fs_context *ctx,
1625 		     struct TCP_Server_Info *primary_server)
1626 {
1627 	struct TCP_Server_Info *tcp_ses = NULL;
1628 	int rc;
1629 
1630 	cifs_dbg(FYI, "UNC: %s\n", ctx->UNC);
1631 
1632 	/* see if we already have a matching tcp_ses */
1633 	tcp_ses = cifs_find_tcp_session(ctx);
1634 	if (tcp_ses)
1635 		return tcp_ses;
1636 
1637 	tcp_ses = kzalloc(sizeof(struct TCP_Server_Info), GFP_KERNEL);
1638 	if (!tcp_ses) {
1639 		rc = -ENOMEM;
1640 		goto out_err;
1641 	}
1642 
1643 	tcp_ses->hostname = kstrdup(ctx->server_hostname, GFP_KERNEL);
1644 	if (!tcp_ses->hostname) {
1645 		rc = -ENOMEM;
1646 		goto out_err;
1647 	}
1648 
1649 	if (ctx->leaf_fullpath) {
1650 		tcp_ses->leaf_fullpath = kstrdup(ctx->leaf_fullpath, GFP_KERNEL);
1651 		if (!tcp_ses->leaf_fullpath) {
1652 			rc = -ENOMEM;
1653 			goto out_err;
1654 		}
1655 	}
1656 
1657 	if (ctx->nosharesock)
1658 		tcp_ses->nosharesock = true;
1659 
1660 	tcp_ses->ops = ctx->ops;
1661 	tcp_ses->vals = ctx->vals;
1662 	cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
1663 
1664 	tcp_ses->conn_id = atomic_inc_return(&tcpSesNextId);
1665 	tcp_ses->noblockcnt = ctx->rootfs;
1666 	tcp_ses->noblocksnd = ctx->noblocksnd || ctx->rootfs;
1667 	tcp_ses->noautotune = ctx->noautotune;
1668 	tcp_ses->tcp_nodelay = ctx->sockopt_tcp_nodelay;
1669 	tcp_ses->rdma = ctx->rdma;
1670 	tcp_ses->in_flight = 0;
1671 	tcp_ses->max_in_flight = 0;
1672 	tcp_ses->credits = 1;
1673 	if (primary_server) {
1674 		spin_lock(&cifs_tcp_ses_lock);
1675 		++primary_server->srv_count;
1676 		spin_unlock(&cifs_tcp_ses_lock);
1677 		tcp_ses->primary_server = primary_server;
1678 	}
1679 	init_waitqueue_head(&tcp_ses->response_q);
1680 	init_waitqueue_head(&tcp_ses->request_q);
1681 	INIT_LIST_HEAD(&tcp_ses->pending_mid_q);
1682 	mutex_init(&tcp_ses->_srv_mutex);
1683 	memcpy(tcp_ses->workstation_RFC1001_name,
1684 		ctx->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
1685 	memcpy(tcp_ses->server_RFC1001_name,
1686 		ctx->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
1687 	tcp_ses->session_estab = false;
1688 	tcp_ses->sequence_number = 0;
1689 	tcp_ses->channel_sequence_num = 0; /* only tracked for primary channel */
1690 	tcp_ses->reconnect_instance = 1;
1691 	tcp_ses->lstrp = jiffies;
1692 	tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression);
1693 	spin_lock_init(&tcp_ses->req_lock);
1694 	spin_lock_init(&tcp_ses->srv_lock);
1695 	spin_lock_init(&tcp_ses->mid_lock);
1696 	INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
1697 	INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
1698 	INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
1699 	INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
1700 	mutex_init(&tcp_ses->reconnect_mutex);
1701 #ifdef CONFIG_CIFS_DFS_UPCALL
1702 	mutex_init(&tcp_ses->refpath_lock);
1703 #endif
1704 	memcpy(&tcp_ses->srcaddr, &ctx->srcaddr,
1705 	       sizeof(tcp_ses->srcaddr));
1706 	memcpy(&tcp_ses->dstaddr, &ctx->dstaddr,
1707 		sizeof(tcp_ses->dstaddr));
1708 	if (ctx->use_client_guid)
1709 		memcpy(tcp_ses->client_guid, ctx->client_guid,
1710 		       SMB2_CLIENT_GUID_SIZE);
1711 	else
1712 		generate_random_uuid(tcp_ses->client_guid);
1713 	/*
1714 	 * At this point we are the only ones with a pointer to the
1715 	 * struct, since the kernel thread has not been created yet;
1716 	 * no need to spinlock this init of tcpStatus or srv_count.
1717 	 */
1718 	tcp_ses->tcpStatus = CifsNew;
1719 	++tcp_ses->srv_count;
1720 
1721 	if (ctx->echo_interval >= SMB_ECHO_INTERVAL_MIN &&
1722 		ctx->echo_interval <= SMB_ECHO_INTERVAL_MAX)
1723 		tcp_ses->echo_interval = ctx->echo_interval * HZ;
1724 	else
1725 		tcp_ses->echo_interval = SMB_ECHO_INTERVAL_DEFAULT * HZ;
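	/*
	 * Note: ctx->echo_interval is the mount option value in seconds and
	 * is converted to jiffies here; e.g. "echo_interval=120" stores
	 * 120 * HZ, while an out-of-range value silently falls back to
	 * SMB_ECHO_INTERVAL_DEFAULT * HZ. This interval is used when the
	 * echo delayed work is queued further down in this function.
	 */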
1726 	if (tcp_ses->rdma) {
1727 #ifndef CONFIG_CIFS_SMB_DIRECT
1728 		cifs_dbg(VFS, "CONFIG_CIFS_SMB_DIRECT is not enabled\n");
1729 		rc = -ENOENT;
1730 		goto out_err_crypto_release;
1731 #endif
1732 		tcp_ses->smbd_conn = smbd_get_connection(
1733 			tcp_ses, (struct sockaddr *)&ctx->dstaddr);
1734 		if (tcp_ses->smbd_conn) {
1735 			cifs_dbg(VFS, "RDMA transport established\n");
1736 			rc = 0;
1737 			goto smbd_connected;
1738 		} else {
1739 			rc = -ENOENT;
1740 			goto out_err_crypto_release;
1741 		}
1742 	}
1743 	rc = ip_connect(tcp_ses);
1744 	if (rc < 0) {
1745 		cifs_dbg(VFS, "Error connecting to socket. Aborting operation.\n");
1746 		goto out_err_crypto_release;
1747 	}
1748 smbd_connected:
1749 	/*
1750 	 * since we're in a cifs function already, we know that
1751 	 * this will succeed. No need for try_module_get().
1752 	 */
1753 	__module_get(THIS_MODULE);
1754 	tcp_ses->tsk = kthread_run(cifs_demultiplex_thread,
1755 				  tcp_ses, "cifsd");
1756 	if (IS_ERR(tcp_ses->tsk)) {
1757 		rc = PTR_ERR(tcp_ses->tsk);
1758 		cifs_dbg(VFS, "error %d create cifsd thread\n", rc);
1759 		module_put(THIS_MODULE);
1760 		goto out_err_crypto_release;
1761 	}
1762 	tcp_ses->min_offload = ctx->min_offload;
1763 	/*
1764 	 * The demultiplex thread was started above, so the server
1765 	 * struct may now be touched from that context as well; take
1766 	 * srv_lock before updating tcpStatus.
1767 	 */
1768 	spin_lock(&tcp_ses->srv_lock);
1769 	tcp_ses->tcpStatus = CifsNeedNegotiate;
1770 	spin_unlock(&tcp_ses->srv_lock);
1771 
1772 	if ((ctx->max_credits < 20) || (ctx->max_credits > 60000))
1773 		tcp_ses->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
1774 	else
1775 		tcp_ses->max_credits = ctx->max_credits;
1776 
1777 	tcp_ses->nr_targets = 1;
1778 	tcp_ses->ignore_signature = ctx->ignore_signature;
1779 	/* thread spawned, put it on the list */
1780 	spin_lock(&cifs_tcp_ses_lock);
1781 	list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list);
1782 	spin_unlock(&cifs_tcp_ses_lock);
1783 
1784 	/* queue echo request delayed work */
1785 	queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);
1786 
1787 	return tcp_ses;
1788 
1789 out_err_crypto_release:
1790 	cifs_crypto_secmech_release(tcp_ses);
1791 
1792 	put_net(cifs_net_ns(tcp_ses));
1793 
1794 out_err:
1795 	if (tcp_ses) {
1796 		if (SERVER_IS_CHAN(tcp_ses))
1797 			cifs_put_tcp_session(tcp_ses->primary_server, false);
1798 		kfree(tcp_ses->hostname);
1799 		kfree(tcp_ses->leaf_fullpath);
1800 		if (tcp_ses->ssocket)
1801 			sock_release(tcp_ses->ssocket);
1802 		kfree(tcp_ses);
1803 	}
1804 	return ERR_PTR(rc);
1805 }
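/*
 * Refcounting sketch for the function above (based on this file's callers):
 * cifs_get_tcp_session() returns a server with srv_count already held,
 * either because cifs_find_tcp_session() took a reference on an existing
 * match or because a new server was initialized with srv_count = 1, so
 * every successful caller (e.g. cifs_mount_get_session() below) owes one
 * matching cifs_put_tcp_session(). When @primary_server is passed (the
 * extra-channel case), its srv_count is bumped too and is dropped again on
 * the error path via the SERVER_IS_CHAN() check.
 */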
1806 
1807 /* this function must be called with ses_lock and chan_lock held */
1808 static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
1809 {
1810 	if (ctx->sectype != Unspecified &&
1811 	    ctx->sectype != ses->sectype)
1812 		return 0;
1813 
1814 	/*
1815 	 * If an existing session is limited to fewer channels than
1816 	 * requested, it should not be reused.
1817 	 */
1818 	if (ses->chan_max < ctx->max_channels)
1819 		return 0;
1820 
1821 	switch (ses->sectype) {
1822 	case Kerberos:
1823 		if (!uid_eq(ctx->cred_uid, ses->cred_uid))
1824 			return 0;
1825 		break;
1826 	default:
1827 		/* NULL username means anonymous session */
1828 		if (ses->user_name == NULL) {
1829 			if (!ctx->nullauth)
1830 				return 0;
1831 			break;
1832 		}
1833 
1834 		/* anything else takes username/password */
1835 		if (strncmp(ses->user_name,
1836 			    ctx->username ? ctx->username : "",
1837 			    CIFS_MAX_USERNAME_LEN))
1838 			return 0;
1839 		if ((ctx->username && strlen(ctx->username) != 0) &&
1840 		    ses->password != NULL &&
1841 		    strncmp(ses->password,
1842 			    ctx->password ? ctx->password : "",
1843 			    CIFS_MAX_PASSWORD_LEN))
1844 			return 0;
1845 	}
1846 
1847 	if (strcmp(ctx->local_nls->charset, ses->local_nls->charset))
1848 		return 0;
1849 
1850 	return 1;
1851 }
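/*
 * Example of the matching rules above: two mounts against the same server
 * using the same sectype, the same username/password (or both anonymous via
 * nullauth) and the same local charset can share one smb session, whereas a
 * mount requesting more channels (max_channels) than the existing session
 * was created with (chan_max) gets a new session instead of reusing it.
 */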
1852 
1853 /**
1854  * cifs_setup_ipc - helper to setup the IPC tcon for the session
1855  * @ses: smb session to issue the request on
1856  * @ctx: the superblock configuration context to use for building the
1857  *       new tree connection for the IPC (interprocess communication RPC)
1858  *
1859  * A new IPC connection is made and stored in the session
1860  * tcon_ipc. The IPC tcon has the same lifetime as the session.
1861  */
1862 static int
1863 cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
1864 {
1865 	int rc = 0, xid;
1866 	struct cifs_tcon *tcon;
1867 	char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0};
1868 	bool seal = false;
1869 	struct TCP_Server_Info *server = ses->server;
1870 
1871 	/*
1872 	 * If the mount request that resulted in the creation of the
1873 	 * session requires encryption, force IPC to be encrypted too.
1874 	 */
1875 	if (ctx->seal) {
1876 		if (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)
1877 			seal = true;
1878 		else {
1879 			cifs_server_dbg(VFS,
1880 				 "IPC: server doesn't support encryption\n");
1881 			return -EOPNOTSUPP;
1882 		}
1883 	}
1884 
1885 	/* no need to setup directory caching on IPC share, so pass in false */
1886 	tcon = tcon_info_alloc(false);
1887 	if (tcon == NULL)
1888 		return -ENOMEM;
1889 
1890 	spin_lock(&server->srv_lock);
1891 	scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname);
1892 	spin_unlock(&server->srv_lock);
1893 
1894 	xid = get_xid();
1895 	tcon->ses = ses;
1896 	tcon->ipc = true;
1897 	tcon->seal = seal;
1898 	rc = server->ops->tree_connect(xid, ses, unc, tcon, ctx->local_nls);
1899 	free_xid(xid);
1900 
1901 	if (rc) {
1902 		cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc);
1903 		tconInfoFree(tcon);
1904 		goto out;
1905 	}
1906 
1907 	cifs_dbg(FYI, "IPC tcon rc=%d ipc tid=0x%x\n", rc, tcon->tid);
1908 
1909 	spin_lock(&tcon->tc_lock);
1910 	tcon->status = TID_GOOD;
1911 	spin_unlock(&tcon->tc_lock);
1912 	ses->tcon_ipc = tcon;
1913 out:
1914 	return rc;
1915 }
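/*
 * Example of the tree connect issued above: for a server named "srv1" the
 * UNC buffer becomes "\\srv1\IPC$", the hidden IPC share used e.g. for DFS
 * referrals and other pipe/FSCTL traffic. The resulting tcon is stored in
 * ses->tcon_ipc and is released by cifs_free_ipc() below rather than by
 * cifs_put_tcon().
 */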
1916 
1917 /**
1918  * cifs_free_ipc - helper to release the session IPC tcon
1919  * @ses: smb session to unmount the IPC from
1920  *
1921  * Needs to be called every time a session is destroyed.
1922  *
1923  * On session close, the IPC is closed and the server must release all tcons of the session.
1924  * No need to send a tree disconnect here.
1925  *
1926  * Besides, this makes the server not close durable and resilient files on session close, as
1927  * specified in MS-SMB2 3.3.5.6 Receiving an SMB2 LOGOFF Request.
1928  */
1929 static int
1930 cifs_free_ipc(struct cifs_ses *ses)
1931 {
1932 	struct cifs_tcon *tcon = ses->tcon_ipc;
1933 
1934 	if (tcon == NULL)
1935 		return 0;
1936 
1937 	tconInfoFree(tcon);
1938 	ses->tcon_ipc = NULL;
1939 	return 0;
1940 }
1941 
1942 static struct cifs_ses *
1943 cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
1944 {
1945 	struct cifs_ses *ses, *ret = NULL;
1946 
1947 	spin_lock(&cifs_tcp_ses_lock);
1948 	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
1949 		spin_lock(&ses->ses_lock);
1950 		if (ses->ses_status == SES_EXITING) {
1951 			spin_unlock(&ses->ses_lock);
1952 			continue;
1953 		}
1954 		spin_lock(&ses->chan_lock);
1955 		if (match_session(ses, ctx)) {
1956 			spin_unlock(&ses->chan_lock);
1957 			spin_unlock(&ses->ses_lock);
1958 			ret = ses;
1959 			break;
1960 		}
1961 		spin_unlock(&ses->chan_lock);
1962 		spin_unlock(&ses->ses_lock);
1963 	}
1964 	if (ret)
1965 		cifs_smb_ses_inc_refcount(ret);
1966 	spin_unlock(&cifs_tcp_ses_lock);
1967 	return ret;
1968 }
1969 
1970 void __cifs_put_smb_ses(struct cifs_ses *ses)
1971 {
1972 	unsigned int rc, xid;
1973 	unsigned int chan_count;
1974 	struct TCP_Server_Info *server = ses->server;
1975 
1976 	spin_lock(&ses->ses_lock);
1977 	if (ses->ses_status == SES_EXITING) {
1978 		spin_unlock(&ses->ses_lock);
1979 		return;
1980 	}
1981 	spin_unlock(&ses->ses_lock);
1982 
1983 	cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
1984 	cifs_dbg(FYI,
1985 		 "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? ses->tcon_ipc->tree_name : "NONE");
1986 
1987 	spin_lock(&cifs_tcp_ses_lock);
1988 	if (--ses->ses_count > 0) {
1989 		spin_unlock(&cifs_tcp_ses_lock);
1990 		return;
1991 	}
1992 	spin_lock(&ses->ses_lock);
1993 	if (ses->ses_status == SES_GOOD)
1994 		ses->ses_status = SES_EXITING;
1995 	spin_unlock(&ses->ses_lock);
1996 	spin_unlock(&cifs_tcp_ses_lock);
1997 
1998 	/* ses_count can never go negative */
1999 	WARN_ON(ses->ses_count < 0);
2000 
2001 	spin_lock(&ses->ses_lock);
2002 	if (ses->ses_status == SES_EXITING && server->ops->logoff) {
2003 		spin_unlock(&ses->ses_lock);
2004 		cifs_free_ipc(ses);
2005 		xid = get_xid();
2006 		rc = server->ops->logoff(xid, ses);
2007 		if (rc)
2008 			cifs_server_dbg(VFS, "%s: Session Logoff failure rc=%d\n",
2009 				__func__, rc);
2010 		_free_xid(xid);
2011 	} else {
2012 		spin_unlock(&ses->ses_lock);
2013 		cifs_free_ipc(ses);
2014 	}
2015 
2016 	spin_lock(&cifs_tcp_ses_lock);
2017 	list_del_init(&ses->smb_ses_list);
2018 	spin_unlock(&cifs_tcp_ses_lock);
2019 
2020 	chan_count = ses->chan_count;
2021 
2022 	/* close any extra channels */
2023 	if (chan_count > 1) {
2024 		int i;
2025 
2026 		for (i = 1; i < chan_count; i++) {
2027 			if (ses->chans[i].iface) {
2028 				kref_put(&ses->chans[i].iface->refcount, release_iface);
2029 				ses->chans[i].iface = NULL;
2030 			}
2031 			cifs_put_tcp_session(ses->chans[i].server, 0);
2032 			ses->chans[i].server = NULL;
2033 		}
2034 	}
2035 
2036 	sesInfoFree(ses);
2037 	cifs_put_tcp_session(server, 0);
2038 }
2039 
2040 #ifdef CONFIG_KEYS
2041 
2042 /* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */
2043 #define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1)
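/*
 * Example key descriptions built below with the "cifs:a:" and "cifs:d:"
 * prefixes (hence the 7 in the size above): "cifs:a:192.168.1.10" or
 * "cifs:a:2001:db8::1" for address keys and "cifs:d:EXAMPLE" for a domain
 * key; the buffer must hold the prefix, the longest allowed domain name
 * and a trailing NUL.
 */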
2044 
2045 /* Populate username and pw fields from keyring if possible */
2046 static int
2047 cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
2048 {
2049 	int rc = 0;
2050 	int is_domain = 0;
2051 	const char *delim, *payload;
2052 	char *desc;
2053 	ssize_t len;
2054 	struct key *key;
2055 	struct TCP_Server_Info *server = ses->server;
2056 	struct sockaddr_in *sa;
2057 	struct sockaddr_in6 *sa6;
2058 	const struct user_key_payload *upayload;
2059 
2060 	desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL);
2061 	if (!desc)
2062 		return -ENOMEM;
2063 
2064 	/* try to find an address key first */
2065 	switch (server->dstaddr.ss_family) {
2066 	case AF_INET:
2067 		sa = (struct sockaddr_in *)&server->dstaddr;
2068 		sprintf(desc, "cifs:a:%pI4", &sa->sin_addr.s_addr);
2069 		break;
2070 	case AF_INET6:
2071 		sa6 = (struct sockaddr_in6 *)&server->dstaddr;
2072 		sprintf(desc, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr);
2073 		break;
2074 	default:
2075 		cifs_dbg(FYI, "Bad ss_family (%hu)\n",
2076 			 server->dstaddr.ss_family);
2077 		rc = -EINVAL;
2078 		goto out_err;
2079 	}
2080 
2081 	cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc);
2082 	key = request_key(&key_type_logon, desc, "");
2083 	if (IS_ERR(key)) {
2084 		if (!ses->domainName) {
2085 			cifs_dbg(FYI, "domainName is NULL\n");
2086 			rc = PTR_ERR(key);
2087 			goto out_err;
2088 		}
2089 
2090 		/* didn't work, try to find a domain key */
2091 		sprintf(desc, "cifs:d:%s", ses->domainName);
2092 		cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc);
2093 		key = request_key(&key_type_logon, desc, "");
2094 		if (IS_ERR(key)) {
2095 			rc = PTR_ERR(key);
2096 			goto out_err;
2097 		}
2098 		is_domain = 1;
2099 	}
2100 
2101 	down_read(&key->sem);
2102 	upayload = user_key_payload_locked(key);
2103 	if (IS_ERR_OR_NULL(upayload)) {
2104 		rc = upayload ? PTR_ERR(upayload) : -EINVAL;
2105 		goto out_key_put;
2106 	}
2107 
2108 	/* find first : in payload */
2109 	payload = upayload->data;
2110 	delim = strnchr(payload, upayload->datalen, ':');
2111 	cifs_dbg(FYI, "payload=%s\n", payload);
2112 	if (!delim) {
2113 		cifs_dbg(FYI, "Unable to find ':' in payload (datalen=%d)\n",
2114 			 upayload->datalen);
2115 		rc = -EINVAL;
2116 		goto out_key_put;
2117 	}
2118 
2119 	len = delim - payload;
2120 	if (len > CIFS_MAX_USERNAME_LEN || len <= 0) {
2121 		cifs_dbg(FYI, "Bad value from username search (len=%zd)\n",
2122 			 len);
2123 		rc = -EINVAL;
2124 		goto out_key_put;
2125 	}
2126 
2127 	ctx->username = kstrndup(payload, len, GFP_KERNEL);
2128 	if (!ctx->username) {
2129 		cifs_dbg(FYI, "Unable to allocate %zd bytes for username\n",
2130 			 len);
2131 		rc = -ENOMEM;
2132 		goto out_key_put;
2133 	}
2134 	cifs_dbg(FYI, "%s: username=%s\n", __func__, ctx->username);
2135 
2136 	len = key->datalen - (len + 1);
2137 	if (len > CIFS_MAX_PASSWORD_LEN || len <= 0) {
2138 		cifs_dbg(FYI, "Bad len for password search (len=%zd)\n", len);
2139 		rc = -EINVAL;
2140 		kfree(ctx->username);
2141 		ctx->username = NULL;
2142 		goto out_key_put;
2143 	}
2144 
2145 	++delim;
2146 	ctx->password = kstrndup(delim, len, GFP_KERNEL);
2147 	if (!ctx->password) {
2148 		cifs_dbg(FYI, "Unable to allocate %zd bytes for password\n",
2149 			 len);
2150 		rc = -ENOMEM;
2151 		kfree(ctx->username);
2152 		ctx->username = NULL;
2153 		goto out_key_put;
2154 	}
2155 
2156 	/*
2157 	 * If we have a domain key then we must set the domainname in
2158 	 * the fs context for the request.
2159 	 */
2160 	if (is_domain && ses->domainName) {
2161 		ctx->domainname = kstrdup(ses->domainName, GFP_KERNEL);
2162 		if (!ctx->domainname) {
2163 			cifs_dbg(FYI, "Unable to allocate %zd bytes for domain\n",
2164 				 len);
2165 			rc = -ENOMEM;
2166 			kfree(ctx->username);
2167 			ctx->username = NULL;
2168 			kfree_sensitive(ctx->password);
2169 			ctx->password = NULL;
2170 			goto out_key_put;
2171 		}
2172 	}
2173 
2174 	strscpy(ctx->workstation_name, ses->workstation_name, sizeof(ctx->workstation_name));
2175 
2176 out_key_put:
2177 	up_read(&key->sem);
2178 	key_put(key);
2179 out_err:
2180 	kfree(desc);
2181 	cifs_dbg(FYI, "%s: returning %d\n", __func__, rc);
2182 	return rc;
2183 }
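/*
 * Illustration only (userspace side, not part of this file): the logon key
 * requested above is normally added by cifscreds(1), or manually with
 * something like
 *	keyctl add logon cifs:a:192.168.1.10 'user:pass' @s
 * i.e. the payload is "<username>:<password>", which is exactly what the
 * strnchr()/kstrndup() parsing above splits at the first ':'.
 */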
2184 #else /* ! CONFIG_KEYS */
2185 static inline int
2186 cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)),
2187 		   struct cifs_ses *ses __attribute__((unused)))
2188 {
2189 	return -ENOSYS;
2190 }
2191 #endif /* CONFIG_KEYS */
2192 
2193 /**
2194  * cifs_get_smb_ses - get a session matching @ctx data from @server
2195  * @server: server to setup the session to
2196  * @ctx: superblock configuration context to use to setup the session
2197  *
2198  * This function assumes it is being called from cifs_mount() where we
2199  * already got a server reference (server refcount +1). See
2200  * cifs_get_tcon() for refcount explanations.
2201  */
2202 struct cifs_ses *
2203 cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
2204 {
2205 	int rc = 0;
2206 	unsigned int xid;
2207 	struct cifs_ses *ses;
2208 	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
2209 	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
2210 
2211 	xid = get_xid();
2212 
2213 	ses = cifs_find_smb_ses(server, ctx);
2214 	if (ses) {
2215 		cifs_dbg(FYI, "Existing smb sess found (status=%d)\n",
2216 			 ses->ses_status);
2217 
2218 		spin_lock(&ses->chan_lock);
2219 		if (cifs_chan_needs_reconnect(ses, server)) {
2220 			spin_unlock(&ses->chan_lock);
2221 			cifs_dbg(FYI, "Session needs reconnect\n");
2222 
2223 			mutex_lock(&ses->session_mutex);
2224 			rc = cifs_negotiate_protocol(xid, ses, server);
2225 			if (rc) {
2226 				mutex_unlock(&ses->session_mutex);
2227 				/* problem -- put our ses reference */
2228 				cifs_put_smb_ses(ses);
2229 				free_xid(xid);
2230 				return ERR_PTR(rc);
2231 			}
2232 
2233 			rc = cifs_setup_session(xid, ses, server,
2234 						ctx->local_nls);
2235 			if (rc) {
2236 				mutex_unlock(&ses->session_mutex);
2237 				/* problem -- put our reference */
2238 				cifs_put_smb_ses(ses);
2239 				free_xid(xid);
2240 				return ERR_PTR(rc);
2241 			}
2242 			mutex_unlock(&ses->session_mutex);
2243 
2244 			spin_lock(&ses->chan_lock);
2245 		}
2246 		spin_unlock(&ses->chan_lock);
2247 
2248 		/* existing SMB ses has a server reference already */
2249 		cifs_put_tcp_session(server, 0);
2250 		free_xid(xid);
2251 		return ses;
2252 	}
2253 
2254 	rc = -ENOMEM;
2255 
2256 	cifs_dbg(FYI, "Existing smb sess not found\n");
2257 	ses = sesInfoAlloc();
2258 	if (ses == NULL)
2259 		goto get_ses_fail;
2260 
2261 	/* new SMB session uses our server ref */
2262 	ses->server = server;
2263 	if (server->dstaddr.ss_family == AF_INET6)
2264 		sprintf(ses->ip_addr, "%pI6", &addr6->sin6_addr);
2265 	else
2266 		sprintf(ses->ip_addr, "%pI4", &addr->sin_addr);
2267 
2268 	if (ctx->username) {
2269 		ses->user_name = kstrdup(ctx->username, GFP_KERNEL);
2270 		if (!ses->user_name)
2271 			goto get_ses_fail;
2272 	}
2273 
2274 	/* ctx->password freed at unmount */
2275 	if (ctx->password) {
2276 		ses->password = kstrdup(ctx->password, GFP_KERNEL);
2277 		if (!ses->password)
2278 			goto get_ses_fail;
2279 	}
2280 	if (ctx->domainname) {
2281 		ses->domainName = kstrdup(ctx->domainname, GFP_KERNEL);
2282 		if (!ses->domainName)
2283 			goto get_ses_fail;
2284 	}
2285 
2286 	strscpy(ses->workstation_name, ctx->workstation_name, sizeof(ses->workstation_name));
2287 
2288 	if (ctx->domainauto)
2289 		ses->domainAuto = ctx->domainauto;
2290 	ses->cred_uid = ctx->cred_uid;
2291 	ses->linux_uid = ctx->linux_uid;
2292 
2293 	ses->sectype = ctx->sectype;
2294 	ses->sign = ctx->sign;
2295 	ses->local_nls = load_nls(ctx->local_nls->charset);
2296 
2297 	/* add server as first channel */
2298 	spin_lock(&ses->chan_lock);
2299 	ses->chans[0].server = server;
2300 	ses->chan_count = 1;
2301 	ses->chan_max = ctx->multichannel ? ctx->max_channels:1;
2302 	ses->chans_need_reconnect = 1;
2303 	spin_unlock(&ses->chan_lock);
2304 
2305 	mutex_lock(&ses->session_mutex);
2306 	rc = cifs_negotiate_protocol(xid, ses, server);
2307 	if (!rc)
2308 		rc = cifs_setup_session(xid, ses, server, ctx->local_nls);
2309 	mutex_unlock(&ses->session_mutex);
2310 
2311 	/* each channel uses a different signing key */
2312 	spin_lock(&ses->chan_lock);
2313 	memcpy(ses->chans[0].signkey, ses->smb3signingkey,
2314 	       sizeof(ses->smb3signingkey));
2315 	spin_unlock(&ses->chan_lock);
2316 
2317 	if (rc)
2318 		goto get_ses_fail;
2319 
2320 	/*
2321 	 * success, put it on the list and add it as first channel
2322 	 * note: the session becomes active soon after this. So you'll
2323 	 * need to lock before changing something in the session.
2324 	 */
2325 	spin_lock(&cifs_tcp_ses_lock);
2326 	ses->dfs_root_ses = ctx->dfs_root_ses;
2327 	if (ses->dfs_root_ses)
2328 		ses->dfs_root_ses->ses_count++;
2329 	list_add(&ses->smb_ses_list, &server->smb_ses_list);
2330 	spin_unlock(&cifs_tcp_ses_lock);
2331 
2332 	cifs_setup_ipc(ses, ctx);
2333 
2334 	free_xid(xid);
2335 
2336 	return ses;
2337 
2338 get_ses_fail:
2339 	sesInfoFree(ses);
2340 	free_xid(xid);
2341 	return ERR_PTR(rc);
2342 }
2343 
2344 /* this function must be called with tc_lock held */
2345 static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
2346 {
2347 	struct TCP_Server_Info *server = tcon->ses->server;
2348 
2349 	if (tcon->status == TID_EXITING)
2350 		return 0;
2351 
2352 	if (tcon->origin_fullpath) {
2353 		if (!ctx->source ||
2354 		    !dfs_src_pathname_equal(ctx->source,
2355 					    tcon->origin_fullpath))
2356 			return 0;
2357 	} else if (!server->leaf_fullpath &&
2358 		   strncmp(tcon->tree_name, ctx->UNC, MAX_TREE_SIZE)) {
2359 		return 0;
2360 	}
2361 	if (tcon->seal != ctx->seal)
2362 		return 0;
2363 	if (tcon->snapshot_time != ctx->snapshot_time)
2364 		return 0;
2365 	if (tcon->handle_timeout != ctx->handle_timeout)
2366 		return 0;
2367 	if (tcon->no_lease != ctx->no_lease)
2368 		return 0;
2369 	if (tcon->nodelete != ctx->nodelete)
2370 		return 0;
2371 	return 1;
2372 }
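/*
 * Example of the comparison above: mounting the same \\server\share twice,
 * once with "seal" and once without, produces two distinct tcons because
 * tcon->seal differs; the same holds for differing snapshot times, handle
 * timeouts, nolease or nodelete settings.
 */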
2373 
2374 static struct cifs_tcon *
2375 cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
2376 {
2377 	struct cifs_tcon *tcon;
2378 
2379 	spin_lock(&cifs_tcp_ses_lock);
2380 	list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
2381 		spin_lock(&tcon->tc_lock);
2382 		if (!match_tcon(tcon, ctx)) {
2383 			spin_unlock(&tcon->tc_lock);
2384 			continue;
2385 		}
2386 		++tcon->tc_count;
2387 		spin_unlock(&tcon->tc_lock);
2388 		spin_unlock(&cifs_tcp_ses_lock);
2389 		return tcon;
2390 	}
2391 	spin_unlock(&cifs_tcp_ses_lock);
2392 	return NULL;
2393 }
2394 
2395 void
2396 cifs_put_tcon(struct cifs_tcon *tcon)
2397 {
2398 	unsigned int xid;
2399 	struct cifs_ses *ses;
2400 
2401 	/*
2402 	 * IPC tcons share the lifetime of their session and are
2403 	 * destroyed in the session put function.
2404 	 */
2405 	if (tcon == NULL || tcon->ipc)
2406 		return;
2407 
2408 	ses = tcon->ses;
2409 	cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
2410 	spin_lock(&cifs_tcp_ses_lock);
2411 	spin_lock(&tcon->tc_lock);
2412 	if (--tcon->tc_count > 0) {
2413 		spin_unlock(&tcon->tc_lock);
2414 		spin_unlock(&cifs_tcp_ses_lock);
2415 		return;
2416 	}
2417 
2418 	/* tc_count can never go negative */
2419 	WARN_ON(tcon->tc_count < 0);
2420 
2421 	list_del_init(&tcon->tcon_list);
2422 	tcon->status = TID_EXITING;
2423 	spin_unlock(&tcon->tc_lock);
2424 	spin_unlock(&cifs_tcp_ses_lock);
2425 
2426 	/* cancel polling of interfaces */
2427 	cancel_delayed_work_sync(&tcon->query_interfaces);
2428 #ifdef CONFIG_CIFS_DFS_UPCALL
2429 	cancel_delayed_work_sync(&tcon->dfs_cache_work);
2430 #endif
2431 
2432 	if (tcon->use_witness) {
2433 		int rc;
2434 
2435 		rc = cifs_swn_unregister(tcon);
2436 		if (rc < 0) {
2437 			cifs_dbg(VFS, "%s: Failed to unregister for witness notifications: %d\n",
2438 					__func__, rc);
2439 		}
2440 	}
2441 
2442 	xid = get_xid();
2443 	if (ses->server->ops->tree_disconnect)
2444 		ses->server->ops->tree_disconnect(xid, tcon);
2445 	_free_xid(xid);
2446 
2447 	cifs_fscache_release_super_cookie(tcon);
2448 	tconInfoFree(tcon);
2449 	cifs_put_smb_ses(ses);
2450 }
2451 
2452 /**
2453  * cifs_get_tcon - get a tcon matching @ctx data from @ses
2454  * @ses: smb session to issue the request on
2455  * @ctx: the superblock configuration context to use for building the new tcon
2456  *
2457  * - tcon refcount is the number of mount points using the tcon.
2458  * - ses refcount is the number of tcon using the session.
2459  *
2460  * 1. This function assumes it is being called from cifs_mount() where
2461  *    we already got a session reference (ses refcount +1).
2462  *
2463  * 2. Since we're in the context of adding a mount point, the end
2464  *    result should be either:
2465  *
2466  * a) a new tcon already allocated with refcount=1 (1 mount point) and
2467  *    its session refcount incremented (1 new tcon). This +1 was
2468  *    already done in (1).
2469  *
2470  * b) an existing tcon with refcount+1 (add a mount point to it) and
2471  *    identical ses refcount (no new tcon). Because of (1) we need to
2472  *    decrement the ses refcount.
2473  */
2474 static struct cifs_tcon *
2475 cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
2476 {
2477 	struct cifs_tcon *tcon;
2478 	bool nohandlecache;
2479 	int rc, xid;
2480 
2481 	tcon = cifs_find_tcon(ses, ctx);
2482 	if (tcon) {
2483 		/*
2484 		 * tcon already has its refcount incremented, but we need to
2485 		 * decrement the extra ses reference taken by the caller (case b)
2486 		 */
2487 		cifs_dbg(FYI, "Found match on UNC path\n");
2488 		cifs_put_smb_ses(ses);
2489 		return tcon;
2490 	}
2491 
2492 	if (!ses->server->ops->tree_connect) {
2493 		rc = -ENOSYS;
2494 		goto out_fail;
2495 	}
2496 
2497 	if (ses->server->dialect >= SMB20_PROT_ID &&
2498 	    (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING))
2499 		nohandlecache = ctx->nohandlecache;
2500 	else
2501 		nohandlecache = true;
2502 	tcon = tcon_info_alloc(!nohandlecache);
2503 	if (tcon == NULL) {
2504 		rc = -ENOMEM;
2505 		goto out_fail;
2506 	}
2507 	tcon->nohandlecache = nohandlecache;
2508 
2509 	if (ctx->snapshot_time) {
2510 		if (ses->server->vals->protocol_id == 0) {
2511 			cifs_dbg(VFS,
2512 			     "Use SMB2 or later for snapshot mount option\n");
2513 			rc = -EOPNOTSUPP;
2514 			goto out_fail;
2515 		} else
2516 			tcon->snapshot_time = ctx->snapshot_time;
2517 	}
2518 
2519 	if (ctx->handle_timeout) {
2520 		if (ses->server->vals->protocol_id == 0) {
2521 			cifs_dbg(VFS,
2522 			     "Use SMB2.1 or later for handle timeout option\n");
2523 			rc = -EOPNOTSUPP;
2524 			goto out_fail;
2525 		} else
2526 			tcon->handle_timeout = ctx->handle_timeout;
2527 	}
2528 
2529 	tcon->ses = ses;
2530 	if (ctx->password) {
2531 		tcon->password = kstrdup(ctx->password, GFP_KERNEL);
2532 		if (!tcon->password) {
2533 			rc = -ENOMEM;
2534 			goto out_fail;
2535 		}
2536 	}
2537 
2538 	if (ctx->seal) {
2539 		if (ses->server->vals->protocol_id == 0) {
2540 			cifs_dbg(VFS,
2541 				 "SMB3 or later required for encryption\n");
2542 			rc = -EOPNOTSUPP;
2543 			goto out_fail;
2544 		} else if (tcon->ses->server->capabilities &
2545 					SMB2_GLOBAL_CAP_ENCRYPTION)
2546 			tcon->seal = true;
2547 		else {
2548 			cifs_dbg(VFS, "Encryption is not supported on share\n");
2549 			rc = -EOPNOTSUPP;
2550 			goto out_fail;
2551 		}
2552 	}
2553 
2554 	if (ctx->linux_ext) {
2555 		if (ses->server->posix_ext_supported) {
2556 			tcon->posix_extensions = true;
2557 			pr_warn_once("SMB3.11 POSIX Extensions are experimental\n");
2558 		} else if ((ses->server->vals->protocol_id == SMB311_PROT_ID) ||
2559 		    (strcmp(ses->server->vals->version_string,
2560 		     SMB3ANY_VERSION_STRING) == 0) ||
2561 		    (strcmp(ses->server->vals->version_string,
2562 		     SMBDEFAULT_VERSION_STRING) == 0)) {
2563 			cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions\n");
2564 			rc = -EOPNOTSUPP;
2565 			goto out_fail;
2566 		} else {
2567 			cifs_dbg(VFS, "Check vers= mount option. SMB3.11 "
2568 				"disabled but required for POSIX extensions\n");
2569 			rc = -EOPNOTSUPP;
2570 			goto out_fail;
2571 		}
2572 	}
2573 
2574 	xid = get_xid();
2575 	rc = ses->server->ops->tree_connect(xid, ses, ctx->UNC, tcon,
2576 					    ctx->local_nls);
2577 	free_xid(xid);
2578 	cifs_dbg(FYI, "Tcon rc = %d\n", rc);
2579 	if (rc)
2580 		goto out_fail;
2581 
2582 	tcon->use_persistent = false;
2583 	/* check if SMB2 or later, CIFS does not support persistent handles */
2584 	if (ctx->persistent) {
2585 		if (ses->server->vals->protocol_id == 0) {
2586 			cifs_dbg(VFS,
2587 			     "SMB3 or later required for persistent handles\n");
2588 			rc = -EOPNOTSUPP;
2589 			goto out_fail;
2590 		} else if (ses->server->capabilities &
2591 			   SMB2_GLOBAL_CAP_PERSISTENT_HANDLES)
2592 			tcon->use_persistent = true;
2593 		else /* persistent handles requested but not supported */ {
2594 			cifs_dbg(VFS,
2595 				"Persistent handles not supported on share\n");
2596 			rc = -EOPNOTSUPP;
2597 			goto out_fail;
2598 		}
2599 	} else if ((tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
2600 	     && (ses->server->capabilities & SMB2_GLOBAL_CAP_PERSISTENT_HANDLES)
2601 	     && (ctx->nopersistent == false)) {
2602 		cifs_dbg(FYI, "enabling persistent handles\n");
2603 		tcon->use_persistent = true;
2604 	} else if (ctx->resilient) {
2605 		if (ses->server->vals->protocol_id == 0) {
2606 			cifs_dbg(VFS,
2607 			     "SMB2.1 or later required for resilient handles\n");
2608 			rc = -EOPNOTSUPP;
2609 			goto out_fail;
2610 		}
2611 		tcon->use_resilient = true;
2612 	}
2613 
2614 	tcon->use_witness = false;
2615 	if (IS_ENABLED(CONFIG_CIFS_SWN_UPCALL) && ctx->witness) {
2616 		if (ses->server->vals->protocol_id >= SMB30_PROT_ID) {
2617 			if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER) {
2618 				/*
2619 				 * Set the witness-in-use flag first so that
2620 				 * registration can be retried in the echo task
2621 				 */
2622 				tcon->use_witness = true;
2623 				/* And try to register immediately */
2624 				rc = cifs_swn_register(tcon);
2625 				if (rc < 0) {
2626 					cifs_dbg(VFS, "Failed to register for witness notifications: %d\n", rc);
2627 					goto out_fail;
2628 				}
2629 			} else {
2630 				/* TODO: try to extend for non-cluster uses (eg multichannel) */
2631 				cifs_dbg(VFS, "witness requested on mount but no CLUSTER capability on share\n");
2632 				rc = -EOPNOTSUPP;
2633 				goto out_fail;
2634 			}
2635 		} else {
2636 			cifs_dbg(VFS, "SMB3 or later required for witness option\n");
2637 			rc = -EOPNOTSUPP;
2638 			goto out_fail;
2639 		}
2640 	}
2641 
2642 	/* If the user really knows what they are doing they can override */
2643 	if (tcon->share_flags & SMB2_SHAREFLAG_NO_CACHING) {
2644 		if (ctx->cache_ro)
2645 			cifs_dbg(VFS, "cache=ro requested on mount but NO_CACHING flag set on share\n");
2646 		else if (ctx->cache_rw)
2647 			cifs_dbg(VFS, "cache=singleclient requested on mount but NO_CACHING flag set on share\n");
2648 	}
2649 
2650 	if (ctx->no_lease) {
2651 		if (ses->server->vals->protocol_id == 0) {
2652 			cifs_dbg(VFS,
2653 				"SMB2 or later required for nolease option\n");
2654 			rc = -EOPNOTSUPP;
2655 			goto out_fail;
2656 		} else
2657 			tcon->no_lease = ctx->no_lease;
2658 	}
2659 
2660 	/*
2661 	 * We can have only one retry value for a connection to a share so for
2662 	 * resources mounted more than once to the same server share the last
2663 	 * value passed in for the retry flag is used.
2664 	 */
2665 	tcon->retry = ctx->retry;
2666 	tcon->nocase = ctx->nocase;
2667 	tcon->broken_sparse_sup = ctx->no_sparse;
2668 	tcon->max_cached_dirs = ctx->max_cached_dirs;
2669 	tcon->nodelete = ctx->nodelete;
2670 	tcon->local_lease = ctx->local_lease;
2671 	INIT_LIST_HEAD(&tcon->pending_opens);
2672 	tcon->status = TID_GOOD;
2673 
2674 	INIT_DELAYED_WORK(&tcon->query_interfaces,
2675 			  smb2_query_server_interfaces);
2676 	if (ses->server->dialect >= SMB30_PROT_ID &&
2677 	    (ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
2678 		/* schedule query interfaces poll */
2679 		queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
2680 				   (SMB_INTERFACE_POLL_INTERVAL * HZ));
2681 	}
2682 #ifdef CONFIG_CIFS_DFS_UPCALL
2683 	INIT_DELAYED_WORK(&tcon->dfs_cache_work, dfs_cache_refresh);
2684 #endif
2685 	spin_lock(&cifs_tcp_ses_lock);
2686 	list_add(&tcon->tcon_list, &ses->tcon_list);
2687 	spin_unlock(&cifs_tcp_ses_lock);
2688 
2689 	return tcon;
2690 
2691 out_fail:
2692 	tconInfoFree(tcon);
2693 	return ERR_PTR(rc);
2694 }
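/*
 * Refcount example for cases (a)/(b) documented above cifs_get_tcon(): a
 * second mount of //srv/share with identical options finds the existing
 * tcon in cifs_find_tcon() (tc_count is incremented there) and then drops
 * the extra session reference taken by the caller via cifs_put_smb_ses(),
 * whereas a first mount allocates a fresh tcon whose initial reference
 * belongs to that mount and keeps its session reference.
 */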
2695 
2696 void
2697 cifs_put_tlink(struct tcon_link *tlink)
2698 {
2699 	if (!tlink || IS_ERR(tlink))
2700 		return;
2701 
2702 	if (!atomic_dec_and_test(&tlink->tl_count) ||
2703 	    test_bit(TCON_LINK_IN_TREE, &tlink->tl_flags)) {
2704 		tlink->tl_time = jiffies;
2705 		return;
2706 	}
2707 
2708 	if (!IS_ERR(tlink_tcon(tlink)))
2709 		cifs_put_tcon(tlink_tcon(tlink));
2710 	kfree(tlink);
2711 	return;
2712 }
2713 
2714 static int
2715 compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
2716 {
2717 	struct cifs_sb_info *old = CIFS_SB(sb);
2718 	struct cifs_sb_info *new = mnt_data->cifs_sb;
2719 	unsigned int oldflags = old->mnt_cifs_flags & CIFS_MOUNT_MASK;
2720 	unsigned int newflags = new->mnt_cifs_flags & CIFS_MOUNT_MASK;
2721 
2722 	if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
2723 		return 0;
2724 
2725 	if (old->mnt_cifs_serverino_autodisabled)
2726 		newflags &= ~CIFS_MOUNT_SERVER_INUM;
2727 
2728 	if (oldflags != newflags)
2729 		return 0;
2730 
2731 	/*
2732 	 * We want to share the sb only if we don't specify an r/wsize or
2733 	 * the specified r/wsize is greater than or equal to the existing one.
2734 	 */
2735 	if (new->ctx->wsize && new->ctx->wsize < old->ctx->wsize)
2736 		return 0;
2737 
2738 	if (new->ctx->rsize && new->ctx->rsize < old->ctx->rsize)
2739 		return 0;
2740 
2741 	if (!uid_eq(old->ctx->linux_uid, new->ctx->linux_uid) ||
2742 	    !gid_eq(old->ctx->linux_gid, new->ctx->linux_gid))
2743 		return 0;
2744 
2745 	if (old->ctx->file_mode != new->ctx->file_mode ||
2746 	    old->ctx->dir_mode != new->ctx->dir_mode)
2747 		return 0;
2748 
2749 	if (strcmp(old->local_nls->charset, new->local_nls->charset))
2750 		return 0;
2751 
2752 	if (old->ctx->acregmax != new->ctx->acregmax)
2753 		return 0;
2754 	if (old->ctx->acdirmax != new->ctx->acdirmax)
2755 		return 0;
2756 	if (old->ctx->closetimeo != new->ctx->closetimeo)
2757 		return 0;
2758 
2759 	return 1;
2760 }
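/*
 * Sharing example for the r/wsize rule above: a new mount that specifies no
 * wsize, or a wsize greater than or equal to the existing superblock's, may
 * share that superblock; asking for a smaller wsize (or rsize) forces a
 * separate superblock so the smaller limit can actually take effect.
 */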
2761 
2762 static int match_prepath(struct super_block *sb,
2763 			 struct cifs_tcon *tcon,
2764 			 struct cifs_mnt_data *mnt_data)
2765 {
2766 	struct smb3_fs_context *ctx = mnt_data->ctx;
2767 	struct cifs_sb_info *old = CIFS_SB(sb);
2768 	struct cifs_sb_info *new = mnt_data->cifs_sb;
2769 	bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
2770 		old->prepath;
2771 	bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
2772 		new->prepath;
2773 
2774 	if (tcon->origin_fullpath &&
2775 	    dfs_src_pathname_equal(tcon->origin_fullpath, ctx->source))
2776 		return 1;
2777 
2778 	if (old_set && new_set && !strcmp(new->prepath, old->prepath))
2779 		return 1;
2780 	else if (!old_set && !new_set)
2781 		return 1;
2782 
2783 	return 0;
2784 }
2785 
2786 int
2787 cifs_match_super(struct super_block *sb, void *data)
2788 {
2789 	struct cifs_mnt_data *mnt_data = data;
2790 	struct smb3_fs_context *ctx;
2791 	struct cifs_sb_info *cifs_sb;
2792 	struct TCP_Server_Info *tcp_srv;
2793 	struct cifs_ses *ses;
2794 	struct cifs_tcon *tcon;
2795 	struct tcon_link *tlink;
2796 	int rc = 0;
2797 
2798 	spin_lock(&cifs_tcp_ses_lock);
2799 	cifs_sb = CIFS_SB(sb);
2800 
2801 	/* We do not want to use a superblock that has been shutdown */
2802 	if (CIFS_MOUNT_SHUTDOWN & cifs_sb->mnt_cifs_flags) {
2803 		spin_unlock(&cifs_tcp_ses_lock);
2804 		return 0;
2805 	}
2806 
2807 	tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
2808 	if (IS_ERR_OR_NULL(tlink)) {
2809 		pr_warn_once("%s: skip super matching due to bad tlink(%p)\n",
2810 			     __func__, tlink);
2811 		spin_unlock(&cifs_tcp_ses_lock);
2812 		return 0;
2813 	}
2814 	tcon = tlink_tcon(tlink);
2815 	ses = tcon->ses;
2816 	tcp_srv = ses->server;
2817 
2818 	ctx = mnt_data->ctx;
2819 
2820 	spin_lock(&tcp_srv->srv_lock);
2821 	spin_lock(&ses->ses_lock);
2822 	spin_lock(&ses->chan_lock);
2823 	spin_lock(&tcon->tc_lock);
2824 	if (!match_server(tcp_srv, ctx, true) ||
2825 	    !match_session(ses, ctx) ||
2826 	    !match_tcon(tcon, ctx) ||
2827 	    !match_prepath(sb, tcon, mnt_data)) {
2828 		rc = 0;
2829 		goto out;
2830 	}
2831 
2832 	rc = compare_mount_options(sb, mnt_data);
2833 out:
2834 	spin_unlock(&tcon->tc_lock);
2835 	spin_unlock(&ses->chan_lock);
2836 	spin_unlock(&ses->ses_lock);
2837 	spin_unlock(&tcp_srv->srv_lock);
2838 
2839 	spin_unlock(&cifs_tcp_ses_lock);
2840 	cifs_put_tlink(tlink);
2841 	return rc;
2842 }
2843 
2844 #ifdef CONFIG_DEBUG_LOCK_ALLOC
2845 static struct lock_class_key cifs_key[2];
2846 static struct lock_class_key cifs_slock_key[2];
2847 
2848 static inline void
2849 cifs_reclassify_socket4(struct socket *sock)
2850 {
2851 	struct sock *sk = sock->sk;
2852 	BUG_ON(!sock_allow_reclassification(sk));
2853 	sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS",
2854 		&cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]);
2855 }
2856 
2857 static inline void
2858 cifs_reclassify_socket6(struct socket *sock)
2859 {
2860 	struct sock *sk = sock->sk;
2861 	BUG_ON(!sock_allow_reclassification(sk));
2862 	sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS",
2863 		&cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]);
2864 }
2865 #else
2866 static inline void
2867 cifs_reclassify_socket4(struct socket *sock)
2868 {
2869 }
2870 
2871 static inline void
2872 cifs_reclassify_socket6(struct socket *sock)
2873 {
2874 }
2875 #endif
2876 
2877 /* See RFC1001 section 14 on representation of Netbios names */
2878 static void rfc1002mangle(char *target, char *source, unsigned int length)
2879 {
2880 	unsigned int i, j;
2881 
2882 	for (i = 0, j = 0; i < (length); i++) {
2883 		/* mask a nibble at a time and encode */
2884 		target[j] = 'A' + (0x0F & (source[i] >> 4));
2885 		target[j+1] = 'A' + (0x0F & source[i]);
2886 		j += 2;
2887 	}
2888 
2889 }
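/*
 * Encoding example for the RFC1001 "first level" mangling above: each source
 * byte becomes two bytes, 'A' + high nibble and 'A' + low nibble, so 'A'
 * (0x41) encodes to "EB" and a space (0x20) encodes to "CA".
 */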
2890 
2891 static int
2892 bind_socket(struct TCP_Server_Info *server)
2893 {
2894 	int rc = 0;
2895 	if (server->srcaddr.ss_family != AF_UNSPEC) {
2896 		/* Bind to the specified local IP address */
2897 		struct socket *socket = server->ssocket;
2898 		rc = kernel_bind(socket,
2899 				 (struct sockaddr *) &server->srcaddr,
2900 				 sizeof(server->srcaddr));
2901 		if (rc < 0) {
2902 			struct sockaddr_in *saddr4;
2903 			struct sockaddr_in6 *saddr6;
2904 			saddr4 = (struct sockaddr_in *)&server->srcaddr;
2905 			saddr6 = (struct sockaddr_in6 *)&server->srcaddr;
2906 			if (saddr6->sin6_family == AF_INET6)
2907 				cifs_server_dbg(VFS, "Failed to bind to: %pI6c, error: %d\n",
2908 					 &saddr6->sin6_addr, rc);
2909 			else
2910 				cifs_server_dbg(VFS, "Failed to bind to: %pI4, error: %d\n",
2911 					 &saddr4->sin_addr.s_addr, rc);
2912 		}
2913 	}
2914 	return rc;
2915 }
2916 
2917 static int
2918 ip_rfc1001_connect(struct TCP_Server_Info *server)
2919 {
2920 	int rc = 0;
2921 	/*
2922 	 * some servers require RFC1001 sessinit before sending
2923 	 * negprot - BB check reconnection in case where second
2924 	 * sessinit is sent but no second negprot
2925 	 */
2926 	struct rfc1002_session_packet req = {};
2927 	struct smb_hdr *smb_buf = (struct smb_hdr *)&req;
2928 	unsigned int len;
2929 
2930 	req.trailer.session_req.called_len = sizeof(req.trailer.session_req.called_name);
2931 
2932 	if (server->server_RFC1001_name[0] != 0)
2933 		rfc1002mangle(req.trailer.session_req.called_name,
2934 			      server->server_RFC1001_name,
2935 			      RFC1001_NAME_LEN_WITH_NULL);
2936 	else
2937 		rfc1002mangle(req.trailer.session_req.called_name,
2938 			      DEFAULT_CIFS_CALLED_NAME,
2939 			      RFC1001_NAME_LEN_WITH_NULL);
2940 
2941 	req.trailer.session_req.calling_len = sizeof(req.trailer.session_req.calling_name);
2942 
2943 	/* calling name ends in null (byte 16) from old smb convention */
2944 	if (server->workstation_RFC1001_name[0] != 0)
2945 		rfc1002mangle(req.trailer.session_req.calling_name,
2946 			      server->workstation_RFC1001_name,
2947 			      RFC1001_NAME_LEN_WITH_NULL);
2948 	else
2949 		rfc1002mangle(req.trailer.session_req.calling_name,
2950 			      "LINUX_CIFS_CLNT",
2951 			      RFC1001_NAME_LEN_WITH_NULL);
2952 
2953 	/*
2954 	 * As per rfc1002, @len must be the number of bytes that follow the
2955 	 * length field of a rfc1002 session request payload.
2956 	 */
2957 	len = sizeof(req) - offsetof(struct rfc1002_session_packet, trailer.session_req);
2958 
2959 	smb_buf->smb_buf_length = cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | len);
2960 	rc = smb_send(server, smb_buf, len);
2961 	/*
2962 	 * The RFC1001 layer in at least one server requires a very short break
2963 	 * before negprot, presumably because it does not expect negprot to
2964 	 * follow so quickly. This is a simple workaround that avoids
2965 	 * complicating the code and does not noticeably slow down mount for everyone else.
2966 	 */
2967 	usleep_range(1000, 2000);
2968 
2969 	return rc;
2970 }
2971 
2972 static int
2973 generic_ip_connect(struct TCP_Server_Info *server)
2974 {
2975 	struct sockaddr *saddr;
2976 	struct socket *socket;
2977 	int slen, sfamily;
2978 	__be16 sport;
2979 	int rc = 0;
2980 
2981 	saddr = (struct sockaddr *) &server->dstaddr;
2982 
2983 	if (server->dstaddr.ss_family == AF_INET6) {
2984 		struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&server->dstaddr;
2985 
2986 		sport = ipv6->sin6_port;
2987 		slen = sizeof(struct sockaddr_in6);
2988 		sfamily = AF_INET6;
2989 		cifs_dbg(FYI, "%s: connecting to [%pI6]:%d\n", __func__, &ipv6->sin6_addr,
2990 				ntohs(sport));
2991 	} else {
2992 		struct sockaddr_in *ipv4 = (struct sockaddr_in *)&server->dstaddr;
2993 
2994 		sport = ipv4->sin_port;
2995 		slen = sizeof(struct sockaddr_in);
2996 		sfamily = AF_INET;
2997 		cifs_dbg(FYI, "%s: connecting to %pI4:%d\n", __func__, &ipv4->sin_addr,
2998 				ntohs(sport));
2999 	}
3000 
3001 	if (server->ssocket) {
3002 		socket = server->ssocket;
3003 	} else {
3004 		rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM,
3005 				   IPPROTO_TCP, &server->ssocket, 1);
3006 		if (rc < 0) {
3007 			cifs_server_dbg(VFS, "Error %d creating socket\n", rc);
3008 			return rc;
3009 		}
3010 
3011 		/* BB other socket options to set KEEPALIVE, NODELAY? */
3012 		cifs_dbg(FYI, "Socket created\n");
3013 		socket = server->ssocket;
3014 		socket->sk->sk_allocation = GFP_NOFS;
3015 		socket->sk->sk_use_task_frag = false;
3016 		if (sfamily == AF_INET6)
3017 			cifs_reclassify_socket6(socket);
3018 		else
3019 			cifs_reclassify_socket4(socket);
3020 	}
3021 
3022 	rc = bind_socket(server);
3023 	if (rc < 0)
3024 		return rc;
3025 
3026 	/*
3027 	 * Eventually check for other socket options to change from
3028 	 * the default. sock_setsockopt is not used because it expects a
3029 	 * user space buffer.
3030 	 */
3031 	socket->sk->sk_rcvtimeo = 7 * HZ;
3032 	socket->sk->sk_sndtimeo = 5 * HZ;
3033 
3034 	/* make the bufsizes depend on wsize/rsize and max requests */
3035 	if (server->noautotune) {
3036 		if (socket->sk->sk_sndbuf < (200 * 1024))
3037 			socket->sk->sk_sndbuf = 200 * 1024;
3038 		if (socket->sk->sk_rcvbuf < (140 * 1024))
3039 			socket->sk->sk_rcvbuf = 140 * 1024;
3040 	}
3041 
3042 	if (server->tcp_nodelay)
3043 		tcp_sock_set_nodelay(socket->sk);
3044 
3045 	cifs_dbg(FYI, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx\n",
3046 		 socket->sk->sk_sndbuf,
3047 		 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
3048 
3049 	rc = kernel_connect(socket, saddr, slen,
3050 			    server->noblockcnt ? O_NONBLOCK : 0);
3051 	/*
3052 	 * When mounting SMB root file systems, we do not want to block in
3053 	 * connect. Otherwise bail out and then let cifs_reconnect() perform
3054 	 * reconnect failover - if possible.
3055 	 */
3056 	if (server->noblockcnt && rc == -EINPROGRESS)
3057 		rc = 0;
3058 	if (rc < 0) {
3059 		cifs_dbg(FYI, "Error %d connecting to server\n", rc);
3060 		trace_smb3_connect_err(server->hostname, server->conn_id, &server->dstaddr, rc);
3061 		sock_release(socket);
3062 		server->ssocket = NULL;
3063 		return rc;
3064 	}
3065 	trace_smb3_connect_done(server->hostname, server->conn_id, &server->dstaddr);
3066 	if (sport == htons(RFC1001_PORT))
3067 		rc = ip_rfc1001_connect(server);
3068 
3069 	return rc;
3070 }
3071 
3072 static int
3073 ip_connect(struct TCP_Server_Info *server)
3074 {
3075 	__be16 *sport;
3076 	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
3077 	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
3078 
3079 	if (server->dstaddr.ss_family == AF_INET6)
3080 		sport = &addr6->sin6_port;
3081 	else
3082 		sport = &addr->sin_port;
3083 
3084 	if (*sport == 0) {
3085 		int rc;
3086 
3087 		/* try port 445 (the direct SMB port) first */
3088 		*sport = htons(CIFS_PORT);
3089 
3090 		rc = generic_ip_connect(server);
3091 		if (rc >= 0)
3092 			return rc;
3093 
3094 		/* if that failed, fall back to port 139 */
3095 		*sport = htons(RFC1001_PORT);
3096 	}
3097 
3098 	return generic_ip_connect(server);
3099 }
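/*
 * Port selection example: with no "port=" mount option the destination port
 * is 0 here, so the code above first tries the direct SMB port 445
 * (CIFS_PORT) and, only if that connect fails, retries on port 139
 * (RFC1001_PORT), where generic_ip_connect() also sends the RFC1001 session
 * request before negprot.
 */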
3100 
3101 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
3102 void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
3103 			  struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
3104 {
3105 	/*
3106 	 * If we are reconnecting then we should check whether any
3107 	 * requested capabilities changed locally, e.g. via remount,
3108 	 * but we can not do much about it here if they have (even
3109 	 * if we could detect it by the following). Perhaps we could
3110 	 * add a backpointer to an array of sb from the tcon, or if
3111 	 * we changed to make all sb share the same sb as NFS does,
3112 	 * then we would only have one backpointer to sb. What if we
3113 	 * wanted to mount the server share twice, once with and
3114 	 * once without posixacls or posix paths?
3115 	 */
3116 	__u64 saved_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
3117 
3118 	if (ctx && ctx->no_linux_ext) {
3119 		tcon->fsUnixInfo.Capability = 0;
3120 		tcon->unix_ext = 0; /* Unix Extensions disabled */
3121 		cifs_dbg(FYI, "Linux protocol extensions disabled\n");
3122 		return;
3123 	} else if (ctx)
3124 		tcon->unix_ext = 1; /* Unix Extensions supported */
3125 
3126 	if (!tcon->unix_ext) {
3127 		cifs_dbg(FYI, "Unix extensions disabled so not set on reconnect\n");
3128 		return;
3129 	}
3130 
3131 	if (!CIFSSMBQFSUnixInfo(xid, tcon)) {
3132 		__u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
3133 		cifs_dbg(FYI, "unix caps which server supports %lld\n", cap);
3134 		/*
3135 		 * check for reconnect case in which we do not
3136 		 * want to change the mount behavior if we can avoid it
3137 		 */
3138 		if (ctx == NULL) {
3139 			/*
3140 			 * turn off POSIX ACL and PATHNAMES if not set
3141 			 * originally at mount time
3142 			 */
3143 			if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0)
3144 				cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
3145 			if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
3146 				if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
3147 					cifs_dbg(VFS, "POSIXPATH support change\n");
3148 				cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
3149 			} else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
3150 				cifs_dbg(VFS, "possible reconnect error\n");
3151 				cifs_dbg(VFS, "server disabled POSIX path support\n");
3152 			}
3153 		}
3154 
3155 		if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
3156 			cifs_dbg(VFS, "per-share encryption not supported yet\n");
3157 
3158 		cap &= CIFS_UNIX_CAP_MASK;
3159 		if (ctx && ctx->no_psx_acl)
3160 			cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
3161 		else if (CIFS_UNIX_POSIX_ACL_CAP & cap) {
3162 			cifs_dbg(FYI, "negotiated posix acl support\n");
3163 			if (cifs_sb)
3164 				cifs_sb->mnt_cifs_flags |=
3165 					CIFS_MOUNT_POSIXACL;
3166 		}
3167 
3168 		if (ctx && ctx->posix_paths == 0)
3169 			cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
3170 		else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
3171 			cifs_dbg(FYI, "negotiate posix pathnames\n");
3172 			if (cifs_sb)
3173 				cifs_sb->mnt_cifs_flags |=
3174 					CIFS_MOUNT_POSIX_PATHS;
3175 		}
3176 
3177 		cifs_dbg(FYI, "Negotiate caps 0x%x\n", (int)cap);
3178 #ifdef CONFIG_CIFS_DEBUG2
3179 		if (cap & CIFS_UNIX_FCNTL_CAP)
3180 			cifs_dbg(FYI, "FCNTL cap\n");
3181 		if (cap & CIFS_UNIX_EXTATTR_CAP)
3182 			cifs_dbg(FYI, "EXTATTR cap\n");
3183 		if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
3184 			cifs_dbg(FYI, "POSIX path cap\n");
3185 		if (cap & CIFS_UNIX_XATTR_CAP)
3186 			cifs_dbg(FYI, "XATTR cap\n");
3187 		if (cap & CIFS_UNIX_POSIX_ACL_CAP)
3188 			cifs_dbg(FYI, "POSIX ACL cap\n");
3189 		if (cap & CIFS_UNIX_LARGE_READ_CAP)
3190 			cifs_dbg(FYI, "very large read cap\n");
3191 		if (cap & CIFS_UNIX_LARGE_WRITE_CAP)
3192 			cifs_dbg(FYI, "very large write cap\n");
3193 		if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP)
3194 			cifs_dbg(FYI, "transport encryption cap\n");
3195 		if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
3196 			cifs_dbg(FYI, "mandatory transport encryption cap\n");
3197 #endif /* CIFS_DEBUG2 */
3198 		if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
3199 			if (ctx == NULL)
3200 				cifs_dbg(FYI, "resetting capabilities failed\n");
3201 			else
3202 				cifs_dbg(VFS, "Negotiating Unix capabilities with the server failed. Consider mounting with the Unix Extensions disabled if problems are found by specifying the nounix mount option.\n");
3203 
3204 		}
3205 	}
3206 }
3207 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
3208 
3209 int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb)
3210 {
3211 	struct smb3_fs_context *ctx = cifs_sb->ctx;
3212 
3213 	INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
3214 
3215 	spin_lock_init(&cifs_sb->tlink_tree_lock);
3216 	cifs_sb->tlink_tree = RB_ROOT;
3217 
3218 	cifs_dbg(FYI, "file mode: %04ho  dir mode: %04ho\n",
3219 		 ctx->file_mode, ctx->dir_mode);
3220 
3221 	/* this is needed for ASCII codepage to Unicode conversions */
3222 	if (ctx->iocharset == NULL) {
3223 		/* load_nls_default cannot return null */
3224 		cifs_sb->local_nls = load_nls_default();
3225 	} else {
3226 		cifs_sb->local_nls = load_nls(ctx->iocharset);
3227 		if (cifs_sb->local_nls == NULL) {
3228 			cifs_dbg(VFS, "CIFS mount error: iocharset %s not found\n",
3229 				 ctx->iocharset);
3230 			return -ELIBACC;
3231 		}
3232 	}
3233 	ctx->local_nls = cifs_sb->local_nls;
3234 
3235 	smb3_update_mnt_flags(cifs_sb);
3236 
3237 	if (ctx->direct_io)
3238 		cifs_dbg(FYI, "mounting share using direct i/o\n");
3239 	if (ctx->cache_ro) {
3240 		cifs_dbg(VFS, "mounting share with read only caching. Ensure that the share will not be modified while in use.\n");
3241 		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RO_CACHE;
3242 	} else if (ctx->cache_rw) {
3243 		cifs_dbg(VFS, "mounting share in single client RW caching mode. Ensure that no other systems will be accessing the share.\n");
3244 		cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_RO_CACHE |
3245 					    CIFS_MOUNT_RW_CACHE);
3246 	}
3247 
3248 	if ((ctx->cifs_acl) && (ctx->dynperm))
3249 		cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n");
3250 
3251 	if (ctx->prepath) {
3252 		cifs_sb->prepath = kstrdup(ctx->prepath, GFP_KERNEL);
3253 		if (cifs_sb->prepath == NULL)
3254 			return -ENOMEM;
3255 		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
3256 	}
3257 
3258 	return 0;
3259 }
3260 
3261 /* Release all successfully established connections */
3262 void cifs_mount_put_conns(struct cifs_mount_ctx *mnt_ctx)
3263 {
3264 	int rc = 0;
3265 
3266 	if (mnt_ctx->tcon)
3267 		cifs_put_tcon(mnt_ctx->tcon);
3268 	else if (mnt_ctx->ses)
3269 		cifs_put_smb_ses(mnt_ctx->ses);
3270 	else if (mnt_ctx->server)
3271 		cifs_put_tcp_session(mnt_ctx->server, 0);
3272 	mnt_ctx->cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS;
3273 	free_xid(mnt_ctx->xid);
3274 }
3275 
3276 int cifs_mount_get_session(struct cifs_mount_ctx *mnt_ctx)
3277 {
3278 	struct TCP_Server_Info *server = NULL;
3279 	struct smb3_fs_context *ctx;
3280 	struct cifs_ses *ses = NULL;
3281 	unsigned int xid;
3282 	int rc = 0;
3283 
3284 	xid = get_xid();
3285 
3286 	if (WARN_ON_ONCE(!mnt_ctx || !mnt_ctx->fs_ctx)) {
3287 		rc = -EINVAL;
3288 		goto out;
3289 	}
3290 	ctx = mnt_ctx->fs_ctx;
3291 
3292 	/* get a reference to a tcp session */
3293 	server = cifs_get_tcp_session(ctx, NULL);
3294 	if (IS_ERR(server)) {
3295 		rc = PTR_ERR(server);
3296 		server = NULL;
3297 		goto out;
3298 	}
3299 
3300 	/* get a reference to a SMB session */
3301 	ses = cifs_get_smb_ses(server, ctx);
3302 	if (IS_ERR(ses)) {
3303 		rc = PTR_ERR(ses);
3304 		ses = NULL;
3305 		goto out;
3306 	}
3307 
3308 	if ((ctx->persistent == true) && (!(ses->server->capabilities &
3309 					    SMB2_GLOBAL_CAP_PERSISTENT_HANDLES))) {
3310 		cifs_server_dbg(VFS, "persistent handles not supported by server\n");
3311 		rc = -EOPNOTSUPP;
3312 	}
3313 
3314 out:
3315 	mnt_ctx->xid = xid;
3316 	mnt_ctx->server = server;
3317 	mnt_ctx->ses = ses;
3318 	mnt_ctx->tcon = NULL;
3319 
3320 	return rc;
3321 }
3322 
3323 int cifs_mount_get_tcon(struct cifs_mount_ctx *mnt_ctx)
3324 {
3325 	struct TCP_Server_Info *server;
3326 	struct cifs_sb_info *cifs_sb;
3327 	struct smb3_fs_context *ctx;
3328 	struct cifs_tcon *tcon = NULL;
3329 	int rc = 0;
3330 
3331 	if (WARN_ON_ONCE(!mnt_ctx || !mnt_ctx->server || !mnt_ctx->ses || !mnt_ctx->fs_ctx ||
3332 			 !mnt_ctx->cifs_sb)) {
3333 		rc = -EINVAL;
3334 		goto out;
3335 	}
3336 	server = mnt_ctx->server;
3337 	ctx = mnt_ctx->fs_ctx;
3338 	cifs_sb = mnt_ctx->cifs_sb;
3339 
3340 	/* search for existing tcon to this server share */
3341 	tcon = cifs_get_tcon(mnt_ctx->ses, ctx);
3342 	if (IS_ERR(tcon)) {
3343 		rc = PTR_ERR(tcon);
3344 		tcon = NULL;
3345 		goto out;
3346 	}
3347 
3348 	/* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
3349 	if (tcon->posix_extensions)
3350 		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
3351 
3352 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
3353 	/* tell server which Unix caps we support */
3354 	if (cap_unix(tcon->ses)) {
3355 		/*
3356 		 * reset_cifs_unix_caps() checks the mount options to see if
3357 		 * unix extensions were disabled for just this mount.
3358 		 */
3359 		reset_cifs_unix_caps(mnt_ctx->xid, tcon, cifs_sb, ctx);
3360 		spin_lock(&tcon->ses->server->srv_lock);
3361 		if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
3362 		    (le64_to_cpu(tcon->fsUnixInfo.Capability) &
3363 		     CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
3364 			spin_unlock(&tcon->ses->server->srv_lock);
3365 			rc = -EACCES;
3366 			goto out;
3367 		}
3368 		spin_unlock(&tcon->ses->server->srv_lock);
3369 	} else
3370 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
3371 		tcon->unix_ext = 0; /* server does not support them */
3372 
3373 	/* do not care if the following call succeeds - informational */
3374 	if (!tcon->pipe && server->ops->qfs_tcon) {
3375 		server->ops->qfs_tcon(mnt_ctx->xid, tcon, cifs_sb);
3376 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) {
3377 			if (tcon->fsDevInfo.DeviceCharacteristics &
3378 			    cpu_to_le32(FILE_READ_ONLY_DEVICE))
3379 				cifs_dbg(VFS, "mounted to read only share\n");
3380 			else if ((cifs_sb->mnt_cifs_flags &
3381 				  CIFS_MOUNT_RW_CACHE) == 0)
3382 				cifs_dbg(VFS, "read only mount of RW share\n");
3383 			/* no need to log a RW mount of a typical RW share */
3384 		}
3385 	}
3386 
3387 	/*
3388 	 * Clamp the rsize/wsize mount arguments if they are too big for the server
3389 	 * and set the rsize/wsize to the negotiated values if not passed in by
3390 	 * the user on mount
3391 	 */
3392 	if ((cifs_sb->ctx->wsize == 0) ||
3393 	    (cifs_sb->ctx->wsize > server->ops->negotiate_wsize(tcon, ctx)))
3394 		cifs_sb->ctx->wsize = server->ops->negotiate_wsize(tcon, ctx);
3395 	if ((cifs_sb->ctx->rsize == 0) ||
3396 	    (cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx)))
3397 		cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx);
3398 
3399 	/*
3400 	 * The fscache super cookie is initialized from the volume info
3401 	 * returned above.  cifs_fscache_get_super_cookie() makes sure that
3402 	 * we do not get the super cookie twice.
3403 	 */
3404 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
3405 		cifs_fscache_get_super_cookie(tcon);
3406 
3407 out:
3408 	mnt_ctx->tcon = tcon;
3409 	return rc;
3410 }
3411 
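/*
 * Hang the master tcon off of the superblock: allocate the master
 * tcon_link, insert it into the superblock's tlink tree and kick off
 * the periodic tlink pruning job.
 */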
3412 static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
3413 			     struct cifs_tcon *tcon)
3414 {
3415 	struct tcon_link *tlink;
3416 
3417 	/* hang the tcon off of the superblock */
3418 	tlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
3419 	if (tlink == NULL)
3420 		return -ENOMEM;
3421 
3422 	tlink->tl_uid = ses->linux_uid;
3423 	tlink->tl_tcon = tcon;
3424 	tlink->tl_time = jiffies;
3425 	set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
3426 	set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
3427 
3428 	cifs_sb->master_tlink = tlink;
3429 	spin_lock(&cifs_sb->tlink_tree_lock);
3430 	tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
3431 	spin_unlock(&cifs_sb->tlink_tree_lock);
3432 
3433 	queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
3434 				TLINK_IDLE_EXPIRE);
3435 	return 0;
3436 }
3437 
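/*
 * Walk full_path one component at a time and check that every intermediate
 * directory is accessible on the server, temporarily NUL-terminating the
 * string at each separator.  If added_treename is set, the leading tree name
 * component is skipped.
 */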
3438 static int
3439 cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
3440 					unsigned int xid,
3441 					struct cifs_tcon *tcon,
3442 					struct cifs_sb_info *cifs_sb,
3443 					char *full_path,
3444 					int added_treename)
3445 {
3446 	int rc;
3447 	char *s;
3448 	char sep, tmp;
3449 	int skip = added_treename ? 1 : 0;
3450 
3451 	sep = CIFS_DIR_SEP(cifs_sb);
3452 	s = full_path;
3453 
3454 	rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, "");
3455 	while (rc == 0) {
3456 		/* skip separators */
3457 		while (*s == sep)
3458 			s++;
3459 		if (!*s)
3460 			break;
3461 		/* next separator */
3462 		while (*s && *s != sep)
3463 			s++;
3464 		/*
3465 		 * if the treename is added, we then have to skip the first
3466 		 * part within the separators
3467 		 */
3468 		if (skip) {
3469 			skip = 0;
3470 			continue;
3471 		}
3472 		/*
3473 		 * temporarily null-terminate the path at the end of
3474 		 * the current component
3475 		 */
3476 		tmp = *s;
3477 		*s = 0;
3478 		rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
3479 						     full_path);
3480 		*s = tmp;
3481 	}
3482 	return rc;
3483 }
3484 
3485 /*
3486  * Check if path is remote (i.e. a DFS share).
3487  *
3488  * Return -EREMOTE if it is, otherwise 0 or -errno.
3489  */
3490 int cifs_is_path_remote(struct cifs_mount_ctx *mnt_ctx)
3491 {
3492 	int rc;
3493 	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
3494 	struct TCP_Server_Info *server = mnt_ctx->server;
3495 	unsigned int xid = mnt_ctx->xid;
3496 	struct cifs_tcon *tcon = mnt_ctx->tcon;
3497 	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
3498 	char *full_path;
3499 
3500 	if (!server->ops->is_path_accessible)
3501 		return -EOPNOTSUPP;
3502 
3503 	/*
3504 	 * cifs_build_path_to_root works only when we have a valid tcon
3505 	 */
3506 	full_path = cifs_build_path_to_root(ctx, cifs_sb, tcon,
3507 					    tcon->Flags & SMB_SHARE_IS_IN_DFS);
3508 	if (full_path == NULL)
3509 		return -ENOMEM;
3510 
3511 	cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);
3512 
3513 	rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
3514 					     full_path);
3515 	if (rc != 0 && rc != -EREMOTE)
3516 		goto out;
3517 
3518 	if (rc != -EREMOTE) {
3519 		rc = cifs_are_all_path_components_accessible(server, xid, tcon,
3520 			cifs_sb, full_path, tcon->Flags & SMB_SHARE_IS_IN_DFS);
3521 		if (rc != 0) {
3522 			cifs_server_dbg(VFS, "cannot query dirs between root and final path, enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
3523 			cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
3524 			rc = 0;
3525 		}
3526 	}
3527 
3528 out:
3529 	kfree(full_path);
3530 	return rc;
3531 }
3532 
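/*
 * Top-level mount entry point.  With CONFIG_CIFS_DFS_UPCALL the share is
 * mounted through dfs_mount_share(), which may chase DFS referrals and keeps
 * a list of root SMB sessions; without it we simply get a session and a tcon
 * and verify that the path is not remote.
 */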
3533 #ifdef CONFIG_CIFS_DFS_UPCALL
3534 int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
3535 {
3536 	struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
3537 	bool isdfs;
3538 	int rc;
3539 
3540 	INIT_LIST_HEAD(&mnt_ctx.dfs_ses_list);
3541 
3542 	rc = dfs_mount_share(&mnt_ctx, &isdfs);
3543 	if (rc)
3544 		goto error;
3545 	if (!isdfs)
3546 		goto out;
3547 
3548 	/*
3549 	 * After reconnecting to a different server, unique ids won't match anymore, so we disable
3550 	 * serverino. This prevents dentry revalidation from thinking the dentries are stale (ESTALE).
3551 	 */
3552 	cifs_autodisable_serverino(cifs_sb);
3553 	/*
3554 	 * Force the use of prefix path to support failover on DFS paths that resolve to targets
3555 	 * that have different prefix paths.
3556 	 */
3557 	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
3558 	kfree(cifs_sb->prepath);
3559 	cifs_sb->prepath = ctx->prepath;
3560 	ctx->prepath = NULL;
3561 
3562 out:
3563 	cifs_try_adding_channels(cifs_sb, mnt_ctx.ses);
3564 	rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
3565 	if (rc)
3566 		goto error;
3567 
3568 	free_xid(mnt_ctx.xid);
3569 	return rc;
3570 
3571 error:
3572 	dfs_put_root_smb_sessions(&mnt_ctx.dfs_ses_list);
3573 	cifs_mount_put_conns(&mnt_ctx);
3574 	return rc;
3575 }
3576 #else
3577 int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
3578 {
3579 	int rc = 0;
3580 	struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
3581 
3582 	rc = cifs_mount_get_session(&mnt_ctx);
3583 	if (rc)
3584 		goto error;
3585 
3586 	rc = cifs_mount_get_tcon(&mnt_ctx);
3587 	if (rc)
3588 		goto error;
3589 
3590 	rc = cifs_is_path_remote(&mnt_ctx);
3591 	if (rc == -EREMOTE)
3592 		rc = -EOPNOTSUPP;
3593 	if (rc)
3594 		goto error;
3595 
3596 	rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
3597 	if (rc)
3598 		goto error;
3599 
3600 	free_xid(mnt_ctx.xid);
3601 	return rc;
3602 
3603 error:
3604 	cifs_mount_put_conns(&mnt_ctx);
3605 	return rc;
3606 }
3607 #endif
3608 
3609 /*
3610  * Issue a TREE_CONNECT_ANDX request to attach the session to the share.
3611  */
3612 int
3613 CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
3614 	 const char *tree, struct cifs_tcon *tcon,
3615 	 const struct nls_table *nls_codepage)
3616 {
3617 	struct smb_hdr *smb_buffer;
3618 	struct smb_hdr *smb_buffer_response;
3619 	TCONX_REQ *pSMB;
3620 	TCONX_RSP *pSMBr;
3621 	unsigned char *bcc_ptr;
3622 	int rc = 0;
3623 	int length;
3624 	__u16 bytes_left, count;
3625 
3626 	if (ses == NULL)
3627 		return -EIO;
3628 
3629 	smb_buffer = cifs_buf_get();
3630 	if (smb_buffer == NULL)
3631 		return -ENOMEM;
3632 
3633 	smb_buffer_response = smb_buffer;
3634 
3635 	header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX,
3636 			NULL /*no tid */ , 4 /*wct */ );
3637 
3638 	smb_buffer->Mid = get_next_mid(ses->server);
3639 	smb_buffer->Uid = ses->Suid;
3640 	pSMB = (TCONX_REQ *) smb_buffer;
3641 	pSMBr = (TCONX_RSP *) smb_buffer_response;
3642 
3643 	pSMB->AndXCommand = 0xFF;
3644 	pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
3645 	bcc_ptr = &pSMB->Password[0];
3646 
3647 	pSMB->PasswordLength = cpu_to_le16(1);	/* minimum */
3648 	*bcc_ptr = 0; /* password is null byte */
3649 	bcc_ptr++;              /* skip password */
3650 	/* already aligned so no need to do it below */
3651 
3652 	if (ses->server->sign)
3653 		smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
3654 
3655 	if (ses->capabilities & CAP_STATUS32) {
3656 		smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS;
3657 	}
3658 	if (ses->capabilities & CAP_DFS) {
3659 		smb_buffer->Flags2 |= SMBFLG2_DFS;
3660 	}
3661 	if (ses->capabilities & CAP_UNICODE) {
3662 		smb_buffer->Flags2 |= SMBFLG2_UNICODE;
3663 		length =
3664 		    cifs_strtoUTF16((__le16 *) bcc_ptr, tree,
3665 			6 /* max utf8 char length in bytes */ *
3666 			(/* server len*/ + 256 /* share len */), nls_codepage);
3667 		bcc_ptr += 2 * length;	/* convert num 16 bit words to bytes */
3668 		bcc_ptr += 2;	/* skip trailing null */
3669 	} else {		/* ASCII */
3670 		strcpy(bcc_ptr, tree);
3671 		bcc_ptr += strlen(tree) + 1;
3672 	}
3673 	strcpy(bcc_ptr, "?????");
3674 	bcc_ptr += strlen("?????");
3675 	bcc_ptr += 1;
3676 	count = bcc_ptr - &pSMB->Password[0];
3677 	be32_add_cpu(&pSMB->hdr.smb_buf_length, count);
3678 	pSMB->ByteCount = cpu_to_le16(count);
3679 
3680 	rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length,
3681 			 0);
3682 
3683 	/* above now done in SendReceive */
3684 	if (rc == 0) {
3685 		bool is_unicode;
3686 
3687 		tcon->tid = smb_buffer_response->Tid;
3688 		bcc_ptr = pByteArea(smb_buffer_response);
3689 		bytes_left = get_bcc(smb_buffer_response);
3690 		length = strnlen(bcc_ptr, bytes_left - 2);
3691 		if (smb_buffer->Flags2 & SMBFLG2_UNICODE)
3692 			is_unicode = true;
3693 		else
3694 			is_unicode = false;
3695 
3696 
3697 		/* skip service field (NB: this field is always ASCII) */
3698 		if (length == 3) {
3699 			if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') &&
3700 			    (bcc_ptr[2] == 'C')) {
3701 				cifs_dbg(FYI, "IPC connection\n");
3702 				tcon->ipc = true;
3703 				tcon->pipe = true;
3704 			}
3705 		} else if (length == 2) {
3706 			if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) {
3707 				/* the most common case */
3708 				cifs_dbg(FYI, "disk share connection\n");
3709 			}
3710 		}
3711 		bcc_ptr += length + 1;
3712 		bytes_left -= (length + 1);
3713 		strscpy(tcon->tree_name, tree, sizeof(tcon->tree_name));
3714 
3715 		/* mostly informational -- no need to fail on error here */
3716 		kfree(tcon->nativeFileSystem);
3717 		tcon->nativeFileSystem = cifs_strndup_from_utf16(bcc_ptr,
3718 						      bytes_left, is_unicode,
3719 						      nls_codepage);
3720 
3721 		cifs_dbg(FYI, "nativeFileSystem=%s\n", tcon->nativeFileSystem);
3722 
3723 		if ((smb_buffer_response->WordCount == 3) ||
3724 			 (smb_buffer_response->WordCount == 7))
3725 			/* field is in same location */
3726 			tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
3727 		else
3728 			tcon->Flags = 0;
3729 		cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags);
3730 	}
3731 
3732 	cifs_buf_release(smb_buffer);
3733 	return rc;
3734 }
3735 
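/*
 * RCU callback that tears down a cifs_sb_info once all readers are done:
 * drop the nls table, release the fs context and free the structure.
 */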
3736 static void delayed_free(struct rcu_head *p)
3737 {
3738 	struct cifs_sb_info *cifs_sb = container_of(p, struct cifs_sb_info, rcu);
3739 
3740 	unload_nls(cifs_sb->local_nls);
3741 	smb3_cleanup_fs_context(cifs_sb->ctx);
3742 	kfree(cifs_sb);
3743 }
3744 
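/*
 * Tear down the superblock state at umount: stop the tlink pruning job,
 * drop every tcon_link hanging off the tlink tree and free the
 * cifs_sb_info via RCU.
 */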
3745 void
3746 cifs_umount(struct cifs_sb_info *cifs_sb)
3747 {
3748 	struct rb_root *root = &cifs_sb->tlink_tree;
3749 	struct rb_node *node;
3750 	struct tcon_link *tlink;
3751 
3752 	cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
3753 
3754 	spin_lock(&cifs_sb->tlink_tree_lock);
3755 	while ((node = rb_first(root))) {
3756 		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
3757 		cifs_get_tlink(tlink);
3758 		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
3759 		rb_erase(node, root);
3760 
3761 		spin_unlock(&cifs_sb->tlink_tree_lock);
3762 		cifs_put_tlink(tlink);
3763 		spin_lock(&cifs_sb->tlink_tree_lock);
3764 	}
3765 	spin_unlock(&cifs_sb->tlink_tree_lock);
3766 
3767 	kfree(cifs_sb->prepath);
3768 	call_rcu(&cifs_sb->rcu, delayed_free);
3769 }
3770 
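/*
 * Send a protocol negotiate request if the connection still needs one,
 * walking tcpStatus through CifsInNegotiate and back to CifsGood (or to
 * CifsNeedNegotiate on failure).  Only one negotiate is sent per connect.
 */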
3771 int
3772 cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
3773 			struct TCP_Server_Info *server)
3774 {
3775 	int rc = 0;
3776 
3777 	if (!server->ops->need_neg || !server->ops->negotiate)
3778 		return -ENOSYS;
3779 
3780 	/* only send once per connect */
3781 	spin_lock(&server->srv_lock);
3782 	if (server->tcpStatus != CifsGood &&
3783 	    server->tcpStatus != CifsNew &&
3784 	    server->tcpStatus != CifsNeedNegotiate) {
3785 		spin_unlock(&server->srv_lock);
3786 		return -EHOSTDOWN;
3787 	}
3788 
3789 	if (!server->ops->need_neg(server) &&
3790 	    server->tcpStatus == CifsGood) {
3791 		spin_unlock(&server->srv_lock);
3792 		return 0;
3793 	}
3794 
3795 	server->tcpStatus = CifsInNegotiate;
3796 	spin_unlock(&server->srv_lock);
3797 
3798 	rc = server->ops->negotiate(xid, ses, server);
3799 	if (rc == 0) {
3800 		spin_lock(&server->srv_lock);
3801 		if (server->tcpStatus == CifsInNegotiate)
3802 			server->tcpStatus = CifsGood;
3803 		else
3804 			rc = -EHOSTDOWN;
3805 		spin_unlock(&server->srv_lock);
3806 	} else {
3807 		spin_lock(&server->srv_lock);
3808 		if (server->tcpStatus == CifsInNegotiate)
3809 			server->tcpStatus = CifsNeedNegotiate;
3810 		spin_unlock(&server->srv_lock);
3811 	}
3812 
3813 	return rc;
3814 }
3815 
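/*
 * Perform session setup on @server for @ses.  When other channels of the
 * session are already established this is a channel binding; otherwise the
 * session capabilities and any stale auth key are (re)initialized first.
 */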
3816 int
3817 cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
3818 		   struct TCP_Server_Info *server,
3819 		   struct nls_table *nls_info)
3820 {
3821 	int rc = -ENOSYS;
3822 	struct TCP_Server_Info *pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
3823 	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&pserver->dstaddr;
3824 	struct sockaddr_in *addr = (struct sockaddr_in *)&pserver->dstaddr;
3825 	bool is_binding = false;
3826 
3827 	spin_lock(&ses->ses_lock);
3828 	cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
3829 		 __func__, ses->chans_need_reconnect);
3830 
3831 	if (ses->ses_status != SES_GOOD &&
3832 	    ses->ses_status != SES_NEW &&
3833 	    ses->ses_status != SES_NEED_RECON) {
3834 		spin_unlock(&ses->ses_lock);
3835 		return -EHOSTDOWN;
3836 	}
3837 
3838 	/* only send once per connect */
3839 	spin_lock(&ses->chan_lock);
3840 	if (CIFS_ALL_CHANS_GOOD(ses)) {
3841 		if (ses->ses_status == SES_NEED_RECON)
3842 			ses->ses_status = SES_GOOD;
3843 		spin_unlock(&ses->chan_lock);
3844 		spin_unlock(&ses->ses_lock);
3845 		return 0;
3846 	}
3847 
3848 	cifs_chan_set_in_reconnect(ses, server);
3849 	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
3850 	spin_unlock(&ses->chan_lock);
3851 
3852 	if (!is_binding)
3853 		ses->ses_status = SES_IN_SETUP;
3854 	spin_unlock(&ses->ses_lock);
3855 
3856 	/* update ses ip_addr only for primary chan */
3857 	if (server == pserver) {
3858 		if (server->dstaddr.ss_family == AF_INET6)
3859 			scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr);
3860 		else
3861 			scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI4", &addr->sin_addr);
3862 	}
3863 
3864 	if (!is_binding) {
3865 		ses->capabilities = server->capabilities;
3866 		if (!linuxExtEnabled)
3867 			ses->capabilities &= (~server->vals->cap_unix);
3868 
3869 		if (ses->auth_key.response) {
3870 			cifs_dbg(FYI, "Free previous auth_key.response = %p\n",
3871 				 ses->auth_key.response);
3872 			kfree_sensitive(ses->auth_key.response);
3873 			ses->auth_key.response = NULL;
3874 			ses->auth_key.len = 0;
3875 		}
3876 	}
3877 
3878 	cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
3879 		 server->sec_mode, server->capabilities, server->timeAdj);
3880 
3881 	if (server->ops->sess_setup)
3882 		rc = server->ops->sess_setup(xid, ses, server, nls_info);
3883 
3884 	if (rc) {
3885 		cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc);
3886 		spin_lock(&ses->ses_lock);
3887 		if (ses->ses_status == SES_IN_SETUP)
3888 			ses->ses_status = SES_NEED_RECON;
3889 		spin_lock(&ses->chan_lock);
3890 		cifs_chan_clear_in_reconnect(ses, server);
3891 		spin_unlock(&ses->chan_lock);
3892 		spin_unlock(&ses->ses_lock);
3893 	} else {
3894 		spin_lock(&ses->ses_lock);
3895 		if (ses->ses_status == SES_IN_SETUP)
3896 			ses->ses_status = SES_GOOD;
3897 		spin_lock(&ses->chan_lock);
3898 		cifs_chan_clear_in_reconnect(ses, server);
3899 		cifs_chan_clear_need_reconnect(ses, server);
3900 		spin_unlock(&ses->chan_lock);
3901 		spin_unlock(&ses->ses_lock);
3902 	}
3903 
3904 	return rc;
3905 }
3906 
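/*
 * Copy the authentication type from an existing session into the fs
 * context; for anything other than Kerberos the credentials are then
 * filled in via cifs_set_cifscreds().
 */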
3907 static int
3908 cifs_set_vol_auth(struct smb3_fs_context *ctx, struct cifs_ses *ses)
3909 {
3910 	ctx->sectype = ses->sectype;
3911 
3912 	/* krb5 is special, since we don't need username or pw */
3913 	if (ctx->sectype == Kerberos)
3914 		return 0;
3915 
3916 	return cifs_set_cifscreds(ctx, ses);
3917 }
3918 
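/*
 * Build a new tcon on behalf of @fsuid for a multiuser mount: clone the
 * relevant options from the master tcon, reuse its TCP connection and set
 * up a fresh SMB session and tree connection for that user.
 */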
3919 static struct cifs_tcon *
3920 cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
3921 {
3922 	int rc;
3923 	struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
3924 	struct cifs_ses *ses;
3925 	struct cifs_tcon *tcon = NULL;
3926 	struct smb3_fs_context *ctx;
3927 
3928 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
3929 	if (ctx == NULL)
3930 		return ERR_PTR(-ENOMEM);
3931 
3932 	ctx->local_nls = cifs_sb->local_nls;
3933 	ctx->linux_uid = fsuid;
3934 	ctx->cred_uid = fsuid;
3935 	ctx->UNC = master_tcon->tree_name;
3936 	ctx->retry = master_tcon->retry;
3937 	ctx->nocase = master_tcon->nocase;
3938 	ctx->nohandlecache = master_tcon->nohandlecache;
3939 	ctx->local_lease = master_tcon->local_lease;
3940 	ctx->no_lease = master_tcon->no_lease;
3941 	ctx->resilient = master_tcon->use_resilient;
3942 	ctx->persistent = master_tcon->use_persistent;
3943 	ctx->handle_timeout = master_tcon->handle_timeout;
3944 	ctx->no_linux_ext = !master_tcon->unix_ext;
3945 	ctx->linux_ext = master_tcon->posix_extensions;
3946 	ctx->sectype = master_tcon->ses->sectype;
3947 	ctx->sign = master_tcon->ses->sign;
3948 	ctx->seal = master_tcon->seal;
3949 	ctx->witness = master_tcon->use_witness;
3950 
3951 	rc = cifs_set_vol_auth(ctx, master_tcon->ses);
3952 	if (rc) {
3953 		tcon = ERR_PTR(rc);
3954 		goto out;
3955 	}
3956 
3957 	/* get a reference for the same TCP session */
3958 	spin_lock(&cifs_tcp_ses_lock);
3959 	++master_tcon->ses->server->srv_count;
3960 	spin_unlock(&cifs_tcp_ses_lock);
3961 
3962 	ses = cifs_get_smb_ses(master_tcon->ses->server, ctx);
3963 	if (IS_ERR(ses)) {
3964 		tcon = (struct cifs_tcon *)ses;
3965 		cifs_put_tcp_session(master_tcon->ses->server, 0);
3966 		goto out;
3967 	}
3968 
3969 	tcon = cifs_get_tcon(ses, ctx);
3970 	if (IS_ERR(tcon)) {
3971 		cifs_put_smb_ses(ses);
3972 		goto out;
3973 	}
3974 
3975 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
3976 	if (cap_unix(ses))
3977 		reset_cifs_unix_caps(0, tcon, NULL, ctx);
3978 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
3979 
3980 out:
3981 	kfree(ctx->username);
3982 	kfree_sensitive(ctx->password);
3983 	kfree(ctx);
3984 
3985 	return tcon;
3986 }
3987 
3988 struct cifs_tcon *
3989 cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
3990 {
3991 	return tlink_tcon(cifs_sb_master_tlink(cifs_sb));
3992 }
3993 
3994 /* find and return a tlink with given uid */
3995 static struct tcon_link *
3996 tlink_rb_search(struct rb_root *root, kuid_t uid)
3997 {
3998 	struct rb_node *node = root->rb_node;
3999 	struct tcon_link *tlink;
4000 
4001 	while (node) {
4002 		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
4003 
4004 		if (uid_gt(tlink->tl_uid, uid))
4005 			node = node->rb_left;
4006 		else if (uid_lt(tlink->tl_uid, uid))
4007 			node = node->rb_right;
4008 		else
4009 			return tlink;
4010 	}
4011 	return NULL;
4012 }
4013 
4014 /* insert a tcon_link into the tree */
4015 static void
4016 tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
4017 {
4018 	struct rb_node **new = &(root->rb_node), *parent = NULL;
4019 	struct tcon_link *tlink;
4020 
4021 	while (*new) {
4022 		tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
4023 		parent = *new;
4024 
4025 		if (uid_gt(tlink->tl_uid, new_tlink->tl_uid))
4026 			new = &((*new)->rb_left);
4027 		else
4028 			new = &((*new)->rb_right);
4029 	}
4030 
4031 	rb_link_node(&new_tlink->tl_rbnode, parent, new);
4032 	rb_insert_color(&new_tlink->tl_rbnode, root);
4033 }
4034 
4035 /*
4036  * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
4037  * current task.
4038  *
4039  * If the superblock doesn't refer to a multiuser mount, then just return
4040  * the master tcon for the mount.
4041  *
4042  * First, search the rbtree for an existing tcon for this fsuid. If one
4043  * exists, then check to see if it's pending construction. If it is then wait
4044  * for construction to complete. Once it's no longer pending, check to see if
4045  * it failed and either return an error or retry construction, depending on
4046  * the timeout.
4047  *
4048  * If one doesn't exist then insert a new tcon_link struct into the tree and
4049  * try to construct a new one.
4050  */
4051 struct tcon_link *
4052 cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
4053 {
4054 	int ret;
4055 	kuid_t fsuid = current_fsuid();
4056 	struct tcon_link *tlink, *newtlink;
4057 
4058 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
4059 		return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
4060 
4061 	spin_lock(&cifs_sb->tlink_tree_lock);
4062 	tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
4063 	if (tlink)
4064 		cifs_get_tlink(tlink);
4065 	spin_unlock(&cifs_sb->tlink_tree_lock);
4066 
4067 	if (tlink == NULL) {
4068 		newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
4069 		if (newtlink == NULL)
4070 			return ERR_PTR(-ENOMEM);
4071 		newtlink->tl_uid = fsuid;
4072 		newtlink->tl_tcon = ERR_PTR(-EACCES);
4073 		set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
4074 		set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
4075 		cifs_get_tlink(newtlink);
4076 
4077 		spin_lock(&cifs_sb->tlink_tree_lock);
4078 		/* was one inserted after previous search? */
4079 		tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
4080 		if (tlink) {
4081 			cifs_get_tlink(tlink);
4082 			spin_unlock(&cifs_sb->tlink_tree_lock);
4083 			kfree(newtlink);
4084 			goto wait_for_construction;
4085 		}
4086 		tlink = newtlink;
4087 		tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
4088 		spin_unlock(&cifs_sb->tlink_tree_lock);
4089 	} else {
4090 wait_for_construction:
4091 		ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
4092 				  TASK_INTERRUPTIBLE);
4093 		if (ret) {
4094 			cifs_put_tlink(tlink);
4095 			return ERR_PTR(-ERESTARTSYS);
4096 		}
4097 
4098 		/* if it's good, return it */
4099 		if (!IS_ERR(tlink->tl_tcon))
4100 			return tlink;
4101 
4102 		/* return error if we tried this already recently */
4103 		if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) {
4104 			cifs_put_tlink(tlink);
4105 			return ERR_PTR(-EACCES);
4106 		}
4107 
4108 		if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags))
4109 			goto wait_for_construction;
4110 	}
4111 
4112 	tlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid);
4113 	clear_bit(TCON_LINK_PENDING, &tlink->tl_flags);
4114 	wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING);
4115 
4116 	if (IS_ERR(tlink->tl_tcon)) {
4117 		cifs_put_tlink(tlink);
4118 		return ERR_PTR(-EACCES);
4119 	}
4120 
4121 	return tlink;
4122 }
4123 
4124 /*
4125  * Periodic workqueue job that scans a superblock's tlink tree and closes
4126  * out tcon_links that have been idle longer than TLINK_IDLE_EXPIRE.
4127  */
4128 static void
4129 cifs_prune_tlinks(struct work_struct *work)
4130 {
4131 	struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
4132 						    prune_tlinks.work);
4133 	struct rb_root *root = &cifs_sb->tlink_tree;
4134 	struct rb_node *node;
4135 	struct rb_node *tmp;
4136 	struct tcon_link *tlink;
4137 
4138 	/*
4139 	 * Because we drop the spinlock in the loop in order to put the tlink,
4140 	 * the walk is not guarded against removal of links from the tree. The
4141 	 * only places that remove entries from the tree are this function and
4142 	 * umounts. Because this function is non-reentrant and is canceled
4143 	 * before umount can proceed, this is safe.
4144 	 */
4145 	spin_lock(&cifs_sb->tlink_tree_lock);
4146 	node = rb_first(root);
4147 	while (node != NULL) {
4148 		tmp = node;
4149 		node = rb_next(tmp);
4150 		tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);
4151 
4152 		if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
4153 		    atomic_read(&tlink->tl_count) != 0 ||
4154 		    time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
4155 			continue;
4156 
4157 		cifs_get_tlink(tlink);
4158 		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
4159 		rb_erase(tmp, root);
4160 
4161 		spin_unlock(&cifs_sb->tlink_tree_lock);
4162 		cifs_put_tlink(tlink);
4163 		spin_lock(&cifs_sb->tlink_tree_lock);
4164 	}
4165 	spin_unlock(&cifs_sb->tlink_tree_lock);
4166 
4167 	queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
4168 				TLINK_IDLE_EXPIRE);
4169 }
4170 
4171 #ifndef CONFIG_CIFS_DFS_UPCALL
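/*
 * Non-DFS variant of cifs_tree_connect(): (re)issue the tree connect for
 * @tcon if it is new or needs reconnecting, moving its status through
 * TID_IN_TCON to TID_GOOD (or back to TID_NEED_TCON on failure).
 */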
4172 int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc)
4173 {
4174 	int rc;
4175 	const struct smb_version_operations *ops = tcon->ses->server->ops;
4176 
4177 	/* only send once per connect */
4178 	spin_lock(&tcon->tc_lock);
4179 	if (tcon->status == TID_GOOD) {
4180 		spin_unlock(&tcon->tc_lock);
4181 		return 0;
4182 	}
4183 
4184 	if (tcon->status != TID_NEW &&
4185 	    tcon->status != TID_NEED_TCON) {
4186 		spin_unlock(&tcon->tc_lock);
4187 		return -EHOSTDOWN;
4188 	}
4189 
4190 	tcon->status = TID_IN_TCON;
4191 	spin_unlock(&tcon->tc_lock);
4192 
4193 	rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, nlsc);
4194 	if (rc) {
4195 		spin_lock(&tcon->tc_lock);
4196 		if (tcon->status == TID_IN_TCON)
4197 			tcon->status = TID_NEED_TCON;
4198 		spin_unlock(&tcon->tc_lock);
4199 	} else {
4200 		spin_lock(&tcon->tc_lock);
4201 		if (tcon->status == TID_IN_TCON)
4202 			tcon->status = TID_GOOD;
4203 		tcon->need_reconnect = false;
4204 		spin_unlock(&tcon->tc_lock);
4205 	}
4206 
4207 	return rc;
4208 }
4209 #endif
4210