/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

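/*
 * Allocate a mid (multiplex id) entry for the request in @smb_buffer on
 * @server. The entry is returned in MID_REQUEST_ALLOCATED state holding
 * a single reference; it is freed when that reference is dropped via
 * cifs_mid_q_entry_release()/DeleteMidQEntry().
 */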
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

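/*
 * Final kref release handler: record per-command round-trip statistics
 * (when CONFIG_CIFS_STATS2 is enabled), free the response buffer and
 * return the entry to the mid mempool. Runs under GlobalMid_Lock, taken
 * by cifs_mid_q_entry_release() around kref_put().
 */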
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
	struct TCP_Server_Info *server = midEntry->server;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * Commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calculation is unlikely or impossible to
	 * wrap as long as slow_rsp_threshold is not set way above the
	 * recommended maximum value (32767, i.e. 9 hours), and is generally
	 * harmless even if wrong since it only affects debug counters - so
	 * leave the calculation as a simple comparison rather than doing
	 * multiple conversions and overflow checks.
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
			       midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
			       midEntry->command, midEntry->mid);
			cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif

	mempool_free(midEntry, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}

void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
	cifs_mid_q_entry_release(midEntry);
}

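/*
 * Remove the mid from the server's pending queue (unless another thread
 * already did) and drop the allocation reference.
 */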
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @smb_msg:	Message to send
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear.  The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet.  In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_server_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

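/*
 * Return the number of bytes the request will occupy on the wire. For
 * SMB2+ (no header preamble), a leading 4 byte RFC1002-length iovec is
 * excluded from the total.
 */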
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}

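/*
 * Push one or more marshalled requests out on the socket (or over SMB
 * Direct when RDMA is in use). Signals are blocked across the send so a
 * partial send cannot be caused by an interrupt; a partial send for any
 * other reason still marks the session for reconnect.
 */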
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server) && server->smbd_conn) {
		rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (signal_pending(current)) {
		cifs_dbg(FYI, "signal is pending before sending any data\n");
		return -EINTR;
	}

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate an RFC1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len  = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If a signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -EINTR;
	}

	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}

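/*
 * Wrapper around __smb_send_rqst(): for CIFS_TRANSFORM_REQ requests it
 * prepends an smb2_transform_hdr and encrypts the chain via the
 * ->init_transform_rq() callback before handing it to the socket.
 */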
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(&tr_hdr, 0, sizeof(tr_hdr));

	iov.iov_base = &tr_hdr;
	iov.iov_len = sizeof(tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
	return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

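/*
 * Block until @num_credits credits are available (or @timeout ms pass),
 * then charge them to the server's in-flight count and record in
 * @instance which reconnect generation they were taken from. Echoes and
 * non-blocking ops (oplock breaks) are special-cased so they are never
 * held up behind the wait.
 */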
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	int rc;
	int *credits;
	int optype;
	long int t;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				trace_smb3_credit_timeout(server->CurrentMid,
					server->hostname, num_credits);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
					 timeout);
				return -ENOTSUPP;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits for compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this if we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);
				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					trace_smb3_credit_timeout(
						server->CurrentMid,
						server->hostname, num_credits);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						 timeout);
					return -ENOTSUPP;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}

static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	if (*credits < num) {
		/*
		 * Return immediately if not too many requests in flight since
		 * we will likely be stuck on waiting for credits.
		 */
		if (server->in_flight < num - *credits) {
			spin_unlock(&server->req_lock);
			return -ENOTSUPP;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}

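/*
 * Allocate a mid for @in_buf and queue it on the server's pending mid
 * list, rejecting commands that are invalid for the current session
 * state (e.g. anything but negotiate/session setup while the session is
 * still being established).
 */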
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
			(in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				    midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

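/*
 * Build a mid for an asynchronous request: verify that iov[0] holds the
 * 4 byte RFC1002 length and is contiguous with iov[1], set the signing
 * flag when the server requires it, and sign the request.
 */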
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	mutex_lock(&server->srv_mutex);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}

/*
 * Send an SMB request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RSP_BUF;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

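/*
 * Sanity-check a received SMB: dump the start of the frame, verify the
 * signature when signing is in use, and map the NT status in the
 * response to a POSIX error code.
 */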
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;
	struct cifs_credits credits;

	credits.value = server->ops->get_credits(mid);
	credits.instance = server->reconnect_instance;

	add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}

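/*
 * Send @num_rqst requests as a single compound chain and wait for the
 * responses. Credits for every request are obtained up front; as
 * responses arrive the per-mid callbacks return granted credits, and
 * cancelled mids keep a callback installed so their credits and buffers
 * are not leaked.
 */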
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;
	struct TCP_Server_Info *server;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	server = ses->server;
	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&server->srv_mutex);

	/*
	 * All the parts of the compound chain must use credits obtained from
	 * the same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	mutex_unlock(&server->srv_mutex);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establishment.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		for (; i < num_rqst; i++) {
			cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						     flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establishment.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
				  resp_iov);
}

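/*
 * Legacy synchronous entry point: prepend an iovec for the 4 byte
 * RFC1001 length field to the caller's array, then hand the request to
 * cifs_send_recv().
 */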
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}

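/*
 * Send a blocking SMB1 lock request and wait interruptibly for the
 * response. If the wait is interrupted by a signal, a cancel is issued:
 * NT_CANCEL for POSIX (transact2) locks, LOCKINGX_CANCEL_LOCK for
 * Windows locks. If the cancelled lock then fails with -EACCES,
 * -ERESTARTSYS is returned so the system call is restarted.
 */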
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}