/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

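/*
 * Default mid callback: the mid was set up synchronously, so simply wake
 * the task stashed in callback_data by AllocMidQEntry().
 */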
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

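/*
 * Allocate a mid (multiplex id) tracking structure from the mempool for the
 * given request header. The mid starts out synchronous: the default callback
 * wakes the allocating task, and callers override it for async requests.
 */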
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* the time a mid is allocated can precede the time it is sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

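/*
 * kref release function: returns the mid to the mempool. Callers drop their
 * reference via cifs_mid_q_entry_release(), which takes GlobalMid_Lock
 * around the final put.
 */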
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
					       refcount);

	mempool_free(mid, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}

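/*
 * Mark a mid free, release its response buffer, and drop the queue's
 * reference. With CONFIG_CIFS_STATS2, also record commands that took more
 * than a second to complete (the dialect's lock command is excluded, since
 * blocking locks are expected to be slow).
 */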
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* commands taking longer than one second are indications that
	   something is wrong, unless it is quite a slow link or server */
	if (time_after(now, midEntry->when_alloc + HZ) &&
	    (midEntry->command != command)) {
		/* smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command */
		if (le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&midEntry->server->smb2slowcmd[le16_to_cpu(midEntry->command)]);

		trace_smb3_slow_rsp(le16_to_cpu(midEntry->command),
			       midEntry->mid, midEntry->pid,
			       midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
			       midEntry->command, midEntry->mid);
			pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif
	cifs_mid_q_entry_release(midEntry);
}

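/*
 * Unlink a mid from the pending queue under GlobalMid_Lock, mark it deleted
 * so the demultiplex thread ignores it, then free it.
 */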
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del_init(&mid->qhead);
	mid->mid_flags |= MID_DELETED;
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @smb_msg:	Message to send
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear.  The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet.  In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

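/*
 * Compute the number of bytes the request will occupy on the wire: the kvec
 * array plus any page array. For SMB2+ (no RFC1001 preamble) the first iov
 * holds only the 4-byte length field and is not counted here.
 */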
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}

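/*
 * Core send path. Either hands the request(s) to the SMB Direct (RDMA)
 * transport, or corks the TCP socket, sends an RFC1002 length marker for
 * SMB2+ followed by each request's kvecs and pages, then uncorks. A partial
 * send forces a reconnect so the server discards the truncated SMB.
 */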
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server) && server->smbd_conn) {
		rc = smbd_send(server, rqst);
		goto smbd_done;
	}
	if (ssocket == NULL)
		return -ENOTSOCK;

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len  = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, &hiov,
			      1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC,
			      iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else
		rc = 0;

	return rc;
}

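/*
 * Send one or more requests, transforming (encrypting) them first via
 * ops->init_transform_rq when CIFS_TRANSFORM_REQ is set. The transform
 * header gets its own iov in slot 0 of the new request array.
 */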
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(&tr_hdr, 0, sizeof(tr_hdr));

	iov.iov_base = &tr_hdr;
	iov.iov_len = sizeof(tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	if (!server->ops->init_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
	return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

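/*
 * Wait until a send credit is available. Async ops (e.g. oplock breaks)
 * must not be held up and take a credit immediately; blocking lock ops do
 * not consume a credit since they may block on the server indefinitely.
 */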
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}
			/*
			 * Cannot count locking commands against the total
			 * as they are allowed to block on the server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}

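/*
 * Trivial MTU-credit implementation: grant the full requested size and
 * charge no credits.
 */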
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, unsigned int *credits)
{
	*num = size;
	*credits = 0;
	return 0;
}

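/*
 * Check that the session is in a state where this command may be sent
 * (fully established, or being set up/torn down by this very command),
 * then allocate a mid and queue it on the server's pending_mid_q.
 */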
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
			(in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				    midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;
	unsigned int credits = 0;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, timeout, optype);
		if (rc)
			return rc;
		credits = 1;
	}

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, credits, optype);
	return rc;
}

/*
 * Send an SMB request. No response info (other than the return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer, how long to wait, and whether
 * to log the NT STATUS code (error) before mapping it to a POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

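/*
 * Map the final state of a synchronously-waited mid to an errno. When a
 * response was received the caller keeps the mid; in all other cases it is
 * freed here.
 */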
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* verify the signature if this session requires signing */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

static void
cifs_noop_callback(struct mid_q_entry *mid)
{
}

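/*
 * Send num_rqst requests as a single compound under srv_mutex (so requests
 * are signed in the order they hit the socket), then wait for and validate
 * each response. Only the last mid keeps a real callback; the others get a
 * noop so the demultiplex thread does not wake us early.
 */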
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	unsigned int credits = 1;
	char *buf;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	rc = wait_for_free_request(ses->server, timeout, optype);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&ses->server->srv_mutex);
			/* Update # of requests on wire to server */
			add_credits(ses->server, 1, optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		/*
		 * We don't invoke the callback for compounds unless it is
		 * the last request.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_noop_callback;
	}
	cifs_in_send_inc(ses->server);
	rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
	cifs_in_send_dec(ses->server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
			smb311_update_preauth_hash(ses, rqst[i].rq_iov,
						   rqst[i].rq_nvec);

		if (timeout == CIFS_ASYNC_OP)
			goto out;

		rc = wait_for_response(ses->server, midQ[i]);
		if (rc != 0) {
			cifs_dbg(FYI, "Cancelling wait for mid %llu\n",
				 midQ[i]->mid);
			send_cancel(ses->server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
				midQ[i]->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				add_credits(ses->server, 1, optype);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		rc = cifs_sync_mid_result(midQ[i], ses->server);
		if (rc != 0) {
			add_credits(ses->server, 1, optype);
			return rc;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			ses->server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
			struct kvec iov = {
				.iov_base = resp_iov[i].iov_base,
				.iov_len = resp_iov[i].iov_len
			};
			smb311_update_preauth_hash(ses, &iov, 1);
		}

		credits = ses->server->ops->get_credits(midQ[i]);

		rc = ses->server->ops->check_receive(midQ[i], ses->server,
						     flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RESP) == 0)
			midQ[i]->resp_buf = NULL;
	}
out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++)
		cifs_delete_mid(midQ[i]);
	add_credits(ses->server, credits, optype);

	return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
				  resp_iov);
}

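/*
 * Legacy entry point: rebuilds the caller's iov array so the 4-byte RFC1001
 * length sits in its own iov[0], as the setup_request path expects, then
 * hands off to cifs_send_recv().
 */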
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}

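/*
 * Variant of SendReceive() for blocking lock requests. The wait is
 * interruptible; on a signal we cancel the lock on the server (NT_CANCEL
 * for POSIX locks via trans2, LOCKINGX_CANCEL_LOCK for Windows locks) and
 * then wait for the cancelled request to complete.
 */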
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((ses->server->tcpStatus == CifsGood) ||
		 (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}