1 /*
2 * fs/cifs/transport.c
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * Jeremy Allison (jra@samba.org) 2006.
7 *
8 * This library is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published
10 * by the Free Software Foundation; either version 2.1 of the License, or
11 * (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
16 * the GNU Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public License
19 * along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23 #include <linux/fs.h>
24 #include <linux/list.h>
25 #include <linux/gfp.h>
26 #include <linux/wait.h>
27 #include <linux/net.h>
28 #include <linux/delay.h>
29 #include <linux/freezer.h>
30 #include <linux/tcp.h>
31 #include <linux/bvec.h>
32 #include <linux/highmem.h>
33 #include <linux/uaccess.h>
34 #include <asm/processor.h>
35 #include <linux/mempool.h>
36 #include <linux/sched/signal.h>
37 #include "cifspdu.h"
38 #include "cifsglob.h"
39 #include "cifsproto.h"
40 #include "cifs_debug.h"
41 #include "smb2proto.h"
42 #include "smbdirect.h"
43
44 /* Max number of iovectors we can use off the stack when sending requests. */
45 #define CIFS_MAX_IOV_SIZE 8
46
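/*
 * Default mid callback: wake up the task that issued the request
 * (stored in mid->callback_data by AllocMidQEntry).
 */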
47 void
48 cifs_wake_up_task(struct mid_q_entry *mid)
49 {
50 wake_up_process(mid->callback_data);
51 }
52
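/*
 * Allocate and initialize a mid (multiplex id) entry for a request that is
 * about to be sent to @server. The entry is set up for a synchronous caller
 * by default: its callback simply wakes up the allocating task.
 */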
53 struct mid_q_entry *
54 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
55 {
56 struct mid_q_entry *temp;
57
58 if (server == NULL) {
59 cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
60 return NULL;
61 }
62
63 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
64 memset(temp, 0, sizeof(struct mid_q_entry));
65 kref_init(&temp->refcount);
66 temp->mid = get_mid(smb_buffer);
67 temp->pid = current->pid;
68 temp->command = cpu_to_le16(smb_buffer->Command);
69 cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
70 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
71 /* when mid allocated can be before when sent */
72 temp->when_alloc = jiffies;
73 temp->server = server;
74
75 /*
76 * The default is for the mid to be synchronous, so the
77 * default callback just wakes up the current task.
78 */
79 get_task_struct(current);
80 temp->creator = current;
81 temp->callback = cifs_wake_up_task;
82 temp->callback_data = current;
83
84 atomic_inc(&midCount);
85 temp->mid_state = MID_REQUEST_ALLOCATED;
86 return temp;
87 }
88
89 static void _cifs_mid_q_entry_release(struct kref *refcount)
90 {
91 struct mid_q_entry *midEntry =
92 container_of(refcount, struct mid_q_entry, refcount);
93 #ifdef CONFIG_CIFS_STATS2
94 __le16 command = midEntry->server->vals->lock_cmd;
95 __u16 smb_cmd = le16_to_cpu(midEntry->command);
96 unsigned long now;
97 unsigned long roundtrip_time;
98 #endif
99 struct TCP_Server_Info *server = midEntry->server;
100
101 if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
102 midEntry->mid_state == MID_RESPONSE_RECEIVED &&
103 server->ops->handle_cancelled_mid)
104 server->ops->handle_cancelled_mid(midEntry->resp_buf, server);
105
106 midEntry->mid_state = MID_FREE;
107 atomic_dec(&midCount);
108 if (midEntry->large_buf)
109 cifs_buf_release(midEntry->resp_buf);
110 else
111 cifs_small_buf_release(midEntry->resp_buf);
112 #ifdef CONFIG_CIFS_STATS2
113 now = jiffies;
114 if (now < midEntry->when_alloc)
115 cifs_server_dbg(VFS, "Invalid mid allocation time\n");
116 roundtrip_time = now - midEntry->when_alloc;
117
118 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
119 if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
120 server->slowest_cmd[smb_cmd] = roundtrip_time;
121 server->fastest_cmd[smb_cmd] = roundtrip_time;
122 } else {
123 if (server->slowest_cmd[smb_cmd] < roundtrip_time)
124 server->slowest_cmd[smb_cmd] = roundtrip_time;
125 else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
126 server->fastest_cmd[smb_cmd] = roundtrip_time;
127 }
128 cifs_stats_inc(&server->num_cmds[smb_cmd]);
129 server->time_per_cmd[smb_cmd] += roundtrip_time;
130 }
131 /*
132 * Commands taking longer than one second (the default) can indicate
133 * that something is wrong, unless the link is quite slow or the
134 * server very busy. Note that this calculation is unlikely to wrap
135 * as long as slow_rsp_threshold is not set far above the recommended
136 * maximum (32767, i.e. 9 hours) and is generally harmless even if
137 * wrong since it only affects debug counters - so leave the calc as a
138 * simple comparison rather than doing multiple conversions and
139 * overflow checks
140 */
141 if ((slow_rsp_threshold != 0) &&
142 time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
143 (midEntry->command != command)) {
144 /*
145 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
146 * NB: le16_to_cpu returns unsigned so can not be negative below
147 */
148 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
149 cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
150
151 trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
152 midEntry->when_sent, midEntry->when_received);
153 if (cifsFYI & CIFS_TIMER) {
154 pr_debug("slow rsp: cmd %d mid %llu",
155 midEntry->command, midEntry->mid);
156 cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
157 now - midEntry->when_alloc,
158 now - midEntry->when_sent,
159 now - midEntry->when_received);
160 }
161 }
162 #endif
163 put_task_struct(midEntry->creator);
164
165 mempool_free(midEntry, cifs_mid_poolp);
166 }
167
168 void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
169 {
170 spin_lock(&GlobalMid_Lock);
171 kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
172 spin_unlock(&GlobalMid_Lock);
173 }
174
175 void DeleteMidQEntry(struct mid_q_entry *midEntry)
176 {
177 cifs_mid_q_entry_release(midEntry);
178 }
179
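/*
 * Remove the mid from the pending queue (if it is still queued) and drop
 * the reference taken when it was allocated.
 */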
180 void
181 cifs_delete_mid(struct mid_q_entry *mid)
182 {
183 spin_lock(&GlobalMid_Lock);
184 if (!(mid->mid_flags & MID_DELETED)) {
185 list_del_init(&mid->qhead);
186 mid->mid_flags |= MID_DELETED;
187 }
188 spin_unlock(&GlobalMid_Lock);
189
190 DeleteMidQEntry(mid);
191 }
192
193 /*
194 * smb_send_kvec - send an array of kvecs to the server
195 * @server: Server to send the data to
196 * @smb_msg: Message to send
197 * @sent: amount of data sent on socket is stored here
198 *
199 * Our basic "send data to server" function. Should be called with srv_mutex
200 * held. The caller is responsible for handling the results.
201 */
202 static int
203 smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
204 size_t *sent)
205 {
206 int rc = 0;
207 int retries = 0;
208 struct socket *ssocket = server->ssocket;
209
210 *sent = 0;
211
212 smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
213 smb_msg->msg_namelen = sizeof(struct sockaddr);
214 smb_msg->msg_control = NULL;
215 smb_msg->msg_controllen = 0;
216 if (server->noblocksnd)
217 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
218 else
219 smb_msg->msg_flags = MSG_NOSIGNAL;
220
221 while (msg_data_left(smb_msg)) {
222 /*
223 * If blocking send, we try 3 times, since each can block
224 * for 5 seconds. For nonblocking sends we have to try more
225 * times, waiting increasing amounts of time to allow the
226 * socket to clear. The overall time we wait in either
227 * case to send on the socket is about 15 seconds.
228 * Similarly we wait for 15 seconds for a response from
229 * the server in SendReceive[2] for the server to send
230 * a response back for most types of requests (except
231 * SMB Write past end of file which can be slow, and
232 * blocking lock operations). NFS waits slightly longer
233 * than CIFS, but this can make it take longer for
234 * nonresponsive servers to be detected and 15 seconds
235 * is more than enough time for modern networks to
236 * send a packet. In most cases if we fail to send
237 * after the retries we will kill the socket and
238 * reconnect which may clear the network problem.
239 */
240 rc = sock_sendmsg(ssocket, smb_msg);
241 if (rc == -EAGAIN) {
242 retries++;
243 if (retries >= 14 ||
244 (!server->noblocksnd && (retries > 2))) {
245 cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
246 ssocket);
247 return -EAGAIN;
248 }
249 msleep(1 << retries);
250 continue;
251 }
252
253 if (rc < 0)
254 return rc;
255
256 if (rc == 0) {
257 /* should never happen, letting socket clear before
258 retrying is our only obvious option here */
259 cifs_server_dbg(VFS, "tcp sent no data\n");
260 msleep(500);
261 continue;
262 }
263
264 /* send was at least partially successful */
265 *sent += rc;
266 retries = 0; /* in case we get ENOSPC on the next send */
267 }
268 return 0;
269 }
270
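/*
 * Return the total number of bytes that @rqst will occupy on the wire:
 * the kvec array (skipping the RFC1002 length vector for SMB2+) plus any
 * attached page array.
 */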
271 unsigned long
272 smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
273 {
274 unsigned int i;
275 struct kvec *iov;
276 int nvec;
277 unsigned long buflen = 0;
278
279 if (server->vals->header_preamble_size == 0 &&
280 rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
281 iov = &rqst->rq_iov[1];
282 nvec = rqst->rq_nvec - 1;
283 } else {
284 iov = rqst->rq_iov;
285 nvec = rqst->rq_nvec;
286 }
287
288 /* total up iov array first */
289 for (i = 0; i < nvec; i++)
290 buflen += iov[i].iov_len;
291
292 /*
293 * Add in the page array if there is one. The caller needs to make
294 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
295 * multiple pages ends at page boundary, rq_tailsz needs to be set to
296 * PAGE_SIZE.
297 */
298 if (rqst->rq_npages) {
299 if (rqst->rq_npages == 1)
300 buflen += rqst->rq_tailsz;
301 else {
302 /*
303 * If there is more than one page, calculate the
304 * buffer length based on rq_offset and rq_tailsz
305 */
306 buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
307 rqst->rq_offset;
308 buflen += rqst->rq_tailsz;
309 }
310 }
311
312 return buflen;
313 }
314
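/*
 * Send one or more requests on the socket (or via smbdirect when RDMA is
 * enabled). For SMB2+ an RFC1002 length marker is generated first.
 * Signals are blocked for the duration of the send; a partial send marks
 * the session for reconnect so the server discards the incomplete frame.
 */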
315 static int
316 __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
317 struct smb_rqst *rqst)
318 {
319 int rc = 0;
320 struct kvec *iov;
321 int n_vec;
322 unsigned int send_length = 0;
323 unsigned int i, j;
324 sigset_t mask, oldmask;
325 size_t total_len = 0, sent, size;
326 struct socket *ssocket = server->ssocket;
327 struct msghdr smb_msg;
328 __be32 rfc1002_marker;
329
330 if (cifs_rdma_enabled(server)) {
331 /* return -EAGAIN when connecting or reconnecting */
332 rc = -EAGAIN;
333 if (server->smbd_conn)
334 rc = smbd_send(server, num_rqst, rqst);
335 goto smbd_done;
336 }
337
338 if (ssocket == NULL)
339 return -EAGAIN;
340
341 if (signal_pending(current)) {
342 cifs_dbg(FYI, "signal pending before send request\n");
343 return -ERESTARTSYS;
344 }
345
346 /* cork the socket */
347 tcp_sock_set_cork(ssocket->sk, true);
348
349 for (j = 0; j < num_rqst; j++)
350 send_length += smb_rqst_len(server, &rqst[j]);
351 rfc1002_marker = cpu_to_be32(send_length);
352
353 /*
354 * We should not allow signals to interrupt the network send because
355 * any partial send will cause session reconnects, increasing the
356 * latency of system calls and overloading the server with unnecessary
357 * requests.
358 */
359
360 sigfillset(&mask);
361 sigprocmask(SIG_BLOCK, &mask, &oldmask);
362
363 /* Generate a rfc1002 marker for SMB2+ */
364 if (server->vals->header_preamble_size == 0) {
365 struct kvec hiov = {
366 .iov_base = &rfc1002_marker,
367 .iov_len = 4
368 };
369 iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
370 rc = smb_send_kvec(server, &smb_msg, &sent);
371 if (rc < 0)
372 goto unmask;
373
374 total_len += sent;
375 send_length += 4;
376 }
377
378 cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
379
380 for (j = 0; j < num_rqst; j++) {
381 iov = rqst[j].rq_iov;
382 n_vec = rqst[j].rq_nvec;
383
384 size = 0;
385 for (i = 0; i < n_vec; i++) {
386 dump_smb(iov[i].iov_base, iov[i].iov_len);
387 size += iov[i].iov_len;
388 }
389
390 iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);
391
392 rc = smb_send_kvec(server, &smb_msg, &sent);
393 if (rc < 0)
394 goto unmask;
395
396 total_len += sent;
397
398 /* now walk the page array and send each page in it */
399 for (i = 0; i < rqst[j].rq_npages; i++) {
400 struct bio_vec bvec;
401
402 bvec.bv_page = rqst[j].rq_pages[i];
403 rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
404 &bvec.bv_offset);
405
406 iov_iter_bvec(&smb_msg.msg_iter, WRITE,
407 &bvec, 1, bvec.bv_len);
408 rc = smb_send_kvec(server, &smb_msg, &sent);
409 if (rc < 0)
410 break;
411
412 total_len += sent;
413 }
414 }
415
416 unmask:
417 sigprocmask(SIG_SETMASK, &oldmask, NULL);
418
419 /*
420 * If a signal is pending but we have already sent the whole packet to
421 * the server, we need to return success so that the corresponding
422 * mid entry is kept in the pending requests queue, allowing the
423 * client to handle the response from the server.
424 *
425 * If only part of the packet has been sent there is no need to hide
426 * interrupt because the session will be reconnected anyway, so there
427 * won't be any response from the server to handle.
428 */
429
430 if (signal_pending(current) && (total_len != send_length)) {
431 cifs_dbg(FYI, "signal is pending after attempt to send\n");
432 rc = -EINTR;
433 }
434
435 /* uncork it */
436 tcp_sock_set_cork(ssocket->sk, false);
437
438 if ((total_len > 0) && (total_len != send_length)) {
439 cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
440 send_length, total_len);
441 /*
442 * If we have only sent part of an SMB then the next SMB could
443 * be taken as the remainder of this one. We need to kill the
444 * socket so the server throws away the partial SMB
445 */
446 server->tcpStatus = CifsNeedReconnect;
447 trace_smb3_partial_send_reconnect(server->CurrentMid,
448 server->hostname);
449 }
450 smbd_done:
451 if (rc < 0 && rc != -EINTR)
452 cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
453 rc);
454 else if (rc > 0)
455 rc = 0;
456
457 return rc;
458 }
459
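/*
 * If CIFS_TRANSFORM_REQ is set, wrap the request chain in an encryption
 * transform via ops->init_transform_rq before handing it to
 * __smb_send_rqst(); otherwise send it as is.
 */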
460 static int
461 smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
462 struct smb_rqst *rqst, int flags)
463 {
464 struct kvec iov;
465 struct smb2_transform_hdr *tr_hdr;
466 struct smb_rqst cur_rqst[MAX_COMPOUND];
467 int rc;
468
469 if (!(flags & CIFS_TRANSFORM_REQ))
470 return __smb_send_rqst(server, num_rqst, rqst);
471
472 if (num_rqst > MAX_COMPOUND - 1)
473 return -ENOMEM;
474
475 if (!server->ops->init_transform_rq) {
476 cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
477 return -EIO;
478 }
479
480 tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
481 if (!tr_hdr)
482 return -ENOMEM;
483
484 memset(&cur_rqst[0], 0, sizeof(cur_rqst));
485 memset(&iov, 0, sizeof(iov));
486 memset(tr_hdr, 0, sizeof(*tr_hdr));
487
488 iov.iov_base = tr_hdr;
489 iov.iov_len = sizeof(*tr_hdr);
490 cur_rqst[0].rq_iov = &iov;
491 cur_rqst[0].rq_nvec = 1;
492
493 rc = server->ops->init_transform_rq(server, num_rqst + 1,
494 &cur_rqst[0], rqst);
495 if (rc)
496 goto out;
497
498 rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
499 smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
500 out:
501 kfree(tr_hdr);
502 return rc;
503 }
504
505 int
506 smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
507 unsigned int smb_buf_length)
508 {
509 struct kvec iov[2];
510 struct smb_rqst rqst = { .rq_iov = iov,
511 .rq_nvec = 2 };
512
513 iov[0].iov_base = smb_buffer;
514 iov[0].iov_len = 4;
515 iov[1].iov_base = (char *)smb_buffer + 4;
516 iov[1].iov_len = smb_buf_length;
517
518 return __smb_send_rqst(server, 1, &rqst);
519 }
520
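/*
 * Wait (up to @timeout ms, or indefinitely if @timeout is negative) until
 * @num_credits credits are available, then charge them to this request and
 * record the reconnect instance they were obtained under.
 */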
521 static int
522 wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
523 const int timeout, const int flags,
524 unsigned int *instance)
525 {
526 long rc;
527 int *credits;
528 int optype;
529 long int t;
530
531 if (timeout < 0)
532 t = MAX_JIFFY_OFFSET;
533 else
534 t = msecs_to_jiffies(timeout);
535
536 optype = flags & CIFS_OP_MASK;
537
538 *instance = 0;
539
540 credits = server->ops->get_credits_field(server, optype);
541 /* Since an echo is already inflight, no need to wait to send another */
542 if (*credits <= 0 && optype == CIFS_ECHO_OP)
543 return -EAGAIN;
544
545 spin_lock(&server->req_lock);
546 if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
547 /* oplock breaks must not be held up */
548 server->in_flight++;
549 if (server->in_flight > server->max_in_flight)
550 server->max_in_flight = server->in_flight;
551 *credits -= 1;
552 *instance = server->reconnect_instance;
553 spin_unlock(&server->req_lock);
554 return 0;
555 }
556
557 while (1) {
558 if (*credits < num_credits) {
559 spin_unlock(&server->req_lock);
560 cifs_num_waiters_inc(server);
561 rc = wait_event_killable_timeout(server->request_q,
562 has_credits(server, credits, num_credits), t);
563 cifs_num_waiters_dec(server);
564 if (!rc) {
565 trace_smb3_credit_timeout(server->CurrentMid,
566 server->hostname, num_credits, 0);
567 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
568 timeout);
569 return -ENOTSUPP;
570 }
571 if (rc == -ERESTARTSYS)
572 return -ERESTARTSYS;
573 spin_lock(&server->req_lock);
574 } else {
575 if (server->tcpStatus == CifsExiting) {
576 spin_unlock(&server->req_lock);
577 return -ENOENT;
578 }
579
580 /*
581 * For normal commands, reserve the last MAX_COMPOUND
582 * credits for compound requests.
583 * Otherwise compound requests could be permanently
584 * starved for credits by single-credit requests.
585 *
586 * To prevent spinning the CPU, block this thread until
587 * there are >MAX_COMPOUND credits available.
588 * But only do this if we already have a lot of
589 * credits in flight to avoid triggering this check
590 * for servers that are slow to hand out credits on
591 * new sessions.
592 */
593 if (!optype && num_credits == 1 &&
594 server->in_flight > 2 * MAX_COMPOUND &&
595 *credits <= MAX_COMPOUND) {
596 spin_unlock(&server->req_lock);
597 cifs_num_waiters_inc(server);
598 rc = wait_event_killable_timeout(
599 server->request_q,
600 has_credits(server, credits,
601 MAX_COMPOUND + 1),
602 t);
603 cifs_num_waiters_dec(server);
604 if (!rc) {
605 trace_smb3_credit_timeout(
606 server->CurrentMid,
607 server->hostname, num_credits,
608 0);
609 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
610 timeout);
611 return -ENOTSUPP;
612 }
613 if (rc == -ERESTARTSYS)
614 return -ERESTARTSYS;
615 spin_lock(&server->req_lock);
616 continue;
617 }
618
619 /*
620 * Can not count locking commands against total
621 * as they are allowed to block on server.
622 */
623
624 /* update # of requests on the wire to server */
625 if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
626 *credits -= num_credits;
627 server->in_flight += num_credits;
628 if (server->in_flight > server->max_in_flight)
629 server->max_in_flight = server->in_flight;
630 *instance = server->reconnect_instance;
631 }
632 spin_unlock(&server->req_lock);
633 break;
634 }
635 }
636 return 0;
637 }
638
639 static int
640 wait_for_free_request(struct TCP_Server_Info *server, const int flags,
641 unsigned int *instance)
642 {
643 return wait_for_free_credits(server, 1, -1, flags,
644 instance);
645 }
646
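/*
 * Like wait_for_free_request() but for a compound chain needing @num
 * credits. Fail fast if too few requests are in flight to expect the
 * server to grant enough credits for the whole chain.
 */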
647 static int
648 wait_for_compound_request(struct TCP_Server_Info *server, int num,
649 const int flags, unsigned int *instance)
650 {
651 int *credits;
652
653 credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
654
655 spin_lock(&server->req_lock);
656 if (*credits < num) {
657 /*
658 * Return immediately if not too many requests in flight since
659 * we will likely be stuck on waiting for credits.
660 */
661 if (server->in_flight < num - *credits) {
662 spin_unlock(&server->req_lock);
663 return -ENOTSUPP;
664 }
665 }
666 spin_unlock(&server->req_lock);
667
668 return wait_for_free_credits(server, num, 60000, flags,
669 instance);
670 }
671
672 int
673 cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
674 unsigned int *num, struct cifs_credits *credits)
675 {
676 *num = size;
677 credits->value = 0;
678 credits->instance = server->reconnect_instance;
679 return 0;
680 }
681
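/*
 * Check that the session is in a state where a request may be sent, then
 * allocate a mid for @in_buf and queue it on the server's pending_mid_q.
 */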
682 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
683 struct mid_q_entry **ppmidQ)
684 {
685 if (ses->server->tcpStatus == CifsExiting) {
686 return -ENOENT;
687 }
688
689 if (ses->server->tcpStatus == CifsNeedReconnect) {
690 cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
691 return -EAGAIN;
692 }
693
694 if (ses->status == CifsNew) {
695 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
696 (in_buf->Command != SMB_COM_NEGOTIATE))
697 return -EAGAIN;
698 /* else ok - we are setting up session */
699 }
700
701 if (ses->status == CifsExiting) {
702 /* check if SMB session is bad because we are setting it up */
703 if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
704 return -EAGAIN;
705 /* else ok - we are shutting down session */
706 }
707
708 *ppmidQ = AllocMidQEntry(in_buf, ses->server);
709 if (*ppmidQ == NULL)
710 return -ENOMEM;
711 spin_lock(&GlobalMid_Lock);
712 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
713 spin_unlock(&GlobalMid_Lock);
714 return 0;
715 }
716
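/* Sleep (freezable, killable) until the mid leaves the submitted state. */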
717 static int
718 wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
719 {
720 int error;
721
722 error = wait_event_freezekillable_unsafe(server->response_q,
723 midQ->mid_state != MID_REQUEST_SUBMITTED);
724 if (error < 0)
725 return -ERESTARTSYS;
726
727 return 0;
728 }
729
730 struct mid_q_entry *
731 cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
732 {
733 int rc;
734 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
735 struct mid_q_entry *mid;
736
737 if (rqst->rq_iov[0].iov_len != 4 ||
738 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
739 return ERR_PTR(-EIO);
740
741 /* enable signing if server requires it */
742 if (server->sign)
743 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
744
745 mid = AllocMidQEntry(hdr, server);
746 if (mid == NULL)
747 return ERR_PTR(-ENOMEM);
748
749 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
750 if (rc) {
751 DeleteMidQEntry(mid);
752 return ERR_PTR(rc);
753 }
754
755 return mid;
756 }
757
758 /*
759 * Send a SMB request and set the callback function in the mid to handle
760 * the result. Caller is responsible for dealing with timeouts.
761 */
762 int
763 cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
764 mid_receive_t *receive, mid_callback_t *callback,
765 mid_handle_t *handle, void *cbdata, const int flags,
766 const struct cifs_credits *exist_credits)
767 {
768 int rc;
769 struct mid_q_entry *mid;
770 struct cifs_credits credits = { .value = 0, .instance = 0 };
771 unsigned int instance;
772 int optype;
773
774 optype = flags & CIFS_OP_MASK;
775
776 if ((flags & CIFS_HAS_CREDITS) == 0) {
777 rc = wait_for_free_request(server, flags, &instance);
778 if (rc)
779 return rc;
780 credits.value = 1;
781 credits.instance = instance;
782 } else
783 instance = exist_credits->instance;
784
785 mutex_lock(&server->srv_mutex);
786
787 /*
788 * We can't use credits obtained from the previous session to send this
789 * request. Check if there were reconnects after we obtained credits and
790 * return -EAGAIN in such cases to let callers handle it.
791 */
792 if (instance != server->reconnect_instance) {
793 mutex_unlock(&server->srv_mutex);
794 add_credits_and_wake_if(server, &credits, optype);
795 return -EAGAIN;
796 }
797
798 mid = server->ops->setup_async_request(server, rqst);
799 if (IS_ERR(mid)) {
800 mutex_unlock(&server->srv_mutex);
801 add_credits_and_wake_if(server, &credits, optype);
802 return PTR_ERR(mid);
803 }
804
805 mid->receive = receive;
806 mid->callback = callback;
807 mid->callback_data = cbdata;
808 mid->handle = handle;
809 mid->mid_state = MID_REQUEST_SUBMITTED;
810
811 /* put it on the pending_mid_q */
812 spin_lock(&GlobalMid_Lock);
813 list_add_tail(&mid->qhead, &server->pending_mid_q);
814 spin_unlock(&GlobalMid_Lock);
815
816 /*
817 * Need to store the time in mid before calling I/O. For call_async,
818 * I/O response may come back and free the mid entry on another thread.
819 */
820 cifs_save_when_sent(mid);
821 cifs_in_send_inc(server);
822 rc = smb_send_rqst(server, 1, rqst, flags);
823 cifs_in_send_dec(server);
824
825 if (rc < 0) {
826 revert_current_mid(server, mid->credits);
827 server->sequence_number -= 2;
828 cifs_delete_mid(mid);
829 }
830
831 mutex_unlock(&server->srv_mutex);
832
833 if (rc == 0)
834 return 0;
835
836 add_credits_and_wake_if(server, &credits, optype);
837 return rc;
838 }
839
840 /*
841 *
842 * Send an SMB Request. No response info (other than return code)
843 * needs to be parsed.
844 *
845 * flags indicate the type of request buffer and how long to wait
846 * and whether to log NT STATUS code (error) before mapping it to POSIX error
847 *
848 */
849 int
850 SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
851 char *in_buf, int flags)
852 {
853 int rc;
854 struct kvec iov[1];
855 struct kvec rsp_iov;
856 int resp_buf_type;
857
858 iov[0].iov_base = in_buf;
859 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
860 flags |= CIFS_NO_RSP_BUF;
861 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
862 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
863
864 return rc;
865 }
866
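/*
 * Convert the final state of a synchronously waited-for mid into an errno.
 * If the response was received the mid is left for the caller to consume;
 * otherwise it is dequeued if necessary and released.
 */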
867 static int
868 cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
869 {
870 int rc = 0;
871
872 cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
873 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
874
875 spin_lock(&GlobalMid_Lock);
876 switch (mid->mid_state) {
877 case MID_RESPONSE_RECEIVED:
878 spin_unlock(&GlobalMid_Lock);
879 return rc;
880 case MID_RETRY_NEEDED:
881 rc = -EAGAIN;
882 break;
883 case MID_RESPONSE_MALFORMED:
884 rc = -EIO;
885 break;
886 case MID_SHUTDOWN:
887 rc = -EHOSTDOWN;
888 break;
889 default:
890 if (!(mid->mid_flags & MID_DELETED)) {
891 list_del_init(&mid->qhead);
892 mid->mid_flags |= MID_DELETED;
893 }
894 cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
895 __func__, mid->mid, mid->mid_state);
896 rc = -EIO;
897 }
898 spin_unlock(&GlobalMid_Lock);
899
900 DeleteMidQEntry(mid);
901 return rc;
902 }
903
904 static inline int
905 send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
906 struct mid_q_entry *mid)
907 {
908 return server->ops->send_cancel ?
909 server->ops->send_cancel(server, rqst, mid) : 0;
910 }
911
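/*
 * Verify the signature of the received response (when signing is enabled)
 * and map the SMB status code to a POSIX error.
 */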
912 int
913 cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
914 bool log_error)
915 {
916 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
917
918 dump_smb(mid->resp_buf, min_t(u32, 92, len));
919
920 /* verify the signature if the server signs responses */
921 if (server->sign) {
922 struct kvec iov[2];
923 int rc = 0;
924 struct smb_rqst rqst = { .rq_iov = iov,
925 .rq_nvec = 2 };
926
927 iov[0].iov_base = mid->resp_buf;
928 iov[0].iov_len = 4;
929 iov[1].iov_base = (char *)mid->resp_buf + 4;
930 iov[1].iov_len = len - 4;
931 /* FIXME: add code to kill session */
932 rc = cifs_verify_signature(&rqst, server,
933 mid->sequence_number);
934 if (rc)
935 cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
936 rc);
937 }
938
939 /* BB special case reconnect tid and uid here? */
940 return map_and_check_smb_error(mid, log_error);
941 }
942
943 struct mid_q_entry *
944 cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
945 struct smb_rqst *rqst)
946 {
947 int rc;
948 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
949 struct mid_q_entry *mid;
950
951 if (rqst->rq_iov[0].iov_len != 4 ||
952 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
953 return ERR_PTR(-EIO);
954
955 rc = allocate_mid(ses, hdr, &mid);
956 if (rc)
957 return ERR_PTR(rc);
958 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
959 if (rc) {
960 cifs_delete_mid(mid);
961 return ERR_PTR(rc);
962 }
963 return mid;
964 }
965
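/*
 * Per-mid callback used for compound chains: return the credits granted by
 * this response to the server's credit pool.
 */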
966 static void
967 cifs_compound_callback(struct mid_q_entry *mid)
968 {
969 struct TCP_Server_Info *server = mid->server;
970 struct cifs_credits credits;
971
972 credits.value = server->ops->get_credits(mid);
973 credits.instance = server->reconnect_instance;
974
975 add_credits(server, &credits, mid->optype);
976 }
977
978 static void
979 cifs_compound_last_callback(struct mid_q_entry *mid)
980 {
981 cifs_compound_callback(mid);
982 cifs_wake_up_task(mid);
983 }
984
985 static void
986 cifs_cancelled_callback(struct mid_q_entry *mid)
987 {
988 cifs_compound_callback(mid);
989 DeleteMidQEntry(mid);
990 }
991
992 /*
993 * Return a channel (master if none) of @ses that can be used to send
994 * regular requests.
995 *
996 * If we are currently binding a new channel (negprot/sess.setup),
997 * return the new incomplete channel.
998 */
999 struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
1000 {
1001 uint index = 0;
1002
1003 if (!ses)
1004 return NULL;
1005
1006 if (!ses->binding) {
1007 /* round robin */
1008 if (ses->chan_count > 1) {
1009 index = (uint)atomic_inc_return(&ses->chan_seq);
1010 index %= ses->chan_count;
1011 }
1012 return ses->chans[index].server;
1013 } else {
1014 return cifs_ses_server(ses);
1015 }
1016 }
1017
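/*
 * Send @num_rqst requests as a single compound chain and wait for all of
 * the responses. Credits are charged up front and handed back through the
 * per-mid callbacks as responses (or cancellations) arrive.
 */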
1018 int
1019 compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
1020 struct TCP_Server_Info *server,
1021 const int flags, const int num_rqst, struct smb_rqst *rqst,
1022 int *resp_buf_type, struct kvec *resp_iov)
1023 {
1024 int i, j, optype, rc = 0;
1025 struct mid_q_entry *midQ[MAX_COMPOUND];
1026 bool cancelled_mid[MAX_COMPOUND] = {false};
1027 struct cifs_credits credits[MAX_COMPOUND] = {
1028 { .value = 0, .instance = 0 }
1029 };
1030 unsigned int instance;
1031 char *buf;
1032
1033 optype = flags & CIFS_OP_MASK;
1034
1035 for (i = 0; i < num_rqst; i++)
1036 resp_buf_type[i] = CIFS_NO_BUFFER; /* no response buf yet */
1037
1038 if (!ses || !ses->server || !server) {
1039 cifs_dbg(VFS, "Null session\n");
1040 return -EIO;
1041 }
1042
1043 if (server->tcpStatus == CifsExiting)
1044 return -ENOENT;
1045
1046 /*
1047 * Wait for all the requests to become available.
1048 * This approach still leaves the possibility to be stuck waiting for
1049 * credits if the server doesn't grant credits to the outstanding
1050 * requests and if the client is completely idle, not generating any
1051 * other requests.
1052 * This can be handled by the eventual session reconnect.
1053 */
1054 rc = wait_for_compound_request(server, num_rqst, flags,
1055 &instance);
1056 if (rc)
1057 return rc;
1058
1059 for (i = 0; i < num_rqst; i++) {
1060 credits[i].value = 1;
1061 credits[i].instance = instance;
1062 }
1063
1064 /*
1065 * Make sure that we sign in the same order that we send on this socket
1066 * and avoid races inside tcp sendmsg code that could cause corruption
1067 * of smb data.
1068 */
1069
1070 mutex_lock(&server->srv_mutex);
1071
1072 /*
1073 * All the parts of the compound chain must use credits obtained from the
1074 * same session. We can not use credits obtained from the previous
1075 * session to send this request. Check if there were reconnects after
1076 * we obtained credits and return -EAGAIN in such cases to let callers
1077 * handle it.
1078 */
1079 if (instance != server->reconnect_instance) {
1080 mutex_unlock(&server->srv_mutex);
1081 for (j = 0; j < num_rqst; j++)
1082 add_credits(server, &credits[j], optype);
1083 return -EAGAIN;
1084 }
1085
1086 for (i = 0; i < num_rqst; i++) {
1087 midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
1088 if (IS_ERR(midQ[i])) {
1089 revert_current_mid(server, i);
1090 for (j = 0; j < i; j++)
1091 cifs_delete_mid(midQ[j]);
1092 mutex_unlock(&server->srv_mutex);
1093
1094 /* Update # of requests on wire to server */
1095 for (j = 0; j < num_rqst; j++)
1096 add_credits(server, &credits[j], optype);
1097 return PTR_ERR(midQ[i]);
1098 }
1099
1100 midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
1101 midQ[i]->optype = optype;
1102 /*
1103 * Invoke callback for every part of the compound chain
1104 * to calculate credits properly. Wake up this thread only when
1105 * the last element is received.
1106 */
1107 if (i < num_rqst - 1)
1108 midQ[i]->callback = cifs_compound_callback;
1109 else
1110 midQ[i]->callback = cifs_compound_last_callback;
1111 }
1112 cifs_in_send_inc(server);
1113 rc = smb_send_rqst(server, num_rqst, rqst, flags);
1114 cifs_in_send_dec(server);
1115
1116 for (i = 0; i < num_rqst; i++)
1117 cifs_save_when_sent(midQ[i]);
1118
1119 if (rc < 0) {
1120 revert_current_mid(server, num_rqst);
1121 server->sequence_number -= 2;
1122 }
1123
1124 mutex_unlock(&server->srv_mutex);
1125
1126 /*
1127 * If sending failed for some reason or it is an oplock break that we
1128 * will not receive a response to - return credits back
1129 */
1130 if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
1131 for (i = 0; i < num_rqst; i++)
1132 add_credits(server, &credits[i], optype);
1133 goto out;
1134 }
1135
1136 /*
1137 * At this point the request is passed to the network stack - we assume
1138 * that any credits taken from the server structure on the client have
1139 * been spent and we can't return them back. Once we receive responses
1140 * we will collect credits granted by the server in the mid callbacks
1141 * and add those credits to the server structure.
1142 */
1143
1144 /*
1145 * Compounding is never used during session establishment.
1146 */
1147 if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
1148 smb311_update_preauth_hash(ses, rqst[0].rq_iov,
1149 rqst[0].rq_nvec);
1150
1151 for (i = 0; i < num_rqst; i++) {
1152 rc = wait_for_response(server, midQ[i]);
1153 if (rc != 0)
1154 break;
1155 }
1156 if (rc != 0) {
1157 for (; i < num_rqst; i++) {
1158 cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
1159 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
1160 send_cancel(server, &rqst[i], midQ[i]);
1161 spin_lock(&GlobalMid_Lock);
1162 midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
1163 if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
1164 midQ[i]->callback = cifs_cancelled_callback;
1165 cancelled_mid[i] = true;
1166 credits[i].value = 0;
1167 }
1168 spin_unlock(&GlobalMid_Lock);
1169 }
1170 }
1171
1172 for (i = 0; i < num_rqst; i++) {
1173 if (rc < 0)
1174 goto out;
1175
1176 rc = cifs_sync_mid_result(midQ[i], server);
1177 if (rc != 0) {
1178 /* mark this mid as cancelled to not free it below */
1179 cancelled_mid[i] = true;
1180 goto out;
1181 }
1182
1183 if (!midQ[i]->resp_buf ||
1184 midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
1185 rc = -EIO;
1186 cifs_dbg(FYI, "Bad MID state?\n");
1187 goto out;
1188 }
1189
1190 buf = (char *)midQ[i]->resp_buf;
1191 resp_iov[i].iov_base = buf;
1192 resp_iov[i].iov_len = midQ[i]->resp_buf_size +
1193 server->vals->header_preamble_size;
1194
1195 if (midQ[i]->large_buf)
1196 resp_buf_type[i] = CIFS_LARGE_BUFFER;
1197 else
1198 resp_buf_type[i] = CIFS_SMALL_BUFFER;
1199
1200 rc = server->ops->check_receive(midQ[i], server,
1201 flags & CIFS_LOG_ERROR);
1202
1203 /* mark it so buf will not be freed by cifs_delete_mid */
1204 if ((flags & CIFS_NO_RSP_BUF) == 0)
1205 midQ[i]->resp_buf = NULL;
1206
1207 }
1208
1209 /*
1210 * Compounding is never used during session establishment.
1211 */
1212 if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
1213 struct kvec iov = {
1214 .iov_base = resp_iov[0].iov_base,
1215 .iov_len = resp_iov[0].iov_len
1216 };
1217 smb311_update_preauth_hash(ses, &iov, 1);
1218 }
1219
1220 out:
1221 /*
1222 * This will dequeue all mids. After this it is important that the
1223 * demultiplex_thread will not process any of these mids any further.
1224 * This is prevented above by using a noop callback that will not
1225 * wake this thread except for the very last PDU.
1226 */
1227 for (i = 0; i < num_rqst; i++) {
1228 if (!cancelled_mid[i])
1229 cifs_delete_mid(midQ[i]);
1230 }
1231
1232 return rc;
1233 }
1234
1235 int
1236 cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
1237 struct TCP_Server_Info *server,
1238 struct smb_rqst *rqst, int *resp_buf_type, const int flags,
1239 struct kvec *resp_iov)
1240 {
1241 return compound_send_recv(xid, ses, server, flags, 1,
1242 rqst, resp_buf_type, resp_iov);
1243 }
1244
1245 int
1246 SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1247 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1248 const int flags, struct kvec *resp_iov)
1249 {
1250 struct smb_rqst rqst;
1251 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
1252 int rc;
1253
1254 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
1255 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1256 GFP_KERNEL);
1257 if (!new_iov) {
1258 /* otherwise cifs_send_recv below sets resp_buf_type */
1259 *resp_buf_type = CIFS_NO_BUFFER;
1260 return -ENOMEM;
1261 }
1262 } else
1263 new_iov = s_iov;
1264
1265 /* 1st iov is a RFC1001 length followed by the rest of the packet */
1266 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1267
1268 new_iov[0].iov_base = new_iov[1].iov_base;
1269 new_iov[0].iov_len = 4;
1270 new_iov[1].iov_base += 4;
1271 new_iov[1].iov_len -= 4;
1272
1273 memset(&rqst, 0, sizeof(struct smb_rqst));
1274 rqst.rq_iov = new_iov;
1275 rqst.rq_nvec = n_vec + 1;
1276
1277 rc = cifs_send_recv(xid, ses, ses->server,
1278 &rqst, resp_buf_type, flags, resp_iov);
1279 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1280 kfree(new_iov);
1281 return rc;
1282 }
1283
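/*
 * Synchronously send a single SMB1 request and copy the response into
 * @out_buf, returning the response length in @pbytes_returned.
 */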
1284 int
1285 SendReceive(const unsigned int xid, struct cifs_ses *ses,
1286 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1287 int *pbytes_returned, const int flags)
1288 {
1289 int rc = 0;
1290 struct mid_q_entry *midQ;
1291 unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1292 struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1293 struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1294 struct cifs_credits credits = { .value = 1, .instance = 0 };
1295 struct TCP_Server_Info *server;
1296
1297 if (ses == NULL) {
1298 cifs_dbg(VFS, "Null smb session\n");
1299 return -EIO;
1300 }
1301 server = ses->server;
1302 if (server == NULL) {
1303 cifs_dbg(VFS, "Null tcp session\n");
1304 return -EIO;
1305 }
1306
1307 if (server->tcpStatus == CifsExiting)
1308 return -ENOENT;
1309
1310 /* Ensure that we do not send more than 50 overlapping requests
1311 to the same server. We may make this configurable later or
1312 use ses->maxReq */
1313
1314 if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1315 cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1316 len);
1317 return -EIO;
1318 }
1319
1320 rc = wait_for_free_request(server, flags, &credits.instance);
1321 if (rc)
1322 return rc;
1323
1324 /* make sure that we sign in the same order that we send on this socket
1325 and avoid races inside tcp sendmsg code that could cause corruption
1326 of smb data */
1327
1328 mutex_lock(&server->srv_mutex);
1329
1330 rc = allocate_mid(ses, in_buf, &midQ);
1331 if (rc) {
1332 mutex_unlock(&server->srv_mutex);
1333 /* Update # of requests on wire to server */
1334 add_credits(server, &credits, 0);
1335 return rc;
1336 }
1337
1338 rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1339 if (rc) {
1340 mutex_unlock(&server->srv_mutex);
1341 goto out;
1342 }
1343
1344 midQ->mid_state = MID_REQUEST_SUBMITTED;
1345
1346 cifs_in_send_inc(server);
1347 rc = smb_send(server, in_buf, len);
1348 cifs_in_send_dec(server);
1349 cifs_save_when_sent(midQ);
1350
1351 if (rc < 0)
1352 server->sequence_number -= 2;
1353
1354 mutex_unlock(&server->srv_mutex);
1355
1356 if (rc < 0)
1357 goto out;
1358
1359 rc = wait_for_response(server, midQ);
1360 if (rc != 0) {
1361 send_cancel(server, &rqst, midQ);
1362 spin_lock(&GlobalMid_Lock);
1363 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1364 /* no longer considered to be "in-flight" */
1365 midQ->callback = DeleteMidQEntry;
1366 spin_unlock(&GlobalMid_Lock);
1367 add_credits(server, &credits, 0);
1368 return rc;
1369 }
1370 spin_unlock(&GlobalMid_Lock);
1371 }
1372
1373 rc = cifs_sync_mid_result(midQ, server);
1374 if (rc != 0) {
1375 add_credits(server, &credits, 0);
1376 return rc;
1377 }
1378
1379 if (!midQ->resp_buf || !out_buf ||
1380 midQ->mid_state != MID_RESPONSE_RECEIVED) {
1381 rc = -EIO;
1382 cifs_server_dbg(VFS, "Bad MID state?\n");
1383 goto out;
1384 }
1385
1386 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1387 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1388 rc = cifs_check_receive(midQ, server, 0);
1389 out:
1390 cifs_delete_mid(midQ);
1391 add_credits(server, &credits, 0);
1392
1393 return rc;
1394 }
1395
1396 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1397 blocking lock to return. */
1398
1399 static int
1400 send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
1401 struct smb_hdr *in_buf,
1402 struct smb_hdr *out_buf)
1403 {
1404 int bytes_returned;
1405 struct cifs_ses *ses = tcon->ses;
1406 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1407
1408 /* We just modify the current in_buf to change
1409 the type of lock from LOCKING_ANDX_SHARED_LOCK
1410 or LOCKING_ANDX_EXCLUSIVE_LOCK to
1411 LOCKING_ANDX_CANCEL_LOCK. */
1412
1413 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1414 pSMB->Timeout = 0;
1415 pSMB->hdr.Mid = get_next_mid(ses->server);
1416
1417 return SendReceive(xid, ses, in_buf, out_buf,
1418 &bytes_returned, 0);
1419 }
1420
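/*
 * Send a blocking lock request and wait for the reply. If the wait is
 * interrupted by a signal, a cancel (NT_CANCEL or LOCKINGX_CANCEL_LOCK)
 * is sent so the server releases the blocked lock before we collect the
 * final response.
 */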
1421 int
1422 SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
1423 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1424 int *pbytes_returned)
1425 {
1426 int rc = 0;
1427 int rstart = 0;
1428 struct mid_q_entry *midQ;
1429 struct cifs_ses *ses;
1430 unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1431 struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1432 struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1433 unsigned int instance;
1434 struct TCP_Server_Info *server;
1435
1436 if (tcon == NULL || tcon->ses == NULL) {
1437 cifs_dbg(VFS, "Null smb session\n");
1438 return -EIO;
1439 }
1440 ses = tcon->ses;
1441 server = ses->server;
1442
1443 if (server == NULL) {
1444 cifs_dbg(VFS, "Null tcp session\n");
1445 return -EIO;
1446 }
1447
1448 if (server->tcpStatus == CifsExiting)
1449 return -ENOENT;
1450
1451 /* Ensure that we do not send more than 50 overlapping requests
1452 to the same server. We may make this configurable later or
1453 use ses->maxReq */
1454
1455 if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1456 cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1457 len);
1458 return -EIO;
1459 }
1460
1461 rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
1462 if (rc)
1463 return rc;
1464
1465 /* make sure that we sign in the same order that we send on this socket
1466 and avoid races inside tcp sendmsg code that could cause corruption
1467 of smb data */
1468
1469 mutex_lock(&server->srv_mutex);
1470
1471 rc = allocate_mid(ses, in_buf, &midQ);
1472 if (rc) {
1473 mutex_unlock(&server->srv_mutex);
1474 return rc;
1475 }
1476
1477 rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1478 if (rc) {
1479 cifs_delete_mid(midQ);
1480 mutex_unlock(&server->srv_mutex);
1481 return rc;
1482 }
1483
1484 midQ->mid_state = MID_REQUEST_SUBMITTED;
1485 cifs_in_send_inc(server);
1486 rc = smb_send(server, in_buf, len);
1487 cifs_in_send_dec(server);
1488 cifs_save_when_sent(midQ);
1489
1490 if (rc < 0)
1491 server->sequence_number -= 2;
1492
1493 mutex_unlock(&server->srv_mutex);
1494
1495 if (rc < 0) {
1496 cifs_delete_mid(midQ);
1497 return rc;
1498 }
1499
1500 /* Wait for a reply - allow signals to interrupt. */
1501 rc = wait_event_interruptible(server->response_q,
1502 (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
1503 ((server->tcpStatus != CifsGood) &&
1504 (server->tcpStatus != CifsNew)));
1505
1506 /* Were we interrupted by a signal ? */
1507 if ((rc == -ERESTARTSYS) &&
1508 (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
1509 ((server->tcpStatus == CifsGood) ||
1510 (server->tcpStatus == CifsNew))) {
1511
1512 if (in_buf->Command == SMB_COM_TRANSACTION2) {
1513 /* POSIX lock. We send a NT_CANCEL SMB to cause the
1514 blocking lock to return. */
1515 rc = send_cancel(server, &rqst, midQ);
1516 if (rc) {
1517 cifs_delete_mid(midQ);
1518 return rc;
1519 }
1520 } else {
1521 /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
1522 to cause the blocking lock to return. */
1523
1524 rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1525
1526 /* If we get -ENOLCK back the lock may have
1527 already been removed. Don't exit in this case. */
1528 if (rc && rc != -ENOLCK) {
1529 cifs_delete_mid(midQ);
1530 return rc;
1531 }
1532 }
1533
1534 rc = wait_for_response(server, midQ);
1535 if (rc) {
1536 send_cancel(server, &rqst, midQ);
1537 spin_lock(&GlobalMid_Lock);
1538 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1539 /* no longer considered to be "in-flight" */
1540 midQ->callback = DeleteMidQEntry;
1541 spin_unlock(&GlobalMid_Lock);
1542 return rc;
1543 }
1544 spin_unlock(&GlobalMid_Lock);
1545 }
1546
1547 /* We got the response - restart system call. */
1548 rstart = 1;
1549 }
1550
1551 rc = cifs_sync_mid_result(midQ, server);
1552 if (rc != 0)
1553 return rc;
1554
1555 /* rcvd frame is ok */
1556 if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
1557 rc = -EIO;
1558 cifs_tcon_dbg(VFS, "Bad MID state?\n");
1559 goto out;
1560 }
1561
1562 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1563 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1564 rc = cifs_check_receive(midQ, server, 0);
1565 out:
1566 cifs_delete_mid(midQ);
1567 if (rstart && rc == -EACCES)
1568 return -ERESTARTSYS;
1569 return rc;
1570 }
1571