/*
 *   fs/cifs/misc.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include <linux/vmalloc.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"
#include "smb2pdu.h"

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;

/*
 * The xid serves as a useful identifier for each incoming vfs request,
 * in a similar way to the mid, which is useful to track each sent smb,
 * and CurrentXid can also provide a running counter (although it
 * will eventually wrap past zero) of the total vfs operations handled
 * since the cifs fs was mounted.
 */

unsigned int
_get_xid(void)
{
	unsigned int xid;

	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cifs_dbg(FYI, "warning: more than 65000 requests active\n");
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);
	return xid;
}

void
_free_xid(unsigned int xid)
{
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
}
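
/*
 * Usage sketch: callers go through the get_xid()/free_xid() wrappers in
 * cifsglob.h (which add FYI debug logging around _get_xid()/_free_xid())
 * and bracket each vfs operation with the pair:
 *
 *	unsigned int xid = get_xid();
 *
 *	rc = some_smb_operation(xid, tcon, ...);   (hypothetical callee)
 *	free_xid(xid);
 */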

struct cifs_ses *
sesInfoAlloc(void)
{
	struct cifs_ses *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&sesInfoAllocCount);
		ret_buf->status = CifsNew;
		++ret_buf->ses_count;
		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		mutex_init(&ret_buf->session_mutex);
		spin_lock_init(&ret_buf->iface_lock);
	}
	return ret_buf;
}

void
sesInfoFree(struct cifs_ses *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
		return;
	}

	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	kzfree(buf_to_free->password);
	kfree(buf_to_free->user_name);
	kfree(buf_to_free->domainName);
	kzfree(buf_to_free->auth_key.response);
	kfree(buf_to_free->iface_list);
	kzfree(buf_to_free);
}

struct cifs_tcon *
tconInfoAlloc(void)
{
	struct cifs_tcon *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
	if (ret_buf) {
		ret_buf->crfid.fid = kzalloc(sizeof(struct cifs_fid),
					     GFP_KERNEL);
		if (!ret_buf->crfid.fid) {
			/* don't hand back a tcon with a NULL cached fid */
			kfree(ret_buf);
			return NULL;
		}
		atomic_inc(&tconInfoAllocCount);
		ret_buf->tidStatus = CifsNew;
		++ret_buf->tc_count;
		INIT_LIST_HEAD(&ret_buf->openFileList);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		spin_lock_init(&ret_buf->open_file_lock);
		mutex_init(&ret_buf->crfid.fid_mutex);
		spin_lock_init(&ret_buf->stat_lock);
	}
	return ret_buf;
}

void
tconInfoFree(struct cifs_tcon *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
		return;
	}
	atomic_dec(&tconInfoAllocCount);
	kfree(buf_to_free->nativeFileSystem);
	kzfree(buf_to_free->password);
	kfree(buf_to_free->crfid.fid);
	kfree(buf_to_free);
}

struct smb_hdr *
cifs_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;
	/*
	 * The SMB2 header is bigger than the CIFS one, so clearing a few
	 * extra bytes does no harm when the buffer carries a CIFS frame.
	 */
	size_t buf_size = sizeof(struct smb2_sync_hdr);

	/*
	 * We could use the negotiated size instead of max_msgsize, but it
	 * may be more efficient to always allocate the same size: slightly
	 * larger than strictly necessary, and maxbuffersize defaults to
	 * this and cannot be bigger.
	 */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/*
	 * Clear the first few header bytes; for most paths, more is
	 * cleared in header_assemble.
	 */
	memset(ret_buf, 0, buf_size + 3);
	atomic_inc(&bufAllocCount);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&totBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

void
cifs_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		/* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
		return;
	}
	mempool_free(buf_to_free, cifs_req_poolp);

	atomic_dec(&bufAllocCount);
	return;
}

struct smb_hdr *
cifs_small_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

	/*
	 * We could use the negotiated size instead of max_msgsize, but it
	 * may be more efficient to always allocate the same size: slightly
	 * larger than strictly necessary, and maxbuffersize defaults to
	 * this and cannot be bigger.
	 */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	/* No need to clear memory here, cleared in header assemble */
	/*	memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
	atomic_inc(&smBufAllocCount);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&totSmBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

void
cifs_small_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
		return;
	}
	mempool_free(buf_to_free, cifs_sm_req_poolp);

	atomic_dec(&smBufAllocCount);
	return;
}

void
free_rsp_buf(int resp_buftype, void *rsp)
{
	if (resp_buftype == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(rsp);
	else if (resp_buftype == CIFS_LARGE_BUFFER)
		cifs_buf_release(rsp);
}
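
/*
 * Usage sketch: callers record which pool a response came from and hand
 * both back here when done. The transport helper named below is a
 * stand-in for whichever call filled in the response:
 *
 *	int resp_buftype = CIFS_NO_BUFFER;
 *	void *rsp = NULL;
 *
 *	rc = send_recv_helper(..., &rsp, &resp_buftype);
 *	...
 *	free_rsp_buf(resp_buftype, rsp);
 */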

/*
 * NB: the MID cannot be set if treeCon is not passed in; in that case it
 * is the responsibility of the caller to set the mid.
 */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifs_tcon *treeCon, int word_count
		/* length of fixed section (word count) in two byte units  */)
{
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /*  RFC 1001 length field does not count */  +
	    2 /* for bcc field itself */);
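
	/*
	 * Worked example (a sketch; the exact numbers assume the packed
	 * layout of struct smb_hdr): with word_count == 0 this is
	 * 0 + sizeof(struct smb_hdr) - 4 + 2 = 35 bytes, i.e. the 32-byte
	 * fixed SMB header after the RFC 1001 length field, plus one
	 * WordCount byte and the two-byte BCC.
	 */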

	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;	/* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			buffer->Mid = get_next_mid(treeCon->ses->server);
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags  |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->sign)
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

	/* endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
	return;
}

static int
check_smb_hdr(struct smb_hdr *smb)
{
	/* does it have the right SMB "signature"? */
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
			 *(unsigned int *)smb->Protocol);
		return 1;
	}

	/* if it's a response then accept */
	if (smb->Flags & SMBFLG_RESPONSE)
		return 0;

	/* only one valid case where the server sends us a request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)
		return 0;

	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
		 get_mid(smb));
	return 1;
}

int
checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
{
	struct smb_hdr *smb = (struct smb_hdr *)buf;
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */

	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
		 total_read, rfclen);

	/* is this frame too small to even get to a BCC? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			/* it's an error return */
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero)
				 * in particular have seen this on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially uninitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
		} else {
			cifs_dbg(VFS, "Length less than smb header size\n");
		}
		return -EIO;
	}

	/* otherwise, there is enough to get to the BCC */
	if (check_smb_hdr(smb))
		return -EIO;
	clc_len = smbCalcSize(smb, server);

	if (4 + rfclen != total_read) {
		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
			 rfclen);
		return -EIO;
	}

	if (4 + rfclen != clc_len) {
		__u16 mid = get_mid(smb);
		/* check if bcc wrapped around for large read responses */
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
			/* check if lengths match mod 64K */
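			/*
			 * e.g. (sketch) rfclen 65600 gives
			 * (4 + 65600) & 0xFFFF == 68, which matches a
			 * clc_len whose 16-bit bcc field wrapped past 64K
			 */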
			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
			 clc_len, 4 + rfclen, mid);

		if (4 + rfclen < clc_len) {
			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose to limit the amount of
			 * extra data to 512 bytes.
			 */
			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		}
	}
	return 0;
}

bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct list_head *tmp, *tmp1, *tmp2;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);

		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			if (data_offset >
			    len - sizeof(struct file_notify_information)) {
				cifs_dbg(FYI, "invalid data_offset %u\n",
					 data_offset);
				return true;
			}
			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
				 pnotify->FileName, pnotify->Action);
			/*   cifs_dump_mem("Rcvd notify Data: ",buf,
				sizeof(struct smb_hdr)+60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cifs_dbg(FYI, "notify err 0x%x\n",
				 pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/*
		 * No sense logging an error on an invalid handle on an
		 * oplock break: a harmless race between a close request
		 * and an oplock break response is expected from time to
		 * time when writing out large dirty files cached on the
		 * client.
		 */
		if ((NT_STATUS_INVALID_HANDLE) ==
		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cifs_dbg(FYI, "invalid handle on oplock break\n");
			return true;
		} else if (ERRbadfid ==
		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock brk we get "request" */
		}
	}
	if (pSMB->hdr.WordCount != 8)
		return false;

	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &srv->smb_ses_list) {
		ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
		list_for_each(tmp1, &ses->tcon_list) {
			tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
			spin_lock(&tcon->open_file_lock);
			list_for_each(tmp2, &tcon->openFileList) {
				netfile = list_entry(tmp2, struct cifsFileInfo,
						     tlist);
				if (pSMB->Fid != netfile->fid.netfid)
					continue;

				cifs_dbg(FYI, "file id match, oplock break\n");
				pCifsInode = CIFS_I(d_inode(netfile->dentry));

				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
					&pCifsInode->flags);

				/*
				 * Set flag if the server downgrades the oplock
				 * to L2 else clear.
				 */
				if (pSMB->OplockLevel)
					set_bit(
					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
					   &pCifsInode->flags);
				else
					clear_bit(
					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
					   &pCifsInode->flags);

				queue_work(cifsoplockd_wq,
					   &netfile->oplock_break);
				netfile->oplock_break_cancelled = false;

				spin_unlock(&tcon->open_file_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&tcon->open_file_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cifs_dbg(FYI, "No matching file for oplock break\n");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
	return true;
}

void
dump_smb(void *buf, int smb_buf_length)
{
	if (traceSMB == 0)
		return;

	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
		       smb_buf_length, true);
}

void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s. This server doesn't seem to support them properly. Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n",
			 cifs_sb_master_tcon(cifs_sb)->treeName);
	}
}

void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
	oplock &= 0xF;

	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
			 &cinode->vfs_inode);
	} else if (oplock == OPLOCK_READ) {
		cinode->oplock = CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
			 &cinode->vfs_inode);
	} else
		cinode->oplock = 0;
}

/*
 * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
{
	int rc;

start:
	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
			 TASK_KILLABLE);
	if (rc)
		return rc;

	spin_lock(&cinode->writers_lock);
	if (!cinode->writers)
		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
	cinode->writers++;
	/* Check to see if we have started servicing an oplock break */
	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
		cinode->writers--;
		if (cinode->writers == 0) {
			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
		}
		spin_unlock(&cinode->writers_lock);
		goto start;
	}
	spin_unlock(&cinode->writers_lock);
	return 0;
}

void cifs_put_writer(struct cifsInodeInfo *cinode)
{
	spin_lock(&cinode->writers_lock);
	cinode->writers--;
	if (cinode->writers == 0) {
		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
	}
	spin_unlock(&cinode->writers_lock);
}
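
/*
 * A minimal pairing sketch: writers bracket their I/O with
 * cifs_get_writer()/cifs_put_writer() so that oplock break handling can
 * wait for in-flight writes to drain:
 *
 *	rc = cifs_get_writer(cinode);
 *	if (rc)
 *		return rc;
 *	... issue the write ...
 *	cifs_put_writer(cinode);
 */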

void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}

bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
		if (uid_eq(cifs_sb->mnt_backupuid, current_fsuid()))
			return true;
	}
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
		if (in_group_p(cifs_sb->mnt_backupgid))
			return true;
	}

	return false;
}

void
cifs_del_pending_open(struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
	list_del(&open->olist);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}

void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
			     struct cifs_pending_open *open)
{
	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
	open->oplock = CIFS_OPLOCK_NO_CHANGE;
	open->tlink = tlink;
	fid->pending_open = open;
	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}

void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
		      struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(tlink)->open_file_lock);
	cifs_add_pending_open_locked(fid, tlink, open);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}
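
/*
 * Lifetime sketch: a pending open is registered before the open request
 * goes on the wire, so a lease break racing with the open can still find
 * it, and is removed once the open completes or fails:
 *
 *	struct cifs_pending_open open;
 *
 *	cifs_add_pending_open(&fid, tlink, &open);
 *	rc = send_the_open_request(...);   (stand-in for the real call)
 *	...
 *	cifs_del_pending_open(&open);
 */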

/*
 * Parses a DFS referral V3 structure.
 * Caller is responsible for freeing target_nodes.
 * Returns:
 * - on success: 0
 * - on failure: errno
 */
int
parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
		    unsigned int *num_of_nodes,
		    struct dfs_info3_param **target_nodes,
		    const struct nls_table *nls_codepage, int remap,
		    const char *searchName, bool is_unicode)
{
	int i, rc = 0;
	char *data_end;
	struct dfs_referral_level_3 *ref;

	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);

	if (*num_of_nodes < 1) {
		cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n",
			 *num_of_nodes);
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
	if (ref->VersionNumber != cpu_to_le16(3)) {
		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
			 le16_to_cpu(ref->VersionNumber));
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	/* get the upper boundary of the resp buffer */
	data_end = (char *)rsp + rsp_size;

	cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
		 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));

	*target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
				GFP_KERNEL);
	if (*target_nodes == NULL) {
		rc = -ENOMEM;
		goto parse_DFS_referrals_exit;
	}

	/* collect necessary data from referrals */
	for (i = 0; i < *num_of_nodes; i++) {
		char *temp;
		int max_len;
		struct dfs_info3_param *node = (*target_nodes)+i;

		node->flags = le32_to_cpu(rsp->DFSFlags);
		if (is_unicode) {
			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
						GFP_KERNEL);
			if (tmp == NULL) {
				rc = -ENOMEM;
				goto parse_DFS_referrals_exit;
			}
			cifsConvertToUTF16((__le16 *) tmp, searchName,
					   PATH_MAX, nls_codepage, remap);
			node->path_consumed = cifs_utf16_bytes(tmp,
					le16_to_cpu(rsp->PathConsumed),
					nls_codepage);
			kfree(tmp);
		} else
			node->path_consumed = le16_to_cpu(rsp->PathConsumed);

		node->server_type = le16_to_cpu(ref->ServerType);
		node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);

		/* copy DfsPath */
		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
		max_len = data_end - temp;
		node->path_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->path_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		/* copy link target UNC */
		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
		max_len = data_end - temp;
		node->node_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->node_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		ref++;
	}

parse_DFS_referrals_exit:
	if (rc) {
		free_dfs_info_array(*target_nodes, *num_of_nodes);
		*target_nodes = NULL;
		*num_of_nodes = 0;
	}
	return rc;
}

struct cifs_aio_ctx *
cifs_aio_ctx_alloc(void)
{
	struct cifs_aio_ctx *ctx;

	ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_LIST_HEAD(&ctx->list);
	mutex_init(&ctx->aio_mutex);
	init_completion(&ctx->done);
	kref_init(&ctx->refcount);
	return ctx;
}

void
cifs_aio_ctx_release(struct kref *refcount)
{
	struct cifs_aio_ctx *ctx = container_of(refcount,
					struct cifs_aio_ctx, refcount);

	cifsFileInfo_put(ctx->cfile);
	kvfree(ctx->bv);
	kfree(ctx);
}
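
/*
 * The context is refcounted via its kref, so callers take and drop
 * references in the standard way (sketch):
 *
 *	struct cifs_aio_ctx *ctx = cifs_aio_ctx_alloc();
 *
 *	kref_get(&ctx->refcount);      (extra ref for the completion path)
 *	...
 *	kref_put(&ctx->refcount, cifs_aio_ctx_release);
 */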

#define CIFS_AIO_KMALLOC_LIMIT (1024 * 1024)

int
setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
{
	ssize_t rc;
	unsigned int cur_npages;
	unsigned int npages = 0;
	unsigned int i;
	size_t len;
	size_t count = iov_iter_count(iter);
	unsigned int saved_len;
	size_t start;
	unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
	struct page **pages = NULL;
	struct bio_vec *bv = NULL;

	if (iter->type & ITER_KVEC) {
		memcpy(&ctx->iter, iter, sizeof(struct iov_iter));
		ctx->len = count;
		iov_iter_advance(iter, count);
		return 0;
	}

	if (max_pages * sizeof(struct bio_vec) <= CIFS_AIO_KMALLOC_LIMIT)
		bv = kmalloc_array(max_pages, sizeof(struct bio_vec),
				   GFP_KERNEL);

	if (!bv) {
		bv = vmalloc(array_size(max_pages, sizeof(struct bio_vec)));
		if (!bv)
			return -ENOMEM;
	}

	if (max_pages * sizeof(struct page *) <= CIFS_AIO_KMALLOC_LIMIT)
		pages = kmalloc_array(max_pages, sizeof(struct page *),
				      GFP_KERNEL);

	if (!pages) {
		pages = vmalloc(array_size(max_pages, sizeof(struct page *)));
		if (!pages) {
			kvfree(bv);
			return -ENOMEM;
		}
	}

	saved_len = count;

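	/*
	 * Gathering sketch: iov_iter_get_pages() returns the number of
	 * bytes it mapped, starting at offset @start within the first
	 * page. Adding @start back onto rc below lets the bv-filling
	 * loop walk whole pages. E.g. (assuming PAGE_SIZE == 4096)
	 * rc = 5000 with start = 100 becomes 5100, i.e. two pages:
	 * bv[0] = {offset 100, len 3996} and bv[1] = {offset 0, len 1004},
	 * which together cover the original 5000 bytes.
	 */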
	while (count && npages < max_pages) {
		rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
		if (rc < 0) {
			cifs_dbg(VFS, "couldn't get user pages (rc=%zd)\n", rc);
			break;
		}

		if (rc > count) {
			cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc,
				 count);
			break;
		}

		iov_iter_advance(iter, rc);
		count -= rc;
		rc += start;
		cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);

		if (npages + cur_npages > max_pages) {
			cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n",
				 npages + cur_npages, max_pages);
			break;
		}

		for (i = 0; i < cur_npages; i++) {
			len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
			bv[npages + i].bv_page = pages[i];
			bv[npages + i].bv_offset = start;
			bv[npages + i].bv_len = len - start;
			rc -= len;
			start = 0;
		}

		npages += cur_npages;
	}

	kvfree(pages);
	ctx->bv = bv;
	ctx->len = saved_len - count;
	ctx->npages = npages;
	iov_iter_bvec(&ctx->iter, ITER_BVEC | rw, ctx->bv, npages, ctx->len);
	return 0;
}

/**
 * cifs_alloc_hash - allocate hash and hash context together
 *
 * The caller has to make sure @sdesc is initialized to either NULL or
 * a valid context. Both can be freed via cifs_free_hash().
 */
int
cifs_alloc_hash(const char *name,
		struct crypto_shash **shash, struct sdesc **sdesc)
{
	int rc = 0;
	size_t size;

	if (*sdesc != NULL)
		return 0;

	*shash = crypto_alloc_shash(name, 0, 0);
	if (IS_ERR(*shash)) {
		cifs_dbg(VFS, "could not allocate crypto %s\n", name);
		rc = PTR_ERR(*shash);
		*shash = NULL;
		*sdesc = NULL;
		return rc;
	}

	size = sizeof(struct shash_desc) + crypto_shash_descsize(*shash);
	*sdesc = kmalloc(size, GFP_KERNEL);
	if (*sdesc == NULL) {
		cifs_dbg(VFS, "no memory left to allocate crypto %s\n", name);
		crypto_free_shash(*shash);
		*shash = NULL;
		return -ENOMEM;
	}

	(*sdesc)->shash.tfm = *shash;
	(*sdesc)->shash.flags = 0x0;
	return 0;
}
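
/*
 * Usage sketch ("md5" here is just an example shash algorithm name):
 *
 *	struct crypto_shash *md5 = NULL;
 *	struct sdesc *sdesc_md5 = NULL;
 *
 *	rc = cifs_alloc_hash("md5", &md5, &sdesc_md5);
 *	if (rc)
 *		return rc;
 *	crypto_shash_digest(&sdesc_md5->shash, data, len, hash);
 *	cifs_free_hash(&md5, &sdesc_md5);
 */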

/**
 * cifs_free_hash - free hash and hash context together
 *
 * Freeing a NULL hash or context is safe.
 */
void
cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
{
	kfree(*sdesc);
	*sdesc = NULL;
	if (*shash)
		crypto_free_shash(*shash);
	*shash = NULL;
}

/**
 * rqst_page_get_length - obtain the length and offset for a page in smb_rqst
 * Input: rqst - a smb_rqst, page - a page index for rqst
 * Output: *len - the length for this page, *offset - the offset for this page
 */
void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
				unsigned int *len, unsigned int *offset)
{
	*len = rqst->rq_pagesz;
	*offset = (page == 0) ? rqst->rq_offset : 0;

	if (rqst->rq_npages == 1 || page == rqst->rq_npages-1)
		*len = rqst->rq_tailsz;
	else if (page == 0)
		*len = rqst->rq_pagesz - rqst->rq_offset;
}
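
/*
 * Worked example (a sketch with made-up values): for a request with
 * rq_npages = 3, rq_pagesz = 4096, rq_offset = 100, rq_tailsz = 200,
 * rqst_page_get_length() yields:
 *
 *	page 0 -> offset 100, len 3996
 *	page 1 -> offset 0,   len 4096
 *	page 2 -> offset 0,   len 200
 */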