// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/nls.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

// clang-format off
const struct cpu_str NAME_MFT = {
	4, 0, { '$', 'M', 'F', 'T' },
};
const struct cpu_str NAME_MIRROR = {
	8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
};
const struct cpu_str NAME_LOGFILE = {
	8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
};
const struct cpu_str NAME_VOLUME = {
	7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
};
const struct cpu_str NAME_ATTRDEF = {
	8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
};
const struct cpu_str NAME_ROOT = {
	1, 0, { '.' },
};
const struct cpu_str NAME_BITMAP = {
	7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
};
const struct cpu_str NAME_BOOT = {
	5, 0, { '$', 'B', 'o', 'o', 't' },
};
const struct cpu_str NAME_BADCLUS = {
	8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
};
const struct cpu_str NAME_QUOTA = {
	6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
};
const struct cpu_str NAME_SECURE = {
	7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
};
const struct cpu_str NAME_UPCASE = {
	7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
};
const struct cpu_str NAME_EXTEND = {
	7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
};
const struct cpu_str NAME_OBJID = {
	6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
};
const struct cpu_str NAME_REPARSE = {
	8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
};
const struct cpu_str NAME_USNJRNL = {
	8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
};
const __le16 BAD_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
};
const __le16 I30_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
};
const __le16 SII_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
};
const __le16 SDH_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
};
const __le16 SDS_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
};
const __le16 SO_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('O'),
};
const __le16 SQ_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('Q'),
};
const __le16 SR_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('R'),
};

#ifdef CONFIG_NTFS3_LZX_XPRESS
const __le16 WOF_NAME[17] = {
	cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
	cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
	cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
	cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
	cpu_to_le16('a'),
};
#endif

static const __le16 CON_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('N'),
};

static const __le16 NUL_NAME[3] = {
	cpu_to_le16('N'), cpu_to_le16('U'), cpu_to_le16('L'),
};

static const __le16 AUX_NAME[3] = {
	cpu_to_le16('A'), cpu_to_le16('U'), cpu_to_le16('X'),
};

static const __le16 PRN_NAME[3] = {
	cpu_to_le16('P'), cpu_to_le16('R'), cpu_to_le16('N'),
};

static const __le16 COM_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('M'),
};

static const __le16 LPT_NAME[3] = {
	cpu_to_le16('L'), cpu_to_le16('P'), cpu_to_le16('T'),
};

// clang-format on

/*
 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
 */
bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
{
	u16 *fixup, *ptr;
	u16 sample;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return false;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);

	if (*fixup >= 0x7FFF)
		*fixup = 1;
	else
		*fixup += 1;

	sample = *fixup;

	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));

	while (fn--) {
		*++fixup = *ptr;
		*ptr = sample;
		ptr += SECTOR_SIZE / sizeof(short);
	}
	return true;
}
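
/*
 * Informal sketch of the fixup (update sequence) layout handled above,
 * assuming a 1K record and 512-byte sectors (fix_num == 3):
 *
 *   fixup[0] - the update sequence number (USN), bumped on each write;
 *   fixup[1] - saved last __le16 of sector 0;
 *   fixup[2] - saved last __le16 of sector 1.
 *
 * ntfs_fix_pre_write() stamps the USN over each sector's last word;
 * ntfs_fix_post_read() below verifies the stamps and restores the saved
 * words, which detects torn multi-sector writes.
 */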

/*
 * ntfs_fix_post_read - Remove fixups after reading from disk.
 *
 * Return: < 0 if error, 0 if ok, 1 if need to update fixups.
 */
int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
		       bool simple)
{
	int ret;
	u16 *fixup, *ptr;
	u16 sample, fo, fn;

	fo = le16_to_cpu(rhdr->fix_off);
	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1) :
		      le16_to_cpu(rhdr->fix_num);

	/* Check errors. */
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -E_NTFS_CORRUPT;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);
	sample = *fixup;
	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
	ret = 0;

	while (fn--) {
		/* Test current word. */
		if (*ptr != sample) {
			/* Fixup does not match! Is it a serious error? */
			ret = -E_NTFS_FIXUP;
		}

		/* Replace fixup. */
		*ptr = *++fixup;
		ptr += SECTOR_SIZE / sizeof(short);
	}

	return ret;
}

/*
 * ntfs_extend_init - Load $Extend file.
 */
int ntfs_extend_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode, *inode2;
	struct MFT_REF ref;

	if (sbi->volume.major_ver < 3) {
		ntfs_notice(sb, "Skip $Extend because of NTFS version");
		return 0;
	}

	ref.low = cpu_to_le32(MFT_REC_EXTEND);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_EXTEND);
	inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Extend (%d).", err);
		inode = NULL;
		goto out;
	}

	/* If ntfs_iget5() reads from disk it never returns bad inode. */
	if (!S_ISDIR(inode->i_mode)) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find $ObjId */
	inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		if (is_bad_inode(inode2)) {
			iput(inode2);
		} else {
			sbi->objid.ni = ntfs_i(inode2);
			sbi->objid_no = inode2->i_ino;
		}
	}

	/* Try to find $Quota */
	inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->quota_no = inode2->i_ino;
		iput(inode2);
	}

	/* Try to find $Reparse */
	inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->reparse.ni = ntfs_i(inode2);
		sbi->reparse_no = inode2->i_ino;
	}

	/* Try to find $UsnJrnl */
	inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->usn_jrnl_no = inode2->i_ino;
		iput(inode2);
	}

	err = 0;
out:
	iput(inode);
	return err;
}

int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	bool initialized = false;
	struct MFT_REF ref;
	struct inode *inode;

	/* Check for 4GB. */
	if (ni->vfs_inode.i_size >= 0x100000000ull) {
		ntfs_err(sb, "\x24LogFile is larger than 4G.");
		err = -EINVAL;
		goto out;
	}

	sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;

	ref.low = cpu_to_le32(MFT_REC_MFT);
	ref.high = 0;
	ref.seq = cpu_to_le16(1);

	inode = ntfs_iget5(sb, &ref, NULL);

	if (IS_ERR(inode))
		inode = NULL;

	if (!inode) {
		/* Try to use MFT copy. */
		u64 t64 = sbi->mft.lbo;

		sbi->mft.lbo = sbi->mft.lbo2;
		inode = ntfs_iget5(sb, &ref, NULL);
		sbi->mft.lbo = t64;
		if (IS_ERR(inode))
			inode = NULL;
	}

	if (!inode) {
		err = -EINVAL;
		ntfs_err(sb, "Failed to load $MFT.");
		goto out;
	}

	sbi->mft.ni = ntfs_i(inode);

	/* LogFile should not contain an attribute list. */
	err = ni_load_all_mi(sbi->mft.ni);
	if (!err)
		err = log_replay(ni, &initialized);

	iput(inode);
	sbi->mft.ni = NULL;

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);

	if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
		err = 0;
		goto out;
	}

	if (sb_rdonly(sb) || !initialized)
		goto out;

	/* Fill LogFile with -1 if it is initialized. */
	err = ntfs_bio_fill_1(sbi, &ni->file.run);

out:
	sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;

	return err;
}

/*
 * ntfs_look_for_free_space - Look for a free space in bitmap.
 */
int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
			     CLST *new_lcn, CLST *new_len,
			     enum ALLOCATE_OPT opt)
{
	int err;
	CLST alen;
	struct super_block *sb = sbi->sb;
	size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (opt & ALLOCATE_MFT) {
		zlen = wnd_zone_len(wnd);

		if (!zlen) {
			err = ntfs_refresh_zone(sbi);
			if (err)
				goto up_write;

			zlen = wnd_zone_len(wnd);
		}

		if (!zlen) {
			ntfs_err(sbi->sb, "no free space to extend mft");
			err = -ENOSPC;
			goto up_write;
		}

		lcn = wnd_zone_bit(wnd);
		alen = min_t(CLST, len, zlen);

		wnd_zone_set(wnd, lcn + alen, zlen - alen);

		err = wnd_set_used(wnd, lcn, alen);
		if (err)
			goto up_write;

		alcn = lcn;
		goto space_found;
	}
	/*
	 * Because cluster 0 is always in use, lcn == 0 means that we should
	 * use the cached value of 'next_free_lcn' to improve performance.
	 */
	if (!lcn)
		lcn = sbi->used.next_free_lcn;

	if (lcn >= wnd->nbits)
		lcn = 0;

	alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
	if (alen)
		goto space_found;

	/* Try to use clusters from MftZone. */
	zlen = wnd_zone_len(wnd);
	zeroes = wnd_zeroes(wnd);

	/* Check for too big a request. */
	if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
		err = -ENOSPC;
		goto up_write;
	}

	/* How many clusters to cut from the zone. */
	zlcn = wnd_zone_bit(wnd);
	zlen2 = zlen >> 1;
	ztrim = clamp_val(len, zlen2, zlen);
	new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);
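	/*
	 * Informal example: with zlen == 1000 and len == 200 this gives
	 * ztrim = clamp(200, 500, 1000) == 500, so half of the zone is
	 * released and new_zlen = max(500, NTFS_MIN_MFT_ZONE).
	 */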

	wnd_zone_set(wnd, zlcn, new_zlen);

	/* Allocate contiguous clusters. */
	alen = wnd_find(wnd, len, 0,
			BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
	if (!alen) {
		err = -ENOSPC;
		goto up_write;
	}

space_found:
	err = 0;
	*new_len = alen;
	*new_lcn = alcn;

	ntfs_unmap_meta(sb, alcn, alen);

	/* Set hint for next requests. */
	if (!(opt & ALLOCATE_MFT))
		sbi->used.next_free_lcn = alcn + alen;
up_write:
	up_write(&wnd->rw_lock);
	return err;
}

/*
 * ntfs_check_for_free_space
 *
 * Check if it is possible to allocate 'clen' clusters and 'mlen' Mft records
 */
bool ntfs_check_for_free_space(struct ntfs_sb_info *sbi, CLST clen, CLST mlen)
{
	size_t free, zlen, avail;
	struct wnd_bitmap *wnd;

	wnd = &sbi->used.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	free = wnd_zeroes(wnd);
	zlen = min_t(size_t, NTFS_MIN_MFT_ZONE, wnd_zone_len(wnd));
	up_read(&wnd->rw_lock);

	if (free < zlen + clen)
		return false;

	avail = free - (zlen + clen);

	wnd = &sbi->mft.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	free = wnd_zeroes(wnd);
	zlen = wnd_zone_len(wnd);
	up_read(&wnd->rw_lock);

	if (free >= zlen + mlen)
		return true;

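	/*
	 * Not enough free MFT records; check whether the remaining data
	 * clusters can absorb extending the MFT by 'mlen' records.
	 */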
	return avail >= bytes_to_cluster(sbi, mlen << sbi->record_bits);
}

/*
 * ntfs_extend_mft - Allocate additional MFT records.
 *
 * sbi->mft.bitmap is locked for write.
 *
 * NOTE: recursive:
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft ->
 *	attr_set_size ->
 *	ni_insert_nonresident ->
 *	ni_insert_attr ->
 *	ni_ins_attr_ext ->
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft
 *
 * To avoid recursion, always allocate space for two new MFT records;
 * see attrib.c: "at least two MFT to avoid recursive loop".
 */
static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->mft.ni;
	size_t new_mft_total;
	u64 new_mft_bytes, new_bitmap_bytes;
	struct ATTRIB *attr;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	new_mft_total = ALIGN(wnd->nbits + NTFS_MFT_INCREASE_STEP, 128);
	new_mft_bytes = (u64)new_mft_total << sbi->record_bits;

	/* Step 1: Resize $MFT::DATA. */
	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
			    new_mft_bytes, NULL, false, &attr);

	if (err) {
		up_write(&ni->file.run_lock);
		goto out;
	}

	attr->nres.valid_size = attr->nres.data_size;
	new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
	ni->mi.dirty = true;

	/* Step 2: Resize $MFT::BITMAP. */
	new_bitmap_bytes = bitmap_size(new_mft_total);

	err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
			    new_bitmap_bytes, &new_bitmap_bytes, true, NULL);

	/* Refresh MFT Zone if necessary. */
	down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);

	ntfs_refresh_zone(sbi);

	up_write(&sbi->used.bitmap.rw_lock);
	up_write(&ni->file.run_lock);

	if (err)
		goto out;

	err = wnd_extend(wnd, new_mft_total);

	if (err)
		goto out;

	ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);

	err = _ni_write_inode(&ni->vfs_inode, 0);
out:
	return err;
}

/*
 * ntfs_look_free_mft - Look for a free MFT record.
 */
int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
		       struct ntfs_inode *ni, struct mft_inode **mi)
{
	int err = 0;
	size_t zbit, zlen, from, to, fr;
	size_t mft_total;
	struct MFT_REF ref;
	struct super_block *sb = sbi->sb;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
	u32 ir;

	static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
		      MFT_REC_FREE - MFT_REC_RESERVED);

	if (!mft)
		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);

	zlen = wnd_zone_len(wnd);

	/* Always reserve space for MFT. */
	if (zlen) {
		if (mft) {
			zbit = wnd_zone_bit(wnd);
			*rno = zbit;
			wnd_zone_set(wnd, zbit + 1, zlen - 1);
		}
		goto found;
	}

	/* No MFT zone. Find the nearest to '0' free MFT. */
	if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
		/* Resize MFT */
		mft_total = wnd->nbits;

		err = ntfs_extend_mft(sbi);
		if (!err) {
			zbit = mft_total;
			goto reserve_mft;
		}

		if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
			goto out;

		err = 0;

		/*
		 * Look for a free record in the reserved area [11-16) ==
		 * [MFT_REC_RESERVED, MFT_REC_FREE). The MFT bitmap always
		 * marks this area as used.
		 */
		if (!sbi->mft.reserved_bitmap) {
			/* Once per session create internal bitmap for 5 bits. */
			sbi->mft.reserved_bitmap = 0xFF;

			ref.high = 0;
			for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
				struct inode *i;
				struct ntfs_inode *ni;
				struct MFT_REC *mrec;

				ref.low = cpu_to_le32(ir);
				ref.seq = cpu_to_le16(ir);

				i = ntfs_iget5(sb, &ref, NULL);
				if (IS_ERR(i)) {
next:
					ntfs_notice(
						sb,
						"Invalid reserved record %x",
						ref.low);
					continue;
				}
				if (is_bad_inode(i)) {
					iput(i);
					goto next;
				}

				ni = ntfs_i(i);

				mrec = ni->mi.mrec;

				if (!is_rec_base(mrec))
					goto next;

				if (mrec->hard_links)
					goto next;

				if (!ni_std(ni))
					goto next;

				if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
						 NULL, 0, NULL, NULL))
					goto next;

				__clear_bit(ir - MFT_REC_RESERVED,
					    &sbi->mft.reserved_bitmap);
			}
		}

		/* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
		zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
					  MFT_REC_FREE, MFT_REC_RESERVED);
		if (zbit >= MFT_REC_FREE) {
			sbi->mft.next_reserved = MFT_REC_FREE;
			goto out;
		}

		zlen = 1;
		sbi->mft.next_reserved = zbit;
	} else {
reserve_mft:
		zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
		if (zbit + zlen > wnd->nbits)
			zlen = wnd->nbits - zbit;

		while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
			zlen -= 1;

		/* [zbit, zbit + zlen) will be used for MFT itself. */
		from = sbi->mft.used;
		if (from < zbit)
			from = zbit;
		to = zbit + zlen;
		if (from < to) {
			ntfs_clear_mft_tail(sbi, from, to);
			sbi->mft.used = to;
		}
	}

	if (mft) {
		*rno = zbit;
		zbit += 1;
		zlen -= 1;
	}

	wnd_zone_set(wnd, zbit, zlen);

found:
	if (!mft) {
		/* The request to get record for general purpose. */
		if (sbi->mft.next_free < MFT_REC_USER)
			sbi->mft.next_free = MFT_REC_USER;

		for (;;) {
			if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
			} else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
				sbi->mft.next_free = sbi->mft.bitmap.nbits;
			} else {
				*rno = fr;
				sbi->mft.next_free = *rno + 1;
				break;
			}

			err = ntfs_extend_mft(sbi);
			if (err)
				goto out;
		}
	}

	if (ni && !ni_add_subrecord(ni, *rno, mi)) {
		err = -ENOMEM;
		goto out;
	}

	/* We have found a record that is not reserved for the next MFT. */
	if (*rno >= MFT_REC_FREE)
		wnd_set_used(wnd, *rno, 1);
	else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
		__set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);

out:
	if (!mft)
		up_write(&wnd->rw_lock);

	return err;
}

/*
 * ntfs_mark_rec_free - Mark record as free.
 * is_mft - true if we are changing MFT
 */
void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
{
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	if (!is_mft)
		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	if (rno >= wnd->nbits)
		goto out;

	if (rno >= MFT_REC_FREE) {
		if (!wnd_is_used(wnd, rno, 1))
			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		else
			wnd_set_free(wnd, rno, 1);
	} else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
		__clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
	}

	if (rno < wnd_zone_bit(wnd))
		wnd_zone_set(wnd, rno, 1);
	else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
		sbi->mft.next_free = rno;

out:
	if (!is_mft)
		up_write(&wnd->rw_lock);
}

/*
 * ntfs_clear_mft_tail - Format empty records [from, to).
 *
 * sbi->mft.bitmap is locked for write.
 */
int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
{
	int err;
	u32 rs;
	u64 vbo;
	struct runs_tree *run;
	struct ntfs_inode *ni;

	if (from >= to)
		return 0;

	rs = sbi->record_size;
	ni = sbi->mft.ni;
	run = &ni->file.run;

	down_read(&ni->file.run_lock);
	vbo = (u64)from * rs;
	for (; from < to; from++, vbo += rs) {
		struct ntfs_buffers nb;

		err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
		if (err)
			goto out;

		err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
		nb_put(&nb);
		if (err)
			goto out;
	}

out:
	sbi->mft.used = from;
	up_read(&ni->file.run_lock);
	return err;
}

/*
 * ntfs_refresh_zone - Refresh MFT zone.
 *
 * sbi->used.bitmap is locked for rw.
 * sbi->mft.bitmap is locked for write.
 * sbi->mft.ni->file.run_lock for write.
 */
int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
{
	CLST lcn, vcn, len;
	size_t lcn_s, zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	struct ntfs_inode *ni = sbi->mft.ni;

	/* Do not change anything if the MFT zone is not empty. */
	if (wnd_zone_len(wnd))
		return 0;

	vcn = bytes_to_cluster(sbi,
			       (u64)sbi->mft.bitmap.nbits << sbi->record_bits);

	if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
		lcn = SPARSE_LCN;

	/* We should always find the last LCN for the MFT. */
	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lcn_s = lcn + 1;

	/* Try to allocate clusters after last MFT run. */
	zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
	wnd_zone_set(wnd, lcn_s, zlen);

	return 0;
}

/*
 * ntfs_update_mftmirr - Update $MFTMirr data.
 */
void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize, bytes;
	sector_t block1, block2;

	/*
	 * sb can be NULL here. In this case sbi->flags should be 0 too.
	 */
	if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR))
		return;

	blocksize = sb->s_blocksize;
	bytes = sbi->mft.recs_mirr << sbi->record_bits;
	block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
	block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;

	for (; bytes >= blocksize; bytes -= blocksize) {
		struct buffer_head *bh1, *bh2;

		bh1 = sb_bread(sb, block1++);
		if (!bh1)
			return;

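		/*
		 * The mirror block is overwritten in full below, so
		 * sb_getblk() without reading from disk is sufficient.
		 */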
		bh2 = sb_getblk(sb, block2++);
		if (!bh2) {
			put_bh(bh1);
			return;
		}

		if (buffer_locked(bh2))
			__wait_on_buffer(bh2);

		lock_buffer(bh2);
		memcpy(bh2->b_data, bh1->b_data, blocksize);
		set_buffer_uptodate(bh2);
		mark_buffer_dirty(bh2);
		unlock_buffer(bh2);

		put_bh(bh1);
		bh1 = NULL;

		err = wait ? sync_dirty_buffer(bh2) : 0;

		put_bh(bh2);
		if (err)
			return;
	}

	sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
}

/*
 * ntfs_bad_inode
 *
 * Marks inode as bad and marks fs as 'dirty'
 */
void ntfs_bad_inode(struct inode *inode, const char *hint)
{
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;

	ntfs_inode_err(inode, "%s", hint);
	make_bad_inode(inode);
	ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
}

/*
 * ntfs_set_state
 *
 * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
 * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
 */
int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
{
	int err;
	struct ATTRIB *attr;
	struct VOLUME_INFO *info;
	struct mft_inode *mi;
	struct ntfs_inode *ni;
	__le16 info_flags;

	/*
	 * Do not change state if fs was real_dirty.
	 * Do not change state if fs is already dirty (or clear).
	 * Do not change anything if mounted read-only.
	 */
	if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
		return 0;

	/* Return early if the cached flags already match the requested state. */
	if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
	    (sbi->volume.flags & VOLUME_FLAG_DIRTY))
		return 0;

	ni = sbi->volume.ni;
	if (!ni)
		return -EINVAL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);

	attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
	if (!info) {
		err = -EINVAL;
		goto out;
	}

	info_flags = info->flags;

	switch (dirty) {
	case NTFS_DIRTY_ERROR:
		ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
		sbi->volume.real_dirty = true;
		fallthrough;
	case NTFS_DIRTY_DIRTY:
		info->flags |= VOLUME_FLAG_DIRTY;
		break;
	case NTFS_DIRTY_CLEAR:
		info->flags &= ~VOLUME_FLAG_DIRTY;
		break;
	}
	/* Cache current volume flags. */
	if (info_flags != info->flags) {
		sbi->volume.flags = info->flags;
		mi->dirty = true;
	}
	err = 0;

out:
	ni_unlock(ni);
	if (err)
		return err;

	mark_inode_dirty_sync(&ni->vfs_inode);
	/* verify(!ntfs_update_mftmirr()); */

	/* write mft record on disk. */
	err = _ni_write_inode(&ni->vfs_inode, 1);

	return err;
}

/*
 * security_hash - Calculates a hash of security descriptor.
 */
static inline __le32 security_hash(const void *sd, size_t bytes)
{
	u32 hash = 0;
	const __le32 *ptr = sd;

	bytes >>= 2;
	while (bytes--)
		hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
	return cpu_to_le32(hash);
}
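
/*
 * Note: ((hash >> 0x1D) | (hash << 3)) is a 32-bit rotate-left by 3, so
 * the loop computes hash = rol32(hash, 3) + next_word. Only whole __le32
 * words are hashed ('bytes >>= 2' rounds down); this hash is what is
 * stored in the $SDH index keys used below.
 */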

int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
{
	struct block_device *bdev = sb->s_bdev;
	u32 blocksize = sb->s_blocksize;
	u64 block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		struct buffer_head *bh = __bread(bdev, block, blocksize);

		if (!bh)
			return -EIO;

		if (op > bytes)
			op = bytes;

		memcpy(buffer, bh->b_data + off, op);

		put_bh(bh);

		bytes -= op;
		buffer = Add2Ptr(buffer, op);
	}

	return 0;
}

int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
		  const void *buf, int wait)
{
	u32 blocksize = sb->s_blocksize;
	struct block_device *bdev = sb->s_bdev;
	sector_t block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;
	struct buffer_head *bh;

	if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
		wait = 1;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		if (op > bytes)
			op = bytes;

		if (op < blocksize) {
			bh = __bread(bdev, block, blocksize);
			if (!bh) {
				ntfs_err(sb, "failed to read block %llx",
					 (u64)block);
				return -EIO;
			}
		} else {
			bh = __getblk(bdev, block, blocksize);
			if (!bh)
				return -ENOMEM;
		}

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(bh);
		if (buf) {
			memcpy(bh->b_data + off, buf, op);
			buf = Add2Ptr(buf, op);
		} else {
			memset(bh->b_data + off, -1, op);
		}

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (wait) {
			int err = sync_dirty_buffer(bh);

			if (err) {
				ntfs_err(
					sb,
					"failed to sync buffer at block %llx, error %d",
					(u64)block, err);
				put_bh(bh);
				return err;
			}
		}

		put_bh(bh);

		bytes -= op;
	}
	return 0;
}

int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		      u64 vbo, const void *buf, size_t bytes, int sync)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
	u64 lbo, len;
	size_t idx;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
		return -ENOENT;

	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	for (;;) {
		u32 op = min_t(u64, len, bytes);
		int err = ntfs_sb_write(sb, lbo, op, buf, sync);

		if (err)
			return err;

		bytes -= op;
		if (!bytes)
			break;

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next)
			return -ENOENT;

		if (lcn == SPARSE_LCN)
			return -EINVAL;

		if (buf)
			buf = Add2Ptr(buf, op);

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

	return 0;
}

struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
				   const struct runs_tree *run, u64 vbo)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn;
	u64 lbo;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
		return ERR_PTR(-ENOENT);

	lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);

	return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
}

int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		     u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	u32 nbh = 0;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;
	struct buffer_head *bh;

	if (!run) {
		/* First reading of $Volume + $MFTMirr + $LogFile goes here. */
		if (vbo > MFT_REC_VOL * sbi->record_size) {
			err = -ENOENT;
			goto out;
		}

		/* Use the boot sector's absolute 'MFTCluster' to read the record. */
		lbo = vbo + sbi->mft.lbo;
		len = sbi->record_size;
	} else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	} else {
		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
	}

	off = lbo & (blocksize - 1);
	if (nb) {
		nb->off = off;
		nb->bytes = bytes;
	}

	for (;;) {
		u32 len32 = len >= bytes ? bytes : len;
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op = blocksize - off;

			if (op > len32)
				op = len32;

			bh = ntfs_bread(sb, block);
			if (!bh) {
				err = -EIO;
				goto out;
			}

			if (buf) {
				memcpy(buf, bh->b_data + off, op);
				buf = Add2Ptr(buf, op);
			}

			if (!nb) {
				put_bh(bh);
			} else if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			} else {
				nb->bh[nbh++] = bh;
				nb->nbufs = nbh;
			}

			bytes -= op;
			if (!bytes)
				return 0;
			len32 -= op;
			block += 1;
			off = 0;

		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	if (!nbh)
		return err;

	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;
	return err;
}

/*
 * ntfs_read_bh
 *
 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
 */
int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
		 struct ntfs_buffers *nb)
{
	int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);

	if (err)
		return err;
	return ntfs_fix_post_read(rhdr, nb->bytes, true);
}

int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		u32 bytes, struct ntfs_buffers *nb)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	u32 off;
	u32 nbh = 0;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;

	nb->bytes = bytes;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	}

	off = vbo & sbi->cluster_mask;
	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	nb->off = off = lbo & (blocksize - 1);

	for (;;) {
		u32 len32 = min_t(u64, len, bytes);
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op;
			struct buffer_head *bh;

			if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			}

			op = blocksize - off;
			if (op > len32)
				op = len32;

			if (op == blocksize) {
				bh = sb_getblk(sb, block);
				if (!bh) {
					err = -ENOMEM;
					goto out;
				}
				if (buffer_locked(bh))
					__wait_on_buffer(bh);
				set_buffer_uptodate(bh);
			} else {
				bh = ntfs_bread(sb, block);
				if (!bh) {
					err = -EIO;
					goto out;
				}
			}

			nb->bh[nbh++] = bh;
			bytes -= op;
			if (!bytes) {
				nb->nbufs = nbh;
				return 0;
			}

			block += 1;
			len32 -= op;
			off = 0;
		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;

	return err;
}

int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
		  struct ntfs_buffers *nb, int sync)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 block_size = sb->s_blocksize;
	u32 bytes = nb->bytes;
	u32 off = nb->off;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);
	u32 idx;
	__le16 *fixup;
	__le16 sample;

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -EINVAL;
	}

	for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
		u32 op = block_size - off;
		char *bh_data;
		struct buffer_head *bh = nb->bh[idx];
		__le16 *ptr, *end_data;

		if (op > bytes)
			op = bytes;

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(bh);

		bh_data = bh->b_data + off;
		end_data = Add2Ptr(bh_data, op);
		memcpy(bh_data, rhdr, op);

		if (!idx) {
			u16 t16;

			fixup = Add2Ptr(bh_data, fo);
			sample = *fixup;
			t16 = le16_to_cpu(sample);
			if (t16 >= 0x7FFF) {
				sample = *fixup = cpu_to_le16(1);
			} else {
				sample = cpu_to_le16(t16 + 1);
				*fixup = sample;
			}

			*(__le16 *)Add2Ptr(rhdr, fo) = sample;
		}

		ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));

		do {
			*++fixup = *ptr;
			*ptr = sample;
			ptr += SECTOR_SIZE / sizeof(short);
		} while (ptr < end_data);

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (sync) {
			int err2 = sync_dirty_buffer(bh);

			if (!err && err2)
				err = err2;
		}

		bytes -= op;
		rhdr = Add2Ptr(rhdr, op);
	}

	return err;
}

/*
 * ntfs_bio_pages - Read/write pages from/to disk.
 */
int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		   struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
		   enum req_op op)
{
	int err = 0;
	struct bio *new, *bio = NULL;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	struct page *page;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn, clen, vcn, vcn_next;
	u32 add, off, page_idx;
	u64 lbo, len;
	size_t run_idx;
	struct blk_plug plug;

	if (!bytes)
		return 0;

	blk_start_plug(&plug);

	/* Align vbo and bytes to 512-byte boundaries. */
	lbo = (vbo + bytes + 511) & ~511ull;
	vbo = vbo & ~511ull;
	bytes = lbo - vbo;
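	/* The start was rounded down and the end up, so whole sectors are covered. */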

	vcn = vbo >> cluster_bits;
	if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}
	off = vbo & sbi->cluster_mask;
	page_idx = 0;
	page = pages[0];

	for (;;) {
		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
new_bio:
		new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio->bi_iter.bi_sector = lbo >> 9;

		while (len) {
			off = vbo & (PAGE_SIZE - 1);
			add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;

			if (bio_add_page(bio, page, add, off) < add)
				goto new_bio;

			if (bytes <= add)
				goto out;
			bytes -= add;
			vbo += add;

			if (add + off == PAGE_SIZE) {
				page_idx += 1;
				if (WARN_ON(page_idx >= nr_pages)) {
					err = -EINVAL;
					goto out;
				}
				page = pages[page_idx];
			}

			if (len <= add)
				break;
			len -= add;
			lbo += add;
		}

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}
		off = 0;
	}
out:
	if (bio) {
		if (!err)
			err = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return err;
}

/*
 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
 *
 * Fill the on-disk logfile range with (-1);
 * this means an empty logfile.
 */
int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	u8 cluster_bits = sbi->cluster_bits;
	struct bio *new, *bio = NULL;
	CLST lcn, clen;
	u64 lbo, len;
	size_t run_idx;
	struct page *fill;
	void *kaddr;
	struct blk_plug plug;

	fill = alloc_page(GFP_KERNEL);
	if (!fill)
		return -ENOMEM;

	kaddr = kmap_atomic(fill);
	memset(kaddr, -1, PAGE_SIZE);
	kunmap_atomic(kaddr);
	flush_dcache_page(fill);
	lock_page(fill);
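	/* The single 0xFF-filled page is added to the bio repeatedly below. */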

	if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}

	/*
	 * TODO: Try blkdev_issue_write_same.
	 */
	blk_start_plug(&plug);
	do {
		lbo = (u64)lcn << cluster_bits;
		len = (u64)clen << cluster_bits;
new_bio:
		new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio->bi_iter.bi_sector = lbo >> 9;

		for (;;) {
			u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;

			if (bio_add_page(bio, fill, add, 0) < add)
				goto new_bio;

			lbo += add;
			if (len <= add)
				break;
			len -= add;
		}
	} while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));

	if (!err)
		err = submit_bio_wait(bio);
	bio_put(bio);

	blk_finish_plug(&plug);
out:
	unlock_page(fill);
	put_page(fill);

	return err;
}

int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		    u64 vbo, u64 *lbo, u64 *bytes)
{
	u32 off;
	CLST lcn, len;
	u8 cluster_bits = sbi->cluster_bits;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
		return -ENOENT;

	off = vbo & sbi->cluster_mask;
	*lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
	*bytes = ((u64)len << cluster_bits) - off;

	return 0;
}

struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno,
				  enum RECORD_FLAG flag)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct inode *inode = new_inode(sb);
	struct ntfs_inode *ni;

	if (!inode)
		return ERR_PTR(-ENOMEM);

	ni = ntfs_i(inode);

	err = mi_format_new(&ni->mi, sbi, rno, flag, false);
	if (err)
		goto out;

	inode->i_ino = rno;
	if (insert_inode_locked(inode) < 0) {
		err = -EIO;
		goto out;
	}

out:
	if (err) {
		make_bad_inode(inode);
		iput(inode);
		ni = ERR_PTR(err);
	}
	return ni;
}

/*
 * O:BAG:BAD:(A;OICI;FA;;;WD)
 * Owner S-1-5-32-544 (Administrators)
 * Group S-1-5-32-544 (Administrators)
 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
 */
const u8 s_default_security[] __aligned(8) = {
	0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
	0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
	0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
	0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
};

static_assert(sizeof(s_default_security) == 0x50);

static inline u32 sid_length(const struct SID *sid)
{
	return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
}
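
/*
 * For reference: a SID is an 8-byte header (Revision, SubAuthorityCount,
 * IdentifierAuthority) followed by SubAuthorityCount 32-bit values; e.g.
 * S-1-5-32-544 has two sub-authorities and occupies 8 + 2 * 4 = 16 bytes.
 */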

/*
 * is_acl_valid
 *
 * Thanks to Mark Harmstone for the idea.
 */
static bool is_acl_valid(const struct ACL *acl, u32 len)
{
	const struct ACE_HEADER *ace;
	u32 i;
	u16 ace_count, ace_size;

	if (acl->AclRevision != ACL_REVISION &&
	    acl->AclRevision != ACL_REVISION_DS) {
		/*
		 * This value should be ACL_REVISION, unless the ACL contains an
		 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
		 * All ACEs in an ACL must be at the same revision level.
		 */
		return false;
	}

	if (acl->Sbz1)
		return false;

	if (le16_to_cpu(acl->AclSize) > len)
		return false;

	if (acl->Sbz2)
		return false;

	len -= sizeof(struct ACL);
	ace = (struct ACE_HEADER *)&acl[1];
	ace_count = le16_to_cpu(acl->AceCount);

	for (i = 0; i < ace_count; i++) {
		if (len < sizeof(struct ACE_HEADER))
			return false;

		ace_size = le16_to_cpu(ace->AceSize);
		if (len < ace_size)
			return false;

		len -= ace_size;
		ace = Add2Ptr(ace, ace_size);
	}

	return true;
}

bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
{
	u32 sd_owner, sd_group, sd_sacl, sd_dacl;

	if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
		return false;

	if (sd->Revision != 1)
		return false;

	if (sd->Sbz1)
		return false;

	if (!(sd->Control & SE_SELF_RELATIVE))
		return false;

	sd_owner = le32_to_cpu(sd->Owner);
	if (sd_owner) {
		const struct SID *owner = Add2Ptr(sd, sd_owner);

		if (sd_owner + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (owner->Revision != 1)
			return false;

		if (sd_owner + sid_length(owner) > len)
			return false;
	}

	sd_group = le32_to_cpu(sd->Group);
	if (sd_group) {
		const struct SID *group = Add2Ptr(sd, sd_group);

		if (sd_group + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (group->Revision != 1)
			return false;

		if (sd_group + sid_length(group) > len)
			return false;
	}

	sd_sacl = le32_to_cpu(sd->Sacl);
	if (sd_sacl) {
		const struct ACL *sacl = Add2Ptr(sd, sd_sacl);

		if (sd_sacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(sacl, len - sd_sacl))
			return false;
	}

	sd_dacl = le32_to_cpu(sd->Dacl);
	if (sd_dacl) {
		const struct ACL *dacl = Add2Ptr(sd, sd_dacl);

		if (sd_dacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(dacl, len - sd_dacl))
			return false;
	}

	return true;
}

/*
 * ntfs_security_init - Load and parse $Secure.
 */
int ntfs_security_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode;
	struct ntfs_inode *ni;
	struct MFT_REF ref;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	u64 sds_size;
	size_t off;
	struct NTFS_DE *ne;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii = NULL;
	const struct INDEX_ROOT *root_sii;
	const struct INDEX_ROOT *root_sdh;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;

	ref.low = cpu_to_le32(MFT_REC_SECURE);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_SECURE);

	inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Secure (%d).", err);
		inode = NULL;
		goto out;
	}

	ni = ntfs_i(inode);

	le = NULL;

	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
			    ARRAY_SIZE(SDH_NAME), NULL, NULL);
	if (!attr ||
	    !(root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
	    root_sdh->type != ATTR_ZERO ||
	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
	    offsetof(struct INDEX_ROOT, ihdr) +
			    le32_to_cpu(root_sdh->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		ntfs_err(sb, "$Secure::$SDH is corrupted.");
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
	if (err) {
		ntfs_err(sb, "Failed to initialize $Secure::$SDH (%d).", err);
		goto out;
	}

	attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
			    ARRAY_SIZE(SII_NAME), NULL, NULL);
	if (!attr ||
	    !(root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
	    root_sii->type != ATTR_ZERO ||
	    root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
	    offsetof(struct INDEX_ROOT, ihdr) +
			    le32_to_cpu(root_sii->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		ntfs_err(sb, "$Secure::$SII is corrupted.");
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
	if (err) {
		ntfs_err(sb, "Failed to initialize $Secure::$SII (%d).", err);
		goto out;
	}

	fnd_sii = fnd_get();
	if (!fnd_sii) {
		err = -ENOMEM;
		goto out;
	}

	sds_size = inode->i_size;

	/* Find the last valid Id. */
	sbi->security.next_id = SECURITY_ID_FIRST;
	/* Always write new security at the end of bucket. */
	sbi->security.next_off =
		ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);

	off = 0;
	ne = NULL;

	for (;;) {
		u32 next_id;

		err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
		if (err || !ne)
			break;

		sii_e = (struct NTFS_DE_SII *)ne;
		if (le16_to_cpu(ne->view.data_size) < sizeof(sii_e->sec_hdr))
			continue;

		next_id = le32_to_cpu(sii_e->sec_id) + 1;
		if (next_id >= sbi->security.next_id)
			sbi->security.next_id = next_id;
	}

	sbi->security.ni = ni;
	inode = NULL;
out:
	iput(inode);
	fnd_put(fnd_sii);

	return err;
}

/*
 * ntfs_get_security_by_id - Read security descriptor by id.
 */
int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
			    struct SECURITY_DESCRIPTOR_RELATIVE **sd,
			    size_t *size)
{
	int err;
	int diff;
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx = &sbi->security.index_sii;
	void *p = NULL;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii;
	struct SECURITY_HDR d_security;
	const struct INDEX_ROOT *root_sii;
	u32 t32;

	*sd = NULL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);

	fnd_sii = fnd_get();
	if (!fnd_sii) {
		err = -ENOMEM;
		goto out;
	}

	root_sii = indx_get_root(indx, ni, NULL, NULL);
	if (!root_sii) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find this SECURITY descriptor in SII indexes. */
	err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
			NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
	if (err)
		goto out;

	if (diff)
		goto out;

	t32 = le32_to_cpu(sii_e->sec_hdr.size);
	if (t32 < sizeof(struct SECURITY_HDR)) {
		err = -EINVAL;
		goto out;
	}

	if (t32 > sizeof(struct SECURITY_HDR) + 0x10000) {
		/* Security descriptor looks too big; 0x10000 is an arbitrary upper bound. */
		err = -EFBIG;
		goto out;
	}

	*size = t32 - sizeof(struct SECURITY_HDR);

	p = kmalloc(*size, GFP_NOFS);
	if (!p) {
		err = -ENOMEM;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off), &d_security,
			       sizeof(d_security), NULL);
	if (err)
		goto out;

	if (memcmp(&d_security, &sii_e->sec_hdr, sizeof(d_security))) {
		err = -EINVAL;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off) +
				       sizeof(struct SECURITY_HDR),
			       p, *size, NULL);
	if (err)
		goto out;

	*sd = p;
	p = NULL;

out:
	kfree(p);
	fnd_put(fnd_sii);
	ni_unlock(ni);

	return err;
}

/*
 * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
 *
 * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
 * and it contains a mirror copy of each security descriptor.  When writing
 * to a security descriptor at location X, another copy will be written at
 * location (X+256K).
 * When writing a security descriptor that will cross the 256K boundary,
 * the pointer will be advanced by 256K to skip
 * over the mirror portion.
 */
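/*
 * Informal example: with SecurityDescriptorsBlockSize == 256K, a
 * descriptor written at SDS offset 0x10080 gets its mirror copy at
 * offset 0x50080; readers use the first (non-mirror) copy.
 */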
int ntfs_insert_security(struct ntfs_sb_info *sbi,
			 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
			 u32 size_sd, __le32 *security_id, bool *inserted)
{
	int err, diff;
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;
	struct NTFS_DE_SDH *e;
	struct NTFS_DE_SDH sdh_e;
	struct NTFS_DE_SII sii_e;
	struct SECURITY_HDR *d_security;
	u32 new_sec_size = size_sd + sizeof(struct SECURITY_HDR);
	u32 aligned_sec_size = ALIGN(new_sec_size, 16);
	struct SECURITY_KEY hash_key;
	struct ntfs_fnd *fnd_sdh = NULL;
	const struct INDEX_ROOT *root_sdh;
	const struct INDEX_ROOT *root_sii;
	u64 mirr_off, new_sds_size;
	u32 next, left;

	static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
		      SecurityDescriptorsBlockSize);

	hash_key.hash = security_hash(sd, size_sd);
	hash_key.sec_id = SECURITY_ID_INVALID;

	if (inserted)
		*inserted = false;
	*security_id = SECURITY_ID_INVALID;

2085 	/* Allocate a temporary buffer. */
2086 	d_security = kzalloc(aligned_sec_size, GFP_NOFS);
2087 	if (!d_security)
2088 		return -ENOMEM;
2089 
2090 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
2091 
2092 	fnd_sdh = fnd_get();
2093 	if (!fnd_sdh) {
2094 		err = -ENOMEM;
2095 		goto out;
2096 	}
2097 
2098 	root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
2099 	if (!root_sdh) {
2100 		err = -EINVAL;
2101 		goto out;
2102 	}
2103 
2104 	root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
2105 	if (!root_sii) {
2106 		err = -EINVAL;
2107 		goto out;
2108 	}
2109 
2110 	/*
2111 	 * Check if such a security descriptor already exists:
2112 	 * use the hash via the "SDH" index to locate its offset in "SDS".
2113 	 */
2114 	err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
2115 			&d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
2116 			fnd_sdh);
2117 	if (err)
2118 		goto out;
2119 
2120 	while (e) {
2121 		if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
2122 			err = ntfs_read_run_nb(sbi, &ni->file.run,
2123 					       le64_to_cpu(e->sec_hdr.off),
2124 					       d_security, new_sec_size, NULL);
2125 			if (err)
2126 				goto out;
2127 
2128 			if (le32_to_cpu(d_security->size) == new_sec_size &&
2129 			    d_security->key.hash == hash_key.hash &&
2130 			    !memcmp(d_security + 1, sd, size_sd)) {
2131 				*security_id = d_security->key.sec_id;
2132 				/* Such a descriptor already exists; reuse its sec_id. */
2133 				err = 0;
2134 				goto out;
2135 			}
2136 		}
2137 
2138 		err = indx_find_sort(indx_sdh, ni, root_sdh,
2139 				     (struct NTFS_DE **)&e, fnd_sdh);
2140 		if (err)
2141 			goto out;
2142 
2143 		if (!e || e->key.hash != hash_key.hash)
2144 			break;
2145 	}
2146 
2147 	/* Space remaining in the current 256K block. */
2148 	next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
2149 	left = SecurityDescriptorsBlockSize - next;
2150 
2151 	/* If the new descriptor does not fit into the current block... */
2152 	if (left < new_sec_size) {
2153 		/* ...skip the tail of this block plus the mirror block. */
2154 		sbi->security.next_off += SecurityDescriptorsBlockSize + left;
2155 	}
2156 
2157 	/* Zero tail of previous security. */
2158 	//used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
2159 
2160 	/*
2161 	 * Example:
2162 	 * 0x40438 == ni->vfs_inode.i_size
2163 	 * 0x00440 == sbi->security.next_off
2164 	 * => the tail [0x438, 0x440) of the last block needs zeroing.
2165 	 * In pseudo-code: if (next > used), zero (next - used) bytes
2166 	 * starting at sbi->security.next_off - (next - used), i.e. from
2167 	 * the end of the data actually in use up to the new write offset.
2168 	 */
2169 
2170 	/* Format new security descriptor. */
2171 	d_security->key.hash = hash_key.hash;
2172 	d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
2173 	d_security->off = cpu_to_le64(sbi->security.next_off);
2174 	d_security->size = cpu_to_le32(new_sec_size);
2175 	memcpy(d_security + 1, sd, size_sd);
2176 
2177 	/* Write main SDS bucket. */
2178 	err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
2179 				d_security, aligned_sec_size, 0);
2180 
2181 	if (err)
2182 		goto out;
2183 
2184 	mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
2185 	new_sds_size = mirr_off + aligned_sec_size;
2186 
2187 	if (new_sds_size > ni->vfs_inode.i_size) {
2188 		err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
2189 				    ARRAY_SIZE(SDS_NAME), &ni->file.run,
2190 				    new_sds_size, &new_sds_size, false, NULL);
2191 		if (err)
2192 			goto out;
2193 	}
2194 
2195 	/* Write copy SDS bucket. */
2196 	err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
2197 				aligned_sec_size, 0);
2198 	if (err)
2199 		goto out;
2200 
2201 	/* Fill SII entry. */
2202 	sii_e.de.view.data_off =
2203 		cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
2204 	sii_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
2205 	sii_e.de.view.res = 0;
2206 	sii_e.de.size = cpu_to_le16(sizeof(struct NTFS_DE_SII));
2207 	sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
2208 	sii_e.de.flags = 0;
2209 	sii_e.de.res = 0;
2210 	sii_e.sec_id = d_security->key.sec_id;
2211 	memcpy(&sii_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
2212 
2213 	err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
2214 	if (err)
2215 		goto out;
2216 
2217 	/* Fill SDH entry. */
2218 	sdh_e.de.view.data_off =
2219 		cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
2220 	sdh_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
2221 	sdh_e.de.view.res = 0;
2222 	sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
2223 	sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
2224 	sdh_e.de.flags = 0;
2225 	sdh_e.de.res = 0;
2226 	sdh_e.key.hash = d_security->key.hash;
2227 	sdh_e.key.sec_id = d_security->key.sec_id;
2228 	memcpy(&sdh_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
2229 	sdh_e.magic[0] = cpu_to_le16('I');
2230 	sdh_e.magic[1] = cpu_to_le16('I');
2231 
2232 	fnd_clear(fnd_sdh);
2233 	err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
2234 				fnd_sdh, 0);
2235 	if (err)
2236 		goto out;
2237 
2238 	*security_id = d_security->key.sec_id;
2239 	if (inserted)
2240 		*inserted = true;
2241 
2242 	/* Update Id and offset for next descriptor. */
2243 	sbi->security.next_id += 1;
2244 	sbi->security.next_off += aligned_sec_size;
2245 
2246 out:
2247 	fnd_put(fnd_sdh);
2248 	mark_inode_dirty(&ni->vfs_inode);
2249 	ni_unlock(ni);
2250 	kfree(d_security);
2251 
2252 	return err;
2253 }
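
/*
 * A minimal sketch of the mirroring arithmetic described above (the
 * helper is hypothetical, for illustration only): a main copy written
 * at offset X is mirrored one SecurityDescriptorsBlockSize (256K)
 * further into the stream, matching the mirr_off computation in
 * ntfs_insert_security().
 */
static inline u64 sds_mirror_off_sketch(u64 main_off)
{
	/* The mirror copy always lives 256K past the main copy. */
	return main_off + SecurityDescriptorsBlockSize;
}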
2254 
2255 /*
2256  * ntfs_reparse_init - Load and parse $Extend/$Reparse.
2257  */
2258 int ntfs_reparse_init(struct ntfs_sb_info *sbi)
2259 {
2260 	int err;
2261 	struct ntfs_inode *ni = sbi->reparse.ni;
2262 	struct ntfs_index *indx = &sbi->reparse.index_r;
2263 	struct ATTRIB *attr;
2264 	struct ATTR_LIST_ENTRY *le;
2265 	const struct INDEX_ROOT *root_r;
2266 
2267 	if (!ni)
2268 		return 0;
2269 
2270 	le = NULL;
2271 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
2272 			    ARRAY_SIZE(SR_NAME), NULL, NULL);
2273 	if (!attr) {
2274 		err = -EINVAL;
2275 		goto out;
2276 	}
2277 
2278 	root_r = resident_data(attr);
2279 	if (root_r->type != ATTR_ZERO ||
2280 	    root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
2281 		err = -EINVAL;
2282 		goto out;
2283 	}
2284 
2285 	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
2286 	if (err)
2287 		goto out;
2288 
2289 out:
2290 	return err;
2291 }
2292 
2293 /*
2294  * ntfs_objid_init - Load and parse $Extend/$ObjId.
2295  */
2296 int ntfs_objid_init(struct ntfs_sb_info *sbi)
2297 {
2298 	int err;
2299 	struct ntfs_inode *ni = sbi->objid.ni;
2300 	struct ntfs_index *indx = &sbi->objid.index_o;
2301 	struct ATTRIB *attr;
2302 	struct ATTR_LIST_ENTRY *le;
2303 	const struct INDEX_ROOT *root;
2304 
2305 	if (!ni)
2306 		return 0;
2307 
2308 	le = NULL;
2309 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
2310 			    ARRAY_SIZE(SO_NAME), NULL, NULL);
2311 	if (!attr) {
2312 		err = -EINVAL;
2313 		goto out;
2314 	}
2315 
2316 	root = resident_data(attr);
2317 	if (root->type != ATTR_ZERO ||
2318 	    root->rule != NTFS_COLLATION_TYPE_UINTS) {
2319 		err = -EINVAL;
2320 		goto out;
2321 	}
2322 
2323 	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
2324 	if (err)
2325 		goto out;
2326 
2327 out:
2328 	return err;
2329 }
2330 
2331 int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
2332 {
2333 	int err;
2334 	struct ntfs_inode *ni = sbi->objid.ni;
2335 	struct ntfs_index *indx = &sbi->objid.index_o;
2336 
2337 	if (!ni)
2338 		return -EINVAL;
2339 
2340 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
2341 
2342 	err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
2343 
2344 	mark_inode_dirty(&ni->vfs_inode);
2345 	ni_unlock(ni);
2346 
2347 	return err;
2348 }
2349 
2350 int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2351 			const struct MFT_REF *ref)
2352 {
2353 	int err;
2354 	struct ntfs_inode *ni = sbi->reparse.ni;
2355 	struct ntfs_index *indx = &sbi->reparse.index_r;
2356 	struct NTFS_DE_R re;
2357 
2358 	if (!ni)
2359 		return -EINVAL;
2360 
2361 	memset(&re, 0, sizeof(re));
2362 
2363 	re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
2364 	re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
2365 	re.de.key_size = cpu_to_le16(sizeof(re.key));
2366 
2367 	re.key.ReparseTag = rtag;
2368 	memcpy(&re.key.ref, ref, sizeof(*ref));
2369 
2370 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2371 
2372 	err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);
2373 
2374 	mark_inode_dirty(&ni->vfs_inode);
2375 	ni_unlock(ni);
2376 
2377 	return err;
2378 }
2379 
2380 int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2381 			const struct MFT_REF *ref)
2382 {
2383 	int err, diff;
2384 	struct ntfs_inode *ni = sbi->reparse.ni;
2385 	struct ntfs_index *indx = &sbi->reparse.index_r;
2386 	struct ntfs_fnd *fnd = NULL;
2387 	struct REPARSE_KEY rkey;
2388 	struct NTFS_DE_R *re;
2389 	struct INDEX_ROOT *root_r;
2390 
2391 	if (!ni)
2392 		return -EINVAL;
2393 
2394 	rkey.ReparseTag = rtag;
2395 	rkey.ref = *ref;
2396 
2397 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2398 
2399 	if (rtag) {
2400 		err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2401 		goto out1;
2402 	}
2403 
2404 	fnd = fnd_get();
2405 	if (!fnd) {
2406 		err = -ENOMEM;
2407 		goto out1;
2408 	}
2409 
2410 	root_r = indx_get_root(indx, ni, NULL, NULL);
2411 	if (!root_r) {
2412 		err = -EINVAL;
2413 		goto out;
2414 	}
2415 
2416 	/* The non-NULL context (1) makes indx_find() ignore rkey.ReparseTag. */
2417 	err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
2418 			(struct NTFS_DE **)&re, fnd);
2419 	if (err)
2420 		goto out;
2421 
2422 	if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
2423 		/* Should be impossible; the volume may be corrupt. */
2424 		goto out;
2425 	}
2426 
2427 	memcpy(&rkey, &re->key, sizeof(rkey));
2428 
2429 	fnd_put(fnd);
2430 	fnd = NULL;
2431 
2432 	err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2433 	if (err)
2434 		goto out;
2435 
2436 out:
2437 	fnd_put(fnd);
2438 
2439 out1:
2440 	mark_inode_dirty(&ni->vfs_inode);
2441 	ni_unlock(ni);
2442 
2443 	return err;
2444 }
2445 
2446 static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
2447 					  CLST len)
2448 {
2449 	ntfs_unmap_meta(sbi->sb, lcn, len);
2450 	ntfs_discard(sbi, lcn, len);
2451 }
2452 
2453 void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
2454 {
2455 	CLST end, i, zone_len, zlen;
2456 	struct wnd_bitmap *wnd = &sbi->used.bitmap;
2457 	bool dirty = false;
2458 
2459 	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
2460 	if (!wnd_is_used(wnd, lcn, len)) {
2461 		/* Mark the volume as dirty outside of wnd->rw_lock. */
2462 		dirty = true;
2463 
2464 		end = lcn + len;
2465 		len = 0;
2466 		for (i = lcn; i < end; i++) {
2467 			if (wnd_is_used(wnd, i, 1)) {
2468 				if (!len)
2469 					lcn = i;
2470 				len += 1;
2471 				continue;
2472 			}
2473 
2474 			if (!len)
2475 				continue;
2476 
2477 			if (trim)
2478 				ntfs_unmap_and_discard(sbi, lcn, len);
2479 
2480 			wnd_set_free(wnd, lcn, len);
2481 			len = 0;
2482 		}
2483 
2484 		if (!len)
2485 			goto out;
2486 	}
2487 
2488 	if (trim)
2489 		ntfs_unmap_and_discard(sbi, lcn, len);
2490 	wnd_set_free(wnd, lcn, len);
2491 
2492 	/* Append to MFT zone, if possible. */
2493 	zone_len = wnd_zone_len(wnd);
2494 	zlen = min(zone_len + len, sbi->zone_max);
2495 
2496 	if (zlen == zone_len) {
2497 		/* MFT zone already has maximum size. */
2498 	} else if (!zone_len) {
2499 		/* Create MFT zone only if 'zlen' is large enough. */
2500 		if (zlen == sbi->zone_max)
2501 			wnd_zone_set(wnd, lcn, zlen);
2502 	} else {
2503 		CLST zone_lcn = wnd_zone_bit(wnd);
2504 
2505 		if (lcn + len == zone_lcn) {
2506 			/* Append into head MFT zone. */
2507 			wnd_zone_set(wnd, lcn, zlen);
2508 		} else if (zone_lcn + zone_len == lcn) {
2509 			/* Append into tail MFT zone. */
2510 			wnd_zone_set(wnd, zone_lcn, zlen);
2511 		}
2512 	}
2513 
2514 out:
2515 	up_write(&wnd->rw_lock);
2516 	if (dirty)
2517 		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
2518 }
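
/*
 * Worked example for the zone logic above (hypothetical numbers): with
 * the MFT zone at clusters [100, 160) and sbi->zone_max == 100, freeing
 * the run [80, 100) satisfies lcn + len == zone_lcn, so the zone grows
 * at its head to [80, 160).  Freeing [160, 180) instead satisfies
 * zone_lcn + zone_len == lcn and the zone grows at its tail to
 * [100, 180).  A run not adjacent to the zone leaves it unchanged.
 */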
2519 
2520 /*
2521  * run_deallocate - Deallocate clusters.
2522  */
2523 int run_deallocate(struct ntfs_sb_info *sbi, const struct runs_tree *run,
2524 		   bool trim)
2525 {
2526 	CLST lcn, len;
2527 	size_t idx = 0;
2528 
2529 	while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
2530 		if (lcn == SPARSE_LCN)
2531 			continue;
2532 
2533 		mark_as_free_ex(sbi, lcn, len, trim);
2534 	}
2535 
2536 	return 0;
2537 }
2538 
2539 static inline bool name_has_forbidden_chars(const struct le_str *fname)
2540 {
2541 	int i, ch;
2542 
2543 	/* check for forbidden chars */
2544 	for (i = 0; i < fname->len; ++i) {
2545 		ch = le16_to_cpu(fname->name[i]);
2546 
2547 		/* control chars */
2548 		if (ch < 0x20)
2549 			return true;
2550 
2551 		switch (ch) {
2552 		/* disallowed by Windows */
2553 		case '\\':
2554 		case '/':
2555 		case ':':
2556 		case '*':
2557 		case '?':
2558 		case '<':
2559 		case '>':
2560 		case '|':
2561 		case '\"':
2562 			return true;
2563 
2564 		default:
2565 			/* allowed char */
2566 			break;
2567 		}
2568 	}
2569 
2570 	/* File names cannot end with a space or a dot. */
2571 	if (fname->len > 0) {
2572 		ch = le16_to_cpu(fname->name[fname->len - 1]);
2573 		if (ch == ' ' || ch == '.')
2574 			return true;
2575 	}
2576 
2577 	return false;
2578 }
2579 
2580 static inline bool is_reserved_name(const struct ntfs_sb_info *sbi,
2581 				    const struct le_str *fname)
2582 {
2583 	int port_digit;
2584 	const __le16 *name = fname->name;
2585 	int len = fname->len;
2586 	const u16 *upcase = sbi->upcase;
2587 
2588 	/* Check for 3-char reserved names (device names). */
2589 	/* The name by itself or with any extension is forbidden. */
2590 	if (len == 3 || (len > 3 && le16_to_cpu(name[3]) == '.'))
2591 		if (!ntfs_cmp_names(name, 3, CON_NAME, 3, upcase, false) ||
2592 		    !ntfs_cmp_names(name, 3, NUL_NAME, 3, upcase, false) ||
2593 		    !ntfs_cmp_names(name, 3, AUX_NAME, 3, upcase, false) ||
2594 		    !ntfs_cmp_names(name, 3, PRN_NAME, 3, upcase, false))
2595 			return true;
2596 
2597 	/* Check for 4-char reserved names (port name followed by 1..9). */
2598 	/* The name by itself or with any extension is forbidden. */
2599 	if (len == 4 || (len > 4 && le16_to_cpu(name[4]) == '.')) {
2600 		port_digit = le16_to_cpu(name[3]);
2601 		if (port_digit >= '1' && port_digit <= '9')
2602 			if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase,
2603 					    false) ||
2604 			    !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase,
2605 					    false))
2606 				return true;
2607 	}
2608 
2609 	return false;
2610 }
2611 
2612 /*
2613  * valid_windows_name - Check if a file name is valid in Windows.
2614  */
2615 bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *fname)
2616 {
2617 	return !name_has_forbidden_chars(fname) &&
2618 	       !is_reserved_name(sbi, fname);
2619 }
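
/*
 * Usage sketch (hypothetical helper, illustration only): reserved
 * device names such as "con", "CON.txt" or "com1.log" are rejected, as
 * are names ending in a space or a dot.  A 4-char name like "conf"
 * passes both checks:
 */
static inline bool example_check_name(struct ntfs_sb_info *sbi)
{
	/* Stack object laid out like struct le_str (len, unused, name[]). */
	static const struct {
		u8 len;
		u8 unused;
		__le16 name[4];
	} conf = {
		.len = 4,
		.name = { cpu_to_le16('c'), cpu_to_le16('o'),
			  cpu_to_le16('n'), cpu_to_le16('f') },
	};

	/* True: "conf" is neither reserved nor otherwise forbidden. */
	return valid_windows_name(sbi, (const struct le_str *)&conf);
}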
2620 
2621 /*
2622  * ntfs_set_label - Update the current NTFS volume label.
2623  */
2624 int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len)
2625 {
2626 	int err;
2627 	struct ATTRIB *attr;
2628 	struct ntfs_inode *ni = sbi->volume.ni;
2629 	const u8 max_ulen = 0x80; /* TODO: use attrdef to get maximum length */
2630 	/* Allocate PATH_MAX bytes. */
2631 	struct cpu_str *uni = __getname();
2632 
2633 	if (!uni)
2634 		return -ENOMEM;
2635 
2636 	err = ntfs_nls_to_utf16(sbi, label, len, uni, (PATH_MAX - 2) / 2,
2637 				UTF16_LITTLE_ENDIAN);
2638 	if (err < 0)
2639 		goto out;
2640 
2641 	if (uni->len > max_ulen) {
2642 		ntfs_warn(sbi->sb, "new label is too long");
2643 		err = -EFBIG;
2644 		goto out;
2645 	}
2646 
2647 	ni_lock(ni);
2648 
2649 	/* Ignore any errors. */
2650 	ni_remove_attr(ni, ATTR_LABEL, NULL, 0, false, NULL);
2651 
2652 	err = ni_insert_resident(ni, uni->len * sizeof(u16), ATTR_LABEL, NULL,
2653 				 0, &attr, NULL, NULL);
2654 	if (err < 0)
2655 		goto unlock_out;
2656 
2657 	/* Write the new label into the on-disk structure. */
2658 	memcpy(resident_data(attr), uni->name, uni->len * sizeof(u16));
2659 
2660 	/* Update the cached copy of the current label. */
2661 	if (len >= ARRAY_SIZE(sbi->volume.label))
2662 		len = ARRAY_SIZE(sbi->volume.label) - 1;
2663 	memcpy(sbi->volume.label, label, len);
2664 	sbi->volume.label[len] = 0;
2665 	mark_inode_dirty_sync(&ni->vfs_inode);
2666 
2667 unlock_out:
2668 	ni_unlock(ni);
2669 
2670 	if (!err)
2671 		err = _ni_write_inode(&ni->vfs_inode, 0);
2672 
2673 out:
2674 	__putname(uni);
2675 	return err;
2676 }
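
/*
 * Usage sketch (hypothetical call site, illustration only): the label
 * arrives as an NLS byte string and is converted to UTF-16 internally,
 * so a caller only needs the raw bytes and their length:
 *
 *	u8 label[] = "backup";
 *	err = ntfs_set_label(sbi, label, sizeof(label) - 1);
 */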