// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/fs.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

static inline int compare_attr(const struct ATTRIB *left, enum ATTR_TYPE type,
			       const __le16 *name, u8 name_len,
			       const u16 *upcase)
{
	/* First, compare the type codes. */
	int diff = le32_to_cpu(left->type) - le32_to_cpu(type);

	if (diff)
		return diff;

	/* They have the same type code, so we have to compare the names. */
	return ntfs_cmp_names(attr_name(left), left->name_len, name, name_len,
			      upcase, true);
}
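
/*
 * Note: compare_attr() defines the order in which attributes are kept inside
 * a record and which mi_insert_attr() below relies on: ascending type code
 * first (e.g. 0x10 STANDARD_INFORMATION before 0x30 FILE_NAME before 0x80
 * DATA), then the attribute name, compared via ntfs_cmp_names() with the
 * volume upcase table.
 */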

/*
 * mi_new_attt_id
 *
 * Return: Unused attribute id that is less than mrec->next_attr_id.
 */
static __le16 mi_new_attt_id(struct mft_inode *mi)
{
	u16 free_id, max_id, t16;
	struct MFT_REC *rec = mi->mrec;
	struct ATTRIB *attr;
	__le16 id;

	id = rec->next_attr_id;
	free_id = le16_to_cpu(id);
	if (free_id < 0x7FFF) {
		rec->next_attr_id = cpu_to_le16(free_id + 1);
		return id;
	}

	/* One record can store up to 1024/24 ~= 42 attributes. */
	free_id = 0;
	max_id = 0;

	attr = NULL;

	for (;;) {
		attr = mi_enum_attr(mi, attr);
		if (!attr) {
			rec->next_attr_id = cpu_to_le16(max_id + 1);
			mi->dirty = true;
			return cpu_to_le16(free_id);
		}

		t16 = le16_to_cpu(attr->id);
		if (t16 == free_id) {
			free_id += 1;
			attr = NULL;
		} else if (max_id < t16)
			max_id = t16;
	}
}
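
/*
 * Example of the reuse scan above: with attribute ids {0, 1, 3} present and
 * next_attr_id already saturated at 0x7FFF, the loop settles on free_id = 2
 * and max_id = 3, so id 2 is returned and next_attr_id becomes 4.
 */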

int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi)
{
	int err;
	struct mft_inode *m = kzalloc(sizeof(struct mft_inode), GFP_NOFS);

	if (!m)
		return -ENOMEM;

	err = mi_init(m, sbi, rno);
	if (err) {
		kfree(m);
		return err;
	}

	err = mi_read(m, false);
	if (err) {
		mi_put(m);
		return err;
	}

	*mi = m;
	return 0;
}
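
/*
 * Typical call pattern for mi_get()/mi_put(); an illustrative sketch only
 * (the helper name is hypothetical), kept out of the build:
 */
#if 0
static int example_with_record(struct ntfs_sb_info *sbi, CLST rno)
{
	struct mft_inode *mi;
	int err = mi_get(sbi, rno, &mi);

	if (err)
		return err;

	/* ... inspect or modify mi->mrec here ... */

	mi_put(mi);
	return 0;
}
#endif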

void mi_put(struct mft_inode *mi)
{
	mi_clear(mi);
	kfree(mi);
}

int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno)
{
	mi->sbi = sbi;
	mi->rno = rno;
	mi->mrec = kmalloc(sbi->record_size, GFP_NOFS);
	if (!mi->mrec)
		return -ENOMEM;

	return 0;
}

/*
 * mi_read - Read MFT data.
 */
int mi_read(struct mft_inode *mi, bool is_mft)
{
	int err;
	struct MFT_REC *rec = mi->mrec;
	struct ntfs_sb_info *sbi = mi->sbi;
	u32 bpr = sbi->record_size;
	u64 vbo = (u64)mi->rno << sbi->record_bits;
	struct ntfs_inode *mft_ni = sbi->mft.ni;
	struct runs_tree *run = mft_ni ? &mft_ni->file.run : NULL;
	struct rw_semaphore *rw_lock = NULL;

	if (is_mounted(sbi)) {
		if (!is_mft && mft_ni) {
			rw_lock = &mft_ni->file.run_lock;
			down_read(rw_lock);
		}
	}

	err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
	if (rw_lock)
		up_read(rw_lock);
	if (!err)
		goto ok;

	if (err == -E_NTFS_FIXUP) {
		mi->dirty = true;
		goto ok;
	}

	if (err != -ENOENT)
		goto out;

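	/*
	 * -ENOENT: the run map for this part of the MFT data is not loaded
	 * yet. Load the runs covering the target vcn and retry the read once.
	 */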
	if (rw_lock) {
		ni_lock(mft_ni);
		down_write(rw_lock);
	}
	err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, run,
				 vbo >> sbi->cluster_bits);
	if (rw_lock) {
		up_write(rw_lock);
		ni_unlock(mft_ni);
	}
	if (err)
		goto out;

	if (rw_lock)
		down_read(rw_lock);
	err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
	if (rw_lock)
		up_read(rw_lock);

	if (err == -E_NTFS_FIXUP) {
		mi->dirty = true;
		goto ok;
	}
	if (err)
		goto out;

ok:
	/* Check field 'total' only here. */
	if (le32_to_cpu(rec->total) != bpr) {
		err = -EINVAL;
		goto out;
	}

	return 0;

out:
	if (err == -E_NTFS_CORRUPT) {
		ntfs_err(sbi->sb, "mft corrupted");
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		err = -EINVAL;
	}

	return err;
}

/*
 * mi_enum_attr - Start/continue attributes enumeration in record.
 *
 * NOTE: mi->mrec is memory of size sbi->record_size;
 * here we are sure that mi->mrec->total == sbi->record_size (see mi_read).
 */
struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
{
	const struct MFT_REC *rec = mi->mrec;
	u32 used = le32_to_cpu(rec->used);
	u32 t32, off, asize, prev_type;
	u16 t16;
	u64 data_size, alloc_size, tot_size;

	if (!attr) {
		u32 total = le32_to_cpu(rec->total);

		off = le16_to_cpu(rec->attr_off);

		if (used > total)
			return NULL;

		if (off >= used || off < MFTRECORD_FIXUP_OFFSET_1 ||
		    !IS_ALIGNED(off, 4)) {
			return NULL;
		}

		/* Skip records that are not in use. */
		if (!is_rec_inuse(rec))
			return NULL;

		prev_type = 0;
		attr = Add2Ptr(rec, off);
	} else {
		/* Check if the input attr is inside the record. */
		off = PtrOffset(rec, attr);
		if (off >= used)
			return NULL;

		asize = le32_to_cpu(attr->size);
		if (asize < SIZEOF_RESIDENT) {
			/* Impossible, because we should never return such an attribute. */
			return NULL;
		}

		/* Overflow check. */
		if (off + asize < off)
			return NULL;

		prev_type = le32_to_cpu(attr->type);
		attr = Add2Ptr(attr, asize);
		off += asize;
	}

	asize = le32_to_cpu(attr->size);

	/* Can we use the first field (attr->type)? */
	if (off + 8 > used) {
		static_assert(ALIGN(sizeof(enum ATTR_TYPE), 8) == 8);
		return NULL;
	}

	if (attr->type == ATTR_END) {
		/* End of enumeration. */
		return NULL;
	}

	/* 0x100 is the last known attribute type for now. */
	t32 = le32_to_cpu(attr->type);
	if (!t32 || (t32 & 0xf) || (t32 > 0x100))
		return NULL;

	/* Attributes in a record must be ordered by type. */
	if (t32 < prev_type)
		return NULL;

	/* Check overflow and boundary. */
	if (off + asize < off || off + asize > used)
		return NULL;

	/* Check size of attribute. */
	if (!attr->non_res) {
		/* Check resident fields. */
		if (asize < SIZEOF_RESIDENT)
			return NULL;

		t16 = le16_to_cpu(attr->res.data_off);
		if (t16 > asize)
			return NULL;

		if (t16 + le32_to_cpu(attr->res.data_size) > asize)
			return NULL;

		t32 = sizeof(short) * attr->name_len;
		if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
			return NULL;

		return attr;
	}

	/* Check nonresident fields. */
	if (attr->non_res != 1)
		return NULL;

	t16 = le16_to_cpu(attr->nres.run_off);
	if (t16 > asize)
		return NULL;

	t32 = sizeof(short) * attr->name_len;
	if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
		return NULL;

	/* Check start/end vcn. */
	if (le64_to_cpu(attr->nres.svcn) > le64_to_cpu(attr->nres.evcn) + 1)
		return NULL;

	data_size = le64_to_cpu(attr->nres.data_size);
	if (le64_to_cpu(attr->nres.valid_size) > data_size)
		return NULL;

	alloc_size = le64_to_cpu(attr->nres.alloc_size);
	if (data_size > alloc_size)
		return NULL;

	t32 = mi->sbi->cluster_mask;
	if (alloc_size & t32)
		return NULL;

	if (!attr->nres.svcn && is_attr_ext(attr)) {
		/* First segment of a sparse/compressed attribute. */
		if (asize + 8 < SIZEOF_NONRESIDENT_EX)
			return NULL;

		tot_size = le64_to_cpu(attr->nres.total_size);
		if (tot_size & t32)
			return NULL;

		if (tot_size > alloc_size)
			return NULL;
	} else {
		if (asize + 8 < SIZEOF_NONRESIDENT)
			return NULL;

		if (attr->nres.c_unit)
			return NULL;
	}

	return attr;
}
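
/*
 * The usual enumeration loop, shown here as an illustrative sketch only
 * (the helper name is hypothetical), kept out of the build:
 */
#if 0
static void example_enum_attrs(struct mft_inode *mi)
{
	struct ATTRIB *attr = NULL;

	/* NULL starts the enumeration; a NULL result ends it. */
	while ((attr = mi_enum_attr(mi, attr)))
		pr_debug("attr type %x, id %u\n", le32_to_cpu(attr->type),
			 le16_to_cpu(attr->id));
}
#endif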

/*
 * mi_find_attr - Find the attribute by type, name and id.
 */
struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
			    enum ATTR_TYPE type, const __le16 *name,
			    u8 name_len, const __le16 *id)
{
	u32 type_in = le32_to_cpu(type);
	u32 atype;

next_attr:
	attr = mi_enum_attr(mi, attr);
	if (!attr)
		return NULL;

	atype = le32_to_cpu(attr->type);
	if (atype > type_in)
		return NULL;

	if (atype < type_in)
		goto next_attr;

	if (attr->name_len != name_len)
		goto next_attr;

	if (name_len && memcmp(attr_name(attr), name, name_len * sizeof(short)))
		goto next_attr;

	if (id && *id != attr->id)
		goto next_attr;

	return attr;
}
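
/*
 * Example lookup of the unnamed DATA attribute; illustrative sketch only
 * (the wrapper name is hypothetical), kept out of the build:
 */
#if 0
static struct ATTRIB *example_find_unnamed_data(struct mft_inode *mi)
{
	/* NULL name and NULL id match the first unnamed ATTR_DATA. */
	return mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, NULL);
}
#endif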

int mi_write(struct mft_inode *mi, int wait)
{
	struct MFT_REC *rec;
	int err;
	struct ntfs_sb_info *sbi;

	if (!mi->dirty)
		return 0;

	sbi = mi->sbi;
	rec = mi->mrec;

	err = ntfs_write_bh(sbi, &rec->rhdr, &mi->nb, wait);
	if (err)
		return err;

	if (mi->rno < sbi->mft.recs_mirr)
		sbi->flags |= NTFS_FLAGS_MFTMIRR;

	mi->dirty = false;

	return 0;
}
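
/*
 * Note: mi_write() does not update the MFT mirror itself; records that are
 * also kept in the mirror are just flagged via NTFS_FLAGS_MFTMIRR and the
 * mirror is synchronized later when that flag is processed.
 */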

int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
		  __le16 flags, bool is_mft)
{
	int err;
	u16 seq = 1;
	struct MFT_REC *rec;
	u64 vbo = (u64)rno << sbi->record_bits;

	err = mi_init(mi, sbi, rno);
	if (err)
		return err;

	rec = mi->mrec;

	if (rno == MFT_REC_MFT) {
		;
	} else if (rno < MFT_REC_FREE) {
		seq = rno;
	} else if (rno >= sbi->mft.used) {
		;
	} else if (mi_read(mi, is_mft)) {
		;
	} else if (rec->rhdr.sign == NTFS_FILE_SIGNATURE) {
		/* Record is reused. Update its sequence number. */
		seq = le16_to_cpu(rec->seq) + 1;
		if (!seq)
			seq = 1;
	}

	memcpy(rec, sbi->new_rec, sbi->record_size);

	rec->seq = cpu_to_le16(seq);
	rec->flags = RECORD_FLAG_IN_USE | flags;
	if (MFTRECORD_FIXUP_OFFSET == MFTRECORD_FIXUP_OFFSET_3)
		rec->mft_record = cpu_to_le32(rno);

	mi->dirty = true;

	if (!mi->nb.nbufs) {
		struct ntfs_inode *ni = sbi->mft.ni;
		bool lock = false;

		if (is_mounted(sbi) && !is_mft) {
			down_read(&ni->file.run_lock);
			lock = true;
		}

		err = ntfs_get_bh(sbi, &ni->file.run, vbo, sbi->record_size,
				  &mi->nb);
		if (lock)
			up_read(&ni->file.run_lock);
	}

	return err;
}

/*
 * mi_insert_attr - Reserve space for a new attribute.
 *
 * Return: Not fully constructed attribute, or NULL if it is not possible
 * to create one.
 */
struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
			      const __le16 *name, u8 name_len, u32 asize,
			      u16 name_off)
{
	size_t tail;
	struct ATTRIB *attr;
	__le16 id;
	struct MFT_REC *rec = mi->mrec;
	struct ntfs_sb_info *sbi = mi->sbi;
	u32 used = le32_to_cpu(rec->used);
	const u16 *upcase = sbi->upcase;

	/* Can we insert the attribute into this record? */
	if (used + asize > sbi->record_size)
		return NULL;

	/*
	 * Scan through the list of attributes to find the point
	 * at which we should insert it.
	 */
	attr = NULL;
	while ((attr = mi_enum_attr(mi, attr))) {
		int diff = compare_attr(attr, type, name, name_len, upcase);

		if (diff < 0)
			continue;

		if (!diff && !is_attr_indexed(attr))
			return NULL;
		break;
	}

	if (!attr) {
		/* Append. */
		tail = 8;
		attr = Add2Ptr(rec, used - 8);
	} else {
		/* Insert before 'attr'. */
		tail = used - PtrOffset(rec, attr);
	}

	id = mi_new_attt_id(mi);

	memmove(Add2Ptr(attr, asize), attr, tail);
	memset(attr, 0, asize);

	attr->type = type;
	attr->size = cpu_to_le32(asize);
	attr->name_len = name_len;
	attr->name_off = cpu_to_le16(name_off);
	attr->id = id;

	memmove(Add2Ptr(attr, name_off), name, name_len * sizeof(short));
	rec->used = cpu_to_le32(used + asize);

	mi->dirty = true;

	return attr;
}
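
/*
 * mi_insert_attr() zeroes the new slot and fills in only type, size, name and
 * id; the caller completes the rest. A hedged sketch of reserving a resident
 * attribute (the helper name and the sizing convention are illustrative,
 * assuming a SIZEOF_RESIDENT header followed by an 8-byte aligned name and
 * payload), kept out of the build:
 */
#if 0
static struct ATTRIB *example_insert_resident(struct mft_inode *mi,
					      enum ATTR_TYPE type,
					      const __le16 *name, u8 name_len,
					      u32 data_size)
{
	u32 name_size = ALIGN(name_len * sizeof(short), 8);
	u32 asize = SIZEOF_RESIDENT + name_size + ALIGN(data_size, 8);
	struct ATTRIB *attr;

	attr = mi_insert_attr(mi, type, name, name_len, asize, SIZEOF_RESIDENT);
	if (!attr)
		return NULL;

	/* Resident layout: data follows the header and the (aligned) name. */
	attr->res.data_size = cpu_to_le32(data_size);
	attr->res.data_off = cpu_to_le16(SIZEOF_RESIDENT + name_size);
	return attr;
}
#endif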

/*
 * mi_remove_attr - Remove the attribute from record.
 *
 * NOTE: The source attr will point to the next attribute.
 */
bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
		    struct ATTRIB *attr)
{
	struct MFT_REC *rec = mi->mrec;
	u32 aoff = PtrOffset(rec, attr);
	u32 used = le32_to_cpu(rec->used);
	u32 asize = le32_to_cpu(attr->size);

	if (aoff + asize > used)
		return false;

	if (ni && is_attr_indexed(attr)) {
		le16_add_cpu(&ni->mi.mrec->hard_links, -1);
		ni->mi.dirty = true;
	}

	used -= asize;
	memmove(attr, Add2Ptr(attr, asize), used - aoff);
	rec->used = cpu_to_le32(used);
	mi->dirty = true;

	return true;
}
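
/*
 * Removal is usually paired with a lookup; illustrative sketch only (the
 * wrapper name is hypothetical), kept out of the build:
 */
#if 0
static bool example_remove_unnamed(struct ntfs_inode *ni, struct mft_inode *mi,
				   enum ATTR_TYPE type)
{
	struct ATTRIB *attr = mi_find_attr(mi, NULL, type, NULL, 0, NULL);

	return attr && mi_remove_attr(ni, mi, attr);
}
#endif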

/* bytes = "new attribute size" - "old attribute size" */
bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes)
{
	struct MFT_REC *rec = mi->mrec;
	u32 aoff = PtrOffset(rec, attr);
	u32 total, used = le32_to_cpu(rec->used);
	u32 nsize, asize = le32_to_cpu(attr->size);
	u32 rsize = le32_to_cpu(attr->res.data_size);
	int tail = (int)(used - aoff - asize);
	int dsize;
	char *next;

	if (tail < 0 || aoff >= used)
		return false;

	if (!bytes)
		return true;

	total = le32_to_cpu(rec->total);
	next = Add2Ptr(attr, asize);

	if (bytes > 0) {
		dsize = ALIGN(bytes, 8);
		if (used + dsize > total)
			return false;
		nsize = asize + dsize;
		/* Move tail */
		memmove(next + dsize, next, tail);
		memset(next, 0, dsize);
		used += dsize;
		rsize += dsize;
	} else {
		dsize = ALIGN(-bytes, 8);
		if (dsize > asize)
			return false;
		nsize = asize - dsize;
		memmove(next - dsize, next, tail);
		used -= dsize;
		rsize -= dsize;
	}

	rec->used = cpu_to_le32(used);
	attr->size = cpu_to_le32(nsize);
	if (!attr->non_res)
		attr->res.data_size = cpu_to_le32(rsize);
	mi->dirty = true;

	return true;
}
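
/*
 * The delta is rounded up to the 8-byte attribute granularity, so both the
 * attribute size and (for resident attributes) res.data_size change by
 * ALIGN(abs(bytes), 8). Illustrative sketch only, kept out of the build:
 */
#if 0
static bool example_grow_by_10(struct mft_inode *mi, struct ATTRIB *attr)
{
	/* Asks for +10 bytes; the record actually grows by ALIGN(10, 8) = 16. */
	return mi_resize_attr(mi, attr, 10);
}
#endif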

/*
 * mi_pack_runs - Pack runs into the MFT record.
 *
 * If it fails, the record is not changed.
 */
int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
		 struct runs_tree *run, CLST len)
{
	int err = 0;
	struct ntfs_sb_info *sbi = mi->sbi;
	u32 new_run_size;
	CLST plen;
	struct MFT_REC *rec = mi->mrec;
	CLST svcn = le64_to_cpu(attr->nres.svcn);
	u32 used = le32_to_cpu(rec->used);
	u32 aoff = PtrOffset(rec, attr);
	u32 asize = le32_to_cpu(attr->size);
	char *next = Add2Ptr(attr, asize);
	u16 run_off = le16_to_cpu(attr->nres.run_off);
	u32 run_size = asize - run_off;
	u32 tail = used - aoff - asize;
	u32 dsize = sbi->record_size - used;

	/* Make a maximum gap in current record. */
	memmove(next + dsize, next, tail);

	/* Pack as much as possible. */
	err = run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size + dsize,
		       &plen);
	if (err < 0) {
		memmove(next, next + dsize, tail);
		return err;
	}

	new_run_size = ALIGN(err, 8);

	memmove(next + new_run_size - run_size, next + dsize, tail);

	attr->size = cpu_to_le32(asize + new_run_size - run_size);
	attr->nres.evcn = cpu_to_le64(svcn + plen - 1);
	rec->used = cpu_to_le32(used + new_run_size - run_size);
	mi->dirty = true;

	return 0;
}
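
/*
 * Note: run_pack() may not fit the whole [svcn, svcn + len) range into the
 * free space of this record; evcn is set from the number of clusters that
 * were actually packed ('plen'), so callers must check it and place any
 * remaining runs elsewhere (typically in another record via the attribute
 * list).
 */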