// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/stat.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/compat.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>

/**
 * generic_fillattr - Fill in the basic attributes from the inode struct
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attributes
 *
 * Fill in the basic attributes in the kstat structure from data that's to be
 * found on the VFS inode structure.  This is the default if no getattr inode
 * operation is supplied.
 */
void generic_fillattr(struct inode *inode, struct kstat *stat)
{
	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = inode->i_uid;
	stat->gid = inode->i_gid;
	stat->rdev = inode->i_rdev;
	stat->size = i_size_read(inode);
	stat->atime = inode->i_atime;
	stat->mtime = inode->i_mtime;
	stat->ctime = inode->i_ctime;
	stat->blksize = i_blocksize(inode);
	stat->blocks = inode->i_blocks;
}
EXPORT_SYMBOL(generic_fillattr);
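/*
 * Illustrative example: a filesystem whose attributes live entirely on the
 * VFS inode needs no ->getattr() at all, since vfs_getattr_nosec() below
 * falls back to generic_fillattr().  A filesystem that only wants to tweak
 * the generic result could do roughly the following; examplefs_getattr and
 * EXAMPLEFS_IO_SIZE are hypothetical names, not kernel symbols:
 *
 *	static int examplefs_getattr(const struct path *path, struct kstat *stat,
 *				     u32 request_mask, unsigned int query_flags)
 *	{
 *		generic_fillattr(d_inode(path->dentry), stat);
 *		stat->blksize = EXAMPLEFS_IO_SIZE;	// preferred I/O size
 *		return 0;
 *	}
 */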

/**
 * vfs_getattr_nosec - getattr without security checks
 * @path: file to get attributes from
 * @stat: structure to return attributes in
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
 *
 * Get attributes without calling security_inode_getattr.
 *
 * Currently the only caller other than vfs_getattr is internal to the
 * filehandle lookup code, which uses only the inode number and returns no
 * attributes to any user.  Any other code probably wants vfs_getattr.
 */
int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_backing_inode(path->dentry);

	memset(stat, 0, sizeof(*stat));
	stat->result_mask |= STATX_BASIC_STATS;
	request_mask &= STATX_ALL;
	query_flags &= KSTAT_QUERY_FLAGS;

	/* allow the fs to override these if it really wants to */
	if (IS_NOATIME(inode))
		stat->result_mask &= ~STATX_ATIME;
	if (IS_AUTOMOUNT(inode))
		stat->attributes |= STATX_ATTR_AUTOMOUNT;

	if (inode->i_op->getattr)
		return inode->i_op->getattr(path, stat, request_mask,
					    query_flags);

	generic_fillattr(inode, stat);
	return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);

/**
 * vfs_getattr - Get the enhanced basic attributes of a file
 * @path: The file of interest
 * @stat: Where to return the statistics
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
 *
 * Ask the filesystem for a file's attributes.  The caller must indicate in
 * request_mask and query_flags what they want.
 *
 * If the file is remote, the filesystem can be forced to update the attributes
 * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags, or the
 * update can be suppressed by passing AT_STATX_DONT_SYNC.
 *
 * Bits must have been set in request_mask to indicate which attributes the
 * caller wants retrieved.  Any attribute not requested may be returned anyway,
 * but the value may be approximate, and, if remote, may not have been
 * synchronised with the server.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_getattr(const struct path *path, struct kstat *stat,
		u32 request_mask, unsigned int query_flags)
{
	int retval;

	retval = security_inode_getattr(path);
	if (retval)
		return retval;
	return vfs_getattr_nosec(path, stat, request_mask, query_flags);
}
EXPORT_SYMBOL(vfs_getattr);
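/*
 * Illustrative example: an in-kernel caller that only needs a subset of the
 * attributes passes the corresponding STATX_* bits in request_mask.  Fetching
 * just the size of an open file might look roughly like this ("file" is a
 * hypothetical struct file *):
 *
 *	struct kstat stat;
 *	int err = vfs_getattr(&file->f_path, &stat, STATX_SIZE,
 *			      AT_STATX_SYNC_AS_STAT);
 *	if (!err)
 *		pr_debug("size=%lld\n", (long long)stat.size);
 */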

/**
 * vfs_statx_fd - Get the enhanced basic attributes by file descriptor
 * @fd: The file descriptor referring to the file of interest
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a file descriptor to determine the file location.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_statx_fd(unsigned int fd, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct fd f;
	int error = -EBADF;

	if (query_flags & ~KSTAT_QUERY_FLAGS)
		return -EINVAL;

	f = fdget_raw(fd);
	if (f.file) {
		error = vfs_getattr(&f.file->f_path, stat,
				    request_mask, query_flags);
		fdput(f);
	}
	return error;
}
EXPORT_SYMBOL(vfs_statx_fd);

/**
 * vfs_statx - Get basic and extra attributes by filename
 * @dfd: A file descriptor representing the base dir for a relative filename
 * @filename: The name of the file of interest
 * @flags: Flags to control the query
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a filename and base directory to determine the file location.
 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
 * at the given name from being referenced.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_statx(int dfd, const char __user *filename, int flags,
	      struct kstat *stat, u32 request_mask)
{
	struct path path;
	int error = -EINVAL;
	unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_AUTOMOUNT;

	if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
		       AT_EMPTY_PATH | KSTAT_QUERY_FLAGS)) != 0)
		return -EINVAL;

	if (flags & AT_SYMLINK_NOFOLLOW)
		lookup_flags &= ~LOOKUP_FOLLOW;
	if (flags & AT_NO_AUTOMOUNT)
		lookup_flags &= ~LOOKUP_AUTOMOUNT;
	if (flags & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;

retry:
	error = user_path_at(dfd, filename, lookup_flags, &path);
	if (error)
		goto out;

	error = vfs_getattr(&path, stat, request_mask, flags);
	path_put(&path);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
out:
	return error;
}
EXPORT_SYMBOL(vfs_statx);
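/*
 * Note: the vfs_stat(), vfs_lstat(), vfs_fstatat() and vfs_fstat() helpers
 * used by the syscalls below are thin inline wrappers in <linux/fs.h> around
 * vfs_statx() and vfs_statx_fd(), requesting STATX_BASIC_STATS.  Roughly
 * (see the header for the authoritative definitions):
 *
 *	vfs_fstatat(dfd, name, stat, flags)
 *		== vfs_statx(dfd, name, flags | AT_NO_AUTOMOUNT,
 *			     stat, STATX_BASIC_STATS);
 */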


#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(lstat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_old_stat(&stat, statbuf);

	return error;
}

#endif /* __ARCH_WANT_OLD_STAT */

#ifdef __ARCH_WANT_NEW_STAT

#if BITS_PER_LONG == 32
# define choose_32_64(a,b) a
#else
# define choose_32_64(a,b) b
#endif

#define valid_dev(x)  choose_32_64(old_valid_dev(x),true)
#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)

#ifndef INIT_STRUCT_STAT_PADDING
# define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(newstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_new_stat(&stat, statbuf);
}

#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
		struct stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}
#endif

SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat(&stat, statbuf);

	return error;
}
#endif

static int do_readlinkat(int dfd, const char __user *pathname,
			 char __user *buf, int bufsiz)
{
	struct path path;
	int error;
	int empty = 0;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
	if (!error) {
		struct inode *inode = d_backing_inode(path.dentry);

		error = empty ? -ENOENT : -EINVAL;
		/*
		 * AFS mountpoints allow readlink(2) but are not symlinks
		 */
		if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
			error = security_inode_readlink(path.dentry);
			if (!error) {
				touch_atime(&path);
				error = vfs_readlink(path.dentry, buf, bufsiz);
			}
		}
		path_put(&path);
		if (retry_estale(error, lookup_flags)) {
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
	}
	return error;
}

SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	return do_readlinkat(dfd, pathname, buf, bufsiz);
}

SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
}
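/*
 * Illustrative example: readlink(2) and readlinkat(2) return the number of
 * bytes placed in the buffer and do not NUL-terminate it, so a userspace
 * caller has to terminate the result itself, e.g.:
 *
 *	char buf[PATH_MAX];
 *	ssize_t n = readlinkat(AT_FDCWD, "/proc/self/exe", buf, sizeof(buf) - 1);
 *	if (n >= 0) {
 *		buf[n] = '\0';
 *		printf("%s\n", buf);
 *	}
 */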


/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

#ifndef INIT_STRUCT_STAT64_PADDING
# define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(lstat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_lstat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
		struct stat64 __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat64(&stat, statbuf);
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */

static noinline_for_stack int
cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
	struct statx tmp;

	memset(&tmp, 0, sizeof(tmp));

	tmp.stx_mask = stat->result_mask;
	tmp.stx_blksize = stat->blksize;
	tmp.stx_attributes = stat->attributes;
	tmp.stx_nlink = stat->nlink;
	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.stx_mode = stat->mode;
	tmp.stx_ino = stat->ino;
	tmp.stx_size = stat->size;
	tmp.stx_blocks = stat->blocks;
	tmp.stx_attributes_mask = stat->attributes_mask;
	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
	tmp.stx_rdev_major = MAJOR(stat->rdev);
	tmp.stx_rdev_minor = MINOR(stat->rdev);
	tmp.stx_dev_major = MAJOR(stat->dev);
	tmp.stx_dev_minor = MINOR(stat->dev);

	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

/**
 * sys_statx - System call to get enhanced stats
 * @dfd: Base directory to pathwalk from *or* fd to stat.
 * @filename: File to stat or "" with AT_EMPTY_PATH
 * @flags: AT_* flags to control pathwalk.
 * @mask: Parts of statx struct actually required.
 * @buffer: Result buffer.
 *
 * Note that fstat() can be emulated by setting dfd to the fd of interest,
 * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
 */
SYSCALL_DEFINE5(statx,
		int, dfd, const char __user *, filename, unsigned, flags,
		unsigned int, mask,
		struct statx __user *, buffer)
{
	struct kstat stat;
	int error;

	if (mask & STATX__RESERVED)
		return -EINVAL;
	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
		return -EINVAL;

	error = vfs_statx(dfd, filename, flags, &stat, mask);
	if (error)
		return error;

	return cp_statx(&stat, buffer);
}
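/*
 * Illustrative example: a userspace caller asking for the basic stats plus
 * the birth time via the glibc statx() wrapper might do roughly the
 * following, checking stx_mask because the filesystem may not be able to
 * supply every requested attribute:
 *
 *	struct statx stx;
 *
 *	if (statx(AT_FDCWD, "/etc/passwd", 0,
 *		  STATX_BASIC_STATS | STATX_BTIME, &stx) == 0 &&
 *	    (stx.stx_mask & STATX_BTIME))
 *		printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
 */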

#ifdef CONFIG_COMPAT
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
	struct compat_stat tmp;

	if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	memset(&tmp, 0, sizeof(tmp));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
	if ((u64) stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

#ifndef __ARCH_WANT_STAT64
COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
		       const char __user *, filename,
		       struct compat_stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}
#endif

COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_compat_stat(&stat, statbuf);
	return error;
}
#endif

/* The caller is responsible for sufficient locking here (i.e. inode->i_lock) */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks += bytes >> 9;
	bytes &= 511;
	inode->i_bytes += bytes;
	if (inode->i_bytes >= 512) {
		inode->i_blocks++;
		inode->i_bytes -= 512;
	}
}
EXPORT_SYMBOL(__inode_add_bytes);
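/*
 * Worked example (illustrative only): with inode->i_bytes == 400, calling
 * __inode_add_bytes(inode, 1300) does i_blocks += 1300 >> 9 (== 2) and
 * i_bytes += 1300 & 511 (== 276); i_bytes is then 676 >= 512, so one more
 * block is carried over, leaving i_blocks increased by 3 in total and
 * i_bytes == 164, i.e. 400 + 1300 == 3 * 512 + 164.
 */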

void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_add_bytes);

void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks -= bytes >> 9;
	bytes &= 511;
	if (inode->i_bytes < bytes) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= bytes;
}

EXPORT_SYMBOL(__inode_sub_bytes);

void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_sub_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_sub_bytes);

loff_t inode_get_bytes(struct inode *inode)
{
	loff_t ret;

	spin_lock(&inode->i_lock);
	ret = __inode_get_bytes(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}

EXPORT_SYMBOL(inode_get_bytes);

void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	/* The caller is responsible for sufficient locking
	 * (i.e. inode->i_lock) */
	inode->i_blocks = bytes >> 9;
	inode->i_bytes = bytes & 511;
}

EXPORT_SYMBOL(inode_set_bytes);