/Linux-v5.15/fs/gfs2/

glops.c
    301  int isreg = ip && S_ISREG(ip->i_inode.i_mode);  in inode_go_sync()
    307  unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);  in inode_go_sync()
    308  inode_dio_wait(&ip->i_inode);  in inode_go_sync()
    319  struct address_space *mapping = ip->i_inode.i_mapping;  in inode_go_sync()
    360  forget_all_cached_acls(&ip->i_inode);  in inode_go_inval()
    361  security_inode_invalidate_secctx(&ip->i_inode);  in inode_go_inval()
    372  if (ip && S_ISREG(ip->i_inode.i_mode))  in inode_go_inval()
    373  truncate_inode_pages(ip->i_inode.i_mapping, 0);  in inode_go_inval()
    401  bool is_new = ip->i_inode.i_state & I_NEW;  in gfs2_dinode_in()
    405  if (unlikely(!is_new && inode_wrong_type(&ip->i_inode, mode)))  in gfs2_dinode_in()
    [all …]

xattr.c
    91   struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in ea_foreach_i()
    95   if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))  in ea_foreach_i()
    140  if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {  in ea_foreach()
    146  end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;  in ea_foreach()
    234  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in ea_dealloc_unstuffed()
    296  gfs2_add_inode_blocks(&ip->i_inode, -1);  in ea_dealloc_unstuffed()
    314  ip->i_inode.i_ctime = current_time(&ip->i_inode);  in ea_dealloc_unstuffed()
    315  __mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);  in ea_dealloc_unstuffed()
    330  error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));  in ea_remove_unstuffed()
    354  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in ea_list_i()
    [all …]

inode.c
    357  error = gfs2_permission(&init_user_ns, &dip->i_inode,  in create_ok()
    363  if (!dip->i_inode.i_nlink)  in create_ok()
    368  if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)  in create_ok()
    377  if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&  in munge_mode_uid_gid()
    378  (dip->i_inode.i_mode & S_ISUID) &&  in munge_mode_uid_gid()
    379  !uid_eq(dip->i_inode.i_uid, GLOBAL_ROOT_UID)) {  in munge_mode_uid_gid()
    382  else if (!uid_eq(dip->i_inode.i_uid, current_fsuid()))  in munge_mode_uid_gid()
    384  inode->i_uid = dip->i_inode.i_uid;  in munge_mode_uid_gid()
    388  if (dip->i_inode.i_mode & S_ISGID) {  in munge_mode_uid_gid()
    391  inode->i_gid = dip->i_inode.i_gid;  in munge_mode_uid_gid()
    [all …]

dir.c
    111  if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_JD)) {  in gfs2_dir_get_existing_buffer()
    131  if (ip->i_inode.i_size < offset + size)  in gfs2_dir_write_stuffed()
    132  i_size_write(&ip->i_inode, offset + size);  in gfs2_dir_write_stuffed()
    133  ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);  in gfs2_dir_write_stuffed()
    155  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in gfs2_dir_write_data()
    193  error = gfs2_alloc_extent(&ip->i_inode, lblock, &dblock,  in gfs2_dir_write_data()
    228  if (ip->i_inode.i_size < offset + copied)  in gfs2_dir_write_data()
    229  i_size_write(&ip->i_inode, offset + copied);  in gfs2_dir_write_data()
    230  ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);  in gfs2_dir_write_data()
    270  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in gfs2_dir_read_data()
    [all …]

bmap.c
    58   struct inode *inode = &ip->i_inode;  in gfs2_unstuffer_page()
    107  if (i_size_read(&ip->i_inode)) {  in __gfs2_unstuff_inode()
    116  gfs2_trans_remove_revoke(GFS2_SB(&ip->i_inode), block, 1);  in __gfs2_unstuff_inode()
    136  if (i_size_read(&ip->i_inode)) {  in __gfs2_unstuff_inode()
    138  gfs2_add_inode_blocks(&ip->i_inode, 1);  in __gfs2_unstuff_inode()
    139  di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));  in __gfs2_unstuff_inode()
    162  struct inode *inode = &ip->i_inode;  in gfs2_unstuff_dinode()
    778  gfs2_add_inode_blocks(&ip->i_inode, alloced);  in __gfs2_iomap_alloc()
    974  if (&ip->i_inode == sdp->sd_rindex) {  in gfs2_write_lock()
    996  if (&ip->i_inode == sdp->sd_rindex) {  in gfs2_write_unlock()
    [all …]

quota.h
    43   struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in gfs2_quota_lock_check()
    55   ret = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid, ap);  in gfs2_quota_lock_check()

super.c
    388  str->di_mode = cpu_to_be32(ip->i_inode.i_mode);  in gfs2_dinode_out()
    389  str->di_uid = cpu_to_be32(i_uid_read(&ip->i_inode));  in gfs2_dinode_out()
    390  str->di_gid = cpu_to_be32(i_gid_read(&ip->i_inode));  in gfs2_dinode_out()
    391  str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);  in gfs2_dinode_out()
    392  str->di_size = cpu_to_be64(i_size_read(&ip->i_inode));  in gfs2_dinode_out()
    393  str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));  in gfs2_dinode_out()
    394  str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);  in gfs2_dinode_out()
    395  str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);  in gfs2_dinode_out()
    396  str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);  in gfs2_dinode_out()
    404  str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&  in gfs2_dinode_out()
    [all …]

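The gfs2_dinode_out() matches above all follow one pattern: host-endian fields of the in-core VFS inode are converted with cpu_to_be32()/cpu_to_be64() before being written into the big-endian on-disk dinode. Below is a rough userspace sketch of that pattern, assuming htobe32()/htobe64() as stand-ins for the kernel helpers; the trimmed structs and field subset are illustrative only, not the real struct gfs2_dinode layout.

    /*
     * Illustration only: copy host-endian in-core fields into a record whose
     * fields are stored big-endian on disk, as the gfs2_dinode_out() lines do.
     */
    #include <endian.h>
    #include <stdint.h>

    struct disk_dinode {          /* illustrative subset of on-disk fields */
            uint32_t di_mode;     /* stored big-endian */
            uint64_t di_size;
            uint64_t di_mtime;
    };

    struct core_inode {           /* illustrative in-core, host-endian fields */
            uint32_t i_mode;
            uint64_t i_size;
            int64_t  mtime_sec;
    };

    static void dinode_out(const struct core_inode *ip, struct disk_dinode *str)
    {
            str->di_mode  = htobe32(ip->i_mode);            /* like cpu_to_be32() */
            str->di_size  = htobe64(ip->i_size);            /* like cpu_to_be64() */
            str->di_mtime = htobe64((uint64_t)ip->mtime_sec);
    }

    int main(void)
    {
            struct core_inode in = { .i_mode = 0100644, .i_size = 4096, .mtime_sec = 1 };
            struct disk_dinode out;

            dinode_out(&in, &out);   /* out now holds big-endian values */
            (void)out;
            return 0;
    }
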
quota.c
    384  bh_map.b_size = BIT(ip->i_inode.i_blkbits);  in bh_get()
    385  error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);  in bh_get()
    535  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in gfs2_qa_get()
    566  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in gfs2_quota_hold()
    585  error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);  in gfs2_quota_hold()
    591  error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);  in gfs2_quota_hold()
    598  !uid_eq(uid, ip->i_inode.i_uid)) {  in gfs2_quota_hold()
    607  !gid_eq(gid, ip->i_inode.i_gid)) {  in gfs2_quota_hold()
    624  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in gfs2_quota_unhold()
    697  struct inode *inode = &ip->i_inode;  in gfs2_write_buf_to_page()
    [all …]

rgrp.c
    603   struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in check_and_update_goal()
    896   struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in read_rindex_entry()
    902   if (pos >= i_size_read(&ip->i_inode))  in read_rindex_entry()
    995   struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in gfs2_ri_update()
    1582  struct inode *inode = &ip->i_inode;  in rg_mblk_search()
    2000  const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in gfs2_orlov_skip()
    2058  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in gfs2_inplace_reserve()
    2083  if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))  in gfs2_inplace_reserve()
    2429  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in gfs2_alloc_blocks()
    2501  gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid);  in gfs2_alloc_blocks()
    [all …]

meta_io.c
    407  struct address_space *mapping = ip->i_inode.i_mapping;  in gfs2_getjdatabuf()
    408  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in gfs2_getjdatabuf()
    442  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in gfs2_journal_wipe()
    483  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in gfs2_meta_buffer()

bmap.h
    33   const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in gfs2_write_calc_reserv()

log.h
    60   struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in gfs2_ordered_add_inode()

incore.h
    383  struct inode i_inode;  member
    414  return container_of(inode, struct gfs2_inode, i_inode);  in GFS2_I()
    871  return GFS2_SB(&ip->i_inode)->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);  in gfs2_max_stuffed_size()

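The incore.h matches show the structural trick behind every `ip->i_inode` reference in this directory: struct gfs2_inode embeds the VFS inode as its i_inode member (incore.h:383), and GFS2_I() uses container_of() to recover the surrounding gfs2_inode from a struct inode pointer (incore.h:414). A minimal userspace sketch of the same pattern, with placeholder struct bodies rather than the real kernel layouts:

    /*
     * Sketch of the embedded-inode pattern: given &gip->i_inode, container_of()
     * subtracts the member offset to get back the containing gfs2_inode.
     */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct inode {                  /* stand-in for the VFS inode */
            unsigned int i_mode;
    };

    struct gfs2_inode {             /* stand-in for the GFS2 in-core inode */
            unsigned long i_no_addr;
            struct inode i_inode;   /* embedded VFS inode, as in incore.h:383 */
    };

    static struct gfs2_inode *GFS2_I(struct inode *inode)
    {
            return container_of(inode, struct gfs2_inode, i_inode);
    }

    int main(void)
    {
            struct gfs2_inode gip = { .i_no_addr = 42 };
            struct inode *inode = &gip.i_inode;   /* what the VFS layer sees */

            /* Recover the embedding gfs2_inode and read a GFS2-specific field. */
            printf("no_addr = %lu\n", GFS2_I(inode)->i_no_addr);
            return 0;
    }

This is why the listings above constantly pass &ip->i_inode to generic VFS helpers (igrab(), i_size_read(), truncate_inode_pages(), ...) while GFS2-specific code works with ip itself.
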
file.c
    600   !IS_NOATIME(&ip->i_inode)) {  in gfs2_mmap()
    683   if (S_ISREG(ip->i_inode.i_mode)) {  in gfs2_open()
    826   if (offset + len > i_size_read(&ip->i_inode))  in gfs2_file_direct_write()
    1027  const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in calc_max_reserv()
    1285  error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,  in do_flock()
    1306  gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);  in do_flock()

log.c
    726  if (ip->i_inode.i_mapping->nrpages == 0) {  in gfs2_ordered_write()
    732  filemap_fdatawrite(ip->i_inode.i_mapping);  in gfs2_ordered_write()
    747  if (ip->i_inode.i_mapping->nrpages == 0)  in gfs2_ordered_wait()
    750  filemap_fdatawait(ip->i_inode.i_mapping);  in gfs2_ordered_wait()
    758  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in gfs2_ordered_del_inode()

inode.h
    42   return S_ISDIR(ip->i_inode.i_mode);  in gfs2_is_dir()

main.c
    38   inode_init_once(&ip->i_inode);  in gfs2_init_inode_once()

aops.c
    435  u64 dsize = i_size_read(&ip->i_inode);  in stuffed_readpage()
    514  struct address_space *mapping = ip->i_inode.i_mapping;  in gfs2_internal_read()

util.c
    448  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);  in gfs2_consist_inode_i()

recovery.c
    42   error = gfs2_get_extent(&ip->i_inode, blk, &dblock, &extlen);  in gfs2_replay_read_block()

glock.c
    849  if (ip && !igrab(&ip->i_inode))  in gfs2_try_evict()
    857  d_prune_aliases(&ip->i_inode);  in gfs2_try_evict()
    858  iput(&ip->i_inode);  in gfs2_try_evict()

trace_gfs2.h
    531  (iomap->addr >> ip->i_inode.i_blkbits);

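The lone trace_gfs2.h match converts a byte address into a block number by shifting right by i_blkbits (log2 of the block size). A tiny standalone example of that arithmetic, with made-up values; only the shift mirrors the kernel expression:

    /* Illustration of addr >> i_blkbits: byte address to block number. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int i_blkbits = 12;          /* 4096-byte blocks: 1 << 12 */
            uint64_t addr = 8 * 4096 + 123;       /* some byte address on disk */

            uint64_t block = addr >> i_blkbits;   /* as in iomap->addr >> i_blkbits */
            printf("block %llu\n", (unsigned long long)block);  /* prints 8 */
            return 0;
    }
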