// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) International Business Machines Corp., 2000-2004
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 */

#include <linux/fs.h>
#include <linux/mpage.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/uio.h>
#include <linux/writeback.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_filsys.h"
#include "jfs_imap.h"
#include "jfs_extent.h"
#include "jfs_unicode.h"
#include "jfs_debug.h"
#include "jfs_dmap.h"

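/*
 * Look up an in-core inode by number, reading it from disk via diRead()
 * if it is not already cached, and wire up the inode, file, and
 * address-space operations according to the file type.
 */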
struct inode *jfs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	int ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ret = diRead(inode);
	if (ret < 0) {
		iget_failed(inode);
		return ERR_PTR(ret);
	}

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &jfs_file_inode_operations;
		inode->i_fop = &jfs_file_operations;
		inode->i_mapping->a_ops = &jfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &jfs_dir_inode_operations;
		inode->i_fop = &jfs_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		if (inode->i_size >= IDATASIZE) {
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			inode->i_mapping->a_ops = &jfs_aops;
		} else {
			inode->i_op = &jfs_fast_symlink_inode_operations;
			inode->i_link = JFS_IP(inode)->i_inline;
			/*
			 * The inline data should be null-terminated, but
			 * don't let on-disk corruption crash the kernel
			 */
			inode->i_link[inode->i_size] = '\0';
		}
	} else {
		inode->i_op = &jfs_file_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	}
	unlock_new_inode(inode);
	return inode;
}

/*
 * Workhorse of both fsync & write_inode: commit the inode's metadata
 * changes to the journal under the per-inode commit_mutex.
 */
int jfs_commit_inode(struct inode *inode, int wait)
{
	int rc = 0;
	tid_t tid;
	static int noisy = 5;

	jfs_info("In jfs_commit_inode, inode = 0x%p", inode);

	/*
	 * Don't commit if inode has been committed since last being
	 * marked dirty, or if it has been deleted.
	 */
	if (inode->i_nlink == 0 || !test_cflag(COMMIT_Dirty, inode))
		return 0;

	if (isReadOnly(inode)) {
		/* kernel allows writes to devices on read-only
		 * partitions and may think inode is dirty
		 */
		if (!special_file(inode->i_mode) && noisy) {
			jfs_err("jfs_commit_inode(0x%p) called on read-only volume",
				inode);
			jfs_err("Is remount racy?");
			noisy--;
		}
		return 0;
	}

	tid = txBegin(inode->i_sb, COMMIT_INODE);
	mutex_lock(&JFS_IP(inode)->commit_mutex);

	/*
	 * Retest inode state after taking commit_mutex
	 */
	if (inode->i_nlink && test_cflag(COMMIT_Dirty, inode))
		rc = txCommit(tid, 1, &inode, wait ? COMMIT_SYNC : 0);

	txEnd(tid);
	mutex_unlock(&JFS_IP(inode)->commit_mutex);
	return rc;
}

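/*
 * ->write_inode: commit a dirty inode, synchronously if the writeback
 * request asks for it.  If the inode was already committed since it was
 * last changed, just make sure the journal has reached the disk.
 */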
int jfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int wait = wbc->sync_mode == WB_SYNC_ALL;

	if (inode->i_nlink == 0)
		return 0;
	/*
	 * If COMMIT_Dirty is not set, the inode isn't really dirty.
	 * It has been committed since the last change, but was still
	 * on the dirty inode list.
	 */
	if (!test_cflag(COMMIT_Dirty, inode)) {
		/* Make sure committed changes hit the disk */
		jfs_flush_journal(JFS_SBI(inode->i_sb)->log, wait);
		return 0;
	}

	if (jfs_commit_inode(inode, wait)) {
		jfs_err("jfs_write_inode: jfs_commit_inode failed!");
		return -EIO;
	}
	return 0;
}

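/*
 * Final teardown when the last reference to the inode is dropped.  An
 * unlinked inode has its data truncated and its disk inode and quota
 * freed; any active allocation-group reference is released either way.
 */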
void jfs_evict_inode(struct inode *inode)
{
	struct jfs_inode_info *ji = JFS_IP(inode);

	jfs_info("In jfs_evict_inode, inode = 0x%p", inode);

	if (!inode->i_nlink && !is_bad_inode(inode)) {
		dquot_initialize(inode);

		if (JFS_IP(inode)->fileset == FILESYSTEM_I) {
			truncate_inode_pages_final(&inode->i_data);

			if (test_cflag(COMMIT_Freewmap, inode))
				jfs_free_zero_link(inode);

			diFree(inode);

			/*
			 * Free the inode from the quota allocation.
			 */
			dquot_free_inode(inode);
		}
	} else {
		truncate_inode_pages_final(&inode->i_data);
	}
	clear_inode(inode);
	dquot_drop(inode);

	BUG_ON(!list_empty(&ji->anon_inode_list));

	spin_lock_irq(&ji->ag_lock);
	if (ji->active_ag != -1) {
		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;

		atomic_dec(&bmap->db_active[ji->active_ag]);
		ji->active_ag = -1;
	}
	spin_unlock_irq(&ji->ag_lock);
}

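/*
 * ->dirty_inode: JFS tracks dirtiness with its own COMMIT_Dirty flag,
 * which jfs_commit_inode() later tests.
 */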
void jfs_dirty_inode(struct inode *inode, int flags)
{
	static int noisy = 5;

	if (isReadOnly(inode)) {
		if (!special_file(inode->i_mode) && noisy) {
			/* kernel allows writes to devices on read-only
			 * partitions and may try to mark inode dirty
			 */
			jfs_err("jfs_dirty_inode called on read-only volume");
			jfs_err("Is remount racy?");
			noisy--;
		}
		return;
	}

	set_cflag(COMMIT_Dirty, inode);
}

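/*
 * Map a file block to an on-disk block for the generic buffer I/O
 * paths.  With create set, extents are allocated, and allocated-but-
 * not-recorded (ABNR) extents are recorded, as needed.
 */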
int jfs_get_block(struct inode *ip, sector_t lblock,
		  struct buffer_head *bh_result, int create)
{
	s64 lblock64 = lblock;
	int rc = 0;
	xad_t xad;
	s64 xaddr;
	int xflag;
	s32 xlen = bh_result->b_size >> ip->i_blkbits;

	/*
	 * Take appropriate lock on inode
	 */
	if (create)
		IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
	else
		IREAD_LOCK(ip, RDWRLOCK_NORMAL);

	if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) &&
	    (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) &&
	    xaddr) {
		if (xflag & XAD_NOTRECORDED) {
			if (!create)
				/*
				 * Allocated but not recorded, read treats
				 * this as a hole
				 */
				goto unlock;
#ifdef _JFS_4K
			XADoffset(&xad, lblock64);
			XADlength(&xad, xlen);
			XADaddress(&xad, xaddr);
#else /* _JFS_4K */
			/*
			 * As long as block size = 4K, this isn't a problem.
			 * We should mark the whole page not ABNR, but how
			 * will we know to mark the other blocks BH_New?
			 */
			BUG();
#endif /* _JFS_4K */
			rc = extRecord(ip, &xad);
			if (rc)
				goto unlock;
			set_buffer_new(bh_result);
		}

		map_bh(bh_result, ip->i_sb, xaddr);
		bh_result->b_size = xlen << ip->i_blkbits;
		goto unlock;
	}
	if (!create)
		goto unlock;

	/*
	 * Allocate a new block
	 */
#ifdef _JFS_4K
	rc = extHint(ip, lblock64 << ip->i_sb->s_blocksize_bits, &xad);
	if (rc)
		goto unlock;
	rc = extAlloc(ip, xlen, lblock64, &xad, false);
	if (rc)
		goto unlock;

	set_buffer_new(bh_result);
	map_bh(bh_result, ip->i_sb, addressXAD(&xad));
	bh_result->b_size = lengthXAD(&xad) << ip->i_blkbits;

#else /* _JFS_4K */
	/*
	 * We need to do whatever it takes to keep all but the last buffers
	 * in 4K pages - see jfs_write.c
	 */
	BUG();
#endif /* _JFS_4K */

unlock:
	/*
	 * Release lock on inode
	 */
	if (create)
		IWRITE_UNLOCK(ip);
	else
		IREAD_UNLOCK(ip);
	return rc;
}

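/*
 * Address-space operations: thin wrappers around the generic mpage and
 * nobh helpers, all keyed off jfs_get_block.
 */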
static int jfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, jfs_get_block, wbc);
}

static int jfs_writepages(struct address_space *mapping,
			  struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, jfs_get_block);
}

static int jfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, jfs_get_block);
}

static void jfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, jfs_get_block);
}

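/*
 * A failed write may have instantiated blocks beyond i_size; trim the
 * page cache and the over-allocated extents back to the old size.
 */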
static void jfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		jfs_truncate(inode);
	}
}

static int jfs_write_begin(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned flags,
			   struct page **pagep, void **fsdata)
{
	int ret;

	ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
			       jfs_get_block);
	if (unlikely(ret))
		jfs_write_failed(mapping, pos + len);

	return ret;
}

static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, jfs_get_block);
}

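/*
 * Direct I/O through the generic blockdev path, cleaning up any blocks
 * instantiated beyond i_size if an extending write fails.
 */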
static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file->f_mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	ret = blockdev_direct_IO(iocb, inode, iter, jfs_get_block);

	/*
	 * In case of an error, an extending write may have instantiated
	 * a few blocks outside i_size. Trim these off again.
	 */
	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = iocb->ki_pos + count;

		if (end > isize)
			jfs_write_failed(mapping, end);
	}

	return ret;
}

const struct address_space_operations jfs_aops = {
	.readpage	= jfs_readpage,
	.readahead	= jfs_readahead,
	.writepage	= jfs_writepage,
	.writepages	= jfs_writepages,
	.write_begin	= jfs_write_begin,
	.write_end	= nobh_write_end,
	.bmap		= jfs_bmap,
	.direct_IO	= jfs_direct_IO,
};

/*
 * Guts of jfs_truncate.  Called with locks already held.  Can be called
 * with directory for truncating directory index table.
 */
void jfs_truncate_nolock(struct inode *ip, loff_t length)
{
	loff_t newsize;
	tid_t tid;

	ASSERT(length >= 0);

	if (test_cflag(COMMIT_Nolink, ip)) {
		xtTruncate(0, ip, length, COMMIT_WMAP);
		return;
	}

	do {
		tid = txBegin(ip->i_sb, 0);

		/*
		 * The commit_mutex cannot be taken before txBegin.
		 * txBegin may block and there is a chance the inode
		 * could be marked dirty and need to be committed
		 * before txBegin unblocks
		 */
		mutex_lock(&JFS_IP(ip)->commit_mutex);

		newsize = xtTruncate(tid, ip, length,
				     COMMIT_TRUNCATE | COMMIT_PWMAP);
		if (newsize < 0) {
			txEnd(tid);
			mutex_unlock(&JFS_IP(ip)->commit_mutex);
			break;
		}

		ip->i_mtime = ip->i_ctime = current_time(ip);
		mark_inode_dirty(ip);

		txCommit(tid, 1, &ip, 0);
		txEnd(tid);
		mutex_unlock(&JFS_IP(ip)->commit_mutex);
	} while (newsize > length);	/* Truncate isn't always atomic */
}

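/*
 * Truncate the inode to its current i_size: zero the partial tail of
 * the last page, then drop the extents under the inode write lock.
 */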
void jfs_truncate(struct inode *ip)
{
	jfs_info("jfs_truncate: size = 0x%lx", (ulong) ip->i_size);

	nobh_truncate_page(ip->i_mapping, ip->i_size, jfs_get_block);

	IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
	jfs_truncate_nolock(ip, ip->i_size);
	IWRITE_UNLOCK(ip);
}