// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
			     size_t from, size_t len)
{
	struct buffer_head *head = folio_buffers(folio);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	size_t to = from + len;
	size_t start, end;

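	/*
	 * Walk every buffer head in the folio; the loop terminates when it
	 * wraps back around to @head (start is only 0 on the first pass).
	 * Each buffer that overlaps [@from, @from + @len) is marked
	 * uptodate and added to the transaction as journaled data.
	 */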
	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

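	/*
	 * @create is deliberately ignored: we always map with create == 0,
	 * so a hole fails with -ENODATA instead of allocating.  Blocks are
	 * expected to have been allocated before writeback gets here.
	 */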
	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_folio
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_folio, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_folio(struct folio *folio,
				  struct writeback_control *wbc)
{
	struct inode * const inode = folio->mapping->host;
	loff_t i_size = i_size_read(inode);

	/*
	 * The folio straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (folio_pos(folio) < i_size &&
	    i_size < folio_pos(folio) + folio_size(folio))
		folio_zero_segment(folio, offset_in_folio(folio, i_size),
				   folio_size(folio));

	return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc,
					wbc);
}

/**
 * __gfs2_jdata_write_folio - The core of jdata writepage
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * Implements the core of write back. If a transaction is required then
 * the checked flag will have been set and the transaction will have
 * already been started before this is called.
 */
static int __gfs2_jdata_write_folio(struct folio *folio,
				    struct writeback_control *wbc)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

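	/*
	 * A checked folio was dirtied without its buffers being journaled.
	 * Our caller has already started the transaction, so create any
	 * missing buffers and add them all to it here.
	 */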
	if (folio_test_checked(folio)) {
		folio_clear_checked(folio);
		if (!folio_buffers(folio)) {
			create_empty_buffers(folio,
					     inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_trans_add_databufs(ip, folio, 0, folio_size(folio));
	}
	return gfs2_write_jdata_folio(folio, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

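	/*
	 * A checked folio needs a transaction that only ->writepages can
	 * set up, and we must not recurse into an already running
	 * transaction, so in either case just redirty the folio and let
	 * ->writepages handle it later.
	 */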
	if (gfs2_assert_withdraw(sdp, ip->i_gl->gl_state == LM_ST_EXCLUSIVE))
		goto out;
	if (folio_test_checked(folio) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_write_folio(folio, wbc);

out_ignore:
	folio_redirty_for_writepage(wbc, folio);
out:
	folio_unlock(folio);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write enough pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0 && wbc->nr_to_write > 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
 * @mapping: The mapping
 * @wbc: The writeback control
 * @fbatch: The batch of folios
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_batch(struct address_space *mapping,
				  struct writeback_control *wbc,
				  struct folio_batch *fbatch,
				  pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks;
	int i;
	int ret;
	size_t size = 0;
	int nr_folios = folio_batch_count(fbatch);

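	/*
	 * Size the transaction for the worst case: every block covered by
	 * the batch might have to be journaled.
	 */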
	for (i = 0; i < nr_folios; i++)
		size += folio_size(fbatch->folios[i]);
	nrblocks = size >> inode->i_blkbits;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_folios; i++) {
		struct folio *folio = fbatch->folios[i];

		*done_index = folio->index;

		folio_lock(folio);

		if (unlikely(folio->mapping != mapping)) {
continue_unlock:
			folio_unlock(folio);
			continue;
		}

		if (!folio_test_dirty(folio)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (folio_test_writeback(folio)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				folio_wait_writeback(folio);
			else
				goto continue_unlock;
		}

		BUG_ON(folio_test_writeback(folio));
		if (!folio_clear_dirty_for_io(folio))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_write_folio(folio, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				folio_unlock(folio);
				ret = 0;
			} else {

				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = folio_next_index(folio);
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct folio_batch fbatch;
	int nr_folios;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	folio_batch_init(&fbatch);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
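	/*
	 * For data integrity syncs, tag all currently dirty pages up front
	 * so that pages dirtied after this point are not written twice;
	 * background writeback just walks the plain dirty tag.
	 */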
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_folios = filemap_get_folios_tag(mapping, &index, end,
						   tag, &fbatch);
		if (nr_folios == 0)
			break;

		ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,
					     &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

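	/*
	 * For data integrity syncs, flush the log after the first pass and
	 * then write again to pick up folios (e.g. pinned ones) that could
	 * not go out the first time around.
	 */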
	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_read_folio - Fill in a Linux folio with stuffed file data
 * @ip: the inode
 * @folio: the folio
 *
 * Returns: errno
 */
static int stuffed_read_folio(struct gfs2_inode *ip, struct folio *folio)
{
	struct buffer_head *dibh = NULL;
	size_t dsize = i_size_read(&ip->i_inode);
	void *from = NULL;
	int error = 0;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero folio in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(folio->index)) {
		dsize = 0;
	} else {
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error)
			goto out;
		from = dibh->b_data + sizeof(struct gfs2_dinode);
	}

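	/* Copy the stuffed data (if any) and zero the rest of the folio. */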
	folio_fill_tail(folio, 0, from, dsize);
	brelse(dibh);
out:
	folio_end_read(folio, error == 0);

	return error;
}

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

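	/*
	 * Non-jdata inodes, and jdata folios without buffer heads when the
	 * block size matches the page size, go through iomap; stuffed
	 * inodes are copied out of the dinode; anything else takes the
	 * buffer-head based mpage path.
	 */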
	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		error = iomap_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_read_folio(ip, folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (gfs2_withdrawing_or_withdrawn(sdp))
		return -EIO;

	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

ssize_t gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
			   size_t size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	size_t copied = 0;

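	/*
	 * Copy the data out folio by folio; an -EINTR from the page cache
	 * is transient, so simply retry that folio.
	 */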
	do {
		size_t offset, chunk;
		struct folio *folio;

		folio = read_cache_folio(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(folio)) {
			if (PTR_ERR(folio) == -EINTR)
				continue;
			return PTR_ERR(folio);
		}
		offset = *pos + copied - folio_pos(folio);
		chunk = min(size - copied, folio_size(folio) - offset);
		memcpy_from_folio(buf + copied, folio, offset, chunk);
		index = folio_next_index(folio);
		folio_put(folio);
		copied += chunk;
	} while(copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

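	/* Stuffed files live in the dinode, so there is nothing to read ahead. */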
	if (gfs2_is_stuffed(ip))
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

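	/*
	 * The growth is the new rindex total minus what statfs currently
	 * accounts for (the master copy plus this node's local changes).
	 */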
	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

static bool jdata_dirty_folio(struct address_space *mapping,
			      struct folio *folio)
{
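	/*
	 * Folios dirtied while a transaction is running are flagged as
	 * checked; writeback will later add their buffers to a transaction
	 * before writing them out.
	 */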
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

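	/* A stuffed file has no data blocks to map, so report a hole. */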
	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

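	/*
	 * Detach the buffer from the journal under the log lock: drop it
	 * from its transaction list if it is not pinned, otherwise remove
	 * it from the journal properly under the ail lock.
	 */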
	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
	size_t stop = offset + length;
	int partial_page = (offset || length < folio_size(folio));
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

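	/*
	 * Only buffers lying entirely inside [offset, offset + length) are
	 * discarded; a buffer straddling the end of the range ends the
	 * walk early.
	 */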
	BUG_ON(!folio_test_locked(folio));
	if (!partial_page)
		folio_clear_checked(folio);
	head = folio_buffers(folio);
	if (!head)
		goto out;

	bh = head;
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		filemap_release_folio(folio, 0);
}

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list().
	 *
	 * As a workaround, we skip folios that contain dirty buffers
	 * below.  Once ->release_folio isn't called on dirty folios
	 * anymore, we can warn on dirty buffers like we used to here
	 * again.
	 */

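	/*
	 * Two passes under the log lock: first check that every buffer can
	 * be released, then detach and free the bufdata structures.
	 */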
	gfs2_log_lock(sdp);
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(folio);

cannot_release:
	gfs2_log_unlock(sdp);
	return false;
}

static const struct address_space_operations gfs2_aops = {
	.writepages = gfs2_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = iomap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.migrate_folio = filemap_migrate_folio,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_folio = generic_error_remove_folio,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = jdata_dirty_folio,
	.bmap = gfs2_bmap,
	.migrate_folio = buffer_migrate_folio,
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_folio = generic_error_remove_folio,
};


void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}