mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/
synced 2025-04-19 20:58:31 +09:00
f2fs-for-6.15-rc1

Merge tag 'f2fs-for-6.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs

Pull f2fs updates from Jaegeuk Kim:
 "In this round, there are three major updates: (1) folio conversion,
  (2) refactoring for the mount API conversion, and (3) performance
  improvements such as direct IO, checkpoint speed, and IO priority
  hints. For stability, there are patches which add more sanity checks
  and fix some major issues like i_size in atomic write operations and
  write pointer recovery in zoned devices.

  Enhancements:
   - huge folio conversion work by Matthew Wilcox
   - clean up for mount API conversion by Eric Sandeen
   - improve direct IO speed in the overwrite case
   - add some sanity checks on node consistency
   - set the highest IO priority for the checkpoint thread
   - keep POSIX_FADV_NOREUSE ranges and add a sysfs entry to reclaim pages
   - add an ioctl to get the IO priority hint
   - add a carve_out sysfs node for fsstat

  Bug fixes:
   - disable nat_bits during umount to avoid potential nat entry corruption
   - fix missing i_size update on atomic writes
   - fix missing discard for active segments
   - fix running out of free segments
   - fix out-of-bounds access in f2fs_truncate_inode_blocks()
   - call f2fs_recover_quota_end() correctly
   - fix potential deadloop in prepare_compress_overwrite()
   - fix the missing write pointer correction for zoned devices
   - fix to avoid panic once fallocate fails for pinfile
   - don't retry IO for corrupted data scenario

  There are many other clean up patches and minor bug fixes as usual"

* tag 'f2fs-for-6.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (68 commits)
  f2fs: fix missing discard for active segments
  f2fs: optimize f2fs DIO overwrites
  f2fs: fix to avoid atomicity corruption of atomic file
  f2fs: pass sbi rather than sb to parse_options()
  f2fs: pass sbi rather than sb to quota qf_name helpers
  f2fs: defer readonly check vs norecovery
  f2fs: Pass sbi rather than sb to f2fs_set_test_dummy_encryption
  f2fs: make LAZYTIME a mount option flag
  f2fs: make INLINECRYPT a mount option flag
  f2fs: factor out an f2fs_default_check function
  f2fs: consolidate unsupported option handling errors
  f2fs: use f2fs_sb_has_device_alias during option parsing
  f2fs: add carve_out sysfs node
  f2fs: fix to avoid running out of free segments
  f2fs: Remove f2fs_write_node_page()
  f2fs: Remove f2fs_write_meta_page()
  f2fs: Remove f2fs_write_data_page()
  f2fs: Remove check for ->writepage
  Revert "f2fs: rebuild nat_bits during umount"
  f2fs: fix to avoid accessing uninitialized curseg
  ...
This commit is contained in:

commit 81d8e5e213
@@ -734,6 +734,7 @@ Description:	Support configuring fault injection type, should be
 			FAULT_BLKADDR_VALIDITY		0x000040000
 			FAULT_BLKADDR_CONSISTENCE	0x000080000
 			FAULT_NO_SEGMENT		0x000100000
+			FAULT_INCONSISTENT_FOOTER	0x000200000
 			===========================	===========

 What:		/sys/fs/f2fs/<disk>/discard_io_aware_gran
@@ -828,3 +829,20 @@ Date:		November 2024
 Contact:	"Chao Yu" <chao@kernel.org>
 Description:	It controls the max read extent count per inode; the threshold
		is 10240 by default.
+
+What:		/sys/fs/f2fs/tuning/reclaim_caches_kb
+Date:		February 2025
+Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:	It reclaims the given KBs of file-backed pages registered by
+		ioctl(F2FS_IOC_DONATE_RANGE).
+		For example, writing N tries to drop N KBs of space from the LRU.
+
+What:		/sys/fs/f2fs/<disk>/carve_out
+Date:		March 2025
+Contact:	"Daeho Jeong" <daehojeong@google.com>
+Description:	For several zoned storage devices, vendors provide extra space
+		beyond the spec, which is used for device-level GC, and F2FS can
+		use this space for filesystem-level GC. To do that, we can
+		reserve the space using reserved_blocks. However, that is not
+		enough, since this extra space should not be shown to users. So,
+		with this new sysfs node, we can hide the space by subtracting
+		reserved_blocks from total bytes.
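The reclaim node pairs with the POSIX_FADV_NOREUSE path added in fs/f2fs/file.c below: an application first registers ranges it will not reuse, then a privileged task writes to reclaim_caches_kb to drop that many KBs of donated page cache. A minimal userspace sketch of the reclaim side; the sysfs path comes from the ABI entry above, while the 4096 KB amount and the error handling are illustrative assumptions.

/* Sketch: ask f2fs to drop up to 4096 KB of donated, file-backed pages.
 * Assumes a mounted f2fs and root privileges; the amount is arbitrary. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *node = "/sys/fs/f2fs/tuning/reclaim_caches_kb";
	const char *kb = "4096";	/* writing N tries to drop N KBs */
	int fd = open(node, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, kb, strlen(kb)) < 0)
		perror("write");
	close(fd);
	return 0;
}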
@@ -206,6 +206,7 @@ fault_type=%d		 Support configuring fault injection type, should be
			 FAULT_BLKADDR_VALIDITY		0x000040000
			 FAULT_BLKADDR_CONSISTENCE	0x000080000
			 FAULT_NO_SEGMENT		0x000100000
+			 FAULT_INCONSISTENT_FOOTER	0x000200000
			 ===========================	===========
 mode=%s		 Control block allocation mode which supports "adaptive"
			 and "lfs". In "lfs" mode, there should be no random
@@ -365,6 +366,8 @@ errors=%s		 Specify f2fs behavior on critical errors. This supports modes:
			 pending node write	drop	keep	N/A
			 pending meta write	keep	keep	N/A
			 ====================== =============== =============== ========
+nat_bits		 Enable nat_bits feature to enhance full/empty nat blocks access,
+			 by default it's disabled.
 ======================== ============================================================

 Debugfs Entries
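Since nat_bits is now an ordinary mount option flag, it is enabled through the option string at mount time. A small sketch using mount(2); the device and mount point are placeholder paths, and the data argument simply carries the f2fs option string through to the filesystem.

/* Sketch: enable the nat_bits mount option via mount(2).
 * /dev/vdb and /mnt/f2fs are placeholder paths. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("/dev/vdb", "/mnt/f2fs", "f2fs", 0, "nat_bits")) {
		perror("mount");
		return 1;
	}
	return 0;
}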
@@ -21,7 +21,7 @@
 #include "iostat.h"
 #include <trace/events/f2fs.h>

-#define DEFAULT_CHECKPOINT_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
+#define DEFAULT_CHECKPOINT_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 3))

 static struct kmem_cache *ino_entry_slab;
 struct kmem_cache *f2fs_inode_entry_slab;
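This bumps the checkpoint thread's default IO priority from the best-effort class to real-time. As a rough illustration (not part of the patch), IOPRIO_PRIO_VALUE() from the uapi ioprio header just packs the scheduling class and level into one word, so the two defines differ only in the class bits:

/* Illustration only: how IOPRIO_PRIO_VALUE() composes a priority. */
#include <stdio.h>
#include <linux/ioprio.h>

int main(void)
{
	printf("old (BE,3): %#x\n", IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3));
	printf("new (RT,3): %#x\n", IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 3));
	return 0;
}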
@@ -58,7 +58,7 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
						bool is_meta)
 {
 	struct address_space *mapping = META_MAPPING(sbi);
-	struct page *page;
+	struct folio *folio;
 	struct f2fs_io_info fio = {
 		.sbi = sbi,
 		.type = META,
@@ -74,37 +74,37 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
 	if (unlikely(!is_meta))
 		fio.op_flags &= ~REQ_META;
 repeat:
-	page = f2fs_grab_cache_page(mapping, index, false);
-	if (!page) {
+	folio = f2fs_grab_cache_folio(mapping, index, false);
+	if (IS_ERR(folio)) {
 		cond_resched();
 		goto repeat;
 	}
-	if (PageUptodate(page))
+	if (folio_test_uptodate(folio))
 		goto out;

-	fio.page = page;
+	fio.page = &folio->page;

 	err = f2fs_submit_page_bio(&fio);
 	if (err) {
-		f2fs_put_page(page, 1);
+		f2fs_folio_put(folio, true);
 		return ERR_PTR(err);
 	}

 	f2fs_update_iostat(sbi, NULL, FS_META_READ_IO, F2FS_BLKSIZE);

-	lock_page(page);
-	if (unlikely(page->mapping != mapping)) {
-		f2fs_put_page(page, 1);
+	folio_lock(folio);
+	if (unlikely(folio->mapping != mapping)) {
+		f2fs_folio_put(folio, true);
 		goto repeat;
 	}

-	if (unlikely(!PageUptodate(page))) {
-		f2fs_handle_page_eio(sbi, page_folio(page), META);
-		f2fs_put_page(page, 1);
+	if (unlikely(!folio_test_uptodate(folio))) {
+		f2fs_handle_page_eio(sbi, folio, META);
+		f2fs_folio_put(folio, true);
 		return ERR_PTR(-EIO);
 	}
 out:
-	return page;
+	return &folio->page;
 }

 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
@@ -381,12 +381,6 @@ redirty_out:
 	return AOP_WRITEPAGE_ACTIVATE;
 }

-static int f2fs_write_meta_page(struct page *page,
-				struct writeback_control *wbc)
-{
-	return __f2fs_write_meta_page(page, wbc, FS_META_IO);
-}
-
 static int f2fs_write_meta_pages(struct address_space *mapping,
 				struct writeback_control *wbc)
 {
@@ -507,7 +501,6 @@ static bool f2fs_dirty_meta_folio(struct address_space *mapping,
 }

 const struct address_space_operations f2fs_meta_aops = {
-	.writepage	= f2fs_write_meta_page,
 	.writepages	= f2fs_write_meta_pages,
 	.dirty_folio	= f2fs_dirty_meta_folio,
 	.invalidate_folio = f2fs_invalidate_folio,
@@ -1237,7 +1230,7 @@ static int block_operations(struct f2fs_sb_info *sbi)
 retry_flush_quotas:
 	f2fs_lock_all(sbi);
 	if (__need_flush_quota(sbi)) {
-		int locked;
+		bool need_lock = sbi->umount_lock_holder != current;

 		if (++cnt > DEFAULT_RETRY_QUOTA_FLUSH_COUNT) {
 			set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
@@ -1246,11 +1239,13 @@ retry_flush_quotas:
 		}
 		f2fs_unlock_all(sbi);

-		/* only failed during mount/umount/freeze/quotactl */
-		locked = down_read_trylock(&sbi->sb->s_umount);
-		f2fs_quota_sync(sbi->sb, -1);
-		if (locked)
+		/* don't grab s_umount lock during mount/umount/remount/freeze/quotactl */
+		if (!need_lock) {
+			f2fs_do_quota_sync(sbi->sb, -1);
+		} else if (down_read_trylock(&sbi->sb->s_umount)) {
+			f2fs_do_quota_sync(sbi->sb, -1);
 			up_read(&sbi->sb->s_umount);
+		}
 		cond_resched();
 		goto retry_flush_quotas;
 	}
@@ -1344,21 +1339,13 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
 	unsigned long flags;

-	if (cpc->reason & CP_UMOUNT) {
-		if (le32_to_cpu(ckpt->cp_pack_total_block_count) +
-			NM_I(sbi)->nat_bits_blocks > BLKS_PER_SEG(sbi)) {
-			clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
-			f2fs_notice(sbi, "Disable nat_bits due to no space");
-		} else if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG) &&
-				f2fs_nat_bitmap_enabled(sbi)) {
-			f2fs_enable_nat_bits(sbi);
-			set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
-			f2fs_notice(sbi, "Rebuild and enable nat_bits");
-		}
-	}
-
 	spin_lock_irqsave(&sbi->cp_lock, flags);

+	if ((cpc->reason & CP_UMOUNT) &&
+		le32_to_cpu(ckpt->cp_pack_total_block_count) >
+			sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks)
+		disable_nat_bits(sbi, false);
+
 	if (cpc->reason & CP_TRIMMED)
 		__set_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
 	else
@@ -1541,8 +1528,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	start_blk = __start_cp_next_addr(sbi);

 	/* write nat bits */
-	if ((cpc->reason & CP_UMOUNT) &&
-			is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG)) {
+	if (enabled_nat_bits(sbi, cpc)) {
 		__u64 cp_ver = cur_cp_version(ckpt);
 		block_t blk;

@@ -1867,7 +1853,8 @@ int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi)
 	struct cp_control cpc;

 	cpc.reason = __get_cp_reason(sbi);
-	if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC) {
+	if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC ||
+			sbi->umount_lock_holder == current) {
 		int ret;

 		f2fs_down_write(&sbi->gc_lock);
@@ -1150,6 +1150,7 @@ retry:
 		f2fs_compress_ctx_add_page(cc, page_folio(page));

 		if (!PageUptodate(page)) {
+			f2fs_handle_page_eio(sbi, page_folio(page), DATA);
 release_and_retry:
 			f2fs_put_rpages(cc);
 			f2fs_unlock_rpages(cc, i + 1);
fs/f2fs/data.c (190 lines changed)
@@ -319,8 +319,7 @@ static void f2fs_read_end_io(struct bio *bio)
 static void f2fs_write_end_io(struct bio *bio)
 {
 	struct f2fs_sb_info *sbi;
-	struct bio_vec *bvec;
-	struct bvec_iter_all iter_all;
+	struct folio_iter fi;

 	iostat_update_and_unbind_ctx(bio);
 	sbi = bio->bi_private;
@@ -328,34 +327,41 @@ static void f2fs_write_end_io(struct bio *bio)
 	if (time_to_inject(sbi, FAULT_WRITE_IO))
 		bio->bi_status = BLK_STS_IOERR;

-	bio_for_each_segment_all(bvec, bio, iter_all) {
-		struct page *page = bvec->bv_page;
-		enum count_type type = WB_DATA_TYPE(page, false);
+	bio_for_each_folio_all(fi, bio) {
+		struct folio *folio = fi.folio;
+		enum count_type type;

-		fscrypt_finalize_bounce_page(&page);
+		if (fscrypt_is_bounce_folio(folio)) {
+			struct folio *io_folio = folio;
+
+			folio = fscrypt_pagecache_folio(io_folio);
+			fscrypt_free_bounce_page(&io_folio->page);
+		}

 #ifdef CONFIG_F2FS_FS_COMPRESSION
-		if (f2fs_is_compressed_page(page)) {
-			f2fs_compress_write_end_io(bio, page);
+		if (f2fs_is_compressed_page(&folio->page)) {
+			f2fs_compress_write_end_io(bio, &folio->page);
 			continue;
 		}
 #endif

+		type = WB_DATA_TYPE(&folio->page, false);
+
 		if (unlikely(bio->bi_status)) {
-			mapping_set_error(page->mapping, -EIO);
+			mapping_set_error(folio->mapping, -EIO);
 			if (type == F2FS_WB_CP_DATA)
 				f2fs_stop_checkpoint(sbi, true,
 						STOP_CP_REASON_WRITE_FAIL);
 		}

-		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
-				page_folio(page)->index != nid_of_node(page));
+		f2fs_bug_on(sbi, folio->mapping == NODE_MAPPING(sbi) &&
+				folio->index != nid_of_node(&folio->page));

 		dec_page_count(sbi, type);
-		if (f2fs_in_warm_node_list(sbi, page))
-			f2fs_del_fsync_node_entry(sbi, page);
-		clear_page_private_gcing(page);
-		end_page_writeback(page);
+		if (f2fs_in_warm_node_list(sbi, folio))
+			f2fs_del_fsync_node_entry(sbi, &folio->page);
+		clear_page_private_gcing(&folio->page);
+		folio_end_writeback(folio);
 	}
 	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
 				wq_has_sleeper(&sbi->cp_wait))
@@ -413,6 +419,7 @@ int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
 static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
 {
 	unsigned int temp_mask = GENMASK(NR_TEMP_TYPE - 1, 0);
+	struct folio *fio_folio = page_folio(fio->page);
 	unsigned int fua_flag, meta_flag, io_flag;
 	blk_opf_t op_flags = 0;

@@ -438,6 +445,11 @@ static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
 		op_flags |= REQ_META;
 	if (BIT(fio->temp) & fua_flag)
 		op_flags |= REQ_FUA;
+
+	if (fio->type == DATA &&
+	    F2FS_I(fio_folio->mapping->host)->ioprio_hint == F2FS_IOPRIO_WRITE)
+		op_flags |= REQ_PRIO;
+
 	return op_flags;
 }

@@ -876,6 +888,7 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
 	struct bio *bio = *fio->bio;
 	struct page *page = fio->encrypted_page ?
 			fio->encrypted_page : fio->page;
+	struct folio *folio = page_folio(fio->page);

 	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
 			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
@@ -889,8 +902,8 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
 alloc_new:
 	if (!bio) {
 		bio = __bio_alloc(fio, BIO_MAX_VECS);
-		f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
-				page_folio(fio->page)->index, fio, GFP_NOIO);
+		f2fs_set_bio_crypt_ctx(bio, folio->mapping->host,
+				folio->index, fio, GFP_NOIO);

 		add_bio_entry(fio->sbi, bio, page, fio->temp);
 	} else {
@@ -899,8 +912,7 @@ alloc_new:
 	}

 	if (fio->io_wbc)
-		wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
-					 PAGE_SIZE);
+		wbc_account_cgroup_owner(fio->io_wbc, folio, folio_size(folio));

 	inc_page_count(fio->sbi, WB_DATA_TYPE(page, false));

@@ -1041,8 +1053,6 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
 	bio = bio_alloc_bioset(bdev, bio_max_segs(nr_pages),
 			       REQ_OP_READ | op_flag,
 			       for_write ? GFP_NOIO : GFP_KERNEL, &f2fs_bioset);
-	if (!bio)
-		return ERR_PTR(-ENOMEM);
 	bio->bi_iter.bi_sector = sector;
 	f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
 	bio->bi_end_io = f2fs_read_end_io;
@@ -1193,18 +1203,17 @@ int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
 	return err;
 }

-struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
-				     blk_opf_t op_flags, bool for_write,
-				     pgoff_t *next_pgofs)
+struct folio *f2fs_get_read_data_folio(struct inode *inode, pgoff_t index,
+		blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs)
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct dnode_of_data dn;
-	struct page *page;
+	struct folio *folio;
 	int err;

-	page = f2fs_grab_cache_page(mapping, index, for_write);
-	if (!page)
-		return ERR_PTR(-ENOMEM);
+	folio = f2fs_grab_cache_folio(mapping, index, for_write);
+	if (IS_ERR(folio))
+		return folio;

 	if (f2fs_lookup_read_extent_cache_block(inode, index,
 						&dn.data_blkaddr)) {
@@ -1239,9 +1248,9 @@ struct folio *f2fs_get_read_data_folio(struct inode *inode, pgoff_t index,
 		goto put_err;
 	}
 got_it:
-	if (PageUptodate(page)) {
-		unlock_page(page);
-		return page;
+	if (folio_test_uptodate(folio)) {
+		folio_unlock(folio);
+		return folio;
 	}

 	/*
@@ -1252,48 +1261,51 @@ got_it:
 	 * f2fs_init_inode_metadata.
 	 */
 	if (dn.data_blkaddr == NEW_ADDR) {
-		zero_user_segment(page, 0, PAGE_SIZE);
-		if (!PageUptodate(page))
-			SetPageUptodate(page);
-		unlock_page(page);
-		return page;
+		folio_zero_segment(folio, 0, folio_size(folio));
+		if (!folio_test_uptodate(folio))
+			folio_mark_uptodate(folio);
+		folio_unlock(folio);
+		return folio;
 	}

-	err = f2fs_submit_page_read(inode, page_folio(page), dn.data_blkaddr,
+	err = f2fs_submit_page_read(inode, folio, dn.data_blkaddr,
						op_flags, for_write);
 	if (err)
 		goto put_err;
-	return page;
+	return folio;

 put_err:
-	f2fs_put_page(page, 1);
+	f2fs_folio_put(folio, true);
 	return ERR_PTR(err);
 }

-struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
+struct folio *f2fs_find_data_folio(struct inode *inode, pgoff_t index,
					pgoff_t *next_pgofs)
 {
 	struct address_space *mapping = inode->i_mapping;
-	struct page *page;
+	struct folio *folio;

-	page = find_get_page_flags(mapping, index, FGP_ACCESSED);
-	if (page && PageUptodate(page))
-		return page;
-	f2fs_put_page(page, 0);
+	folio = __filemap_get_folio(mapping, index, FGP_ACCESSED, 0);
+	if (IS_ERR(folio))
+		goto read;
+	if (folio_test_uptodate(folio))
+		return folio;
+	f2fs_folio_put(folio, false);

-	page = f2fs_get_read_data_page(inode, index, 0, false, next_pgofs);
-	if (IS_ERR(page))
-		return page;
+read:
+	folio = f2fs_get_read_data_folio(inode, index, 0, false, next_pgofs);
+	if (IS_ERR(folio))
+		return folio;

-	if (PageUptodate(page))
-		return page;
+	if (folio_test_uptodate(folio))
+		return folio;

-	wait_on_page_locked(page);
-	if (unlikely(!PageUptodate(page))) {
-		f2fs_put_page(page, 0);
+	folio_wait_locked(folio);
+	if (unlikely(!folio_test_uptodate(folio))) {
+		f2fs_folio_put(folio, false);
 		return ERR_PTR(-EIO);
 	}
-	return page;
+	return folio;
 }

 /*
@@ -1301,23 +1313,23 @@ struct folio *f2fs_find_data_folio(struct inode *inode, pgoff_t index,
  * Because, the callers, functions in dir.c and GC, should be able to know
  * whether this page exists or not.
  */
-struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
+struct folio *f2fs_get_lock_data_folio(struct inode *inode, pgoff_t index,
						bool for_write)
 {
 	struct address_space *mapping = inode->i_mapping;
-	struct page *page;
+	struct folio *folio;

-	page = f2fs_get_read_data_page(inode, index, 0, for_write, NULL);
-	if (IS_ERR(page))
-		return page;
+	folio = f2fs_get_read_data_folio(inode, index, 0, for_write, NULL);
+	if (IS_ERR(folio))
+		return folio;

 	/* wait for read completion */
-	lock_page(page);
-	if (unlikely(page->mapping != mapping || !PageUptodate(page))) {
-		f2fs_put_page(page, 1);
+	folio_lock(folio);
+	if (unlikely(folio->mapping != mapping || !folio_test_uptodate(folio))) {
+		f2fs_folio_put(folio, true);
 		return ERR_PTR(-EIO);
 	}
-	return page;
+	return folio;
 }

 /*
@@ -2178,6 +2190,12 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 	int i;
 	int ret = 0;

+	if (unlikely(f2fs_cp_error(sbi))) {
+		ret = -EIO;
+		from_dnode = false;
+		goto out_put_dnode;
+	}
+
 	f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));

 	last_block_in_file = F2FS_BYTES_TO_BLK(f2fs_readpage_limit(inode) +
@@ -2221,10 +2239,6 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 	if (ret)
 		goto out;

-	if (unlikely(f2fs_cp_error(sbi))) {
-		ret = -EIO;
-		goto out_put_dnode;
-	}
 	f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);

 skip_reading_dnode:
@@ -2921,29 +2935,6 @@ redirty_out:
 	return err;
 }

-static int f2fs_write_data_page(struct page *page,
-				struct writeback_control *wbc)
-{
-	struct folio *folio = page_folio(page);
-#ifdef CONFIG_F2FS_FS_COMPRESSION
-	struct inode *inode = folio->mapping->host;
-
-	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
-		goto out;
-
-	if (f2fs_compressed_file(inode)) {
-		if (f2fs_is_compressed_cluster(inode, folio->index)) {
-			folio_redirty_for_writepage(wbc, folio);
-			return AOP_WRITEPAGE_ACTIVATE;
-		}
-	}
-out:
-#endif
-
-	return f2fs_write_single_data_page(folio, NULL, NULL, NULL,
-						wbc, FS_DATA_IO, 0, true);
-}
-
 /*
  * This function was copied from write_cache_pages from mm/page-writeback.c.
  * The major change is making write step of cold data page separately from
@@ -3266,10 +3257,6 @@ static int __f2fs_write_data_pages(struct address_space *mapping,
 	int ret;
 	bool locked = false;

-	/* deal with chardevs and other special file */
-	if (!mapping->a_ops->writepage)
-		return 0;
-
 	/* skip writing if there is no dirty page in this inode */
 	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
 		return 0;
@@ -3390,7 +3377,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,

 restart:
 	/* check inline_data */
-	ipage = f2fs_get_node_page(sbi, inode->i_ino);
+	ipage = f2fs_get_inode_page(sbi, inode->i_ino);
 	if (IS_ERR(ipage)) {
 		err = PTR_ERR(ipage);
 		goto unlock_out;
@@ -3453,7 +3440,7 @@ static int __find_data_block(struct inode *inode, pgoff_t index,
 	struct page *ipage;
 	int err = 0;

-	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
+	ipage = f2fs_get_inode_page(F2FS_I_SB(inode), inode->i_ino);
 	if (IS_ERR(ipage))
 		return PTR_ERR(ipage);

@@ -3483,7 +3470,7 @@ static int __reserve_data_block(struct inode *inode, pgoff_t index,

 	f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);

-	ipage = f2fs_get_node_page(sbi, inode->i_ino);
+	ipage = f2fs_get_inode_page(sbi, inode->i_ino);
 	if (IS_ERR(ipage)) {
 		err = PTR_ERR(ipage);
 		goto unlock_out;
@@ -4101,7 +4088,6 @@ static void f2fs_swap_deactivate(struct file *file)
 const struct address_space_operations f2fs_dblock_aops = {
 	.read_folio	= f2fs_read_data_folio,
 	.readahead	= f2fs_readahead,
-	.writepage	= f2fs_write_data_page,
 	.writepages	= f2fs_write_data_pages,
 	.write_begin	= f2fs_write_begin,
 	.write_end	= f2fs_write_end,
@@ -4195,7 +4181,13 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 	map.m_next_pgofs = &next_pgofs;
 	map.m_seg_type = f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode),
 						inode->i_write_hint);
-	if (flags & IOMAP_WRITE)
+
+	/*
+	 * If the blocks being overwritten are already allocated,
+	 * f2fs_map_lock and f2fs_balance_fs are not necessary.
+	 */
+	if ((flags & IOMAP_WRITE) &&
+	    !f2fs_overwrite_io(inode, offset, length))
 		map.m_may_create = true;

 	err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DIO);
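With this, an O_DIRECT write that lands entirely on already-allocated blocks skips f2fs_map_lock and f2fs_balance_fs, since no allocation can happen. A hedged userspace sketch of the pattern that benefits (paths and sizes are arbitrary; the first write allocates the block, the second is the pure overwrite the fast path targets, and the buffer contents are irrelevant):

/* Sketch: O_DIRECT overwrite of an already-written block. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	int fd = open("/mnt/f2fs/file.db", O_RDWR | O_CREAT | O_DIRECT, 0600);

	if (fd < 0)
		return 1;
	if (posix_memalign(&buf, 4096, 4096))	/* O_DIRECT alignment */
		return 1;
	pwrite(fd, buf, 4096, 0);	/* allocates the block */
	pwrite(fd, buf, 4096, 0);	/* pure overwrite: mapping is read-only */
	free(buf);
	close(fd);
	return 0;
}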
@@ -164,6 +164,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
 	si->ndirty_imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
 	si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE];
 	si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
+	si->ndonate_files = sbi->donate_files;
 	si->nquota_files = sbi->nquota_files;
 	si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
 	si->aw_cnt = atomic_read(&sbi->atomic_files);
@@ -501,6 +502,8 @@ static int stat_show(struct seq_file *s, void *v)
 			   si->compr_inode, si->compr_blocks);
 		seq_printf(s, "  - Swapfile Inode: %u\n",
 			   si->swapfile_inode);
+		seq_printf(s, "  - Donate Inode: %u\n",
+			   si->ndonate_files);
 		seq_printf(s, "  - Orphan/Append/Update Inode: %u, %u, %u\n",
 			   si->orphans, si->append, si->update);
 		seq_printf(s, "\nMain area: %d segs, %d secs %d zones\n",
@@ -551,7 +551,7 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
 			goto put_error;
 		}
 	} else {
-		page = f2fs_get_node_page(F2FS_I_SB(dir), inode->i_ino);
+		page = f2fs_get_inode_page(F2FS_I_SB(dir), inode->i_ino);
 		if (IS_ERR(page))
 			return page;
 	}
fs/f2fs/f2fs.h (157 lines changed)
@@ -62,6 +62,7 @@ enum {
 	FAULT_BLKADDR_VALIDITY,
 	FAULT_BLKADDR_CONSISTENCE,
 	FAULT_NO_SEGMENT,
+	FAULT_INCONSISTENT_FOOTER,
 	FAULT_MAX,
 };

@@ -114,6 +115,13 @@ extern const char *f2fs_fault_name[FAULT_MAX];
 #define F2FS_MOUNT_GC_MERGE		0x02000000
 #define F2FS_MOUNT_COMPRESS_CACHE	0x04000000
 #define F2FS_MOUNT_AGE_EXTENT_CACHE	0x08000000
+#define F2FS_MOUNT_NAT_BITS		0x10000000
+#define F2FS_MOUNT_INLINECRYPT		0x20000000
+/*
+ * Some f2fs environments expect to be able to pass the "lazytime" option
+ * string rather than using the MS_LAZYTIME flag, so this must remain.
+ */
+#define F2FS_MOUNT_LAZYTIME		0x40000000

 #define F2FS_OPTION(sbi)	((sbi)->mount_opt)
 #define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
@@ -830,6 +838,7 @@ struct f2fs_inode_info {

 	/* Use below internally in f2fs*/
 	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
+	unsigned int ioprio_hint;		/* hint for IO priority */
 	struct f2fs_rwsem i_sem;		/* protect fi info */
 	atomic_t dirty_pages;			/* # of dirty pages */
 	f2fs_hash_t chash;			/* hash value of given file name */
@@ -849,6 +858,11 @@ struct f2fs_inode_info {
 #endif
 	struct list_head dirty_list;	/* dirty list for dirs and files */
 	struct list_head gdirty_list;	/* linked in global dirty list */
+
+	/* linked in global inode list for cache donation */
+	struct list_head gdonate_list;
+	pgoff_t donate_start, donate_end;	/* inclusive */
+
 	struct task_struct *atomic_write_task;	/* store atomic write task */
 	struct extent_tree *extent_tree[NR_EXTENT_CACHES];
 					/* cached extent_tree entry */
@@ -1273,6 +1287,7 @@ enum inode_type {
 	DIR_INODE,			/* for dirty dir inode */
 	FILE_INODE,			/* for dirty regular/symlink inode */
 	DIRTY_META,			/* for all dirtied inode metadata */
+	DONATE_INODE,			/* for all inode to donate pages */
 	NR_INODE_TYPE,
 };

@@ -1628,6 +1643,9 @@ struct f2fs_sb_info {
 	unsigned int warm_data_age_threshold;
 	unsigned int last_age_weight;

+	/* control donate caches */
+	unsigned int donate_files;
+
 	/* basic filesystem units */
 	unsigned int log_sectors_per_block;	/* log2 sectors per block */
 	unsigned int log_blocksize;		/* log2 block size */
@@ -1659,6 +1677,7 @@ struct f2fs_sb_info {

 	unsigned int nquota_files;		/* # of quota sysfile */
 	struct f2fs_rwsem quota_sem;		/* blocking cp for flags */
+	struct task_struct *umount_lock_holder;	/* s_umount lock holder */

 	/* # of pages, see count_type */
 	atomic_t nr_pages[NR_COUNT_TYPE];
@@ -1800,6 +1819,9 @@ struct f2fs_sb_info {
 	u64 committed_atomic_block;
 	u64 revoked_atomic_block;

+	/* carve out reserved_blocks from total blocks */
+	bool carve_out;
+
 #ifdef CONFIG_F2FS_FS_COMPRESSION
 	struct kmem_cache *page_array_slab;	/* page array entry */
 	unsigned int page_array_slab_size;	/* default page array slab size */
@@ -2015,7 +2037,7 @@ static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
 	return (struct f2fs_checkpoint *)(sbi->ckpt);
 }

-static inline struct f2fs_node *F2FS_NODE(struct page *page)
+static inline struct f2fs_node *F2FS_NODE(const struct page *page)
 {
 	return (struct f2fs_node *)page_address(page);
 }
@@ -2219,6 +2241,36 @@ static inline void f2fs_up_write(struct f2fs_rwsem *sem)
 #endif
 }

+static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
+{
+	unsigned long flags;
+	unsigned char *nat_bits;
+
+	/*
+	 * In order to re-enable nat_bits we need to call fsck.f2fs by
+	 * set_sbi_flag(sbi, SBI_NEED_FSCK). But it may give huge cost,
+	 * so let's rely on regular fsck or unclean shutdown.
+	 */
+
+	if (lock)
+		spin_lock_irqsave(&sbi->cp_lock, flags);
+	__clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
+	nat_bits = NM_I(sbi)->nat_bits;
+	NM_I(sbi)->nat_bits = NULL;
+	if (lock)
+		spin_unlock_irqrestore(&sbi->cp_lock, flags);
+
+	kvfree(nat_bits);
+}
+
+static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
+					struct cp_control *cpc)
+{
+	bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
+
+	return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
+}
+
 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
 {
 	f2fs_down_read(&sbi->cp_rwsem);
@@ -2765,33 +2817,46 @@ static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
 	return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
 }

-static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
-						pgoff_t index, bool for_write)
+static inline struct folio *f2fs_grab_cache_folio(struct address_space *mapping,
+		pgoff_t index, bool for_write)
 {
-	struct page *page;
+	struct folio *folio;
 	unsigned int flags;

 	if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
+		fgf_t fgf_flags;
+
 		if (!for_write)
-			page = find_get_page_flags(mapping, index,
-							FGP_LOCK | FGP_ACCESSED);
+			fgf_flags = FGP_LOCK | FGP_ACCESSED;
 		else
-			page = find_lock_page(mapping, index);
-		if (page)
-			return page;
+			fgf_flags = FGP_LOCK;
+		folio = __filemap_get_folio(mapping, index, fgf_flags, 0);
+		if (!IS_ERR(folio))
+			return folio;

 		if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC))
-			return NULL;
+			return ERR_PTR(-ENOMEM);
 	}

 	if (!for_write)
-		return grab_cache_page(mapping, index);
+		return filemap_grab_folio(mapping, index);

 	flags = memalloc_nofs_save();
-	page = grab_cache_page_write_begin(mapping, index);
+	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+					mapping_gfp_mask(mapping));
 	memalloc_nofs_restore(flags);

-	return page;
+	return folio;
 }

+static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
+						pgoff_t index, bool for_write)
+{
+	struct folio *folio = f2fs_grab_cache_folio(mapping, index, for_write);
+
+	if (IS_ERR(folio))
+		return NULL;
+	return &folio->page;
+}
+
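The compatibility wrapper above lets unconverted callers keep their NULL check while the folio API reports failure with ERR_PTR(). The per-call-site migration this series applies throughout is mechanical; a sketch with a hypothetical caller (read_one() is not from the patch):

/* Sketch of the caller migration for the NULL -> ERR_PTR() change. */
static int read_one(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio = f2fs_grab_cache_folio(mapping, index, false);

	if (IS_ERR(folio))		/* was: if (!page) */
		return PTR_ERR(folio);	/* was: return -ENOMEM */
	f2fs_folio_put(folio, true);	/* unlock and release */
	return 0;
}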
 static inline struct page *f2fs_pagecache_get_page(
@@ -2804,16 +2869,23 @@ static inline struct page *f2fs_pagecache_get_page(
 	return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
 }

+static inline void f2fs_folio_put(struct folio *folio, bool unlock)
+{
+	if (!folio)
+		return;
+
+	if (unlock) {
+		f2fs_bug_on(F2FS_F_SB(folio), !folio_test_locked(folio));
+		folio_unlock(folio);
+	}
+	folio_put(folio);
+}
+
 static inline void f2fs_put_page(struct page *page, int unlock)
 {
 	if (!page)
 		return;

-	if (unlock) {
-		f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
-		unlock_page(page);
-	}
-	put_page(page);
+	f2fs_folio_put(page_folio(page), unlock);
 }

 static inline void f2fs_put_dnode(struct dnode_of_data *dn)
@@ -3624,7 +3696,7 @@ int f2fs_inode_dirtied(struct inode *inode, bool sync);
 void f2fs_inode_synced(struct inode *inode);
 int f2fs_dquot_initialize(struct inode *inode);
 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
-int f2fs_quota_sync(struct super_block *sb, int type);
+int f2fs_do_quota_sync(struct super_block *sb, int type);
 loff_t max_file_blocks(struct inode *inode);
 void f2fs_quota_off_umount(struct super_block *sb);
 void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag);
@@ -3647,7 +3719,8 @@ struct node_info;

 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
-bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi,
+		const struct folio *folio);
 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
@@ -3662,12 +3735,14 @@ int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
 int f2fs_truncate_xattr_node(struct inode *inode);
 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
 					unsigned int seq_id);
-bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi);
 int f2fs_remove_inode_page(struct inode *inode);
 struct page *f2fs_new_inode_page(struct inode *inode);
 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
+struct folio *f2fs_get_inode_folio(struct f2fs_sb_info *sbi, pgoff_t ino);
+struct page *f2fs_get_inode_page(struct f2fs_sb_info *sbi, pgoff_t ino);
 struct page *f2fs_get_xnode_page(struct f2fs_sb_info *sbi, pgoff_t xnid);
 struct page *f2fs_get_node_page_ra(struct page *parent, int start);
 int f2fs_move_node_page(struct page *node_page, int gc_type);
 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
@@ -3687,7 +3762,6 @@ int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
 			unsigned int segno, struct f2fs_summary_block *sum);
-void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi);
 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
 int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
@@ -3758,8 +3832,10 @@ int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 			struct f2fs_io_info *fio);
 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
 			block_t blkaddr, unsigned int blkcnt);
-void f2fs_wait_on_page_writeback(struct page *page,
-			enum page_type type, bool ordered, bool locked);
+void f2fs_folio_wait_writeback(struct folio *folio, enum page_type type,
+			bool ordered, bool locked);
+#define f2fs_wait_on_page_writeback(page, type, ordered, locked)	\
+	f2fs_folio_wait_writeback(page_folio(page), type, ordered, locked)
 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
 			block_t len);
@@ -3871,11 +3947,11 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
 int f2fs_reserve_new_block(struct dnode_of_data *dn);
 int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index);
 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
-struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
-			blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs);
-struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
-			pgoff_t *next_pgofs);
-struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
+struct folio *f2fs_get_read_data_folio(struct inode *inode, pgoff_t index,
+			blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs);
+struct folio *f2fs_find_data_folio(struct inode *inode, pgoff_t index,
+			pgoff_t *next_pgofs);
+struct folio *f2fs_get_lock_data_folio(struct inode *inode, pgoff_t index,
 			bool for_write);
 struct page *f2fs_get_new_data_page(struct inode *inode,
 			struct page *ipage, pgoff_t index, bool new_i_size);
@@ -3902,6 +3978,22 @@ int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
 void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
 extern const struct iomap_ops f2fs_iomap_ops;

+static inline struct page *f2fs_find_data_page(struct inode *inode,
+		pgoff_t index, pgoff_t *next_pgofs)
+{
+	struct folio *folio = f2fs_find_data_folio(inode, index, next_pgofs);
+
+	return &folio->page;
+}
+
+static inline struct page *f2fs_get_lock_data_page(struct inode *inode,
+		pgoff_t index, bool for_write)
+{
+	struct folio *folio = f2fs_get_lock_data_folio(inode, index, for_write);
+
+	return &folio->page;
+}
+
 /*
  * gc.c
  */
@@ -3966,7 +4058,8 @@ struct f2fs_stat_info {
 	unsigned long long allocated_data_blocks;
 	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
 	int ndirty_data, ndirty_qdata;
-	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
+	unsigned int ndirty_dirs, ndirty_files, ndirty_all;
+	unsigned int nquota_files, ndonate_files;
 	int nats, dirty_nats, sits, dirty_sits;
 	int free_nids, avail_nids, alloc_nids;
 	int total_count, utilization;
@@ -4231,6 +4324,8 @@ unsigned long f2fs_shrink_count(struct shrinker *shrink,
			struct shrink_control *sc);
 unsigned long f2fs_shrink_scan(struct shrinker *shrink,
			struct shrink_control *sc);
+unsigned int f2fs_donate_files(void);
+void f2fs_reclaim_caches(unsigned int reclaim_caches_kb);
 void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
 void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
fs/f2fs/file.c (126 lines changed)
@@ -707,31 +707,33 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
 	loff_t offset = from & (PAGE_SIZE - 1);
 	pgoff_t index = from >> PAGE_SHIFT;
 	struct address_space *mapping = inode->i_mapping;
-	struct page *page;
+	struct folio *folio;

 	if (!offset && !cache_only)
 		return 0;

 	if (cache_only) {
-		page = find_lock_page(mapping, index);
-		if (page && PageUptodate(page))
+		folio = filemap_lock_folio(mapping, index);
+		if (IS_ERR(folio))
+			return 0;
+		if (folio_test_uptodate(folio))
 			goto truncate_out;
-		f2fs_put_page(page, 1);
+		f2fs_folio_put(folio, true);
 		return 0;
 	}

-	page = f2fs_get_lock_data_page(inode, index, true);
-	if (IS_ERR(page))
-		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
+	folio = f2fs_get_lock_data_folio(inode, index, true);
+	if (IS_ERR(folio))
+		return PTR_ERR(folio) == -ENOENT ? 0 : PTR_ERR(folio);
truncate_out:
-	f2fs_wait_on_page_writeback(page, DATA, true, true);
-	zero_user(page, offset, PAGE_SIZE - offset);
+	f2fs_folio_wait_writeback(folio, DATA, true, true);
+	folio_zero_segment(folio, offset, folio_size(folio));

 	/* An encrypted inode should have a key and truncate the last page. */
 	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
 	if (!cache_only)
-		set_page_dirty(page);
-	f2fs_put_page(page, 1);
+		folio_mark_dirty(folio);
+	f2fs_folio_put(folio, true);
 	return 0;
 }

@@ -759,7 +761,7 @@ int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
 	if (lock)
 		f2fs_lock_op(sbi);

-	ipage = f2fs_get_node_page(sbi, inode->i_ino);
+	ipage = f2fs_get_inode_page(sbi, inode->i_ino);
 	if (IS_ERR(ipage)) {
 		err = PTR_ERR(ipage);
 		goto out;
@@ -1834,18 +1836,32 @@ static int f2fs_expand_inode_data(struct inode *inode, loff_t offset,

 	map.m_len = sec_blks;
next_alloc:
+	f2fs_down_write(&sbi->pin_sem);
+
 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
 		if (has_not_enough_free_secs(sbi, 0, 0)) {
+			f2fs_up_write(&sbi->pin_sem);
 			err = -ENOSPC;
+			f2fs_warn_ratelimited(sbi,
+				"ino:%lu, start:%lu, end:%lu, need to trigger GC to "
+				"reclaim enough free segment when checkpoint is enabled",
+				inode->i_ino, pg_start, pg_end);
 			goto out_err;
 		}
 	}

 	if (has_not_enough_free_secs(sbi, 0, f2fs_sb_has_blkzoned(sbi) ?
		ZONED_PIN_SEC_REQUIRED_COUNT :
		GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
 		f2fs_down_write(&sbi->gc_lock);
 		stat_inc_gc_call_count(sbi, FOREGROUND);
 		err = f2fs_gc(sbi, &gc_control);
-		if (err && err != -ENODATA)
+		if (err && err != -ENODATA) {
+			f2fs_up_write(&sbi->pin_sem);
 			goto out_err;
+		}
 	}

-	f2fs_down_write(&sbi->pin_sem);
-
 	err = f2fs_allocate_pinning_section(sbi);
 	if (err) {
 		f2fs_up_write(&sbi->pin_sem);
@@ -2448,6 +2464,52 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
 	return ret;
 }

+static void f2fs_keep_noreuse_range(struct inode *inode,
+				loff_t offset, loff_t len)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	u64 max_bytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
+	u64 start, end;
+
+	if (!S_ISREG(inode->i_mode))
+		return;
+
+	if (offset >= max_bytes || len > max_bytes ||
+	    (offset + len) > max_bytes)
+		return;
+
+	start = offset >> PAGE_SHIFT;
+	end = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+
+	inode_lock(inode);
+	if (f2fs_is_atomic_file(inode)) {
+		inode_unlock(inode);
+		return;
+	}
+
+	spin_lock(&sbi->inode_lock[DONATE_INODE]);
+	/* let's remove the range, if len = 0 */
+	if (!len) {
+		if (!list_empty(&F2FS_I(inode)->gdonate_list)) {
+			list_del_init(&F2FS_I(inode)->gdonate_list);
+			sbi->donate_files--;
+		}
+	} else {
+		if (list_empty(&F2FS_I(inode)->gdonate_list)) {
+			list_add_tail(&F2FS_I(inode)->gdonate_list,
+					&sbi->inode_list[DONATE_INODE]);
+			sbi->donate_files++;
+		} else {
+			list_move_tail(&F2FS_I(inode)->gdonate_list,
+					&sbi->inode_list[DONATE_INODE]);
+		}
+		F2FS_I(inode)->donate_start = start;
+		F2FS_I(inode)->donate_end = end - 1;
+	}
+	spin_unlock(&sbi->inode_lock[DONATE_INODE]);
+	inode_unlock(inode);
+}
+
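Userspace reaches this path through posix_fadvise(2), wired up in f2fs_file_fadvise() further down; a zero length clears a previously registered range, matching the len == 0 branch above. A minimal sketch (the file path is illustrative):

/* Sketch: register the first 1 MB as a no-reuse range so f2fs links
 * the inode into the donation list; len == 0 would clear it. */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/f2fs/cold.bin", O_RDONLY);

	if (fd < 0)
		return 1;
	posix_fadvise(fd, 0, 1 << 20, POSIX_FADV_NOREUSE);
	close(fd);
	return 0;
}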
 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
 {
 	struct inode *inode = file_inode(filp);
@@ -3446,6 +3508,23 @@ static int f2fs_ioc_get_dev_alias_file(struct file *filp, unsigned long arg)
							(u32 __user *)arg);
 }

+static int f2fs_ioc_io_prio(struct file *filp, unsigned long arg)
+{
+	struct inode *inode = file_inode(filp);
+	__u32 level;
+
+	if (get_user(level, (__u32 __user *)arg))
+		return -EFAULT;
+
+	if (!S_ISREG(inode->i_mode) || level >= F2FS_IOPRIO_MAX)
+		return -EINVAL;
+
+	inode_lock(inode);
+	F2FS_I(inode)->ioprio_hint = level;
+	inode_unlock(inode);
+	return 0;
+}
+
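From userspace the new hint is set with a single ioctl; the handler above expects a __u32 level below F2FS_IOPRIO_MAX on a regular file, and f2fs_io_flags() in fs/f2fs/data.c then adds REQ_PRIO to DATA writes. A sketch, assuming F2FS_IOC_IO_PRIO and F2FS_IOPRIO_WRITE are exported through the uapi header (the file path is illustrative):

/* Sketch: hint that this file's data writes deserve REQ_PRIO. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/f2fs.h>

int main(void)
{
	__u32 level = F2FS_IOPRIO_WRITE;
	int fd = open("/mnt/f2fs/journal.db", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, F2FS_IOC_IO_PRIO, &level))
		return 1;
	close(fd);
	return 0;
}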
 int f2fs_precache_extents(struct inode *inode)
 {
 	struct f2fs_inode_info *fi = F2FS_I(inode);
@@ -4547,6 +4626,8 @@ static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		return f2fs_ioc_compress_file(filp);
 	case F2FS_IOC_GET_DEV_ALIAS_FILE:
 		return f2fs_ioc_get_dev_alias_file(filp, arg);
+	case F2FS_IOC_IO_PRIO:
+		return f2fs_ioc_io_prio(filp, arg);
 	default:
 		return -ENOTTY;
 	}
@@ -5147,12 +5228,16 @@ static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
 	}

 	err = generic_fadvise(filp, offset, len, advice);
-	if (!err && advice == POSIX_FADV_DONTNEED &&
-	    test_opt(F2FS_I_SB(inode), COMPRESS_CACHE) &&
-	    f2fs_compressed_file(inode))
-		f2fs_invalidate_compress_pages(F2FS_I_SB(inode), inode->i_ino);
+	if (err)
+		return err;

-	return err;
+	if (advice == POSIX_FADV_DONTNEED &&
+	    (test_opt(F2FS_I_SB(inode), COMPRESS_CACHE) &&
+	     f2fs_compressed_file(inode)))
+		f2fs_invalidate_compress_pages(F2FS_I_SB(inode), inode->i_ino);
+	else if (advice == POSIX_FADV_NOREUSE)
+		f2fs_keep_noreuse_range(inode, offset, len);
+	return 0;
 }

 #ifdef CONFIG_COMPAT
@@ -5261,6 +5346,7 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	case F2FS_IOC_DECOMPRESS_FILE:
 	case F2FS_IOC_COMPRESS_FILE:
 	case F2FS_IOC_GET_DEV_ALIAS_FILE:
+	case F2FS_IOC_IO_PRIO:
 		break;
 	default:
 		return -ENOIOCTLCMD;
fs/f2fs/gc.c (42 lines changed)
@@ -1449,14 +1449,14 @@ out:
 }

 static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
-						unsigned int segno, int off)
+					unsigned int segno, int off)
 {
-	struct page *page;
+	struct folio *folio;
 	int err = 0;

-	page = f2fs_get_lock_data_page(inode, bidx, true);
-	if (IS_ERR(page))
-		return PTR_ERR(page);
+	folio = f2fs_get_lock_data_folio(inode, bidx, true);
+	if (IS_ERR(folio))
+		return PTR_ERR(folio);

 	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
 		err = -ENOENT;
@@ -1468,12 +1468,12 @@ static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
 		goto out;

 	if (gc_type == BG_GC) {
-		if (folio_test_writeback(page_folio(page))) {
+		if (folio_test_writeback(folio)) {
 			err = -EAGAIN;
 			goto out;
 		}
-		set_page_dirty(page);
-		set_page_private_gcing(page);
+		folio_mark_dirty(folio);
+		set_page_private_gcing(&folio->page);
 	} else {
 		struct f2fs_io_info fio = {
 			.sbi = F2FS_I_SB(inode),
@@ -1483,37 +1483,37 @@ static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
 			.op = REQ_OP_WRITE,
 			.op_flags = REQ_SYNC,
 			.old_blkaddr = NULL_ADDR,
-			.page = page,
+			.page = &folio->page,
 			.encrypted_page = NULL,
 			.need_lock = LOCK_REQ,
 			.io_type = FS_GC_DATA_IO,
 		};
-		bool is_dirty = PageDirty(page);
+		bool is_dirty = folio_test_dirty(folio);

retry:
-		f2fs_wait_on_page_writeback(page, DATA, true, true);
+		f2fs_folio_wait_writeback(folio, DATA, true, true);

-		set_page_dirty(page);
-		if (clear_page_dirty_for_io(page)) {
+		folio_mark_dirty(folio);
+		if (folio_clear_dirty_for_io(folio)) {
 			inode_dec_dirty_pages(inode);
 			f2fs_remove_dirty_inode(inode);
 		}

-		set_page_private_gcing(page);
+		set_page_private_gcing(&folio->page);

 		err = f2fs_do_write_data_page(&fio);
 		if (err) {
-			clear_page_private_gcing(page);
+			clear_page_private_gcing(&folio->page);
 			if (err == -ENOMEM) {
 				memalloc_retry_wait(GFP_NOFS);
 				goto retry;
 			}
 			if (is_dirty)
-				set_page_dirty(page);
+				folio_mark_dirty(folio);
 		}
 	}
out:
-	f2fs_put_page(page, 1);
+	f2fs_folio_put(folio, true);
 	return err;
 }

@@ -1542,7 +1542,6 @@ next_step:
 		entry = sum;

 		for (off = 0; off < usable_blks_in_seg; off++, entry++) {
-			struct page *data_page;
 			struct inode *inode;
 			struct node_info dni;	/* dnode info for the data */
 			unsigned int ofs_in_node, nofs;
@@ -1585,6 +1584,7 @@ next_step:
 			ofs_in_node = le16_to_cpu(entry->ofs_in_node);

 			if (phase == 3) {
+				struct folio *data_folio;
 				int err;

 				inode = f2fs_iget(sb, dni.ino);
@@ -1635,15 +1635,15 @@ next_step:
 					continue;
 				}

-				data_page = f2fs_get_read_data_page(inode, start_bidx,
+				data_folio = f2fs_get_read_data_folio(inode, start_bidx,
								REQ_RAHEAD, true, NULL);
 				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-				if (IS_ERR(data_page)) {
+				if (IS_ERR(data_folio)) {
 					iput(inode);
 					continue;
 				}

-				f2fs_put_page(data_page, 0);
+				f2fs_folio_put(data_folio, false);
 				add_gc_inode(gc_list, inode);
 				continue;
 			}
@@ -119,7 +119,7 @@ int f2fs_read_inline_data(struct inode *inode, struct folio *folio)
 {
 	struct page *ipage;

-	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
+	ipage = f2fs_get_inode_page(F2FS_I_SB(inode), inode->i_ino);
 	if (IS_ERR(ipage)) {
 		folio_unlock(folio);
 		return PTR_ERR(ipage);
@@ -237,7 +237,7 @@ int f2fs_convert_inline_inode(struct inode *inode)

 	f2fs_lock_op(sbi);

-	ipage = f2fs_get_node_page(sbi, inode->i_ino);
+	ipage = f2fs_get_inode_page(sbi, inode->i_ino);
 	if (IS_ERR(ipage)) {
 		err = PTR_ERR(ipage);
 		goto out;
@@ -265,7 +265,7 @@ int f2fs_write_inline_data(struct inode *inode, struct folio *folio)
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct page *ipage;

-	ipage = f2fs_get_node_page(sbi, inode->i_ino);
+	ipage = f2fs_get_inode_page(sbi, inode->i_ino);
 	if (IS_ERR(ipage))
 		return PTR_ERR(ipage);

@@ -312,7 +312,7 @@ int f2fs_recover_inline_data(struct inode *inode, struct page *npage)
 	if (f2fs_has_inline_data(inode) &&
 			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
-		ipage = f2fs_get_node_page(sbi, inode->i_ino);
+		ipage = f2fs_get_inode_page(sbi, inode->i_ino);
 		if (IS_ERR(ipage))
 			return PTR_ERR(ipage);

@@ -331,7 +331,7 @@ process_inline:
 	}

 	if (f2fs_has_inline_data(inode)) {
-		ipage = f2fs_get_node_page(sbi, inode->i_ino);
+		ipage = f2fs_get_inode_page(sbi, inode->i_ino);
 		if (IS_ERR(ipage))
 			return PTR_ERR(ipage);
 		f2fs_truncate_inline_inode(inode, ipage, 0);
@@ -361,7 +361,7 @@ struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
 	struct page *ipage;
 	void *inline_dentry;

-	ipage = f2fs_get_node_page(sbi, dir->i_ino);
+	ipage = f2fs_get_inode_page(sbi, dir->i_ino);
 	if (IS_ERR(ipage)) {
 		*res_page = ipage;
 		return NULL;
@@ -609,7 +609,7 @@ int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry)
 	if (err)
 		goto out;

-	ipage = f2fs_get_node_page(sbi, dir->i_ino);
+	ipage = f2fs_get_inode_page(sbi, dir->i_ino);
 	if (IS_ERR(ipage)) {
 		err = PTR_ERR(ipage);
 		goto out_fname;
@@ -644,7 +644,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
 	struct page *page = NULL;
 	int err = 0;

-	ipage = f2fs_get_node_page(sbi, dir->i_ino);
+	ipage = f2fs_get_inode_page(sbi, dir->i_ino);
 	if (IS_ERR(ipage))
 		return PTR_ERR(ipage);

@@ -734,7 +734,7 @@ bool f2fs_empty_inline_dir(struct inode *dir)
 	void *inline_dentry;
 	struct f2fs_dentry_ptr d;

-	ipage = f2fs_get_node_page(sbi, dir->i_ino);
+	ipage = f2fs_get_inode_page(sbi, dir->i_ino);
 	if (IS_ERR(ipage))
 		return false;

@@ -765,7 +765,7 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
 	if (ctx->pos == d.max)
 		return 0;

-	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
+	ipage = f2fs_get_inode_page(F2FS_I_SB(inode), inode->i_ino);
 	if (IS_ERR(ipage))
 		return PTR_ERR(ipage);

@@ -797,7 +797,7 @@ int f2fs_inline_data_fiemap(struct inode *inode,
 	struct page *ipage;
 	int err = 0;

-	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
+	ipage = f2fs_get_inode_page(F2FS_I_SB(inode), inode->i_ino);
 	if (IS_ERR(ipage))
 		return PTR_ERR(ipage);

@@ -34,10 +34,8 @@ void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
 	if (f2fs_inode_dirtied(inode, sync))
 		return;

-	if (f2fs_is_atomic_file(inode)) {
-		set_inode_flag(inode, FI_ATOMIC_DIRTIED);
+	if (f2fs_is_atomic_file(inode))
 		return;
-	}

 	mark_inode_dirty_sync(inode);
 }
@@ -410,7 +408,7 @@ static int do_read_inode(struct inode *inode)
 	if (f2fs_check_nid_range(sbi, inode->i_ino))
 		return -EINVAL;

-	node_page = f2fs_get_node_page(sbi, inode->i_ino);
+	node_page = f2fs_get_inode_page(sbi, inode->i_ino);
 	if (IS_ERR(node_page))
 		return PTR_ERR(node_page);

@@ -757,7 +755,7 @@ void f2fs_update_inode_page(struct inode *inode)
 	struct page *node_page;
 	int count = 0;
retry:
-	node_page = f2fs_get_node_page(sbi, inode->i_ino);
+	node_page = f2fs_get_inode_page(sbi, inode->i_ino);
 	if (IS_ERR(node_page)) {
 		int err = PTR_ERR(node_page);

@@ -765,8 +763,12 @@ retry:
 		if (err == -ENOENT)
 			return;

+		if (err == -EFSCORRUPTED)
+			goto stop_checkpoint;
+
 		if (err == -ENOMEM || ++count <= DEFAULT_RETRY_IO_COUNT)
 			goto retry;
+stop_checkpoint:
 		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_UPDATE_INODE);
 		return;
 	}
@@ -789,6 +791,13 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
 			!is_inode_flag_set(inode, FI_DIRTY_INODE))
 		return 0;

+	/*
+	 * no need to update inode page, ultimately f2fs_evict_inode() will
+	 * clear dirty status of inode.
+	 */
+	if (f2fs_cp_error(sbi))
+		return -EIO;
+
 	if (!f2fs_is_checkpoint_ready(sbi)) {
 		f2fs_mark_inode_dirty_sync(inode, true);
 		return -ENOSPC;
@@ -804,6 +813,19 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
 	return 0;
 }

+static void f2fs_remove_donate_inode(struct inode *inode)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+	if (list_empty(&F2FS_I(inode)->gdonate_list))
+		return;
+
+	spin_lock(&sbi->inode_lock[DONATE_INODE]);
+	list_del_init(&F2FS_I(inode)->gdonate_list);
+	sbi->donate_files--;
+	spin_unlock(&sbi->inode_lock[DONATE_INODE]);
+}
+
 /*
  * Called at the last iput() if i_nlink is zero
  */
@@ -838,6 +860,7 @@ void f2fs_evict_inode(struct inode *inode)

 	f2fs_bug_on(sbi, get_dirty_pages(inode));
 	f2fs_remove_dirty_inode(inode);
+	f2fs_remove_donate_inode(inode);

 	if (!IS_DEVICE_ALIASING(inode))
 		f2fs_destroy_extent_tree(inode);
fs/f2fs/namei.c
@@ -502,6 +502,14 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
 		goto out;
 	}
 
+	if (inode->i_nlink == 0) {
+		f2fs_warn(F2FS_I_SB(inode), "%s: inode (ino=%lx) has zero i_nlink",
+				__func__, inode->i_ino);
+		err = -EFSCORRUPTED;
+		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+		goto out_iput;
+	}
+
 	if (IS_ENCRYPTED(dir) &&
 	    (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
 	    !fscrypt_has_permitted_context(dir, inode)) {
fs/f2fs/node.c
@@ -310,10 +310,10 @@ static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
 							start, nr);
 }
 
-bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, const struct folio *folio)
 {
-	return NODE_MAPPING(sbi) == page->mapping &&
-			IS_DNODE(page) && is_cold_node(page);
+	return NODE_MAPPING(sbi) == folio->mapping &&
+			IS_DNODE(&folio->page) && is_cold_node(&folio->page);
 }
 
 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
@@ -778,7 +778,7 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
 	npage[0] = dn->inode_page;
 
 	if (!npage[0]) {
-		npage[0] = f2fs_get_node_page(sbi, nids[0]);
+		npage[0] = f2fs_get_inode_page(sbi, nids[0]);
 		if (IS_ERR(npage[0]))
 			return PTR_ERR(npage[0]);
 	}
@@ -1130,26 +1130,33 @@ int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
 	unsigned int nofs = 0;
 	struct f2fs_inode *ri;
 	struct dnode_of_data dn;
-	struct page *page;
+	struct folio *folio;
 
 	trace_f2fs_truncate_inode_blocks_enter(inode, from);
 
 	level = get_node_path(inode, from, offset, noffset);
-	if (level < 0) {
+	if (level <= 0) {
+		if (!level) {
+			level = -EFSCORRUPTED;
+			f2fs_err(sbi, "%s: inode ino=%lx has corrupted node block, from:%lu addrs:%u",
+					__func__, inode->i_ino,
+					from, ADDRS_PER_INODE(inode));
+			set_sbi_flag(sbi, SBI_NEED_FSCK);
+		}
 		trace_f2fs_truncate_inode_blocks_exit(inode, level);
 		return level;
 	}
 
-	page = f2fs_get_node_page(sbi, inode->i_ino);
-	if (IS_ERR(page)) {
-		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
-		return PTR_ERR(page);
+	folio = f2fs_get_inode_folio(sbi, inode->i_ino);
+	if (IS_ERR(folio)) {
+		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(folio));
+		return PTR_ERR(folio);
 	}
 
-	set_new_dnode(&dn, inode, page, NULL, 0);
-	unlock_page(page);
+	set_new_dnode(&dn, inode, &folio->page, NULL, 0);
+	folio_unlock(folio);
 
-	ri = F2FS_INODE(page);
+	ri = F2FS_INODE(&folio->page);
 	switch (level) {
 	case 0:
 	case 1:
@@ -1178,7 +1185,7 @@ int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
 
 skip_partial:
 	while (cont) {
-		dn.nid = get_nid(page, offset[0], true);
+		dn.nid = get_nid(&folio->page, offset[0], true);
 		switch (offset[0]) {
 		case NODE_DIR1_BLOCK:
 		case NODE_DIR2_BLOCK:
@@ -1199,7 +1206,7 @@ skip_partial:
 			BUG();
 		}
 		if (err == -ENOENT) {
-			set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
+			set_sbi_flag(F2FS_F_SB(folio), SBI_NEED_FSCK);
 			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
 			f2fs_err_ratelimited(sbi,
 				"truncate node fail, ino:%lu, nid:%u, "
@@ -1210,18 +1217,18 @@ skip_partial:
 		}
 		if (err < 0)
 			goto fail;
-		if (offset[1] == 0 && get_nid(page, offset[0], true)) {
-			lock_page(page);
-			BUG_ON(page->mapping != NODE_MAPPING(sbi));
-			set_nid(page, offset[0], 0, true);
-			unlock_page(page);
+		if (offset[1] == 0 && get_nid(&folio->page, offset[0], true)) {
+			folio_lock(folio);
+			BUG_ON(folio->mapping != NODE_MAPPING(sbi));
+			set_nid(&folio->page, offset[0], 0, true);
+			folio_unlock(folio);
 		}
 		offset[1] = 0;
 		offset[0]++;
 		nofs += err;
 	}
 fail:
-	f2fs_put_page(page, 0);
+	f2fs_folio_put(folio, false);
 	trace_f2fs_truncate_inode_blocks_exit(inode, err);
 	return err > 0 ? 0 : err;
 }
@@ -1238,7 +1245,7 @@ int f2fs_truncate_xattr_node(struct inode *inode)
 	if (!nid)
 		return 0;
 
-	npage = f2fs_get_node_page(sbi, nid);
+	npage = f2fs_get_xnode_page(sbi, nid);
 	if (IS_ERR(npage))
 		return PTR_ERR(npage);
 
@@ -1449,10 +1456,32 @@ void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
 	f2fs_put_page(apage, err ? 1 : 0);
 }
 
-static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
-					struct page *parent, int start)
+static int sanity_check_node_footer(struct f2fs_sb_info *sbi,
+					struct page *page, pgoff_t nid,
+					enum node_type ntype)
 {
-	struct page *page;
+	if (unlikely(nid != nid_of_node(page) ||
+		(ntype == NODE_TYPE_INODE && !IS_INODE(page)) ||
+		(ntype == NODE_TYPE_XATTR &&
+		!f2fs_has_xattr_block(ofs_of_node(page))) ||
+		time_to_inject(sbi, FAULT_INCONSISTENT_FOOTER))) {
+		f2fs_warn(sbi, "inconsistent node block, node_type:%d, nid:%lu, "
+			  "node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
+			  ntype, nid, nid_of_node(page), ino_of_node(page),
+			  ofs_of_node(page), cpver_of_node(page),
+			  next_blkaddr_of_node(page));
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
+		return -EFSCORRUPTED;
+	}
+	return 0;
+}
+
+static struct folio *__get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid,
+					struct page *parent, int start,
+					enum node_type ntype)
+{
+	struct folio *folio;
 	int err;
 
 	if (!nid)
@@ -1460,11 +1489,11 @@ static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
 	if (f2fs_check_nid_range(sbi, nid))
 		return ERR_PTR(-EINVAL);
 repeat:
-	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
-	if (!page)
-		return ERR_PTR(-ENOMEM);
+	folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), nid, false);
+	if (IS_ERR(folio))
+		return folio;
 
-	err = read_node_page(page, 0);
+	err = read_node_page(&folio->page, 0);
 	if (err < 0) {
 		goto out_put_err;
 	} else if (err == LOCKED_PAGE) {
@@ -1475,54 +1504,72 @@ repeat:
 	if (parent)
 		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);
 
-	lock_page(page);
+	folio_lock(folio);
 
-	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
-		f2fs_put_page(page, 1);
+	if (unlikely(folio->mapping != NODE_MAPPING(sbi))) {
+		f2fs_folio_put(folio, true);
 		goto repeat;
 	}
 
-	if (unlikely(!PageUptodate(page))) {
+	if (unlikely(!folio_test_uptodate(folio))) {
 		err = -EIO;
 		goto out_err;
 	}
 
-	if (!f2fs_inode_chksum_verify(sbi, page)) {
+	if (!f2fs_inode_chksum_verify(sbi, &folio->page)) {
 		err = -EFSBADCRC;
 		goto out_err;
 	}
page_hit:
-	if (likely(nid == nid_of_node(page)))
-		return page;
-
-	f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
-		  nid, nid_of_node(page), ino_of_node(page),
-		  ofs_of_node(page), cpver_of_node(page),
-		  next_blkaddr_of_node(page));
-	set_sbi_flag(sbi, SBI_NEED_FSCK);
-	f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
-	err = -EFSCORRUPTED;
+	err = sanity_check_node_footer(sbi, &folio->page, nid, ntype);
+	if (!err)
+		return folio;
out_err:
-	ClearPageUptodate(page);
+	folio_clear_uptodate(folio);
out_put_err:
 	/* ENOENT comes from read_node_page which is not an error. */
 	if (err != -ENOENT)
-		f2fs_handle_page_eio(sbi, page_folio(page), NODE);
-	f2fs_put_page(page, 1);
+		f2fs_handle_page_eio(sbi, folio, NODE);
+	f2fs_folio_put(folio, true);
 	return ERR_PTR(err);
 }
 
 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
 {
-	return __get_node_page(sbi, nid, NULL, 0);
+	struct folio *folio = __get_node_folio(sbi, nid, NULL, 0,
+						NODE_TYPE_REGULAR);
+
+	return &folio->page;
+}
+
+struct folio *f2fs_get_inode_folio(struct f2fs_sb_info *sbi, pgoff_t ino)
+{
+	return __get_node_folio(sbi, ino, NULL, 0, NODE_TYPE_INODE);
+}
+
+struct page *f2fs_get_inode_page(struct f2fs_sb_info *sbi, pgoff_t ino)
+{
+	struct folio *folio = f2fs_get_inode_folio(sbi, ino);
+
+	return &folio->page;
+}
+
+struct page *f2fs_get_xnode_page(struct f2fs_sb_info *sbi, pgoff_t xnid)
+{
+	struct folio *folio = __get_node_folio(sbi, xnid, NULL, 0,
+						NODE_TYPE_XATTR);
+
+	return &folio->page;
 }
 
 struct page *f2fs_get_node_page_ra(struct page *parent, int start)
 {
 	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
 	nid_t nid = get_nid(parent, start, false);
+	struct folio *folio = __get_node_folio(sbi, nid, parent, start,
+						NODE_TYPE_REGULAR);
 
-	return __get_node_page(sbi, nid, parent, start);
+	return &folio->page;
 }
 
 static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
@@ -1561,11 +1608,11 @@ iput_out:
 	iput(inode);
 }
 
-static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
+static struct folio *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
 {
 	pgoff_t index;
 	struct folio_batch fbatch;
-	struct page *last_page = NULL;
+	struct folio *last_folio = NULL;
 	int nr_folios;
 
 	folio_batch_init(&fbatch);
@@ -1577,45 +1624,45 @@ static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
 		int i;
 
 		for (i = 0; i < nr_folios; i++) {
-			struct page *page = &fbatch.folios[i]->page;
+			struct folio *folio = fbatch.folios[i];
 
 			if (unlikely(f2fs_cp_error(sbi))) {
-				f2fs_put_page(last_page, 0);
+				f2fs_folio_put(last_folio, false);
 				folio_batch_release(&fbatch);
 				return ERR_PTR(-EIO);
 			}
 
-			if (!IS_DNODE(page) || !is_cold_node(page))
+			if (!IS_DNODE(&folio->page) || !is_cold_node(&folio->page))
 				continue;
-			if (ino_of_node(page) != ino)
+			if (ino_of_node(&folio->page) != ino)
 				continue;
 
-			lock_page(page);
+			folio_lock(folio);
 
-			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
+			if (unlikely(folio->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
-				unlock_page(page);
+				folio_unlock(folio);
 				continue;
 			}
-			if (ino_of_node(page) != ino)
+			if (ino_of_node(&folio->page) != ino)
 				goto continue_unlock;
 
-			if (!PageDirty(page)) {
+			if (!folio_test_dirty(folio)) {
 				/* someone wrote it for us */
 				goto continue_unlock;
 			}
 
-			if (last_page)
-				f2fs_put_page(last_page, 0);
+			if (last_folio)
+				f2fs_folio_put(last_folio, false);
 
-			get_page(page);
-			last_page = page;
-			unlock_page(page);
+			folio_get(folio);
+			last_folio = folio;
+			folio_unlock(folio);
 		}
 		folio_batch_release(&fbatch);
 		cond_resched();
 	}
-	return last_page;
+	return last_folio;
 }
 
 static int __write_node_page(struct page *page, bool atomic, bool *submitted,
@@ -1694,7 +1741,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
 		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
 
 	/* should add to global list before clearing PAGECACHE status */
-	if (f2fs_in_warm_node_list(sbi, page)) {
+	if (f2fs_in_warm_node_list(sbi, folio)) {
 		seq = f2fs_add_fsync_node_entry(sbi, page);
 		if (seq_id)
 			*seq_id = seq;
@@ -1769,13 +1816,6 @@ release_page:
 	return err;
 }
 
-static int f2fs_write_node_page(struct page *page,
-				struct writeback_control *wbc)
-{
-	return __write_node_page(page, false, NULL, wbc, false,
-						FS_NODE_IO, NULL);
-}
-
 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
 			struct writeback_control *wbc, bool atomic,
 			unsigned int *seq_id)
@@ -1783,16 +1823,16 @@ int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
 	pgoff_t index;
 	struct folio_batch fbatch;
 	int ret = 0;
-	struct page *last_page = NULL;
+	struct folio *last_folio = NULL;
 	bool marked = false;
 	nid_t ino = inode->i_ino;
 	int nr_folios;
 	int nwritten = 0;
 
 	if (atomic) {
-		last_page = last_fsync_dnode(sbi, ino);
-		if (IS_ERR_OR_NULL(last_page))
-			return PTR_ERR_OR_ZERO(last_page);
+		last_folio = last_fsync_dnode(sbi, ino);
+		if (IS_ERR_OR_NULL(last_folio))
+			return PTR_ERR_OR_ZERO(last_folio);
 	}
retry:
 	folio_batch_init(&fbatch);
@@ -1804,73 +1844,73 @@ retry:
 		int i;
 
 		for (i = 0; i < nr_folios; i++) {
-			struct page *page = &fbatch.folios[i]->page;
+			struct folio *folio = fbatch.folios[i];
 			bool submitted = false;
 
 			if (unlikely(f2fs_cp_error(sbi))) {
-				f2fs_put_page(last_page, 0);
+				f2fs_folio_put(last_folio, false);
 				folio_batch_release(&fbatch);
 				ret = -EIO;
 				goto out;
 			}
 
-			if (!IS_DNODE(page) || !is_cold_node(page))
+			if (!IS_DNODE(&folio->page) || !is_cold_node(&folio->page))
 				continue;
-			if (ino_of_node(page) != ino)
+			if (ino_of_node(&folio->page) != ino)
 				continue;
 
-			lock_page(page);
+			folio_lock(folio);
 
-			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
+			if (unlikely(folio->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
-				unlock_page(page);
+				folio_unlock(folio);
 				continue;
 			}
-			if (ino_of_node(page) != ino)
+			if (ino_of_node(&folio->page) != ino)
 				goto continue_unlock;
 
-			if (!PageDirty(page) && page != last_page) {
+			if (!folio_test_dirty(folio) && folio != last_folio) {
 				/* someone wrote it for us */
 				goto continue_unlock;
 			}
 
-			f2fs_wait_on_page_writeback(page, NODE, true, true);
+			f2fs_folio_wait_writeback(folio, NODE, true, true);
 
-			set_fsync_mark(page, 0);
-			set_dentry_mark(page, 0);
+			set_fsync_mark(&folio->page, 0);
+			set_dentry_mark(&folio->page, 0);
 
-			if (!atomic || page == last_page) {
-				set_fsync_mark(page, 1);
+			if (!atomic || folio == last_folio) {
+				set_fsync_mark(&folio->page, 1);
 				percpu_counter_inc(&sbi->rf_node_block_count);
-				if (IS_INODE(page)) {
+				if (IS_INODE(&folio->page)) {
 					if (is_inode_flag_set(inode,
 								FI_DIRTY_INODE))
-						f2fs_update_inode(inode, page);
-					set_dentry_mark(page,
+						f2fs_update_inode(inode, &folio->page);
+					set_dentry_mark(&folio->page,
 						f2fs_need_dentry_mark(sbi, ino));
 				}
 				/* may be written by other thread */
-				if (!PageDirty(page))
-					set_page_dirty(page);
+				if (!folio_test_dirty(folio))
+					folio_mark_dirty(folio);
 			}
 
-			if (!clear_page_dirty_for_io(page))
+			if (!folio_clear_dirty_for_io(folio))
 				goto continue_unlock;
 
-			ret = __write_node_page(page, atomic &&
-						page == last_page,
+			ret = __write_node_page(&folio->page, atomic &&
						folio == last_folio,
 						&submitted, wbc, true,
 						FS_NODE_IO, seq_id);
 			if (ret) {
-				unlock_page(page);
-				f2fs_put_page(last_page, 0);
+				folio_unlock(folio);
+				f2fs_folio_put(last_folio, false);
 				break;
 			} else if (submitted) {
 				nwritten++;
 			}
 
-			if (page == last_page) {
-				f2fs_put_page(page, 0);
+			if (folio == last_folio) {
+				f2fs_folio_put(folio, false);
 				marked = true;
 				break;
 			}
@@ -1883,11 +1923,11 @@ continue_unlock:
 	}
 	if (!ret && atomic && !marked) {
 		f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
-			   ino, page_folio(last_page)->index);
-		lock_page(last_page);
-		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
-		set_page_dirty(last_page);
-		unlock_page(last_page);
+			   ino, last_folio->index);
+		folio_lock(last_folio);
+		f2fs_folio_wait_writeback(last_folio, NODE, true, true);
+		folio_mark_dirty(last_folio);
+		folio_unlock(last_folio);
 		goto retry;
 	}
out:
@@ -1920,18 +1960,18 @@ static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
 	return 1;
 }
 
-static bool flush_dirty_inode(struct page *page)
+static bool flush_dirty_inode(struct folio *folio)
 {
-	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
+	struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
 	struct inode *inode;
-	nid_t ino = ino_of_node(page);
+	nid_t ino = ino_of_node(&folio->page);
 
 	inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
 	if (!inode)
 		return false;
 
-	f2fs_update_inode(inode, page);
-	unlock_page(page);
+	f2fs_update_inode(inode, &folio->page);
+	folio_unlock(folio);
 
 	iput(inode);
 	return true;
@@ -1951,32 +1991,27 @@ void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
 		int i;
 
 		for (i = 0; i < nr_folios; i++) {
-			struct page *page = &fbatch.folios[i]->page;
+			struct folio *folio = fbatch.folios[i];
 
-			if (!IS_INODE(page))
+			if (!IS_INODE(&folio->page))
 				continue;
 
-			lock_page(page);
+			folio_lock(folio);
 
-			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
-continue_unlock:
-				unlock_page(page);
-				continue;
-			}
-
-			if (!PageDirty(page)) {
-				/* someone wrote it for us */
-				goto continue_unlock;
-			}
+			if (unlikely(folio->mapping != NODE_MAPPING(sbi)))
+				goto unlock;
+			if (!folio_test_dirty(folio))
+				goto unlock;
 
 			/* flush inline_data, if it's async context. */
-			if (page_private_inline(page)) {
-				clear_page_private_inline(page);
-				unlock_page(page);
-				flush_inline_data(sbi, ino_of_node(page));
+			if (page_private_inline(&folio->page)) {
+				clear_page_private_inline(&folio->page);
+				folio_unlock(folio);
+				flush_inline_data(sbi, ino_of_node(&folio->page));
 				continue;
 			}
-			unlock_page(page);
+unlock:
+			folio_unlock(folio);
 		}
 		folio_batch_release(&fbatch);
 		cond_resched();
@@ -2005,7 +2040,7 @@ next_step:
 		int i;
 
 		for (i = 0; i < nr_folios; i++) {
-			struct page *page = &fbatch.folios[i]->page;
+			struct folio *folio = fbatch.folios[i];
 			bool submitted = false;
 
 			/* give a priority to WB_SYNC threads */
@@ -2021,27 +2056,27 @@ next_step:
 			 * 1. dentry dnodes
 			 * 2. file dnodes
 			 */
-			if (step == 0 && IS_DNODE(page))
+			if (step == 0 && IS_DNODE(&folio->page))
 				continue;
-			if (step == 1 && (!IS_DNODE(page) ||
-						is_cold_node(page)))
+			if (step == 1 && (!IS_DNODE(&folio->page) ||
+						is_cold_node(&folio->page)))
 				continue;
-			if (step == 2 && (!IS_DNODE(page) ||
-						!is_cold_node(page)))
+			if (step == 2 && (!IS_DNODE(&folio->page) ||
+						!is_cold_node(&folio->page)))
 				continue;
lock_node:
 			if (wbc->sync_mode == WB_SYNC_ALL)
-				lock_page(page);
-			else if (!trylock_page(page))
+				folio_lock(folio);
+			else if (!folio_trylock(folio))
 				continue;
 
-			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
+			if (unlikely(folio->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
-				unlock_page(page);
+				folio_unlock(folio);
 				continue;
 			}
 
-			if (!PageDirty(page)) {
+			if (!folio_test_dirty(folio)) {
 				/* someone wrote it for us */
 				goto continue_unlock;
 			}
@@ -2051,29 +2086,29 @@ continue_unlock:
 				goto write_node;
 
 			/* flush inline_data */
-			if (page_private_inline(page)) {
-				clear_page_private_inline(page);
-				unlock_page(page);
-				flush_inline_data(sbi, ino_of_node(page));
+			if (page_private_inline(&folio->page)) {
+				clear_page_private_inline(&folio->page);
+				folio_unlock(folio);
+				flush_inline_data(sbi, ino_of_node(&folio->page));
 				goto lock_node;
 			}
 
 			/* flush dirty inode */
-			if (IS_INODE(page) && flush_dirty_inode(page))
+			if (IS_INODE(&folio->page) && flush_dirty_inode(folio))
 				goto lock_node;
write_node:
-			f2fs_wait_on_page_writeback(page, NODE, true, true);
+			f2fs_folio_wait_writeback(folio, NODE, true, true);
 
-			if (!clear_page_dirty_for_io(page))
+			if (!folio_clear_dirty_for_io(folio))
 				goto continue_unlock;
 
-			set_fsync_mark(page, 0);
-			set_dentry_mark(page, 0);
+			set_fsync_mark(&folio->page, 0);
+			set_dentry_mark(&folio->page, 0);
 
-			ret = __write_node_page(page, false, &submitted,
+			ret = __write_node_page(&folio->page, false, &submitted,
 						wbc, do_balance, io_type, NULL);
 			if (ret)
-				unlock_page(page);
+				folio_unlock(folio);
 			else if (submitted)
 				nwritten++;
 
@@ -2207,7 +2242,6 @@ static bool f2fs_dirty_node_folio(struct address_space *mapping,
  * Structure of the f2fs node operations
  */
 const struct address_space_operations f2fs_node_aops = {
-	.writepage	= f2fs_write_node_page,
 	.writepages	= f2fs_write_node_pages,
 	.dirty_folio	= f2fs_dirty_node_folio,
 	.invalidate_folio = f2fs_invalidate_folio,
@@ -2269,24 +2303,6 @@ static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
 	}
 }
 
-bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi)
-{
-	struct f2fs_nm_info *nm_i = NM_I(sbi);
-	unsigned int i;
-	bool ret = true;
-
-	f2fs_down_read(&nm_i->nat_tree_lock);
-	for (i = 0; i < nm_i->nat_blocks; i++) {
-		if (!test_bit_le(i, nm_i->nat_block_bitmap)) {
-			ret = false;
-			break;
-		}
-	}
-	f2fs_up_read(&nm_i->nat_tree_lock);
-
-	return ret;
-}
-
 static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
 							bool set, bool build)
 {
@@ -2717,7 +2733,7 @@ int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
 	struct page *ipage;
 	struct f2fs_inode *ri;
 
-	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
+	ipage = f2fs_get_inode_page(F2FS_I_SB(inode), inode->i_ino);
 	if (IS_ERR(ipage))
 		return PTR_ERR(ipage);
 
@@ -2965,23 +2981,7 @@ add_out:
 	list_add_tail(&nes->set_list, head);
 }
 
-static void __update_nat_bits(struct f2fs_nm_info *nm_i, unsigned int nat_ofs,
-							unsigned int valid)
-{
-	if (valid == 0) {
-		__set_bit_le(nat_ofs, nm_i->empty_nat_bits);
-		__clear_bit_le(nat_ofs, nm_i->full_nat_bits);
-		return;
-	}
-
-	__clear_bit_le(nat_ofs, nm_i->empty_nat_bits);
-	if (valid == NAT_ENTRY_PER_BLOCK)
-		__set_bit_le(nat_ofs, nm_i->full_nat_bits);
-	else
-		__clear_bit_le(nat_ofs, nm_i->full_nat_bits);
-}
-
-static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
+static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
 						struct page *page)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -2990,7 +2990,7 @@ static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
 	int valid = 0;
 	int i = 0;
 
-	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
+	if (!enabled_nat_bits(sbi, NULL))
 		return;
 
 	if (nat_index == 0) {
@@ -3001,36 +3001,17 @@
 		if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
 			valid++;
 	}
-
-	__update_nat_bits(nm_i, nat_index, valid);
-}
-
-void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi)
-{
-	struct f2fs_nm_info *nm_i = NM_I(sbi);
-	unsigned int nat_ofs;
-
-	f2fs_down_read(&nm_i->nat_tree_lock);
-
-	for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) {
-		unsigned int valid = 0, nid_ofs = 0;
-
-		/* handle nid zero due to it should never be used */
-		if (unlikely(nat_ofs == 0)) {
-			valid = 1;
-			nid_ofs = 1;
-		}
-
-		for (; nid_ofs < NAT_ENTRY_PER_BLOCK; nid_ofs++) {
-			if (!test_bit_le(nid_ofs,
-					nm_i->free_nid_bitmap[nat_ofs]))
-				valid++;
-		}
-
-		__update_nat_bits(nm_i, nat_ofs, valid);
+	if (valid == 0) {
+		__set_bit_le(nat_index, nm_i->empty_nat_bits);
+		__clear_bit_le(nat_index, nm_i->full_nat_bits);
+		return;
 	}
 
-	f2fs_up_read(&nm_i->nat_tree_lock);
+	__clear_bit_le(nat_index, nm_i->empty_nat_bits);
+	if (valid == NAT_ENTRY_PER_BLOCK)
+		__set_bit_le(nat_index, nm_i->full_nat_bits);
+	else
+		__clear_bit_le(nat_index, nm_i->full_nat_bits);
 }
 
 static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
@@ -3049,7 +3030,7 @@ static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
 	 * #1, flush nat entries to journal in current hot data summary block.
 	 * #2, flush nat entries to nat page.
 	 */
-	if ((cpc->reason & CP_UMOUNT) ||
+	if (enabled_nat_bits(sbi, cpc) ||
 		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
 		to_journal = false;
 
@@ -3096,7 +3077,7 @@ static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
 	if (to_journal) {
 		up_write(&curseg->journal_rwsem);
 	} else {
-		update_nat_bits(sbi, start_nid, page);
+		__update_nat_bits(sbi, start_nid, page);
 		f2fs_put_page(page, 1);
 	}
 
@@ -3127,7 +3108,7 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	 * during unmount, let's flush nat_bits before checking
 	 * nat_cnt[DIRTY_NAT].
 	 */
-	if (cpc->reason & CP_UMOUNT) {
+	if (enabled_nat_bits(sbi, cpc)) {
 		f2fs_down_write(&nm_i->nat_tree_lock);
 		remove_nats_in_journal(sbi);
 		f2fs_up_write(&nm_i->nat_tree_lock);
@@ -3143,7 +3124,7 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	 * entries, remove all entries from journal and merge them
 	 * into nat entry set.
 	 */
-	if (cpc->reason & CP_UMOUNT ||
+	if (enabled_nat_bits(sbi, cpc) ||
 		!__has_cursum_space(journal,
 			nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
 		remove_nats_in_journal(sbi);
@@ -3180,18 +3161,15 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
 	__u64 cp_ver = cur_cp_version(ckpt);
 	block_t nat_bits_addr;
 
+	if (!enabled_nat_bits(sbi, NULL))
+		return 0;
+
 	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
 	nm_i->nat_bits = f2fs_kvzalloc(sbi,
 			F2FS_BLK_TO_BYTES(nm_i->nat_bits_blocks), GFP_KERNEL);
 	if (!nm_i->nat_bits)
 		return -ENOMEM;
 
-	nm_i->full_nat_bits = nm_i->nat_bits + 8;
-	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
-
-	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
-		return 0;
-
 	nat_bits_addr = __start_cp_addr(sbi) + BLKS_PER_SEG(sbi) -
 						nm_i->nat_bits_blocks;
 	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
@@ -3208,12 +3186,13 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
 
 	cp_ver |= (cur_cp_crc(ckpt) << 32);
 	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
-		clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
 		f2fs_notice(sbi, "Disable nat_bits due to incorrect cp_ver (%llu, %llu)",
 			cp_ver, le64_to_cpu(*(__le64 *)nm_i->nat_bits));
+		disable_nat_bits(sbi, true);
 		return 0;
 	}
 
+	nm_i->full_nat_bits = nm_i->nat_bits + 8;
+	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
+
 	f2fs_notice(sbi, "Found nat_bits in checkpoint");
 	return 0;
 }
@@ -3224,7 +3203,7 @@ static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
 	unsigned int i = 0;
 	nid_t nid, last_nid;
 
-	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
+	if (!enabled_nat_bits(sbi, NULL))
 		return;
 
 	for (i = 0; i < nm_i->nat_blocks; i++) {
@@ -3296,6 +3275,9 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
 	if (!nm_i->nat_bitmap)
 		return -ENOMEM;
 
+	if (!test_opt(sbi, NAT_BITS))
+		disable_nat_bits(sbi, true);
+
 	err = __get_nat_bitmaps(sbi);
 	if (err)
 		return err;
fs/f2fs/node.h
@@ -52,6 +52,13 @@ enum {
 	IS_PREALLOC,		/* nat entry is preallocated */
 };
 
+/* For node type in __get_node_folio() */
+enum node_type {
+	NODE_TYPE_REGULAR,
+	NODE_TYPE_INODE,
+	NODE_TYPE_XATTR,
+};
+
 /*
  * For node information
  */
@@ -248,7 +255,7 @@ static inline nid_t nid_of_node(struct page *node_page)
 	return le32_to_cpu(rn->footer.nid);
 }
 
-static inline unsigned int ofs_of_node(struct page *node_page)
+static inline unsigned int ofs_of_node(const struct page *node_page)
 {
 	struct f2fs_node *rn = F2FS_NODE(node_page);
 	unsigned flag = le32_to_cpu(rn->footer.flag);
@@ -342,7 +349,7 @@ static inline bool is_recoverable_dnode(struct page *page)
  *    `- indirect node ((6 + 2N) + (N - 1)(N + 1))
  *       `- direct node
  */
-static inline bool IS_DNODE(struct page *node_page)
+static inline bool IS_DNODE(const struct page *node_page)
 {
 	unsigned int ofs = ofs_of_node(node_page);
 
@@ -389,7 +396,7 @@ static inline nid_t get_nid(struct page *p, int off, bool i)
  *  - Mark cold data pages in page cache
  */
 
-static inline int is_node(struct page *page, int type)
+static inline int is_node(const struct page *page, int type)
 {
 	struct f2fs_node *rn = F2FS_NODE(page);
 	return le32_to_cpu(rn->footer.flag) & BIT(type);
fs/f2fs/segment.c
@@ -2096,7 +2096,9 @@ static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
 		return false;
 
 	if (!force) {
-		if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
+		if (!f2fs_realtime_discard_enable(sbi) ||
+			(!se->valid_blocks &&
+				!IS_CURSEG(sbi, cpc->trim_start)) ||
 			SM_I(sbi)->dcc_info->nr_discards >=
 				SM_I(sbi)->dcc_info->max_discards)
 			return false;
@@ -2320,10 +2322,9 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
 	dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
 	dcc->max_ordered_discard = DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY;
 	dcc->discard_io_aware = DPOLICY_IO_AWARE_ENABLE;
-	if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
+	if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT ||
+		F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
 		dcc->discard_granularity = BLKS_PER_SEG(sbi);
-	else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
-		dcc->discard_granularity = BLKS_PER_SEC(sbi);
 
 	INIT_LIST_HEAD(&dcc->entry_list);
 	for (i = 0; i < MAX_PLIST_NUM; i++)
@@ -2806,7 +2807,7 @@ find_other_zone:
 							MAIN_SECS(sbi));
 		if (secno >= MAIN_SECS(sbi)) {
 			ret = -ENOSPC;
-			f2fs_bug_on(sbi, 1);
+			f2fs_bug_on(sbi, !pinning);
 			goto out_unlock;
 		}
 	}
@@ -2848,7 +2849,7 @@ got_it:
out_unlock:
 	spin_unlock(&free_i->segmap_lock);
 
-	if (ret == -ENOSPC)
+	if (ret == -ENOSPC && !pinning)
 		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_NO_SEGMENT);
 	return ret;
 }
@@ -2921,6 +2922,13 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
 	return curseg->segno;
 }
 
+static void reset_curseg_fields(struct curseg_info *curseg)
+{
+	curseg->inited = false;
+	curseg->segno = NULL_SEGNO;
+	curseg->next_segno = 0;
+}
+
 /*
  * Allocate a current working segment.
  * This function always allocates a free segment in LFS manner.
@@ -2939,7 +2947,7 @@ static int new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
 	ret = get_new_segment(sbi, &segno, new_sec, pinning);
 	if (ret) {
 		if (ret == -ENOSPC)
-			curseg->segno = NULL_SEGNO;
+			reset_curseg_fields(curseg);
 		return ret;
 	}
 
@@ -3710,13 +3718,6 @@ static void f2fs_randomize_chunk(struct f2fs_sb_info *sbi,
 			get_random_u32_inclusive(1, sbi->max_fragment_hole);
 }
 
-static void reset_curseg_fields(struct curseg_info *curseg)
-{
-	curseg->inited = false;
-	curseg->segno = NULL_SEGNO;
-	curseg->next_segno = 0;
-}
-
 int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 		block_t old_blkaddr, block_t *new_blkaddr,
 		struct f2fs_summary *sum, int type,
@@ -3902,6 +3903,7 @@ static int log_type_to_seg_type(enum log_type type)
 
 static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
 {
+	struct folio *folio = page_folio(fio->page);
 	enum log_type type = __get_segment_type(fio);
 	int seg_type = log_type_to_seg_type(type);
 	bool keep_order = (f2fs_lfs_mode(fio->sbi) &&
@@ -3912,10 +3914,10 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
 
 	if (f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
 			&fio->new_blkaddr, sum, type, fio)) {
-		if (fscrypt_inode_uses_fs_layer_crypto(fio->page->mapping->host))
+		if (fscrypt_inode_uses_fs_layer_crypto(folio->mapping->host))
 			fscrypt_finalize_bounce_page(&fio->encrypted_page);
-		end_page_writeback(fio->page);
-		if (f2fs_in_warm_node_list(fio->sbi, fio->page))
+		folio_end_writeback(folio);
+		if (f2fs_in_warm_node_list(fio->sbi, folio))
 			f2fs_del_fsync_node_entry(fio->sbi, fio->page);
 		goto out;
 	}
@@ -4154,22 +4156,21 @@ void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
 	f2fs_update_data_blkaddr(dn, new_addr);
 }
 
-void f2fs_wait_on_page_writeback(struct page *page,
-				enum page_type type, bool ordered, bool locked)
+void f2fs_folio_wait_writeback(struct folio *folio, enum page_type type,
+				bool ordered, bool locked)
 {
-	if (folio_test_writeback(page_folio(page))) {
-		struct f2fs_sb_info *sbi = F2FS_P_SB(page);
+	if (folio_test_writeback(folio)) {
+		struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
 
 		/* submit cached LFS IO */
-		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
+		f2fs_submit_merged_write_cond(sbi, NULL, &folio->page, 0, type);
 		/* submit cached IPU IO */
-		f2fs_submit_merged_ipu_write(sbi, NULL, page);
+		f2fs_submit_merged_ipu_write(sbi, NULL, &folio->page);
 		if (ordered) {
-			wait_on_page_writeback(page);
-			f2fs_bug_on(sbi, locked &&
-				folio_test_writeback(page_folio(page)));
+			folio_wait_writeback(folio);
+			f2fs_bug_on(sbi, locked && folio_test_writeback(folio));
 		} else {
-			wait_for_stable_page(page);
+			folio_wait_stable(folio);
 		}
 	}
 }
fs/f2fs/segment.h
@@ -559,13 +559,16 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
 		unsigned int node_blocks, unsigned int data_blocks,
 		unsigned int dent_blocks)
 {
 
 	unsigned int segno, left_blocks, blocks;
 	int i;
 
 	/* check current data/node sections in the worst case. */
 	for (i = CURSEG_HOT_DATA; i < NR_PERSISTENT_LOG; i++) {
 		segno = CURSEG_I(sbi, i)->segno;
 
+		if (unlikely(segno == NULL_SEGNO))
+			return false;
+
 		left_blocks = CAP_BLKS_PER_SEC(sbi) -
 				get_ckpt_valid_blocks(sbi, segno, true);
 
@@ -576,6 +579,10 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
 
 	/* check current data section for dentry blocks. */
 	segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
+
+	if (unlikely(segno == NULL_SEGNO))
+		return false;
+
 	left_blocks = CAP_BLKS_PER_SEC(sbi) -
 			get_ckpt_valid_blocks(sbi, segno, true);
 	if (dent_blocks > left_blocks)
fs/f2fs/shrinker.c
@@ -73,7 +73,7 @@ unsigned long f2fs_shrink_count(struct shrinker *shrink,
 		mutex_unlock(&sbi->umount_mutex);
 	}
 	spin_unlock(&f2fs_list_lock);
-	return count;
+	return count ?: SHRINK_EMPTY;
 }
 
 unsigned long f2fs_shrink_scan(struct shrinker *shrink,
@@ -130,6 +130,96 @@ unsigned long f2fs_shrink_scan(struct shrinker *shrink,
 	return freed;
 }
 
+unsigned int f2fs_donate_files(void)
+{
+	struct f2fs_sb_info *sbi;
+	struct list_head *p;
+	unsigned int donate_files = 0;
+
+	spin_lock(&f2fs_list_lock);
+	p = f2fs_list.next;
+	while (p != &f2fs_list) {
+		sbi = list_entry(p, struct f2fs_sb_info, s_list);
+
+		/* stop f2fs_put_super */
+		if (!mutex_trylock(&sbi->umount_mutex)) {
+			p = p->next;
+			continue;
+		}
+		spin_unlock(&f2fs_list_lock);
+
+		donate_files += sbi->donate_files;
+
+		spin_lock(&f2fs_list_lock);
+		p = p->next;
+		mutex_unlock(&sbi->umount_mutex);
+	}
+	spin_unlock(&f2fs_list_lock);
+
+	return donate_files;
+}
+
+static unsigned int do_reclaim_caches(struct f2fs_sb_info *sbi,
+				unsigned int reclaim_caches_kb)
+{
+	struct inode *inode;
+	struct f2fs_inode_info *fi;
+	unsigned int nfiles = sbi->donate_files;
+	pgoff_t npages = reclaim_caches_kb >> (PAGE_SHIFT - 10);
+
+	while (npages && nfiles--) {
+		pgoff_t len;
+
+		spin_lock(&sbi->inode_lock[DONATE_INODE]);
+		if (list_empty(&sbi->inode_list[DONATE_INODE])) {
+			spin_unlock(&sbi->inode_lock[DONATE_INODE]);
+			break;
+		}
+		fi = list_first_entry(&sbi->inode_list[DONATE_INODE],
+					struct f2fs_inode_info, gdonate_list);
+		list_move_tail(&fi->gdonate_list, &sbi->inode_list[DONATE_INODE]);
+		inode = igrab(&fi->vfs_inode);
+		spin_unlock(&sbi->inode_lock[DONATE_INODE]);
+
+		if (!inode)
+			continue;
+
+		len = fi->donate_end - fi->donate_start + 1;
+		npages = npages < len ? 0 : npages - len;
+		invalidate_inode_pages2_range(inode->i_mapping,
+				fi->donate_start, fi->donate_end);
+		iput(inode);
+		cond_resched();
+	}
+	return npages << (PAGE_SHIFT - 10);
+}
+
+void f2fs_reclaim_caches(unsigned int reclaim_caches_kb)
+{
+	struct f2fs_sb_info *sbi;
+	struct list_head *p;
+
+	spin_lock(&f2fs_list_lock);
+	p = f2fs_list.next;
+	while (p != &f2fs_list && reclaim_caches_kb) {
+		sbi = list_entry(p, struct f2fs_sb_info, s_list);
+
+		/* stop f2fs_put_super */
+		if (!mutex_trylock(&sbi->umount_mutex)) {
+			p = p->next;
+			continue;
+		}
+		spin_unlock(&f2fs_list_lock);
+
+		reclaim_caches_kb = do_reclaim_caches(sbi, reclaim_caches_kb);
+
+		spin_lock(&f2fs_list_lock);
+		p = p->next;
+		mutex_unlock(&sbi->umount_mutex);
+	}
+	spin_unlock(&f2fs_list_lock);
+}
+
 void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
 {
 	spin_lock(&f2fs_list_lock);
fs/f2fs/super.c
@ -63,6 +63,7 @@ const char *f2fs_fault_name[FAULT_MAX] = {
|
||||
[FAULT_BLKADDR_VALIDITY] = "invalid blkaddr",
|
||||
[FAULT_BLKADDR_CONSISTENCE] = "inconsistent blkaddr",
|
||||
[FAULT_NO_SEGMENT] = "no free segment",
|
||||
[FAULT_INCONSISTENT_FOOTER] = "inconsistent footer",
|
||||
};
|
||||
|
||||
int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
|
||||
@ -190,6 +191,7 @@ enum {
|
||||
Opt_memory_mode,
|
||||
Opt_age_extent_cache,
|
||||
Opt_errors,
|
||||
Opt_nat_bits,
|
||||
Opt_err,
|
||||
};
|
||||
|
||||
@ -269,6 +271,7 @@ static match_table_t f2fs_tokens = {
|
||||
{Opt_memory_mode, "memory=%s"},
|
||||
{Opt_age_extent_cache, "age_extent_cache"},
|
||||
{Opt_errors, "errors=%s"},
|
||||
{Opt_nat_bits, "nat_bits"},
|
||||
{Opt_err, NULL},
|
||||
};
|
||||
|
||||
@ -383,10 +386,10 @@ static void init_once(void *foo)
|
||||
#ifdef CONFIG_QUOTA
|
||||
static const char * const quotatypes[] = INITQFNAMES;
|
||||
#define QTYPE2NAME(t) (quotatypes[t])
|
||||
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
|
||||
static int f2fs_set_qf_name(struct f2fs_sb_info *sbi, int qtype,
|
||||
substring_t *args)
|
||||
{
|
||||
struct f2fs_sb_info *sbi = F2FS_SB(sb);
|
||||
struct super_block *sb = sbi->sb;
|
||||
char *qname;
|
||||
int ret = -EINVAL;
|
||||
|
||||
@ -424,9 +427,9 @@ errout:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
|
||||
static int f2fs_clear_qf_name(struct f2fs_sb_info *sbi, int qtype)
|
||||
{
|
||||
struct f2fs_sb_info *sbi = F2FS_SB(sb);
|
||||
struct super_block *sb = sbi->sb;
|
||||
|
||||
if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
|
||||
f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
|
||||
@ -483,12 +486,11 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
|
||||
}
|
||||
#endif
|
||||
|
||||
static int f2fs_set_test_dummy_encryption(struct super_block *sb,
|
||||
static int f2fs_set_test_dummy_encryption(struct f2fs_sb_info *sbi,
|
||||
const char *opt,
|
||||
const substring_t *arg,
|
||||
bool is_remount)
|
||||
{
|
||||
struct f2fs_sb_info *sbi = F2FS_SB(sb);
|
||||
struct fs_parameter param = {
|
||||
.type = fs_value_is_string,
|
||||
.string = arg->from ? arg->from : "",
|
||||
@ -671,9 +673,8 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
static int parse_options(struct super_block *sb, char *options, bool is_remount)
|
||||
static int parse_options(struct f2fs_sb_info *sbi, char *options, bool is_remount)
|
||||
{
|
||||
struct f2fs_sb_info *sbi = F2FS_SB(sb);
|
||||
substring_t args[MAX_OPT_ARGS];
|
||||
#ifdef CONFIG_F2FS_FS_COMPRESSION
|
||||
unsigned char (*ext)[F2FS_EXTENSION_LEN];
|
||||
@ -687,7 +688,7 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
|
||||
int ret;
|
||||
|
||||
if (!options)
|
||||
goto default_check;
|
||||
return 0;
|
||||
|
||||
while ((p = strsep(&options, ",")) != NULL) {
|
||||
int token;
|
||||
@ -728,10 +729,8 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
|
||||
set_opt(sbi, DISABLE_ROLL_FORWARD);
|
||||
break;
|
||||
case Opt_norecovery:
|
||||
/* this option mounts f2fs with ro */
|
||||
/* requires ro mount, checked in f2fs_default_check */
|
||||
set_opt(sbi, NORECOVERY);
|
||||
if (!f2fs_readonly(sb))
|
||||
return -EINVAL;
|
||||
break;
|
||||
case Opt_discard:
|
||||
if (!f2fs_hw_support_discard(sbi)) {
|
||||
@ -772,16 +771,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
|
||||
break;
|
||||
#else
|
||||
case Opt_user_xattr:
|
||||
f2fs_info(sbi, "user_xattr options not supported");
|
||||
break;
|
||||
case Opt_nouser_xattr:
|
||||
f2fs_info(sbi, "nouser_xattr options not supported");
|
||||
break;
|
||||
case Opt_inline_xattr:
|
||||
f2fs_info(sbi, "inline_xattr options not supported");
|
||||
break;
|
||||
case Opt_noinline_xattr:
|
||||
f2fs_info(sbi, "noinline_xattr options not supported");
|
||||
case Opt_inline_xattr_size:
|
||||
f2fs_info(sbi, "xattr options not supported");
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_F2FS_FS_POSIX_ACL
|
||||
@ -793,10 +787,8 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
|
||||
break;
|
||||
#else
|
||||
case Opt_acl:
|
||||
f2fs_info(sbi, "acl options not supported");
|
||||
break;
|
||||
case Opt_noacl:
|
||||
f2fs_info(sbi, "noacl options not supported");
|
||||
f2fs_info(sbi, "acl options not supported");
|
||||
break;
|
||||
#endif
|
||||
case Opt_active_logs:
|
||||
@ -838,7 +830,7 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
|
||||
set_opt(sbi, READ_EXTENT_CACHE);
|
||||
break;
|
||||
case Opt_noextent_cache:
|
||||
if (F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_DEVICE_ALIAS)) {
|
||||
if (f2fs_sb_has_device_alias(sbi)) {
|
||||
f2fs_err(sbi, "device aliasing requires extent cache");
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -919,18 +911,15 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
|
||||
break;
|
||||
#else
|
||||
case Opt_fault_injection:
|
||||
f2fs_info(sbi, "fault_injection options not supported");
|
||||
break;
|
||||
|
||||
case Opt_fault_type:
|
||||
f2fs_info(sbi, "fault_type options not supported");
|
||||
f2fs_info(sbi, "fault injection options not supported");
|
||||
break;
|
||||
#endif
|
||||
case Opt_lazytime:
|
||||
sb->s_flags |= SB_LAZYTIME;
|
||||
set_opt(sbi, LAZYTIME);
|
||||
break;
|
||||
case Opt_nolazytime:
|
||||
sb->s_flags &= ~SB_LAZYTIME;
|
||||
clear_opt(sbi, LAZYTIME);
|
||||
break;
|
||||
#ifdef CONFIG_QUOTA
|
||||
case Opt_quota:
|
||||
@ -944,32 +933,32 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
|
||||
set_opt(sbi, PRJQUOTA);
|
||||
break;
|
||||
case Opt_usrjquota:
|
||||
ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
|
||||
ret = f2fs_set_qf_name(sbi, USRQUOTA, &args[0]);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
case Opt_grpjquota:
|
||||
ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
|
||||
ret = f2fs_set_qf_name(sbi, GRPQUOTA, &args[0]);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
case Opt_prjjquota:
|
||||
ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
|
||||
ret = f2fs_set_qf_name(sbi, PRJQUOTA, &args[0]);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
case Opt_offusrjquota:
|
||||
ret = f2fs_clear_qf_name(sb, USRQUOTA);
|
||||
ret = f2fs_clear_qf_name(sbi, USRQUOTA);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
case Opt_offgrpjquota:
|
||||
ret = f2fs_clear_qf_name(sb, GRPQUOTA);
|
||||
ret = f2fs_clear_qf_name(sbi, GRPQUOTA);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
case Opt_offprjjquota:
|
||||
ret = f2fs_clear_qf_name(sb, PRJQUOTA);
|
||||
ret = f2fs_clear_qf_name(sbi, PRJQUOTA);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
@ -1039,14 +1028,14 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
|
||||
kfree(name);
|
||||
break;
|
||||
case Opt_test_dummy_encryption:
|
||||
ret = f2fs_set_test_dummy_encryption(sb, p, &args[0],
|
||||
ret = f2fs_set_test_dummy_encryption(sbi, p, &args[0],
|
||||
is_remount);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
case Opt_inlinecrypt:
|
||||
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
|
||||
sb->s_flags |= SB_INLINECRYPT;
|
||||
set_opt(sbi, INLINECRYPT);
|
||||
#else
|
||||
f2fs_info(sbi, "inline encryption not supported");
|
||||
#endif
|
||||
@ -1322,13 +1311,20 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
|
||||
}
|
||||
kfree(name);
|
||||
break;
|
||||
case Opt_nat_bits:
|
||||
set_opt(sbi, NAT_BITS);
|
||||
break;
|
||||
default:
|
||||
f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
|
||||
p);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
default_check:
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int f2fs_default_check(struct f2fs_sb_info *sbi)
|
||||
{
|
||||
#ifdef CONFIG_QUOTA
|
||||
if (f2fs_check_quota_options(sbi))
|
||||
return -EINVAL;
|
||||
@ -1418,6 +1414,12 @@ default_check:
|
||||
f2fs_err(sbi, "Allow to mount readonly mode only");
|
||||
return -EROFS;
|
||||
}
|
||||
|
||||
if (test_opt(sbi, NORECOVERY) && !f2fs_readonly(sbi->sb)) {
|
||||
f2fs_err(sbi, "norecovery requires readonly mount");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1441,6 +1443,7 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
|
||||
spin_lock_init(&fi->i_size_lock);
|
||||
INIT_LIST_HEAD(&fi->dirty_list);
|
||||
INIT_LIST_HEAD(&fi->gdirty_list);
|
||||
INIT_LIST_HEAD(&fi->gdonate_list);
|
||||
init_f2fs_rwsem(&fi->i_gc_rwsem[READ]);
|
||||
init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
|
||||
init_f2fs_rwsem(&fi->i_xattr_sem);
|
||||
@ -1527,6 +1530,10 @@ int f2fs_inode_dirtied(struct inode *inode, bool sync)
|
||||
inc_page_count(sbi, F2FS_DIRTY_IMETA);
|
||||
}
|
||||
spin_unlock(&sbi->inode_lock[DIRTY_META]);
|
||||
|
||||
if (!ret && f2fs_is_atomic_file(inode))
|
||||
set_inode_flag(inode, FI_ATOMIC_DIRTIED);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1737,22 +1744,28 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
|
||||
|
||||
static int f2fs_freeze(struct super_block *sb)
|
||||
{
|
||||
struct f2fs_sb_info *sbi = F2FS_SB(sb);
|
||||
|
||||
if (f2fs_readonly(sb))
|
||||
return 0;
|
||||
|
||||
/* IO error happened before */
|
||||
if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
|
||||
if (unlikely(f2fs_cp_error(sbi)))
|
||||
return -EIO;
|
||||
|
||||
/* must be clean, since sync_filesystem() was already called */
|
||||
if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
|
||||
if (is_sbi_flag_set(sbi, SBI_IS_DIRTY))
|
||||
return -EINVAL;
|
||||
|
||||
sbi->umount_lock_holder = current;
|
||||
|
||||
/* Let's flush checkpoints and stop the thread. */
|
||||
f2fs_flush_ckpt_thread(F2FS_SB(sb));
|
||||
f2fs_flush_ckpt_thread(sbi);
|
||||
|
||||
sbi->umount_lock_holder = NULL;
|
||||
|
||||
/* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */
|
||||
set_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
|
||||
set_sbi_flag(sbi, SBI_IS_FREEZING);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1836,7 +1849,8 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
|
||||
buf->f_blocks = total_count - start_count;
|
||||
|
||||
spin_lock(&sbi->stat_lock);
|
||||
|
||||
if (sbi->carve_out)
|
||||
buf->f_blocks -= sbi->current_reserved_blocks;
|
||||
user_block_count = sbi->user_block_count;
|
||||
total_valid_node_count = valid_node_count(sbi);
|
||||
avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
|
||||
@ -2128,6 +2142,9 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
|
||||
else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC)
|
||||
seq_printf(seq, ",errors=%s", "panic");
|
||||
|
||||
if (test_opt(sbi, NAT_BITS))
|
||||
seq_puts(seq, ",nat_bits");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -2175,8 +2192,8 @@ static void default_options(struct f2fs_sb_info *sbi, bool remount)
|
||||
set_opt(sbi, INLINE_DATA);
|
||||
set_opt(sbi, INLINE_DENTRY);
|
||||
set_opt(sbi, MERGE_CHECKPOINT);
|
||||
set_opt(sbi, LAZYTIME);
|
||||
F2FS_OPTION(sbi).unusable_cap = 0;
|
||||
sbi->sb->s_flags |= SB_LAZYTIME;
|
||||
if (!f2fs_is_readonly(sbi))
|
||||
set_opt(sbi, FLUSH_MERGE);
|
||||
if (f2fs_sb_has_blkzoned(sbi))
|
||||
@ -2318,6 +2335,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
|
||||
bool no_discard = !test_opt(sbi, DISCARD);
|
||||
bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
|
||||
bool block_unit_discard = f2fs_block_unit_discard(sbi);
|
||||
bool no_nat_bits = !test_opt(sbi, NAT_BITS);
|
||||
#ifdef CONFIG_QUOTA
|
||||
int i, j;
|
||||
#endif
|
||||
@ -2329,6 +2347,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
|
||||
org_mount_opt = sbi->mount_opt;
|
||||
old_sb_flags = sb->s_flags;
|
||||
|
||||
sbi->umount_lock_holder = current;
|
||||
|
||||
#ifdef CONFIG_QUOTA
|
||||
org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
|
||||
for (i = 0; i < MAXQUOTAS; i++) {
|
||||
@ -2359,7 +2379,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
|
||||
default_options(sbi, true);
|
||||
|
||||
/* parse mount options */
|
||||
err = parse_options(sb, data, true);
|
||||
err = parse_options(sbi, data, true);
|
||||
if (err)
|
||||
goto restore_opts;
|
||||
|
||||
@ -2374,6 +2394,10 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
|
||||
}
|
||||
#endif
|
||||
|
||||
err = f2fs_default_check(sbi);
|
||||
if (err)
|
||||
goto restore_opts;
|
||||
|
||||
/* flush outstanding errors before changing fs state */
|
||||
flush_work(&sbi->s_error_work);
|
||||
|
||||
@ -2444,6 +2468,12 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
|
||||
goto restore_opts;
|
||||
}
|
||||
|
||||
if (no_nat_bits == !!test_opt(sbi, NAT_BITS)) {
|
||||
err = -EINVAL;
|
||||
f2fs_warn(sbi, "switch nat_bits option is not allowed");
|
||||
goto restore_opts;
|
||||
}
|
||||
|
||||
if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
|
||||
err = -EINVAL;
|
||||
f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
|
||||
@@ -2552,6 +2582,8 @@ skip:
 
 	limit_reserve_root(sbi);
 	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
+
+	sbi->umount_lock_holder = NULL;
 	return 0;
 restore_checkpoint:
 	if (need_enable_checkpoint) {
@@ -2592,6 +2624,8 @@ restore_opts:
 #endif
 	sbi->mount_opt = org_mount_opt;
 	sb->s_flags = old_sb_flags;
+
+	sbi->umount_lock_holder = NULL;
 	return err;
 }
 
@@ -2908,7 +2942,7 @@ out:
 	return ret;
 }
 
-int f2fs_quota_sync(struct super_block *sb, int type)
+int f2fs_do_quota_sync(struct super_block *sb, int type)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
 	struct quota_info *dqopt = sb_dqopt(sb);
@@ -2956,11 +2990,21 @@ int f2fs_quota_sync(struct super_block *sb, int type)
 	return ret;
 }
 
+static int f2fs_quota_sync(struct super_block *sb, int type)
+{
+	int ret;
+
+	F2FS_SB(sb)->umount_lock_holder = current;
+	ret = f2fs_do_quota_sync(sb, type);
+	F2FS_SB(sb)->umount_lock_holder = NULL;
+	return ret;
+}
+
 static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
 			const struct path *path)
 {
 	struct inode *inode;
-	int err;
+	int err = 0;
 
 	/* if quota sysfile exists, deny enabling quota with specific file */
 	if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
@@ -2971,31 +3015,34 @@ static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
 	if (path->dentry->d_sb != sb)
 		return -EXDEV;
 
-	err = f2fs_quota_sync(sb, type);
+	F2FS_SB(sb)->umount_lock_holder = current;
+
+	err = f2fs_do_quota_sync(sb, type);
 	if (err)
-		return err;
+		goto out;
 
 	inode = d_inode(path->dentry);
 
 	err = filemap_fdatawrite(inode->i_mapping);
 	if (err)
-		return err;
+		goto out;
 
 	err = filemap_fdatawait(inode->i_mapping);
 	if (err)
-		return err;
+		goto out;
 
 	err = dquot_quota_on(sb, type, format_id, path);
 	if (err)
-		return err;
+		goto out;
 
 	inode_lock(inode);
 	F2FS_I(inode)->i_flags |= F2FS_QUOTA_DEFAULT_FL;
 	f2fs_set_inode_flags(inode);
 	inode_unlock(inode);
 	f2fs_mark_inode_dirty_sync(inode, false);
 
-	return 0;
+out:
+	F2FS_SB(sb)->umount_lock_holder = NULL;
+	return err;
 }
 
 static int __f2fs_quota_off(struct super_block *sb, int type)
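
The quota entry points above all follow the same discipline: record the calling task in sbi->umount_lock_holder before doing checkpoint-visible work, and clear it afterwards. A minimal sketch of that recurring guard; f2fs_with_umount_holder is a hypothetical name, the series open-codes the pattern rather than adding such a helper:

	/*
	 * Hypothetical helper showing the guard that f2fs_quota_sync(),
	 * f2fs_quota_on(), f2fs_quota_off() and f2fs_remount() open-code:
	 * mark the calling task as the umount-lock holder around op().
	 */
	static int f2fs_with_umount_holder(struct f2fs_sb_info *sbi,
					   int (*op)(struct f2fs_sb_info *sbi))
	{
		int ret;

		sbi->umount_lock_holder = current;	/* this task owns the umount path */
		ret = op(sbi);
		sbi->umount_lock_holder = NULL;		/* release ownership */
		return ret;
	}
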
@@ -3006,7 +3053,7 @@ static int __f2fs_quota_off(struct super_block *sb, int type)
 	if (!inode || !igrab(inode))
 		return dquot_quota_off(sb, type);
 
-	err = f2fs_quota_sync(sb, type);
+	err = f2fs_do_quota_sync(sb, type);
 	if (err)
 		goto out_put;
 
@@ -3029,6 +3076,8 @@ static int f2fs_quota_off(struct super_block *sb, int type)
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
 	int err;
 
+	F2FS_SB(sb)->umount_lock_holder = current;
+
 	err = __f2fs_quota_off(sb, type);
 
 	/*
@@ -3038,6 +3087,9 @@ static int f2fs_quota_off(struct super_block *sb, int type)
 	 */
 	if (is_journalled_quota(sbi))
 		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+
+	F2FS_SB(sb)->umount_lock_holder = NULL;
+
 	return err;
 }
 
@@ -3170,7 +3222,7 @@ int f2fs_dquot_initialize(struct inode *inode)
 	return 0;
 }
 
-int f2fs_quota_sync(struct super_block *sb, int type)
+int f2fs_do_quota_sync(struct super_block *sb, int type)
 {
 	return 0;
 }
@@ -4220,6 +4272,8 @@ void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason)
 
 	if (shutdown)
 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
+	else
+		dump_stack();
 
 	/*
 	 * Continue filesystem operators if errors=continue. Should not set
@@ -4495,7 +4549,11 @@ try_onemore:
 		goto free_sb_buf;
 	}
 
-	err = parse_options(sb, options, false);
+	err = parse_options(sbi, options, false);
 	if (err)
 		goto free_options;
 
+	err = f2fs_default_check(sbi);
+	if (err)
+		goto free_options;
+
@@ -4533,6 +4591,14 @@ try_onemore:
 	sb->s_time_gran = 1;
 	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
 		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
+	if (test_opt(sbi, INLINECRYPT))
+		sb->s_flags |= SB_INLINECRYPT;
+
+	if (test_opt(sbi, LAZYTIME))
+		sb->s_flags |= SB_LAZYTIME;
+	else
+		sb->s_flags &= ~SB_LAZYTIME;
+
 	super_set_uuid(sb, (void *) raw_super->uuid, sizeof(raw_super->uuid));
 	super_set_sysfs_name_bdev(sb);
 	sb->s_iflags |= SB_I_CGROUPWB;
@@ -4703,6 +4769,7 @@ try_onemore:
 	if (err)
 		goto free_compress_inode;
 
+	sbi->umount_lock_holder = current;
 #ifdef CONFIG_QUOTA
 	/* Enable quota usage during mount */
 	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
@@ -4718,8 +4785,10 @@ try_onemore:
 	if (err)
 		goto free_meta;
 
-	if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
+	if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG))) {
+		skip_recovery = true;
 		goto reset_checkpoint;
+	}
 
 	/* recover fsynced data */
 	if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
@@ -4769,10 +4838,10 @@ try_onemore:
 		}
 	}
 
-reset_checkpoint:
 #ifdef CONFIG_QUOTA
 	f2fs_recover_quota_end(sbi, quota_enabled);
 #endif
+reset_checkpoint:
 	/*
 	 * If the f2fs is not readonly and fsync data recovery succeeds,
 	 * write pointer consistency of cursegs and other zones are already
@@ -4829,6 +4898,8 @@ reset_checkpoint:
 	f2fs_update_time(sbi, CP_TIME);
 	f2fs_update_time(sbi, REQ_TIME);
 	clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
+
+	sbi->umount_lock_holder = NULL;
 	return 0;
 
 sync_free_meta:
@@ -4931,6 +5002,8 @@ static void kill_f2fs_super(struct super_block *sb)
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
 
 	if (sb->s_root) {
+		sbi->umount_lock_holder = current;
+
 		set_sbi_flag(sbi, SBI_IS_CLOSE);
 		f2fs_stop_gc_thread(sbi);
 		f2fs_stop_discard_thread(sbi);

139	fs/f2fs/sysfs.c
@@ -61,6 +61,12 @@ struct f2fs_attr {
 	int id;
 };
 
+struct f2fs_base_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct f2fs_base_attr *a, char *buf);
+	ssize_t (*store)(struct f2fs_base_attr *a, const char *buf, size_t len);
+};
+
 static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
 			struct f2fs_sb_info *sbi, char *buf);
 
@@ -862,6 +868,25 @@ static void f2fs_sb_release(struct kobject *kobj)
 	complete(&sbi->s_kobj_unregister);
 }
 
+static ssize_t f2fs_base_attr_show(struct kobject *kobj,
+				struct attribute *attr, char *buf)
+{
+	struct f2fs_base_attr *a = container_of(attr,
+				struct f2fs_base_attr, attr);
+
+	return a->show ? a->show(a, buf) : 0;
+}
+
+static ssize_t f2fs_base_attr_store(struct kobject *kobj,
+				struct attribute *attr,
+				const char *buf, size_t len)
+{
+	struct f2fs_base_attr *a = container_of(attr,
+				struct f2fs_base_attr, attr);
+
+	return a->store ? a->store(a, buf, len) : 0;
+}
+
 /*
  * Note that there are three feature list entries:
  * 1) /sys/fs/f2fs/features
@@ -880,18 +905,50 @@ static void f2fs_sb_release(struct kobject *kobj)
  * please add new on-disk feature in this list only.
  * - ref. F2FS_SB_FEATURE_RO_ATTR()
  */
-static ssize_t f2fs_feature_show(struct f2fs_attr *a,
-		struct f2fs_sb_info *sbi, char *buf)
+static ssize_t f2fs_feature_show(struct f2fs_base_attr *a, char *buf)
 {
 	return sysfs_emit(buf, "supported\n");
 }
 
 #define F2FS_FEATURE_RO_ATTR(_name)				\
-static struct f2fs_attr f2fs_attr_##_name = {			\
+static struct f2fs_base_attr f2fs_base_attr_##_name = {		\
 	.attr = {.name = __stringify(_name), .mode = 0444 },	\
 	.show	= f2fs_feature_show,				\
 }
 
+static ssize_t f2fs_tune_show(struct f2fs_base_attr *a, char *buf)
+{
+	unsigned int res = 0;
+
+	if (!strcmp(a->attr.name, "reclaim_caches_kb"))
+		res = f2fs_donate_files();
+
+	return sysfs_emit(buf, "%u\n", res);
+}
+
+static ssize_t f2fs_tune_store(struct f2fs_base_attr *a,
+			const char *buf, size_t count)
+{
+	unsigned long t;
+	int ret;
+
+	ret = kstrtoul(skip_spaces(buf), 0, &t);
+	if (ret)
+		return ret;
+
+	if (!strcmp(a->attr.name, "reclaim_caches_kb"))
+		f2fs_reclaim_caches(t);
+
+	return count;
+}
+
+#define F2FS_TUNE_RW_ATTR(_name)				\
+static struct f2fs_base_attr f2fs_base_attr_##_name = {	\
+	.attr = {.name = __stringify(_name), .mode = 0644 },	\
+	.show	= f2fs_tune_show,				\
+	.store	= f2fs_tune_store,				\
+}
+
 static ssize_t f2fs_sb_feature_show(struct f2fs_attr *a,
 		struct f2fs_sb_info *sbi, char *buf)
 {
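
For reference, hand-expanding the converted macro shows what a feature entry now looks like; feature nodes carry no per-superblock state, which is why they can drop struct f2fs_attr:

	/* Hand-expanded F2FS_FEATURE_RO_ATTR(encryption) under the new macro: */
	static struct f2fs_base_attr f2fs_base_attr_encryption = {
		.attr	= {.name = "encryption", .mode = 0444 },
		.show	= f2fs_feature_show,
	};
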
@@ -1065,6 +1122,7 @@ F2FS_SBI_GENERAL_RW_ATTR(max_read_extent_count);
 F2FS_SBI_GENERAL_RO_ATTR(unusable_blocks_per_sec);
 F2FS_SBI_GENERAL_RW_ATTR(blkzone_alloc_policy);
 #endif
+F2FS_SBI_GENERAL_RW_ATTR(carve_out);
 
 /* STAT_INFO ATTR */
 #ifdef CONFIG_F2FS_STAT_FS
@@ -1252,41 +1310,43 @@ static struct attribute *f2fs_attrs[] = {
 	ATTR_LIST(warm_data_age_threshold),
 	ATTR_LIST(last_age_weight),
 	ATTR_LIST(max_read_extent_count),
+	ATTR_LIST(carve_out),
 	NULL,
 };
 ATTRIBUTE_GROUPS(f2fs);
 
+#define BASE_ATTR_LIST(name) (&f2fs_base_attr_##name.attr)
 static struct attribute *f2fs_feat_attrs[] = {
 #ifdef CONFIG_FS_ENCRYPTION
-	ATTR_LIST(encryption),
-	ATTR_LIST(test_dummy_encryption_v2),
+	BASE_ATTR_LIST(encryption),
+	BASE_ATTR_LIST(test_dummy_encryption_v2),
 #if IS_ENABLED(CONFIG_UNICODE)
-	ATTR_LIST(encrypted_casefold),
+	BASE_ATTR_LIST(encrypted_casefold),
 #endif
 #endif /* CONFIG_FS_ENCRYPTION */
 #ifdef CONFIG_BLK_DEV_ZONED
-	ATTR_LIST(block_zoned),
+	BASE_ATTR_LIST(block_zoned),
 #endif
-	ATTR_LIST(atomic_write),
-	ATTR_LIST(extra_attr),
-	ATTR_LIST(project_quota),
-	ATTR_LIST(inode_checksum),
-	ATTR_LIST(flexible_inline_xattr),
-	ATTR_LIST(quota_ino),
-	ATTR_LIST(inode_crtime),
-	ATTR_LIST(lost_found),
+	BASE_ATTR_LIST(atomic_write),
+	BASE_ATTR_LIST(extra_attr),
+	BASE_ATTR_LIST(project_quota),
+	BASE_ATTR_LIST(inode_checksum),
+	BASE_ATTR_LIST(flexible_inline_xattr),
+	BASE_ATTR_LIST(quota_ino),
+	BASE_ATTR_LIST(inode_crtime),
+	BASE_ATTR_LIST(lost_found),
 #ifdef CONFIG_FS_VERITY
-	ATTR_LIST(verity),
+	BASE_ATTR_LIST(verity),
 #endif
-	ATTR_LIST(sb_checksum),
+	BASE_ATTR_LIST(sb_checksum),
 #if IS_ENABLED(CONFIG_UNICODE)
-	ATTR_LIST(casefold),
+	BASE_ATTR_LIST(casefold),
 #endif
-	ATTR_LIST(readonly),
+	BASE_ATTR_LIST(readonly),
 #ifdef CONFIG_F2FS_FS_COMPRESSION
-	ATTR_LIST(compression),
+	BASE_ATTR_LIST(compression),
 #endif
-	ATTR_LIST(pin_file),
+	BASE_ATTR_LIST(pin_file),
 	NULL,
 };
 ATTRIBUTE_GROUPS(f2fs_feat);
@@ -1343,6 +1403,14 @@ static struct attribute *f2fs_sb_feat_attrs[] = {
 };
 ATTRIBUTE_GROUPS(f2fs_sb_feat);
 
+F2FS_TUNE_RW_ATTR(reclaim_caches_kb);
+
+static struct attribute *f2fs_tune_attrs[] = {
+	BASE_ATTR_LIST(reclaim_caches_kb),
+	NULL,
+};
+ATTRIBUTE_GROUPS(f2fs_tune);
+
 static const struct sysfs_ops f2fs_attr_ops = {
 	.show = f2fs_attr_show,
 	.store = f2fs_attr_store,
||||
.kobj = {.ktype = &f2fs_ktype},
|
||||
};
|
||||
|
||||
static const struct sysfs_ops f2fs_feat_attr_ops = {
|
||||
.show = f2fs_base_attr_show,
|
||||
.store = f2fs_base_attr_store,
|
||||
};
|
||||
|
||||
static const struct kobj_type f2fs_feat_ktype = {
|
||||
.default_groups = f2fs_feat_groups,
|
||||
.sysfs_ops = &f2fs_attr_ops,
|
||||
.sysfs_ops = &f2fs_feat_attr_ops,
|
||||
};
|
||||
|
||||
static struct kobject f2fs_feat = {
|
||||
.kset = &f2fs_kset,
|
||||
};
|
||||
|
||||
static const struct sysfs_ops f2fs_tune_attr_ops = {
|
||||
.show = f2fs_base_attr_show,
|
||||
.store = f2fs_base_attr_store,
|
||||
};
|
||||
|
||||
static const struct kobj_type f2fs_tune_ktype = {
|
||||
.default_groups = f2fs_tune_groups,
|
||||
.sysfs_ops = &f2fs_tune_attr_ops,
|
||||
};
|
||||
|
||||
static struct kobject f2fs_tune = {
|
||||
.kset = &f2fs_kset,
|
||||
};
|
||||
|
||||
static ssize_t f2fs_stat_attr_show(struct kobject *kobj,
|
||||
struct attribute *attr, char *buf)
|
||||
{
|
||||
@@ -1607,6 +1694,11 @@ int __init f2fs_init_sysfs(void)
 	if (ret)
 		goto put_kobject;
 
+	ret = kobject_init_and_add(&f2fs_tune, &f2fs_tune_ktype,
+					NULL, "tuning");
+	if (ret)
+		goto put_kobject;
+
 	f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
 	if (!f2fs_proc_root) {
 		ret = -ENOMEM;
@@ -1614,7 +1706,9 @@ int __init f2fs_init_sysfs(void)
 	}
 
 	return 0;
 
 put_kobject:
+	kobject_put(&f2fs_tune);
 	kobject_put(&f2fs_feat);
 	kset_unregister(&f2fs_kset);
 	return ret;
@@ -1622,6 +1716,7 @@ put_kobject:
 
 void f2fs_exit_sysfs(void)
 {
+	kobject_put(&f2fs_tune);
 	kobject_put(&f2fs_feat);
 	kset_unregister(&f2fs_kset);
 	remove_proc_entry("fs/f2fs", NULL);
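
The kobject registered above is named "tuning" inside the f2fs kset, so the attribute defined earlier should surface as /sys/fs/f2fs/tuning/reclaim_caches_kb (the full path is inferred from the kset name, not spelled out in this diff). Writes are parsed with kstrtoul() and handed to f2fs_reclaim_caches(); reads report f2fs_donate_files(). A minimal user-space sketch, assuming the value is a size in KiB as the attribute name suggests:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Assumed path: the "tuning" kobject inside the /sys/fs/f2fs kset. */
	#define RECLAIM_PATH "/sys/fs/f2fs/tuning/reclaim_caches_kb"

	int main(void)
	{
		int fd = open(RECLAIM_PATH, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* Ask f2fs to reclaim up to 16 MiB of donated page cache. */
		if (write(fd, "16384", 5) != 5)
			perror("write");
		close(fd);
		return 0;
	}
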
@@ -282,7 +282,7 @@ static int read_inline_xattr(struct inode *inode, struct page *ipage,
 	if (ipage) {
 		inline_addr = inline_xattr_addr(inode, ipage);
 	} else {
-		page = f2fs_get_node_page(sbi, inode->i_ino);
+		page = f2fs_get_inode_page(sbi, inode->i_ino);
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 
@@ -303,7 +303,7 @@ static int read_xattr_block(struct inode *inode, void *txattr_addr)
 	void *xattr_addr;
 
 	/* The inode already has an extended attribute block. */
-	xpage = f2fs_get_node_page(sbi, xnid);
+	xpage = f2fs_get_xnode_page(sbi, xnid);
 	if (IS_ERR(xpage))
 		return PTR_ERR(xpage);
 
@@ -449,7 +449,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
 	if (ipage) {
 		inline_addr = inline_xattr_addr(inode, ipage);
 	} else {
-		in_page = f2fs_get_node_page(sbi, inode->i_ino);
+		in_page = f2fs_get_inode_page(sbi, inode->i_ino);
 		if (IS_ERR(in_page)) {
 			f2fs_alloc_nid_failed(sbi, new_nid);
 			return PTR_ERR(in_page);
@@ -475,7 +475,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
 
 	/* write to xattr node block */
 	if (F2FS_I(inode)->i_xattr_nid) {
-		xpage = f2fs_get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
+		xpage = f2fs_get_xnode_page(sbi, F2FS_I(inode)->i_xattr_nid);
 		if (IS_ERR(xpage)) {
 			err = PTR_ERR(xpage);
 			f2fs_alloc_nid_failed(sbi, new_nid);
@@ -990,9 +990,6 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
 unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
 		pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);
 
-struct page *grab_cache_page_write_begin(struct address_space *mapping,
-		pgoff_t index);
-
 /*
  * Returns locked page at given index in given cache, creating it if needed.
  */
@@ -1247,18 +1244,12 @@ static inline int folio_wait_locked_killable(struct folio *folio)
 	return folio_wait_bit_killable(folio, PG_locked);
 }
 
-static inline void wait_on_page_locked(struct page *page)
-{
-	folio_wait_locked(page_folio(page));
-}
-
 void folio_end_read(struct folio *folio, bool success);
 void wait_on_page_writeback(struct page *page);
 void folio_wait_writeback(struct folio *folio);
 int folio_wait_writeback_killable(struct folio *folio);
 void end_page_writeback(struct page *page);
 void folio_end_writeback(struct folio *folio);
-void wait_for_stable_page(struct page *page);
 void folio_wait_stable(struct folio *folio);
 void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
 void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
@@ -44,6 +44,7 @@
 #define F2FS_IOC_COMPRESS_FILE		_IO(F2FS_IOCTL_MAGIC, 24)
 #define F2FS_IOC_START_ATOMIC_REPLACE	_IO(F2FS_IOCTL_MAGIC, 25)
 #define F2FS_IOC_GET_DEV_ALIAS_FILE	_IOR(F2FS_IOCTL_MAGIC, 26, __u32)
+#define F2FS_IOC_IO_PRIO		_IOW(F2FS_IOCTL_MAGIC, 27, __u32)
 
 /*
  * should be same as XFS_IOC_GOINGDOWN.
@@ -63,6 +64,12 @@
 #define F2FS_TRIM_FILE_ZEROOUT		0x2	/* zero out */
 #define F2FS_TRIM_FILE_MASK		0x3
 
+/* for F2FS_IOC_IO_PRIO */
+enum {
+	F2FS_IOPRIO_WRITE = 1,	/* high write priority */
+	F2FS_IOPRIO_MAX,
+};
+
 struct f2fs_gc_range {
 	__u32 sync;
 	__u64 start;
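
A sketch of how user space might set the new hint. F2FS_IOC_IO_PRIO is declared _IOW with a __u32, so the argument is passed by pointer in the usual ioctl style; the precise effect of F2FS_IOPRIO_WRITE is up to the kernel-side handler, which is not part of this hunk:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/types.h>
	#include <linux/f2fs.h>		/* F2FS_IOC_IO_PRIO, F2FS_IOPRIO_WRITE */

	int main(int argc, char **argv)
	{
		__u32 hint = F2FS_IOPRIO_WRITE;	/* request high write IO priority */
		int fd;

		if (argc < 2) {
			fprintf(stderr, "usage: %s <file-on-f2fs>\n", argv[0]);
			return 1;
		}
		fd = open(argv[1], O_RDWR);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, F2FS_IOC_IO_PRIO, &hint) < 0)
			perror("F2FS_IOC_IO_PRIO");
		close(fd);
		return 0;
	}
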
@@ -1390,7 +1390,7 @@ repeat:
  * @ptl: already locked ptl. This function will drop the lock.
  *
  * Wait for a migration entry referencing the given page to be removed. This is
- * equivalent to put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE) except
+ * equivalent to folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE) except
  * this can be called without taking a reference on the page. Instead this
  * should be called while holding the ptl for the migration entry referencing
  * the page.
@@ -28,12 +28,6 @@ void wait_on_page_writeback(struct page *page)
 }
 EXPORT_SYMBOL_GPL(wait_on_page_writeback);
 
-void wait_for_stable_page(struct page *page)
-{
-	return folio_wait_stable(page_folio(page));
-}
-EXPORT_SYMBOL_GPL(wait_for_stable_page);
-
 void mark_page_accessed(struct page *page)
 {
 	folio_mark_accessed(page_folio(page));
@@ -90,11 +84,3 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
 	return folio_file_page(folio, index);
 }
 EXPORT_SYMBOL(pagecache_get_page);
-
-struct page *grab_cache_page_write_begin(struct address_space *mapping,
-		pgoff_t index)
-{
-	return pagecache_get_page(mapping, index, FGP_WRITEBEGIN,
-			mapping_gfp_mask(mapping));
-}
-EXPORT_SYMBOL(grab_cache_page_write_begin);
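
With f2fs converted to folios, these compatibility wrappers lose their last users and can go. A sketch of what a former grab_cache_page_write_begin() caller does instead, using __filemap_get_folio() with the same FGP_WRITEBEGIN flags the wrapper forwarded; note that __filemap_get_folio() reports failure as an ERR_PTR rather than the NULL the old wrapper returned:

	/*
	 * Folio-native replacement for the removed grab_cache_page_write_begin():
	 * same flags, same gfp mask, but the caller gets a folio (or ERR_PTR).
	 */
	static struct folio *write_begin_folio(struct address_space *mapping,
						pgoff_t index)
	{
		return __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
						mapping_gfp_mask(mapping));
	}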