// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/inline.c
* Copyright (c) 2013, Intel Corporation
* Authors: Huajun Li <huajun.li@intel.com>
* Haicheng Li <haicheng.li@intel.com>
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/fiemap.h>
#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>
/*
 * Check whether @inode is eligible to keep its data inline in the inode
 * block: it must not be involved in an atomic write, must be a regular
 * file or a symlink, and its size must fit in the inline data area.
 */
static bool support_inline_data(struct inode *inode)
{
	if (f2fs_used_in_atomic_write(inode) ||
	    (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode)))
		return false;
	return i_size_read(inode) <= MAX_INLINE_DATA(inode);
}
/*
 * Inline data is allowed only when the inode qualifies for it and does
 * not require post-read processing (which needs regular data blocks).
 */
bool f2fs_may_inline_data(struct inode *inode)
{
	return support_inline_data(inode) && !f2fs_post_read_required(inode);
}
/*
 * Return true if @inode owns any data blocks, either via the in-memory
 * block count or via any on-disk node pointer in the raw inode.
 */
static bool inode_has_blocks(struct inode *inode, struct folio *ifolio)
{
	struct f2fs_inode *ri = F2FS_INODE(ifolio);
	int idx;

	if (F2FS_HAS_BLOCKS(inode))
		return true;

	for (idx = 0; idx < DEF_NIDS_PER_INODE; idx++)
		if (ri->i_nid[idx])
			return true;

	return false;
}
/*
 * Return true if the inline-data flag on @inode is inconsistent with the
 * rest of the inode's state (used to flag a corrupted on-disk inode).
 */
bool f2fs_sanity_check_inline_data(struct inode *inode, struct folio *ifolio)
{
	/* no inline flag set: nothing to check */
	if (!f2fs_has_inline_data(inode))
		return false;

	/* inline data must not coexist with allocated blocks */
	if (inode_has_blocks(inode, ifolio))
		return false;

	if (!support_inline_data(inode))
		return true;

	/*
	 * used by sanity_check_inode(), when disk layout fields has not
	 * been synchronized to inmem fields.
	 */
	if (!S_ISREG(inode->i_mode))
		return false;
	return file_is_encrypt(inode) || file_is_verity(inode) ||
	       (F2FS_I(inode)->i_flags & F2FS_COMPR_FL);
}
/*
 * Inline dentries are allowed only for directories, and only when the
 * INLINE_DENTRY mount option is enabled.
 */
bool f2fs_may_inline_dentry(struct inode *inode)
{
	return test_opt(F2FS_I_SB(inode), INLINE_DENTRY) &&
	       S_ISDIR(inode->i_mode);
}
/*
 * Fill @folio from the inline data stored in the inode folio @ifolio,
 * zeroing the part of the folio beyond the inline area, and mark it
 * uptodate.  No-op if the folio is already uptodate.
 */
void f2fs_do_read_inline_data(struct folio *folio, struct folio *ifolio)
{
	struct inode *inode = folio->mapping->host;
	void *src;

	if (folio_test_uptodate(folio))
		return;

	/* inline data only ever lives in the first folio of the file */
	f2fs_bug_on(F2FS_I_SB(inode), folio->index);

	/* zero the tail first, then copy the whole inline data area */
	folio_zero_segment(folio, MAX_INLINE_DATA(inode), folio_size(folio));
	src = inline_data_addr(inode, ifolio);
	memcpy_to_folio(folio, 0, src, MAX_INLINE_DATA(inode));

	if (!folio_test_uptodate(folio))
		folio_mark_uptodate(folio);
}
/*
 * Zero the inline data area of @inode from byte offset @from to its end,
 * dirtying the inode folio.  Truncating from offset 0 also clears
 * FI_DATA_EXIST, since no inline data remains.
 */
void f2fs_truncate_inline_inode(struct inode *inode, struct folio *ifolio,
				u64 from)
{
	void *addr;

	/* nothing to do if the truncation point is past the inline area */
	if (from >= MAX_INLINE_DATA(inode))
		return;

	addr = inline_data_addr(inode, ifolio);

	/* wait for in-flight node writeback before modifying the folio */
	f2fs_folio_wait_writeback(ifolio, NODE, true, true);
	memset(addr + from, 0, MAX_INLINE_DATA(inode) - from);
	folio_mark_dirty(ifolio);

	/* full truncate: the inode no longer holds any inline data */
	if (from == 0)
		clear_inode_flag(inode, FI_DATA_EXIST);
}
/*
 * Satisfy a read of @folio from the inode's inline data.
 *
 * Returns 0 on success (folio is marked uptodate and unlocked),
 * -EAGAIN if the inode no longer has inline data (folio is left locked
 * for the caller's retry), or a negative errno if the inode folio could
 * not be obtained (folio is unlocked).
 */
int f2fs_read_inline_data(struct inode *inode, struct folio *folio)
{
	struct folio *ifolio;

	ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ifolio)) {
		folio_unlock(folio);
		return PTR_ERR(ifolio);
	}

	/*
	 * The inline data may have been converted away before we got the
	 * inode folio; leave the folio locked and report -EAGAIN.
	 */
	if (!f2fs_has_inline_data(inode)) {
		f2fs_folio_put(ifolio, true);
		return -EAGAIN;
	}

	/* inline data lives only in index 0; later folios read as zeroes */
	if (folio->index)
		folio_zero_segment(folio, 0, folio_size(folio));
	else
		f2fs_do_read_inline_data(folio, ifolio);

	if (!folio_test_uptodate(folio))
		folio_mark_uptodate(folio);
	f2fs_folio_put(ifolio, true);
	folio_unlock(folio);
	return 0;
}
int f2fs_convert_inline_folio(struct dnode_of_data *dn, struct folio *folio)
{
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(dn->inode),
.ino = dn->inode->i_ino,
.type = DATA,
.op = REQ_OP_WRITE,
.op_flags = REQ_SYNC | REQ_PRIO,
.folio = folio,
.encrypted_page = NULL,
.io_type = FS_DATA_IO,
};
struct node_info ni;
int dirty, err;
if (!f2fs_exist_data(dn->inode))
goto clear_out;
err = f2fs_reserve_block(dn, 0);
if (err)
return err;
err = f2fs_get_node_info(fio.sbi, dn->nid, &ni, false);
if (err) {
f2fs_truncate_data_blocks_range(dn, 1);
f2fs_put_dnode(dn);
return err;
}
fio.version = ni.version;
if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
f2fs_put_dnode(dn);
set_sbi_flag(fio.sbi, SBI_NEED_FSCK