// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/inode.c
 *
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#include "internal.h"
#include "xattr.h"

#include <trace/events/erofs.h>
18 static int read_inode(struct inode *inode, void *data)
20 struct erofs_vnode *vi = EROFS_V(inode);
21 struct erofs_inode_v1 *v1 = data;
22 const unsigned int advise = le16_to_cpu(v1->i_advise);
24 vi->data_mapping_mode = __inode_data_mapping(advise);
26 if (unlikely(vi->data_mapping_mode >= EROFS_INODE_LAYOUT_MAX)) {
27 errln("unknown data mapping mode %u of nid %llu",
28 vi->data_mapping_mode, vi->nid);
33 if (__inode_version(advise) == EROFS_INODE_LAYOUT_V2) {
34 struct erofs_inode_v2 *v2 = data;
36 vi->inode_isize = sizeof(struct erofs_inode_v2);
37 vi->xattr_isize = ondisk_xattr_ibody_size(v2->i_xattr_icount);
39 inode->i_mode = le16_to_cpu(v2->i_mode);
40 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
41 S_ISLNK(inode->i_mode)) {
42 vi->raw_blkaddr = le32_to_cpu(v2->i_u.raw_blkaddr);
43 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
45 new_decode_dev(le32_to_cpu(v2->i_u.rdev));
46 } else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
52 i_uid_write(inode, le32_to_cpu(v2->i_uid));
53 i_gid_write(inode, le32_to_cpu(v2->i_gid));
54 set_nlink(inode, le32_to_cpu(v2->i_nlink));
57 inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
58 le64_to_cpu(v2->i_ctime);
59 inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
60 le32_to_cpu(v2->i_ctime_nsec);
62 inode->i_size = le64_to_cpu(v2->i_size);
63 } else if (__inode_version(advise) == EROFS_INODE_LAYOUT_V1) {
64 struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
66 vi->inode_isize = sizeof(struct erofs_inode_v1);
67 vi->xattr_isize = ondisk_xattr_ibody_size(v1->i_xattr_icount);
69 inode->i_mode = le16_to_cpu(v1->i_mode);
70 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
71 S_ISLNK(inode->i_mode)) {
72 vi->raw_blkaddr = le32_to_cpu(v1->i_u.raw_blkaddr);
73 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
75 new_decode_dev(le32_to_cpu(v1->i_u.rdev));
76 } else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
82 i_uid_write(inode, le16_to_cpu(v1->i_uid));
83 i_gid_write(inode, le16_to_cpu(v1->i_gid));
84 set_nlink(inode, le16_to_cpu(v1->i_nlink));
86 /* use build time to derive all file time */
87 inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
89 inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
92 inode->i_size = le32_to_cpu(v1->i_size);
94 errln("unsupported on-disk inode version %u of nid %llu",
95 __inode_version(advise), vi->nid);
100 /* measure inode.i_blocks as the generic filesystem */
101 inode->i_blocks = ((inode->i_size - 1) >> 9) + 1;
/*
 * try_lock can be required since locking order is:
 *   file data(fs_inode)
 *        meta(bd_inode)
 * but the majority of the callers is "iget",
 * in that case we are pretty sure no deadlock since
 * no data operations exist. However I tend to
 * try_lock since it takes no much overhead and
 * will success immediately.
 */
115 static int fill_inline_data(struct inode *inode, void *data,
118 struct erofs_vnode *vi = EROFS_V(inode);
119 struct erofs_sb_info *sbi = EROFS_I_SB(inode);
120 int mode = vi->data_mapping_mode;
122 DBG_BUGON(mode >= EROFS_INODE_LAYOUT_MAX);
124 /* should be inode inline C */
125 if (mode != EROFS_INODE_LAYOUT_INLINE)
128 /* fast symlink (following ext4) */
129 if (S_ISLNK(inode->i_mode) && inode->i_size < PAGE_SIZE) {
130 char *lnk = erofs_kmalloc(sbi, inode->i_size + 1, GFP_KERNEL);
132 if (unlikely(lnk == NULL))
135 m_pofs += vi->inode_isize + vi->xattr_isize;
137 /* inline symlink data shouldn't across page boundary as well */
138 if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) {
144 /* get in-page inline data */
145 memcpy(lnk, data + m_pofs, inode->i_size);
146 lnk[inode->i_size] = '\0';
149 set_inode_fast_symlink(inode);
154 static int fill_inode(struct inode *inode, int isdir)
156 struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
157 struct erofs_vnode *vi = EROFS_V(inode);
164 trace_erofs_fill_inode(inode, isdir);
166 blkaddr = erofs_blknr(iloc(sbi, vi->nid));
167 ofs = erofs_blkoff(iloc(sbi, vi->nid));
169 debugln("%s, reading inode nid %llu at %u of blkaddr %u",
170 __func__, vi->nid, ofs, blkaddr);
172 page = erofs_get_meta_page(inode->i_sb, blkaddr, isdir);
175 errln("failed to get inode (nid: %llu) page, err %ld",
176 vi->nid, PTR_ERR(page));
177 return PTR_ERR(page);
180 DBG_BUGON(!PageUptodate(page));
181 data = page_address(page);
183 err = read_inode(inode, data + ofs);
185 /* setup the new inode */
186 if (S_ISREG(inode->i_mode)) {
187 #ifdef CONFIG_EROFS_FS_XATTR
189 inode->i_op = &erofs_generic_xattr_iops;
191 inode->i_fop = &generic_ro_fops;
192 } else if (S_ISDIR(inode->i_mode)) {
194 #ifdef CONFIG_EROFS_FS_XATTR
195 vi->xattr_isize ? &erofs_dir_xattr_iops :
198 inode->i_fop = &erofs_dir_fops;
199 } else if (S_ISLNK(inode->i_mode)) {
200 /* by default, page_get_link is used for symlink */
202 #ifdef CONFIG_EROFS_FS_XATTR
203 &erofs_symlink_xattr_iops,
205 &page_symlink_inode_operations;
207 inode_nohighmem(inode);
208 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
209 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
210 #ifdef CONFIG_EROFS_FS_XATTR
211 inode->i_op = &erofs_special_inode_operations;
213 init_special_inode(inode, inode->i_mode, inode->i_rdev);
219 if (is_inode_layout_compression(inode)) {
220 #ifdef CONFIG_EROFS_FS_ZIP
221 inode->i_mapping->a_ops =
222 &z_erofs_vle_normalaccess_aops;
229 inode->i_mapping->a_ops = &erofs_raw_access_aops;
231 /* fill last page if inline data is available */
232 fill_inline_data(inode, data, ofs);
/*
 * erofs nid is 64bits, but i_ino is 'unsigned long', therefore
 * we should do more for 32-bit platform to find the right inode.
 */
#if BITS_PER_LONG == 32
/* iget5_locked test callback: match an inode by its 64-bit nid */
static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	return EROFS_V(inode)->nid == nid;
}

/* iget5_locked set callback: derive i_ino from the 64-bit nid */
static int erofs_iget_set_actor(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	inode->i_ino = erofs_inode_hash(nid);
	return 0;
}
#endif
262 static inline struct inode *erofs_iget_locked(struct super_block *sb,
265 const unsigned long hashval = erofs_inode_hash(nid);
267 #if BITS_PER_LONG >= 64
268 /* it is safe to use iget_locked for >= 64-bit platform */
269 return iget_locked(sb, hashval);
271 return iget5_locked(sb, hashval, erofs_ilookup_test_actor,
272 erofs_iget_set_actor, &nid);
276 struct inode *erofs_iget(struct super_block *sb,
277 erofs_nid_t nid, bool isdir)
279 struct inode *inode = erofs_iget_locked(sb, nid);
281 if (unlikely(inode == NULL))
282 return ERR_PTR(-ENOMEM);
284 if (inode->i_state & I_NEW) {
286 struct erofs_vnode *vi = EROFS_V(inode);
289 err = fill_inode(inode, isdir);
291 unlock_new_inode(inode);
294 inode = ERR_PTR(err);
#ifdef CONFIG_EROFS_FS_XATTR
/* inode_operations variants that expose listxattr when xattrs are built in */
const struct inode_operations erofs_generic_xattr_iops = {
	.listxattr = erofs_listxattr,
};

const struct inode_operations erofs_symlink_xattr_iops = {
	.get_link = page_get_link,
	.listxattr = erofs_listxattr,
};

const struct inode_operations erofs_special_inode_operations = {
	.listxattr = erofs_listxattr,
};

/* fast symlink: target is already in memory, so simple_get_link is enough */
const struct inode_operations erofs_fast_symlink_xattr_iops = {
	.get_link = simple_get_link,
	.listxattr = erofs_listxattr,
};
#endif