// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/inode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Inode handling routines
 */

#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/uio.h>
#include <linux/fileattr.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
#include "xattr.h"

static int hfsplus_read_folio(struct file *file, struct folio *folio)
{
        return block_read_full_folio(folio, hfsplus_get_block);
}

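/*
 * Roll back a write that failed after extending the file: truncate the
 * page cache and the on-disk allocation back to the current i_size.
 */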
static void hfsplus_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        if (to > inode->i_size) {
                truncate_pagecache(inode, inode->i_size);
                hfsplus_file_truncate(inode);
        }
}

int hfsplus_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
        int ret;

        *pagep = NULL;
        ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
                                hfsplus_get_block,
                                &HFSPLUS_I(mapping->host)->phys_size);
        if (unlikely(ret))
                hfsplus_write_failed(mapping, pos + len);

        return ret;
}

static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, hfsplus_get_block);
}

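/*
 * Decide whether a folio backing one of the special metadata inodes
 * (extents, catalog or attributes b-tree) can be released: only if no
 * cached b-tree node living in that folio is still referenced.
 */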
static bool hfsplus_release_folio(struct folio *folio, gfp_t mask)
{
        struct inode *inode = folio->mapping->host;
        struct super_block *sb = inode->i_sb;
        struct hfs_btree *tree;
        struct hfs_bnode *node;
        u32 nidx;
        int i;
        bool res = true;

        switch (inode->i_ino) {
        case HFSPLUS_EXT_CNID:
                tree = HFSPLUS_SB(sb)->ext_tree;
                break;
        case HFSPLUS_CAT_CNID:
                tree = HFSPLUS_SB(sb)->cat_tree;
                break;
        case HFSPLUS_ATTR_CNID:
                tree = HFSPLUS_SB(sb)->attr_tree;
                break;
        default:
                BUG();
                return false;
        }
        if (!tree)
                return false;
        if (tree->node_size >= PAGE_SIZE) {
                nidx = folio->index >>
                        (tree->node_size_shift - PAGE_SHIFT);
                spin_lock(&tree->hash_lock);
                node = hfs_bnode_findhash(tree, nidx);
                if (node && atomic_read(&node->refcnt))
                        res = false;
                if (res && node) {
                        hfs_bnode_unhash(node);
                        hfs_bnode_free(node);
                }
                spin_unlock(&tree->hash_lock);
        } else {
                nidx = folio->index <<
                        (PAGE_SHIFT - tree->node_size_shift);
                i = 1 << (PAGE_SHIFT - tree->node_size_shift);
                spin_lock(&tree->hash_lock);
                do {
                        node = hfs_bnode_findhash(tree, nidx++);
                        if (!node)
                                continue;
                        if (atomic_read(&node->refcnt)) {
                                res = false;
                                break;
                        }
                        hfs_bnode_unhash(node);
                        hfs_bnode_free(node);
                } while (--i && nidx < tree->node_count);
                spin_unlock(&tree->hash_lock);
        }
        return res ? try_to_free_buffers(folio) : false;
}

static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        size_t count = iov_iter_count(iter);
        ssize_t ret;

        ret = blockdev_direct_IO(iocb, inode, iter, hfsplus_get_block);

        /*
         * In case of error, an extending write may have instantiated a few
         * blocks outside i_size. Trim these off again.
         */
        if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
                loff_t isize = i_size_read(inode);
                loff_t end = iocb->ki_pos + count;

                if (end > isize)
                        hfsplus_write_failed(mapping, end);
        }

        return ret;
}

static int hfsplus_writepages(struct address_space *mapping,
                              struct writeback_control *wbc)
{
        return mpage_writepages(mapping, wbc, hfsplus_get_block);
}

const struct address_space_operations hfsplus_btree_aops = {
        .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .read_folio     = hfsplus_read_folio,
        .writepages     = hfsplus_writepages,
        .write_begin    = hfsplus_write_begin,
        .write_end      = generic_write_end,
        .migrate_folio  = buffer_migrate_folio,
        .bmap           = hfsplus_bmap,
        .release_folio  = hfsplus_release_folio,
};

const struct address_space_operations hfsplus_aops = {
        .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .read_folio     = hfsplus_read_folio,
        .write_begin    = hfsplus_write_begin,
        .write_end      = generic_write_end,
        .bmap           = hfsplus_bmap,
        .direct_IO      = hfsplus_direct_IO,
        .writepages     = hfsplus_writepages,
        .migrate_folio  = buffer_migrate_folio,
};

const struct dentry_operations hfsplus_dentry_operations = {
        .d_hash       = hfsplus_hash_dentry,
        .d_compare    = hfsplus_compare_dentry,
};

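/*
 * Derive the in-core uid, gid, mode and inode flags from the on-disk HFS+
 * permission record, falling back to the defaults held in the superblock
 * info when the record carries no usable values or an override is set.
 */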
static void hfsplus_get_perms(struct inode *inode,
                struct hfsplus_perm *perms, int dir)
{
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
        u16 mode;

        mode = be16_to_cpu(perms->mode);

        i_uid_write(inode, be32_to_cpu(perms->owner));
        if ((test_bit(HFSPLUS_SB_UID, &sbi->flags)) || (!i_uid_read(inode) && !mode))
                inode->i_uid = sbi->uid;

        i_gid_write(inode, be32_to_cpu(perms->group));
        if ((test_bit(HFSPLUS_SB_GID, &sbi->flags)) || (!i_gid_read(inode) && !mode))
                inode->i_gid = sbi->gid;

        if (dir) {
                mode = mode ? (mode & S_IALLUGO) : (S_IRWXUGO & ~(sbi->umask));
                mode |= S_IFDIR;
        } else if (!mode)
                mode = S_IFREG | ((S_IRUGO|S_IWUGO) & ~(sbi->umask));
        inode->i_mode = mode;

        HFSPLUS_I(inode)->userflags = perms->userflags;
        if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
                inode->i_flags |= S_IMMUTABLE;
        else
                inode->i_flags &= ~S_IMMUTABLE;
        if (perms->rootflags & HFSPLUS_FLG_APPEND)
                inode->i_flags |= S_APPEND;
        else
                inode->i_flags &= ~S_APPEND;
}

static int hfsplus_file_open(struct inode *inode, struct file *file)
{
        if (HFSPLUS_IS_RSRC(inode))
                inode = HFSPLUS_I(inode)->rsrc_inode;
        if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
                return -EOVERFLOW;
        atomic_inc(&HFSPLUS_I(inode)->opencnt);
        return 0;
}

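/*
 * On last close, trim any preallocated space and, if the file was unlinked
 * while still open (S_DEAD), remove its catalog record from the hidden
 * directory and release its blocks.
 */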
static int hfsplus_file_release(struct inode *inode, struct file *file)
{
        struct super_block *sb = inode->i_sb;

        if (HFSPLUS_IS_RSRC(inode))
                inode = HFSPLUS_I(inode)->rsrc_inode;
        if (atomic_dec_and_test(&HFSPLUS_I(inode)->opencnt)) {
                inode_lock(inode);
                hfsplus_file_truncate(inode);
                if (inode->i_flags & S_DEAD) {
                        hfsplus_delete_cat(inode->i_ino,
                                           HFSPLUS_SB(sb)->hidden_dir, NULL);
                        hfsplus_delete_inode(inode);
                }
                inode_unlock(inode);
        }
        return 0;
}

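/*
 * Size changes are handled here: growing goes through
 * generic_cont_expand_simple(), shrinking truncates both the page cache
 * and the extent allocation before the remaining attributes are copied.
 */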
static int hfsplus_setattr(struct mnt_idmap *idmap,
                           struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        int error;

        error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
        if (error)
                return error;

        if ((attr->ia_valid & ATTR_SIZE) &&
            attr->ia_size != i_size_read(inode)) {
                inode_dio_wait(inode);
                if (attr->ia_size > inode->i_size) {
                        error = generic_cont_expand_simple(inode,
                                                           attr->ia_size);
                        if (error)
                                return error;
                }
                truncate_setsize(inode, attr->ia_size);
                hfsplus_file_truncate(inode);
                inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
        }

        setattr_copy(&nop_mnt_idmap, inode, attr);
        mark_inode_dirty(inode);

        return 0;
}

int hfsplus_getattr(struct mnt_idmap *idmap, const struct path *path,
                    struct kstat *stat, u32 request_mask,
                    unsigned int query_flags)
{
        struct inode *inode = d_inode(path->dentry);
        struct hfsplus_inode_info *hip = HFSPLUS_I(inode);

        if (request_mask & STATX_BTIME) {
                stat->result_mask |= STATX_BTIME;
                stat->btime = hfsp_mt2ut(hip->create_date);
        }

        if (inode->i_flags & S_APPEND)
                stat->attributes |= STATX_ATTR_APPEND;
        if (inode->i_flags & S_IMMUTABLE)
                stat->attributes |= STATX_ATTR_IMMUTABLE;
        if (hip->userflags & HFSPLUS_FLG_NODUMP)
                stat->attributes |= STATX_ATTR_NODUMP;

        stat->attributes_mask |= STATX_ATTR_APPEND | STATX_ATTR_IMMUTABLE |
                                 STATX_ATTR_NODUMP;

        generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
        return 0;
}

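/*
 * fsync: write back the file data and inode metadata, then flush whichever
 * of the catalog, extents, attributes and allocation-file mappings this
 * inode has dirtied, and finally issue a flush to the device unless
 * barriers are disabled.
 */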
int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
                       int datasync)
{
        struct inode *inode = file->f_mapping->host;
        struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
        int error = 0, error2;

        error = file_write_and_wait_range(file, start, end);
        if (error)
                return error;
        inode_lock(inode);

        /*
         * Sync inode metadata into the catalog and extent trees.
         */
        sync_inode_metadata(inode, 1);

        /*
         * And explicitly write out the btrees.
         */
        if (test_and_clear_bit(HFSPLUS_I_CAT_DIRTY, &hip->flags))
                error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping);

        if (test_and_clear_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags)) {
                error2 =
                        filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
                if (!error)
                        error = error2;
        }

        if (test_and_clear_bit(HFSPLUS_I_ATTR_DIRTY, &hip->flags)) {
                if (sbi->attr_tree) {
                        error2 =
                                filemap_write_and_wait(
                                            sbi->attr_tree->inode->i_mapping);
                        if (!error)
                                error = error2;
                } else {
                        pr_err("sync non-existent attributes tree\n");
                }
        }

        if (test_and_clear_bit(HFSPLUS_I_ALLOC_DIRTY, &hip->flags)) {
                error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
                if (!error)
                        error = error2;
        }

        if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
                blkdev_issue_flush(inode->i_sb->s_bdev);

        inode_unlock(inode);

        return error;
}

static const struct inode_operations hfsplus_file_inode_operations = {
        .setattr        = hfsplus_setattr,
        .getattr        = hfsplus_getattr,
        .listxattr      = hfsplus_listxattr,
        .fileattr_get   = hfsplus_fileattr_get,
        .fileattr_set   = hfsplus_fileattr_set,
};

static const struct file_operations hfsplus_file_operations = {
        .llseek         = generic_file_llseek,
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .splice_read    = filemap_splice_read,
        .fsync          = hfsplus_file_fsync,
        .open           = hfsplus_file_open,
        .release        = hfsplus_file_release,
        .unlocked_ioctl = hfsplus_ioctl,
};

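/*
 * Allocate and initialise an in-core inode for a new catalog object,
 * assigning it the next CNID and wiring up directory, regular-file or
 * symlink operations as appropriate for @mode.
 */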
struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir,
                                umode_t mode)
{
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
        struct inode *inode = new_inode(sb);
        struct hfsplus_inode_info *hip;

        if (!inode)
                return NULL;

        inode->i_ino = sbi->next_cnid++;
        inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
        set_nlink(inode, 1);
        simple_inode_init_ts(inode);

        hip = HFSPLUS_I(inode);
        INIT_LIST_HEAD(&hip->open_dir_list);
        spin_lock_init(&hip->open_dir_lock);
        mutex_init(&hip->extents_lock);
        atomic_set(&hip->opencnt, 0);
        hip->extent_state = 0;
        hip->flags = 0;
        hip->userflags = 0;
        hip->subfolders = 0;
        memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec));
        memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
        hip->alloc_blocks = 0;
        hip->first_blocks = 0;
        hip->cached_start = 0;
        hip->cached_blocks = 0;
        hip->phys_size = 0;
        hip->fs_blocks = 0;
        hip->rsrc_inode = NULL;
        if (S_ISDIR(inode->i_mode)) {
                inode->i_size = 2;
                sbi->folder_count++;
                inode->i_op = &hfsplus_dir_inode_operations;
                inode->i_fop = &hfsplus_dir_operations;
        } else if (S_ISREG(inode->i_mode)) {
                sbi->file_count++;
                inode->i_op = &hfsplus_file_inode_operations;
                inode->i_fop = &hfsplus_file_operations;
                inode->i_mapping->a_ops = &hfsplus_aops;
                hip->clump_blocks = sbi->data_clump_blocks;
        } else if (S_ISLNK(inode->i_mode)) {
                sbi->file_count++;
                inode->i_op = &page_symlink_inode_operations;
                inode_nohighmem(inode);
                inode->i_mapping->a_ops = &hfsplus_aops;
                hip->clump_blocks = 1;
        } else
                sbi->file_count++;
        insert_inode_hash(inode);
        mark_inode_dirty(inode);
        hfsplus_mark_mdb_dirty(sb);

        return inode;
}

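/*
 * Update the volume's folder/file counts for a deleted object and release
 * the blocks of regular files (once the last link is gone) and symlinks.
 */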
void hfsplus_delete_inode(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        if (S_ISDIR(inode->i_mode)) {
                HFSPLUS_SB(sb)->folder_count--;
                hfsplus_mark_mdb_dirty(sb);
                return;
        }
        HFSPLUS_SB(sb)->file_count--;
        if (S_ISREG(inode->i_mode)) {
                if (!inode->i_nlink) {
                        inode->i_size = 0;
                        hfsplus_file_truncate(inode);
                }
        } else if (S_ISLNK(inode->i_mode)) {
                inode->i_size = 0;
                hfsplus_file_truncate(inode);
        }
        hfsplus_mark_mdb_dirty(sb);
}

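/*
 * Load a fork descriptor from the catalog record into the in-core inode:
 * the first eight extents, the total block and byte counts, and the clump
 * size used for preallocation.
 */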
void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
        struct super_block *sb = inode->i_sb;
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
        struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        u32 count;
        int i;

        memcpy(&hip->first_extents, &fork->extents, sizeof(hfsplus_extent_rec));
        for (count = 0, i = 0; i < 8; i++)
                count += be32_to_cpu(fork->extents[i].block_count);
        hip->first_blocks = count;
        memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
        hip->cached_start = 0;
        hip->cached_blocks = 0;

        hip->alloc_blocks = be32_to_cpu(fork->total_blocks);
        hip->phys_size = inode->i_size = be64_to_cpu(fork->total_size);
        hip->fs_blocks =
                (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
        inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
        hip->clump_blocks =
                be32_to_cpu(fork->clump_size) >> sbi->alloc_blksz_shift;
        if (!hip->clump_blocks) {
                hip->clump_blocks = HFSPLUS_IS_RSRC(inode) ?
                        sbi->rsrc_clump_blocks :
                        sbi->data_clump_blocks;
        }
}

void hfsplus_inode_write_fork(struct inode *inode,
                struct hfsplus_fork_raw *fork)
{
        memcpy(&fork->extents, &HFSPLUS_I(inode)->first_extents,
               sizeof(hfsplus_extent_rec));
        fork->total_size = cpu_to_be64(inode->i_size);
        fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode)->alloc_blocks);
}

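/*
 * Fill in an inode from the catalog record (folder or file entry) found
 * via @fd, including permissions, timestamps and the data or resource
 * fork.
 */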
int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
{
        hfsplus_cat_entry entry;
        int res = 0;
        u16 type;

        type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);

        HFSPLUS_I(inode)->linkid = 0;
        if (type == HFSPLUS_FOLDER) {
                struct hfsplus_cat_folder *folder = &entry.folder;

                if (fd->entrylength < sizeof(struct hfsplus_cat_folder)) {
                        pr_err("bad catalog folder entry\n");
                        res = -EIO;
                        goto out;
                }
                hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
                                        sizeof(struct hfsplus_cat_folder));
                hfsplus_get_perms(inode, &folder->permissions, 1);
                set_nlink(inode, 1);
                inode->i_size = 2 + be32_to_cpu(folder->valence);
                inode_set_atime_to_ts(inode, hfsp_mt2ut(folder->access_date));
                inode_set_mtime_to_ts(inode,
                                      hfsp_mt2ut(folder->content_mod_date));
                inode_set_ctime_to_ts(inode,
                                      hfsp_mt2ut(folder->attribute_mod_date));
                HFSPLUS_I(inode)->create_date = folder->create_date;
                HFSPLUS_I(inode)->fs_blocks = 0;
                if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
                        HFSPLUS_I(inode)->subfolders =
                                be32_to_cpu(folder->subfolders);
                }
                inode->i_op = &hfsplus_dir_inode_operations;
                inode->i_fop = &hfsplus_dir_operations;
        } else if (type == HFSPLUS_FILE) {
                struct hfsplus_cat_file *file = &entry.file;

                if (fd->entrylength < sizeof(struct hfsplus_cat_file)) {
                        pr_err("bad catalog file entry\n");
                        res = -EIO;
                        goto out;
                }
                hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
                                        sizeof(struct hfsplus_cat_file));

                hfsplus_inode_read_fork(inode, HFSPLUS_IS_RSRC(inode) ?
                                        &file->rsrc_fork : &file->data_fork);
                hfsplus_get_perms(inode, &file->permissions, 0);
                set_nlink(inode, 1);
                if (S_ISREG(inode->i_mode)) {
                        if (file->permissions.dev)
                                set_nlink(inode,
                                          be32_to_cpu(file->permissions.dev));
                        inode->i_op = &hfsplus_file_inode_operations;
                        inode->i_fop = &hfsplus_file_operations;
                        inode->i_mapping->a_ops = &hfsplus_aops;
                } else if (S_ISLNK(inode->i_mode)) {
                        inode->i_op = &page_symlink_inode_operations;
                        inode_nohighmem(inode);
                        inode->i_mapping->a_ops = &hfsplus_aops;
                } else {
                        init_special_inode(inode, inode->i_mode,
                                           be32_to_cpu(file->permissions.dev));
                }
                inode_set_atime_to_ts(inode, hfsp_mt2ut(file->access_date));
                inode_set_mtime_to_ts(inode,
                                      hfsp_mt2ut(file->content_mod_date));
                inode_set_ctime_to_ts(inode,
                                      hfsp_mt2ut(file->attribute_mod_date));
                HFSPLUS_I(inode)->create_date = file->create_date;
        } else {
                pr_err("bad catalog entry used to create inode\n");
                res = -EIO;
        }
out:
        return res;
}

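/*
 * Write the in-core inode back to its catalog record. A resource-fork
 * inode only updates its fork data; folders and data-fork files also have
 * their permissions, timestamps and flags refreshed.
 */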
int hfsplus_cat_write_inode(struct inode *inode)
{
        struct inode *main_inode = inode;
        struct hfs_find_data fd;
        hfsplus_cat_entry entry;
        int res = 0;

        if (HFSPLUS_IS_RSRC(inode))
                main_inode = HFSPLUS_I(inode)->rsrc_inode;

        if (!main_inode->i_nlink)
                return 0;

        if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb)->cat_tree, &fd))
                /* panic? */
                return -EIO;

        if (hfsplus_find_cat(main_inode->i_sb, main_inode->i_ino, &fd))
                /* panic? */
                goto out;

        if (S_ISDIR(main_inode->i_mode)) {
                struct hfsplus_cat_folder *folder = &entry.folder;

                if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) {
                        pr_err("bad catalog folder entry\n");
                        res = -EIO;
                        goto out;
                }
                hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
                                        sizeof(struct hfsplus_cat_folder));
                /* simple node checks? */
                hfsplus_cat_set_perms(inode, &folder->permissions);
                folder->access_date = hfsp_ut2mt(inode_get_atime(inode));
                folder->content_mod_date = hfsp_ut2mt(inode_get_mtime(inode));
                folder->attribute_mod_date = hfsp_ut2mt(inode_get_ctime(inode));
                folder->valence = cpu_to_be32(inode->i_size - 2);
                if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
                        folder->subfolders =
                                cpu_to_be32(HFSPLUS_I(inode)->subfolders);
                }
                hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
                                         sizeof(struct hfsplus_cat_folder));
        } else if (HFSPLUS_IS_RSRC(inode)) {
                struct hfsplus_cat_file *file = &entry.file;

                hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
                               sizeof(struct hfsplus_cat_file));
                hfsplus_inode_write_fork(inode, &file->rsrc_fork);
                hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
                                sizeof(struct hfsplus_cat_file));
        } else {
                struct hfsplus_cat_file *file = &entry.file;

                if (fd.entrylength < sizeof(struct hfsplus_cat_file)) {
                        pr_err("bad catalog file entry\n");
                        res = -EIO;
                        goto out;
                }
                hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
                                        sizeof(struct hfsplus_cat_file));
                hfsplus_inode_write_fork(inode, &file->data_fork);
                hfsplus_cat_set_perms(inode, &file->permissions);
                if (HFSPLUS_FLG_IMMUTABLE &
                                (file->permissions.rootflags |
                                        file->permissions.userflags))
                        file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
                else
                        file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
                file->access_date = hfsp_ut2mt(inode_get_atime(inode));
                file->content_mod_date = hfsp_ut2mt(inode_get_mtime(inode));
                file->attribute_mod_date = hfsp_ut2mt(inode_get_ctime(inode));
                hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
                                         sizeof(struct hfsplus_cat_file));
        }

        set_bit(HFSPLUS_I_CAT_DIRTY, &HFSPLUS_I(inode)->flags);
out:
        hfs_find_exit(&fd);
        return res;
}

int hfsplus_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
        struct inode *inode = d_inode(dentry);
        struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        unsigned int flags = 0;

        if (inode->i_flags & S_IMMUTABLE)
                flags |= FS_IMMUTABLE_FL;
        if (inode->i_flags & S_APPEND)
                flags |= FS_APPEND_FL;
        if (hip->userflags & HFSPLUS_FLG_NODUMP)
                flags |= FS_NODUMP_FL;

        fileattr_fill_flags(fa, flags);

        return 0;
}

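/*
 * Apply FS_IOC_SETFLAGS-style attributes: only IMMUTABLE, APPEND and
 * NODUMP are supported; anything else is rejected with -EOPNOTSUPP.
 */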
int hfsplus_fileattr_set(struct mnt_idmap *idmap,
                         struct dentry *dentry, struct fileattr *fa)
{
        struct inode *inode = d_inode(dentry);
        struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        unsigned int new_fl = 0;

        if (fileattr_has_fsx(fa))
                return -EOPNOTSUPP;

        /* don't silently ignore unsupported ext2 flags */
        if (fa->flags & ~(FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NODUMP_FL))
                return -EOPNOTSUPP;

        if (fa->flags & FS_IMMUTABLE_FL)
                new_fl |= S_IMMUTABLE;

        if (fa->flags & FS_APPEND_FL)
                new_fl |= S_APPEND;

        inode_set_flags(inode, new_fl, S_IMMUTABLE | S_APPEND);

        if (fa->flags & FS_NODUMP_FL)
                hip->userflags |= HFSPLUS_FLG_NODUMP;
        else
                hip->userflags &= ~HFSPLUS_FLG_NODUMP;

        inode_set_ctime_current(inode);
        mark_inode_dirty(inode);

        return 0;
}