// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/moduleparam.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>
#include <linux/pagevec.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;

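/*
 * Allocate an array of @nr page pointers, served from the per-sb page
 * array slab when the request fits in a slab object, or from kzalloc
 * otherwise; release it with page_array_free().
 */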
static void *page_array_alloc(struct inode *inode, int nr)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        unsigned int size = sizeof(struct page *) * nr;

        if (likely(size <= sbi->page_array_slab_size))
                return f2fs_kmem_cache_alloc(sbi->page_array_slab,
                                        GFP_F2FS_ZERO, false, F2FS_I_SB(inode));
        return f2fs_kzalloc(sbi, size, GFP_NOFS);
}

static void page_array_free(struct inode *inode, void *pages, int nr)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        unsigned int size = sizeof(struct page *) * nr;

        if (!pages)
                return;

        if (likely(size <= sbi->page_array_slab_size))
                kmem_cache_free(sbi->page_array_slab, pages);
        else
                kfree(pages);
}

struct f2fs_compress_ops {
        int (*init_compress_ctx)(struct compress_ctx *cc);
        void (*destroy_compress_ctx)(struct compress_ctx *cc);
        int (*compress_pages)(struct compress_ctx *cc);
        int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
        void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
        int (*decompress_pages)(struct decompress_io_ctx *dic);
};

static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
        return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
        return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
        return cc->cluster_idx << cc->log_cluster_size;
}

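/*
 * A page is a temporary compressed page iff its page_private points to a
 * compress_io_ctx/decompress_io_ctx, which both start with the
 * F2FS_COMPRESSED_PAGE_MAGIC magic.
 */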
bool f2fs_is_compressed_page(struct page *page)
{
        if (!PagePrivate(page))
                return false;
        if (!page_private(page))
                return false;
        if (page_private_nonpointer(page))
                return false;

        f2fs_bug_on(F2FS_M_SB(page->mapping),
                *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
        return true;
}

static void f2fs_set_compressed_page(struct page *page,
                struct inode *inode, pgoff_t index, void *data)
{
        attach_page_private(page, (void *)data);

        /* i_crypto_info and iv index */
        page->index = index;
        page->mapping = inode->i_mapping;
}

static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
        int i;

        for (i = 0; i < len; i++) {
                if (!cc->rpages[i])
                        continue;
                if (unlock)
                        unlock_page(cc->rpages[i]);
                else
                        put_page(cc->rpages[i]);
        }
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
        f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
        f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
                struct writeback_control *wbc, bool redirty, int unlock)
{
        unsigned int i;

        for (i = 0; i < cc->cluster_size; i++) {
                if (!cc->rpages[i])
                        continue;
                if (redirty)
                        redirty_page_for_writepage(wbc, cc->rpages[i]);
                f2fs_put_page(cc->rpages[i], unlock);
        }
}

struct page *f2fs_compress_control_page(struct page *page)
{
        return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}

int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
        if (cc->rpages)
                return 0;

        cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
        return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
{
        page_array_free(cc->inode, cc->rpages, cc->cluster_size);
        cc->rpages = NULL;
        cc->nr_rpages = 0;
        cc->nr_cpages = 0;
        cc->valid_nr_cpages = 0;
        if (!reuse)
                cc->cluster_idx = NULL_CLUSTER;
}

void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{
        unsigned int cluster_ofs;

        if (!f2fs_cluster_can_merge_page(cc, page->index))
                f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

        cluster_ofs = offset_in_cluster(cc, page->index);
        cc->rpages[cluster_ofs] = page;
        cc->nr_rpages++;
        cc->cluster_idx = cluster_idx(cc, page->index);
}

#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
        cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
                                LZO1X_MEM_COMPRESS, GFP_NOFS);
        if (!cc->private)
                return -ENOMEM;

        cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
        return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
        kvfree(cc->private);
        cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
        int ret;

        ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
                                        &cc->clen, cc->private);
        if (ret != LZO_E_OK) {
                printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
                                KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
                return -EIO;
        }
        return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
        int ret;

        ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
                                                dic->rbuf, &dic->rlen);
        if (ret != LZO_E_OK) {
                printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
                                KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
                return -EIO;
        }

        if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
                printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
                                        "expected:%lu\n", KERN_ERR,
                                        F2FS_I_SB(dic->inode)->sb->s_id,
                                        dic->rlen,
                                        PAGE_SIZE << dic->log_cluster_size);
                return -EIO;
        }
        return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
        .init_compress_ctx      = lzo_init_compress_ctx,
        .destroy_compress_ctx   = lzo_destroy_compress_ctx,
        .compress_pages         = lzo_compress_pages,
        .decompress_pages       = lzo_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
        unsigned int size = LZ4_MEM_COMPRESS;

#ifdef CONFIG_F2FS_FS_LZ4HC
        if (F2FS_I(cc->inode)->i_compress_flag >> COMPRESS_LEVEL_OFFSET)
                size = LZ4HC_MEM_COMPRESS;
#endif

        cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
        if (!cc->private)
                return -ENOMEM;

        /*
         * we do not change cc->clen to LZ4_compressBound(inputsize) to
         * adapt to the worst compress case, because the lz4 compressor
         * can handle the output budget properly.
         */
        cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
        return 0;
}

static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
        kvfree(cc->private);
        cc->private = NULL;
}

#ifdef CONFIG_F2FS_FS_LZ4HC
static int lz4hc_compress_pages(struct compress_ctx *cc)
{
        unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
                                                COMPRESS_LEVEL_OFFSET;
        int len;

        if (level)
                len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
                                        cc->clen, level, cc->private);
        else
                len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
                                                cc->clen, cc->private);
        if (!len)
                return -EAGAIN;

        cc->clen = len;
        return 0;
}
#endif

static int lz4_compress_pages(struct compress_ctx *cc)
{
        int len;

#ifdef CONFIG_F2FS_FS_LZ4HC
        return lz4hc_compress_pages(cc);
#endif
        len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
                                                cc->clen, cc->private);
        if (!len)
                return -EAGAIN;

        cc->clen = len;
        return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
        int ret;

        ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
                                                dic->clen, dic->rlen);
        if (ret < 0) {
                printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
                                KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
                return -EIO;
        }

        if (ret != PAGE_SIZE << dic->log_cluster_size) {
                printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
                                        "expected:%lu\n", KERN_ERR,
                                        F2FS_I_SB(dic->inode)->sb->s_id, ret,
                                        PAGE_SIZE << dic->log_cluster_size);
                return -EIO;
        }
        return 0;
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
        .init_compress_ctx      = lz4_init_compress_ctx,
        .destroy_compress_ctx   = lz4_destroy_compress_ctx,
        .compress_pages         = lz4_compress_pages,
        .decompress_pages       = lz4_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_ZSTD
#define F2FS_ZSTD_DEFAULT_CLEVEL        1

static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
        zstd_parameters params;
        zstd_cstream *stream;
        void *workspace;
        unsigned int workspace_size;
        unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
                                                COMPRESS_LEVEL_OFFSET;

        if (!level)
                level = F2FS_ZSTD_DEFAULT_CLEVEL;

        params = zstd_get_params(level, cc->rlen);
        workspace_size = zstd_cstream_workspace_bound(&params.cParams);

        workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
                                        workspace_size, GFP_NOFS);
        if (!workspace)
                return -ENOMEM;

        stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
        if (!stream) {
                printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_cstream failed\n",
                                KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
                                __func__);
                kvfree(workspace);
                return -EIO;
        }

        cc->private = workspace;
        cc->private2 = stream;

        cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
        return 0;
}

static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
        kvfree(cc->private);
        cc->private = NULL;
        cc->private2 = NULL;
}

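/*
 * Compress the cluster with the zstd streaming API. The output budget
 * (dst_size) is one page less than the input, minus the compress header,
 * so compression only succeeds if it saves at least one full block.
 */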
static int zstd_compress_pages(struct compress_ctx *cc)
{
        zstd_cstream *stream = cc->private2;
        zstd_in_buffer inbuf;
        zstd_out_buffer outbuf;
        int src_size = cc->rlen;
        int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
        int ret;

        inbuf.pos = 0;
        inbuf.src = cc->rbuf;
        inbuf.size = src_size;

        outbuf.pos = 0;
        outbuf.dst = cc->cbuf->cdata;
        outbuf.size = dst_size;

        ret = zstd_compress_stream(stream, &outbuf, &inbuf);
        if (zstd_is_error(ret)) {
                printk_ratelimited("%sF2FS-fs (%s): %s zstd_compress_stream failed, ret: %d\n",
                                KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
                                __func__, zstd_get_error_code(ret));
                return -EIO;
        }

        ret = zstd_end_stream(stream, &outbuf);
        if (zstd_is_error(ret)) {
                printk_ratelimited("%sF2FS-fs (%s): %s zstd_end_stream returned %d\n",
                                KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
                                __func__, zstd_get_error_code(ret));
                return -EIO;
        }

        /*
         * compressed data remains in the intermediate buffer because there
         * is no more space left in cbuf.cdata
         */
        if (ret)
                return -EAGAIN;

        cc->clen = outbuf.pos;
        return 0;
}

static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
        zstd_dstream *stream;
        void *workspace;
        unsigned int workspace_size;
        unsigned int max_window_size =
                        MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);

        workspace_size = zstd_dstream_workspace_bound(max_window_size);

        workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
                                        workspace_size, GFP_NOFS);
        if (!workspace)
                return -ENOMEM;

        stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
        if (!stream) {
                printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_dstream failed\n",
                                KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
                                __func__);
                kvfree(workspace);
                return -EIO;
        }

        dic->private = workspace;
        dic->private2 = stream;

        return 0;
}

static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
        kvfree(dic->private);
        dic->private = NULL;
        dic->private2 = NULL;
}

static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
        zstd_dstream *stream = dic->private2;
        zstd_in_buffer inbuf;
        zstd_out_buffer outbuf;
        int ret;

        inbuf.pos = 0;
        inbuf.src = dic->cbuf->cdata;
        inbuf.size = dic->clen;

        outbuf.pos = 0;
        outbuf.dst = dic->rbuf;
        outbuf.size = dic->rlen;

        ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
        if (zstd_is_error(ret)) {
                printk_ratelimited("%sF2FS-fs (%s): %s zstd_decompress_stream failed, ret: %d\n",
                                KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
                                __func__, zstd_get_error_code(ret));
                return -EIO;
        }

        if (dic->rlen != outbuf.pos) {
                printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
                                "expected:%lu\n", KERN_ERR,
                                F2FS_I_SB(dic->inode)->sb->s_id,
                                __func__, dic->rlen,
                                PAGE_SIZE << dic->log_cluster_size);
                return -EIO;
        }

        return 0;
}

static const struct f2fs_compress_ops f2fs_zstd_ops = {
        .init_compress_ctx      = zstd_init_compress_ctx,
        .destroy_compress_ctx   = zstd_destroy_compress_ctx,
        .compress_pages         = zstd_compress_pages,
        .init_decompress_ctx    = zstd_init_decompress_ctx,
        .destroy_decompress_ctx = zstd_destroy_decompress_ctx,
        .decompress_pages       = zstd_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZO
#ifdef CONFIG_F2FS_FS_LZORLE
static int lzorle_compress_pages(struct compress_ctx *cc)
{
        int ret;

        ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
                                        &cc->clen, cc->private);
        if (ret != LZO_E_OK) {
                printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
                                KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
                return -EIO;
        }
        return 0;
}

static const struct f2fs_compress_ops f2fs_lzorle_ops = {
        .init_compress_ctx      = lzo_init_compress_ctx,
        .destroy_compress_ctx   = lzo_destroy_compress_ctx,
        .compress_pages         = lzorle_compress_pages,
        .decompress_pages       = lzo_decompress_pages,
};
#endif
#endif

static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
        &f2fs_lzo_ops,
#else
        NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
        &f2fs_lz4_ops,
#else
        NULL,
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
        &f2fs_zstd_ops,
#else
        NULL,
#endif
#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
        &f2fs_lzorle_ops,
#else
        NULL,
#endif
};

bool f2fs_is_compress_backend_ready(struct inode *inode)
{
        if (!f2fs_compressed_file(inode))
                return true;
        return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

static mempool_t *compress_page_pool;
static int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
                "Number of intermediate compress pages to preallocate");

int f2fs_init_compress_mempool(void)
{
        compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
        if (!compress_page_pool)
                return -ENOMEM;

        return 0;
}

void f2fs_destroy_compress_mempool(void)
{
        mempool_destroy(compress_page_pool);
}

static struct page *f2fs_compress_alloc_page(void)
{
        struct page *page;

        page = mempool_alloc(compress_page_pool, GFP_NOFS);
        lock_page(page);

        return page;
}

static void f2fs_compress_free_page(struct page *page)
{
        if (!page)
                return;
        detach_page_private(page);
        page->mapping = NULL;
        unlock_page(page);
        mempool_free(page, compress_page_pool);
}

#define MAX_VMAP_RETRIES        3

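/*
 * Map an array of pages into a virtually contiguous buffer. vm_map_ram()
 * can fail transiently when vmap space is fragmented, so retry a few
 * times after flushing stale aliases with vm_unmap_aliases().
 */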
static void *f2fs_vmap(struct page **pages, unsigned int count)
{
        int i;
        void *buf = NULL;

        for (i = 0; i < MAX_VMAP_RETRIES; i++) {
                buf = vm_map_ram(pages, count, -1);
                if (buf)
                        break;
                vm_unmap_aliases();
        }
        return buf;
}

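/*
 * Compress one cluster: map rpages (raw data) and cpages (destination)
 * into linear buffers, run the per-algorithm compressor, fill in the
 * compress header (clen/chksum/reserved), zero the tail of the last used
 * cpage, and free any destination pages beyond the compressed length.
 */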
static int f2fs_compress_pages(struct compress_ctx *cc)
{
        struct f2fs_inode_info *fi = F2FS_I(cc->inode);
        const struct f2fs_compress_ops *cops =
                                f2fs_cops[fi->i_compress_algorithm];
        unsigned int max_len, new_nr_cpages;
        u32 chksum = 0;
        int i, ret;

        trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
                                cc->cluster_size, fi->i_compress_algorithm);

        if (cops->init_compress_ctx) {
                ret = cops->init_compress_ctx(cc);
                if (ret)
                        goto out;
        }

        max_len = COMPRESS_HEADER_SIZE + cc->clen;
        cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
        cc->valid_nr_cpages = cc->nr_cpages;

        cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
        if (!cc->cpages) {
                ret = -ENOMEM;
                goto destroy_compress_ctx;
        }

        for (i = 0; i < cc->nr_cpages; i++) {
                cc->cpages[i] = f2fs_compress_alloc_page();
                if (!cc->cpages[i]) {
                        ret = -ENOMEM;
                        goto out_free_cpages;
                }
        }

        cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
        if (!cc->rbuf) {
                ret = -ENOMEM;
                goto out_free_cpages;
        }

        cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
        if (!cc->cbuf) {
                ret = -ENOMEM;
                goto out_vunmap_rbuf;
        }

        ret = cops->compress_pages(cc);
        if (ret)
                goto out_vunmap_cbuf;

        max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

        if (cc->clen > max_len) {
                ret = -EAGAIN;
                goto out_vunmap_cbuf;
        }

        cc->cbuf->clen = cpu_to_le32(cc->clen);

        if (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)
                chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
                                        cc->cbuf->cdata, cc->clen);
        cc->cbuf->chksum = cpu_to_le32(chksum);

        for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
                cc->cbuf->reserved[i] = cpu_to_le32(0);

        new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

        /* zero out any unused part of the last page */
        memset(&cc->cbuf->cdata[cc->clen], 0,
                        (new_nr_cpages * PAGE_SIZE) -
                        (cc->clen + COMPRESS_HEADER_SIZE));

        vm_unmap_ram(cc->cbuf, cc->nr_cpages);
        vm_unmap_ram(cc->rbuf, cc->cluster_size);

        for (i = 0; i < cc->nr_cpages; i++) {
                if (i < new_nr_cpages)
                        continue;
                f2fs_compress_free_page(cc->cpages[i]);
                cc->cpages[i] = NULL;
        }

        if (cops->destroy_compress_ctx)
                cops->destroy_compress_ctx(cc);

        cc->valid_nr_cpages = new_nr_cpages;

        trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
                                                        cc->clen, ret);
        return 0;

out_vunmap_cbuf:
        vm_unmap_ram(cc->cbuf, cc->nr_cpages);
out_vunmap_rbuf:
        vm_unmap_ram(cc->rbuf, cc->cluster_size);
out_free_cpages:
        for (i = 0; i < cc->nr_cpages; i++) {
                if (cc->cpages[i])
                        f2fs_compress_free_page(cc->cpages[i]);
        }
        page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
        cc->cpages = NULL;
destroy_compress_ctx:
        if (cops->destroy_compress_ctx)
                cops->destroy_compress_ctx(cc);
out:
        trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
                                                        cc->clen, ret);
        return ret;
}

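/*
 * Decompress an in-memory cluster: allocate temporary target pages
 * (tpages) for slots the caller did not supply, map both page arrays,
 * run the per-algorithm decompressor, verify the optional checksum, and
 * finish the read via f2fs_decompress_end_io().
 */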
void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
        struct f2fs_inode_info *fi = F2FS_I(dic->inode);
        const struct f2fs_compress_ops *cops =
                        f2fs_cops[fi->i_compress_algorithm];
        int ret;
        int i;

        trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
                                dic->cluster_size, fi->i_compress_algorithm);

        if (dic->failed) {
                ret = -EIO;
                goto out_end_io;
        }

        dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
        if (!dic->tpages) {
                ret = -ENOMEM;
                goto out_end_io;
        }

        for (i = 0; i < dic->cluster_size; i++) {
                if (dic->rpages[i]) {
                        dic->tpages[i] = dic->rpages[i];
                        continue;
                }

                dic->tpages[i] = f2fs_compress_alloc_page();
                if (!dic->tpages[i]) {
                        ret = -ENOMEM;
                        goto out_end_io;
                }
        }

        if (cops->init_decompress_ctx) {
                ret = cops->init_decompress_ctx(dic);
                if (ret)
                        goto out_end_io;
        }

        dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
        if (!dic->rbuf) {
                ret = -ENOMEM;
                goto out_destroy_decompress_ctx;
        }

        dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
        if (!dic->cbuf) {
                ret = -ENOMEM;
                goto out_vunmap_rbuf;
        }

        dic->clen = le32_to_cpu(dic->cbuf->clen);
        dic->rlen = PAGE_SIZE << dic->log_cluster_size;

        if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
                ret = -EFSCORRUPTED;
                goto out_vunmap_cbuf;
        }

        ret = cops->decompress_pages(dic);

        if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
                u32 provided = le32_to_cpu(dic->cbuf->chksum);
                u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);

                if (provided != calculated) {
                        if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
                                set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
                                printk_ratelimited(
                                        "%sF2FS-fs (%s): checksum invalid, ino = %lu, %x vs %x",
                                        KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
                                        provided, calculated);
                        }
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
                }
        }

out_vunmap_cbuf:
        vm_unmap_ram(dic->cbuf, dic->nr_cpages);
out_vunmap_rbuf:
        vm_unmap_ram(dic->rbuf, dic->cluster_size);
out_destroy_decompress_ctx:
        if (cops->destroy_decompress_ctx)
                cops->destroy_decompress_ctx(dic);
out_end_io:
        trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
                                                        dic->clen, ret);
        f2fs_decompress_end_io(dic, ret);
}

/*
 * This is called when a page of a compressed cluster has been read from disk
 * (or failed to be read from disk).  It checks whether this page was the last
 * page being waited on in the cluster, and if so, it decompresses the cluster
 * (or in the case of a failure, cleans up without actually decompressing).
 */
void f2fs_end_read_compressed_page(struct page *page, bool failed,
                                                block_t blkaddr)
{
        struct decompress_io_ctx *dic =
                        (struct decompress_io_ctx *)page_private(page);
        struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);

        dec_page_count(sbi, F2FS_RD_DATA);

        if (failed)
                WRITE_ONCE(dic->failed, true);
        else if (blkaddr)
                f2fs_cache_compressed_page(sbi, page,
                                        dic->inode->i_ino, blkaddr);

        if (atomic_dec_and_test(&dic->remaining_pages))
                f2fs_decompress_cluster(dic);
}

static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
        if (cc->cluster_idx == NULL_CLUSTER)
                return true;
        return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
        return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
        return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
        if (f2fs_cluster_is_empty(cc))
                return true;
        return is_page_in_cluster(cc, index);
}

bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec,
                                int index, int nr_pages)
{
        unsigned long pgidx;
        int i;

        if (nr_pages - index < cc->cluster_size)
                return false;

        pgidx = pvec->pages[index]->index;

        for (i = 1; i < cc->cluster_size; i++) {
                if (pvec->pages[index + i]->index != pgidx + i)
                        return false;
        }

        return true;
}

static bool cluster_has_invalid_data(struct compress_ctx *cc)
{
        loff_t i_size = i_size_read(cc->inode);
        unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
        int i;

        for (i = 0; i < cc->cluster_size; i++) {
                struct page *page = cc->rpages[i];

                f2fs_bug_on(F2FS_I_SB(cc->inode), !page);

                /* beyond EOF */
                if (page->index >= nr_pages)
                        return true;
        }
        return false;
}

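/*
 * Detect on-disk cluster layouts that f2fs can never produce, e.g.
 * COMPRESS_ADDR in the middle of a cluster, or a valid block address
 * following a hole inside a compressed cluster. Returns true and flags
 * the filesystem for fsck if the cluster is corrupted.
 */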
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
        bool compressed = dn->data_blkaddr == COMPRESS_ADDR;
        int cluster_end = 0;
        int i;
        char *reason = "";

        if (!compressed)
                return false;

        /* [..., COMPR_ADDR, ...] */
        if (dn->ofs_in_node % cluster_size) {
                reason = "[*|C|*|*]";
                goto out;
        }

        for (i = 1; i < cluster_size; i++) {
                block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
                                                        dn->ofs_in_node + i);

                /* [COMPR_ADDR, ..., COMPR_ADDR] */
                if (blkaddr == COMPRESS_ADDR) {
                        reason = "[C|*|C|*]";
                        goto out;
                }
                if (compressed) {
                        if (!__is_valid_data_blkaddr(blkaddr)) {
                                if (!cluster_end)
                                        cluster_end = i;
                                continue;
                        }
                        /* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */
                        if (cluster_end) {
                                reason = "[C|N|N|V]";
                                goto out;
                        }
                }
        }
        return false;
out:
        f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s",
                        dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);
        set_sbi_flag(sbi, SBI_NEED_FSCK);
        return true;
}

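/*
 * Count blocks in the cluster starting at @cluster_idx: with @compr set,
 * only valid (compressed) data blocks are counted, otherwise every
 * non-NULL_ADDR block counts. The count includes the COMPRESS_ADDR
 * header slot. Returns 0 for a hole or non-compressed cluster, or a
 * negative errno on failure.
 */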
static int __f2fs_cluster_blocks(struct inode *inode,
                                unsigned int cluster_idx, bool compr)
{
        struct dnode_of_data dn;
        unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
        unsigned int start_idx = cluster_idx <<
                                F2FS_I(inode)->i_log_cluster_size;
        int ret;

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
        if (ret) {
                if (ret == -ENOENT)
                        ret = 0;
                goto fail;
        }

        if (f2fs_sanity_check_cluster(&dn)) {
                ret = -EFSCORRUPTED;
                goto fail;
        }

        if (dn.data_blkaddr == COMPRESS_ADDR) {
                int i;

                ret = 1;
                for (i = 1; i < cluster_size; i++) {
                        block_t blkaddr;

                        blkaddr = data_blkaddr(dn.inode,
                                        dn.node_page, dn.ofs_in_node + i);
                        if (compr) {
                                if (__is_valid_data_blkaddr(blkaddr))
                                        ret++;
                        } else {
                                if (blkaddr != NULL_ADDR)
                                        ret++;
                        }
                }

                f2fs_bug_on(F2FS_I_SB(inode),
                        !compr && ret != cluster_size &&
                        !is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
        }
fail:
        f2fs_put_dnode(&dn);
        return ret;
}

/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
        return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
}

/* return # of valid blocks in compressed cluster */
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
        return __f2fs_cluster_blocks(inode,
                index >> F2FS_I(inode)->i_log_cluster_size,
                false);
}

static bool cluster_may_compress(struct compress_ctx *cc)
{
        if (!f2fs_need_compress_data(cc->inode))
                return false;
        if (f2fs_is_atomic_file(cc->inode))
                return false;
        if (!f2fs_cluster_is_full(cc))
                return false;
        if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
                return false;
        return !cluster_has_invalid_data(cc);
}

static void set_cluster_writeback(struct compress_ctx *cc)
{
        int i;

        for (i = 0; i < cc->cluster_size; i++) {
                if (cc->rpages[i])
                        set_page_writeback(cc->rpages[i]);
        }
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
        int i;

        for (i = 0; i < cc->cluster_size; i++)
                if (cc->rpages[i])
                        set_page_dirty(cc->rpages[i]);
}

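/*
 * Prepare a compressed cluster for overwrite: pin and lock every
 * pagecache page of the cluster, read in pages that are not yet
 * uptodate, and restart from scratch if a page gets truncated meanwhile.
 * Returns cluster_size on success, 0 for a non-compressed cluster, or a
 * negative errno.
 */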
static int prepare_compress_overwrite(struct compress_ctx *cc,
                struct page **pagep, pgoff_t index, void **fsdata)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
        struct address_space *mapping = cc->inode->i_mapping;
        struct page *page;
        sector_t last_block_in_bio;
        unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
        pgoff_t start_idx = start_idx_of_cluster(cc);
        int i, ret;

retry:
        ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
        if (ret <= 0)
                return ret;

        ret = f2fs_init_compress_ctx(cc);
        if (ret)
                return ret;

        /* keep page reference to avoid page reclaim */
        for (i = 0; i < cc->cluster_size; i++) {
                page = f2fs_pagecache_get_page(mapping, start_idx + i,
                                                        fgp_flag, GFP_NOFS);
                if (!page) {
                        ret = -ENOMEM;
                        goto unlock_pages;
                }

                if (PageUptodate(page))
                        f2fs_put_page(page, 1);
                else
                        f2fs_compress_ctx_add_page(cc, page);
        }

        if (!f2fs_cluster_is_empty(cc)) {
                struct bio *bio = NULL;

                ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
                                        &last_block_in_bio, false, true);
                f2fs_put_rpages(cc);
                f2fs_destroy_compress_ctx(cc, true);
                if (ret)
                        goto out;
                if (bio)
                        f2fs_submit_bio(sbi, bio, DATA);

                ret = f2fs_init_compress_ctx(cc);
                if (ret)
                        goto out;
        }

        for (i = 0; i < cc->cluster_size; i++) {
                f2fs_bug_on(sbi, cc->rpages[i]);

                page = find_lock_page(mapping, start_idx + i);
                if (!page) {
                        /* page can be truncated */
                        goto release_and_retry;
                }

                f2fs_wait_on_page_writeback(page, DATA, true, true);
                f2fs_compress_ctx_add_page(cc, page);

                if (!PageUptodate(page)) {
release_and_retry:
                        f2fs_put_rpages(cc);
                        f2fs_unlock_rpages(cc, i + 1);
                        f2fs_destroy_compress_ctx(cc, true);
                        goto retry;
                }
        }

        if (likely(!ret)) {
                *fsdata = cc->rpages;
                *pagep = cc->rpages[offset_in_cluster(cc, index)];
                return cc->cluster_size;
        }

unlock_pages:
        f2fs_put_rpages(cc);
        f2fs_unlock_rpages(cc, i);
        f2fs_destroy_compress_ctx(cc, true);
out:
        return ret;
}

int f2fs_prepare_compress_overwrite(struct inode *inode,
                struct page **pagep, pgoff_t index, void **fsdata)
{
        struct compress_ctx cc = {
                .inode = inode,
                .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
                .cluster_size = F2FS_I(inode)->i_cluster_size,
                .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
                .rpages = NULL,
                .nr_rpages = 0,
        };

        return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}

bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
                                        pgoff_t index, unsigned copied)
{
        struct compress_ctx cc = {
                .inode = inode,
                .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
                .cluster_size = F2FS_I(inode)->i_cluster_size,
                .rpages = fsdata,
        };
        bool first_index = (index == cc.rpages[0]->index);

        if (copied)
                set_cluster_dirty(&cc);

        f2fs_put_rpages_wbc(&cc, NULL, false, 1);
        f2fs_destroy_compress_ctx(&cc, false);

        return first_index;
}

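/*
 * Truncate from offset @from within a cluster. A normal cluster is
 * handed to f2fs_do_truncate_blocks(); for a compressed cluster, the
 * pages are read in, zeroed from @from onwards, and marked dirty again
 * so the cluster gets rewritten.
 */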
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
        void *fsdata = NULL;
        struct page *pagep;
        int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
        pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
                                                        log_cluster_size;
        int err;

        err = f2fs_is_compressed_cluster(inode, start_idx);
        if (err < 0)
                return err;

        /* truncate normal cluster */
        if (!err)
                return f2fs_do_truncate_blocks(inode, from, lock);

        /* truncate compressed cluster */
        err = f2fs_prepare_compress_overwrite(inode, &pagep,
                                                start_idx, &fsdata);

        /* should not be a normal cluster */
        f2fs_bug_on(F2FS_I_SB(inode), err == 0);

        if (err <= 0)
                return err;

        if (err > 0) {
                struct page **rpages = fsdata;
                int cluster_size = F2FS_I(inode)->i_cluster_size;
                int i;

                for (i = cluster_size - 1; i >= 0; i--) {
                        loff_t start = rpages[i]->index << PAGE_SHIFT;

                        if (from <= start) {
                                zero_user_segment(rpages[i], 0, PAGE_SIZE);
                        } else {
                                zero_user_segment(rpages[i], from - start,
                                                                PAGE_SIZE);
                                break;
                        }
                }

                f2fs_compress_write_end(inode, fsdata, start_idx, true);
        }
        return 0;
}

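/*
 * Write back a successfully compressed cluster: tag the cluster head
 * block with COMPRESS_ADDR, attach a compress_io_ctx to the compressed
 * (and possibly fs-layer encrypted) pages, and submit them as outplace
 * writes. Any failure returns -EAGAIN so the caller falls back to
 * writing the raw pages.
 */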
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
                                        int *submitted,
                                        struct writeback_control *wbc,
                                        enum iostat_type io_type)
{
        struct inode *inode = cc->inode;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .ino = cc->inode->i_ino,
                .type = DATA,
                .op = REQ_OP_WRITE,
                .op_flags = wbc_to_write_flags(wbc),
                .old_blkaddr = NEW_ADDR,
                .page = NULL,
                .encrypted_page = NULL,
                .compressed_page = NULL,
                .submitted = false,
                .io_type = io_type,
                .io_wbc = wbc,
                .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
        };
        struct dnode_of_data dn;
        struct node_info ni;
        struct compress_io_ctx *cic;
        pgoff_t start_idx = start_idx_of_cluster(cc);
        unsigned int last_index = cc->cluster_size - 1;
        loff_t psize;
        int i, err;

        /* we should bypass data pages so that the kworker jobs can proceed */
        if (unlikely(f2fs_cp_error(sbi))) {
                mapping_set_error(cc->rpages[0]->mapping, -EIO);
                goto out_free;
        }

        if (IS_NOQUOTA(inode)) {
                /*
                 * We need to wait for node_write to avoid block allocation
                 * during checkpoint. This can only happen for quota writes,
                 * which could otherwise trigger the discard race condition
                 * below.
                 */
                f2fs_down_read(&sbi->node_write);
        } else if (!f2fs_trylock_op(sbi)) {
                goto out_free;
        }

        set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

        err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
        if (err)
                goto out_unlock_op;

        for (i = 0; i < cc->cluster_size; i++) {
                if (data_blkaddr(dn.inode, dn.node_page,
                                        dn.ofs_in_node + i) == NULL_ADDR)
                        goto out_put_dnode;
        }

        psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

        err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
        if (err)
                goto out_put_dnode;

        fio.version = ni.version;

        cic = f2fs_kmem_cache_alloc(cic_entry_slab, GFP_F2FS_ZERO, false, sbi);
        if (!cic)
                goto out_put_dnode;

        cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
        cic->inode = inode;
        atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
        cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
        if (!cic->rpages)
                goto out_put_cic;

        cic->nr_rpages = cc->cluster_size;

        for (i = 0; i < cc->valid_nr_cpages; i++) {
                f2fs_set_compressed_page(cc->cpages[i], inode,
                                        cc->rpages[i + 1]->index, cic);
                fio.compressed_page = cc->cpages[i];

                fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
                                                dn.ofs_in_node + i + 1);

                /* wait for GCed page writeback via META_MAPPING */
                f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);

                if (fio.encrypted) {
                        fio.page = cc->rpages[i + 1];
                        err = f2fs_encrypt_one_page(&fio);
                        if (err)
                                goto out_destroy_crypt;
                        cc->cpages[i] = fio.encrypted_page;
                }
        }

        set_cluster_writeback(cc);

        for (i = 0; i < cc->cluster_size; i++)
                cic->rpages[i] = cc->rpages[i];

        for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
                block_t blkaddr;

                blkaddr = f2fs_data_blkaddr(&dn);
                fio.page = cc->rpages[i];
                fio.old_blkaddr = blkaddr;

                /* cluster header */
                if (i == 0) {
                        if (blkaddr == COMPRESS_ADDR)
                                fio.compr_blocks++;
                        if (__is_valid_data_blkaddr(blkaddr))
                                f2fs_invalidate_blocks(sbi, blkaddr);
                        f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
                        goto unlock_continue;
                }

                if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
                        fio.compr_blocks++;

                if (i > cc->valid_nr_cpages) {
                        if (__is_valid_data_blkaddr(blkaddr)) {
                                f2fs_invalidate_blocks(sbi, blkaddr);
                                f2fs_update_data_blkaddr(&dn, NEW_ADDR);
                        }
                        goto unlock_continue;
                }

                f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

                if (fio.encrypted)
                        fio.encrypted_page = cc->cpages[i - 1];
                else
                        fio.compressed_page = cc->cpages[i - 1];

                cc->cpages[i - 1] = NULL;
                f2fs_outplace_write_data(&dn, &fio);
                (*submitted)++;
unlock_continue:
                inode_dec_dirty_pages(cc->inode);
                unlock_page(fio.page);
        }

        if (fio.compr_blocks)
                f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
        f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true);
        add_compr_block_stat(inode, cc->valid_nr_cpages);

        set_inode_flag(cc->inode, FI_APPEND_WRITE);
        if (cc->cluster_idx == 0)
                set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

        f2fs_put_dnode(&dn);
        if (IS_NOQUOTA(inode))
                f2fs_up_read(&sbi->node_write);
        else
                f2fs_unlock_op(sbi);

        spin_lock(&fi->i_size_lock);
        if (fi->last_disk_size < psize)
                fi->last_disk_size = psize;
        spin_unlock(&fi->i_size_lock);

        f2fs_put_rpages(cc);
        page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
        cc->cpages = NULL;
        f2fs_destroy_compress_ctx(cc, false);
        return 0;

out_destroy_crypt:
        page_array_free(cc->inode, cic->rpages, cc->cluster_size);

        for (--i; i >= 0; i--)
                fscrypt_finalize_bounce_page(&cc->cpages[i]);
out_put_cic:
        kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
        f2fs_put_dnode(&dn);
out_unlock_op:
        if (IS_NOQUOTA(inode))
                f2fs_up_read(&sbi->node_write);
        else
                f2fs_unlock_op(sbi);
out_free:
        for (i = 0; i < cc->valid_nr_cpages; i++) {
                f2fs_compress_free_page(cc->cpages[i]);
                cc->cpages[i] = NULL;
        }
        page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
        cc->cpages = NULL;
        return -EAGAIN;
}

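/*
 * Write completion for one compressed page. When the last compressed
 * page of the cluster completes, end writeback on all raw pages and
 * free the compress_io_ctx.
 */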
void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
        struct f2fs_sb_info *sbi = bio->bi_private;
        struct compress_io_ctx *cic =
                        (struct compress_io_ctx *)page_private(page);
        int i;

        if (unlikely(bio->bi_status))
                mapping_set_error(cic->inode->i_mapping, -EIO);

        f2fs_compress_free_page(page);

        dec_page_count(sbi, F2FS_WB_DATA);

        if (atomic_dec_return(&cic->pending_pages))
                return;

        for (i = 0; i < cic->nr_rpages; i++) {
                WARN_ON(!cic->rpages[i]);
                clear_page_private_gcing(cic->rpages[i]);
                end_page_writeback(cic->rpages[i]);
        }

        page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
        kmem_cache_free(cic_entry_slab, cic);
}

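/*
 * Fallback path that writes the cluster's raw pages one by one without
 * compression, passing the current compressed block count down so block
 * accounting stays correct.
 */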
static int f2fs_write_raw_pages(struct compress_ctx *cc,
                                        int *submitted,
                                        struct writeback_control *wbc,
                                        enum iostat_type io_type)
{
        struct address_space *mapping = cc->inode->i_mapping;
        int _submitted, compr_blocks, ret, i;

        compr_blocks = f2fs_compressed_blocks(cc);

        for (i = 0; i < cc->cluster_size; i++) {
                if (!cc->rpages[i])
                        continue;

                redirty_page_for_writepage(wbc, cc->rpages[i]);
                unlock_page(cc->rpages[i]);
        }

        if (compr_blocks < 0)
                return compr_blocks;

        for (i = 0; i < cc->cluster_size; i++) {
                if (!cc->rpages[i])
                        continue;
retry_write:
                lock_page(cc->rpages[i]);

                if (cc->rpages[i]->mapping != mapping) {
continue_unlock:
                        unlock_page(cc->rpages[i]);
                        continue;
                }

                if (!PageDirty(cc->rpages[i]))
                        goto continue_unlock;

                if (!clear_page_dirty_for_io(cc->rpages[i]))
                        goto continue_unlock;

                ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
                                                NULL, NULL, wbc, io_type,
                                                compr_blocks, false);
                if (ret) {
                        if (ret == AOP_WRITEPAGE_ACTIVATE) {
                                unlock_page(cc->rpages[i]);
                                ret = 0;
                        } else if (ret == -EAGAIN) {
                                /*
                                 * for a quota file, just redirty the
                                 * remaining pages to avoid a deadlock caused
                                 * by a cluster update race with foreground
                                 * operations.
                                 */
                                if (IS_NOQUOTA(cc->inode))
                                        return 0;
                                ret = 0;
                                f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
                                goto retry_write;
                        }
                        return ret;
                }

                *submitted += _submitted;
        }

        f2fs_balance_fs(F2FS_M_SB(mapping), true);

        return 0;
}

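/*
 * Write back one cluster: try compressed writeback first and fall back
 * to raw page writeback if the cluster cannot or should not be
 * compressed (e.g. compression does not save a block, -EAGAIN).
 */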
1521 int f2fs_write_multi_pages(struct compress_ctx *cc,
1522                                         int *submitted,
1523                                         struct writeback_control *wbc,
1524                                         enum iostat_type io_type)
1525 {
1526         int err;
1527
1528         *submitted = 0;
1529         if (cluster_may_compress(cc)) {
1530                 err = f2fs_compress_pages(cc);
1531                 if (err == -EAGAIN) {
1532                         add_compr_block_stat(cc->inode, cc->cluster_size);
1533                         goto write;
1534                 } else if (err) {
1535                         f2fs_put_rpages_wbc(cc, wbc, true, 1);
1536                         goto destroy_out;
1537                 }
1538
1539                 err = f2fs_write_compressed_pages(cc, submitted,
1540                                                         wbc, io_type);
1541                 if (!err)
1542                         return 0;
1543                 f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
1544         }
1545 write:
1546         f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
1547
1548         err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
1549         f2fs_put_rpages_wbc(cc, wbc, false, 0);
1550 destroy_out:
1551         f2fs_destroy_compress_ctx(cc, false);
1552         return err;
1553 }
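/*
 * Illustrative caller sketch (editorial; the surrounding writeback loop in
 * data.c is simplified and the variable names are hypothetical): a cluster is
 * first tried compressed, and only falls back to raw page writes when
 * compression is not possible or not profitable:
 *
 *	int submitted = 0;
 *
 *	ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
 *	if (!ret)
 *		nwritten += submitted;
 */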
1554
1555 static void f2fs_free_dic(struct decompress_io_ctx *dic);
1556
1557 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
1558 {
1559         struct decompress_io_ctx *dic;
1560         pgoff_t start_idx = start_idx_of_cluster(cc);
1561         int i;
1562
1563         dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO,
1564                                         false, F2FS_I_SB(cc->inode));
1565         if (!dic)
1566                 return ERR_PTR(-ENOMEM);
1567
1568         dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1569         if (!dic->rpages) {
1570                 kmem_cache_free(dic_entry_slab, dic);
1571                 return ERR_PTR(-ENOMEM);
1572         }
1573
1574         dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1575         dic->inode = cc->inode;
1576         atomic_set(&dic->remaining_pages, cc->nr_cpages);
1577         dic->cluster_idx = cc->cluster_idx;
1578         dic->cluster_size = cc->cluster_size;
1579         dic->log_cluster_size = cc->log_cluster_size;
1580         dic->nr_cpages = cc->nr_cpages;
1581         refcount_set(&dic->refcnt, 1);
1582         dic->failed = false;
1583         dic->need_verity = f2fs_need_verity(cc->inode, start_idx);
1584
1585         for (i = 0; i < dic->cluster_size; i++)
1586                 dic->rpages[i] = cc->rpages[i];
1587         dic->nr_rpages = cc->cluster_size;
1588
1589         dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
1590         if (!dic->cpages)
1591                 goto out_free;
1592
1593         for (i = 0; i < dic->nr_cpages; i++) {
1594                 struct page *page;
1595
1596                 page = f2fs_compress_alloc_page();
1597                 if (!page)
1598                         goto out_free;
1599
1600                 f2fs_set_compressed_page(page, cc->inode,
1601                                         start_idx + i + 1, dic);
1602                 dic->cpages[i] = page;
1603         }
1604
1605         return dic;
1606
1607 out_free:
1608         f2fs_free_dic(dic);
1609         return ERR_PTR(-ENOMEM);
1610 }
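/*
 * Usage sketch (editorial): failures are reported via ERR_PTR(), never NULL,
 * so a read-path caller checks with IS_ERR():
 *
 *	dic = f2fs_alloc_dic(cc);
 *	if (IS_ERR(dic))
 *		return PTR_ERR(dic);
 */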
1611
1612 static void f2fs_free_dic(struct decompress_io_ctx *dic)
1613 {
1614         int i;
1615
1616         if (dic->tpages) {
1617                 for (i = 0; i < dic->cluster_size; i++) {
1618                         if (dic->rpages[i])
1619                                 continue;
1620                         if (!dic->tpages[i])
1621                                 continue;
1622                         f2fs_compress_free_page(dic->tpages[i]);
1623                 }
1624                 page_array_free(dic->inode, dic->tpages, dic->cluster_size);
1625         }
1626
1627         if (dic->cpages) {
1628                 for (i = 0; i < dic->nr_cpages; i++) {
1629                         if (!dic->cpages[i])
1630                                 continue;
1631                         f2fs_compress_free_page(dic->cpages[i]);
1632                 }
1633                 page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
1634         }
1635
1636         page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
1637         kmem_cache_free(dic_entry_slab, dic);
1638 }
1639
1640 static void f2fs_put_dic(struct decompress_io_ctx *dic)
1641 {
1642         if (refcount_dec_and_test(&dic->refcnt))
1643                 f2fs_free_dic(dic);
1644 }
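/*
 * Lifetime sketch (editorial): the refcount starts at 1 in f2fs_alloc_dic();
 * any additional holder is assumed to pair refcount_inc(&dic->refcnt) with a
 * matching f2fs_put_dic(), and the final put frees the context:
 *
 *	refcount_inc(&dic->refcnt);
 *	...
 *	f2fs_put_dic(dic);
 */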
1645
1646 /*
1647  * Update and unlock the cluster's pagecache pages, and release the reference to
1648  * the decompress_io_ctx that was being held for I/O completion.
1649  */
1650 static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
1651 {
1652         int i;
1653
1654         for (i = 0; i < dic->cluster_size; i++) {
1655                 struct page *rpage = dic->rpages[i];
1656
1657                 if (!rpage)
1658                         continue;
1659
1660                 /* PG_error was set if verity failed. */
1661                 if (failed || PageError(rpage)) {
1662                         ClearPageUptodate(rpage);
1663                         /* the page will be re-read later */
1664                         ClearPageError(rpage);
1665                 } else {
1666                         SetPageUptodate(rpage);
1667                 }
1668                 unlock_page(rpage);
1669         }
1670
1671         f2fs_put_dic(dic);
1672 }
1673
1674 static void f2fs_verify_cluster(struct work_struct *work)
1675 {
1676         struct decompress_io_ctx *dic =
1677                 container_of(work, struct decompress_io_ctx, verity_work);
1678         int i;
1679
1680         /* Verify the cluster's decompressed pages with fs-verity. */
1681         for (i = 0; i < dic->cluster_size; i++) {
1682                 struct page *rpage = dic->rpages[i];
1683
1684                 if (rpage && !fsverity_verify_page(rpage))
1685                         SetPageError(rpage);
1686         }
1687
1688         __f2fs_decompress_end_io(dic, false);
1689 }
1690
1691 /*
1692  * This is called when a compressed cluster has been decompressed
1693  * (or failed to be read and/or decompressed).
1694  */
1695 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
1696 {
1697         if (!failed && dic->need_verity) {
1698                 /*
1699                  * Note that to avoid deadlocks, the verity work can't be done
1700                  * on the decompression workqueue.  This is because verifying
1701                  * the data pages can involve reading metadata pages from the
1702                  * file, and these metadata pages may be compressed.
1703                  */
1704                 INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
1705                 fsverity_enqueue_verify_work(&dic->verity_work);
1706         } else {
1707                 __f2fs_decompress_end_io(dic, failed);
1708         }
1709 }
1710
1711 /*
1712  * Put a reference to a compressed page's decompress_io_ctx.
1713  *
1714  * This is called when the page is no longer needed and can be freed.
1715  */
1716 void f2fs_put_page_dic(struct page *page)
1717 {
1718         struct decompress_io_ctx *dic =
1719                         (struct decompress_io_ctx *)page_private(page);
1720
1721         f2fs_put_dic(dic);
1722 }
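/*
 * Call-site sketch (hypothetical simplification of the read completion path):
 * each compressed page stores its decompress_io_ctx in page_private(), so an
 * end_io handler can drop the per-page reference like this:
 *
 *	if (f2fs_is_compressed_page(page))
 *		f2fs_put_page_dic(page);
 */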
1723
1724 /*
1725  * Check whether the cluster's blocks are contiguous.  The caller adds an
1726  * extent cache entry only if the blocks are logically and physically contiguous.
1727  */
1728 unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
1729 {
1730         bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
1731         int i = compressed ? 1 : 0;
1732         block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
1733                                                 dn->ofs_in_node + i);
1734
1735         for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
1736                 block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
1737                                                 dn->ofs_in_node + i);
1738
1739                 if (!__is_valid_data_blkaddr(blkaddr))
1740                         break;
1741                 if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr)
1742                         return 0;
1743         }
1744
1745         return compressed ? i - 1 : i;
1746 }
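/*
 * Worked example (editorial): for a 4-block cluster stored on disk as
 * [COMPRESS_ADDR, 100, 101, 102], compressed is true, first_blkaddr is 100,
 * and each later block must satisfy first_blkaddr + i - 1 == blkaddr
 * (100 + 2 - 1 == 101, 100 + 3 - 1 == 102), so the function returns
 * i - 1 == 3 contiguous data blocks; any mismatch returns 0 instead.
 */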
1747
1748 const struct address_space_operations f2fs_compress_aops = {
1749         .release_folio = f2fs_release_folio,
1750         .invalidate_folio = f2fs_invalidate_folio,
1751 };
1752
1753 struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
1754 {
1755         return sbi->compress_inode->i_mapping;
1756 }
1757
1758 void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
1759 {
1760         if (!sbi->compress_inode)
1761                 return;
1762         invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
1763 }
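/*
 * Note (editorial): invalidate_mapping_pages() takes an inclusive index
 * range, so passing (blkaddr, blkaddr) drops exactly the one cache page
 * keyed by that block address, e.g. when the block is freed or reallocated:
 *
 *	f2fs_invalidate_compress_page(sbi, blkaddr);
 */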
1764
1765 void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
1766                                                 nid_t ino, block_t blkaddr)
1767 {
1768         struct page *cpage;
1769         int ret;
1770
1771         if (!test_opt(sbi, COMPRESS_CACHE))
1772                 return;
1773
1774         if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
1775                 return;
1776
1777         if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
1778                 return;
1779
1780         cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
1781         if (cpage) {
1782                 f2fs_put_page(cpage, 0);
1783                 return;
1784         }
1785
1786         cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
1787         if (!cpage)
1788                 return;
1789
1790         ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
1791                                                 blkaddr, GFP_NOFS);
1792         if (ret) {
1793                 f2fs_put_page(cpage, 0);
1794                 return;
1795         }
1796
1797         set_page_private_data(cpage, ino);
1798
1799         if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
1800                 goto out;
1801
1802         memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
1803         SetPageUptodate(cpage);
1804 out:
1805         f2fs_put_page(cpage, 1);
1806 }
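/*
 * Usage sketch (hypothetical call site on the write-back path): once a
 * compressed block has hit the disk, its payload can be mirrored into the
 * block-address-indexed cache:
 *
 *	if (f2fs_sb_has_compression(sbi))
 *		f2fs_cache_compressed_page(sbi, page, inode->i_ino, blkaddr);
 *
 * Every early return above is deliberately silent: the cache is purely an
 * optimization, so allocation failures or racing insertions just skip it.
 */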
1807
1808 bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
1809                                                                 block_t blkaddr)
1810 {
1811         struct page *cpage;
1812         bool hit = false;
1813
1814         if (!test_opt(sbi, COMPRESS_CACHE))
1815                 return false;
1816
1817         cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
1818                                 blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
1819         if (cpage) {
1820                 if (PageUptodate(cpage)) {
1821                         atomic_inc(&sbi->compress_page_hit);
1822                         memcpy(page_address(page),
1823                                 page_address(cpage), PAGE_SIZE);
1824                         hit = true;
1825                 }
1826                 f2fs_put_page(cpage, 1);
1827         }
1828
1829         return hit;
1830 }
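/*
 * Read-path sketch (editorial): a caller can probe the cache before issuing
 * read I/O for a compressed block and skip the bio on a hit:
 *
 *	if (f2fs_load_compressed_page(sbi, page, blkaddr))
 *		goto skip_read;		(cached payload, no bio needed)
 *
 * FGP_LOCK | FGP_NOWAIT keeps the probe non-blocking, so a page locked by
 * someone else counts as a miss rather than a stall.
 */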
1831
1832 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
1833 {
1834         struct address_space *mapping = sbi->compress_inode->i_mapping;
1835         struct pagevec pvec;
1836         pgoff_t index = 0;
1837         pgoff_t end = MAX_BLKADDR(sbi);
1838
1839         if (!mapping->nrpages)
1840                 return;
1841
1842         pagevec_init(&pvec);
1843
1844         do {
1845                 unsigned int nr_pages;
1846                 int i;
1847
1848                 nr_pages = pagevec_lookup_range(&pvec, mapping,
1849                                                 &index, end - 1);
1850                 if (!nr_pages)
1851                         break;
1852
1853                 for (i = 0; i < nr_pages; i++) {
1854                         struct page *page = pvec.pages[i];
1855
1856                         if (page->index > end)
1857                                 break;
1858
1859                         lock_page(page);
1860                         if (page->mapping != mapping) {
1861                                 unlock_page(page);
1862                                 continue;
1863                         }
1864
1865                         if (ino != get_page_private_data(page)) {
1866                                 unlock_page(page);
1867                                 continue;
1868                         }
1869
1870                         generic_error_remove_page(mapping, page);
1871                         unlock_page(page);
1872                 }
1873                 pagevec_release(&pvec);
1874                 cond_resched();
1875         } while (index < end);
1876 }
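/*
 * Eviction sketch (hypothetical call site): when an inode that owns cached
 * compressed blocks goes away, all of its entries are dropped by matching
 * the ino stamped into each page's private data:
 *
 *	f2fs_invalidate_compress_pages(sbi, inode->i_ino);
 */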
1877
1878 int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
1879 {
1880         struct inode *inode;
1881
1882         if (!test_opt(sbi, COMPRESS_CACHE))
1883                 return 0;
1884
1885         inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
1886         if (IS_ERR(inode))
1887                 return PTR_ERR(inode);
1888         sbi->compress_inode = inode;
1889
1890         sbi->compress_percent = COMPRESS_PERCENT;
1891         sbi->compress_watermark = COMPRESS_WATERMARK;
1892
1893         atomic_set(&sbi->compress_page_hit, 0);
1894
1895         return 0;
1896 }
1897
1898 void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
1899 {
1900         if (!sbi->compress_inode)
1901                 return;
1902         iput(sbi->compress_inode);
1903         sbi->compress_inode = NULL;
1904 }
1905
1906 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
1907 {
1908         dev_t dev = sbi->sb->s_bdev->bd_dev;
1909         char slab_name[32];
1910
1911         snprintf(slab_name, sizeof(slab_name), "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));
1912
1913         sbi->page_array_slab_size = sizeof(struct page *) <<
1914                                         F2FS_OPTION(sbi).compress_log_size;
1915
1916         sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
1917                                         sbi->page_array_slab_size);
1918         if (!sbi->page_array_slab)
1919                 return -ENOMEM;
1920         return 0;
1921 }
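/*
 * Worked example (editorial): with the default compress_log_size of 2, a
 * cluster spans 1 << 2 == 4 pages, so on a 64-bit build each slab object is
 * sizeof(struct page *) << 2 == 8 << 2 == 32 bytes: exactly one page-pointer
 * array per cluster.
 */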
1922
1923 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
1924 {
1925         kmem_cache_destroy(sbi->page_array_slab);
1926 }
1927
1928 static int __init f2fs_init_cic_cache(void)
1929 {
1930         cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
1931                                         sizeof(struct compress_io_ctx));
1932         if (!cic_entry_slab)
1933                 return -ENOMEM;
1934         return 0;
1935 }
1936
1937 static void f2fs_destroy_cic_cache(void)
1938 {
1939         kmem_cache_destroy(cic_entry_slab);
1940 }
1941
1942 static int __init f2fs_init_dic_cache(void)
1943 {
1944         dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
1945                                         sizeof(struct decompress_io_ctx));
1946         if (!dic_entry_slab)
1947                 return -ENOMEM;
1948         return 0;
1949 }
1950
1951 static void f2fs_destroy_dic_cache(void)
1952 {
1953         kmem_cache_destroy(dic_entry_slab);
1954 }
1955
1956 int __init f2fs_init_compress_cache(void)
1957 {
1958         int err;
1959
1960         err = f2fs_init_cic_cache();
1961         if (err)
1962                 goto out;
1963         err = f2fs_init_dic_cache();
1964         if (err)
1965                 goto free_cic;
1966         return 0;
1967 free_cic:
1968         f2fs_destroy_cic_cache();
1969 out:
1970         return err;
1971 }
1972
1973 void f2fs_destroy_compress_cache(void)
1974 {
1975         f2fs_destroy_dic_cache();
1976         f2fs_destroy_cic_cache();
1977 }
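/*
 * Module-lifetime sketch (editorial; the in-tree callers are assumed to be
 * init_f2fs_fs()/exit_f2fs_fs() in super.c): the init/destroy pair above
 * brackets the filesystem module:
 *
 *	err = f2fs_init_compress_cache();
 *	if (err)
 *		goto fail;
 *	...
 *	f2fs_destroy_compress_cache();
 */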