// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include "nilfs.h"
#include "btnode.h"
#include "page.h"
#include "segment.h"
#include "sufile.h"
#include "cpfile.h"
#include "ifile.h"
#include "segbuf.h"


/*
 * Segment constructor
 */
#define SC_N_INODEVEC   16   /* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA 64   /*
                              * Upper limit of the number of segments
                              * appended in collection retry loop
                              */

/* Construction mode */
enum {
        SC_LSEG_SR = 1, /* Make a logical segment having a super root */
        SC_LSEG_DSYNC,  /*
                         * Flush data blocks of a given file and make
                         * a logical segment without a super root.
                         */
        SC_FLUSH_FILE,  /*
                         * Flush data files, leads to segment writes without
                         * creating a checkpoint.
                         */
        SC_FLUSH_DAT,   /*
                         * Flush DAT file.  This also creates segments
                         * without a checkpoint.
                         */
};

/* Stage numbers of dirty block collection */
enum {
        NILFS_ST_INIT = 0,
        NILFS_ST_GC,            /* Collecting dirty blocks for GC */
        NILFS_ST_FILE,
        NILFS_ST_IFILE,
        NILFS_ST_CPFILE,
        NILFS_ST_SUFILE,
        NILFS_ST_DAT,
        NILFS_ST_SR,            /* Super root */
        NILFS_ST_DSYNC,         /* Data sync blocks */
        NILFS_ST_DONE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/nilfs2.h>

/*
 * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set(), and nilfs_sc_cstage_get() are
 * wrapper functions for the stage count (nilfs_sc_info->sc_stage.scnt).
 * Users of the variable must go through them because every transition of the
 * stage count must emit a trace event
 * (trace_nilfs2_collection_stage_transition).
 *
 * nilfs_sc_cstage_get() isn't strictly required for that purpose because it
 * doesn't produce tracepoint events.  It is provided just to make the
 * intention clear.
 */
static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci)
{
        sci->sc_stage.scnt++;
        trace_nilfs2_collection_stage_transition(sci);
}

static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt)
{
        sci->sc_stage.scnt = next_scnt;
        trace_nilfs2_collection_stage_transition(sci);
}

static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci)
{
        return sci->sc_stage.scnt;
}

/* State flags of collection */
#define NILFS_CF_NODE           0x0001  /* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED  0x0002  /* IFILE stage has started */
#define NILFS_CF_SUFREED        0x0004  /* segment usages have been freed */
#define NILFS_CF_HISTORY_MASK   (NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)

/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
        int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
                            struct inode *);
        int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
                            struct inode *);
        int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
                            struct inode *);
        void (*write_data_binfo)(struct nilfs_sc_info *,
                                 struct nilfs_segsum_pointer *,
                                 union nilfs_binfo *);
        void (*write_node_binfo)(struct nilfs_sc_info *,
                                 struct nilfs_segsum_pointer *,
                                 union nilfs_binfo *);
};

/*
 * Other definitions
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);

#define nilfs_cnt32_ge(a, b)   \
        (typecheck(__u32, a) && typecheck(__u32, b) && \
         ((__s32)(a) - (__s32)(b) >= 0))

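/*
 * Illustrative note (not from the original source): like the kernel's
 * time_after()/time_before() helpers, nilfs_cnt32_ge() compares 32-bit
 * sequence counters modulo 2^32, so it remains correct across counter
 * wraparound.  For example, nilfs_cnt32_ge(1, 0xffffffff) is true
 * because (__s32)1 - (__s32)0xffffffff == 2 >= 0, i.e. 1 is treated as
 * two steps after 0xffffffff rather than far before it.
 */
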
static int nilfs_prepare_segment_lock(struct super_block *sb,
                                      struct nilfs_transaction_info *ti)
{
        struct nilfs_transaction_info *cur_ti = current->journal_info;
        void *save = NULL;

        if (cur_ti) {
                if (cur_ti->ti_magic == NILFS_TI_MAGIC)
                        return ++cur_ti->ti_count;

                /*
                 * If the journal_info field is occupied by another FS,
                 * it is saved and will be restored on
                 * nilfs_transaction_commit().
                 */
                nilfs_warn(sb, "journal info from a different FS");
                save = current->journal_info;
        }
        if (!ti) {
                ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
                if (!ti)
                        return -ENOMEM;
                ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
        } else {
                ti->ti_flags = 0;
        }
        ti->ti_count = 0;
        ti->ti_save = save;
        ti->ti_magic = NILFS_TI_MAGIC;
        current->journal_info = ti;
        return 0;
}

/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make segment construction and write tasks
 * mutually exclusive.  The function is used in pairs with
 * nilfs_transaction_commit().  The region enclosed by these two functions
 * can be nested.  To avoid a deadlock, the semaphore is only acquired or
 * released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it.  It is initialized and hooked onto the current task in
 * the outermost call.  If a pre-allocated struct is given to @ti, it is used
 * instead; otherwise a new struct is assigned from a slab.
 *
 * When the @vacancy_check flag is set, this function checks the amount of
 * free space and waits for the GC to reclaim disk space if capacity is low.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device
 */
int nilfs_transaction_begin(struct super_block *sb,
                            struct nilfs_transaction_info *ti,
                            int vacancy_check)
{
        struct the_nilfs *nilfs;
        int ret = nilfs_prepare_segment_lock(sb, ti);
        struct nilfs_transaction_info *trace_ti;

        if (unlikely(ret < 0))
                return ret;
        if (ret > 0) {
                trace_ti = current->journal_info;

                trace_nilfs2_transaction_transition(sb, trace_ti,
                                    trace_ti->ti_count, trace_ti->ti_flags,
                                    TRACE_NILFS2_TRANSACTION_BEGIN);
                return 0;
        }

        sb_start_intwrite(sb);

        nilfs = sb->s_fs_info;
        down_read(&nilfs->ns_segctor_sem);
        if (vacancy_check && nilfs_near_disk_full(nilfs)) {
                up_read(&nilfs->ns_segctor_sem);
                ret = -ENOSPC;
                goto failed;
        }

        trace_ti = current->journal_info;
        trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count,
                                            trace_ti->ti_flags,
                                            TRACE_NILFS2_TRANSACTION_BEGIN);
        return 0;

 failed:
        ti = current->journal_info;
        current->journal_info = ti->ti_save;
        if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
                kmem_cache_free(nilfs_transaction_cachep, ti);
        sb_end_intwrite(sb);
        return ret;
}
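
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * bracket metadata updates with nilfs_transaction_begin() and
 * nilfs_transaction_commit(), and back out with nilfs_transaction_abort()
 * on failure.  do_some_update() below is a hypothetical placeholder for
 * the actual file operation:
 *
 *        struct nilfs_transaction_info ti;
 *        int err;
 *
 *        err = nilfs_transaction_begin(sb, &ti, 1);
 *        if (err)
 *                return err;
 *        err = do_some_update(inode);
 *        if (unlikely(err)) {
 *                nilfs_transaction_abort(sb);
 *                return err;
 *        }
 *        return nilfs_transaction_commit(sb);
 *
 * Passing a stack-allocated @ti avoids the slab allocation performed by
 * nilfs_prepare_segment_lock() when @ti is NULL.
 */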

/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore acquired by
 * nilfs_transaction_begin().  This is done only in the outermost call
 * of this function.  If a commit flag is set, nilfs_transaction_commit()
 * sets a timer to start the segment constructor.  If a sync flag is set,
 * it starts construction directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
        struct nilfs_transaction_info *ti = current->journal_info;
        struct the_nilfs *nilfs = sb->s_fs_info;
        int err = 0;

        BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
        ti->ti_flags |= NILFS_TI_COMMIT;
        if (ti->ti_count > 0) {
                ti->ti_count--;
                trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
                return 0;
        }
        if (nilfs->ns_writer) {
                struct nilfs_sc_info *sci = nilfs->ns_writer;

                if (ti->ti_flags & NILFS_TI_COMMIT)
                        nilfs_segctor_start_timer(sci);
                if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
                        nilfs_segctor_do_flush(sci, 0);
        }
        up_read(&nilfs->ns_segctor_sem);
        trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);

        current->journal_info = ti->ti_save;

        if (ti->ti_flags & NILFS_TI_SYNC)
                err = nilfs_construct_segment(sb);
        if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
                kmem_cache_free(nilfs_transaction_cachep, ti);
        sb_end_intwrite(sb);
        return err;
}

void nilfs_transaction_abort(struct super_block *sb)
{
        struct nilfs_transaction_info *ti = current->journal_info;
        struct the_nilfs *nilfs = sb->s_fs_info;

        BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
        if (ti->ti_count > 0) {
                ti->ti_count--;
                trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
                return;
        }
        up_read(&nilfs->ns_segctor_sem);

        trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
                    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);

        current->journal_info = ti->ti_save;
        if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
                kmem_cache_free(nilfs_transaction_cachep, ti);
        sb_end_intwrite(sb);
}

void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct nilfs_sc_info *sci = nilfs->ns_writer;

        if (!sci || !sci->sc_flush_request)
                return;

        set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
        up_read(&nilfs->ns_segctor_sem);

        down_write(&nilfs->ns_segctor_sem);
        if (sci->sc_flush_request &&
            test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
                struct nilfs_transaction_info *ti = current->journal_info;

                ti->ti_flags |= NILFS_TI_WRITER;
                nilfs_segctor_do_immediate_flush(sci);
                ti->ti_flags &= ~NILFS_TI_WRITER;
        }
        downgrade_write(&nilfs->ns_segctor_sem);
}

static void nilfs_transaction_lock(struct super_block *sb,
                                   struct nilfs_transaction_info *ti,
                                   int gcflag)
{
        struct nilfs_transaction_info *cur_ti = current->journal_info;
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct nilfs_sc_info *sci = nilfs->ns_writer;

        WARN_ON(cur_ti);
        ti->ti_flags = NILFS_TI_WRITER;
        ti->ti_count = 0;
        ti->ti_save = cur_ti;
        ti->ti_magic = NILFS_TI_MAGIC;
        current->journal_info = ti;

        for (;;) {
                trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);

                down_write(&nilfs->ns_segctor_sem);
                if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
                        break;

                nilfs_segctor_do_immediate_flush(sci);

                up_write(&nilfs->ns_segctor_sem);
                cond_resched();
        }
        if (gcflag)
                ti->ti_flags |= NILFS_TI_GC;

        trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
}

static void nilfs_transaction_unlock(struct super_block *sb)
{
        struct nilfs_transaction_info *ti = current->journal_info;
        struct the_nilfs *nilfs = sb->s_fs_info;

        BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
        BUG_ON(ti->ti_count > 0);

        up_write(&nilfs->ns_segctor_sem);
        current->journal_info = ti->ti_save;

        trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
}

static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
                                            struct nilfs_segsum_pointer *ssp,
                                            unsigned int bytes)
{
        struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
        unsigned int blocksize = sci->sc_super->s_blocksize;
        void *p;

        if (unlikely(ssp->offset + bytes > blocksize)) {
                ssp->offset = 0;
                BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
                                               &segbuf->sb_segsum_buffers));
                ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
        }
        p = ssp->bh->b_data + ssp->offset;
        ssp->offset += bytes;
        return p;
}

/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
        struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
        struct buffer_head *sumbh;
        unsigned int sumbytes;
        unsigned int flags = 0;
        int err;

        if (nilfs_doing_gc())
                flags = NILFS_SS_GC;
        err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
        if (unlikely(err))
                return err;

        sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
        sumbytes = segbuf->sb_sum.sumbytes;
        sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
        sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
        sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
        return 0;
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
        sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
        if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
                return -E2BIG; /*
                                * The current segment is filled up
                                * (internal code)
                                */
        sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
        return nilfs_segctor_reset_segment_buffer(sci);
}

static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
        struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
        int err;

        if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
                err = nilfs_segctor_feed_segment(sci);
                if (err)
                        return err;
                segbuf = sci->sc_curseg;
        }
        err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
        if (likely(!err))
                segbuf->sb_sum.flags |= NILFS_SS_SR;
        return err;
}

/*
 * Functions for making segment summary and payloads
 */
static int nilfs_segctor_segsum_block_required(
        struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
        unsigned int binfo_size)
{
        unsigned int blocksize = sci->sc_super->s_blocksize;
        /* The sizes of finfo and binfo are small enough relative to blocksize */

        return ssp->offset + binfo_size +
                (!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
                blocksize;
}
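
/*
 * Worked example (illustrative; assumes a 4096-byte block size and a
 * 24-byte struct nilfs_finfo): with ssp->offset == 4080, binfo_size ==
 * 24, and sc_blk_cnt == 0, the space needed is 4080 + 24 + 24 = 4128
 * bytes, which exceeds the block size, so the caller must extend the
 * segment summary by one block before mapping the finfo/binfo pair.
 */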

static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
                                      struct inode *inode)
{
        sci->sc_curseg->sb_sum.nfinfo++;
        sci->sc_binfo_ptr = sci->sc_finfo_ptr;
        nilfs_segctor_map_segsum_entry(
                sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

        if (NILFS_I(inode)->i_root &&
            !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
                set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
        /* skip finfo */
}

static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
                                    struct inode *inode)
{
        struct nilfs_finfo *finfo;
        struct nilfs_inode_info *ii;
        struct nilfs_segment_buffer *segbuf;
        __u64 cno;

        if (sci->sc_blk_cnt == 0)
                return;

        ii = NILFS_I(inode);

        if (test_bit(NILFS_I_GCINODE, &ii->i_state))
                cno = ii->i_cno;
        else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
                cno = 0;
        else
                cno = sci->sc_cno;

        finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
                                                 sizeof(*finfo));
        finfo->fi_ino = cpu_to_le64(inode->i_ino);
        finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
        finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
        finfo->fi_cno = cpu_to_le64(cno);

        segbuf = sci->sc_curseg;
        segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
                sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
        sci->sc_finfo_ptr = sci->sc_binfo_ptr;
        sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}

static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
                                        struct buffer_head *bh,
                                        struct inode *inode,
                                        unsigned int binfo_size)
{
        struct nilfs_segment_buffer *segbuf;
        int required, err = 0;

 retry:
        segbuf = sci->sc_curseg;
        required = nilfs_segctor_segsum_block_required(
                sci, &sci->sc_binfo_ptr, binfo_size);
        if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
                nilfs_segctor_end_finfo(sci, inode);
                err = nilfs_segctor_feed_segment(sci);
                if (err)
                        return err;
                goto retry;
        }
        if (unlikely(required)) {
                err = nilfs_segbuf_extend_segsum(segbuf);
                if (unlikely(err))
                        goto failed;
        }
        if (sci->sc_blk_cnt == 0)
                nilfs_segctor_begin_finfo(sci, inode);

        nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
        /* Substitution to vblocknr is delayed until update_blocknr() */
        nilfs_segbuf_add_file_buffer(segbuf, bh);
        sci->sc_blk_cnt++;
 failed:
        return err;
}

/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
                                   struct buffer_head *bh, struct inode *inode)
{
        int err;

        err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
        if (err < 0)
                return err;

        err = nilfs_segctor_add_file_block(sci, bh, inode,
                                           sizeof(struct nilfs_binfo_v));
        if (!err)
                sci->sc_datablk_cnt++;
        return err;
}

static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
                                   struct buffer_head *bh,
                                   struct inode *inode)
{
        return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
                                   struct buffer_head *bh,
                                   struct inode *inode)
{
        WARN_ON(!buffer_dirty(bh));
        return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}

static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
                                        struct nilfs_segsum_pointer *ssp,
                                        union nilfs_binfo *binfo)
{
        struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
                sci, ssp, sizeof(*binfo_v));
        *binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
                                        struct nilfs_segsum_pointer *ssp,
                                        union nilfs_binfo *binfo)
{
        __le64 *vblocknr = nilfs_segctor_map_segsum_entry(
                sci, ssp, sizeof(*vblocknr));
        *vblocknr = binfo->bi_v.bi_vblocknr;
}

static const struct nilfs_sc_operations nilfs_sc_file_ops = {
        .collect_data = nilfs_collect_file_data,
        .collect_node = nilfs_collect_file_node,
        .collect_bmap = nilfs_collect_file_bmap,
        .write_data_binfo = nilfs_write_file_data_binfo,
        .write_node_binfo = nilfs_write_file_node_binfo,
};

static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
                                  struct buffer_head *bh, struct inode *inode)
{
        int err;

        err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
        if (err < 0)
                return err;

        err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
        if (!err)
                sci->sc_datablk_cnt++;
        return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
                                  struct buffer_head *bh, struct inode *inode)
{
        WARN_ON(!buffer_dirty(bh));
        return nilfs_segctor_add_file_block(sci, bh, inode,
                                            sizeof(struct nilfs_binfo_dat));
}

static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
                                       struct nilfs_segsum_pointer *ssp,
                                       union nilfs_binfo *binfo)
{
        __le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
                                                          sizeof(*blkoff));
        *blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
                                       struct nilfs_segsum_pointer *ssp,
                                       union nilfs_binfo *binfo)
{
        struct nilfs_binfo_dat *binfo_dat =
                nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
        *binfo_dat = binfo->bi_dat;
}

static const struct nilfs_sc_operations nilfs_sc_dat_ops = {
        .collect_data = nilfs_collect_dat_data,
        .collect_node = nilfs_collect_file_node,
        .collect_bmap = nilfs_collect_dat_bmap,
        .write_data_binfo = nilfs_write_dat_data_binfo,
        .write_node_binfo = nilfs_write_dat_node_binfo,
};

static const struct nilfs_sc_operations nilfs_sc_dsync_ops = {
        .collect_data = nilfs_collect_file_data,
        .collect_node = NULL,
        .collect_bmap = NULL,
        .write_data_binfo = nilfs_write_file_data_binfo,
        .write_node_binfo = NULL,
};

static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
                                              struct list_head *listp,
                                              size_t nlimit,
                                              loff_t start, loff_t end)
{
        struct address_space *mapping = inode->i_mapping;
        struct pagevec pvec;
        pgoff_t index = 0, last = ULONG_MAX;
        size_t ndirties = 0;
        int i;

        if (unlikely(start != 0 || end != LLONG_MAX)) {
                /*
                 * A valid range is given for syncing data pages.  The
                 * range is rounded to page boundaries; extra dirty buffers
                 * may be included if blocksize < pagesize.  With 4KiB
                 * pages, for example, start == 5000 and end == 12000 map
                 * to page indices 1 and 2.
                 */
                index = start >> PAGE_SHIFT;
                last = end >> PAGE_SHIFT;
        }
        pagevec_init(&pvec);
 repeat:
        if (unlikely(index > last) ||
            !pagevec_lookup_range_tag(&pvec, mapping, &index, last,
                                PAGECACHE_TAG_DIRTY))
                return ndirties;

        for (i = 0; i < pagevec_count(&pvec); i++) {
                struct buffer_head *bh, *head;
                struct page *page = pvec.pages[i];

                lock_page(page);
                if (!page_has_buffers(page))
                        create_empty_buffers(page, i_blocksize(inode), 0);
                unlock_page(page);

                bh = head = page_buffers(page);
                do {
                        if (!buffer_dirty(bh) || buffer_async_write(bh))
                                continue;
                        get_bh(bh);
                        list_add_tail(&bh->b_assoc_buffers, listp);
                        ndirties++;
                        if (unlikely(ndirties >= nlimit)) {
                                pagevec_release(&pvec);
                                cond_resched();
                                return ndirties;
                        }
                } while (bh = bh->b_this_page, bh != head);
        }
        pagevec_release(&pvec);
        cond_resched();
        goto repeat;
}

static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
                                            struct list_head *listp)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct inode *btnc_inode = ii->i_assoc_inode;
        struct pagevec pvec;
        struct buffer_head *bh, *head;
        unsigned int i;
        pgoff_t index = 0;

        if (!btnc_inode)
                return;

        pagevec_init(&pvec);

        while (pagevec_lookup_tag(&pvec, btnc_inode->i_mapping, &index,
                                        PAGECACHE_TAG_DIRTY)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        bh = head = page_buffers(pvec.pages[i]);
                        do {
                                if (buffer_dirty(bh) &&
                                                !buffer_async_write(bh)) {
                                        get_bh(bh);
                                        list_add_tail(&bh->b_assoc_buffers,
                                                      listp);
                                }
                                bh = bh->b_this_page;
                        } while (bh != head);
                }
                pagevec_release(&pvec);
                cond_resched();
        }
}

static void nilfs_dispose_list(struct the_nilfs *nilfs,
                               struct list_head *head, int force)
{
        struct nilfs_inode_info *ii, *n;
        struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
        unsigned int nv = 0;

        while (!list_empty(head)) {
                spin_lock(&nilfs->ns_inode_lock);
                list_for_each_entry_safe(ii, n, head, i_dirty) {
                        list_del_init(&ii->i_dirty);
                        if (force) {
                                if (unlikely(ii->i_bh)) {
                                        brelse(ii->i_bh);
                                        ii->i_bh = NULL;
                                }
                        } else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
                                set_bit(NILFS_I_QUEUED, &ii->i_state);
                                list_add_tail(&ii->i_dirty,
                                              &nilfs->ns_dirty_files);
                                continue;
                        }
                        ivec[nv++] = ii;
                        if (nv == SC_N_INODEVEC)
                                break;
                }
                spin_unlock(&nilfs->ns_inode_lock);

                for (pii = ivec; nv > 0; pii++, nv--)
                        iput(&(*pii)->vfs_inode);
        }
}

static void nilfs_iput_work_func(struct work_struct *work)
{
        struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
                                                 sc_iput_work);
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

        nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
}

static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
                                     struct nilfs_root *root)
{
        int ret = 0;

        if (nilfs_mdt_fetch_dirty(root->ifile))
                ret++;
        if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
                ret++;
        if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
                ret++;
        if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
                ret++;
        return ret;
}

static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
        return list_empty(&sci->sc_dirty_files) &&
                !test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
                sci->sc_nfreesegs == 0 &&
                (!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}

static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
        int ret = 0;

        if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
                set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

        spin_lock(&nilfs->ns_inode_lock);
        if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
                ret++;

        spin_unlock(&nilfs->ns_inode_lock);
        return ret;
}

static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

        nilfs_mdt_clear_dirty(sci->sc_root->ifile);
        nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
        nilfs_mdt_clear_dirty(nilfs->ns_sufile);
        nilfs_mdt_clear_dirty(nilfs->ns_dat);
}

static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
{
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
        struct buffer_head *bh_cp;
        struct nilfs_checkpoint *raw_cp;
        int err;

        /* XXX: this interface will be changed */
        err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
                                          &raw_cp, &bh_cp);
        if (likely(!err)) {
                /*
                 * The following code duplicates code in cpfile.  But it is
                 * needed to collect the checkpoint even if it was not newly
                 * created.
                 */
                mark_buffer_dirty(bh_cp);
                nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
                nilfs_cpfile_put_checkpoint(
                        nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
        } else if (err == -EINVAL || err == -ENOENT) {
                nilfs_error(sci->sc_super,
                            "checkpoint creation failed due to metadata corruption.");
                err = -EIO;
        }
        return err;
}

static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
{
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
        struct buffer_head *bh_cp;
        struct nilfs_checkpoint *raw_cp;
        int err;

        err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
                                          &raw_cp, &bh_cp);
        if (unlikely(err)) {
                if (err == -EINVAL || err == -ENOENT) {
                        nilfs_error(sci->sc_super,
                                    "checkpoint finalization failed due to metadata corruption.");
                        err = -EIO;
                }
                goto failed_ibh;
        }
        raw_cp->cp_snapshot_list.ssl_next = 0;
        raw_cp->cp_snapshot_list.ssl_prev = 0;
        raw_cp->cp_inodes_count =
                cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count));
        raw_cp->cp_blocks_count =
                cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count));
        raw_cp->cp_nblk_inc =
                cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
        raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
        raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);

        if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
                nilfs_checkpoint_clear_minor(raw_cp);
        else
                nilfs_checkpoint_set_minor(raw_cp);

        nilfs_write_inode_common(sci->sc_root->ifile,
                                 &raw_cp->cp_ifile_inode, 1);
        nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
        return 0;

 failed_ibh:
        return err;
}

static void nilfs_fill_in_file_bmap(struct inode *ifile,
                                    struct nilfs_inode_info *ii)
{
        struct buffer_head *ibh;
        struct nilfs_inode *raw_inode;

        if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
                ibh = ii->i_bh;
                BUG_ON(!ibh);
                raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
                                                  ibh);
                nilfs_bmap_write(ii->i_bmap, raw_inode);
                nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
        }
}

static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
{
        struct nilfs_inode_info *ii;

        list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
                nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
                set_bit(NILFS_I_COLLECTED, &ii->i_state);
        }
}

static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
                                             struct the_nilfs *nilfs)
{
        struct buffer_head *bh_sr;
        struct nilfs_super_root *raw_sr;
        unsigned int isz, srsz;

        bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
        raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
        isz = nilfs->ns_inode_size;
        srsz = NILFS_SR_BYTES(isz);

        raw_sr->sr_bytes = cpu_to_le16(srsz);
        raw_sr->sr_nongc_ctime
                = cpu_to_le64(nilfs_doing_gc() ?
                              nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
        raw_sr->sr_flags = 0;

        nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
                                 NILFS_SR_DAT_OFFSET(isz), 1);
        nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
                                 NILFS_SR_CPFILE_OFFSET(isz), 1);
        nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
                                 NILFS_SR_SUFILE_OFFSET(isz), 1);
        memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
}

static void nilfs_redirty_inodes(struct list_head *head)
{
        struct nilfs_inode_info *ii;

        list_for_each_entry(ii, head, i_dirty) {
                if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
                        clear_bit(NILFS_I_COLLECTED, &ii->i_state);
        }
}

static void nilfs_drop_collected_inodes(struct list_head *head)
{
        struct nilfs_inode_info *ii;

        list_for_each_entry(ii, head, i_dirty) {
                if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
                        continue;

                clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
                set_bit(NILFS_I_UPDATED, &ii->i_state);
        }
}

static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
                                       struct inode *inode,
                                       struct list_head *listp,
                                       int (*collect)(struct nilfs_sc_info *,
                                                      struct buffer_head *,
                                                      struct inode *))
{
        struct buffer_head *bh, *n;
        int err = 0;

        if (collect) {
                list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
                        list_del_init(&bh->b_assoc_buffers);
                        err = collect(sci, bh, inode);
                        brelse(bh);
                        if (unlikely(err))
                                goto dispose_buffers;
                }
                return 0;
        }

 dispose_buffers:
        while (!list_empty(listp)) {
                bh = list_first_entry(listp, struct buffer_head,
                                      b_assoc_buffers);
                list_del_init(&bh->b_assoc_buffers);
                brelse(bh);
        }
        return err;
}

static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
{
        /* Remaining number of blocks within segment buffer */
        return sci->sc_segbuf_nblocks -
                (sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
}

static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
                                   struct inode *inode,
                                   const struct nilfs_sc_operations *sc_ops)
{
        LIST_HEAD(data_buffers);
        LIST_HEAD(node_buffers);
        int err;

        if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
                size_t n, rest = nilfs_segctor_buffer_rest(sci);

                n = nilfs_lookup_dirty_data_buffers(
                        inode, &data_buffers, rest + 1, 0, LLONG_MAX);
                if (n > rest) {
                        err = nilfs_segctor_apply_buffers(
                                sci, inode, &data_buffers,
                                sc_ops->collect_data);
                        BUG_ON(!err); /* always receive -E2BIG or true error */
                        goto break_or_fail;
                }
        }
        nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

        if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
                err = nilfs_segctor_apply_buffers(
                        sci, inode, &data_buffers, sc_ops->collect_data);
                if (unlikely(err)) {
                        /* dispose node list */
                        nilfs_segctor_apply_buffers(
                                sci, inode, &node_buffers, NULL);
                        goto break_or_fail;
                }
                sci->sc_stage.flags |= NILFS_CF_NODE;
        }
        /* Collect node */
        err = nilfs_segctor_apply_buffers(
                sci, inode, &node_buffers, sc_ops->collect_node);
        if (unlikely(err))
                goto break_or_fail;

        nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
        err = nilfs_segctor_apply_buffers(
                sci, inode, &node_buffers, sc_ops->collect_bmap);
        if (unlikely(err))
                goto break_or_fail;

        nilfs_segctor_end_finfo(sci, inode);
        sci->sc_stage.flags &= ~NILFS_CF_NODE;

 break_or_fail:
        return err;
}

static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
                                         struct inode *inode)
{
        LIST_HEAD(data_buffers);
        size_t n, rest = nilfs_segctor_buffer_rest(sci);
        int err;

        n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
                                            sci->sc_dsync_start,
                                            sci->sc_dsync_end);

        err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
                                          nilfs_collect_file_data);
        if (!err) {
                nilfs_segctor_end_finfo(sci, inode);
                BUG_ON(n > rest);
                /* always receive -E2BIG or true error if n > rest */
        }
        return err;
}

static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
        struct list_head *head;
        struct nilfs_inode_info *ii;
        size_t ndone;
        int err = 0;

        switch (nilfs_sc_cstage_get(sci)) {
        case NILFS_ST_INIT:
                /* Pre-processes */
                sci->sc_stage.flags = 0;

                if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
                        sci->sc_nblk_inc = 0;
                        sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
                        if (mode == SC_LSEG_DSYNC) {
                                nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC);
                                goto dsync_mode;
                        }
                }

                sci->sc_stage.dirty_file_ptr = NULL;
                sci->sc_stage.gc_inode_ptr = NULL;
                if (mode == SC_FLUSH_DAT) {
                        nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
                        goto dat_stage;
                }
                nilfs_sc_cstage_inc(sci);
                fallthrough;
        case NILFS_ST_GC:
                if (nilfs_doing_gc()) {
                        head = &sci->sc_gc_inodes;
                        ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
                                                head, i_dirty);
                        list_for_each_entry_continue(ii, head, i_dirty) {
                                err = nilfs_segctor_scan_file(
                                        sci, &ii->vfs_inode,
                                        &nilfs_sc_file_ops);
                                if (unlikely(err)) {
                                        sci->sc_stage.gc_inode_ptr = list_entry(
                                                ii->i_dirty.prev,
                                                struct nilfs_inode_info,
                                                i_dirty);
                                        goto break_or_fail;
                                }
                                set_bit(NILFS_I_COLLECTED, &ii->i_state);
                        }
                        sci->sc_stage.gc_inode_ptr = NULL;
                }
                nilfs_sc_cstage_inc(sci);
                fallthrough;
        case NILFS_ST_FILE:
                head = &sci->sc_dirty_files;
                ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
                                        i_dirty);
                list_for_each_entry_continue(ii, head, i_dirty) {
                        clear_bit(NILFS_I_DIRTY, &ii->i_state);

                        err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
                                                      &nilfs_sc_file_ops);
                        if (unlikely(err)) {
                                sci->sc_stage.dirty_file_ptr =
                                        list_entry(ii->i_dirty.prev,
                                                   struct nilfs_inode_info,
                                                   i_dirty);
                                goto break_or_fail;
                        }
                        /* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
                        /* XXX: required ? */
                }
                sci->sc_stage.dirty_file_ptr = NULL;
                if (mode == SC_FLUSH_FILE) {
                        nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
                        return 0;
                }
                nilfs_sc_cstage_inc(sci);
                sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
                fallthrough;
        case NILFS_ST_IFILE:
                err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
                                              &nilfs_sc_file_ops);
                if (unlikely(err))
                        break;
                nilfs_sc_cstage_inc(sci);
                /* Creating a checkpoint */
                err = nilfs_segctor_create_checkpoint(sci);
                if (unlikely(err))
                        break;
                fallthrough;
        case NILFS_ST_CPFILE:
                err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
                                              &nilfs_sc_file_ops);
                if (unlikely(err))
                        break;
                nilfs_sc_cstage_inc(sci);
                fallthrough;
        case NILFS_ST_SUFILE:
                err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
                                         sci->sc_nfreesegs, &ndone);
                if (unlikely(err)) {
                        nilfs_sufile_cancel_freev(nilfs->ns_sufile,
                                                  sci->sc_freesegs, ndone,
                                                  NULL);
                        break;
                }
                sci->sc_stage.flags |= NILFS_CF_SUFREED;

                err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
                                              &nilfs_sc_file_ops);
                if (unlikely(err))
                        break;
                nilfs_sc_cstage_inc(sci);
                fallthrough;
        case NILFS_ST_DAT:
 dat_stage:
                err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
                                              &nilfs_sc_dat_ops);
                if (unlikely(err))
                        break;
                if (mode == SC_FLUSH_DAT) {
                        nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
                        return 0;
                }
                nilfs_sc_cstage_inc(sci);
                fallthrough;
        case NILFS_ST_SR:
                if (mode == SC_LSEG_SR) {
                        /* Appending a super root */
                        err = nilfs_segctor_add_super_root(sci);
                        if (unlikely(err))
                                break;
                }
                /* End of a logical segment */
                sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
                nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
                return 0;
        case NILFS_ST_DSYNC:
 dsync_mode:
                sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
                ii = sci->sc_dsync_inode;
                if (!test_bit(NILFS_I_BUSY, &ii->i_state))
                        break;

                err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
                if (unlikely(err))
                        break;
                sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
                nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
                return 0;
        case NILFS_ST_DONE:
                return 0;
        default:
                BUG();
        }

 break_or_fail:
        return err;
}

/**
 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
 * @sci: nilfs_sc_info
 * @nilfs: nilfs object
 */
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
                                            struct the_nilfs *nilfs)
{
        struct nilfs_segment_buffer *segbuf, *prev;
        __u64 nextnum;
        int err, alloc = 0;

        segbuf = nilfs_segbuf_new(sci->sc_super);
        if (unlikely(!segbuf))
                return -ENOMEM;

        if (list_empty(&sci->sc_write_logs)) {
                nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
                                 nilfs->ns_pseg_offset, nilfs);
                if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
                        nilfs_shift_to_next_segment(nilfs);
                        nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
                }

                segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
                nextnum = nilfs->ns_nextnum;

                if (nilfs->ns_segnum == nilfs->ns_nextnum)
                        /* Start from the head of a new full segment */
                        alloc++;
        } else {
                /* Continue logs */
                prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
                nilfs_segbuf_map_cont(segbuf, prev);
                segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
                nextnum = prev->sb_nextnum;

                if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
                        nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
                        segbuf->sb_sum.seg_seq++;
                        alloc++;
                }
        }

        err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
        if (err)
                goto failed;

        if (alloc) {
                err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
                if (err)
                        goto failed;
        }
        nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

        BUG_ON(!list_empty(&sci->sc_segbufs));
        list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
        sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
        return 0;

 failed:
        nilfs_segbuf_free(segbuf);
        return err;
}

static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
                                         struct the_nilfs *nilfs, int nadd)
{
        struct nilfs_segment_buffer *segbuf, *prev;
        struct inode *sufile = nilfs->ns_sufile;
        __u64 nextnextnum;
        LIST_HEAD(list);
        int err, ret, i;

        prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
        /*
         * Since the segment specified with nextnum might be allocated during
         * the previous construction, the buffer including its segusage may
         * not be dirty.  The following call ensures that the buffer is dirty
         * and pins the buffer in memory until the sufile is written.
         */
1358         err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
1359         if (unlikely(err))
1360                 return err;
1361
1362         for (i = 0; i < nadd; i++) {
1363                 /* extend segment info */
1364                 err = -ENOMEM;
1365                 segbuf = nilfs_segbuf_new(sci->sc_super);
1366                 if (unlikely(!segbuf))
1367                         goto failed;
1368
1369                 /* map this buffer to region of segment on-disk */
1370                 nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
1371                 sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;
1372
1373                 /* allocate the next next full segment */
1374                 err = nilfs_sufile_alloc(sufile, &nextnextnum);
1375                 if (unlikely(err))
1376                         goto failed_segbuf;
1377
1378                 segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
1379                 nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);
1380
1381                 list_add_tail(&segbuf->sb_list, &list);
1382                 prev = segbuf;
1383         }
1384         list_splice_tail(&list, &sci->sc_segbufs);
1385         return 0;
1386
1387  failed_segbuf:
1388         nilfs_segbuf_free(segbuf);
1389  failed:
1390         list_for_each_entry(segbuf, &list, sb_list) {
1391                 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1392                 WARN_ON(ret); /* never fails */
1393         }
1394         nilfs_destroy_logs(&list);
1395         return err;
1396 }
1397
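/**
 * nilfs_free_incomplete_logs - clean up after partially written logs
 * @logs: list of segment buffers of the failed write
 * @nilfs: nilfs object
 *
 * Gives back next segments that were allocated but will not be used,
 * and handles write errors: a failed partial segment appended to an
 * existing segment is terminated, a failed new full segment marks the
 * file system as discontinued, and failed extended segments are
 * flagged as erroneous in the sufile.
 */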
1398 static void nilfs_free_incomplete_logs(struct list_head *logs,
1399                                        struct the_nilfs *nilfs)
1400 {
1401         struct nilfs_segment_buffer *segbuf, *prev;
1402         struct inode *sufile = nilfs->ns_sufile;
1403         int ret;
1404
1405         segbuf = NILFS_FIRST_SEGBUF(logs);
1406         if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
1407                 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1408                 WARN_ON(ret); /* never fails */
1409         }
1410         if (atomic_read(&segbuf->sb_err)) {
1411                 /* Case 1: The first segment failed */
1412                 if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
1413                         /*
1414                          * Case 1a:  Partial segment appended into an existing
1415                          * segment
1416                          */
1417                         nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
1418                                                 segbuf->sb_fseg_end);
1419                 else /* Case 1b:  New full segment */
1420                         set_nilfs_discontinued(nilfs);
1421         }
1422
1423         prev = segbuf;
1424         list_for_each_entry_continue(segbuf, logs, sb_list) {
1425                 if (prev->sb_nextnum != segbuf->sb_nextnum) {
1426                         ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1427                         WARN_ON(ret); /* never fails */
1428                 }
1429                 if (atomic_read(&segbuf->sb_err) &&
1430                     segbuf->sb_segnum != nilfs->ns_nextnum)
1431                         /* Case 2: extended segment (!= next) failed */
1432                         nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
1433                 prev = segbuf;
1434         }
1435 }
1436
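/**
 * nilfs_segctor_update_segusage - update usage of the segments to be written
 * @sci: segment constructor object
 * @sufile: inode of the segment usage file
 *
 * Records the number of live blocks and the construction time stamp in
 * the segment usage entry of every segment buffer queued for writing.
 */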
1437 static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
1438                                           struct inode *sufile)
1439 {
1440         struct nilfs_segment_buffer *segbuf;
1441         unsigned long live_blocks;
1442         int ret;
1443
1444         list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1445                 live_blocks = segbuf->sb_sum.nblocks +
1446                         (segbuf->sb_pseg_start - segbuf->sb_fseg_start);
1447                 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1448                                                      live_blocks,
1449                                                      sci->sc_seg_ctime);
1450                 WARN_ON(ret); /* always succeeds because the segusage is dirty */
1451         }
1452 }
1453
1454 static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
1455 {
1456         struct nilfs_segment_buffer *segbuf;
1457         int ret;
1458
1459         segbuf = NILFS_FIRST_SEGBUF(logs);
1460         ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1461                                              segbuf->sb_pseg_start -
1462                                              segbuf->sb_fseg_start, 0);
1463         WARN_ON(ret); /* always succeeds because the segusage is dirty */
1464
1465         list_for_each_entry_continue(segbuf, logs, sb_list) {
1466                 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1467                                                      0, 0);
1468                 WARN_ON(ret); /* always succeeds */
1469         }
1470 }
1471
1472 static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
1473                                             struct nilfs_segment_buffer *last,
1474                                             struct inode *sufile)
1475 {
1476         struct nilfs_segment_buffer *segbuf = last;
1477         int ret;
1478
1479         list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
1480                 sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
1481                 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1482                 WARN_ON(ret);
1483         }
1484         nilfs_truncate_logs(&sci->sc_segbufs, last);
1485 }
1486
1487
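/**
 * nilfs_segctor_collect - collect dirty blocks into segment buffers
 * @sci: segment constructor object
 * @nilfs: nilfs object
 * @mode: mode of log forming
 *
 * Runs the dirty block collection, retrying when the prepared segments
 * fill up (-E2BIG) during a construction with a super root: in that
 * case the collected logs are cleared, the number of segment buffers
 * is extended (doubling the delta up to SC_MAX_SEGDELTA), and the
 * collection restarts from the saved stage.  Unused trailing segment
 * buffers are truncated on success.
 */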
1488 static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
1489                                  struct the_nilfs *nilfs, int mode)
1490 {
1491         struct nilfs_cstage prev_stage = sci->sc_stage;
1492         int err, nadd = 1;
1493
1494         /* Collection retry loop */
1495         for (;;) {
1496                 sci->sc_nblk_this_inc = 0;
1497                 sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
1498
1499                 err = nilfs_segctor_reset_segment_buffer(sci);
1500                 if (unlikely(err))
1501                         goto failed;
1502
1503                 err = nilfs_segctor_collect_blocks(sci, mode);
1504                 sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
1505                 if (!err)
1506                         break;
1507
1508                 if (unlikely(err != -E2BIG))
1509                         goto failed;
1510
1511                 /* The current segment is filled up */
1512                 if (mode != SC_LSEG_SR ||
1513                     nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE)
1514                         break;
1515
1516                 nilfs_clear_logs(&sci->sc_segbufs);
1517
1518                 if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1519                         err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1520                                                         sci->sc_freesegs,
1521                                                         sci->sc_nfreesegs,
1522                                                         NULL);
1523                         WARN_ON(err); /* should not happen */
1524                         sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
1525                 }
1526
1527                 err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
1528                 if (unlikely(err))
1529                         return err;
1530
1531                 nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
1532                 sci->sc_stage = prev_stage;
1533         }
1534         nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
1535         return 0;
1536
1537  failed:
1538         return err;
1539 }
1540
1541 static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
1542                                       struct buffer_head *new_bh)
1543 {
1544         BUG_ON(!list_empty(&new_bh->b_assoc_buffers));
1545
1546         list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
1547         /* The caller must release old_bh */
1548 }
1549
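/**
 * nilfs_segctor_update_payload_blocknr - assign disk addresses to payload
 * @sci: segment constructor object
 * @segbuf: segment buffer holding the payload blocks
 * @mode: mode of log forming
 *
 * Walks the payload buffers of @segbuf, assigns each buffer a disk
 * block number through the bmap of the owning inode, and writes the
 * resulting binfo entries into the segment summary.  The binfo format
 * depends on the file: dsync operations for data sync logs, DAT
 * operations for the DAT inode, and file operations otherwise.
 */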
1550 static int
1551 nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
1552                                      struct nilfs_segment_buffer *segbuf,
1553                                      int mode)
1554 {
1555         struct inode *inode = NULL;
1556         sector_t blocknr;
1557         unsigned long nfinfo = segbuf->sb_sum.nfinfo;
1558         unsigned long nblocks = 0, ndatablk = 0;
1559         const struct nilfs_sc_operations *sc_op = NULL;
1560         struct nilfs_segsum_pointer ssp;
1561         struct nilfs_finfo *finfo = NULL;
1562         union nilfs_binfo binfo;
1563         struct buffer_head *bh, *bh_org;
1564         ino_t ino = 0;
1565         int err = 0;
1566
1567         if (!nfinfo)
1568                 goto out;
1569
1570         blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
1571         ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
1572         ssp.offset = sizeof(struct nilfs_segment_summary);
1573
1574         list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
1575                 if (bh == segbuf->sb_super_root)
1576                         break;
1577                 if (!finfo) {
1578                         finfo = nilfs_segctor_map_segsum_entry(
1579                                 sci, &ssp, sizeof(*finfo));
1580                         ino = le64_to_cpu(finfo->fi_ino);
1581                         nblocks = le32_to_cpu(finfo->fi_nblocks);
1582                         ndatablk = le32_to_cpu(finfo->fi_ndatablk);
1583
1584                         inode = bh->b_page->mapping->host;
1585
1586                         if (mode == SC_LSEG_DSYNC)
1587                                 sc_op = &nilfs_sc_dsync_ops;
1588                         else if (ino == NILFS_DAT_INO)
1589                                 sc_op = &nilfs_sc_dat_ops;
1590                         else /* file blocks */
1591                                 sc_op = &nilfs_sc_file_ops;
1592                 }
1593                 bh_org = bh;
1594                 get_bh(bh_org);
1595                 err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
1596                                         &binfo);
1597                 if (bh != bh_org)
1598                         nilfs_list_replace_buffer(bh_org, bh);
1599                 brelse(bh_org);
1600                 if (unlikely(err))
1601                         goto failed_bmap;
1602
1603                 if (ndatablk > 0)
1604                         sc_op->write_data_binfo(sci, &ssp, &binfo);
1605                 else
1606                         sc_op->write_node_binfo(sci, &ssp, &binfo);
1607
1608                 blocknr++;
1609                 if (--nblocks == 0) {
1610                         finfo = NULL;
1611                         if (--nfinfo == 0)
1612                                 break;
1613                 } else if (ndatablk > 0)
1614                         ndatablk--;
1615         }
1616  out:
1617         return 0;
1618
1619  failed_bmap:
1620         return err;
1621 }
1622
1623 static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
1624 {
1625         struct nilfs_segment_buffer *segbuf;
1626         int err;
1627
1628         list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1629                 err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
1630                 if (unlikely(err))
1631                         return err;
1632                 nilfs_segbuf_fill_in_segsum(segbuf);
1633         }
1634         return 0;
1635 }
1636
1637 static void nilfs_begin_page_io(struct page *page)
1638 {
1639         if (!page || PageWriteback(page))
1640                 /*
1641                  * For split b-tree node pages, this function may be called
1642                  * twice.  This check ignores the second and later calls.
1643                  */
1644                 return;
1645
1646         lock_page(page);
1647         clear_page_dirty_for_io(page);
1648         set_page_writeback(page);
1649         unlock_page(page);
1650 }
1651
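/**
 * nilfs_segctor_prepare_write - set up pages for writeback before submission
 * @sci: segment constructor object
 *
 * Clears the dirty flag and sets the writeback state on every page
 * holding segment summary or payload buffers of the queued segment
 * buffers, and tags payload buffers for asynchronous write.  Pages are
 * detected at the points where bh->b_page changes, so buffers of the
 * same page are expected to be contiguous in the lists.
 */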
1652 static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
1653 {
1654         struct nilfs_segment_buffer *segbuf;
1655         struct page *bd_page = NULL, *fs_page = NULL;
1656
1657         list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1658                 struct buffer_head *bh;
1659
1660                 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1661                                     b_assoc_buffers) {
1662                         if (bh->b_page != bd_page) {
1663                                 if (bd_page) {
1664                                         lock_page(bd_page);
1665                                         clear_page_dirty_for_io(bd_page);
1666                                         set_page_writeback(bd_page);
1667                                         unlock_page(bd_page);
1668                                 }
1669                                 bd_page = bh->b_page;
1670                         }
1671                 }
1672
1673                 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1674                                     b_assoc_buffers) {
1675                         set_buffer_async_write(bh);
1676                         if (bh == segbuf->sb_super_root) {
1677                                 if (bh->b_page != bd_page) {
1678                                         lock_page(bd_page);
1679                                         clear_page_dirty_for_io(bd_page);
1680                                         set_page_writeback(bd_page);
1681                                         unlock_page(bd_page);
1682                                         bd_page = bh->b_page;
1683                                 }
1684                                 break;
1685                         }
1686                         if (bh->b_page != fs_page) {
1687                                 nilfs_begin_page_io(fs_page);
1688                                 fs_page = bh->b_page;
1689                         }
1690                 }
1691         }
1692         if (bd_page) {
1693                 lock_page(bd_page);
1694                 clear_page_dirty_for_io(bd_page);
1695                 set_page_writeback(bd_page);
1696                 unlock_page(bd_page);
1697         }
1698         nilfs_begin_page_io(fs_page);
1699 }
1700
1701 static int nilfs_segctor_write(struct nilfs_sc_info *sci,
1702                                struct the_nilfs *nilfs)
1703 {
1704         int ret;
1705
1706         ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
1707         list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
1708         return ret;
1709 }
1710
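/**
 * nilfs_end_page_io - finish writeback of a page of payload buffers
 * @page: page to finish, or NULL
 * @err: error code of the write
 *
 * Ends writeback on @page, redirtying it when the write failed or when
 * some of its buffers are still dirty.  B-tree node pages, which may
 * be passed in more than once because their buffers can be split over
 * logs, only get a spurious dirty state cancelled here.
 */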
1711 static void nilfs_end_page_io(struct page *page, int err)
1712 {
1713         if (!page)
1714                 return;
1715
1716         if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
1717                 /*
1718                  * For b-tree node pages, this function may be called twice
1719                  * or more because they might be split in a segment.
1720                  */
1721                 if (PageDirty(page)) {
1722                         /*
1723                          * For pages holding split b-tree node buffers, the
1724                          * dirty flag on the buffers may be cleared
1725                          * individually.  In that case, the page is redirtied
1726                          * once for the remaining buffers, and it must be
1727                          * cancelled if all the buffers get cleaned later.
1728                          */
1729                         lock_page(page);
1730                         if (nilfs_page_buffers_clean(page))
1731                                 __nilfs_clear_page_dirty(page);
1732                         unlock_page(page);
1733                 }
1734                 return;
1735         }
1736
1737         if (!err) {
1738                 if (!nilfs_page_buffers_clean(page))
1739                         __set_page_dirty_nobuffers(page);
1740                 ClearPageError(page);
1741         } else {
1742                 __set_page_dirty_nobuffers(page);
1743                 SetPageError(page);
1744         }
1745
1746         end_page_writeback(page);
1747 }
1748
1749 static void nilfs_abort_logs(struct list_head *logs, int err)
1750 {
1751         struct nilfs_segment_buffer *segbuf;
1752         struct page *bd_page = NULL, *fs_page = NULL;
1753         struct buffer_head *bh;
1754
1755         if (list_empty(logs))
1756                 return;
1757
1758         list_for_each_entry(segbuf, logs, sb_list) {
1759                 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1760                                     b_assoc_buffers) {
1761                         if (bh->b_page != bd_page) {
1762                                 if (bd_page)
1763                                         end_page_writeback(bd_page);
1764                                 bd_page = bh->b_page;
1765                         }
1766                 }
1767
1768                 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1769                                     b_assoc_buffers) {
1770                         clear_buffer_async_write(bh);
1771                         if (bh == segbuf->sb_super_root) {
1772                                 if (bh->b_page != bd_page) {
1773                                         end_page_writeback(bd_page);
1774                                         bd_page = bh->b_page;
1775                                 }
1776                                 break;
1777                         }
1778                         if (bh->b_page != fs_page) {
1779                                 nilfs_end_page_io(fs_page, err);
1780                                 fs_page = bh->b_page;
1781                         }
1782                 }
1783         }
1784         if (bd_page)
1785                 end_page_writeback(bd_page);
1786
1787         nilfs_end_page_io(fs_page, err);
1788 }
1789
1790 static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
1791                                              struct the_nilfs *nilfs, int err)
1792 {
1793         LIST_HEAD(logs);
1794         int ret;
1795
1796         list_splice_tail_init(&sci->sc_write_logs, &logs);
1797         ret = nilfs_wait_on_logs(&logs);
1798         nilfs_abort_logs(&logs, ret ? : err);
1799
1800         list_splice_tail_init(&sci->sc_segbufs, &logs);
1801         nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
1802         nilfs_free_incomplete_logs(&logs, nilfs);
1803
1804         if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1805                 ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1806                                                 sci->sc_freesegs,
1807                                                 sci->sc_nfreesegs,
1808                                                 NULL);
1809                 WARN_ON(ret); /* should not happen */
1810         }
1811
1812         nilfs_destroy_logs(&logs);
1813 }
1814
1815 static void nilfs_set_next_segment(struct the_nilfs *nilfs,
1816                                    struct nilfs_segment_buffer *segbuf)
1817 {
1818         nilfs->ns_segnum = segbuf->sb_segnum;
1819         nilfs->ns_nextnum = segbuf->sb_nextnum;
1820         nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
1821                 + segbuf->sb_sum.nblocks;
1822         nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
1823         nilfs->ns_ctime = segbuf->sb_sum.ctime;
1824 }
1825
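/**
 * nilfs_segctor_complete_write - finalize state after a successful write
 * @sci: segment constructor object
 *
 * Cleans up the written logs: clears the dirty and volatile state of
 * the written buffers, ends writeback on their pages, drops collected
 * inodes, and advances the position of the next segment.  If a super
 * root was written, it also updates the last segment and checkpoint
 * number and clears the metadata dirty state.
 */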
1826 static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1827 {
1828         struct nilfs_segment_buffer *segbuf;
1829         struct page *bd_page = NULL, *fs_page = NULL;
1830         struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
1831         int update_sr = false;
1832
1833         list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
1834                 struct buffer_head *bh;
1835
1836                 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1837                                     b_assoc_buffers) {
1838                         set_buffer_uptodate(bh);
1839                         clear_buffer_dirty(bh);
1840                         if (bh->b_page != bd_page) {
1841                                 if (bd_page)
1842                                         end_page_writeback(bd_page);
1843                                 bd_page = bh->b_page;
1844                         }
1845                 }
1846                 /*
1847                  * We assume that buffers belonging to the same page are
1848                  * contiguous in the buffer list.
1849                  * Under this assumption, the last BH of each page is
1850                  * identifiable by a discontinuity in bh->b_page
1851                  * (page != fs_page).
1852                  *
1853                  * For B-tree node blocks, however, this assumption is not
1854                  * guaranteed.  The cleanup code of B-tree node pages needs
1855                  * special care.
1856                  */
1857                 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1858                                     b_assoc_buffers) {
1859                         const unsigned long set_bits = BIT(BH_Uptodate);
1860                         const unsigned long clear_bits =
1861                                 (BIT(BH_Dirty) | BIT(BH_Async_Write) |
1862                                  BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
1863                                  BIT(BH_NILFS_Redirected));
1864
1865                         set_mask_bits(&bh->b_state, clear_bits, set_bits);
1866                         if (bh == segbuf->sb_super_root) {
1867                                 if (bh->b_page != bd_page) {
1868                                         end_page_writeback(bd_page);
1869                                         bd_page = bh->b_page;
1870                                 }
1871                                 update_sr = true;
1872                                 break;
1873                         }
1874                         if (bh->b_page != fs_page) {
1875                                 nilfs_end_page_io(fs_page, 0);
1876                                 fs_page = bh->b_page;
1877                         }
1878                 }
1879
1880                 if (!nilfs_segbuf_simplex(segbuf)) {
1881                         if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
1882                                 set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1883                                 sci->sc_lseg_stime = jiffies;
1884                         }
1885                         if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
1886                                 clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1887                 }
1888         }
1889         /*
1890          * Since a page may span multiple segment buffers, the end of
1891          * the last page must be checked outside of the loop.
1892          */
1893         if (bd_page)
1894                 end_page_writeback(bd_page);
1895
1896         nilfs_end_page_io(fs_page, 0);
1897
1898         nilfs_drop_collected_inodes(&sci->sc_dirty_files);
1899
1900         if (nilfs_doing_gc())
1901                 nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
1902         else
1903                 nilfs->ns_nongc_ctime = sci->sc_seg_ctime;
1904
1905         sci->sc_nblk_inc += sci->sc_nblk_this_inc;
1906
1907         segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1908         nilfs_set_next_segment(nilfs, segbuf);
1909
1910         if (update_sr) {
1911                 nilfs->ns_flushed_device = 0;
1912                 nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
1913                                        segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
1914
1915                 clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
1916                 clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
1917                 set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1918                 nilfs_segctor_clear_metadata_dirty(sci);
1919         } else
1920                 clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1921 }
1922
1923 static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
1924 {
1925         int ret;
1926
1927         ret = nilfs_wait_on_logs(&sci->sc_write_logs);
1928         if (!ret) {
1929                 nilfs_segctor_complete_write(sci);
1930                 nilfs_destroy_logs(&sci->sc_write_logs);
1931         }
1932         return ret;
1933 }
1934
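/**
 * nilfs_segctor_collect_dirty_files - queue dirty files for construction
 * @sci: segment constructor object
 * @nilfs: nilfs object
 *
 * Moves inodes from the nilfs dirty file list to the constructor's
 * dirty file list, loading the on-disk inode block of each file and
 * redirtying it so that the ifile gets written together with the
 * files.  The scan restarts from the top whenever the inode lock is
 * dropped to read in an inode block.
 */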
1935 static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
1936                                              struct the_nilfs *nilfs)
1937 {
1938         struct nilfs_inode_info *ii, *n;
1939         struct inode *ifile = sci->sc_root->ifile;
1940
1941         spin_lock(&nilfs->ns_inode_lock);
1942  retry:
1943         list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
1944                 if (!ii->i_bh) {
1945                         struct buffer_head *ibh;
1946                         int err;
1947
1948                         spin_unlock(&nilfs->ns_inode_lock);
1949                         err = nilfs_ifile_get_inode_block(
1950                                 ifile, ii->vfs_inode.i_ino, &ibh);
1951                         if (unlikely(err)) {
1952                                 nilfs_warn(sci->sc_super,
1953                                            "log writer: error %d getting inode block (ino=%lu)",
1954                                            err, ii->vfs_inode.i_ino);
1955                                 return err;
1956                         }
1957                         spin_lock(&nilfs->ns_inode_lock);
1958                         if (likely(!ii->i_bh))
1959                                 ii->i_bh = ibh;
1960                         else
1961                                 brelse(ibh);
1962                         goto retry;
1963                 }
1964
1965                 /* Always redirty the buffer to avoid a race condition */
1966                 mark_buffer_dirty(ii->i_bh);
1967                 nilfs_mdt_mark_dirty(ifile);
1968
1969                 clear_bit(NILFS_I_QUEUED, &ii->i_state);
1970                 set_bit(NILFS_I_BUSY, &ii->i_state);
1971                 list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
1972         }
1973         spin_unlock(&nilfs->ns_inode_lock);
1974
1975         return 0;
1976 }
1977
1978 static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
1979                                              struct the_nilfs *nilfs)
1980 {
1981         struct nilfs_inode_info *ii, *n;
1982         int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE);
1983         int defer_iput = false;
1984
1985         spin_lock(&nilfs->ns_inode_lock);
1986         list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
1987                 if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
1988                     test_bit(NILFS_I_DIRTY, &ii->i_state))
1989                         continue;
1990
1991                 clear_bit(NILFS_I_BUSY, &ii->i_state);
1992                 brelse(ii->i_bh);
1993                 ii->i_bh = NULL;
1994                 list_del_init(&ii->i_dirty);
1995                 if (!ii->vfs_inode.i_nlink || during_mount) {
1996                         /*
1997                          * Defer calling iput() to avoid deadlocks if
1998                          * i_nlink == 0 or the mount has not yet finished.
1999                          */
2000                         list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
2001                         defer_iput = true;
2002                 } else {
2003                         spin_unlock(&nilfs->ns_inode_lock);
2004                         iput(&ii->vfs_inode);
2005                         spin_lock(&nilfs->ns_inode_lock);
2006                 }
2007         }
2008         spin_unlock(&nilfs->ns_inode_lock);
2009
2010         if (defer_iput)
2011                 schedule_work(&sci->sc_iput_work);
2012 }
2013
2014 /*
2015  * Main procedure of segment constructor
2016  */
2017 static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
2018 {
2019         struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2020         int err;
2021
2022         nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
2023         sci->sc_cno = nilfs->ns_cno;
2024
2025         err = nilfs_segctor_collect_dirty_files(sci, nilfs);
2026         if (unlikely(err))
2027                 goto out;
2028
2029         if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
2030                 set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
2031
2032         if (nilfs_segctor_clean(sci))
2033                 goto out;
2034
2035         do {
2036                 sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;
2037
2038                 err = nilfs_segctor_begin_construction(sci, nilfs);
2039                 if (unlikely(err))
2040                         goto out;
2041
2042                 /* Update time stamp */
2043                 sci->sc_seg_ctime = ktime_get_real_seconds();
2044
2045                 err = nilfs_segctor_collect(sci, nilfs, mode);
2046                 if (unlikely(err))
2047                         goto failed;
2048
2049                 /* Avoid empty segment */
2050                 if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE &&
2051                     nilfs_segbuf_empty(sci->sc_curseg)) {
2052                         nilfs_segctor_abort_construction(sci, nilfs, 1);
2053                         goto out;
2054                 }
2055
2056                 err = nilfs_segctor_assign(sci, mode);
2057                 if (unlikely(err))
2058                         goto failed;
2059
2060                 if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2061                         nilfs_segctor_fill_in_file_bmap(sci);
2062
2063                 if (mode == SC_LSEG_SR &&
2064                     nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) {
2065                         err = nilfs_segctor_fill_in_checkpoint(sci);
2066                         if (unlikely(err))
2067                                 goto failed_to_write;
2068
2069                         nilfs_segctor_fill_in_super_root(sci, nilfs);
2070                 }
2071                 nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
2072
2073                 /* Write partial segments */
2074                 nilfs_segctor_prepare_write(sci);
2075
2076                 nilfs_add_checksums_on_logs(&sci->sc_segbufs,
2077                                             nilfs->ns_crc_seed);
2078
2079                 err = nilfs_segctor_write(sci, nilfs);
2080                 if (unlikely(err))
2081                         goto failed_to_write;
2082
2083                 if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
2084                     nilfs->ns_blocksize_bits != PAGE_SHIFT) {
2085                         /*
2086                          * At this point, we avoid double buffering
2087                          * for blocksize < pagesize because the page dirty
2088                          * flag is turned off during write and dirty
2089                          * buffers are not properly collected for
2090                          * pages crossing over segments.
2091                          */
2092                         err = nilfs_segctor_wait(sci);
2093                         if (err)
2094                                 goto failed_to_write;
2095                 }
2096         } while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE);
2097
2098  out:
2099         nilfs_segctor_drop_written_files(sci, nilfs);
2100         return err;
2101
2102  failed_to_write:
2103         if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2104                 nilfs_redirty_inodes(&sci->sc_dirty_files);
2105
2106  failed:
2107         if (nilfs_doing_gc())
2108                 nilfs_redirty_inodes(&sci->sc_gc_inodes);
2109         nilfs_segctor_abort_construction(sci, nilfs, err);
2110         goto out;
2111 }
2112
2113 /**
2114  * nilfs_segctor_start_timer - set timer of background write
2115  * @sci: nilfs_sc_info
2116  *
2117  * If the timer has already been set, it ignores the new request.
2118  * This function MUST be called within a section locking the segment
2119  * semaphore.
2120  */
2121 static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
2122 {
2123         spin_lock(&sci->sc_state_lock);
2124         if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
2125                 sci->sc_timer.expires = jiffies + sci->sc_interval;
2126                 add_timer(&sci->sc_timer);
2127                 sci->sc_state |= NILFS_SEGCTOR_COMMIT;
2128         }
2129         spin_unlock(&sci->sc_state_lock);
2130 }
2131
2132 static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
2133 {
2134         spin_lock(&sci->sc_state_lock);
2135         if (!(sci->sc_flush_request & BIT(bn))) {
2136                 unsigned long prev_req = sci->sc_flush_request;
2137
2138                 sci->sc_flush_request |= BIT(bn);
2139                 if (!prev_req)
2140                         wake_up(&sci->sc_wait_daemon);
2141         }
2142         spin_unlock(&sci->sc_state_lock);
2143 }
2144
2145 /**
2146  * nilfs_flush_segment - trigger a segment construction for resource control
2147  * @sb: super block
2148  * @ino: inode number of the file to be flushed out.
2149  */
2150 void nilfs_flush_segment(struct super_block *sb, ino_t ino)
2151 {
2152         struct the_nilfs *nilfs = sb->s_fs_info;
2153         struct nilfs_sc_info *sci = nilfs->ns_writer;
2154
2155         if (!sci || nilfs_doing_construction())
2156                 return;
2157         nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
2158                                         /* assign bit 0 to data files */
2159 }
2160
2161 struct nilfs_segctor_wait_request {
2162         wait_queue_entry_t      wq;
2163         __u32           seq;
2164         int             err;
2165         atomic_t        done;
2166 };
2167
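/**
 * nilfs_segctor_sync - wait for the completion of a segment construction
 * @sci: segment constructor object
 *
 * Registers a wait request carrying a new sequence number, wakes up
 * the segctord daemon, and sleeps interruptibly until the daemon
 * reports the result through nilfs_segctor_wakeup() or a signal
 * arrives (-ERESTARTSYS).
 */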
2168 static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
2169 {
2170         struct nilfs_segctor_wait_request wait_req;
2171         int err = 0;
2172
2173         spin_lock(&sci->sc_state_lock);
2174         init_wait(&wait_req.wq);
2175         wait_req.err = 0;
2176         atomic_set(&wait_req.done, 0);
2177         wait_req.seq = ++sci->sc_seq_request;
2178         spin_unlock(&sci->sc_state_lock);
2179
2180         init_waitqueue_entry(&wait_req.wq, current);
2181         add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
2182         set_current_state(TASK_INTERRUPTIBLE);
2183         wake_up(&sci->sc_wait_daemon);
2184
2185         for (;;) {
2186                 if (atomic_read(&wait_req.done)) {
2187                         err = wait_req.err;
2188                         break;
2189                 }
2190                 if (!signal_pending(current)) {
2191                         schedule();
2192                         continue;
2193                 }
2194                 err = -ERESTARTSYS;
2195                 break;
2196         }
2197         finish_wait(&sci->sc_wait_request, &wait_req.wq);
2198         return err;
2199 }
2200
2201 static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
2202 {
2203         struct nilfs_segctor_wait_request *wrq, *n;
2204         unsigned long flags;
2205
2206         spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
2207         list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
2208                 if (!atomic_read(&wrq->done) &&
2209                     nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
2210                         wrq->err = err;
2211                         atomic_set(&wrq->done, 1);
2212                 }
2213                 if (atomic_read(&wrq->done)) {
2214                         wrq->wq.func(&wrq->wq,
2215                                      TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
2216                                      0, NULL);
2217                 }
2218         }
2219         spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
2220 }
2221
2222 /**
2223  * nilfs_construct_segment - construct a logical segment
2224  * @sb: super block
2225  *
2226  * Return Value: On success, 0 is returned. On errors, one of the following
2227  * negative error codes is returned.
2228  *
2229  * %-EROFS - Read only filesystem.
2230  *
2231  * %-EIO - I/O error
2232  *
2233  * %-ENOSPC - No space left on device (only in a panic state).
2234  *
2235  * %-ERESTARTSYS - Interrupted.
2236  *
2237  * %-ENOMEM - Insufficient memory available.
2238  */
2239 int nilfs_construct_segment(struct super_block *sb)
2240 {
2241         struct the_nilfs *nilfs = sb->s_fs_info;
2242         struct nilfs_sc_info *sci = nilfs->ns_writer;
2243         struct nilfs_transaction_info *ti;
2244
2245         if (!sci)
2246                 return -EROFS;
2247
2248         /* A call inside transactions causes a deadlock. */
2249         BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);
2250
2251         return nilfs_segctor_sync(sci);
2252 }
2253
2254 /**
2255  * nilfs_construct_dsync_segment - construct a data-only logical segment
2256  * @sb: super block
2257  * @inode: inode whose data blocks should be written out
2258  * @start: start byte offset
2259  * @end: end byte offset (inclusive)
2260  *
2261  * Return Value: On success, 0 is returned. On errors, one of the following
2262  * negative error codes is returned.
2263  *
2264  * %-EROFS - Read only filesystem.
2265  *
2266  * %-EIO - I/O error
2267  *
2268  * %-ENOSPC - No space left on device (only in a panic state).
2269  *
2270  * %-ERESTARTSYS - Interrupted.
2271  *
2272  * %-ENOMEM - Insufficient memory available.
2273  */
2274 int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
2275                                   loff_t start, loff_t end)
2276 {
2277         struct the_nilfs *nilfs = sb->s_fs_info;
2278         struct nilfs_sc_info *sci = nilfs->ns_writer;
2279         struct nilfs_inode_info *ii;
2280         struct nilfs_transaction_info ti;
2281         int err = 0;
2282
2283         if (!sci)
2284                 return -EROFS;
2285
2286         nilfs_transaction_lock(sb, &ti, 0);
2287
2288         ii = NILFS_I(inode);
2289         if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) ||
2290             nilfs_test_opt(nilfs, STRICT_ORDER) ||
2291             test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2292             nilfs_discontinued(nilfs)) {
2293                 nilfs_transaction_unlock(sb);
2294                 err = nilfs_segctor_sync(sci);
2295                 return err;
2296         }
2297
2298         spin_lock(&nilfs->ns_inode_lock);
2299         if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
2300             !test_bit(NILFS_I_BUSY, &ii->i_state)) {
2301                 spin_unlock(&nilfs->ns_inode_lock);
2302                 nilfs_transaction_unlock(sb);
2303                 return 0;
2304         }
2305         spin_unlock(&nilfs->ns_inode_lock);
2306         sci->sc_dsync_inode = ii;
2307         sci->sc_dsync_start = start;
2308         sci->sc_dsync_end = end;
2309
2310         err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
2311         if (!err)
2312                 nilfs->ns_flushed_device = 0;
2313
2314         nilfs_transaction_unlock(sb);
2315         return err;
2316 }
2317
2318 #define FLUSH_FILE_BIT  (0x1) /* data file only */
2319 #define FLUSH_DAT_BIT   BIT(NILFS_DAT_INO) /* DAT only */
2320
2321 /**
2322  * nilfs_segctor_accept - record accepted sequence count of log-write requests
2323  * @sci: segment constructor object
2324  */
2325 static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
2326 {
2327         spin_lock(&sci->sc_state_lock);
2328         sci->sc_seq_accepted = sci->sc_seq_request;
2329         spin_unlock(&sci->sc_state_lock);
2330         del_timer_sync(&sci->sc_timer);
2331 }
2332
2333 /**
2334  * nilfs_segctor_notify - notify the result of request to caller threads
2335  * @sci: segment constructor object
2336  * @mode: mode of log forming
2337  * @err: error code to be notified
2338  */
2339 static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
2340 {
2341         /* Clear requests (even when the construction failed) */
2342         spin_lock(&sci->sc_state_lock);
2343
2344         if (mode == SC_LSEG_SR) {
2345                 sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
2346                 sci->sc_seq_done = sci->sc_seq_accepted;
2347                 nilfs_segctor_wakeup(sci, err);
2348                 sci->sc_flush_request = 0;
2349         } else {
2350                 if (mode == SC_FLUSH_FILE)
2351                         sci->sc_flush_request &= ~FLUSH_FILE_BIT;
2352                 else if (mode == SC_FLUSH_DAT)
2353                         sci->sc_flush_request &= ~FLUSH_DAT_BIT;
2354
2355                 /* re-enable timer if checkpoint creation was not done */
2356                 if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2357                     time_before(jiffies, sci->sc_timer.expires))
2358                         add_timer(&sci->sc_timer);
2359         }
2360         spin_unlock(&sci->sc_state_lock);
2361 }
2362
2363 /**
2364  * nilfs_segctor_construct - form logs and write them to disk
2365  * @sci: segment constructor object
2366  * @mode: mode of log forming
2367  */
2368 static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
2369 {
2370         struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2371         struct nilfs_super_block **sbp;
2372         int err = 0;
2373
2374         nilfs_segctor_accept(sci);
2375
2376         if (nilfs_discontinued(nilfs))
2377                 mode = SC_LSEG_SR;
2378         if (!nilfs_segctor_confirm(sci))
2379                 err = nilfs_segctor_do_construct(sci, mode);
2380
2381         if (likely(!err)) {
2382                 if (mode != SC_FLUSH_DAT)
2383                         atomic_set(&nilfs->ns_ndirtyblks, 0);
2384                 if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
2385                     nilfs_discontinued(nilfs)) {
2386                         down_write(&nilfs->ns_sem);
2387                         err = -EIO;
2388                         sbp = nilfs_prepare_super(sci->sc_super,
2389                                                   nilfs_sb_will_flip(nilfs));
2390                         if (likely(sbp)) {
2391                                 nilfs_set_log_cursor(sbp[0], nilfs);
2392                                 err = nilfs_commit_super(sci->sc_super,
2393                                                          NILFS_SB_COMMIT);
2394                         }
2395                         up_write(&nilfs->ns_sem);
2396                 }
2397         }
2398
2399         nilfs_segctor_notify(sci, mode, err);
2400         return err;
2401 }
2402
2403 static void nilfs_construction_timeout(struct timer_list *t)
2404 {
2405         struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer);
2406
2407         wake_up_process(sci->sc_timer_task);
2408 }
2409
2410 static void
2411 nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
2412 {
2413         struct nilfs_inode_info *ii, *n;
2414
2415         list_for_each_entry_safe(ii, n, head, i_dirty) {
2416                 if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
2417                         continue;
2418                 list_del_init(&ii->i_dirty);
2419                 truncate_inode_pages(&ii->vfs_inode.i_data, 0);
2420                 nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
2421                 iput(&ii->vfs_inode);
2422         }
2423 }
2424
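/**
 * nilfs_clean_segments - write logs for garbage collection
 * @sb: super block instance
 * @argv: arguments of the clean segments (GC) request
 * @kbufs: array of buffers holding the GC payload
 *
 * Saves the DAT to its shadow map, prepares the segments to be freed,
 * and repeats a full construction until it succeeds, restoring the
 * shadow map on preparation failure.  When the DISCARD option is set,
 * the freed segments are also discarded on the device; discards are
 * turned off for the device if the request fails.
 */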
2425 int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
2426                          void **kbufs)
2427 {
2428         struct the_nilfs *nilfs = sb->s_fs_info;
2429         struct nilfs_sc_info *sci = nilfs->ns_writer;
2430         struct nilfs_transaction_info ti;
2431         int err;
2432
2433         if (unlikely(!sci))
2434                 return -EROFS;
2435
2436         nilfs_transaction_lock(sb, &ti, 1);
2437
2438         err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
2439         if (unlikely(err))
2440                 goto out_unlock;
2441
2442         err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
2443         if (unlikely(err)) {
2444                 nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
2445                 goto out_unlock;
2446         }
2447
2448         sci->sc_freesegs = kbufs[4];
2449         sci->sc_nfreesegs = argv[4].v_nmembs;
2450         list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
2451
2452         for (;;) {
2453                 err = nilfs_segctor_construct(sci, SC_LSEG_SR);
2454                 nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
2455
2456                 if (likely(!err))
2457                         break;
2458
2459                 nilfs_warn(sb, "error %d cleaning segments", err);
2460                 set_current_state(TASK_INTERRUPTIBLE);
2461                 schedule_timeout(sci->sc_interval);
2462         }
2463         if (nilfs_test_opt(nilfs, DISCARD)) {
2464                 int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
2465                                                  sci->sc_nfreesegs);
2466                 if (ret) {
2467                         nilfs_warn(sb,
2468                                    "error %d on discard request, turning discards off for the device",
2469                                    ret);
2470                         nilfs_clear_opt(nilfs, DISCARD);
2471                 }
2472         }
2473
2474  out_unlock:
2475         sci->sc_freesegs = NULL;
2476         sci->sc_nfreesegs = 0;
2477         nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
2478         nilfs_transaction_unlock(sb);
2479         return err;
2480 }
2481
2482 static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
2483 {
2484         struct nilfs_transaction_info ti;
2485
2486         nilfs_transaction_lock(sci->sc_super, &ti, 0);
2487         nilfs_segctor_construct(sci, mode);
2488
2489         /*
2490          * An unclosed segment should be retried.  We do this using sc_timer.
2491          * A timeout of sc_timer invokes a complete construction, which
2492          * closes the current logical segment.
2493          */
2494         if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
2495                 nilfs_segctor_start_timer(sci);
2496
2497         nilfs_transaction_unlock(sci->sc_super);
2498 }
2499
2500 static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
2501 {
2502         int mode = 0;
2503
2504         spin_lock(&sci->sc_state_lock);
2505         mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
2506                 SC_FLUSH_DAT : SC_FLUSH_FILE;
2507         spin_unlock(&sci->sc_state_lock);
2508
2509         if (mode) {
2510                 nilfs_segctor_do_construct(sci, mode);
2511
2512                 spin_lock(&sci->sc_state_lock);
2513                 sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
2514                         ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
2515                 spin_unlock(&sci->sc_state_lock);
2516         }
2517         clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
2518 }
2519
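/**
 * nilfs_segctor_flush_mode - choose the construction mode for flush requests
 * @sci: segment constructor object
 *
 * Returns SC_FLUSH_FILE or SC_FLUSH_DAT when only the corresponding
 * flush bits are pending and the current logical segment is either
 * closed or still young; otherwise returns SC_LSEG_SR so that the
 * segment is closed with a checkpoint and super root.
 */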
2520 static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
2521 {
2522         if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2523             time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
2524                 if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
2525                         return SC_FLUSH_FILE;
2526                 else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
2527                         return SC_FLUSH_DAT;
2528         }
2529         return SC_LSEG_SR;
2530 }
2531
2532 /**
2533  * nilfs_segctor_thread - main loop of the segment constructor thread.
2534  * @arg: pointer to a struct nilfs_sc_info.
2535  *
2536  * nilfs_segctor_thread() initializes a timer and serves as a daemon
2537  * to execute segment constructions.
2538  */
2539 static int nilfs_segctor_thread(void *arg)
2540 {
2541         struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
2542         struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2543         int timeout = 0;
2544
2545         sci->sc_timer_task = current;
2546
2547         /* start sync. */
2548         sci->sc_task = current;
2549         wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
2550         nilfs_info(sci->sc_super,
2551                    "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
2552                    sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
2553
2554         spin_lock(&sci->sc_state_lock);
2555  loop:
2556         for (;;) {
2557                 int mode;
2558
2559                 if (sci->sc_state & NILFS_SEGCTOR_QUIT)
2560                         goto end_thread;
2561
2562                 if (timeout || sci->sc_seq_request != sci->sc_seq_done)
2563                         mode = SC_LSEG_SR;
2564                 else if (sci->sc_flush_request)
2565                         mode = nilfs_segctor_flush_mode(sci);
2566                 else
2567                         break;
2568
2569                 spin_unlock(&sci->sc_state_lock);
2570                 nilfs_segctor_thread_construct(sci, mode);
2571                 spin_lock(&sci->sc_state_lock);
2572                 timeout = 0;
2573         }
2574
2575
2576         if (freezing(current)) {
2577                 spin_unlock(&sci->sc_state_lock);
2578                 try_to_freeze();
2579                 spin_lock(&sci->sc_state_lock);
2580         } else {
2581                 DEFINE_WAIT(wait);
2582                 int should_sleep = 1;
2583
2584                 prepare_to_wait(&sci->sc_wait_daemon, &wait,
2585                                 TASK_INTERRUPTIBLE);
2586
2587                 if (sci->sc_seq_request != sci->sc_seq_done)
2588                         should_sleep = 0;
2589                 else if (sci->sc_flush_request)
2590                         should_sleep = 0;
2591                 else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
2592                         should_sleep = time_before(jiffies,
2593                                         sci->sc_timer.expires);
2594
2595                 if (should_sleep) {
2596                         spin_unlock(&sci->sc_state_lock);
2597                         schedule();
2598                         spin_lock(&sci->sc_state_lock);
2599                 }
2600                 finish_wait(&sci->sc_wait_daemon, &wait);
2601                 timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2602                            time_after_eq(jiffies, sci->sc_timer.expires));
2603
2604                 if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
2605                         set_nilfs_discontinued(nilfs);
2606         }
2607         goto loop;
2608
2609  end_thread:
2610         spin_unlock(&sci->sc_state_lock);
2611
2612         /* end sync. */
2613         sci->sc_task = NULL;
2614         wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
2615         return 0;
2616 }
2617
2618 static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
2619 {
2620         struct task_struct *t;
2621
2622         t = kthread_run(nilfs_segctor_thread, sci, "segctord");
2623         if (IS_ERR(t)) {
2624                 int err = PTR_ERR(t);
2625
2626                 nilfs_err(sci->sc_super, "error %d creating segctord thread",
2627                           err);
2628                 return err;
2629         }
2630         wait_event(sci->sc_wait_task, sci->sc_task != NULL);
2631         return 0;
2632 }
2633
2634 static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
2635         __acquires(&sci->sc_state_lock)
2636         __releases(&sci->sc_state_lock)
2637 {
2638         sci->sc_state |= NILFS_SEGCTOR_QUIT;
2639
2640         while (sci->sc_task) {
2641                 wake_up(&sci->sc_wait_daemon);
2642                 spin_unlock(&sci->sc_state_lock);
2643                 wait_event(sci->sc_wait_task, sci->sc_task == NULL);
2644                 spin_lock(&sci->sc_state_lock);
2645         }
2646 }
2647
2648 /*
2649  * Setup & clean-up functions
2650  */
2651 static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
2652                                                struct nilfs_root *root)
2653 {
2654         struct the_nilfs *nilfs = sb->s_fs_info;
2655         struct nilfs_sc_info *sci;
2656
2657         sci = kzalloc(sizeof(*sci), GFP_KERNEL);
2658         if (!sci)
2659                 return NULL;
2660
2661         sci->sc_super = sb;
2662
2663         nilfs_get_root(root);
2664         sci->sc_root = root;
2665
2666         init_waitqueue_head(&sci->sc_wait_request);
2667         init_waitqueue_head(&sci->sc_wait_daemon);
2668         init_waitqueue_head(&sci->sc_wait_task);
2669         spin_lock_init(&sci->sc_state_lock);
2670         INIT_LIST_HEAD(&sci->sc_dirty_files);
2671         INIT_LIST_HEAD(&sci->sc_segbufs);
2672         INIT_LIST_HEAD(&sci->sc_write_logs);
2673         INIT_LIST_HEAD(&sci->sc_gc_inodes);
2674         INIT_LIST_HEAD(&sci->sc_iput_queue);
2675         INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
2676         timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
2677
2678         sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
2679         sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
2680         sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
2681
2682         if (nilfs->ns_interval)
2683                 sci->sc_interval = HZ * nilfs->ns_interval;
2684         if (nilfs->ns_watermark)
2685                 sci->sc_watermark = nilfs->ns_watermark;
2686         return sci;
2687 }
2688
2689 static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
2690 {
2691         int ret, retrycount = NILFS_SC_CLEANUP_RETRY;
2692
2693         /*
2694          * The segctord thread was stopped and its timer was removed.
2695          * But some tasks remain.
2696          */
2697         do {
2698                 struct nilfs_transaction_info ti;
2699
2700                 nilfs_transaction_lock(sci->sc_super, &ti, 0);
2701                 ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
2702                 nilfs_transaction_unlock(sci->sc_super);
2703
2704                 flush_work(&sci->sc_iput_work);
2705
2706         } while (ret && retrycount-- > 0);
2707 }
2708
2709 /**
2710  * nilfs_segctor_destroy - destroy the segment constructor.
2711  * @sci: nilfs_sc_info
2712  *
2713  * nilfs_segctor_destroy() kills the segctord thread and frees
2714  * the nilfs_sc_info struct.
2715  * Caller must hold the segment semaphore.
2716  */
2717 static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
2718 {
2719         struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2720         int flag;
2721
2722         up_write(&nilfs->ns_segctor_sem);
2723
2724         spin_lock(&sci->sc_state_lock);
2725         nilfs_segctor_kill_thread(sci);
2726         flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
2727                 || sci->sc_seq_request != sci->sc_seq_done);
2728         spin_unlock(&sci->sc_state_lock);
2729
2730         if (flush_work(&sci->sc_iput_work))
2731                 flag = true;
2732
2733         if (flag || !nilfs_segctor_confirm(sci))
2734                 nilfs_segctor_write_out(sci);
2735
2736         if (!list_empty(&sci->sc_dirty_files)) {
2737                 nilfs_warn(sci->sc_super,
2738                            "disposed unprocessed dirty file(s) when stopping log writer");
2739                 nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
2740         }
2741
2742         if (!list_empty(&sci->sc_iput_queue)) {
2743                 nilfs_warn(sci->sc_super,
2744                            "disposed unprocessed inode(s) in iput queue when stopping log writer");
2745                 nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
2746         }
2747
2748         WARN_ON(!list_empty(&sci->sc_segbufs));
2749         WARN_ON(!list_empty(&sci->sc_write_logs));
2750
2751         nilfs_put_root(sci->sc_root);
2752
2753         down_write(&nilfs->ns_segctor_sem);
2754
2755         del_timer_sync(&sci->sc_timer);
2756         kfree(sci);
2757 }
2758
2759 /**
2760  * nilfs_attach_log_writer - attach log writer
2761  * @sb: super block instance
2762  * @root: root object of the current filesystem tree
2763  *
2764  * This allocates a log writer object, initializes it, and starts the
2765  * log writer.
2766  *
2767  * Return Value: On success, 0 is returned. On error, one of the following
2768  * negative error codes is returned.
2769  *
2770  * %-ENOMEM - Insufficient memory available.
2771  */
2772 int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
2773 {
2774         struct the_nilfs *nilfs = sb->s_fs_info;
2775         int err;
2776
2777         if (nilfs->ns_writer) {
2778                 /*
2779                  * This happens if the filesystem was remounted
2780                  * read/write after nilfs_error degenerated it into a
2781                  * read-only mount.
2782                  */
2783                 nilfs_detach_log_writer(sb);
2784         }
2785
2786         nilfs->ns_writer = nilfs_segctor_new(sb, root);
2787         if (!nilfs->ns_writer)
2788                 return -ENOMEM;
2789
2790         inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL);
2791
2792         err = nilfs_segctor_start_thread(nilfs->ns_writer);
2793         if (unlikely(err))
2794                 nilfs_detach_log_writer(sb);
2795
2796         return err;
2797 }
2798
2799 /**
2800  * nilfs_detach_log_writer - destroy log writer
2801  * @sb: super block instance
2802  *
2803  * This kills the log writer daemon, frees the log writer object, and
2804  * destroys the list of dirty files.
2805  */
2806 void nilfs_detach_log_writer(struct super_block *sb)
2807 {
2808         struct the_nilfs *nilfs = sb->s_fs_info;
2809         LIST_HEAD(garbage_list);
2810
2811         down_write(&nilfs->ns_segctor_sem);
2812         if (nilfs->ns_writer) {
2813                 nilfs_segctor_destroy(nilfs->ns_writer);
2814                 nilfs->ns_writer = NULL;
2815         }
2816
2817         /* Forcibly free the list of dirty files */
2818         spin_lock(&nilfs->ns_inode_lock);
2819         if (!list_empty(&nilfs->ns_dirty_files)) {
2820                 list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
2821                 nilfs_warn(sb,
2822                            "disposed unprocessed dirty file(s) when detaching log writer");
2823         }
2824         spin_unlock(&nilfs->ns_inode_lock);
2825         up_write(&nilfs->ns_segctor_sem);
2826
2827         nilfs_dispose_list(nilfs, &garbage_list, 1);
2828 }