gfs2: Make flush bios explicitly sync
fs/gfs2/log.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"

/**
 * gfs2_struct2blk - compute the number of log descriptor blocks needed
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
                             unsigned int ssize)
{
        unsigned int blks;
        unsigned int first, second;

        blks = 1;
        first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;

        if (nstruct > first) {
                second = (sdp->sd_sb.sb_bsize -
                          sizeof(struct gfs2_meta_header)) / ssize;
                blks += DIV_ROUND_UP(nstruct - first, second);
        }

        return blks;
}
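
/*
 * Worked example (illustrative only; the header sizes here are assumed
 * round numbers, the real ones come from <linux/gfs2_ondisk.h>): with a
 * 4096-byte block, 8-byte entries (ssize == sizeof(u64)) and, say, a
 * 64-byte log descriptor, the first block holds (4096 - 64) / 8 = 504
 * entries.  If each continuation block carried a 24-byte meta header, it
 * would hold (4096 - 24) / 8 = 509 more, so nstruct = 1600 would need
 * blks = 1 + DIV_ROUND_UP(1600 - 504, 509) = 4 blocks.
 */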

/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 *
 */

void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
        bd->bd_tr = NULL;
        list_del_init(&bd->bd_ail_st_list);
        list_del_init(&bd->bd_ail_gl_list);
        atomic_dec(&bd->bd_gl->gl_ail_count);
        brelse(bd->bd_bh);
}

/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @tr: The transaction to start I/O on
 *
 * Returns: 1 if the caller should restart its scan of the AIL list,
 *          0 otherwise
 */

static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
                               struct writeback_control *wbc,
                               struct gfs2_trans *tr)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
        struct gfs2_glock *gl = NULL;
        struct address_space *mapping;
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;

        list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
                bh = bd->bd_bh;

                gfs2_assert(sdp, bd->bd_tr == tr);

                if (!buffer_busy(bh)) {
                        if (!buffer_uptodate(bh))
                                gfs2_io_error_bh(sdp, bh);
                        list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
                        continue;
                }

                if (!buffer_dirty(bh))
                        continue;
                if (gl == bd->bd_gl)
                        continue;
                gl = bd->bd_gl;
                list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
                mapping = bh->b_page->mapping;
                if (!mapping)
                        continue;
                spin_unlock(&sdp->sd_ail_lock);
                generic_writepages(mapping, wbc);
                spin_lock(&sdp->sd_ail_lock);
                if (wbc->nr_to_write <= 0)
                        break;
                return 1;
        }

        return 0;
}

/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
 */

void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
        struct list_head *head = &sdp->sd_ail1_list;
        struct gfs2_trans *tr;
        struct blk_plug plug;

        trace_gfs2_ail_flush(sdp, wbc, 1);
        blk_start_plug(&plug);
        spin_lock(&sdp->sd_ail_lock);
restart:
        list_for_each_entry_reverse(tr, head, tr_list) {
                if (wbc->nr_to_write <= 0)
                        break;
                if (gfs2_ail1_start_one(sdp, wbc, tr))
                        goto restart;
        }
        spin_unlock(&sdp->sd_ail_lock);
        blk_finish_plug(&plug);
        trace_gfs2_ail_flush(sdp, wbc, 0);
}

/**
 * gfs2_ail1_start - start writeback of all ail1 entries
 * @sdp: The superblock
 */

static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = LONG_MAX,
                .range_start = 0,
                .range_end = LLONG_MAX,
        };

        return gfs2_ail1_flush(sdp, &wbc);
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @tr: the transaction to check
 *
 */

static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;

        list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
                                         bd_ail_st_list) {
                bh = bd->bd_bh;
                gfs2_assert(sdp, bd->bd_tr == tr);
                if (buffer_busy(bh))
                        continue;
                if (!buffer_uptodate(bh))
                        gfs2_io_error_bh(sdp, bh);
                list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
        }
}

/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 *
 * Tries to empty the ail1 lists, starting with the oldest first
 */

static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr, *s;
        int oldest_tr = 1;
        int ret;

        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
                gfs2_ail1_empty_one(sdp, tr);
                if (list_empty(&tr->tr_ail1_list) && oldest_tr)
                        list_move(&tr->tr_list, &sdp->sd_ail2_list);
                else
                        oldest_tr = 0;
        }
        ret = list_empty(&sdp->sd_ail1_list);
        spin_unlock(&sdp->sd_ail_lock);

        return ret;
}
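
/*
 * A note on the AIL lifecycle (summary of the code above, not from the
 * original source): buffers that have been committed to the journal but
 * not yet written back in place live on a transaction's tr_ail1_list.
 * gfs2_ail1_start() kicks off that writeback, gfs2_ail1_wait() waits for
 * it, and gfs2_ail1_empty() moves completed buffers to tr_ail2_list,
 * where they remain until ail2_empty() sees the log tail pass tr_first
 * and frees them via gfs2_remove_from_ail().
 */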

static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr;
        struct gfs2_bufdata *bd;
        struct buffer_head *bh;

        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
                list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
                        bh = bd->bd_bh;
                        if (!buffer_locked(bh))
                                continue;
                        get_bh(bh);
                        spin_unlock(&sdp->sd_ail_lock);
                        wait_on_buffer(bh);
                        brelse(bh);
                        return;
                }
        }
        spin_unlock(&sdp->sd_ail_lock);
}

/**
 * gfs2_ail2_empty_one - Remove all of a transaction's entries from the AIL2 list
 * @sdp: the filesystem
 * @tr: the transaction to empty
 *
 */

static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct list_head *head = &tr->tr_ail2_list;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->prev, struct gfs2_bufdata,
                                bd_ail_st_list);
                gfs2_assert(sdp, bd->bd_tr == tr);
                gfs2_remove_from_ail(bd);
        }
}

static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
        struct gfs2_trans *tr, *safe;
        unsigned int old_tail = sdp->sd_log_tail;
        int wrap = (new_tail < old_tail);
        int a, b, rm;

        spin_lock(&sdp->sd_ail_lock);

        list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
                a = (old_tail <= tr->tr_first);
                b = (tr->tr_first < new_tail);
                rm = (wrap) ? (a || b) : (a && b);
                if (!rm)
                        continue;

                gfs2_ail2_empty_one(sdp, tr);
                list_del(&tr->tr_list);
                gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
                gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
                kfree(tr);
        }

        spin_unlock(&sdp->sd_ail_lock);
}
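
/*
 * Example of the wrap test in ail2_empty() (illustrative numbers): the
 * journal is circular, so when the tail moves from old_tail = 7000 to
 * new_tail = 100 (wrap == 1), a transaction is freed if it started at
 * or after the old tail OR before the new tail; tr_first = 7500 and
 * tr_first = 50 both qualify.  Without a wrap (old_tail = 100,
 * new_tail = 7000) both conditions must hold: 100 <= tr_first < 7000.
 */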

/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{
        atomic_add(blks, &sdp->sd_log_blks_free);
        trace_gfs2_log_blocks(sdp, blks);
        gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
                                  sdp->sd_jdesc->jd_blocks);
        up_read(&sdp->sd_log_flush_lock);
}

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. That's
 * due to the fact that there is a small number of header blocks
 * associated with each log flush. The exact number can't be known until
 * flush time, so we ensure that we have just enough free blocks at all
 * times to avoid running out during a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
        int ret = 0;
        unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
        unsigned wanted = blks + reserved_blks;
        DEFINE_WAIT(wait);
        int did_wait = 0;
        unsigned int free_blocks;

        if (gfs2_assert_warn(sdp, blks) ||
            gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
                return -EINVAL;
        atomic_add(blks, &sdp->sd_log_blks_needed);
retry:
        free_blocks = atomic_read(&sdp->sd_log_blks_free);
        if (unlikely(free_blocks <= wanted)) {
                do {
                        prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        wake_up(&sdp->sd_logd_waitq);
                        did_wait = 1;
                        if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
                                io_schedule();
                        free_blocks = atomic_read(&sdp->sd_log_blks_free);
                } while (free_blocks <= wanted);
                finish_wait(&sdp->sd_log_waitq, &wait);
        }
        atomic_inc(&sdp->sd_reserving_log);
        if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
                                free_blocks - blks) != free_blocks) {
                if (atomic_dec_and_test(&sdp->sd_reserving_log))
                        wake_up(&sdp->sd_reserving_log_wait);
                goto retry;
        }
        atomic_sub(blks, &sdp->sd_log_blks_needed);
        trace_gfs2_log_blocks(sdp, -blks);

        /*
         * If we waited, then so might others, wake them up _after_ we get
         * our share of the log.
         */
        if (unlikely(did_wait))
                wake_up(&sdp->sd_log_waitq);

        down_read(&sdp->sd_log_flush_lock);
        if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
                gfs2_log_release(sdp, blks);
                ret = -EROFS;
        }
        if (atomic_dec_and_test(&sdp->sd_reserving_log))
                wake_up(&sdp->sd_reserving_log_wait);
        return ret;
}
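
/*
 * Usage sketch (hypothetical caller, not part of this file): a
 * successful reservation takes sd_log_flush_lock for read, so it must
 * be paired with gfs2_log_release() if the reserved blocks end up
 * unused:
 *
 *        error = gfs2_log_reserve(sdp, blks);
 *        if (error)
 *                return error;
 *        error = do_transaction_work(sdp);        /* hypothetical helper */
 *        if (error)
 *                gfs2_log_release(sdp, blks);
 *        return error;
 *
 * Also note that reserved_blks scales with the block size: on a
 * 1024-byte block filesystem it holds back 7 * (4096 / 1024) = 28
 * blocks for the flush-time headers mentioned above.
 */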

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 *   Compute the distance (in the journal direction) between two
 *   blocks in the journal
 *
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
                                        unsigned int older)
{
        int dist;

        dist = newer - older;
        if (dist < 0)
                dist += sdp->sd_jdesc->jd_blocks;

        return dist;
}
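
/*
 * Example (illustrative): in an 8192-block journal, the distance from
 * older = 8000 to newer = 10 is 10 - 8000 + 8192 = 202 blocks, because
 * the journal wraps around at the end.
 */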

/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * This is complex.  We need to reserve room for all our currently used
 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data buffers for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
 * will count it as free space (sd_log_blks_free) and corruption will follow.
 *
 * We can have metadata bufs and jdata bufs in the same journal.  So each
 * type gets its own log header, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one header
 * in cases where we have more buffers than will fit on a journal page.
 * Metadata journal entries take up half the space of journaled buffer entries.
 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 * databuf_limit (251) before they cause a wrap around.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
        unsigned int reserved = 0;
        unsigned int mbuf;
        unsigned int dbuf;
        struct gfs2_trans *tr = sdp->sd_log_tr;

        if (tr) {
                mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
                dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
                reserved = mbuf + dbuf;
                /* Account for header blocks */
                reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
                reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
        }

        if (sdp->sd_log_commited_revoke > 0)
                reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
                                          sizeof(u64));
        /* One for the overall header */
        if (reserved)
                reserved++;
        return reserved;
}
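
/*
 * Worked example (illustrative, using the limits quoted above where
 * buf_limit = 502 and databuf_limit = 251): a cached transaction with
 * mbuf = 600 new metadata buffers and dbuf = 300 new jdata buffers
 * reserves 600 + 300 data blocks, plus DIV_ROUND_UP(600, 502) = 2
 * metadata headers, plus DIV_ROUND_UP(300, 251) = 2 jdata headers,
 * plus 1 overall header: 905 blocks in total, before any revokes.
 */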

static unsigned int current_tail(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr;
        unsigned int tail;

        spin_lock(&sdp->sd_ail_lock);

        if (list_empty(&sdp->sd_ail1_list)) {
                tail = sdp->sd_log_head;
        } else {
                tr = list_entry(sdp->sd_ail1_list.prev, struct gfs2_trans,
                                tr_list);
                tail = tr->tr_first;
        }

        spin_unlock(&sdp->sd_ail_lock);

        return tail;
}

static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
        unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

        ail2_empty(sdp, new_tail);

        atomic_add(dist, &sdp->sd_log_blks_free);
        trace_gfs2_log_blocks(sdp, dist);
        gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
                             sdp->sd_jdesc->jd_blocks);

        sdp->sd_log_tail = new_tail;
}

static void log_flush_wait(struct gfs2_sbd *sdp)
{
        DEFINE_WAIT(wait);

        if (atomic_read(&sdp->sd_log_in_flight)) {
                do {
                        prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&sdp->sd_log_in_flight))
                                io_schedule();
                } while (atomic_read(&sdp->sd_log_in_flight));
                finish_wait(&sdp->sd_log_flush_wait, &wait);
        }
}

static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct gfs2_inode *ipa, *ipb;

        ipa = list_entry(a, struct gfs2_inode, i_ordered);
        ipb = list_entry(b, struct gfs2_inode, i_ordered);

        if (ipa->i_no_addr < ipb->i_no_addr)
                return -1;
        if (ipa->i_no_addr > ipb->i_no_addr)
                return 1;
        return 0;
}
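
/*
 * Ordered inodes are sorted by i_no_addr (the inode's block address on
 * disk) before writeback, presumably so that gfs2_ordered_write() below
 * pushes the mappings out in roughly sequential on-disk order.
 */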

static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip;
        LIST_HEAD(written);

        spin_lock(&sdp->sd_ordered_lock);
        list_sort(NULL, &sdp->sd_log_le_ordered, &ip_cmp);
        while (!list_empty(&sdp->sd_log_le_ordered)) {
                ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
                list_move(&ip->i_ordered, &written);
                if (ip->i_inode.i_mapping->nrpages == 0)
                        continue;
                spin_unlock(&sdp->sd_ordered_lock);
                filemap_fdatawrite(ip->i_inode.i_mapping);
                spin_lock(&sdp->sd_ordered_lock);
        }
        list_splice(&written, &sdp->sd_log_le_ordered);
        spin_unlock(&sdp->sd_ordered_lock);
}

static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip;

        spin_lock(&sdp->sd_ordered_lock);
        while (!list_empty(&sdp->sd_log_le_ordered)) {
                ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
                list_del(&ip->i_ordered);
                WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
                if (ip->i_inode.i_mapping->nrpages == 0)
                        continue;
                spin_unlock(&sdp->sd_ordered_lock);
                filemap_fdatawait(ip->i_inode.i_mapping);
                spin_lock(&sdp->sd_ordered_lock);
        }
        spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

        spin_lock(&sdp->sd_ordered_lock);
        if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
                list_del(&ip->i_ordered);
        spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
        struct buffer_head *bh = bd->bd_bh;
        struct gfs2_glock *gl = bd->bd_gl;

        bh->b_private = NULL;
        bd->bd_blkno = bh->b_blocknr;
        gfs2_remove_from_ail(bd); /* drops ref on bh */
        bd->bd_bh = NULL;
        bd->bd_ops = &gfs2_revoke_lops;
        sdp->sd_log_num_revoke++;
        atomic_inc(&gl->gl_revokes);
        set_bit(GLF_LFLUSH, &gl->gl_flags);
        list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
}

void gfs2_write_revokes(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr;
        struct gfs2_bufdata *bd, *tmp;
        int have_revokes = 0;
        int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);

        gfs2_ail1_empty(sdp);
        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
                list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) {
                        if (list_empty(&bd->bd_list)) {
                                have_revokes = 1;
                                goto done;
                        }
                }
        }
done:
        spin_unlock(&sdp->sd_ail_lock);
        if (have_revokes == 0)
                return;
        while (sdp->sd_log_num_revoke > max_revokes)
                max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
        max_revokes -= sdp->sd_log_num_revoke;
        if (!sdp->sd_log_num_revoke) {
                atomic_dec(&sdp->sd_log_blks_free);
                /* If no blocks have been reserved, we need to also
                 * reserve a block for the header */
                if (!sdp->sd_log_blks_reserved)
                        atomic_dec(&sdp->sd_log_blks_free);
        }
        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
                list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) {
                        if (max_revokes == 0)
                                goto out_of_blocks;
                        if (!list_empty(&bd->bd_list))
                                continue;
                        gfs2_add_revoke(sdp, bd);
                        max_revokes--;
                }
        }
out_of_blocks:
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);

        if (!sdp->sd_log_num_revoke) {
                atomic_inc(&sdp->sd_log_blks_free);
                if (!sdp->sd_log_blks_reserved)
                        atomic_inc(&sdp->sd_log_blks_free);
        }
}
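
/*
 * A note on the revoke budget in gfs2_write_revokes() (summary, not
 * from the original source): the first revoke block is a log
 * descriptor, so it holds
 * (sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64)
 * entries, while each continuation block only carries a meta header and
 * holds a few more.  The while loop grows max_revokes one continuation
 * block at a time until it covers the revokes already queued; whatever
 * is left over is the budget for pulling new revokes out of the AIL.
 */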

/**
 * log_write_header - Get and initialize a journal header buffer
 * @sdp: The GFS2 superblock
 * @flags: The log header flags (e.g. GFS2_LOG_HEAD_UNMOUNT), or 0
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
        struct gfs2_log_header *lh;
        unsigned int tail;
        u32 hash;
        int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
        struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
        enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
        lh = page_address(page);
        clear_page(lh);

        gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));

        tail = current_tail(sdp);

        lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
        lh->lh_header.__pad0 = cpu_to_be64(0);
        lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
        lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
        lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
        lh->lh_flags = cpu_to_be32(flags);
        lh->lh_tail = cpu_to_be32(tail);
        lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
        hash = gfs2_disk_hash(page_address(page), sizeof(struct gfs2_log_header));
        lh->lh_hash = cpu_to_be32(hash);

        if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
                gfs2_ordered_wait(sdp);
                log_flush_wait(sdp);
                op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
        }

        sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
        gfs2_log_write_page(sdp, page);
        gfs2_log_flush_bio(sdp, REQ_OP_WRITE, op_flags);
        log_flush_wait(sdp);

        if (sdp->sd_log_tail != tail)
                log_pull_tail(sdp, tail);
}
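
/*
 * A note on the bio flags above (summary of block-layer semantics, not
 * from the original source): REQ_PREFLUSH flushes the device's volatile
 * write cache before the header is written, REQ_FUA forces the header
 * itself down to stable media, and REQ_SYNC marks the bio as
 * synchronous so the block layer does not defer this latency-critical
 * write as background I/O, which is the point of making flush bios
 * explicitly sync.  With barriers disabled (SDF_NOBARRIERS), the
 * cache-flush flags are dropped and ordering is enforced by waiting for
 * the ordered and log I/O by hand instead.
 */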

/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 * @type: The type of log flush being performed
 *
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
                    enum gfs2_flush_type type)
{
        struct gfs2_trans *tr;
        enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

        down_write(&sdp->sd_log_flush_lock);

        /* Log might have been flushed while we waited for the flush lock */
        if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
                up_write(&sdp->sd_log_flush_lock);
                return;
        }
        trace_gfs2_log_flush(sdp, 1);

        if (type == SHUTDOWN_FLUSH)
                clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

        sdp->sd_log_flush_head = sdp->sd_log_head;
        sdp->sd_log_flush_wrapped = 0;
        tr = sdp->sd_log_tr;
        if (tr) {
                sdp->sd_log_tr = NULL;
                INIT_LIST_HEAD(&tr->tr_ail1_list);
                INIT_LIST_HEAD(&tr->tr_ail2_list);
                tr->tr_first = sdp->sd_log_flush_head;
                if (unlikely(state == SFS_FROZEN))
                        gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new);
        }

        if (unlikely(state == SFS_FROZEN))
                gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
        gfs2_assert_withdraw(sdp,
                        sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

        gfs2_ordered_write(sdp);
        lops_before_commit(sdp, tr);
        gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);

        if (sdp->sd_log_head != sdp->sd_log_flush_head) {
                log_flush_wait(sdp);
                log_write_header(sdp, 0);
        } else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle) {
                atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
                trace_gfs2_log_blocks(sdp, -1);
                log_write_header(sdp, 0);
        }
        lops_after_commit(sdp, tr);

        gfs2_log_lock(sdp);
        sdp->sd_log_head = sdp->sd_log_flush_head;
        sdp->sd_log_blks_reserved = 0;
        sdp->sd_log_commited_revoke = 0;

        spin_lock(&sdp->sd_ail_lock);
        if (tr && !list_empty(&tr->tr_ail1_list)) {
                list_add(&tr->tr_list, &sdp->sd_ail1_list);
                tr = NULL;
        }
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);

        if (type != NORMAL_FLUSH) {
                if (!sdp->sd_log_idle) {
                        for (;;) {
                                gfs2_ail1_start(sdp);
                                gfs2_ail1_wait(sdp);
                                if (gfs2_ail1_empty(sdp))
                                        break;
                        }
                        atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
                        trace_gfs2_log_blocks(sdp, -1);
                        sdp->sd_log_flush_wrapped = 0;
                        log_write_header(sdp, 0);
                        sdp->sd_log_head = sdp->sd_log_flush_head;
                }
                if (type == SHUTDOWN_FLUSH || type == FREEZE_FLUSH)
                        gfs2_log_shutdown(sdp);
                if (type == FREEZE_FLUSH)
                        atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
        }

        trace_gfs2_log_flush(sdp, 0);
        up_write(&sdp->sd_log_flush_lock);

        kfree(tr);
}

/**
 * gfs2_merge_trans - Merge a new transaction into a cached transaction
 * @old: Original transaction to be expanded
 * @new: New transaction to be merged
 */

static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
{
        WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));

        old->tr_num_buf_new     += new->tr_num_buf_new;
        old->tr_num_databuf_new += new->tr_num_databuf_new;
        old->tr_num_buf_rm      += new->tr_num_buf_rm;
        old->tr_num_databuf_rm  += new->tr_num_databuf_rm;
        old->tr_num_revoke      += new->tr_num_revoke;
        old->tr_num_revoke_rm   += new->tr_num_revoke_rm;

        list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
        list_splice_tail_init(&new->tr_buf, &old->tr_buf);
}

static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        unsigned int reserved;
        unsigned int unused;
        unsigned int maxres;

        gfs2_log_lock(sdp);

        if (sdp->sd_log_tr) {
                gfs2_merge_trans(sdp->sd_log_tr, tr);
        } else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
                gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags));
                sdp->sd_log_tr = tr;
                set_bit(TR_ATTACHED, &tr->tr_flags);
        }

        sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
        reserved = calc_reserved(sdp);
        maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
        gfs2_assert_withdraw(sdp, maxres >= reserved);
        unused = maxres - reserved;
        atomic_add(unused, &sdp->sd_log_blks_free);
        trace_gfs2_log_blocks(sdp, unused);
        gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
                             sdp->sd_jdesc->jd_blocks);
        sdp->sd_log_blks_reserved = reserved;

        gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 *
 * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
 * journal size.
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        log_refund(sdp, tr);

        if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
            ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
            atomic_read(&sdp->sd_log_thresh2)))
                wake_up(&sdp->sd_logd_waitq);
}
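
/*
 * Example (illustrative): with a 32768-block journal, the mount-time
 * defaults described above give thresh1 ~ 10922 and thresh2 ~ 21845
 * blocks, so logd is woken once pinned blocks pass roughly 1/3 of the
 * journal or total in-use blocks pass roughly 2/3 of it.
 */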

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
        gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
        gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

        sdp->sd_log_flush_head = sdp->sd_log_head;
        sdp->sd_log_flush_wrapped = 0;

        log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT);

        gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
        gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

        sdp->sd_log_head = sdp->sd_log_flush_head;
        sdp->sd_log_tail = sdp->sd_log_head;
}

static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
        return (atomic_read(&sdp->sd_log_pinned) +
                atomic_read(&sdp->sd_log_blks_needed) >=
                atomic_read(&sdp->sd_log_thresh1));
}

static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
        unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
        return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
                atomic_read(&sdp->sd_log_thresh2);
}

/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */

int gfs2_logd(void *data)
{
        struct gfs2_sbd *sdp = data;
        unsigned long t = 1;
        DEFINE_WAIT(wait);
        bool did_flush;

        while (!kthread_should_stop()) {

                did_flush = false;
                if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
                        gfs2_ail1_empty(sdp);
                        gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
                        did_flush = true;
                }

                if (gfs2_ail_flush_reqd(sdp)) {
                        gfs2_ail1_start(sdp);
                        gfs2_ail1_wait(sdp);
                        gfs2_ail1_empty(sdp);
                        gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
                        did_flush = true;
                }

                if (!gfs2_ail_flush_reqd(sdp) || did_flush)
                        wake_up(&sdp->sd_log_waitq);

                t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

                try_to_freeze();

                do {
                        prepare_to_wait(&sdp->sd_logd_waitq, &wait,
                                        TASK_INTERRUPTIBLE);
                        if (!gfs2_ail_flush_reqd(sdp) &&
                            !gfs2_jrnl_flush_reqd(sdp) &&
                            !kthread_should_stop())
                                t = schedule_timeout(t);
                } while (t && !gfs2_ail_flush_reqd(sdp) &&
                        !gfs2_jrnl_flush_reqd(sdp) &&
                        !kthread_should_stop());
                finish_wait(&sdp->sd_logd_waitq, &wait);
        }

        return 0;
}