Group short-lived and reclaimable kernel allocations
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 7f96b5cb67816109e38a51b79a6ee80364e48c5d..a6be78c05dce50420fdf1c08fad3cd80ac5120cd 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -1,5 +1,5 @@
 /*
- * linux/fs/journal.c
+ * linux/fs/jbd/journal.c
  *
  * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
  *
 #include <linux/jbd.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
-#include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/mm.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/pagemap.h>
 #include <linux/kthread.h>
+#include <linux/poison.h>
 #include <linux/proc_fs.h>
 
 #include <asm/uaccess.h>
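
The include changes are not just churn: <linux/smp_lock.h> goes away (presumably nothing in this file takes the big kernel lock any more), <linux/suspend.h> becomes <linux/freezer.h> because try_to_freeze() lives there, and <linux/poison.h> provides the JBD_POISON_FREE constant used further down. A minimal sketch of the freezer pattern a journal thread has to follow (illustrative only; the loop body and timeout are assumptions, not the kjournald code):

	#include <linux/freezer.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>

	static int example_kthread(void *arg)
	{
		while (!kthread_should_stop()) {
			/* Park here when the freezer asks, e.g. across suspend. */
			try_to_freeze();
			/* Pretend to do some work, then sleep for a second. */
			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}
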
@@ -83,6 +83,7 @@ EXPORT_SYMBOL(journal_force_commit);
 
 static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
 static void __journal_abort_soft (journal_t *journal, int errno);
+static int journal_create_jbd_slab(size_t slab_size);
 
 /*
  * Helper function used to manage commit timeouts
@@ -179,7 +180,7 @@ loop:
                                                transaction->t_expires))
                        should_sleep = 0;
                if (journal->j_flags & JFS_UNMOUNT)
-                       should_sleep = 0;
+                       should_sleep = 0;
                if (should_sleep) {
                        spin_unlock(&journal->j_state_lock);
                        schedule();
@@ -209,10 +210,16 @@ end_loop:
        return 0;
 }
 
-static void journal_start_thread(journal_t *journal)
+static int journal_start_thread(journal_t *journal)
 {
-       kthread_run(kjournald, journal, "kjournald");
+       struct task_struct *t;
+
+       t = kthread_run(kjournald, journal, "kjournald");
+       if (IS_ERR(t))
+               return PTR_ERR(t);
+
        wait_event(journal->j_wait_done_commit, journal->j_task != 0);
+       return 0;
 }
 
 static void journal_kill_thread(journal_t *journal)
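
Returning int from journal_start_thread() fixes a silently ignored failure: kthread_run() does not return NULL on error, it returns an ERR_PTR-encoded value, so it has to be tested with IS_ERR() and decoded with PTR_ERR(). The idiom in isolation (the thread function and names here are placeholders, not from this file):

	#include <linux/err.h>
	#include <linux/kthread.h>

	static int start_worker(int (*fn)(void *), void *data)
	{
		struct task_struct *t;

		t = kthread_run(fn, data, "worker");
		if (IS_ERR(t))
			return PTR_ERR(t);	/* e.g. -ENOMEM */

		/* t is valid; the caller may later use kthread_stop(t). */
		return 0;
	}

Further down in this diff, journal_reset() forwards that return value instead of discarding it, so a failed thread start now propagates to journal_load()/journal_create() callers.
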
@@ -269,7 +276,7 @@ static void journal_kill_thread(journal_t *journal)
 int journal_write_metadata_buffer(transaction_t *transaction,
                                  struct journal_head  *jh_in,
                                  struct journal_head **jh_out,
-                                 int blocknr)
+                                 unsigned long blocknr)
 {
        int need_copy_out = 0;
        int done_copy_out = 0;
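
Widening blocknr from int to unsigned long is a range fix, not a cosmetic one: a signed 32-bit block number wraps negative past 2^31, while unsigned long matches the type journal_bmap() and the other log-block helpers already use. A two-line illustration (value picked arbitrarily; the signed result is what typical ABIs produce, not something the C standard guarantees):

	int narrow = (int)0x90000000UL;		/* typically ends up negative */
	unsigned long wide = 0x90000000UL;	/* stays 2415919104 */
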
@@ -327,10 +334,10 @@ repeat:
                char *tmp;
 
                jbd_unlock_bh_state(bh_in);
-               tmp = jbd_rep_kmalloc(bh_in->b_size, GFP_NOFS);
+               tmp = jbd_slab_alloc(bh_in->b_size, GFP_NOFS);
                jbd_lock_bh_state(bh_in);
                if (jh_in->b_frozen_data) {
-                       kfree(tmp);
+                       jbd_slab_free(tmp, bh_in->b_size);
                        goto repeat;
                }
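
The hunk above keeps the existing "drop the lock, allocate, retake the lock, re-check" dance and only swaps the allocator. The dance is needed because a GFP_NOFS allocation may sleep, so bh_state cannot be held across it, and by the time the lock is retaken another thread may already have attached b_frozen_data. A stripped-down sketch of the shape (the lock type and field names are stand-ins, not the jbd code):

	#include <linux/spinlock.h>

	struct cached {
		spinlock_t lock;
		void *buf;
	};

	static void attach_buffer(struct cached *c, size_t size)
	{
	repeat:
		spin_lock(&c->lock);
		if (!c->buf) {
			void *tmp;

			spin_unlock(&c->lock);		/* may not sleep under the lock */
			tmp = jbd_slab_alloc(size, GFP_NOFS);
			spin_lock(&c->lock);
			if (c->buf) {			/* lost the race; undo and retry */
				spin_unlock(&c->lock);
				jbd_slab_free(tmp, size);
				goto repeat;
			}
			c->buf = tmp;
		}
		spin_unlock(&c->lock);
	}
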
 
@@ -576,7 +583,7 @@ int journal_next_log_block(journal_t *journal, unsigned long *retp)
  * this is a no-op.  If needed, we can use j_blk_offset - everything is
  * ready.
  */
-int journal_bmap(journal_t *journal, unsigned long blocknr, 
+int journal_bmap(journal_t *journal, unsigned long blocknr,
                 unsigned long *retp)
 {
        int err = 0;
@@ -694,13 +701,13 @@ fail:
  *  @bdev: Block device on which to create the journal
  *  @fs_dev: Device which hold journalled filesystem for this journal.
  *  @start: Block nr Start of journal.
- *  @len:  Lenght of the journal in blocks.
+ *  @len:  Length of the journal in blocks.
  *  @blocksize: blocksize of journalling device
  *  @returns: a newly created journal_t *
- *  
+ *
  *  journal_init_dev creates a journal which maps a fixed contiguous
  *  range of blocks on an arbitrary block device.
- * 
+ *
  */
 journal_t * journal_init_dev(struct block_device *bdev,
                        struct block_device *fs_dev,
@@ -713,18 +720,8 @@ journal_t * journal_init_dev(struct block_device *bdev,
        if (!journal)
                return NULL;
 
-       journal->j_dev = bdev;
-       journal->j_fs_dev = fs_dev;
-       journal->j_blk_offset = start;
-       journal->j_maxlen = len;
-       journal->j_blocksize = blocksize;
-
-       bh = __getblk(journal->j_dev, start, journal->j_blocksize);
-       J_ASSERT(bh != NULL);
-       journal->j_sb_buffer = bh;
-       journal->j_superblock = (journal_superblock_t *)bh->b_data;
-
        /* journal descriptor can store up to n blocks -bzzz */
+       journal->j_blocksize = blocksize;
        n = journal->j_blocksize / sizeof(journal_block_tag_t);
        journal->j_wbufsize = n;
        journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
@@ -733,15 +730,25 @@ journal_t * journal_init_dev(struct block_device *bdev,
                        __FUNCTION__);
                kfree(journal);
                journal = NULL;
+               goto out;
        }
+       journal->j_dev = bdev;
+       journal->j_fs_dev = fs_dev;
+       journal->j_blk_offset = start;
+       journal->j_maxlen = len;
 
+       bh = __getblk(journal->j_dev, start, journal->j_blocksize);
+       J_ASSERT(bh != NULL);
+       journal->j_sb_buffer = bh;
+       journal->j_superblock = (journal_superblock_t *)bh->b_data;
+out:
        return journal;
 }
-/** 
+
+/**
  *  journal_t * journal_init_inode () - creates a journal which maps to a inode.
  *  @inode: An inode to create the journal in
- *  
+ *
  * journal_init_inode creates a journal which maps an on-disk inode as
  * the journal.  The inode must exist already, must support bmap() and
  * must have all data blocks preallocated.
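
The journal_init_dev() reshuffle above is an ordering fix: j_wbuf is now allocated and checked before the superblock buffer head is grabbed with __getblk(), and the failure path leaves through a single out: label rather than continuing with a freed journal. The general shape of that check-allocations-before-touching-the-device idiom, sketched with made-up names:

	#include <linux/slab.h>

	struct ctx {
		void *wbuf;
	};

	static struct ctx *ctx_init(void)
	{
		struct ctx *c = kzalloc(sizeof(*c), GFP_KERNEL);

		if (!c)
			return NULL;

		c->wbuf = kmalloc(4096, GFP_KERNEL);
		if (!c->wbuf) {
			kfree(c);		/* bail out before any device I/O */
			c = NULL;
			goto out;
		}
		/* Only now is it safe to read buffers, issue I/O, etc. */
	out:
		return c;
	}
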
@@ -761,7 +768,7 @@ journal_t * journal_init_inode (struct inode *inode)
        journal->j_inode = inode;
        jbd_debug(1,
                  "journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n",
-                 journal, inode->i_sb->s_id, inode->i_ino, 
+                 journal, inode->i_sb->s_id, inode->i_ino,
                  (long long) inode->i_size,
                  inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);
 
@@ -796,10 +803,10 @@ journal_t * journal_init_inode (struct inode *inode)
        return journal;
 }
 
-/* 
+/*
  * If the journal init or create aborts, we need to mark the journal
  * superblock as being NULL to prevent the journal destroy from writing
- * back a bogus superblock. 
+ * back a bogus superblock.
  */
 static void journal_fail_superblock (journal_t *journal)
 {
@@ -818,7 +825,7 @@ static void journal_fail_superblock (journal_t *journal)
 static int journal_reset(journal_t *journal)
 {
        journal_superblock_t *sb = journal->j_superblock;
-       unsigned int first, last;
+       unsigned long first, last;
 
        first = be32_to_cpu(sb->s_first);
        last = be32_to_cpu(sb->s_maxlen);
@@ -838,17 +845,16 @@ static int journal_reset(journal_t *journal)
 
        /* Add the dynamic fields and write it to disk. */
        journal_update_superblock(journal, 1);
-       journal_start_thread(journal);
-       return 0;
+       return journal_start_thread(journal);
 }
 
-/** 
+/**
  * int journal_create() - Initialise the new journal file
  * @journal: Journal to create. This structure must have been initialised
- * 
+ *
  * Given a journal_t structure which tells us which disk blocks we can
  * use, create a new journal superblock and initialise all of the
- * journal fields from scratch.  
+ * journal fields from scratch.
  **/
 int journal_create(journal_t *journal)
 {
@@ -913,7 +919,7 @@ int journal_create(journal_t *journal)
        return journal_reset(journal);
 }
 
-/** 
+/**
  * void journal_update_superblock() - Update journal sb on disk.
  * @journal: The journal to update.
  * @wait: Set to '0' if you don't want to wait for IO completion.
@@ -937,7 +943,7 @@ void journal_update_superblock(journal_t *journal, int wait)
                                journal->j_transaction_sequence) {
                jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
                        "(start %ld, seq %d, errno %d)\n",
-                       journal->j_tail, journal->j_tail_sequence, 
+                       journal->j_tail, journal->j_tail_sequence,
                        journal->j_errno);
                goto out;
        }
@@ -1060,7 +1066,7 @@ static int load_superblock(journal_t *journal)
 /**
  * int journal_load() - Read journal from disk.
  * @journal: Journal to act on.
- * 
+ *
  * Given a journal_t structure which tells us which disk blocks contain
  * a journal, read the journal from disk to initialise the in-memory
  * structures.
@@ -1068,17 +1074,17 @@ static int load_superblock(journal_t *journal)
 int journal_load(journal_t *journal)
 {
        int err;
+       journal_superblock_t *sb;
 
        err = load_superblock(journal);
        if (err)
                return err;
 
+       sb = journal->j_superblock;
        /* If this is a V2 superblock, then we have to check the
         * features flags on it. */
 
        if (journal->j_format_version >= 2) {
-               journal_superblock_t *sb = journal->j_superblock;
-
                if ((sb->s_feature_ro_compat &
                     ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) ||
                    (sb->s_feature_incompat &
@@ -1089,6 +1095,13 @@ int journal_load(journal_t *journal)
                }
        }
 
+       /*
+        * Create a slab for this blocksize
+        */
+       err = journal_create_jbd_slab(be32_to_cpu(sb->s_blocksize));
+       if (err)
+               return err;
+
        /* Let the recovery code check whether it needs to recover any
         * data from the journal. */
        if (journal_recover(journal))
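
journal_load() is the first point where the on-disk block size is actually known, which is why the per-blocksize slab is created here rather than at module init. Note the be32_to_cpu(): JBD superblock fields are stored big-endian on disk, so every read converts to host order and every write converts back. A small reminder of the convention (the fields are real superblock/journal members, the value is made up):

	/* Reading: on-disk big-endian -> host order. */
	u32 blocksize = be32_to_cpu(sb->s_blocksize);	/* e.g. 0x00001000 -> 4096 */

	/* Writing: host order -> on-disk big-endian. */
	sb->s_sequence = cpu_to_be32(journal->j_transaction_sequence);
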
@@ -1163,9 +1176,9 @@ void journal_destroy(journal_t *journal)
  * @compat: bitmask of compatible features
  * @ro: bitmask of features that force read-only mount
  * @incompat: bitmask of incompatible features
- * 
+ *
  * Check whether the journal uses all of a given set of
- * features.  Return true (non-zero) if it does. 
+ * features.  Return true (non-zero) if it does.
  **/
 
 int journal_check_used_features (journal_t *journal, unsigned long compat,
@@ -1194,7 +1207,7 @@ int journal_check_used_features (journal_t *journal, unsigned long compat,
  * @compat: bitmask of compatible features
  * @ro: bitmask of features that force read-only mount
  * @incompat: bitmask of incompatible features
- * 
+ *
  * Check whether the journaling code supports the use of
  * all of a given set of features on this journal.  Return true
  * (non-zero) if it can. */
@@ -1232,7 +1245,7 @@ int journal_check_available_features (journal_t *journal, unsigned long compat,
  * @incompat: bitmask of incompatible features
  *
  * Mark a given journal feature as present on the
- * superblock.  Returns true if the requested features could be set. 
+ * superblock.  Returns true if the requested features could be set.
  *
  */
 
@@ -1318,7 +1331,7 @@ static int journal_convert_superblock_v1(journal_t *journal,
 /**
  * int journal_flush () - Flush journal
  * @journal: Journal to act on.
- * 
+ *
  * Flush all data for a given journal to disk and empty the journal.
  * Filesystems can use this when remounting readonly to ensure that
  * recovery does not need to happen on remount.
@@ -1385,7 +1398,7 @@ int journal_flush(journal_t *journal)
  * int journal_wipe() - Wipe journal contents
  * @journal: Journal to act on.
  * @write: flag (see below)
- * 
+ *
  * Wipe out all of the contents of a journal, safely.  This will produce
  * a warning if the journal contains any valid recovery information.
  * Must be called between journal_init_*() and journal_load().
@@ -1440,7 +1453,7 @@ static const char *journal_dev_name(journal_t *journal, char *buffer)
 
 /*
  * Journal abort has very specific semantics, which we describe
- * for journal abort. 
+ * for journal abort.
  *
  * Two internal functions, which provide abort to the jbd layer
  * itself, are here.
@@ -1495,7 +1508,7 @@ static void __journal_abort_soft (journal_t *journal, int errno)
  * Perform a complete, immediate shutdown of the ENTIRE
  * journal (not of a single transaction).  This operation cannot be
  * undone without closing and reopening the journal.
- *           
+ *
  * The journal_abort function is intended to support higher level error
  * recovery mechanisms such as the ext2/ext3 remount-readonly error
  * mode.
@@ -1529,7 +1542,7 @@ static void __journal_abort_soft (journal_t *journal, int errno)
  * supply an errno; a null errno implies that absolutely no further
  * writes are done to the journal (unless there are any already in
  * progress).
- * 
+ *
  */
 
 void journal_abort(journal_t *journal, int errno)
@@ -1537,7 +1550,7 @@ void journal_abort(journal_t *journal, int errno)
        __journal_abort_soft(journal, errno);
 }
 
-/** 
+/**
  * int journal_errno () - returns the journal's error state.
  * @journal: journal to examine.
  *
@@ -1561,7 +1574,7 @@ int journal_errno(journal_t *journal)
        return err;
 }
 
-/** 
+/**
  * int journal_clear_err () - clears the journal's error state
  * @journal: journal to act on.
  *
@@ -1581,7 +1594,7 @@ int journal_clear_err(journal_t *journal)
        return err;
 }
 
-/** 
+/**
  * void journal_ack_err() - Ack journal err.
  * @journal: journal to act on.
  *
@@ -1603,17 +1616,88 @@ int journal_blocks_per_page(struct inode *inode)
 
 /*
  * Simple support for retrying memory allocations.  Introduced to help to
- * debug different VM deadlock avoidance strategies. 
+ * debug different VM deadlock avoidance strategies.
  */
 void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
 {
        return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0));
 }
 
+/*
+ * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed
+ * and allocate frozen and commit buffers from these slabs.
+ *
+ * The reason for doing this is to avoid SLAB_DEBUG, since it could
+ * cause a bh to cross a page boundary.
+ */
+
+#define JBD_MAX_SLABS 5
+#define JBD_SLAB_INDEX(size)  (size >> 11)
+
+static struct kmem_cache *jbd_slab[JBD_MAX_SLABS];
+static const char *jbd_slab_names[JBD_MAX_SLABS] = {
+       "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k"
+};
+
+static void journal_destroy_jbd_slabs(void)
+{
+       int i;
+
+       for (i = 0; i < JBD_MAX_SLABS; i++) {
+               if (jbd_slab[i])
+                       kmem_cache_destroy(jbd_slab[i]);
+               jbd_slab[i] = NULL;
+       }
+}
+
+static int journal_create_jbd_slab(size_t slab_size)
+{
+       int i = JBD_SLAB_INDEX(slab_size);
+
+       BUG_ON(i >= JBD_MAX_SLABS);
+
+       /*
+        * Check if we already have a slab created for this size
+        */
+       if (jbd_slab[i])
+               return 0;
+
+       /*
+        * Create a slab and force alignment to be same as slabsize -
+        * this will make sure that allocations won't cross the page
+        * boundary.
+        */
+       jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
+                               slab_size, slab_size, 0, NULL);
+       if (!jbd_slab[i]) {
+               printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+void * jbd_slab_alloc(size_t size, gfp_t flags)
+{
+       int idx;
+
+       idx = JBD_SLAB_INDEX(size);
+       BUG_ON(jbd_slab[idx] == NULL);
+       return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL);
+}
+
+void jbd_slab_free(void *ptr,  size_t size)
+{
+       int idx;
+
+       idx = JBD_SLAB_INDEX(size);
+       BUG_ON(jbd_slab[idx] == NULL);
+       kmem_cache_free(jbd_slab[idx], ptr);
+}
+
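
For reference, the JBD_SLAB_INDEX() shift maps the block sizes JBD supports onto the arrays above; index 3 is intentionally unused (there is no 6k block size), which is why jbd_slab_names[] has a NULL hole. A hedged usage sketch follows; the allocation site is hypothetical, but the pairing rule — free with the same size you allocated with — is the real constraint, since the size is what selects the cache:

	/*
	 *   size    size >> 11    cache
	 *   1024        0         "jbd_1k"
	 *   2048        1         "jbd_2k"
	 *   4096        2         "jbd_4k"
	 *   8192        4         "jbd_8k"   (index 3 is the unused hole)
	 */
	char *frozen = jbd_slab_alloc(bh->b_size, GFP_NOFS);	/* never NULL: __GFP_NOFAIL */
	/* ... copy the buffer contents, hand them to the commit code ... */
	jbd_slab_free(frozen, bh->b_size);			/* same size selects the same cache */
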
 /*
  * Journal_head storage management
  */
-static kmem_cache_t *journal_head_cache;
+static struct kmem_cache *journal_head_cache;
 #ifdef CONFIG_JBD_DEBUG
 static atomic_t nr_journal_heads = ATOMIC_INIT(0);
 #endif
@@ -1626,9 +1710,8 @@ static int journal_init_journal_head_cache(void)
        journal_head_cache = kmem_cache_create("journal_head",
                                sizeof(struct journal_head),
                                0,              /* offset */
-                               0,              /* flags */
-                               NULL,           /* ctor */
-                               NULL);          /* dtor */
+                               SLAB_TEMPORARY, /* flags */
+                               NULL);          /* ctor */
        retval = 0;
        if (journal_head_cache == 0) {
                retval = -ENOMEM;
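
SLAB_TEMPORARY is the flag the commit title refers to: it marks journal_head (and, below, handle_t) objects as short-lived so the allocator can group them with other reclaimable allocations; in the kernel headers it is defined in terms of SLAB_RECLAIM_ACCOUNT. A generic lifecycle sketch using the same five-argument kmem_cache_create() form as this diff (names here are illustrative):

	#include <linux/slab.h>

	struct my_obj {
		int state;
	};

	static struct kmem_cache *my_cache;

	static int my_cache_init(void)
	{
		my_cache = kmem_cache_create("my_obj",
					sizeof(struct my_obj),
					0,			/* offset/align */
					SLAB_TEMPORARY,		/* short-lived, reclaimable */
					NULL);			/* no constructor */
		return my_cache ? 0 : -ENOMEM;
	}

	static void my_obj_roundtrip(void)
	{
		struct my_obj *o = kmem_cache_alloc(my_cache, GFP_NOFS);

		if (o)
			kmem_cache_free(my_cache, o);
	}

	static void my_cache_exit(void)
	{
		kmem_cache_destroy(my_cache);
	}
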
@@ -1675,7 +1758,7 @@ static void journal_free_journal_head(struct journal_head *jh)
 {
 #ifdef CONFIG_JBD_DEBUG
        atomic_dec(&nr_journal_heads);
-       memset(jh, 0x5b, sizeof(*jh));
+       memset(jh, JBD_POISON_FREE, sizeof(*jh));
 #endif
        kmem_cache_free(journal_head_cache, jh);
 }
@@ -1798,13 +1881,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
                                printk(KERN_WARNING "%s: freeing "
                                                "b_frozen_data\n",
                                                __FUNCTION__);
-                               kfree(jh->b_frozen_data);
+                               jbd_slab_free(jh->b_frozen_data, bh->b_size);
                        }
                        if (jh->b_committed_data) {
                                printk(KERN_WARNING "%s: freeing "
                                                "b_committed_data\n",
                                                __FUNCTION__);
-                               kfree(jh->b_committed_data);
+                               jbd_slab_free(jh->b_committed_data, bh->b_size);
                        }
                        bh->b_private = NULL;
                        jh->b_bh = NULL;        /* debug, really */
@@ -1916,16 +1999,15 @@ static void __exit remove_jbd_proc_entry(void)
 
 #endif
 
-kmem_cache_t *jbd_handle_cache;
+struct kmem_cache *jbd_handle_cache;
 
 static int __init journal_init_handle_cache(void)
 {
        jbd_handle_cache = kmem_cache_create("journal_handle",
                                sizeof(handle_t),
                                0,              /* offset */
-                               0,              /* flags */
-                               NULL,           /* ctor */
-                               NULL);          /* dtor */
+                               SLAB_TEMPORARY, /* flags */
+                               NULL);          /* ctor */
        if (jbd_handle_cache == NULL) {
                printk(KERN_EMERG "JBD: failed to create handle cache\n");
                return -ENOMEM;
@@ -1960,19 +2042,14 @@ static void journal_destroy_caches(void)
        journal_destroy_revoke_caches();
        journal_destroy_journal_head_cache();
        journal_destroy_handle_cache();
+       journal_destroy_jbd_slabs();
 }
 
 static int __init journal_init(void)
 {
        int ret;
 
-/* Static check for data structure consistency.  There's no code
- * invoked --- we'll just get a linker failure if things aren't right.
- */
-       extern void journal_bad_superblock_size(void);
-       if (sizeof(struct journal_superblock_s) != 1024)
-               journal_bad_superblock_size();
-
+       BUILD_BUG_ON(sizeof(struct journal_superblock_s) != 1024);
 
        ret = journal_init_caches();
        if (ret != 0)
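
The final hunk swaps the old linker trick — declaring an extern function that is never defined and calling it only when the size check fails — for BUILD_BUG_ON(), which turns the same invariant into a compile-time error with a readable message. A minimal illustration (the struct here is a stand-in for journal_superblock_s):

	#include <linux/bug.h>

	struct on_disk_block {
		char bytes[1024];
	};

	static inline void check_on_disk_layout(void)
	{
		/* Fails the build if the on-disk structure size ever drifts. */
		BUILD_BUG_ON(sizeof(struct on_disk_block) != 1024);
	}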