Group short-lived and reclaimable kernel allocations
diff --git a/fs/dcache.c b/fs/dcache.c
index d68631f18df122824a0d36a307c647f15931c7bd..7da0cf50873e5088fea6a496872a28f401f7a899 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -21,7 +21,6 @@
 #include <linux/fsnotify.h>
 #include <linux/slab.h>
 #include <linux/init.h>
-#include <linux/smp_lock.h>
 #include <linux/hash.h>
 #include <linux/cache.h>
 #include <linux/module.h>
@@ -121,6 +120,28 @@ static void dentry_iput(struct dentry * dentry)
        }
 }
 
+/**
+ * d_kill - kill dentry and return parent
+ * @dentry: dentry to kill
+ *
+ * Called with dcache_lock and d_lock, releases both.  The dentry must
+ * already be unhashed and removed from the LRU.
+ *
+ * If this is the root of the dentry tree, return NULL.
+ */
+static struct dentry *d_kill(struct dentry *dentry)
+{
+       struct dentry *parent;
+
+       list_del(&dentry->d_u.d_child);
+       dentry_stat.nr_dentry--;        /* For d_free, below */
+       /* drops the locks; at this point nobody can reach this dentry */
+       dentry_iput(dentry);
+       parent = dentry->d_parent;
+       d_free(dentry);
+       return dentry == parent ? NULL : parent;
+}
+
 /* 
  * This is dput
  *
@@ -189,28 +210,17 @@ repeat:
 
 unhash_it:
        __d_drop(dentry);
-
-kill_it: {
-               struct dentry *parent;
-
-               /* If dentry was on d_lru list
-                * delete it from there
-                */
-               if (!list_empty(&dentry->d_lru)) {
-                       list_del(&dentry->d_lru);
-                       dentry_stat.nr_unused--;
-               }
-               list_del(&dentry->d_u.d_child);
-               dentry_stat.nr_dentry--;        /* For d_free, below */
-               /*drops the locks, at that point nobody can reach this dentry */
-               dentry_iput(dentry);
-               parent = dentry->d_parent;
-               d_free(dentry);
-               if (dentry == parent)
-                       return;
-               dentry = parent;
-               goto repeat;
+kill_it:
+       /* If dentry was on d_lru list
+        * delete it from there
+        */
+       if (!list_empty(&dentry->d_lru)) {
+               list_del(&dentry->d_lru);
+               dentry_stat.nr_unused--;
        }
+       dentry = d_kill(dentry);
+       if (dentry)
+               goto repeat;
 }
 
 /**
@@ -371,22 +381,40 @@ restart:
  * Throw away a dentry - free the inode, dput the parent.  This requires that
  * the LRU list has already been removed.
  *
+ * If prune_parents is true, try to prune ancestors as well.
+ *
  * Called with dcache_lock, drops it and then regains.
  * Called with dentry->d_lock held, drops it.
  */
-static void prune_one_dentry(struct dentry * dentry)
+static void prune_one_dentry(struct dentry * dentry, int prune_parents)
 {
-       struct dentry * parent;
-
        __d_drop(dentry);
-       list_del(&dentry->d_u.d_child);
-       dentry_stat.nr_dentry--;        /* For d_free, below */
-       dentry_iput(dentry);
-       parent = dentry->d_parent;
-       d_free(dentry);
-       if (parent != dentry)
-               dput(parent);
+       dentry = d_kill(dentry);
+       if (!prune_parents) {
+               dput(dentry);
+               spin_lock(&dcache_lock);
+               return;
+       }
+
+       /*
+        * Prune ancestors.  Locking is simpler than in dput(),
+        * because dcache_lock needs to be taken anyway.
+        */
        spin_lock(&dcache_lock);
+       while (dentry) {
+               if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock))
+                       return;
+
+               if (dentry->d_op && dentry->d_op->d_delete)
+                       dentry->d_op->d_delete(dentry);
+               if (!list_empty(&dentry->d_lru)) {
+                       list_del(&dentry->d_lru);
+                       dentry_stat.nr_unused--;
+               }
+               __d_drop(dentry);
+               dentry = d_kill(dentry);
+               spin_lock(&dcache_lock);
+       }
 }
 
 /**
@@ -394,6 +422,7 @@ static void prune_one_dentry(struct dentry * dentry)
  * @count: number of entries to try and free
  * @sb: if given, ignore dentries for other superblocks
  *         which are being unmounted.
+ * @prune_parents: if true, try to prune ancestors as well in one go
  *
  * Shrink the dcache. This is done when we need
  * more memory, or simply when we need to unmount
@@ -404,7 +433,7 @@ static void prune_one_dentry(struct dentry * dentry)
  * all the dentries are in use.
  */
  
-static void prune_dcache(int count, struct super_block *sb)
+static void prune_dcache(int count, struct super_block *sb, int prune_parents)
 {
        spin_lock(&dcache_lock);
        for (; count ; count--) {
@@ -464,7 +493,7 @@ static void prune_dcache(int count, struct super_block *sb)
                 * without taking the s_umount lock (I already hold it).
                 */
                if (sb && dentry->d_sb == sb) {
-                       prune_one_dentry(dentry);
+                       prune_one_dentry(dentry, prune_parents);
                        continue;
                }
                /*
@@ -479,7 +508,7 @@ static void prune_dcache(int count, struct super_block *sb)
                s_umount = &dentry->d_sb->s_umount;
                if (down_read_trylock(s_umount)) {
                        if (dentry->d_sb->s_root != NULL) {
-                               prune_one_dentry(dentry);
+                               prune_one_dentry(dentry, prune_parents);
                                up_read(s_umount);
                                continue;
                        }
@@ -550,7 +579,7 @@ repeat:
                        spin_unlock(&dentry->d_lock);
                        continue;
                }
-               prune_one_dentry(dentry);
+               prune_one_dentry(dentry, 1);
                cond_resched_lock(&dcache_lock);
                goto repeat;
        }
@@ -829,7 +858,7 @@ void shrink_dcache_parent(struct dentry * parent)
        int found;
 
        while ((found = select_parent(parent)) != 0)
-               prune_dcache(found, parent->d_sb);
+               prune_dcache(found, parent->d_sb, 1);
 }
 
 /*
@@ -849,11 +878,16 @@ static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
        if (nr) {
                if (!(gfp_mask & __GFP_FS))
                        return -1;
-               prune_dcache(nr, NULL);
+               prune_dcache(nr, NULL, 1);
        }
        return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
 }
 
+static struct shrinker dcache_shrinker = {
+       .shrink = shrink_dcache_memory,
+       .seeks = DEFAULT_SEEKS,
+};
+
 /**
  * d_alloc     -       allocate a dcache entry
  * @parent: parent of entry to allocate
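The open-coded set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory) call is replaced by the statically
defined dcache_shrinker above, which dcache_init() registers further down via register_shrinker().
A minimal sketch of this struct-shrinker API for a hypothetical cache follows; the my_cache_* names
are illustrative and not part of this patch:

static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	if (nr_to_scan) {
		/* Refuse to reclaim when the caller may not take FS locks. */
		if (!(gfp_mask & __GFP_FS))
			return -1;
		my_cache_prune(nr_to_scan);	/* hypothetical prune helper */
	}
	/* nr_to_scan == 0 means: only report how many objects are freeable. */
	return my_cache_nr_unused();		/* hypothetical counter */
}

static struct shrinker my_cache_shrinker = {
	.shrink	= my_cache_shrink,
	.seeks	= DEFAULT_SEEKS,
};

register_shrinker(&my_cache_shrinker) takes over from the old set_shrinker(seeks, fn) call, and
unregister_shrinker() is the counterpart of the old remove_shrinker() on teardown.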
@@ -869,7 +903,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
        struct dentry *dentry;
        char *dname;
 
-       dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL); 
+       dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
        if (!dentry)
                return NULL;
 
@@ -1823,6 +1857,16 @@ char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
        struct vfsmount *rootmnt;
        struct dentry *root;
 
+       /*
+        * We have various synthetic filesystems that never get mounted.  On
+        * these filesystems dentries are never used for lookup purposes, and
+        * thus don't need to be hashed.  They also don't need a name until a
+        * user wants to identify the object in /proc/pid/fd/.  The little hack
+        * below allows us to generate a name for these objects on demand:
+        */
+       if (dentry->d_op && dentry->d_op->d_dname)
+               return dentry->d_op->d_dname(dentry, buf, buflen);
+
        read_lock(&current->fs->lock);
        rootmnt = mntget(current->fs->rootmnt);
        root = dget(current->fs->root);
@@ -1835,6 +1879,27 @@ char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
        return res;
 }
 
+/*
+ * Helper function for dentry_operations.d_dname() members
+ */
+char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
+                       const char *fmt, ...)
+{
+       va_list args;
+       char temp[64];
+       int sz;
+
+       va_start(args, fmt);
+       sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
+       va_end(args);
+
+       if (sz > sizeof(temp) || sz > buflen)
+               return ERR_PTR(-ENAMETOOLONG);
+
+       buffer += buflen - sz;
+       return memcpy(buffer, temp, sz);
+}
+
 /*
  * NOTE! The user-level library version returns a
  * character pointer. The kernel system call just
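The ->d_dname() hook added to d_path() above lets synthetic, never-mounted filesystems generate a
name only when somebody reads /proc/<pid>/fd, and dynamic_dname() does the buffer arithmetic for
them. A minimal sketch of such an implementation, much as pipefs uses the hook; the examplefs names
are illustrative, not from this patch:

static char *examplefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	/* Format "examplefs:[<ino>]" into the tail of the caller's buffer. */
	return dynamic_dname(dentry, buffer, buflen, "examplefs:[%lu]",
				dentry->d_inode->i_ino);
}

static struct dentry_operations examplefs_dentry_operations = {
	.d_dname	= examplefs_dname,
};

Dentries on such a filesystem then get a usable name on demand without ever being hashed into the
dcache.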
@@ -2052,14 +2117,10 @@ static void __init dcache_init(unsigned long mempages)
         * but it is probably not worth it because of the cache nature
         * of the dcache. 
         */
-       dentry_cache = kmem_cache_create("dentry_cache",
-                                        sizeof(struct dentry),
-                                        0,
-                                        (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
-                                        SLAB_MEM_SPREAD),
-                                        NULL, NULL);
+       dentry_cache = KMEM_CACHE(dentry,
+               SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
        
-       set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory);
+       register_shrinker(&dcache_shrinker);
 
        /* Hash may have been set up in dcache_init_early */
        if (!hashdist)
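KMEM_CACHE() is plain shorthand for the open-coded kmem_cache_create() call it replaces; in the
<linux/slab.h> of this tree it expands to roughly the following, so the struct name doubles as the
cache name and the size and alignment are taken from the struct itself:

#define KMEM_CACHE(__struct, __flags)					\
	kmem_cache_create(#__struct, sizeof(struct __struct),		\
			  __alignof__(struct __struct), (__flags), NULL)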
@@ -2104,10 +2165,10 @@ void __init vfs_caches_init(unsigned long mempages)
        mempages -= reserve;
 
        names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
        filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
        dcache_init(mempages);
        inode_init(mempages);
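The two hunks above drop the destructor argument that kmem_cache_create() no longer accepts; after
this change its prototype is approximately the following, with callers passing only an optional
constructor:

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
			size_t align, unsigned long flags,
			void (*ctor)(void *, struct kmem_cache *, unsigned long));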