/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/genhd.h>
#include <linux/cdev.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;

#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
static struct hlist_head dax_host_list[DAX_HASH_SIZE];
static DEFINE_SPINLOCK(dax_host_lock);
int dax_read_lock(void)
{
	return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);

void dax_read_unlock(int id)
{
	srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);
int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
		pgoff_t *pgoff)
{
	phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;

	if (pgoff)
		*pgoff = PHYS_PFN(phys_off);
	if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(bdev_dax_pgoff);
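
/*
 * Worked example (editor-added, illustrative only): for a partition that
 * starts at sector 2048, bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff)
 * computes phys_off = 2048 * 512 = 1 MiB. On a 4K-page system that is
 * page aligned, so *pgoff = 256 and the function returns 0. A partition
 * starting at sector 2049 would instead yield phys_off = 1 MiB + 512,
 * fail the phys_off % PAGE_SIZE check, and return -EINVAL.
 */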
/**
 * __bdev_dax_supported() - Check if the device supports dax for filesystem
 * @sb: The superblock of the device
 * @blocksize: The block size of the device
 *
 * This is a library function for filesystems to check if the block device
 * can be mounted with dax option.
 *
 * Return: negative errno if unsupported, 0 if supported.
 */
int __bdev_dax_supported(struct super_block *sb, int blocksize)
{
	struct block_device *bdev = sb->s_bdev;
	struct dax_device *dax_dev;
	pgoff_t pgoff;
	void *kaddr;
	pfn_t pfn;
	int err, id;
	long len;

	if (blocksize != PAGE_SIZE) {
		pr_err("VFS (%s): error: unsupported blocksize for dax\n",
				sb->s_id);
		return -EINVAL;
	}

	err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
	if (err) {
		pr_err("VFS (%s): error: unaligned partition for dax\n",
				sb->s_id);
		return err;
	}

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev) {
		pr_err("VFS (%s): error: device does not support dax\n",
				sb->s_id);
		return -EOPNOTSUPP;
	}

	id = dax_read_lock();
	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	dax_read_unlock(id);
	put_dax(dax_dev);

	if (len < 1) {
		pr_err("VFS (%s): error: dax access failed (%ld)\n",
				sb->s_id, len);
		return len < 0 ? len : -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__bdev_dax_supported);
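
/*
 * Illustrative sketch (editor-added, not part of the original file): a
 * filesystem would typically gate its "-o dax" mount option on
 * __bdev_dax_supported(). example_fs_check_dax() and its dax_requested
 * parameter are hypothetical names invented for this example.
 */
static int __maybe_unused example_fs_check_dax(struct super_block *sb,
		bool dax_requested)
{
	if (!dax_requested)
		return 0;
	/* refuse the dax mount option if the block device can't do dax */
	return __bdev_dax_supported(sb, PAGE_SIZE);
}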
/**
 * struct dax_device - anchor object for dax services
 * @inode: core vfs
 * @cdev: optional character interface for "device dax"
 * @host: optional name for lookups where the device path is not available
 * @private: dax driver private data
 * @alive: !alive + rcu grace period == no new operations / mappings
 */
struct dax_device {
	struct hlist_node list;
	struct inode inode;
	struct cdev cdev;
	const char *host;
	void *private;
	bool alive;
	const struct dax_operations *ops;
};
/**
 * dax_direct_access() - translate a device pgoff to an absolute pfn
 * @dax_dev: a dax_device instance representing the logical memory range
 * @pgoff: offset in pages from the start of the device to translate
 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 * @kaddr: output parameter that returns a virtual address mapping of pfn
 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 *
 * Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device relative @pgoff.
 */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn)
{
	long avail;

	/*
	 * The device driver is allowed to sleep, in order to make the
	 * memory directly accessible.
	 */
	might_sleep();

	if (!dax_dev)
		return -EOPNOTSUPP;

	if (!dax_alive(dax_dev))
		return -ENXIO;

	if (nr_pages < 0)
		return nr_pages;

	avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
			kaddr, pfn);
	if (!avail)
		return -ERANGE;
	return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);
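
/*
 * Illustrative sketch (editor-added, hypothetical): the expected calling
 * convention for dax_direct_access(). The caller holds dax_read_lock()
 * across both the translation and any use of the returned mapping, so
 * that kill_dax() cannot complete while the memory is in use.
 * example_dax_read() is a name invented for this example.
 */
static long __maybe_unused example_dax_read(struct dax_device *dax_dev,
		pgoff_t pgoff, void *dst, size_t size)
{
	void *kaddr;
	pfn_t pfn;
	long rc;
	int id;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	if (rc > 0)
		memcpy(dst, kaddr, min_t(size_t, size, PAGE_SIZE));
	dax_read_unlock(id);
	return rc;
}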
bool dax_alive(struct dax_device *dax_dev)
{
	lockdep_assert_held(&dax_srcu);
	return dax_dev->alive;
}
EXPORT_SYMBOL_GPL(dax_alive);

static int dax_host_hash(const char *host)
{
	return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
}
/*
 * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive(), have completed.  Any operations that start after
 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 */
void kill_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;

	dax_dev->alive = false;

	synchronize_srcu(&dax_srcu);

	spin_lock(&dax_host_lock);
	hlist_del_init(&dax_dev->list);
	spin_unlock(&dax_host_lock);

	dax_dev->private = NULL;
}
EXPORT_SYMBOL_GPL(kill_dax);
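
/*
 * Illustrative sketch (editor-added, hypothetical): the expected teardown
 * order in a dax driver's remove path. kill_dax() fences in-flight
 * operations; put_dax() then drops the reference taken by alloc_dax().
 * example_driver_remove() is a name invented for this example.
 */
static void __maybe_unused example_driver_remove(struct dax_device *dax_dev)
{
	kill_dax(dax_dev);	/* no new ops; in-flight ops have drained */
	put_dax(dax_dev);	/* final iput() frees the dax_device */
}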
static struct inode *dax_alloc_inode(struct super_block *sb)
{
	struct dax_device *dax_dev;

	dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
	return &dax_dev->inode;
}

static struct dax_device *to_dax_dev(struct inode *inode)
{
	return container_of(inode, struct dax_device, inode);
}
static void dax_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct dax_device *dax_dev = to_dax_dev(inode);

	kfree(dax_dev->host);
	dax_dev->host = NULL;
	ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
	kmem_cache_free(dax_cache, dax_dev);
}
static void dax_destroy_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);

	WARN_ONCE(dax_dev->alive,
			"kill_dax() must be called before final iput()\n");
	call_rcu(&inode->i_rcu, dax_i_callback);
}

static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.drop_inode = generic_delete_inode,
};
static struct dentry *dax_mount(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
}
static struct file_system_type dax_fs_type = {
	.name = "dax",
	.mount = dax_mount,
	.kill_sb = kill_anon_super,
};
static int dax_test(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	return inode->i_rdev == devt;
}
static int dax_set(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	inode->i_rdev = devt;
	return 0;
}
static struct dax_device *dax_dev_get(dev_t devt)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, &devt);
	if (!inode)
		return NULL;

	dax_dev = to_dax_dev(inode);
	if (inode->i_state & I_NEW) {
		dax_dev->alive = true;
		inode->i_cdev = &dax_dev->cdev;
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}

	return dax_dev;
}
static void dax_add_host(struct dax_device *dax_dev, const char *host)
{
	int hash;

	/*
	 * Unconditionally init dax_dev since it's coming from a
	 * non-zeroed slab cache
	 */
	INIT_HLIST_NODE(&dax_dev->list);
	dax_dev->host = host;
	if (!host)
		return;

	hash = dax_host_hash(host);
	spin_lock(&dax_host_lock);
	hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
	spin_unlock(&dax_host_lock);
}
struct dax_device *alloc_dax(void *private, const char *__host,
		const struct dax_operations *ops)
{
	struct dax_device *dax_dev;
	const char *host;
	dev_t devt;
	int minor;

	host = kstrdup(__host, GFP_KERNEL);
	if (__host && !host)
		return NULL;

	minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
	if (minor < 0)
		goto err_minor;

	devt = MKDEV(MAJOR(dax_devt), minor);
	dax_dev = dax_dev_get(devt);
	if (!dax_dev)
		goto err_dev;

	dax_add_host(dax_dev, host);
	dax_dev->ops = ops;
	dax_dev->private = private;
	return dax_dev;

 err_dev:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	kfree(host);
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_dax);
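
/*
 * Illustrative sketch (editor-added, hypothetical): how a provider driver
 * might create a dax_device. example_ops and drvdata stand in for a real
 * driver's dax_operations and private data; passing the gendisk name as
 * the host enables later dax_get_by_host() lookups by filesystem-dax.
 */
static struct dax_device * __maybe_unused example_provider_init(
		struct gendisk *disk, void *drvdata,
		const struct dax_operations *example_ops)
{
	return alloc_dax(drvdata, disk->disk_name, example_ops);
}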
void put_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;
	iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);
/**
 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
 * @host: alternate name for the device registered by a dax driver
 */
struct dax_device *dax_get_by_host(const char *host)
{
	struct dax_device *dax_dev, *found = NULL;
	int hash, id;

	if (!host)
		return NULL;

	hash = dax_host_hash(host);

	id = dax_read_lock();
	spin_lock(&dax_host_lock);
	hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
		if (!dax_alive(dax_dev)
				|| strcmp(host, dax_dev->host) != 0)
			continue;

		if (igrab(&dax_dev->inode))
			found = dax_dev;
		break;
	}
	spin_unlock(&dax_host_lock);
	dax_read_unlock(id);

	return found;
}
EXPORT_SYMBOL_GPL(dax_get_by_host);
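
/*
 * Illustrative sketch (editor-added, hypothetical): resolving a
 * dax_device from a gendisk name, as __bdev_dax_supported() does above.
 * The reference taken via igrab() must be dropped with put_dax().
 * example_disk_has_dax() is a name invented for this example.
 */
static bool __maybe_unused example_disk_has_dax(struct gendisk *disk)
{
	struct dax_device *dax_dev = dax_get_by_host(disk->disk_name);

	if (!dax_dev)
		return false;
	put_dax(dax_dev);
	return true;
}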
/**
 * inode_dax: convert a public inode into its dax_dev
 * @inode: An inode with i_cdev pointing to a dax_dev
 *
 * Note this is not equivalent to to_dax_dev() which is for private
 * internal use where we know the inode filesystem type == dax_fs_type.
 */
struct dax_device *inode_dax(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);

struct inode *dax_inode(struct dax_device *dax_dev)
{
	return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);

void *dax_get_private(struct dax_device *dax_dev)
{
	return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);
static void init_once(void *_dax_dev)
{
	struct dax_device *dax_dev = _dax_dev;
	struct inode *inode = &dax_dev->inode;

	inode_init_once(inode);
}
static int __dax_fs_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	rc = register_filesystem(&dax_fs_type);
	if (rc)
		goto err_register_fs;

	dax_mnt = kern_mount(&dax_fs_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	unregister_filesystem(&dax_fs_type);
 err_register_fs:
	kmem_cache_destroy(dax_cache);

	return rc;
}
static void __dax_fs_exit(void)
{
	kern_unmount(dax_mnt);
	unregister_filesystem(&dax_fs_type);
	kmem_cache_destroy(dax_cache);
}
static int __init dax_fs_init(void)
{
	int rc;

	rc = __dax_fs_init();
	if (rc)
		return rc;

	rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
	if (rc)
		__dax_fs_exit();
	return rc;
}
static void __exit dax_fs_exit(void)
{
	unregister_chrdev_region(dax_devt, MINORMASK+1);
	ida_destroy(&dax_minor_ida);
	__dax_fs_exit();
}
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_fs_init);
module_exit(dax_fs_exit);