2 * memrar_handler 1.0: An Intel restricted access region handler device
4 * Copyright (C) 2010 Intel Corporation. All rights reserved.
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General
8 * Public License as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be
11 * useful, but WITHOUT ANY WARRANTY; without even the implied
12 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
13 * PURPOSE. See the GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public
15 * License along with this program; if not, write to the Free
16 * Software Foundation, Inc., 59 Temple Place - Suite 330,
17 * Boston, MA 02111-1307, USA.
18 * The full GNU General Public License is included in this
19 * distribution in the file called COPYING.
21 * -------------------------------------------------------------------
23 * Moorestown restricted access regions (RAR) provide isolated
24 * areas of main memory that are only accessible by authorized
27 * The Intel Moorestown RAR handler module exposes a kernel space
28 * RAR memory management mechanism. It is essentially a
29 * RAR-specific allocator.
31 * Besides providing RAR buffer management, the RAR handler also
32 * behaves in many ways like an OS virtual memory manager. For
33 * example, the RAR "handles" created by the RAR handler are
34 * analogous to user space virtual addresses.
36 * RAR memory itself is never accessed directly by the RAR
40 #include <linux/miscdevice.h>
42 #include <linux/slab.h>
43 #include <linux/kref.h>
44 #include <linux/mutex.h>
45 #include <linux/kernel.h>
46 #include <linux/uaccess.h>
48 #include <linux/ioport.h>
51 #include "../rar_register/rar_register.h"
54 #include "memrar_allocator.h"
57 #define MEMRAR_VER "1.0"
60 * Moorestown supports three restricted access regions.
62 * We only care about the first two, video and audio. The third,
63 * reserved for Chaabi and the P-unit, will be handled by their
66 #define MRST_NUM_RAR 2
68 /* ---------------- -------------------- ------------------- */
71 * struct memrar_buffer_info - struct that keeps track of all RAR buffers
72 * @list: Linked list of memrar_buffer_info objects.
73 * @buffer: Core RAR buffer information.
74 * @refcount: Reference count.
75 * @owner: File handle corresponding to process that reserved the
76 * block of memory in RAR. This will be zero for buffers
77 * allocated by other drivers instead of by a user space
80 * This structure encapsulates a linked list of RAR buffers, as well as
81 * other characteristics specific to a given list node, such as the
82 * reference count on the corresponding RAR buffer.
84 struct memrar_buffer_info {
85 struct list_head list; /* node in the per-RAR buffer list */
86 struct RAR_buffer buffer; /* type/size/handle plus bus address */
92 * struct memrar_rar_info - characteristics of a given RAR
93 * @base: Base bus address of the RAR.
94 * @length: Length of the RAR.
95 * @iobase: Virtual address of RAR mapped into kernel.
96 * @allocator: Allocator associated with the RAR. Note the allocator
97 * "capacity" may be smaller than the RAR length if the
98 * length is not a multiple of the configured allocator
100 * @buffers: Table that keeps track of all reserved RAR buffers.
101 * @lock: Lock used to synchronize access to RAR-specific data
104 * Each RAR has an associated memrar_rar_info structure that describes
105 * where in memory the RAR is located, how large it is, and a list of
106 * reserved RAR buffers inside that RAR. Each RAR also has a mutex
107 * associated with it to reduce lock contention when operations on
108 * multiple RARs are performed in parallel.
110 struct memrar_rar_info {
112 unsigned long length; /* RAR length in bytes (high - low + 1) */
113 void __iomem *iobase; /* kernel mapping; doubles as the handle base */
114 struct memrar_allocator *allocator;
115 struct memrar_buffer_info buffers; /* list head of reserved buffers */
117 int allocated; /* True if we own this RAR */
121 * Array of RAR characteristics.
123 static struct memrar_rar_info memrars[MRST_NUM_RAR]; /* indexed by RAR type */
125 /* ---------------- -------------------- ------------------- */
127 /* Validate RAR type. */
128 static inline int memrar_is_valid_rar_type(u32 type)
130 return type == RAR_TYPE_VIDEO || type == RAR_TYPE_AUDIO;
133 /* Check if an address/handle falls with the given RAR memory range. */
134 static inline int memrar_handle_in_range(struct memrar_rar_info *rar,
137 unsigned long const iobase = (unsigned long) (rar->iobase);
138 return (vaddr >= iobase && vaddr < iobase + rar->length);
141 /* Retrieve RAR information associated with the given handle. */
142 static struct memrar_rar_info *memrar_get_rar_info(u32 vaddr)
145 for (i = 0; i < MRST_NUM_RAR; ++i) {
146 struct memrar_rar_info * const rar = &memrars[i];
147 if (memrar_handle_in_range(rar, vaddr))
155 * memrar_get_bus address - handle to bus address
157 * Retrieve bus address from given handle.
159 * Returns address corresponding to given handle. Zero if handle is
162 static dma_addr_t memrar_get_bus_address(
163 struct memrar_rar_info *rar,
166 unsigned long const iobase = (unsigned long) (rar->iobase);
168 if (!memrar_handle_in_range(rar, vaddr))
172 * An assumption is made that the virtual address offset is
173 * the same as the bus address offset, at least based on the
174 * way this driver is implemented. For example, vaddr + 2 ==
177 * @todo Is that a valid assumption?
179 return rar->base + (vaddr - iobase);
183 * memrar_get_physical_address - handle to physical address
185 * Retrieve physical address from given handle.
187 * Returns address corresponding to given handle. Zero if handle is
190 static dma_addr_t memrar_get_physical_address(
191 struct memrar_rar_info *rar,
195 * @todo This assumes that the bus address and physical
196 * address are the same. That is true for Moorestown
197 * but not necessarily on other platforms. This
198 * deficiency should be addressed at some point.
200 return memrar_get_bus_address(rar, vaddr);
204 * memrar_release_block_i - release a block to the pool
205 * @ref: kref embedded in the block's memrar_buffer_info
207 * Core block release code. A node has hit zero references so can
208 * be released and the lists must be updated.
210 * Note: This code removes the node from a list. Make sure any list
211 * iteration is performed using list_for_each_safe().
213 static void memrar_release_block_i(struct kref *ref)
216 * Last reference is being released. Remove from the table,
217 * and reclaim resources.
220 struct memrar_buffer_info * const node =
221 container_of(ref, struct memrar_buffer_info, refcount);
223 struct RAR_block_info * const user_info =
226 struct memrar_allocator * const allocator =
227 memrars[user_info->type].allocator;
/*
 * Unlink from the per-RAR buffer list.  NOTE(review): most callers
 * hold the RAR's lock when the final kref_put() runs; the teardown
 * path (memrar_fini_rar_resources) appears not to — confirm.
 */
229 list_del(&node->list);
/* Hand the reserved region back to the RAR's allocator. */
231 memrar_allocator_free(allocator, user_info->handle);
237 * memrar_init_rar_resources - configure a RAR
238 * @rarnum: rar that has been allocated
239 * @devname: name of our device
241 * Initialize RAR parameters, such as bus addresses, etc and make
242 * the resource accessible.
244 static int memrar_init_rar_resources(int rarnum, char const *devname)
246 /* ---- Sanity Checks ----
247 * 1. RAR bus addresses in both Lincroft and Langwell RAR
248 * registers should be the same.
249 * a. There's no way we can do this through IA.
251 * 2. Secure device ID in Langwell RAR registers should be set
252 * appropriately, e.g. only LPE DMA for the audio RAR, and
253 * security for the other Langwell based RAR registers.
254 * a. There's no way we can do this through IA.
256 * 3. Audio and video RAR registers and RAR access should be
257 * locked down. If not, enable RAR access control. Except
258 * for debugging purposes, there is no reason for them to
260 * a. We can only do this for the Lincroft (IA) side.
262 * @todo Should the RAR handler driver even be aware of audio
263 * and video RAR settings?
267 * RAR buffer block size.
269 * We choose it to be the size of a page to simplify the
270 * /dev/memrar mmap() implementation and usage. Otherwise
271 * paging is not involved once an RAR is locked down.
273 static size_t const RAR_BLOCK_SIZE = PAGE_SIZE;
275 dma_addr_t low, high;
276 struct memrar_rar_info * const rar = &memrars[rarnum];
/* These can only fire on a programming error, never on user input. */
278 BUG_ON(MRST_NUM_RAR != ARRAY_SIZE(memrars));
279 BUG_ON(!memrar_is_valid_rar_type(rarnum));
280 BUG_ON(rar->allocated);
282 mutex_init(&rar->lock);
285 * Initialize the process table before we reach any
286 * code that exit on failure since the finalization
287 * code requires an initialized list.
289 INIT_LIST_HEAD(&rar->buffers.list);
/* Query the rar_register driver for this RAR's bus address range. */
291 if (rar_get_address(rarnum, &low, &high) != 0)
292 /* No RAR is available. */
295 if (low == 0 || high == 0) {
299 rar->allocator = NULL;
304 * @todo Verify that LNC and LNW RAR register contents
305 * addresses, security, etc are compatible and
/* Range is inclusive, hence the +1. */
309 rar->length = high - low + 1;
311 /* Claim RAR memory as our own. */
312 if (request_mem_region(low, rar->length, devname) == NULL) {
314 pr_err("%s: Unable to claim RAR[%d] memory.\n", devname, rarnum);
315 pr_err("%s: RAR[%d] disabled.\n", devname, rarnum);
322 * Now map it into the kernel address space.
324 * Note that the RAR memory may only be accessed by IA
325 * when debugging. Otherwise attempts to access the
326 * RAR memory when it is locked down will result in
327 * behavior similar to writing to /dev/null and
328 * reading from /dev/zero. This behavior is enforced
329 * by the hardware. Even if we don't access the
330 * memory, mapping it into the kernel provides us with
331 * a convenient RAR handle to bus address mapping.
/* rar->base is presumably assigned from 'low' in elided code — confirm. */
333 rar->iobase = ioremap_nocache(rar->base, rar->length);
334 if (rar->iobase == NULL) {
335 pr_err("%s: Unable to map RAR memory.\n", devname);
336 release_mem_region(low, rar->length);
340 /* Initialize corresponding memory allocator. */
341 rar->allocator = memrar_create_allocator((unsigned long) rar->iobase,
342 rar->length, RAR_BLOCK_SIZE);
343 if (rar->allocator == NULL) {
344 iounmap(rar->iobase);
345 release_mem_region(low, rar->length);
349 pr_info("%s: BRAR[%d] bus address range = [0x%lx, 0x%lx]\n",
350 devname, rarnum, (unsigned long) low, (unsigned long) high);
352 pr_info("%s: BRAR[%d] size = %zu KiB\n",
353 devname, rarnum, rar->allocator->capacity / 1024);
360 * memrar_fini_rar_resources - free up RAR resources
362 * Finalize RAR resources. Free up the resource tables, hand the memory
363 * back to the kernel, unmap the device and release the address space.
365 static void memrar_fini_rar_resources(void)
368 struct memrar_buffer_info *pos;
369 struct memrar_buffer_info *tmp;
372 * @todo Do we need to hold a lock at this point in time?
373 * (module initialization failure or exit?)
/* Walk the RARs in reverse index order. */
376 for (z = MRST_NUM_RAR; z-- != 0; ) {
377 struct memrar_rar_info * const rar = &memrars[z];
382 /* Clean up remaining resources. */
/* Drop any buffers still outstanding (leaked by their owners). */
384 list_for_each_entry_safe(pos,
388 kref_put(&pos->refcount, memrar_release_block_i);
391 memrar_destroy_allocator(rar->allocator);
392 rar->allocator = NULL;
/* Undo the ioremap and the request_mem_region claim, in that order. */
394 iounmap(rar->iobase);
395 release_mem_region(rar->base, rar->length);
406 * memrar_reserve_block - handle an allocation request
407 * @request: block being requested
408 * @filp: owner it is tied to; NULL for kernel-side callers (rar_reserve)
410 * Allocate a block of the requested RAR. If successful return the
411 * request object filled in and zero, if not report an error code
414 static long memrar_reserve_block(struct RAR_buffer *request,
417 struct RAR_block_info * const rinfo = &request->info;
418 struct RAR_buffer *buffer;
419 struct memrar_buffer_info *buffer_info;
421 struct memrar_rar_info *rar = NULL;
423 /* Prevent array overflow. */
424 if (!memrar_is_valid_rar_type(rinfo->type))
427 rar = &memrars[rinfo->type];
431 /* Reserve memory in RAR. */
/* The handle is a kernel virtual address within the RAR mapping. */
432 handle = memrar_allocator_alloc(rar->allocator, rinfo->size);
436 buffer_info = kmalloc(sizeof(*buffer_info), GFP_KERNEL);
438 if (buffer_info == NULL) {
/* Undo the allocator reservation before bailing out. */
439 memrar_allocator_free(rar->allocator, handle);
443 buffer = &buffer_info->buffer;
444 buffer->info.type = rinfo->type;
445 buffer->info.size = rinfo->size;
447 /* Memory handle corresponding to the bus address. */
448 buffer->info.handle = handle;
449 buffer->bus_address = memrar_get_bus_address(rar, handle);
452 * Keep track of owner so that we can later cleanup if
455 buffer_info->owner = filp;
/* New buffers start with a single reference held by the owner. */
457 kref_init(&buffer_info->refcount);
459 mutex_lock(&rar->lock);
460 list_add(&buffer_info->list, &rar->buffers.list);
461 mutex_unlock(&rar->lock);
/* Report the handle and bus address back to the caller. */
463 rinfo->handle = buffer->info.handle;
464 request->bus_address = buffer->bus_address;
470 * memrar_release_block - release a RAR block
471 * @addr: address in RAR space
473 * Release a previously allocated block. Releases act on complete
474 * blocks, partially freeing a block is not supported
477 static long memrar_release_block(u32 addr)
479 struct memrar_buffer_info *pos;
480 struct memrar_buffer_info *tmp;
481 struct memrar_rar_info * const rar = memrar_get_rar_info(addr);
/* -EINVAL until a matching buffer is found. */
482 long result = -EINVAL;
/* NOTE(review): a NULL check on 'rar' is expected before this point
 * (elided from this view) — confirm, since rar is dereferenced below. */
487 mutex_lock(&rar->lock);
490 * Iterate through the buffer list to find the corresponding
491 * buffer to be released.
493 list_for_each_entry_safe(pos,
497 struct RAR_block_info * const info =
501 * Take into account handle offsets that may have been
502 * added to the base handle, such as in the following
505 * u32 handle = base + offset;
506 * rar_handle_to_bus(handle);
507 * rar_release(handle);
509 if (addr >= info->handle
510 && addr < (info->handle + info->size)
511 && memrar_is_valid_rar_type(info->type)) {
/* Drop one reference; the block is freed when it hits zero. */
512 kref_put(&pos->refcount, memrar_release_block_i);
518 mutex_unlock(&rar->lock);
524 * memrar_get_stat - read statistics for a RAR
525 * @r: statistics to be filled in; @r->type selects the RAR
527 * Returns the statistics data for the RAR, or an error code if
528 * the request cannot be completed
530 static long memrar_get_stat(struct RAR_stat *r)
532 struct memrar_allocator *allocator;
/* Reject types outside the memrars[] array bounds. */
534 if (!memrar_is_valid_rar_type(r->type))
/* The RAR must have been successfully initialized. */
537 if (!memrars[r->type].allocated)
540 allocator = memrars[r->type].allocator;
542 BUG_ON(allocator == NULL);
545 * Allocator capacity doesn't change over time. No
546 * need to synchronize.
548 r->capacity = allocator->capacity;
/* largest_free_area is mutable state; read it under the allocator lock. */
550 mutex_lock(&allocator->lock);
551 r->largest_block_size = allocator->largest_free_area;
552 mutex_unlock(&allocator->lock);
557 * memrar_ioctl - ioctl callback
558 * @filp: file issuing the request
560 * @arg: pointer to control information
562 * Perform one of the ioctls supported by the memrar device
565 static long memrar_ioctl(struct file *filp,
569 void __user *argp = (void __user *)arg;
572 struct RAR_buffer buffer;
573 struct RAR_block_info * const request = &buffer.info;
574 struct RAR_stat rar_info;
578 case RAR_HANDLER_RESERVE:
579 if (copy_from_user(request,
584 result = memrar_reserve_block(&buffer, filp);
/*
 * NOTE(review): this returns copy_to_user()'s count of uncopied
 * bytes straight to user space instead of mapping it to -EFAULT,
 * unlike the RAR_HANDLER_STAT case below which does
 * "? -EFAULT : 0".  The two cases should be made consistent.
 */
588 return copy_to_user(argp, request, sizeof(*request));
590 case RAR_HANDLER_RELEASE:
591 if (copy_from_user(&rar_handle,
596 return memrar_release_block(rar_handle);
598 case RAR_HANDLER_STAT:
599 if (copy_from_user(&rar_info,
605 * Populate the RAR_stat structure based on the RAR
606 * type given by the user
608 if (memrar_get_stat(&rar_info) != 0)
612 * @todo Do we need to verify destination pointer
613 * "argp" is non-zero? Is that already done by
616 return copy_to_user(argp,
618 sizeof(rar_info)) ? -EFAULT : 0;
628 * memrar_mmap - mmap helper for debugging
629 * @filp: handle doing the mapping
632 * Support the mmap operation on the RAR space for debugging systems
633 * when the memory is not locked down.
636 static int memrar_mmap(struct file *filp, struct vm_area_struct *vma)
639 * This mmap() implementation is predominantly useful for
640 * debugging since the CPU will be prevented from accessing
641 * RAR memory by the hardware when RAR is properly locked
644 * In order for this implementation to be useful RAR memory
645 * must not be locked down. However, we only want to do
646 * that when debugging. DO NOT leave RAR memory unlocked in a
647 * deployed device that utilizes RAR.
650 size_t const size = vma->vm_end - vma->vm_start;
652 /* Users pass the RAR handle as the mmap() offset parameter. */
653 unsigned long const handle = vma->vm_pgoff << PAGE_SHIFT;
655 struct memrar_rar_info * const rar = memrar_get_rar_info(handle);
658 /* Only allow privileged apps to go poking around this way */
659 if (!capable(CAP_SYS_RAWIO))
662 /* Invalid RAR handle or size passed to mmap(). */
665 || size > (handle - (unsigned long) rar->iobase))
669 * Retrieve physical address corresponding to the RAR handle,
670 * and convert it to a page frame.
672 pfn = memrar_get_physical_address(rar, handle) >> PAGE_SHIFT;
675 pr_debug("memrar: mapping RAR range [0x%lx, 0x%lx) into user space.\n",
680 * Map RAR memory into user space. This is really only useful
681 * for debugging purposes since the memory won't be
682 * accessible, i.e. reads return zero and writes are ignored,
683 * when RAR access control is enabled.
685 if (remap_pfn_range(vma,
692 /* vma->vm_ops = &memrar_mem_ops; */
698 * memrar_open - device open method
699 * @inode: inode to open
 * @filp: file handle being opened
702 * As we support multiple arbitrary opens there is no work to be done
706 static int memrar_open(struct inode *inode, struct file *filp)
/* Mark the handle non-seekable; a file position has no meaning here. */
708 nonseekable_open(inode, filp);
713 * memrar_release - close method for miscdev
714 * @inode: inode of device
715 * @filp: handle that is going away
717 * Free up all the regions that belong to this file handle. We use
718 * the handle as a natural Linux style 'lifetime' indicator and to
719 * ensure resources are not leaked when their owner explodes in an
723 static int memrar_release(struct inode *inode, struct file *filp)
725 /* Free all regions associated with the given file handle. */
727 struct memrar_buffer_info *pos;
728 struct memrar_buffer_info *tmp;
/* Scan every RAR; this file may hold buffers in any of them. */
731 for (z = 0; z != MRST_NUM_RAR; ++z) {
732 struct memrar_rar_info * const rar = &memrars[z];
734 mutex_lock(&rar->lock);
736 list_for_each_entry_safe(pos,
/* Drop the owner's reference on each buffer it reserved. */
740 if (filp == pos->owner)
741 kref_put(&pos->refcount,
742 memrar_release_block_i);
745 mutex_unlock(&rar->lock);
752 * rar_reserve - reserve RAR memory
753 * @buffers: buffers to reserve
754 * @count: number wanted
756 * Reserve a series of buffers in the RAR space. Returns the number of
757 * buffers successfully allocated
760 size_t rar_reserve(struct RAR_buffer *buffers, size_t count)
/* A NULL @buffers yields end == buffers, so the loop body never runs. */
762 struct RAR_buffer * const end =
763 (buffers == NULL ? buffers : buffers + count);
764 struct RAR_buffer *i;
766 size_t reserve_count = 0;
/* Kernel-side reservation: owner (filp) is NULL, see memrar_reserve_block. */
768 for (i = buffers; i != end; ++i) {
769 if (memrar_reserve_block(i, NULL) == 0)
775 return reserve_count;
777 EXPORT_SYMBOL(rar_reserve);
780 * rar_release - return RAR buffers
781 * @buffers: buffers to release
782 * @count: number of buffers to release
784 * Return a set of buffers to the RAR pool. Returns the number of
 * buffers successfully released.
787 size_t rar_release(struct RAR_buffer *buffers, size_t count)
789 struct RAR_buffer * const end =
790 (buffers == NULL ? buffers : buffers + count);
791 struct RAR_buffer *i;
793 size_t release_count = 0;
795 for (i = buffers; i != end; ++i) {
796 u32 * const handle = &i->info.handle;
797 if (memrar_release_block(*handle) == 0) {
799 * @todo We assume we should do this each time
800 * the ref count is decremented. Should
801 * we instead only do this when the ref
802 * count has dropped to zero, and the
803 * buffer has been completely
811 return release_count;
813 EXPORT_SYMBOL(rar_release);
816 * rar_handle_to_bus - RAR to bus address
817 * @buffers: RAR buffer structure
818 * @count: number of buffers to convert
820 * Turn a list of RAR handle mappings into actual bus addresses. Note
821 * that when the device is locked down the bus addresses in question
822 * are not CPU accessible.
 *
 * Returns the number of handles successfully converted.  Each
 * converted buffer gains a reference (kref_get) that the caller must
 * later drop via rar_release().
825 size_t rar_handle_to_bus(struct RAR_buffer *buffers, size_t count)
827 struct RAR_buffer * const end =
828 (buffers == NULL ? buffers : buffers + count);
829 struct RAR_buffer *i;
830 struct memrar_buffer_info *pos;
832 size_t conversion_count = 0;
835 * Find all bus addresses corresponding to the given handles.
837 * @todo Not liking this nested loop. Optimize.
839 for (i = buffers; i != end; ++i) {
840 struct memrar_rar_info * const rar =
841 memrar_get_rar_info(i->info.handle);
844 * Check if we have a bogus handle, and then continue
845 * with remaining buffers.
852 mutex_lock(&rar->lock);
854 list_for_each_entry(pos, &rar->buffers.list, list) {
855 struct RAR_block_info * const user_info =
859 * Take into account handle offsets that may
860 * have been added to the base handle, such as
861 * in the following scenario:
863 * u32 handle = base + offset;
864 * rar_handle_to_bus(handle);
867 if (i->info.handle >= user_info->handle
868 && i->info.handle < (user_info->handle
869 + user_info->size)) {
/* Offset of the requested handle within the reserved block. */
871 i->info.handle - user_info->handle;
873 i->info.type = user_info->type;
874 i->info.size = user_info->size - offset;
876 pos->buffer.bus_address
879 /* Increment the reference count. */
880 kref_get(&pos->refcount);
889 mutex_unlock(&rar->lock);
892 return conversion_count;
894 EXPORT_SYMBOL(rar_handle_to_bus);
/* File operations for /dev/memrar (ioctl/mmap/open handlers elsewhere). */
896 static const struct file_operations memrar_fops = {
897 .owner = THIS_MODULE,
898 .unlocked_ioctl = memrar_ioctl,
901 .release = memrar_release,
904 static struct miscdevice memrar_miscdev = {
905 .minor = MISC_DYNAMIC_MINOR, /* dynamic allocation */
906 .name = "memrar", /* /dev/memrar */
/* Boot-time banner; __initdata, so discarded after module init. */
910 static char const banner[] __initdata =
912 "Intel RAR Handler: " MEMRAR_VER " initialized.\n";
915 * memrar_registration_callback - RAR obtained
918 * We have been granted ownership of the RAR. Add it to our memory
922 static int memrar_registration_callback(unsigned long rar)
925 * We initialize the RAR parameters early on so that we can
926 * discontinue memrar device initialization and registration
927 * if suitably configured RARs are not available.
929 return memrar_init_rar_resources(rar, memrar_miscdev.name);
933 * memrar_init - initialise RAR support
935 * Initialise support for RAR handlers. This may get loaded before
936 * the RAR support is activated, but the callbacks on the registration
937 * will handle that situation for us anyway.
940 static int __init memrar_init(void)
/* Expose /dev/memrar before claiming the RARs. */
946 err = misc_register(&memrar_miscdev);
950 /* Now claim the two RARs we want */
951 err = register_rar(0, memrar_registration_callback, 0);
955 err = register_rar(1, memrar_registration_callback, 1);
959 /* It is possible rar 0 registered and allocated resources then rar 1
960 failed so do a full resource free */
961 memrar_fini_rar_resources();
963 misc_deregister(&memrar_miscdev);
968 * memrar_exit - unregister and unload
970 * Unregister the device and then unload any mappings and release
974 static void __exit memrar_exit(void)
/* Remove the misc device first, then tear down per-RAR resources. */
976 misc_deregister(&memrar_miscdev);
977 memrar_fini_rar_resources();
981 module_init(memrar_init);
982 module_exit(memrar_exit);
985 MODULE_AUTHOR("Ossama Othman <ossama.othman@intel.com>");
986 MODULE_DESCRIPTION("Intel Restricted Access Region Handler");
987 MODULE_LICENSE("GPL");
988 MODULE_VERSION(MEMRAR_VER);
994 c-file-style: "linux"