#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

#include "vmregion.h"

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vmregion   region;
 *    unsigned long     flags;
 *    struct page       **pages;
 *    unsigned int      nr_pages;
 *    unsigned long     phys_addr;
 *  };
 *
 * get_vm_area() would then call vmregion_alloc with an appropriate
 * struct vmregion head (eg):
 *
 *  struct vmregion vmalloc_head = {
 *      .vm_list        = LIST_HEAD_INIT(vmalloc_head.vm_list),
 *      .vm_start       = VMALLOC_START,
 *      .vm_end         = VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vmregion_alloc().
 */
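
/*
 * For the ARM-private types below, a head might be set up as follows
 * (a minimal sketch with made-up addresses; the in-tree user is the
 * DMA coherent mapping code, which sets its head up at boot).  Note
 * that vm_lock must be initialised as well:
 */
#if 0	/* example only, not compiled */
static struct arm_vmregion_head example_head = {
	.vm_lock	= __SPIN_LOCK_UNLOCKED(example_head.vm_lock),
	.vm_list	= LIST_HEAD_INIT(example_head.vm_list),
	.vm_start	= 0xffc00000,
	.vm_end		= 0xffe00000,
};
#endif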

/*
 * Allocate a region within [head->vm_start, head->vm_end).  The list
 * is kept sorted by ascending address; a first-fit scan tries the gap
 * before each existing region, aligning each candidate that follows a
 * region to "align".  Returns the new region, or NULL on failure.
 */
struct arm_vmregion *
arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
		   size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct arm_vmregion *c, *new;

	if (head->vm_end - head->vm_start < size) {
		printk(KERN_WARNING "%s: allocation too big (requested %zu)\n",
			__func__, size);
		goto out;
	}

	new = kmalloc(sizeof(struct arm_vmregion), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&head->vm_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		/* give up if the candidate address would wrap around */
		if ((addr + size) < addr)
			goto nospc;
		/* does the request fit in the gap before this region? */
		if ((addr + size) <= c->vm_start)
			goto found;
		/* no: try the aligned address just past this region */
		addr = ALIGN(c->vm_end, align);
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;
	new->vm_active = 1;

	spin_unlock_irqrestore(&head->vm_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&head->vm_lock, flags);
	kfree(new);
 out:
	return NULL;
}
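
/*
 * Worked example of the first-fit scan above (made-up numbers): with
 * vm_start = 0x1000 and regions [0x1000,0x3000) and [0x5000,0x6000)
 * already on the list, a request for size 0x2000 with align 0x1000
 * starts at addr = 0x1000; the first region leaves no room before it,
 * so addr becomes ALIGN(0x3000, 0x1000) = 0x3000; then 0x3000 + 0x2000
 * <= 0x5000, so the new region [0x3000,0x5000) is inserted before the
 * second entry.
 */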

/*
 * Look up an active region by its start address.  The caller must hold
 * head->vm_lock.
 */
static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_active && c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	c = __arm_vmregion_find(head, addr);
	spin_unlock_irqrestore(&head->vm_lock, flags);
	return c;
}

/*
 * Find an active region and atomically mark it inactive, so that
 * further lookups skip it while the caller tears it down.
 */
struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	c = __arm_vmregion_find(head, addr);
	if (c)
		c->vm_active = 0;
	spin_unlock_irqrestore(&head->vm_lock, flags);
	return c;
}

void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
{
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	list_del(&c->vm_list);
	spin_unlock_irqrestore(&head->vm_lock, flags);

	kfree(c);
}
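
/*
 * Lifecycle sketch (hypothetical; example_head as above, PAGE_SIZE and
 * SZ_64K from the usual headers): allocate a region, retire it by its
 * start address so concurrent lookups stop seeing it, then free it
 * once the underlying mapping is gone.
 */
#if 0	/* example only, not compiled */
static void example_lifecycle(void)
{
	struct arm_vmregion *c;

	c = arm_vmregion_alloc(&example_head, PAGE_SIZE, SZ_64K, GFP_KERNEL);
	if (!c)
		return;

	/* ... map and use [c->vm_start, c->vm_end) ... */

	c = arm_vmregion_find_remove(&example_head, c->vm_start);

	/* ... tear down the mapping ... */

	arm_vmregion_free(&example_head, c);
}
#endif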