#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "vmregion.h"

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vmregion	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vmregion_alloc with an appropriate
 * struct vmregion head (eg):
 *
 *  struct vmregion vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vmregion_alloc().
 */
39 | ||
40 | struct arm_vmregion * | |
5bc23d32 | 41 | arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align, |
45cd5290 | 42 | size_t size, gfp_t gfp, const void *caller) |
13ccf3ad | 43 | { |
459c1517 | 44 | unsigned long start = head->vm_start, addr = head->vm_end; |
13ccf3ad RK |
45 | unsigned long flags; |
46 | struct arm_vmregion *c, *new; | |
47 | ||
48 | if (head->vm_end - head->vm_start < size) { | |
49 | printk(KERN_WARNING "%s: allocation too big (requested %#x)\n", | |
50 | __func__, size); | |
51 | goto out; | |
52 | } | |
53 | ||
54 | new = kmalloc(sizeof(struct arm_vmregion), gfp); | |
55 | if (!new) | |
56 | goto out; | |
57 | ||
45cd5290 RK |
58 | new->caller = caller; |
59 | ||
13ccf3ad RK |
60 | spin_lock_irqsave(&head->vm_lock, flags); |
61 | ||
459c1517 RK |
62 | addr = rounddown(addr - size, align); |
63 | list_for_each_entry_reverse(c, &head->vm_list, vm_list) { | |
64 | if (addr >= c->vm_end) | |
13ccf3ad | 65 | goto found; |
459c1517 RK |
66 | addr = rounddown(c->vm_start - size, align); |
67 | if (addr < start) | |
13ccf3ad RK |
68 | goto nospc; |
69 | } | |
70 | ||
71 | found: | |
72 | /* | |
459c1517 | 73 | * Insert this entry after the one we found. |
13ccf3ad | 74 | */ |
459c1517 | 75 | list_add(&new->vm_list, &c->vm_list); |
13ccf3ad RK |
76 | new->vm_start = addr; |
77 | new->vm_end = addr + size; | |
78 | new->vm_active = 1; | |
79 | ||
80 | spin_unlock_irqrestore(&head->vm_lock, flags); | |
81 | return new; | |
82 | ||
83 | nospc: | |
84 | spin_unlock_irqrestore(&head->vm_lock, flags); | |
85 | kfree(new); | |
86 | out: | |
87 | return NULL; | |
88 | } | |
89 | ||
90 | static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr) | |
91 | { | |
92 | struct arm_vmregion *c; | |
93 | ||
94 | list_for_each_entry(c, &head->vm_list, vm_list) { | |
95 | if (c->vm_active && c->vm_start == addr) | |
96 | goto out; | |
97 | } | |
98 | c = NULL; | |
99 | out: | |
100 | return c; | |
101 | } | |
102 | ||
103 | struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr) | |
104 | { | |
105 | struct arm_vmregion *c; | |
106 | unsigned long flags; | |
107 | ||
108 | spin_lock_irqsave(&head->vm_lock, flags); | |
109 | c = __arm_vmregion_find(head, addr); | |
110 | spin_unlock_irqrestore(&head->vm_lock, flags); | |
111 | return c; | |
112 | } | |
113 | ||
114 | struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr) | |
115 | { | |
116 | struct arm_vmregion *c; | |
117 | unsigned long flags; | |
118 | ||
119 | spin_lock_irqsave(&head->vm_lock, flags); | |
120 | c = __arm_vmregion_find(head, addr); | |
121 | if (c) | |
122 | c->vm_active = 0; | |
123 | spin_unlock_irqrestore(&head->vm_lock, flags); | |
124 | return c; | |
125 | } | |
126 | ||
127 | void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c) | |
128 | { | |
129 | unsigned long flags; | |
130 | ||
131 | spin_lock_irqsave(&head->vm_lock, flags); | |
132 | list_del(&c->vm_list); | |
133 | spin_unlock_irqrestore(&head->vm_lock, flags); | |
134 | ||
135 | kfree(c); | |
136 | } | |
45cd5290 RK |
137 | |
138 | #ifdef CONFIG_PROC_FS | |
139 | static int arm_vmregion_show(struct seq_file *m, void *p) | |
140 | { | |
141 | struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list); | |
142 | ||
143 | seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end, | |
144 | c->vm_end - c->vm_start); | |
145 | if (c->caller) | |
146 | seq_printf(m, " %pS", (void *)c->caller); | |
147 | seq_putc(m, '\n'); | |
148 | return 0; | |
149 | } | |
150 | ||
151 | static void *arm_vmregion_start(struct seq_file *m, loff_t *pos) | |
152 | { | |
153 | struct arm_vmregion_head *h = m->private; | |
154 | spin_lock_irq(&h->vm_lock); | |
155 | return seq_list_start(&h->vm_list, *pos); | |
156 | } | |
157 | ||
158 | static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos) | |
159 | { | |
160 | struct arm_vmregion_head *h = m->private; | |
161 | return seq_list_next(p, &h->vm_list, pos); | |
162 | } | |
163 | ||
164 | static void arm_vmregion_stop(struct seq_file *m, void *p) | |
165 | { | |
166 | struct arm_vmregion_head *h = m->private; | |
167 | spin_unlock_irq(&h->vm_lock); | |
168 | } | |
169 | ||
170 | static const struct seq_operations arm_vmregion_ops = { | |
171 | .start = arm_vmregion_start, | |
172 | .stop = arm_vmregion_stop, | |
173 | .next = arm_vmregion_next, | |
174 | .show = arm_vmregion_show, | |
175 | }; | |
176 | ||
177 | static int arm_vmregion_open(struct inode *inode, struct file *file) | |
178 | { | |
179 | struct arm_vmregion_head *h = PDE(inode)->data; | |
180 | int ret = seq_open(file, &arm_vmregion_ops); | |
181 | if (!ret) { | |
182 | struct seq_file *m = file->private_data; | |
183 | m->private = h; | |
184 | } | |
185 | return ret; | |
186 | } | |
187 | ||
188 | static const struct file_operations arm_vmregion_fops = { | |
189 | .open = arm_vmregion_open, | |
190 | .read = seq_read, | |
191 | .llseek = seq_lseek, | |
192 | .release = seq_release, | |
193 | }; | |
194 | ||
195 | int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h) | |
196 | { | |
197 | proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h); | |
198 | return 0; | |
199 | } | |
200 | #else | |
201 | int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h) | |
202 | { | |
203 | return 0; | |
204 | } | |
205 | #endif |