drm: fix some lindent damage
drivers/char/drm/i915_mem.c (deliverable/linux.git)
/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
 */
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

/* This memory manager is integrated into the global/local lru
 * mechanisms used by the clients. Specifically, it operates by
 * setting the 'in_use' fields of the global LRU to indicate whether
 * this region is privately allocated to a client.
 *
 * This does require the client to actually respect that field.
 *
 * Currently no effort is made to allocate 'private' memory in any
 * clever way - the LRU information isn't used to determine which
 * block to allocate, and the ring is drained prior to allocations --
 * in other words allocation is expensive.
 */
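/* Illustrative example (hypothetical numbers, not taken from the driver):
 * with tex_lru_log_granularity = 16 each LRU region covers 64KB, so a
 * block starting at 0x20000 with size 0x30000 maps to region indices 2
 * through 4, and mark_block() below flags exactly those entries of the
 * sarea texture list.
 */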
static void mark_block(drm_device_t * dev, struct mem_block *p, int in_use)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
        drm_tex_region_t *list;
        unsigned shift, nr;
        unsigned start;
        unsigned end;
        unsigned i;
        int age;

        shift = dev_priv->tex_lru_log_granularity;
        nr = I915_NR_TEX_REGIONS;

        start = p->start >> shift;
        end = (p->start + p->size - 1) >> shift;

        age = ++sarea_priv->texAge;
        list = sarea_priv->texList;

        /* Mark the regions with the new flag and update their age. Move
         * them to head of list to preserve LRU semantics.
         */
        for (i = start; i <= end; i++) {
                list[i].in_use = in_use;
                list[i].age = age;

                /* remove_from_list(i)
                 */
                list[(unsigned)list[i].next].prev = list[i].prev;
                list[(unsigned)list[i].prev].next = list[i].next;

                /* insert_at_head(list, i)
                 */
                list[i].prev = nr;
                list[i].next = list[nr].next;
                list[(unsigned)list[nr].next].prev = i;
                list[nr].next = i;
        }
}

/* Very simple allocator for agp memory, working on a static range
 * already mapped into each client's address space.
 */

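/* Blocks form a circular, doubly linked list kept in address order around
 * a sentinel node (the 'heap' pointer itself). A block with filp == NULL
 * is free; otherwise filp records which client currently owns it.
 */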
static struct mem_block *split_block(struct mem_block *p, int start, int size,
                                     DRMFILE filp)
{
        /* Maybe cut off the start of an existing block */
        if (start > p->start) {
                struct mem_block *newblock =
                    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
                if (!newblock)
                        goto out;
                newblock->start = start;
                newblock->size = p->size - (start - p->start);
                newblock->filp = NULL;
                newblock->next = p->next;
                newblock->prev = p;
                p->next->prev = newblock;
                p->next = newblock;
                p->size -= newblock->size;
                p = newblock;
        }

        /* Maybe cut off the end of an existing block */
        if (size < p->size) {
                struct mem_block *newblock =
                    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
                if (!newblock)
                        goto out;
                newblock->start = start + size;
                newblock->size = p->size - size;
                newblock->filp = NULL;
                newblock->next = p->next;
                newblock->prev = p;
                p->next->prev = newblock;
                p->next = newblock;
                p->size = size;
        }

      out:
        /* Our block is in the middle */
        p->filp = filp;
        return p;
}
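/* Example for split_block() above (illustrative values): splitting a free
 * block covering [0, 1024) for an allocation of size 256 at start 128
 * leaves three blocks on the list -- [0, 128) free, [128, 384) owned by
 * filp, and [384, 1024) free -- and returns the middle block.
 */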

static struct mem_block *alloc_block(struct mem_block *heap, int size,
                                     int align2, DRMFILE filp)
{
        struct mem_block *p;
        int mask = (1 << align2) - 1;

        for (p = heap->next; p != heap; p = p->next) {
                int start = (p->start + mask) & ~mask;
                if (p->filp == NULL && start + size <= p->start + p->size)
                        return split_block(p, start, size, filp);
        }

        return NULL;
}
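/* In alloc_block() above, align2 is the log2 of the required alignment:
 * mask = (1 << align2) - 1, and (p->start + mask) & ~mask rounds the
 * candidate start up to the next aligned offset. With align2 = 12 (the
 * minimum enforced by the alloc ioctl below) every allocation starts on a
 * 4KB boundary.
 */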

static struct mem_block *find_block(struct mem_block *heap, int start)
{
        struct mem_block *p;

        for (p = heap->next; p != heap; p = p->next)
                if (p->start == start)
                        return p;

        return NULL;
}

static void free_block(struct mem_block *p)
{
        p->filp = NULL;

        /* Assumes a single contiguous range. Needs a special filp in
         * 'heap' to stop it being subsumed.
         */
        if (p->next->filp == NULL) {
                struct mem_block *q = p->next;
                p->size += q->size;
                p->next = q->next;
                p->next->prev = p;
                drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
        }

        if (p->prev->filp == NULL) {
                struct mem_block *q = p->prev;
                q->size += p->size;
                q->next = p->next;
                q->next->prev = q;
                drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS);
        }
}
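/* free_block() only merges a freed block with its immediate neighbours; the
 * heap sentinel carries filp == (DRMFILE) -1 (see init_heap() below) so that
 * this coalescing never folds a real block into the list head.
 */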

/* Initialize. How to check for an uninitialized heap?
 */
static int init_heap(struct mem_block **heap, int start, int size)
{
        struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS);

        if (!blocks)
                return -ENOMEM;

        *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS);
        if (!*heap) {
                drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS);
                return -ENOMEM;
        }

        blocks->start = start;
        blocks->size = size;
        blocks->filp = NULL;
        blocks->next = blocks->prev = *heap;

        memset(*heap, 0, sizeof(**heap));
        (*heap)->filp = (DRMFILE) - 1;
        (*heap)->next = (*heap)->prev = blocks;
        return 0;
}
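/* After init_heap() the list consists of the sentinel plus one free block
 * covering [start, start + size); every later allocation is carved out of
 * that range by split_block() and returned to it by free_block().
 */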

/* Free all blocks associated with the releasing file.
 */
void i915_mem_release(drm_device_t * dev, DRMFILE filp, struct mem_block *heap)
{
        struct mem_block *p;

        if (!heap || !heap->next)
                return;

        for (p = heap->next; p != heap; p = p->next) {
                if (p->filp == filp) {
                        p->filp = NULL;
                        mark_block(dev, p, 0);
                }
        }

        /* Assumes a single contiguous range. Needs a special filp in
         * 'heap' to stop it being subsumed.
         */
        for (p = heap->next; p != heap; p = p->next) {
                while (p->filp == NULL && p->next->filp == NULL) {
                        struct mem_block *q = p->next;
                        p->size += q->size;
                        p->next = q->next;
                        p->next->prev = p;
                        drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
                }
        }
}

/* Shutdown.
 */
void i915_mem_takedown(struct mem_block **heap)
{
        struct mem_block *p;

        if (!*heap)
                return;

        for (p = (*heap)->next; p != *heap;) {
                struct mem_block *q = p;
                p = p->next;
                drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
        }

        drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS);
        *heap = NULL;
}

static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
{
        switch (region) {
        case I915_MEM_REGION_AGP:
                return &dev_priv->agp_heap;
        default:
                return NULL;
        }
}

/* IOCTL HANDLERS */

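/* Typical flow (sketch, not part of this file): the X/DRI side initializes
 * the AGP heap once via the init_heap ioctl, clients then allocate with the
 * alloc ioctl -- which writes the block's start offset back through the
 * region_offset user pointer -- and return memory with the free ioctl.
 * Anything a client leaks is reclaimed by i915_mem_release() when its file
 * is closed.
 */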
int i915_mem_alloc(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_mem_alloc_t alloc;
        struct mem_block *block, **heap;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(alloc, (drm_i915_mem_alloc_t __user *) data,
                                 sizeof(alloc));

        heap = get_heap(dev_priv, alloc.region);
        if (!heap || !*heap)
                return DRM_ERR(EFAULT);

        /* Make things easier on ourselves: all allocations at least
         * 4k aligned.
         */
        if (alloc.alignment < 12)
                alloc.alignment = 12;

        block = alloc_block(*heap, alloc.size, alloc.alignment, filp);

        if (!block)
                return DRM_ERR(ENOMEM);

        mark_block(dev, block, 1);

        if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return DRM_ERR(EFAULT);
        }

        return 0;
}

int i915_mem_free(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_mem_free_t memfree;
        struct mem_block *block, **heap;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(memfree, (drm_i915_mem_free_t __user *) data,
                                 sizeof(memfree));

        heap = get_heap(dev_priv, memfree.region);
        if (!heap || !*heap)
                return DRM_ERR(EFAULT);

        block = find_block(*heap, memfree.region_offset);
        if (!block)
                return DRM_ERR(EFAULT);

        if (block->filp != filp)
                return DRM_ERR(EPERM);

        mark_block(dev, block, 0);
        free_block(block);
        return 0;
}

int i915_mem_init_heap(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_mem_init_heap_t initheap;
        struct mem_block **heap;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(initheap,
                                 (drm_i915_mem_init_heap_t __user *) data,
                                 sizeof(initheap));

        heap = get_heap(dev_priv, initheap.region);
        if (!heap)
                return DRM_ERR(EFAULT);

        if (*heap) {
                DRM_ERROR("heap already initialized?");
                return DRM_ERR(EFAULT);
        }

        return init_heap(heap, initheap.start, initheap.size);
}