staging: tidspbridge: remove dmm_init() and dmm_exit()
drivers/staging/tidspbridge/pmgr/dmm.c
/*
 * dmm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * The Dynamic Memory Manager (DMM) module manages the DSP virtual address
 * space that can be directly mapped to any MPU buffer or memory region.
 *
 * Notes:
 *   Region: generic memory entity having a start address and a size
 *   Chunk:  a reserved region
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>

/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>

/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/proc.h>

/* ----------------------------------- This */
#include <dspbridge/dmm.h>

/* ----------------------------------- Defines, Data Structures, Typedefs */
#define DMM_ADDR_VIRTUAL(a) \
	(((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\
	dyn_mem_map_beg)
#define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K)

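/*
 * Illustrative arithmetic for the two macros above (a sketch only; the
 * dyn_mem_map_beg value 0x20000000 is a made-up placeholder):
 *
 *   DMM_ADDR_TO_INDEX(0x20003000) = (0x20003000 - 0x20000000) / PG_SIZE4K
 *                                 = 0x3000 / 0x1000 = 3
 *   DMM_ADDR_VIRTUAL(&virtual_mapping_table[3])
 *                                 = 3 * PG_SIZE4K + 0x20000000 = 0x20003000
 *
 * In other words, each map_page entry describes one 4 KiB page of the DSP
 * virtual range, and the macros convert between a table entry and the DSP
 * virtual address it represents.
 */
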
/* DMM Mgr */
struct dmm_object {
	/* dmm_lock serializes access to the memory manager
	 * across multiple threads. */
	spinlock_t dmm_lock;	/* Lock to access dmm mgr */
};

struct map_page {
	u32 region_size:15;
	u32 mapped_size:15;
	u32 reserved:1;
	u32 mapped:1;
};

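/*
 * Note (derived from the bitfield widths above): sizes are stored in units
 * of 4 KiB pages, so a single entry can describe at most 2^15 - 1 pages,
 * i.e. just under 128 MiB of DSP virtual address space.
 */
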
/* Bookkeeping for the free list */
static struct map_page *virtual_mapping_table;
static u32 free_region;		/* The index of free region */
static u32 free_size;
static u32 dyn_mem_map_beg;	/* The beginning of dynamic memory mapping */
static u32 table_size;		/* The size of virt and phys pages tables */

/* ----------------------------------- Function Prototypes */
static struct map_page *get_region(u32 addr);
static struct map_page *get_free_region(u32 len);
static struct map_page *get_mapped_region(u32 addrs);

/* ======== dmm_create_tables ========
 * Purpose:
 *	Create the table that holds information about the physical addresses
 *	of the buffer pages passed in by the user, and the table that holds
 *	information about the virtual memory reserved for the DSP.
 */
int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	int status = 0;

	status = dmm_delete_tables(dmm_obj);
	if (!status) {
		dyn_mem_map_beg = addr;
		table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K;
		/* Create the free list */
		virtual_mapping_table = __vmalloc(table_size *
				sizeof(struct map_page), GFP_KERNEL |
				__GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
		if (virtual_mapping_table == NULL) {
			status = -ENOMEM;
		} else {
			/* On successful allocation,
			 * all entries are zero ('free') */
			free_region = 0;
			free_size = table_size * PG_SIZE4K;
			virtual_mapping_table[0].region_size = table_size;
		}
	}

	if (status)
		pr_err("%s: failure, status 0x%x\n", __func__, status);

	return status;
}

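/*
 * A minimal lifecycle sketch of this module's API (illustrative only, not
 * code from this driver): the bridge's dev/proc layers normally drive this
 * sequence, "hdev_obj" stands for whatever device object the caller holds,
 * and the base address and sizes below are made-up placeholders.
 *
 *	struct dmm_object *dmm;
 *	u32 rsv, mapped_size;
 *
 *	dmm_create(&dmm, hdev_obj, NULL);
 *	dmm_create_tables(dmm, 0x20000000, 0x01000000); // hypothetical DSP VA range
 *	dmm_reserve_memory(dmm, 0x10000, &rsv);         // reserve 64 KiB of DSP VA
 *	dmm_map_memory(dmm, rsv, 0x10000);              // mark it mapped
 *	dmm_un_map_memory(dmm, rsv, &mapped_size);
 *	dmm_un_reserve_memory(dmm, rsv);
 *	dmm_destroy(dmm);                               // also deletes the tables
 */
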
/*
 * ======== dmm_create ========
 * Purpose:
 *	Create a dynamic memory manager object.
 */
int dmm_create(struct dmm_object **dmm_manager,
	       struct dev_object *hdev_obj,
	       const struct dmm_mgrattrs *mgr_attrts)
{
	struct dmm_object *dmm_obj = NULL;
	int status = 0;

	*dmm_manager = NULL;
	/* create, zero, and tag a dmm mgr object */
	dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL);
	if (dmm_obj != NULL) {
		spin_lock_init(&dmm_obj->dmm_lock);
		*dmm_manager = dmm_obj;
	} else {
		status = -ENOMEM;
	}

	return status;
}

/*
 * ======== dmm_destroy ========
 * Purpose:
 *	Release the dynamic memory manager resources.
 */
int dmm_destroy(struct dmm_object *dmm_mgr)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	int status = 0;

	if (dmm_mgr) {
		status = dmm_delete_tables(dmm_obj);
		if (!status)
			kfree(dmm_obj);
	} else {
		status = -EFAULT;
	}

	return status;
}

/*
 * ======== dmm_delete_tables ========
 * Purpose:
 *	Delete DMM tables.
 */
int dmm_delete_tables(struct dmm_object *dmm_mgr)
{
	int status = 0;

	/* Delete all DMM tables */
	if (dmm_mgr)
		vfree(virtual_mapping_table);
	else
		status = -EFAULT;
	return status;
}

/*
 * ======== dmm_get_handle ========
 * Purpose:
 *	Return the dynamic memory manager object for this device.
 *	This is typically called from the client process.
 */
int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
{
	int status = 0;
	struct dev_object *hdev_obj;

	if (hprocessor != NULL)
		status = proc_get_dev_object(hprocessor, &hdev_obj);
	else
		hdev_obj = dev_get_first();	/* default */

	if (!status)
		status = dev_get_dmm_mgr(hdev_obj, dmm_manager);

	return status;
}

/*
 * ======== dmm_map_memory ========
 * Purpose:
 *	Add a mapping block to the reserved chunk. DMM assumes that this block
 *	will be mapped in the DSP/IVA's address space. DMM returns an error if
 *	a mapping overlaps another one. This function stores the info that will
 *	be required later while unmapping the block.
 */
int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *chunk;
	int status = 0;

	spin_lock(&dmm_obj->dmm_lock);
	/* Find the reserved memory chunk containing the DSP block to
	 * be mapped */
	chunk = (struct map_page *)get_region(addr);
	if (chunk != NULL) {
		/* Mark the region 'mapped', leave the 'reserved' info as-is */
		chunk->mapped = true;
		chunk->mapped_size = (size / PG_SIZE4K);
	} else {
		status = -ENOENT;
	}
	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, "
		"chunk %p", __func__, dmm_mgr, addr, size, status, chunk);

	return status;
}

/*
 * ======== dmm_reserve_memory ========
 * Purpose:
 *	Reserve a chunk of virtually contiguous DSP/IVA address space.
 */
int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size,
		       u32 *prsv_addr)
{
	int status = 0;
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *node;
	u32 rsv_addr = 0;
	u32 rsv_size = 0;

	spin_lock(&dmm_obj->dmm_lock);

	/* Try to get a DSP chunk from the free list */
	node = get_free_region(size);
	if (node != NULL) {
		/* DSP chunk of given size is available. */
		rsv_addr = DMM_ADDR_VIRTUAL(node);
		/* Calculate the number of entries to use */
		rsv_size = size / PG_SIZE4K;
		if (rsv_size < node->region_size) {
			/* Mark remainder of free region */
			node[rsv_size].mapped = false;
			node[rsv_size].reserved = false;
			node[rsv_size].region_size =
			    node->region_size - rsv_size;
			node[rsv_size].mapped_size = 0;
		}
		/* get_free_region returns a first-fit chunk, but we only use
		 * what was requested. */
		node->mapped = false;
		node->reserved = true;
		node->region_size = rsv_size;
		node->mapped_size = 0;
		/* Return the chunk's starting address */
		*prsv_addr = rsv_addr;
	} else {
		/* DSP chunk of given size is not available */
		status = -ENOMEM;
	}

	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, "
		"rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size,
		prsv_addr, status, rsv_addr, rsv_size);

	return status;
}

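/*
 * Worked example of the split above (hypothetical numbers): if the chosen
 * free region spans 16 pages and dmm_reserve_memory() is asked for
 * size = 0x5000 (5 pages), then rsv_size = 5, node[5] becomes a free
 * region of 16 - 5 = 11 pages, and node itself becomes a reserved region
 * of 5 pages whose DSP virtual address DMM_ADDR_VIRTUAL(node) is returned
 * through *prsv_addr.
 */
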
/*
 * ======== dmm_un_map_memory ========
 * Purpose:
 *	Remove the mapped block from the reserved chunk.
 */
int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *chunk;
	int status = 0;

	spin_lock(&dmm_obj->dmm_lock);
	chunk = get_mapped_region(addr);
	if (chunk == NULL)
		status = -ENOENT;

	if (!status) {
		/* Unmap the region */
		*psize = chunk->mapped_size * PG_SIZE4K;
		chunk->mapped = false;
		chunk->mapped_size = 0;
	}
	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, "
		"chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk);

	return status;
}

/*
 * ======== dmm_un_reserve_memory ========
 * Purpose:
 *	Free a chunk of reserved DSP/IVA address space.
 */
int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *chunk;
	u32 i;
	int status = 0;
	u32 chunk_size;

	spin_lock(&dmm_obj->dmm_lock);

	/* Find the chunk containing the reserved address */
	chunk = get_mapped_region(rsv_addr);
	if (chunk == NULL)
		status = -ENOENT;

	if (!status) {
		/* Free all the mapped pages for this reserved region */
		i = 0;
		while (i < chunk->region_size) {
			if (chunk[i].mapped) {
				/* Remove mapping from the page tables. */
				chunk_size = chunk[i].mapped_size;
				/* Clear the mapping flags */
				chunk[i].mapped = false;
				chunk[i].mapped_size = 0;
				i += chunk_size;
			} else {
				i++;
			}
		}
		/* Clear the flags (mark the region 'free') */
		chunk->reserved = false;
		/* NOTE: We do NOT coalesce free regions here.
		 * Free regions are coalesced in get_free_region(), as it
		 * traverses the whole mapping table.
		 */
	}
	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x chunk %p",
		__func__, dmm_mgr, rsv_addr, status, chunk);

	return status;
}

/*
 * ======== get_region ========
 * Purpose:
 *	Return the map_page entry covering the specified address.
 */
static struct map_page *get_region(u32 addr)
{
	struct map_page *curr_region = NULL;
	u32 i = 0;

	if (virtual_mapping_table != NULL) {
		/* find page mapped by this address */
		i = DMM_ADDR_TO_INDEX(addr);
		if (i < table_size)
			curr_region = virtual_mapping_table + i;
	}

	dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n",
		__func__, curr_region, free_region, free_size);
	return curr_region;
}

/*
 * ======== get_free_region ========
 * Purpose:
 *	Return a free region large enough for the requested length,
 *	coalescing adjacent free regions while searching.
 */
static struct map_page *get_free_region(u32 len)
{
	struct map_page *curr_region = NULL;
	u32 i = 0;
	u32 region_size = 0;
	u32 next_i = 0;

	if (virtual_mapping_table == NULL)
		return curr_region;
	if (len > free_size) {
		/* Find the largest free region
		 * (coalesce during the traversal) */
		while (i < table_size) {
			region_size = virtual_mapping_table[i].region_size;
			next_i = i + region_size;
			if (virtual_mapping_table[i].reserved == false) {
				/* Coalesce, if possible */
				if (next_i < table_size &&
				    virtual_mapping_table[next_i].reserved
				    == false) {
					virtual_mapping_table[i].region_size +=
					    virtual_mapping_table
					    [next_i].region_size;
					continue;
				}
				region_size *= PG_SIZE4K;
				if (region_size > free_size) {
					free_region = i;
					free_size = region_size;
				}
			}
			i = next_i;
		}
	}
	if (len <= free_size) {
		curr_region = virtual_mapping_table + free_region;
		free_region += (len / PG_SIZE4K);
		free_size -= len;
	}
	return curr_region;
}

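/*
 * Illustrative trace of the search above (made-up table state): with
 * entries [0] reserved/4 pages, [4] free/2 pages, [6] free/10 pages and
 * free_size currently 0, a call to get_free_region(0x3000) first coalesces
 * [4] and [6] into one 12-page free region, records it as the largest
 * (free_region = 4, free_size = 12 * PG_SIZE4K), then hands out its first
 * 3 pages and leaves free_region = 7, free_size = 9 * PG_SIZE4K for the
 * next request.
 */
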
/*
 * ======== get_mapped_region ========
 * Purpose:
 *	Return the requested mapped region.
 */
static struct map_page *get_mapped_region(u32 addrs)
{
	u32 i = 0;
	struct map_page *curr_region = NULL;

	if (virtual_mapping_table == NULL)
		return curr_region;

	i = DMM_ADDR_TO_INDEX(addrs);
	if (i < table_size && (virtual_mapping_table[i].mapped ||
			       virtual_mapping_table[i].reserved))
		curr_region = virtual_mapping_table + i;
	return curr_region;
}

#ifdef DSP_DMM_DEBUG
u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr)
{
	struct map_page *curr_node = NULL;
	u32 i;
	u32 freemem = 0;
	u32 bigsize = 0;

	spin_lock(&dmm_mgr->dmm_lock);

	if (virtual_mapping_table != NULL) {
		for (i = 0; i < table_size; i +=
		     virtual_mapping_table[i].region_size) {
			curr_node = virtual_mapping_table + i;
			if (curr_node->reserved) {
				/* printk("RESERVED size = 0x%x, "
				   "Map size = 0x%x\n",
				   (curr_node->region_size * PG_SIZE4K),
				   (curr_node->mapped == false) ? 0 :
				   (curr_node->mapped_size * PG_SIZE4K));
				 */
			} else {
				/* printk("UNRESERVED size = 0x%x\n",
				   (curr_node->region_size * PG_SIZE4K));
				 */
				freemem += (curr_node->region_size * PG_SIZE4K);
				if (curr_node->region_size > bigsize)
					bigsize = curr_node->region_size;
			}
		}
	}
	spin_unlock(&dmm_mgr->dmm_lock);
	printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
	       freemem / (1024 * 1024));
	printk(KERN_INFO "Total DSP VA USED memory = %d Mbytes\n",
	       (((table_size * PG_SIZE4K) - freemem)) / (1024 * 1024));
	printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes\n\n",
	       (bigsize * PG_SIZE4K / (1024 * 1024)));

	return 0;
}
#endif