staging: tidspbridge: remove cmm_init() and cmm_exit()
drivers/staging/tidspbridge/pmgr/cmm.c
/*
 * cmm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * The Communication (Shared) Memory Management (CMM) module provides
 * shared memory management services for DSP/BIOS Bridge data streaming
 * and messaging.
 *
 * Multiple shared memory segments can be registered with CMM.
 * Each registered SM segment is represented by a SM "allocator" that
 * describes a block of physically contiguous shared memory used for
 * future allocations by CMM.
 *
 * Memory is coalesced back to the appropriate heap when a buffer is
 * freed.
 *
 * Notes:
 *	Va: Virtual address.
 *	Pa: Physical or kernel system address.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>
#include <linux/list.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>

/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/proc.h>

/* ----------------------------------- This */
#include <dspbridge/cmm.h>

/* ----------------------------------- Defines, Data Structures, Typedefs */
#define NEXT_PA(pnode)	((pnode)->pa + (pnode)->size)

/* Other bus/platform translations */
#define DSPPA2GPPPA(base, x, y)	((x)+(y))
#define GPPPA2DSPPA(base, x, y)	((x)-(y))

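/*
 * Illustrative numbers only (not from the original source): these macros
 * are used below with y = dsp_phys_addr_offset * c_factor. With a
 * hypothetical offset of 0x20000000 and c_factor 1, a DSP PA of
 * 0x67000000 translates to GPP PA 0x87000000 via DSPPA2GPPPA, and
 * GPPPA2DSPPA reverses it.
 */
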
/*
 * Allocators define a block of contiguous memory used for future allocations.
 *
 * sma - shared memory allocator.
 * vma - virtual memory allocator. (not used)
 */
struct cmm_allocator {		/* sma */
	unsigned int shm_base;	/* Start of physical SM block */
	u32 sm_size;		/* Size of SM block in bytes */
	unsigned int vm_base;	/* Start of VM block. (Dev driver
				 * context for 'sma') */
	u32 dsp_phys_addr_offset;	/* DSP PA to GPP PA offset for this
					 * SM space */
	s8 c_factor;		/* DSPPa to GPPPa Conversion Factor */
	unsigned int dsp_base;	/* DSP virt base byte address */
	u32 dsp_size;		/* DSP seg size in bytes */
	struct cmm_object *cmm_mgr;	/* back ref to parent mgr */
	/* node list of available memory */
	struct list_head free_list;
	/* node list of memory in use */
	struct list_head in_use_list;
};

struct cmm_xlator {		/* Pa<->Va translator object */
	/* CMM object this translator is associated with */
	struct cmm_object *cmm_mgr;
	/*
	 * Client process virtual base address that corresponds to phys SM
	 * base address for translator's seg_id.
	 * Only 1 segment ID currently supported.
	 */
	unsigned int virt_base;	/* virtual base address */
	u32 virt_size;		/* size of virt space in bytes */
	u32 seg_id;		/* Segment Id */
};

/* CMM Mgr */
struct cmm_object {
	/*
	 * cmm_lock serializes access to the memory manager across threads.
	 */
	struct mutex cmm_lock;	/* Lock to access cmm mgr */
	struct list_head node_free_list;	/* Free list of memory nodes */
	u32 min_block_size;	/* Min SM block; default 16 bytes */
	u32 page_size;		/* Memory Page size (1k/4k) */
	/* GPP SM segment ptrs */
	struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
};

/* Default CMM Mgr attributes */
static struct cmm_mgrattrs cmm_dfltmgrattrs = {
	/* min_block_size, min block size (bytes) allocated by cmm mgr */
	16
};

/* Default allocation attributes */
static struct cmm_attrs cmm_dfltalctattrs = {
	1		/* seg_id, default segment Id for allocator */
};

/* Address translator default attrs */
static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
	/* seg_id, does not have to match cmm_dfltalctattrs ul_seg_id */
	1,
	0,		/* dsp_bufs */
	0,		/* dsp_buf_size */
	NULL,		/* vm_base */
	0,		/* vm_size */
};

/* SM node representing a block of memory. */
struct cmm_mnode {
	struct list_head link;	/* must be 1st element */
	u32 pa;			/* Phys addr */
	u32 va;			/* Virtual address in device process context */
	u32 size;		/* SM block size in bytes */
	u32 client_proc;	/* Process that allocated this mem block */
};

/* ----------------------------------- Function Prototypes */
static void add_to_free_list(struct cmm_allocator *allocator,
			     struct cmm_mnode *pnode);
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
					   u32 ul_seg_id);
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
					u32 usize);
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size);
/* get available slot for new allocator */
static s32 get_slot(struct cmm_object *cmm_mgr_obj);
static void un_register_gppsm_seg(struct cmm_allocator *psma);

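/*
 * Illustrative lifecycle sketch (not part of the original file; the segment
 * base/size numbers below are hypothetical): a manager is created once per
 * device, an SM segment is registered with it, buffers are carved out and
 * returned, and the manager is torn down last.
 */
#if 0	/* example only */
static int cmm_usage_example(struct dev_object *hdev_obj)
{
	struct cmm_object *cmm_mgr;
	u32 seg_id;
	void *buf_pa;
	int status;

	status = cmm_create(&cmm_mgr, hdev_obj, NULL);	/* default attrs */
	if (status)
		return status;
	/* hypothetical GPP PA 0x87000000, 64K segment, mapped VA 0xd0000000 */
	status = cmm_register_gppsm_seg(cmm_mgr, 0x87000000, 0x10000, 0, 1,
					0, 0, &seg_id, 0xd0000000);
	if (status)
		goto out;
	/* carve a zeroed 256-byte buffer out of the segment, then free it */
	buf_pa = cmm_calloc_buf(cmm_mgr, 256, NULL, NULL);
	if (buf_pa)
		cmm_free_buf(cmm_mgr, buf_pa, seg_id);
	cmm_un_register_gppsm_seg(cmm_mgr, seg_id);
out:
	cmm_destroy(cmm_mgr, false);
	return status;
}
#endif
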
/*
 * ======== cmm_calloc_buf ========
 * Purpose:
 *	Allocate a SM buffer, zero its contents, and return the physical
 *	address and optional driver context virtual address (pp_buf_va).
 *
 *	The freelist is sorted in increasing size order. Get the first
 *	block that satisfies the request and put the remainder back on
 *	the freelist if it is large enough. The kept block is placed on
 *	the in-use list.
 */
void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
		     struct cmm_attrs *pattrs, void **pp_buf_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	void *buf_pa = NULL;
	struct cmm_mnode *pnode = NULL;
	struct cmm_mnode *new_node = NULL;
	struct cmm_allocator *allocator = NULL;
	u32 delta_size;
	u8 *pbyte = NULL;
	s32 cnt;

	if (pattrs == NULL)
		pattrs = &cmm_dfltalctattrs;

	if (pp_buf_va != NULL)
		*pp_buf_va = NULL;

	if (cmm_mgr_obj && (usize != 0)) {
		/* lock taken here so the unlock below is always balanced */
		mutex_lock(&cmm_mgr_obj->cmm_lock);
		if (pattrs->seg_id > 0) {
			/* SegId > 0 is SM */
			/* get the allocator object for this segment id */
			allocator =
			    get_allocator(cmm_mgr_obj, pattrs->seg_id);
			/* keep block size a multiple of min_block_size */
			usize =
			    ((usize - 1) & ~(cmm_mgr_obj->min_block_size -
					     1))
			    + cmm_mgr_obj->min_block_size;
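			/*
			 * Worked example (illustrative): with the default
			 * min_block_size of 16, usize = 100 rounds to
			 * ((100 - 1) & ~15) + 16 = 112, while an exact
			 * multiple such as 96 stays 96.
			 */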
			pnode = get_free_block(allocator, usize);
		}
		if (pnode) {
			delta_size = (pnode->size - usize);
			if (delta_size >= cmm_mgr_obj->min_block_size) {
				/* create a new block with the leftovers and
				 * add to freelist */
				new_node =
				    get_node(cmm_mgr_obj, pnode->pa + usize,
					     pnode->va + usize,
					     (u32) delta_size);
				/* leftovers go free */
				add_to_free_list(allocator, new_node);
				/* adjust our node's size */
				pnode->size = usize;
			}
			/* Tag node with client process requesting allocation
			 * We'll need to free up a process's alloc'd SM if the
			 * client process goes away.
			 */
			/* Return TGID instead of process handle */
			pnode->client_proc = current->tgid;

			/* put our node on InUse list */
			list_add_tail(&pnode->link, &allocator->in_use_list);
			buf_pa = (void *)pnode->pa;	/* physical address */
			/* clear mem */
			pbyte = (u8 *) pnode->va;
			for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++)
				*pbyte = 0;

			if (pp_buf_va != NULL) {
				/* Virtual address */
				*pp_buf_va = (void *)pnode->va;
			}
		}
		mutex_unlock(&cmm_mgr_obj->cmm_lock);
	}
	return buf_pa;
}
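
/*
 * Worked example of the split above (illustrative): a 512-byte free block
 * serving a 112-byte request is divided into a 112-byte node placed on the
 * in-use list and a 400-byte remainder returned to the free list, since
 * 400 >= min_block_size.
 */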

/*
 * ======== cmm_create ========
 * Purpose:
 *	Create a communication memory manager object.
 */
int cmm_create(struct cmm_object **ph_cmm_mgr,
	       struct dev_object *hdev_obj,
	       const struct cmm_mgrattrs *mgr_attrts)
{
	struct cmm_object *cmm_obj = NULL;
	int status = 0;

	*ph_cmm_mgr = NULL;
	/* create, zero, and tag a cmm mgr object */
	cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL);
	if (!cmm_obj)
		return -ENOMEM;

	if (mgr_attrts == NULL)
		mgr_attrts = &cmm_dfltmgrattrs;	/* set defaults */

	/* save away smallest block allocation for this cmm mgr */
	cmm_obj->min_block_size = mgr_attrts->min_block_size;
	cmm_obj->page_size = PAGE_SIZE;

	/* create node free list */
	INIT_LIST_HEAD(&cmm_obj->node_free_list);
	mutex_init(&cmm_obj->cmm_lock);
	*ph_cmm_mgr = cmm_obj;

	return status;
}

/*
 * ======== cmm_destroy ========
 * Purpose:
 *	Release the communication memory manager resources.
 */
int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	struct cmm_info temp_info;
	int status = 0;
	s32 slot_seg;
	struct cmm_mnode *node, *tmp;

	if (!hcmm_mgr) {
		status = -EFAULT;
		return status;
	}
	/*
	 * If not force then fail if outstanding allocations exist. The
	 * check is done before taking cmm_lock because cmm_get_info()
	 * takes the same (non-recursive) lock itself.
	 */
	if (!force) {
		/* Check for outstanding memory allocations */
		status = cmm_get_info(hcmm_mgr, &temp_info);
		if (!status && temp_info.total_in_use_cnt > 0) {
			/* outstanding allocations */
			status = -EPERM;
		}
	}
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	if (!status) {
		/* UnRegister SM allocator */
		for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
			if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] != NULL) {
				un_register_gppsm_seg
				    (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg]);
				/* Set slot to NULL for future reuse */
				cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = NULL;
			}
		}
	}
	list_for_each_entry_safe(node, tmp, &cmm_mgr_obj->node_free_list,
				 link) {
		list_del(&node->link);
		kfree(node);
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	if (!status) {
		/* delete CS & cmm mgr object */
		mutex_destroy(&cmm_mgr_obj->cmm_lock);
		kfree(cmm_mgr_obj);
	}
	return status;
}

/*
 * ======== cmm_free_buf ========
 * Purpose:
 *	Free the given buffer.
 */
int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa, u32 ul_seg_id)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	int status = -EFAULT;
	struct cmm_mnode *curr, *tmp;
	struct cmm_allocator *allocator;
	struct cmm_attrs *pattrs;

	if (ul_seg_id == 0) {
		pattrs = &cmm_dfltalctattrs;
		ul_seg_id = pattrs->seg_id;
	}
	if (!hcmm_mgr || !(ul_seg_id > 0)) {
		status = -EFAULT;
		return status;
	}

	allocator = get_allocator(cmm_mgr_obj, ul_seg_id);
	if (!allocator)
		return status;

	mutex_lock(&cmm_mgr_obj->cmm_lock);
	list_for_each_entry_safe(curr, tmp, &allocator->in_use_list, link) {
		if (curr->pa == (u32) buf_pa) {
			list_del(&curr->link);
			add_to_free_list(allocator, curr);
			status = 0;
			break;
		}
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);

	return status;
}

/*
 * ======== cmm_get_handle ========
 * Purpose:
 *	Return the communication memory manager object for this device.
 *	This is typically called from the client process.
 */
int cmm_get_handle(void *hprocessor, struct cmm_object **ph_cmm_mgr)
{
	int status = 0;
	struct dev_object *hdev_obj;

	if (hprocessor != NULL)
		status = proc_get_dev_object(hprocessor, &hdev_obj);
	else
		hdev_obj = dev_get_first();	/* default */

	if (!status)
		status = dev_get_cmm_mgr(hdev_obj, ph_cmm_mgr);

	return status;
}

/*
 * ======== cmm_get_info ========
 * Purpose:
 *	Return the current memory utilization information.
 */
int cmm_get_info(struct cmm_object *hcmm_mgr,
		 struct cmm_info *cmm_info_obj)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	u32 ul_seg;
	int status = 0;
	struct cmm_allocator *altr;
	struct cmm_mnode *curr;

	if (!hcmm_mgr) {
		status = -EFAULT;
		return status;
	}
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	cmm_info_obj->num_gppsm_segs = 0;	/* # of SM segments */
	/* Total # of outstanding alloc */
	cmm_info_obj->total_in_use_cnt = 0;
	/* min block size */
	cmm_info_obj->min_block_size = cmm_mgr_obj->min_block_size;
	/* check SM memory segments */
	for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) {
		/* get the allocator object for this segment id */
		altr = get_allocator(cmm_mgr_obj, ul_seg);
		if (!altr)
			continue;
		cmm_info_obj->num_gppsm_segs++;
		cmm_info_obj->seg_info[ul_seg - 1].seg_base_pa =
		    altr->shm_base - altr->dsp_size;
		cmm_info_obj->seg_info[ul_seg - 1].total_seg_size =
		    altr->dsp_size + altr->sm_size;
		cmm_info_obj->seg_info[ul_seg - 1].gpp_base_pa =
		    altr->shm_base;
		cmm_info_obj->seg_info[ul_seg - 1].gpp_size =
		    altr->sm_size;
		cmm_info_obj->seg_info[ul_seg - 1].dsp_base_va =
		    altr->dsp_base;
		cmm_info_obj->seg_info[ul_seg - 1].dsp_size =
		    altr->dsp_size;
		cmm_info_obj->seg_info[ul_seg - 1].seg_base_va =
		    altr->vm_base - altr->dsp_size;
		cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt = 0;

		list_for_each_entry(curr, &altr->in_use_list, link) {
			cmm_info_obj->total_in_use_cnt++;
			cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt++;
		}
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	return status;
}

/*
 * ======== cmm_register_gppsm_seg ========
 * Purpose:
 *	Register a block of SM with the CMM to be used for later GPP SM
 *	allocations.
 */
int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
			   u32 dw_gpp_base_pa, u32 ul_size,
			   u32 dsp_addr_offset, s8 c_factor,
			   u32 dw_dsp_base, u32 ul_dsp_size,
			   u32 *sgmt_id, u32 gpp_base_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	struct cmm_allocator *psma = NULL;
	int status = 0;
	struct cmm_mnode *new_node;
	s32 slot_seg;

	dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n",
		__func__, dw_gpp_base_pa, ul_size, dsp_addr_offset,
		dw_dsp_base, ul_dsp_size, gpp_base_va);

	if (!hcmm_mgr)
		return -EFAULT;

	/* make sure we have room for another allocator */
	mutex_lock(&cmm_mgr_obj->cmm_lock);

	slot_seg = get_slot(cmm_mgr_obj);
	if (slot_seg < 0) {
		status = -EPERM;
		goto func_end;
	}

	/* Check if input ul_size is big enough to alloc at least one block */
	if (ul_size < cmm_mgr_obj->min_block_size) {
		status = -EINVAL;
		goto func_end;
	}

	/* create, zero, and tag an SM allocator object */
	psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL);
	if (!psma) {
		status = -ENOMEM;
		goto func_end;
	}

	/*
	 * Initialize the lists before any error path can reach func_end,
	 * where un_register_gppsm_seg() walks them.
	 */
	INIT_LIST_HEAD(&psma->free_list);
	INIT_LIST_HEAD(&psma->in_use_list);

	psma->cmm_mgr = hcmm_mgr;	/* ref to parent */
	psma->shm_base = dw_gpp_base_pa;	/* SM Base phys */
	psma->sm_size = ul_size;	/* SM segment size in bytes */
	psma->vm_base = gpp_base_va;
	psma->dsp_phys_addr_offset = dsp_addr_offset;
	psma->c_factor = c_factor;
	psma->dsp_base = dw_dsp_base;
	psma->dsp_size = ul_dsp_size;
	if (psma->vm_base == 0) {
		status = -EPERM;
		goto func_end;
	}
	/* return the actual segment identifier */
	*sgmt_id = (u32) slot_seg + 1;

	/* Get a mem node for this hunk-o-memory */
	new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
			    psma->vm_base, ul_size);
	/* Place node on the SM allocator's free list */
	if (new_node) {
		list_add_tail(&new_node->link, &psma->free_list);
	} else {
		status = -ENOMEM;
		goto func_end;
	}
	/* make entry */
	cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma;

func_end:
	/* Cleanup allocator */
	if (status && psma)
		un_register_gppsm_seg(psma);
	mutex_unlock(&cmm_mgr_obj->cmm_lock);

	return status;
}

/*
 * ======== cmm_un_register_gppsm_seg ========
 * Purpose:
 *	Unregister GPP SM segments with the CMM.
 */
int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
			      u32 ul_seg_id)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	int status = 0;
	struct cmm_allocator *psma;
	u32 ul_id = ul_seg_id;

	if (!hcmm_mgr)
		return -EFAULT;

	if (ul_seg_id == CMM_ALLSEGMENTS)
		ul_id = 1;

	if ((ul_id <= 0) || (ul_id > CMM_MAXGPPSEGS))
		return -EINVAL;

	/*
	 * FIXME: CMM_MAXGPPSEGS == 1, so why use a while loop? It seems
	 * ul_seg_id is not needed here; it must always be 1.
	 */
	while (ul_id <= CMM_MAXGPPSEGS) {
		mutex_lock(&cmm_mgr_obj->cmm_lock);
		/* slot = seg_id - 1 */
		psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1];
		if (psma != NULL) {
			un_register_gppsm_seg(psma);
			/* Set alctr ptr to NULL for future reuse */
			cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1] = NULL;
		} else if (ul_seg_id != CMM_ALLSEGMENTS) {
			status = -EPERM;
		}
		mutex_unlock(&cmm_mgr_obj->cmm_lock);
		if (ul_seg_id != CMM_ALLSEGMENTS)
			break;

		ul_id++;
	}			/* end while */
	return status;
}

/*
 * ======== un_register_gppsm_seg ========
 * Purpose:
 *	Unregister the SM allocator by freeing all its resources and
 *	nulling the cmm mgr table entry.
 * Note:
 *	This routine is always called within the cmm lock critical section.
 */
static void un_register_gppsm_seg(struct cmm_allocator *psma)
{
	struct cmm_mnode *curr, *tmp;

	/* free nodes on free list */
	list_for_each_entry_safe(curr, tmp, &psma->free_list, link) {
		list_del(&curr->link);
		kfree(curr);
	}

	/* free nodes on InUse list */
	list_for_each_entry_safe(curr, tmp, &psma->in_use_list, link) {
		list_del(&curr->link);
		kfree(curr);
	}

	if ((void *)psma->vm_base != NULL)
		MEM_UNMAP_LINEAR_ADDRESS((void *)psma->vm_base);

	/* Free allocator itself */
	kfree(psma);
}

/*
 * ======== get_slot ========
 * Purpose:
 *	An available slot # is returned. Returns negative on failure.
 */
static s32 get_slot(struct cmm_object *cmm_mgr_obj)
{
	s32 slot_seg = -1;	/* neg on failure */
	/* get first available slot in cmm mgr SMSegTab[] */
	for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
		if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL)
			break;
	}
	if (slot_seg == CMM_MAXGPPSEGS)
		slot_seg = -1;	/* failed */

	return slot_seg;
}

/*
 * ======== get_node ========
 * Purpose:
 *	Get a memory node from the freelist or create a new one.
 */
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size)
{
	struct cmm_mnode *pnode;

	/* Check cmm mgr's node freelist */
	if (list_empty(&cmm_mgr_obj->node_free_list)) {
		pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
		if (!pnode)
			return NULL;
	} else {
		/* surely a valid element */
		pnode = list_first_entry(&cmm_mgr_obj->node_free_list,
					 struct cmm_mnode, link);
		list_del_init(&pnode->link);
	}

	pnode->pa = dw_pa;
	pnode->va = dw_va;
	pnode->size = ul_size;

	return pnode;
}

/*
 * ======== delete_node ========
 * Purpose:
 *	Put a memory node on the cmm nodelist for later use.
 *	Doesn't actually delete the node. Heap thrashing friendly.
 */
static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
{
	list_add_tail(&pnode->link, &cmm_mgr_obj->node_free_list);
}

/*
 * ======== get_free_block ========
 * Purpose:
 *	Scan the free block list and return the first block that satisfies
 *	the size.
 */
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
					u32 usize)
{
	struct cmm_mnode *node, *tmp;

	if (!allocator)
		return NULL;

	list_for_each_entry_safe(node, tmp, &allocator->free_list, link) {
		if (usize <= node->size) {
			list_del(&node->link);
			return node;
		}
	}

	return NULL;
}

/*
 * ======== add_to_free_list ========
 * Purpose:
 *	Coalesce node into the freelist in ascending size order.
 */
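/*
 * Illustrative example (not in the original source): if the free list
 * already holds a node with pa 0x1000 and size 0x100, freeing an adjacent
 * block at pa 0x1100 grows that node to size 0x200 (forward merge); freeing
 * a block that ends at 0x1000 instead rewrites the node's pa/va and grows
 * it backwards. Non-adjacent nodes are inserted in ascending size order.
 */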
static void add_to_free_list(struct cmm_allocator *allocator,
			     struct cmm_mnode *node)
{
	struct cmm_mnode *curr;

	if (!node) {
		pr_err("%s: failed - node is NULL\n", __func__);
		return;
	}

	list_for_each_entry(curr, &allocator->free_list, link) {
		if (NEXT_PA(curr) == node->pa) {
			curr->size += node->size;
			delete_node(allocator->cmm_mgr, node);
			return;
		}
		if (curr->pa == NEXT_PA(node)) {
			curr->pa = node->pa;
			curr->va = node->va;
			curr->size += node->size;
			delete_node(allocator->cmm_mgr, node);
			return;
		}
	}
	list_for_each_entry(curr, &allocator->free_list, link) {
		if (curr->size >= node->size) {
			list_add_tail(&node->link, &curr->link);
			return;
		}
	}
	list_add_tail(&node->link, &allocator->free_list);
}

/*
 * ======== get_allocator ========
 * Purpose:
 *	Return the allocator for the given SM Segid.
 *	SegIds: 1, 2, 3, ..., max.
 */
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
					   u32 ul_seg_id)
{
	return cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];
}

/*
 * The CMM_Xlator[xxx] routines below are used by Node and Stream
 * to perform SM address translation to the client process address space.
 * A "translator" object is created by a node/stream for each SM seg used.
 */

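/*
 * Illustrative usage sketch (not part of the original file; the client
 * window address below is hypothetical): a node/stream creates one
 * translator per SM segment, records the client's virtual window with
 * cmm_xlator_info(), and then maps SM physical addresses into that
 * window with cmm_xlator_translate().
 */
#if 0	/* example only */
static int xlator_usage_example(struct cmm_object *hcmm_mgr, u32 sm_pa)
{
	struct cmm_xlatorobject *xlator;
	u8 *client_va = (u8 *) 0x40000000;	/* hypothetical mapped window */
	void *va;
	int status;

	status = cmm_xlator_create(&xlator, hcmm_mgr, NULL);	/* seg_id 1 */
	if (status)
		return status;
	/* record the client window: base, size, default segment, set_info */
	cmm_xlator_info(xlator, &client_va, 0x10000, 1, true);
	/* translate an SM physical address into the client window */
	va = cmm_xlator_translate(xlator, (void *)sm_pa, CMM_PA2VA);
	return va ? 0 : -EFAULT;
}
#endif
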
/*
 * ======== cmm_xlator_create ========
 * Purpose:
 *	Create an address translator object.
 */
int cmm_xlator_create(struct cmm_xlatorobject **xlator,
		      struct cmm_object *hcmm_mgr,
		      struct cmm_xlatorattrs *xlator_attrs)
{
	struct cmm_xlator *xlator_object = NULL;
	int status = 0;

	*xlator = NULL;
	if (xlator_attrs == NULL)
		xlator_attrs = &cmm_dfltxlatorattrs;	/* set defaults */

	xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL);
	if (xlator_object != NULL) {
		xlator_object->cmm_mgr = hcmm_mgr;	/* ref back to CMM */
		/* SM seg_id */
		xlator_object->seg_id = xlator_attrs->seg_id;
	} else {
		status = -ENOMEM;
	}
	if (!status)
		*xlator = (struct cmm_xlatorobject *)xlator_object;

	return status;
}

/*
 * ======== cmm_xlator_alloc_buf ========
 */
void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf,
			   u32 pa_size)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	void *pbuf = NULL;
	void *tmp_va_buff;
	struct cmm_attrs attrs;

	if (xlator_obj) {
		attrs.seg_id = xlator_obj->seg_id;
		__raw_writel(0, va_buf);
		/* Alloc SM */
		pbuf =
		    cmm_calloc_buf(xlator_obj->cmm_mgr, pa_size, &attrs, NULL);
		if (pbuf) {
			/* convert to translator(node/strm) process Virtual
			 * address */
			tmp_va_buff = cmm_xlator_translate(xlator,
							   pbuf, CMM_PA2VA);
			__raw_writel((u32)tmp_va_buff, va_buf);
		}
	}
	return pbuf;
}

/*
 * ======== cmm_xlator_free_buf ========
 * Purpose:
 *	Free the given SM buffer and descriptor.
 *	Does not free virtual memory.
 */
int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	int status = -EPERM;
	void *buf_pa = NULL;

	if (xlator_obj) {
		/* convert Va to Pa so we can free it. */
		buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA);
		if (buf_pa) {
			status = cmm_free_buf(xlator_obj->cmm_mgr, buf_pa,
					      xlator_obj->seg_id);
			if (status) {
				/* Uh oh, this shouldn't happen. Descriptor
				 * gone! */
				pr_err("%s, line %d: Assertion failed\n",
				       __FILE__, __LINE__);
			}
		}
	}
	return status;
}

/*
 * ======== cmm_xlator_info ========
 * Purpose:
 *	Set/Get translator info.
 */
int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 **paddr,
		    u32 ul_size, u32 segm_id, bool set_info)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	int status = 0;

	if (xlator_obj) {
		if (set_info) {
			/* set translator's virtual address range */
			xlator_obj->virt_base = (u32) *paddr;
			xlator_obj->virt_size = ul_size;
		} else {	/* return virt base address */
			*paddr = (u8 *) xlator_obj->virt_base;
		}
	} else {
		status = -EFAULT;
	}
	return status;
}

/*
 * ======== cmm_xlator_translate ========
 */
void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
			   enum cmm_xlatetype xtype)
{
	u32 dw_addr_xlate = 0;
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	struct cmm_object *cmm_mgr_obj = NULL;
	struct cmm_allocator *allocator = NULL;
	u32 dw_offset = 0;

	if (!xlator_obj)
		goto loop_cont;

	cmm_mgr_obj = (struct cmm_object *)xlator_obj->cmm_mgr;
	/* get this translator's default SM allocator */
	allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->seg_id - 1];
	if (!allocator)
		goto loop_cont;

	if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_VA2PA) ||
	    (xtype == CMM_PA2VA)) {
		if (xtype == CMM_PA2VA) {
			/* Gpp Va = Va Base + offset */
			dw_offset = (u8 *) paddr -
			    (u8 *) (allocator->shm_base - allocator->dsp_size);
			dw_addr_xlate = xlator_obj->virt_base + dw_offset;
			/* Check if translated Va base is in range */
			if ((dw_addr_xlate < xlator_obj->virt_base) ||
			    (dw_addr_xlate >=
			     (xlator_obj->virt_base +
			      xlator_obj->virt_size))) {
				dw_addr_xlate = 0;	/* bad address */
			}
		} else {
			/* Gpp PA = Gpp Base + offset */
			dw_offset =
			    (u8 *) paddr - (u8 *) xlator_obj->virt_base;
			dw_addr_xlate =
			    allocator->shm_base - allocator->dsp_size +
			    dw_offset;
		}
	} else {
		dw_addr_xlate = (u32) paddr;
	}
	/* Now convert address to proper target physical address if needed */
	if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) {
		/* Got Gpp Pa now, convert to DSP Pa */
		dw_addr_xlate =
		    GPPPA2DSPPA((allocator->shm_base - allocator->dsp_size),
				dw_addr_xlate,
				allocator->dsp_phys_addr_offset *
				allocator->c_factor);
	} else if (xtype == CMM_DSPPA2PA) {
		/* Got DSP Pa, convert to GPP Pa */
		dw_addr_xlate =
		    DSPPA2GPPPA(allocator->shm_base - allocator->dsp_size,
				dw_addr_xlate,
				allocator->dsp_phys_addr_offset *
				allocator->c_factor);
	}
loop_cont:
	return (void *)dw_addr_xlate;
}
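
/*
 * Worked translation example (illustrative numbers): with shm_base
 * 0x87000000, dsp_size 0, and a translator whose virt_base is 0x40000000
 * and virt_size covers the segment, CMM_PA2VA maps PA 0x87000100 to
 * 0x40000000 + 0x100 = 0x40000100; CMM_VA2PA applies the inverse.
 */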