powerpc/ps3: Add ps3_mm_set_repository_highmem
arch/powerpc/platforms/ps3/mm.c
/*
 * PS3 address space management.
 *
 * Copyright (C) 2006 Sony Computer Entertainment Inc.
 * Copyright 2006 Sony Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/cell-regs.h>
#include <asm/firmware.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/setup.h>

#include "platform.h"

#if defined(DEBUG)
#define DBG udbg_printf
#else
#define DBG pr_devel
#endif

enum {
#if defined(CONFIG_PS3_DYNAMIC_DMA)
	USE_DYNAMIC_DMA = 1,
#else
	USE_DYNAMIC_DMA = 0,
#endif
};

enum {
	PAGE_SHIFT_4K = 12U,
	PAGE_SHIFT_64K = 16U,
	PAGE_SHIFT_16M = 24U,
};

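/*
 * Pack two page-size shift values into the 64-bit argument expected by
 * lv1_construct_virtual_address_space(): the first shift in bits 63:56,
 * the second in bits 55:48.
 */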
static unsigned long make_page_sizes(unsigned long a, unsigned long b)
{
	return (a << 56) | (b << 48);
}

enum {
	ALLOCATE_MEMORY_TRY_ALT_UNIT = 0x04,
	ALLOCATE_MEMORY_ADDR_ZERO = 0x08,
};

/* valid htab sizes are {18,19,20} = 256K, 512K, 1M */

enum {
	HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
	HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
};

/*============================================================================*/
/* virtual address space routines */
/*============================================================================*/

/**
 * struct mem_region - memory region structure
 * @base: base address
 * @size: size in bytes
 * @offset: difference between base and rm.size
 * @destroy: flag if region should be destroyed upon shutdown
 */

struct mem_region {
	u64 base;
	u64 size;
	unsigned long offset;
	int destroy;
};

/**
 * struct map - address space state variables holder
 * @total: total memory available as reported by HV
 * @vas_id: HV virtual address space id
 * @htab_size: htab size in bytes
 *
 * The HV virtual address space (vas) allows for hotplug memory regions.
 * Memory regions can be created and destroyed in the vas at runtime.
 * @rm: real mode (bootmem) region
 * @r1: highmem region(s)
 *
 * ps3 addresses
 * virt_addr: a cpu 'translated' effective address
 * phys_addr: an address in what Linux thinks is the physical address space
 * lpar_addr: an address in the HV virtual address space
 * bus_addr: an io controller 'translated' address on a device bus
 */

struct map {
	u64 total;
	u64 vas_id;
	u64 htab_size;
	struct mem_region rm;
	struct mem_region r1;
};

#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
static void __maybe_unused _debug_dump_map(const struct map *m,
	const char *func, int line)
{
	DBG("%s:%d: map.total = %llxh\n", func, line, m->total);
	DBG("%s:%d: map.rm.size = %llxh\n", func, line, m->rm.size);
	DBG("%s:%d: map.vas_id = %llu\n", func, line, m->vas_id);
	DBG("%s:%d: map.htab_size = %llxh\n", func, line, m->htab_size);
	DBG("%s:%d: map.r1.base = %llxh\n", func, line, m->r1.base);
	DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
	DBG("%s:%d: map.r1.size = %llxh\n", func, line, m->r1.size);
}

static struct map map;

/**
 * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
 * @phys_addr: linux physical address
 */

unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
{
	BUG_ON(is_kernel_addr(phys_addr));
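	/*
	 * The real-mode region is identity mapped; addresses falling in
	 * the highmem region are shifted up by r1.offset into the lpar
	 * region the HV allocated for it.
	 */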
	return (phys_addr < map.rm.size || phys_addr >= map.total)
		? phys_addr : phys_addr + map.r1.offset;
}

EXPORT_SYMBOL(ps3_mm_phys_to_lpar);

/**
 * ps3_mm_vas_create - create the virtual address space
 */

void __init ps3_mm_vas_create(unsigned long *htab_size)
{
	int result;
	u64 start_address;
	u64 size;
	u64 access_right;
	u64 max_page_size;
	u64 flags;

	result = lv1_query_logical_partition_address_region_info(0,
		&start_address, &size, &access_right, &max_page_size,
		&flags);

	if (result) {
		DBG("%s:%d: lv1_query_logical_partition_address_region_info "
		    "failed: %s\n", __func__, __LINE__,
		    ps3_result(result));
		goto fail;
	}

	if (max_page_size < PAGE_SHIFT_16M) {
		DBG("%s:%d: bad max_page_size %llxh\n", __func__, __LINE__,
		    max_page_size);
		goto fail;
	}

	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);

	result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
			2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
			&map.vas_id, &map.htab_size);

	if (result) {
		DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
		    __func__, __LINE__, ps3_result(result));
		goto fail;
	}

	result = lv1_select_virtual_address_space(map.vas_id);

	if (result) {
		DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
		    __func__, __LINE__, ps3_result(result));
		goto fail;
	}

	*htab_size = map.htab_size;

	debug_dump_map(&map);

	return;

fail:
	panic("ps3_mm_vas_create failed");
}

/**
 * ps3_mm_vas_destroy - destroy the virtual address space
 */

void ps3_mm_vas_destroy(void)
{
	int result;

	DBG("%s:%d: map.vas_id = %llu\n", __func__, __LINE__, map.vas_id);

	if (map.vas_id) {
		result = lv1_select_virtual_address_space(0);
		BUG_ON(result);
		result = lv1_destruct_virtual_address_space(map.vas_id);
		BUG_ON(result);
		map.vas_id = 0;
	}
}

static int ps3_mm_get_repository_highmem(struct mem_region *r)
{
	int result;

	/* Assume a single highmem region. */

	result = ps3_repository_read_highmem_info(0, &r->base, &r->size);

	if (result)
		goto zero_region;

	if (!r->base || !r->size) {
		result = -1;
		goto zero_region;
	}

	r->offset = r->base - map.rm.size;

	DBG("%s:%d: Found high region in repository: %llxh %llxh\n",
	    __func__, __LINE__, r->base, r->size);

	return 0;

zero_region:
	DBG("%s:%d: No high region in repository.\n", __func__, __LINE__);

	r->size = r->base = r->offset = 0;
	return result;
}

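/*
 * Write the highmem region info to the repository so a later boot step
 * can recover it via ps3_mm_get_repository_highmem() (see the check in
 * ps3_mm_init()).  Passing a NULL region clears the repository entry.
 */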
static int ps3_mm_set_repository_highmem(const struct mem_region *r)
{
	/* Assume a single highmem region. */

	return r ? ps3_repository_write_highmem_info(0, r->base, r->size) :
		ps3_repository_write_highmem_info(0, 0, 0);
}

/**
 * ps3_mm_region_create - create a memory region in the vas
 * @r: pointer to a struct mem_region to accept initialized values
 * @size: requested region size
 *
 * This implementation creates the region with the vas large page size.
 * @size is rounded down to a multiple of the vas large page size.
 */

static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
{
	int result;
	u64 muid;

	r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);

	DBG("%s:%d requested %lxh\n", __func__, __LINE__, size);
	DBG("%s:%d actual %llxh\n", __func__, __LINE__, r->size);
	DBG("%s:%d difference %llxh (%lluMB)\n", __func__, __LINE__,
	    size - r->size, (size - r->size) / 1024 / 1024);

	if (r->size == 0) {
		DBG("%s:%d: size == 0\n", __func__, __LINE__);
		result = -1;
		goto zero_region;
	}

	result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
		ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);

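	/*
	 * The highmem region must land above the real-mode region,
	 * otherwise the offset arithmetic in ps3_mm_phys_to_lpar()
	 * would not hold, so a base below map.rm.size is treated as
	 * a failure.
	 */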
	if (result || r->base < map.rm.size) {
		DBG("%s:%d: lv1_allocate_memory failed: %s\n",
		    __func__, __LINE__, ps3_result(result));
		goto zero_region;
	}

	r->destroy = 1;
	r->offset = r->base - map.rm.size;
	return result;

zero_region:
	r->size = r->base = r->offset = 0;
	return result;
}

/**
 * ps3_mm_region_destroy - destroy a memory region
 * @r: pointer to struct mem_region
 */

static void ps3_mm_region_destroy(struct mem_region *r)
{
	int result;

	if (!r->destroy) {
		pr_info("%s:%d: Not destroying high region: %llxh %llxh\n",
			__func__, __LINE__, r->base, r->size);
		return;
	}

	DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base);

	if (r->base) {
		result = lv1_release_memory(r->base);
		BUG_ON(result);
		r->size = r->base = r->offset = 0;
		map.total = map.rm.size;
	}
}

/*============================================================================*/
/* dma routines */
/*============================================================================*/

/**
 * dma_sb_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
 * @r: pointer to dma region structure
 * @lpar_addr: HV lpar address
 */

static unsigned long dma_sb_lpar_to_bus(struct ps3_dma_region *r,
	unsigned long lpar_addr)
{
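	/*
	 * Fold a highmem lpar address back into the contiguous linux
	 * physical view (the inverse of ps3_mm_phys_to_lpar()) before
	 * offsetting into the ioc bus window.
	 */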
	if (lpar_addr >= map.rm.size)
		lpar_addr -= map.r1.offset;
	BUG_ON(lpar_addr < r->offset);
	BUG_ON(lpar_addr >= r->offset + r->len);
	return r->bus_addr + lpar_addr - r->offset;
}

#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
static void __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
	const char *func, int line)
{
	DBG("%s:%d: dev %llu:%llu\n", func, line, r->dev->bus_id,
	    r->dev->dev_id);
	DBG("%s:%d: page_size %u\n", func, line, r->page_size);
	DBG("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr);
	DBG("%s:%d: len %lxh\n", func, line, r->len);
	DBG("%s:%d: offset %lxh\n", func, line, r->offset);
}

/**
 * dma_chunk - A chunk of dma pages mapped by the io controller.
 * @region: The dma region that owns this chunk.
 * @lpar_addr: Starting lpar address of the area to map.
 * @bus_addr: Starting ioc bus address of the area to map.
 * @len: Length in bytes of the area to map.
 * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
 * list of all chunks owned by the region.
 *
 * This implementation uses a very simple dma page manager
 * based on the dma_chunk structure.  This scheme assumes
 * that all drivers use very well behaved dma ops.
 */

struct dma_chunk {
	struct ps3_dma_region *region;
	unsigned long lpar_addr;
	unsigned long bus_addr;
	unsigned long len;
	struct list_head link;
	unsigned int usage_count;
};

#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
static void _dma_dump_chunk(const struct dma_chunk *c, const char *func,
	int line)
{
	DBG("%s:%d: r.dev %llu:%llu\n", func, line,
	    c->region->dev->bus_id, c->region->dev->dev_id);
	DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr);
	DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size);
	DBG("%s:%d: r.len %lxh\n", func, line, c->region->len);
	DBG("%s:%d: r.offset %lxh\n", func, line, c->region->offset);
	DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr);
	DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr);
	DBG("%s:%d: c.len %lxh\n", func, line, c->len);
}

static struct dma_chunk *dma_find_chunk(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)
{
	struct dma_chunk *c;
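	/* Widen the lookup to whole dma pages so partial-page requests
	 * match the chunk that covers them. */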
	unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + bus_addr - aligned_bus,
					      1 << r->page_size);

	list_for_each_entry(c, &r->chunk_list.head, link) {
		/* intersection */
		if (aligned_bus >= c->bus_addr &&
		    aligned_bus + aligned_len <= c->bus_addr + c->len)
			return c;

		/* below */
		if (aligned_bus + aligned_len <= c->bus_addr)
			continue;

		/* above */
		if (aligned_bus >= c->bus_addr + c->len)
			continue;

		/* we don't handle the multi-chunk case for now */
		dma_dump_chunk(c);
		BUG();
	}
	return NULL;
}

static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r,
	unsigned long lpar_addr, unsigned long len)
{
	struct dma_chunk *c;
	unsigned long aligned_lpar = _ALIGN_DOWN(lpar_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + lpar_addr - aligned_lpar,
					      1 << r->page_size);

	list_for_each_entry(c, &r->chunk_list.head, link) {
		/* intersection */
		if (c->lpar_addr <= aligned_lpar &&
		    aligned_lpar < c->lpar_addr + c->len) {
			if (aligned_lpar + aligned_len <= c->lpar_addr + c->len)
				return c;
			else {
				dma_dump_chunk(c);
				BUG();
			}
		}
		/* below */
		if (aligned_lpar + aligned_len <= c->lpar_addr) {
			continue;
		}
		/* above */
		if (c->lpar_addr + c->len <= aligned_lpar) {
			continue;
		}
	}
	return NULL;
}

static int dma_sb_free_chunk(struct dma_chunk *c)
{
	int result = 0;

	if (c->bus_addr) {
		result = lv1_unmap_device_dma_region(c->region->dev->bus_id,
			c->region->dev->dev_id, c->bus_addr, c->len);
		BUG_ON(result);
	}

	kfree(c);
	return result;
}

static int dma_ioc0_free_chunk(struct dma_chunk *c)
{
	int result = 0;
	int iopage;
	unsigned long offset;
	struct ps3_dma_region *r = c->region;

	DBG("%s:start\n", __func__);
	for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) {
		offset = (1 << r->page_size) * iopage;
		/* put INVALID entry */
		result = lv1_put_iopte(0,
				       c->bus_addr + offset,
				       c->lpar_addr + offset,
				       r->ioid,
				       0);
		DBG("%s: bus=%#lx, lpar=%#lx, ioid=%d\n", __func__,
		    c->bus_addr + offset,
		    c->lpar_addr + offset,
		    r->ioid);

		if (result) {
			DBG("%s:%d: lv1_put_iopte failed: %s\n", __func__,
			    __LINE__, ps3_result(result));
		}
	}
	kfree(c);
	DBG("%s:end\n", __func__);
	return result;
}

/**
 * dma_sb_map_pages - Maps dma pages into the io controller bus address space.
 * @r: Pointer to a struct ps3_dma_region.
 * @phys_addr: Starting physical address of the area to map.
 * @len: Length in bytes of the area to map.
 * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
 *
 * This is the lowest level dma mapping routine, and is the one that will
 * make the HV call to add the pages into the io controller address space.
 */

static int dma_sb_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
	unsigned long len, struct dma_chunk **c_out, u64 iopte_flag)
{
	int result;
	struct dma_chunk *c;

	c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

	if (!c) {
		result = -ENOMEM;
		goto fail_alloc;
	}

	c->region = r;
	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	c->bus_addr = dma_sb_lpar_to_bus(r, c->lpar_addr);
	c->len = len;

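	/*
	 * 0xf800000000000000UL is CBE_IOPTE_PP_W | CBE_IOPTE_PP_R |
	 * CBE_IOPTE_SO_RW | CBE_IOPTE_M, the only flag combination the
	 * callers in this file pass down (see
	 * dma_sb_region_create_linear()).
	 */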
	BUG_ON(iopte_flag != 0xf800000000000000UL);
	result = lv1_map_device_dma_region(c->region->dev->bus_id,
					   c->region->dev->dev_id, c->lpar_addr,
					   c->bus_addr, c->len, iopte_flag);
	if (result) {
		DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
		    __func__, __LINE__, ps3_result(result));
		goto fail_map;
	}

	list_add(&c->link, &r->chunk_list.head);

	*c_out = c;
	return 0;

fail_map:
	kfree(c);
fail_alloc:
	*c_out = NULL;
	DBG(" <- %s:%d\n", __func__, __LINE__);
	return result;
}

static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
			      unsigned long len, struct dma_chunk **c_out,
			      u64 iopte_flag)
{
	int result;
	struct dma_chunk *c, *last;
	int iopage, pages;
	unsigned long offset;

	DBG(KERN_ERR "%s: phy=%#lx, lpar=%#lx, len=%#lx\n", __func__,
	    phys_addr, ps3_mm_phys_to_lpar(phys_addr), len);
	c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

	if (!c) {
		result = -ENOMEM;
		goto fail_alloc;
	}

	c->region = r;
	c->len = len;
	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	/* allocate IO address */
	if (list_empty(&r->chunk_list.head)) {
		/* first one */
		c->bus_addr = r->bus_addr;
	} else {
		/* derive from last bus addr */
		last = list_entry(r->chunk_list.head.next,
				  struct dma_chunk, link);
		c->bus_addr = last->bus_addr + last->len;
		DBG("%s: last bus=%#lx, len=%#lx\n", __func__,
		    last->bus_addr, last->len);
	}

	/* FIXME: check whether length exceeds region size */

	/* build ioptes for the area */
	pages = len >> r->page_size;
	DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#llx\n", __func__,
	    r->page_size, r->len, pages, iopte_flag);
	for (iopage = 0; iopage < pages; iopage++) {
		offset = (1 << r->page_size) * iopage;
		result = lv1_put_iopte(0,
				       c->bus_addr + offset,
				       c->lpar_addr + offset,
				       r->ioid,
				       iopte_flag);
		if (result) {
			pr_warning("%s:%d: lv1_put_iopte failed: %s\n",
				   __func__, __LINE__, ps3_result(result));
			goto fail_map;
		}
		DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__,
		    iopage, c->bus_addr + offset, c->lpar_addr + offset,
		    r->ioid);
	}

	/* be sure that last allocated one is inserted at head */
	list_add(&c->link, &r->chunk_list.head);

	*c_out = c;
	DBG("%s: end\n", __func__);
	return 0;

fail_map:
	for (iopage--; 0 <= iopage; iopage--) {
		/* recompute the per-page offset when unwinding */
		offset = (1 << r->page_size) * iopage;
		lv1_put_iopte(0,
			      c->bus_addr + offset,
			      c->lpar_addr + offset,
			      r->ioid,
			      0);
	}
	kfree(c);
fail_alloc:
	*c_out = NULL;
	return result;
}

/**
 * dma_sb_region_create - Create a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region create routine, and is the one that
 * will make the HV call to create the region.
 */

static int dma_sb_region_create(struct ps3_dma_region *r)
{
	int result;
	u64 bus_addr;

	DBG(" -> %s:%d:\n", __func__, __LINE__);

	BUG_ON(!r);

	if (!r->dev->bus_id) {
		pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
			r->dev->bus_id, r->dev->dev_id);
		return 0;
	}

	DBG("%s:%u: len = 0x%lx, page_size = %u, offset = 0x%lx\n", __func__,
	    __LINE__, r->len, r->page_size, r->offset);

	BUG_ON(!r->len);
	BUG_ON(!r->page_size);
	BUG_ON(!r->region_ops);

	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

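	/* The region length is rounded up to a power of two for the HV
	 * allocation call. */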
	result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id,
		roundup_pow_of_two(r->len), r->page_size, r->region_type,
		&bus_addr);
	r->bus_addr = bus_addr;

	if (result) {
		DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
		    __func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;
	}

	return result;
}

static int dma_ioc0_region_create(struct ps3_dma_region *r)
{
	int result;
	u64 bus_addr;

	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

	result = lv1_allocate_io_segment(0,
					 r->len,
					 r->page_size,
					 &bus_addr);
	r->bus_addr = bus_addr;
	if (result) {
		DBG("%s:%d: lv1_allocate_io_segment failed: %s\n",
		    __func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;
	}
	DBG("%s: len=%#lx, pg=%d, bus=%#lx\n", __func__,
	    r->len, r->page_size, r->bus_addr);
	return result;
}

/**
 * dma_sb_region_free - Free a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region free routine, and is the one that
 * will make the HV call to free the region.
 */

static int dma_sb_region_free(struct ps3_dma_region *r)
{
	int result;
	struct dma_chunk *c;
	struct dma_chunk *tmp;

	BUG_ON(!r);

	if (!r->dev->bus_id) {
		pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
			r->dev->bus_id, r->dev->dev_id);
		return 0;
	}

	list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
		list_del(&c->link);
		dma_sb_free_chunk(c);
	}

	result = lv1_free_device_dma_region(r->dev->bus_id, r->dev->dev_id,
					    r->bus_addr);

	if (result)
		DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
		    __func__, __LINE__, ps3_result(result));

	r->bus_addr = 0;

	return result;
}

static int dma_ioc0_region_free(struct ps3_dma_region *r)
{
	int result;
	struct dma_chunk *c, *n;

	DBG("%s: start\n", __func__);
	list_for_each_entry_safe(c, n, &r->chunk_list.head, link) {
		list_del(&c->link);
		dma_ioc0_free_chunk(c);
	}

	result = lv1_release_io_segment(0, r->bus_addr);

	if (result)
		DBG("%s:%d: lv1_release_io_segment failed: %s\n",
		    __func__, __LINE__, ps3_result(result));

	r->bus_addr = 0;
	DBG("%s: end\n", __func__);

	return result;
}

/**
 * dma_sb_map_area - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This is the common dma mapping routine.
 */

static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, dma_addr_t *bus_addr,
	u64 iopte_flag)
{
	int result;
	unsigned long flags;
	struct dma_chunk *c;
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
					      1 << r->page_size);
	*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));

	if (!USE_DYNAMIC_DMA) {
		unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
		DBG(" -> %s:%d\n", __func__, __LINE__);
		DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
		    virt_addr);
		DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
		    phys_addr);
		DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
		    lpar_addr);
		DBG("%s:%d len %lxh\n", __func__, __LINE__, len);
		DBG("%s:%d bus_addr %llxh (%lxh)\n", __func__, __LINE__,
		    *bus_addr, len);
	}

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, *bus_addr, len);

	if (c) {
		DBG("%s:%d: reusing mapped chunk", __func__, __LINE__);
		dma_dump_chunk(c);
		c->usage_count++;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return 0;
	}

	result = dma_sb_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag);

	if (result) {
		*bus_addr = 0;
		DBG("%s:%d: dma_sb_map_pages failed (%d)\n",
		    __func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}

	c->usage_count = 1;

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return result;
}

static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, dma_addr_t *bus_addr,
	u64 iopte_flag)
{
	int result;
	unsigned long flags;
	struct dma_chunk *c;
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
					      1 << r->page_size);

	DBG(KERN_ERR "%s: vaddr=%#lx, len=%#lx\n", __func__,
	    virt_addr, len);
	DBG(KERN_ERR "%s: ph=%#lx a_ph=%#lx a_l=%#lx\n", __func__,
	    phys_addr, aligned_phys, aligned_len);

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk_lpar(r, ps3_mm_phys_to_lpar(phys_addr), len);

	if (c) {
		/* FIXME */
		BUG();
		*bus_addr = c->bus_addr + phys_addr - aligned_phys;
		c->usage_count++;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return 0;
	}

	result = dma_ioc0_map_pages(r, aligned_phys, aligned_len, &c,
				    iopte_flag);

	if (result) {
		*bus_addr = 0;
		DBG("%s:%d: dma_ioc0_map_pages failed (%d)\n",
		    __func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}
	*bus_addr = c->bus_addr + phys_addr - aligned_phys;
	DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#llx\n", __func__,
	    virt_addr, phys_addr, aligned_phys, *bus_addr);
	c->usage_count = 1;

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return result;
}

/**
 * dma_sb_unmap_area - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This is the common dma unmap routine.
 */

static int dma_sb_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr,
	unsigned long len)
{
	unsigned long flags;
	struct dma_chunk *c;

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

	if (!c) {
		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
			- aligned_bus, 1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %llxh\n",
		    __func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
		    __func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
		    __func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
		    __func__, __LINE__, aligned_len);
		BUG();
	}

	c->usage_count--;

	if (!c->usage_count) {
		list_del(&c->link);
		dma_sb_free_chunk(c);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return 0;
}

static int dma_ioc0_unmap_area(struct ps3_dma_region *r,
	dma_addr_t bus_addr, unsigned long len)
{
	unsigned long flags;
	struct dma_chunk *c;

	DBG("%s: start a=%#llx l=%#lx\n", __func__, bus_addr, len);
	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

	if (!c) {
		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
			- aligned_bus,
			1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %llxh\n",
		    __func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
		    __func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
		    __func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
		    __func__, __LINE__, aligned_len);
		BUG();
	}

	c->usage_count--;

	if (!c->usage_count) {
		list_del(&c->link);
		dma_ioc0_free_chunk(c);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	DBG("%s: end\n", __func__);
	return 0;
}

/**
 * dma_sb_region_create_linear - Setup a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine creates an HV dma region for the device and maps all available
 * ram into the io controller bus address space.
 */

static int dma_sb_region_create_linear(struct ps3_dma_region *r)
{
	int result;
	unsigned long virt_addr, len;
	dma_addr_t tmp;

	if (r->len > 16*1024*1024) { /* FIXME: need proper fix */
		/* force 16M dma pages for linear mapping */
		if (r->page_size != PS3_DMA_16M) {
			pr_info("%s:%d: forcing 16M pages for linear map\n",
				__func__, __LINE__);
			r->page_size = PS3_DMA_16M;
			r->len = _ALIGN_UP(r->len, 1 << r->page_size);
		}
	}

	result = dma_sb_region_create(r);
	BUG_ON(result);

	if (r->offset < map.rm.size) {
		/* Map (part of) 1st RAM chunk */
		virt_addr = map.rm.base + r->offset;
		len = map.rm.size - r->offset;
		if (len > r->len)
			len = r->len;
		result = dma_sb_map_area(r, virt_addr, len, &tmp,
			CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
			CBE_IOPTE_M);
		BUG_ON(result);
	}

	if (r->offset + r->len > map.rm.size) {
		/* Map (part of) 2nd RAM chunk */
		virt_addr = map.rm.size;
		len = r->len;
		if (r->offset >= map.rm.size)
			virt_addr += r->offset - map.rm.size;
		else
			len -= map.rm.size - r->offset;
		result = dma_sb_map_area(r, virt_addr, len, &tmp,
			CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
			CBE_IOPTE_M);
		BUG_ON(result);
	}

	return result;
}

/**
 * dma_sb_region_free_linear - Free a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine will unmap all mapped areas and free the HV dma region.
 */

static int dma_sb_region_free_linear(struct ps3_dma_region *r)
{
	int result;
	dma_addr_t bus_addr;
	unsigned long len, lpar_addr;

	if (r->offset < map.rm.size) {
		/* Unmap (part of) 1st RAM chunk */
		lpar_addr = map.rm.base + r->offset;
		len = map.rm.size - r->offset;
		if (len > r->len)
			len = r->len;
		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
		result = dma_sb_unmap_area(r, bus_addr, len);
		BUG_ON(result);
	}

	if (r->offset + r->len > map.rm.size) {
		/* Unmap (part of) 2nd RAM chunk */
		lpar_addr = map.r1.base;
		len = r->len;
		if (r->offset >= map.rm.size)
			lpar_addr += r->offset - map.rm.size;
		else
			len -= map.rm.size - r->offset;
		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
		result = dma_sb_unmap_area(r, bus_addr, len);
		BUG_ON(result);
	}

	result = dma_sb_region_free(r);
	BUG_ON(result);

	return result;
}

/**
 * dma_sb_map_area_linear - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This routine just returns the corresponding bus address.  Actual mapping
 * occurs in dma_sb_region_create_linear().
 */

static int dma_sb_map_area_linear(struct ps3_dma_region *r,
	unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr,
	u64 iopte_flag)
{
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
	return 0;
}

/**
 * dma_sb_unmap_area_linear - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This routine does nothing.  Unmapping occurs in dma_sb_region_free_linear().
 */

static int dma_sb_unmap_area_linear(struct ps3_dma_region *r,
	dma_addr_t bus_addr, unsigned long len)
{
	return 0;
}

static const struct ps3_dma_region_ops ps3_dma_sb_region_ops = {
	.create = dma_sb_region_create,
	.free = dma_sb_region_free,
	.map = dma_sb_map_area,
	.unmap = dma_sb_unmap_area
};

static const struct ps3_dma_region_ops ps3_dma_sb_region_linear_ops = {
	.create = dma_sb_region_create_linear,
	.free = dma_sb_region_free_linear,
	.map = dma_sb_map_area_linear,
	.unmap = dma_sb_unmap_area_linear
};

static const struct ps3_dma_region_ops ps3_dma_ioc0_region_ops = {
	.create = dma_ioc0_region_create,
	.free = dma_ioc0_region_free,
	.map = dma_ioc0_map_area,
	.unmap = dma_ioc0_unmap_area
};

int ps3_dma_region_init(struct ps3_system_bus_device *dev,
	struct ps3_dma_region *r, enum ps3_dma_page_size page_size,
	enum ps3_dma_region_type region_type, void *addr, unsigned long len)
{
	unsigned long lpar_addr;

	lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0;

	r->dev = dev;
	r->page_size = page_size;
	r->region_type = region_type;
	r->offset = lpar_addr;
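	/* Highmem lpar addresses are folded back into the contiguous
	 * linux physical view, mirroring dma_sb_lpar_to_bus(). */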
	if (r->offset >= map.rm.size)
		r->offset -= map.r1.offset;
	r->len = len ? len : _ALIGN_UP(map.total, 1 << r->page_size);

	switch (dev->dev_type) {
	case PS3_DEVICE_TYPE_SB:
		r->region_ops = (USE_DYNAMIC_DMA)
			? &ps3_dma_sb_region_ops
			: &ps3_dma_sb_region_linear_ops;
		break;
	case PS3_DEVICE_TYPE_IOC0:
		r->region_ops = &ps3_dma_ioc0_region_ops;
		break;
	default:
		BUG();
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(ps3_dma_region_init);

int ps3_dma_region_create(struct ps3_dma_region *r)
{
	BUG_ON(!r);
	BUG_ON(!r->region_ops);
	BUG_ON(!r->region_ops->create);
	return r->region_ops->create(r);
}
EXPORT_SYMBOL(ps3_dma_region_create);

int ps3_dma_region_free(struct ps3_dma_region *r)
{
	BUG_ON(!r);
	BUG_ON(!r->region_ops);
	BUG_ON(!r->region_ops->free);
	return r->region_ops->free(r);
}
EXPORT_SYMBOL(ps3_dma_region_free);

int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, dma_addr_t *bus_addr,
	u64 iopte_flag)
{
	return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag);
}

int ps3_dma_unmap(struct ps3_dma_region *r, dma_addr_t bus_addr,
	unsigned long len)
{
	return r->region_ops->unmap(r, bus_addr, len);
}

/*============================================================================*/
/* system startup routines */
/*============================================================================*/

/**
 * ps3_mm_init - initialize the address space state variables
 */

void __init ps3_mm_init(void)
{
	int result;

	DBG(" -> %s:%d\n", __func__, __LINE__);

	result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
					     &map.total);

	if (result)
		panic("ps3_repository_read_mm_info() failed");

	map.rm.offset = map.rm.base;
	map.vas_id = map.htab_size = 0;

	/* this implementation assumes map.rm.base is zero */

	BUG_ON(map.rm.base);
	BUG_ON(!map.rm.size);

	/* Check if we got the highmem region from an earlier boot step */

	if (ps3_mm_get_repository_highmem(&map.r1))
		ps3_mm_region_create(&map.r1, map.total - map.rm.size);

	/* correct map.total for the real total amount of memory we use */
	map.total = map.rm.size + map.r1.size;

	if (!map.r1.size) {
		DBG("%s:%d: No highmem region found\n", __func__, __LINE__);
	} else {
		DBG("%s:%d: Adding highmem region: %llxh %llxh\n",
		    __func__, __LINE__, map.rm.size,
		    map.total - map.rm.size);
		memblock_add(map.rm.size, map.total - map.rm.size);
	}

	DBG(" <- %s:%d\n", __func__, __LINE__);
}

/**
 * ps3_mm_shutdown - final cleanup of address space
 */

void ps3_mm_shutdown(void)
{
	ps3_mm_region_destroy(&map.r1);
}