powerpc/ps3: The lv1_ routines have u64 parameters
arch/powerpc/platforms/ps3/mm.c
/*
 * PS3 address space management.
 *
 * Copyright (C) 2006 Sony Computer Entertainment Inc.
 * Copyright 2006 Sony Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/lmb.h>

#include <asm/firmware.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>

#include "platform.h"

#if defined(DEBUG)
#define DBG udbg_printf
#else
#define DBG pr_debug
#endif

enum {
#if defined(CONFIG_PS3_DYNAMIC_DMA)
        USE_DYNAMIC_DMA = 1,
#else
        USE_DYNAMIC_DMA = 0,
#endif
};

enum {
        PAGE_SHIFT_4K = 12U,
        PAGE_SHIFT_64K = 16U,
        PAGE_SHIFT_16M = 24U,
};

static unsigned long make_page_sizes(unsigned long a, unsigned long b)
{
        return (a << 56) | (b << 48);
}
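
/*
 * Illustrative example (not part of the original source): for the call
 * made in ps3_mm_vas_create() below, make_page_sizes(PAGE_SHIFT_16M,
 * PAGE_SHIFT_64K) packs 24 (0x18) into bits 63:56 and 16 (0x10) into
 * bits 55:48, producing 0x1810000000000000 - the page_sizes encoding
 * passed to lv1_construct_virtual_address_space().
 */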

enum {
        ALLOCATE_MEMORY_TRY_ALT_UNIT = 0x04,
        ALLOCATE_MEMORY_ADDR_ZERO = 0x08,
};

/* valid htab sizes are {18,19,20} = 256K, 512K, 1M */

enum {
        HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
        HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
};

/*============================================================================*/
/* virtual address space routines */
/*============================================================================*/

/**
 * struct mem_region - memory region structure
 * @base: base address
 * @size: size in bytes
 * @offset: difference between base and rm.size
 */

struct mem_region {
        u64 base;
        unsigned long size;
        unsigned long offset;
};

/**
 * struct map - address space state variables holder
 * @total: total memory available as reported by HV
 * @vas_id: HV virtual address space id
 * @htab_size: htab size in bytes
 *
 * The HV virtual address space (vas) allows for hotplug memory regions.
 * Memory regions can be created and destroyed in the vas at runtime.
 * @rm: real mode (bootmem) region
 * @r1: hotplug memory region(s)
 *
 * ps3 addresses
 * virt_addr: a cpu 'translated' effective address
 * phys_addr: an address in what Linux thinks is the physical address space
 * lpar_addr: an address in the HV virtual address space
 * bus_addr: an io controller 'translated' address on a device bus
 */

struct map {
        unsigned long total;
        u64 vas_id;
        u64 htab_size;
        struct mem_region rm;
        struct mem_region r1;
};
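
/*
 * Illustrative layout (hypothetical numbers, not from the original source):
 * with map.rm.size = 0x8000000 (128MB of real mode memory at lpar address 0)
 * and a second region of 0x8000000 bytes allocated by the HV at lpar address
 * 0x700000000, Linux sees one contiguous physical space of map.total =
 * 0x10000000 bytes, and map.r1.offset = r1.base - map.rm.size = 0x6f8000000
 * converts a physical address in the second half into its lpar address.
 */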

#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
static void __maybe_unused _debug_dump_map(const struct map *m,
        const char *func, int line)
{
        DBG("%s:%d: map.total = %lxh\n", func, line, m->total);
        DBG("%s:%d: map.rm.size = %lxh\n", func, line, m->rm.size);
        DBG("%s:%d: map.vas_id = %llu\n", func, line, m->vas_id);
        DBG("%s:%d: map.htab_size = %llxh\n", func, line, m->htab_size);
        DBG("%s:%d: map.r1.base = %llxh\n", func, line, m->r1.base);
        DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
        DBG("%s:%d: map.r1.size = %lxh\n", func, line, m->r1.size);
}

static struct map map;

/**
 * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
 * @phys_addr: linux physical address
 */

unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
{
        BUG_ON(is_kernel_addr(phys_addr));
        return (phys_addr < map.rm.size || phys_addr >= map.total)
                ? phys_addr : phys_addr + map.r1.offset;
}

EXPORT_SYMBOL(ps3_mm_phys_to_lpar);
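
/*
 * Example (hypothetical numbers): with map.rm.size = 0x8000000, map.total =
 * 0x10000000 and map.r1.offset = 0x6f8000000, ps3_mm_phys_to_lpar(0x9000000)
 * returns 0x9000000 + 0x6f8000000 = 0x701000000, while addresses below
 * map.rm.size or at/above map.total are returned unchanged.
 */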

/**
 * ps3_mm_vas_create - create the virtual address space
 */

void __init ps3_mm_vas_create(unsigned long *htab_size)
{
        int result;
        u64 start_address;
        u64 size;
        u64 access_right;
        u64 max_page_size;
        u64 flags;

        result = lv1_query_logical_partition_address_region_info(0,
                &start_address, &size, &access_right, &max_page_size,
                &flags);

        if (result) {
                DBG("%s:%d: lv1_query_logical_partition_address_region_info "
                        "failed: %s\n", __func__, __LINE__,
                        ps3_result(result));
                goto fail;
        }

        if (max_page_size < PAGE_SHIFT_16M) {
                DBG("%s:%d: bad max_page_size %llxh\n", __func__, __LINE__,
                        max_page_size);
                goto fail;
        }

        BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
        BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);

        result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
                2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
                &map.vas_id, &map.htab_size);

        if (result) {
                DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
                        __func__, __LINE__, ps3_result(result));
                goto fail;
        }

        result = lv1_select_virtual_address_space(map.vas_id);

        if (result) {
                DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
                        __func__, __LINE__, ps3_result(result));
                goto fail;
        }

        *htab_size = map.htab_size;

        debug_dump_map(&map);

        return;

fail:
        panic("ps3_mm_vas_create failed");
}

/**
 * ps3_mm_vas_destroy - destroy the virtual address space
 */

void ps3_mm_vas_destroy(void)
{
        int result;

        DBG("%s:%d: map.vas_id = %llu\n", __func__, __LINE__, map.vas_id);

        if (map.vas_id) {
                result = lv1_select_virtual_address_space(0);
                BUG_ON(result);
                result = lv1_destruct_virtual_address_space(map.vas_id);
                BUG_ON(result);
                map.vas_id = 0;
        }
}

/*============================================================================*/
/* memory hotplug routines */
/*============================================================================*/

/**
 * ps3_mm_region_create - create a memory region in the vas
 * @r: pointer to a struct mem_region to accept initialized values
 * @size: requested region size
 *
 * This implementation creates the region with the vas large page size.
 * @size is rounded down to a multiple of the vas large page size.
 */

static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
{
        int result;
        u64 muid;

        r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);

        DBG("%s:%d requested %lxh\n", __func__, __LINE__, size);
        DBG("%s:%d actual %lxh\n", __func__, __LINE__, r->size);
        DBG("%s:%d difference %lxh (%luMB)\n", __func__, __LINE__,
                (unsigned long)(size - r->size),
                (size - r->size) / 1024 / 1024);

        if (r->size == 0) {
                DBG("%s:%d: size == 0\n", __func__, __LINE__);
                result = -1;
                goto zero_region;
        }

        result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
                ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);

        if (result || r->base < map.rm.size) {
                DBG("%s:%d: lv1_allocate_memory failed: %s\n",
                        __func__, __LINE__, ps3_result(result));
                goto zero_region;
        }

        r->offset = r->base - map.rm.size;
        return result;

zero_region:
        r->size = r->base = r->offset = 0;
        return result;
}
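
/*
 * Example (illustrative): a request of size 0x1c00000 (28MB) is rounded
 * down by _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M) to r->size = 0x1000000,
 * so the 0xc00000 byte remainder (12MB) is reported as the difference
 * above and never allocated from the HV.
 */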

/**
 * ps3_mm_region_destroy - destroy a memory region
 * @r: pointer to struct mem_region
 */

static void ps3_mm_region_destroy(struct mem_region *r)
{
        int result;

        DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base);
        if (r->base) {
                result = lv1_release_memory(r->base);
                BUG_ON(result);
                r->size = r->base = r->offset = 0;
                map.total = map.rm.size;
        }
}

/**
 * ps3_mm_add_memory - hot add memory
 */

static int __init ps3_mm_add_memory(void)
{
        int result;
        unsigned long start_addr;
        unsigned long start_pfn;
        unsigned long nr_pages;

        if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
                return -ENODEV;

        BUG_ON(!mem_init_done);

        start_addr = map.rm.size;
        start_pfn = start_addr >> PAGE_SHIFT;
        nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n",
                __func__, __LINE__, start_addr, start_pfn, nr_pages);

        result = add_memory(0, start_addr, map.r1.size);

        if (result) {
                DBG("%s:%d: add_memory failed: (%d)\n",
                        __func__, __LINE__, result);
                return result;
        }

        lmb_add(start_addr, map.r1.size);
        lmb_analyze();

        result = online_pages(start_pfn, nr_pages);

        if (result)
                DBG("%s:%d: online_pages failed: (%d)\n",
                        __func__, __LINE__, result);

        return result;
}

core_initcall(ps3_mm_add_memory);

/*============================================================================*/
/* dma routines */
/*============================================================================*/

/**
 * dma_sb_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
 * @r: pointer to dma region structure
 * @lpar_addr: HV lpar address
 */

static unsigned long dma_sb_lpar_to_bus(struct ps3_dma_region *r,
        unsigned long lpar_addr)
{
        if (lpar_addr >= map.rm.size)
                lpar_addr -= map.r1.offset;
        BUG_ON(lpar_addr < r->offset);
        BUG_ON(lpar_addr >= r->offset + r->len);
        return r->bus_addr + lpar_addr - r->offset;
}
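
/*
 * Summary (illustrative, restating the code above): an lpar address at or
 * above map.rm.size is first pulled back into the linear Linux physical
 * layout by subtracting map.r1.offset, then rebased from the region offset
 * onto the ioc bus window, so the result is
 * r->bus_addr + (phys_addr - r->offset).
 */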

#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
static void __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
        const char *func, int line)
{
        DBG("%s:%d: dev %lu:%lu\n", func, line, r->dev->bus_id,
                r->dev->dev_id);
        DBG("%s:%d: page_size %u\n", func, line, r->page_size);
        DBG("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr);
        DBG("%s:%d: len %lxh\n", func, line, r->len);
        DBG("%s:%d: offset %lxh\n", func, line, r->offset);
}

/**
 * dma_chunk - A chunk of dma pages mapped by the io controller.
 * @region: The dma region that owns this chunk.
 * @lpar_addr: Starting lpar address of the area to map.
 * @bus_addr: Starting ioc bus address of the area to map.
 * @len: Length in bytes of the area to map.
 * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
 * list of all chunks owned by the region.
 *
 * This implementation uses a very simple dma page manager
 * based on the dma_chunk structure. This scheme assumes
 * that all drivers use very well behaved dma ops.
 */

struct dma_chunk {
        struct ps3_dma_region *region;
        unsigned long lpar_addr;
        unsigned long bus_addr;
        unsigned long len;
        struct list_head link;
        unsigned int usage_count;
};

#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
static void _dma_dump_chunk(const struct dma_chunk *c, const char *func,
        int line)
{
        DBG("%s:%d: r.dev %lu:%lu\n", func, line,
                c->region->dev->bus_id, c->region->dev->dev_id);
        DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr);
        DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size);
        DBG("%s:%d: r.len %lxh\n", func, line, c->region->len);
        DBG("%s:%d: r.offset %lxh\n", func, line, c->region->offset);
        DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr);
        DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr);
        DBG("%s:%d: c.len %lxh\n", func, line, c->len);
}

static struct dma_chunk *dma_find_chunk(struct ps3_dma_region *r,
        unsigned long bus_addr, unsigned long len)
{
        struct dma_chunk *c;
        unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
        unsigned long aligned_len = _ALIGN_UP(len+bus_addr-aligned_bus,
                1 << r->page_size);

        list_for_each_entry(c, &r->chunk_list.head, link) {
                /* intersection */
                if (aligned_bus >= c->bus_addr &&
                        aligned_bus + aligned_len <= c->bus_addr + c->len)
                        return c;

                /* below */
                if (aligned_bus + aligned_len <= c->bus_addr)
                        continue;

                /* above */
                if (aligned_bus >= c->bus_addr + c->len)
                        continue;

                /* we don't handle the multi-chunk case for now */
                dma_dump_chunk(c);
                BUG();
        }
        return NULL;
}
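
/*
 * Lookup policy (summary of the checks above): a request page-aligned to
 * [aligned_bus, aligned_bus + aligned_len) either falls entirely inside an
 * existing chunk (reuse it), lies entirely below or above it (keep
 * searching), or straddles a chunk boundary - a partial overlap the simple
 * page manager does not support, hence the BUG().
 */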

static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r,
        unsigned long lpar_addr, unsigned long len)
{
        struct dma_chunk *c;
        unsigned long aligned_lpar = _ALIGN_DOWN(lpar_addr, 1 << r->page_size);
        unsigned long aligned_len = _ALIGN_UP(len + lpar_addr - aligned_lpar,
                1 << r->page_size);

        list_for_each_entry(c, &r->chunk_list.head, link) {
                /* intersection */
                if (c->lpar_addr <= aligned_lpar &&
                        aligned_lpar < c->lpar_addr + c->len) {
                        if (aligned_lpar + aligned_len <= c->lpar_addr + c->len)
                                return c;
                        else {
                                dma_dump_chunk(c);
                                BUG();
                        }
                }
                /* below */
                if (aligned_lpar + aligned_len <= c->lpar_addr) {
                        continue;
                }
                /* above */
                if (c->lpar_addr + c->len <= aligned_lpar) {
                        continue;
                }
        }
        return NULL;
}

static int dma_sb_free_chunk(struct dma_chunk *c)
{
        int result = 0;

        if (c->bus_addr) {
                result = lv1_unmap_device_dma_region(c->region->dev->bus_id,
                        c->region->dev->dev_id, c->bus_addr, c->len);
                BUG_ON(result);
        }

        kfree(c);
        return result;
}

static int dma_ioc0_free_chunk(struct dma_chunk *c)
{
        int result = 0;
        int iopage;
        unsigned long offset;
        struct ps3_dma_region *r = c->region;

        DBG("%s:start\n", __func__);
        for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) {
                offset = (1 << r->page_size) * iopage;
                /* put INVALID entry */
                result = lv1_put_iopte(0,
                        c->bus_addr + offset,
                        c->lpar_addr + offset,
                        r->ioid,
                        0);
                DBG("%s: bus=%#lx, lpar=%#lx, ioid=%d\n", __func__,
                        c->bus_addr + offset,
                        c->lpar_addr + offset,
                        r->ioid);

                if (result) {
                        DBG("%s:%d: lv1_put_iopte failed: %s\n", __func__,
                                __LINE__, ps3_result(result));
                }
        }
        kfree(c);
        DBG("%s:end\n", __func__);
        return result;
}

/**
 * dma_sb_map_pages - Maps dma pages into the io controller bus address space.
 * @r: Pointer to a struct ps3_dma_region.
 * @phys_addr: Starting physical address of the area to map.
 * @len: Length in bytes of the area to map.
 * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
 *
 * This is the lowest level dma mapping routine, and is the one that will
 * make the HV call to add the pages into the io controller address space.
 */

static int dma_sb_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
        unsigned long len, struct dma_chunk **c_out, u64 iopte_flag)
{
        int result;
        struct dma_chunk *c;

        c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

        if (!c) {
                result = -ENOMEM;
                goto fail_alloc;
        }

        c->region = r;
        c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
        c->bus_addr = dma_sb_lpar_to_bus(r, c->lpar_addr);
        c->len = len;

        BUG_ON(iopte_flag != 0xf800000000000000UL);
        result = lv1_map_device_dma_region(c->region->dev->bus_id,
                c->region->dev->dev_id, c->lpar_addr,
                c->bus_addr, c->len, iopte_flag);
        if (result) {
                DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
                        __func__, __LINE__, ps3_result(result));
                goto fail_map;
        }

        list_add(&c->link, &r->chunk_list.head);

        *c_out = c;
        return 0;

fail_map:
        kfree(c);
fail_alloc:
        *c_out = NULL;
        DBG(" <- %s:%d\n", __func__, __LINE__);
        return result;
}

static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
        unsigned long len, struct dma_chunk **c_out,
        u64 iopte_flag)
{
        int result;
        struct dma_chunk *c, *last;
        int iopage, pages;
        unsigned long offset;

        DBG("%s: phy=%#lx, lpar=%#lx, len=%#lx\n", __func__,
                phys_addr, ps3_mm_phys_to_lpar(phys_addr), len);
        c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

        if (!c) {
                result = -ENOMEM;
                goto fail_alloc;
        }

        c->region = r;
        c->len = len;
        c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
        /* allocate IO address */
        if (list_empty(&r->chunk_list.head)) {
                /* first one */
                c->bus_addr = r->bus_addr;
        } else {
                /* derive from last bus addr */
                last = list_entry(r->chunk_list.head.next,
                        struct dma_chunk, link);
                c->bus_addr = last->bus_addr + last->len;
                DBG("%s: last bus=%#lx, len=%#lx\n", __func__,
                        last->bus_addr, last->len);
        }

        /* FIXME: check whether length exceeds region size */

        /* build ioptes for the area */
        pages = len >> r->page_size;
        DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#llx\n", __func__,
                r->page_size, r->len, pages, iopte_flag);
        for (iopage = 0; iopage < pages; iopage++) {
                offset = (1 << r->page_size) * iopage;
                result = lv1_put_iopte(0,
                        c->bus_addr + offset,
                        c->lpar_addr + offset,
                        r->ioid,
                        iopte_flag);
                if (result) {
                        printk(KERN_WARNING "%s:%d: lv1_put_iopte "
                                "failed: %s\n", __func__, __LINE__,
                                ps3_result(result));
                        goto fail_map;
                }
                DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__,
                        iopage, c->bus_addr + offset, c->lpar_addr + offset,
                        r->ioid);
        }

        /* be sure that last allocated one is inserted at head */
        list_add(&c->link, &r->chunk_list.head);

        *c_out = c;
        DBG("%s: end\n", __func__);
        return 0;

fail_map:
        for (iopage--; 0 <= iopage; iopage--) {
                /* recompute offset for each previously mapped page */
                offset = (1 << r->page_size) * iopage;
                lv1_put_iopte(0,
                        c->bus_addr + offset,
                        c->lpar_addr + offset,
                        r->ioid,
                        0);
        }
        kfree(c);
fail_alloc:
        *c_out = NULL;
        return result;
}

/**
 * dma_sb_region_create - Create a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region create routine, and is the one that
 * will make the HV call to create the region.
 */

static int dma_sb_region_create(struct ps3_dma_region *r)
{
        int result;
        u64 bus_addr;

        DBG(" -> %s:%d:\n", __func__, __LINE__);

        BUG_ON(!r);

        if (!r->dev->bus_id) {
                pr_info("%s:%d: %lu:%lu no dma\n", __func__, __LINE__,
                        r->dev->bus_id, r->dev->dev_id);
                return 0;
        }

        DBG("%s:%u: len = 0x%lx, page_size = %u, offset = 0x%lx\n", __func__,
                __LINE__, r->len, r->page_size, r->offset);

        BUG_ON(!r->len);
        BUG_ON(!r->page_size);
        BUG_ON(!r->region_ops);

        INIT_LIST_HEAD(&r->chunk_list.head);
        spin_lock_init(&r->chunk_list.lock);

        result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id,
                roundup_pow_of_two(r->len), r->page_size, r->region_type,
                &bus_addr);
        r->bus_addr = bus_addr;

        if (result) {
                DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
                        __func__, __LINE__, ps3_result(result));
                r->len = r->bus_addr = 0;
        }

        return result;
}

static int dma_ioc0_region_create(struct ps3_dma_region *r)
{
        int result;
        u64 bus_addr;

        INIT_LIST_HEAD(&r->chunk_list.head);
        spin_lock_init(&r->chunk_list.lock);

        result = lv1_allocate_io_segment(0,
                r->len,
                r->page_size,
                &bus_addr);
        r->bus_addr = bus_addr;
        if (result) {
                DBG("%s:%d: lv1_allocate_io_segment failed: %s\n",
                        __func__, __LINE__, ps3_result(result));
                r->len = r->bus_addr = 0;
        }
        DBG("%s: len=%#lx, pg=%d, bus=%#lx\n", __func__,
                r->len, r->page_size, r->bus_addr);
        return result;
}

/**
 * dma_sb_region_free - Free a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region free routine, and is the one that
 * will make the HV call to free the region.
 */

static int dma_sb_region_free(struct ps3_dma_region *r)
{
        int result;
        struct dma_chunk *c;
        struct dma_chunk *tmp;

        BUG_ON(!r);

        if (!r->dev->bus_id) {
                pr_info("%s:%d: %lu:%lu no dma\n", __func__, __LINE__,
                        r->dev->bus_id, r->dev->dev_id);
                return 0;
        }

        list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
                list_del(&c->link);
                dma_sb_free_chunk(c);
        }

        result = lv1_free_device_dma_region(r->dev->bus_id, r->dev->dev_id,
                r->bus_addr);

        if (result)
                DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
                        __func__, __LINE__, ps3_result(result));

        r->bus_addr = 0;

        return result;
}

static int dma_ioc0_region_free(struct ps3_dma_region *r)
{
        int result;
        struct dma_chunk *c, *n;

        DBG("%s: start\n", __func__);
        list_for_each_entry_safe(c, n, &r->chunk_list.head, link) {
                list_del(&c->link);
                dma_ioc0_free_chunk(c);
        }

        result = lv1_release_io_segment(0, r->bus_addr);

        if (result)
                DBG("%s:%d: lv1_release_io_segment failed: %s\n",
                        __func__, __LINE__, ps3_result(result));

        r->bus_addr = 0;
        DBG("%s: end\n", __func__);

        return result;
}

/**
 * dma_sb_map_area - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This is the common dma mapping routine.
 */

static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
        unsigned long len, dma_addr_t *bus_addr,
        u64 iopte_flag)
{
        int result;
        unsigned long flags;
        struct dma_chunk *c;
        unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
                : virt_addr;
        unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
        unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
                1 << r->page_size);
        *bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));

        if (!USE_DYNAMIC_DMA) {
                unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
                DBG(" -> %s:%d\n", __func__, __LINE__);
                DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
                        virt_addr);
                DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
                        phys_addr);
                DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
                        lpar_addr);
                DBG("%s:%d len %lxh\n", __func__, __LINE__, len);
                DBG("%s:%d bus_addr %llxh (%lxh)\n", __func__, __LINE__,
                        *bus_addr, len);
        }

        spin_lock_irqsave(&r->chunk_list.lock, flags);
        c = dma_find_chunk(r, *bus_addr, len);

        if (c) {
                DBG("%s:%d: reusing mapped chunk\n", __func__, __LINE__);
                dma_dump_chunk(c);
                c->usage_count++;
                spin_unlock_irqrestore(&r->chunk_list.lock, flags);
                return 0;
        }

        result = dma_sb_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag);

        if (result) {
                *bus_addr = 0;
                DBG("%s:%d: dma_sb_map_pages failed (%d)\n",
                        __func__, __LINE__, result);
                spin_unlock_irqrestore(&r->chunk_list.lock, flags);
                return result;
        }

        c->usage_count = 1;

        spin_unlock_irqrestore(&r->chunk_list.lock, flags);
        return result;
}

static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
        unsigned long len, dma_addr_t *bus_addr,
        u64 iopte_flag)
{
        int result;
        unsigned long flags;
        struct dma_chunk *c;
        unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
                : virt_addr;
        unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
        unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
                1 << r->page_size);

        DBG("%s: vaddr=%#lx, len=%#lx\n", __func__,
                virt_addr, len);
        DBG("%s: ph=%#lx a_ph=%#lx a_l=%#lx\n", __func__,
                phys_addr, aligned_phys, aligned_len);

        spin_lock_irqsave(&r->chunk_list.lock, flags);
        c = dma_find_chunk_lpar(r, ps3_mm_phys_to_lpar(phys_addr), len);

        if (c) {
                /* FIXME */
                BUG();
                *bus_addr = c->bus_addr + phys_addr - aligned_phys;
                c->usage_count++;
                spin_unlock_irqrestore(&r->chunk_list.lock, flags);
                return 0;
        }

        result = dma_ioc0_map_pages(r, aligned_phys, aligned_len, &c,
                iopte_flag);

        if (result) {
                *bus_addr = 0;
                DBG("%s:%d: dma_ioc0_map_pages failed (%d)\n",
                        __func__, __LINE__, result);
                spin_unlock_irqrestore(&r->chunk_list.lock, flags);
                return result;
        }
        *bus_addr = c->bus_addr + phys_addr - aligned_phys;
        DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#llx\n", __func__,
                virt_addr, phys_addr, aligned_phys, *bus_addr);
        c->usage_count = 1;

        spin_unlock_irqrestore(&r->chunk_list.lock, flags);
        return result;
}

/**
 * dma_sb_unmap_area - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This is the common dma unmap routine.
 */

static int dma_sb_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr,
        unsigned long len)
{
        unsigned long flags;
        struct dma_chunk *c;

        spin_lock_irqsave(&r->chunk_list.lock, flags);
        c = dma_find_chunk(r, bus_addr, len);

        if (!c) {
                unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
                        1 << r->page_size);
                unsigned long aligned_len = _ALIGN_UP(len + bus_addr
                        - aligned_bus, 1 << r->page_size);
                DBG("%s:%d: not found: bus_addr %llxh\n",
                        __func__, __LINE__, bus_addr);
                DBG("%s:%d: not found: len %lxh\n",
                        __func__, __LINE__, len);
                DBG("%s:%d: not found: aligned_bus %lxh\n",
                        __func__, __LINE__, aligned_bus);
                DBG("%s:%d: not found: aligned_len %lxh\n",
                        __func__, __LINE__, aligned_len);
                BUG();
        }

        c->usage_count--;

        if (!c->usage_count) {
                list_del(&c->link);
                dma_sb_free_chunk(c);
        }

        spin_unlock_irqrestore(&r->chunk_list.lock, flags);
        return 0;
}

static int dma_ioc0_unmap_area(struct ps3_dma_region *r,
        dma_addr_t bus_addr, unsigned long len)
{
        unsigned long flags;
        struct dma_chunk *c;

        DBG("%s: start a=%#llx l=%#lx\n", __func__, bus_addr, len);
        spin_lock_irqsave(&r->chunk_list.lock, flags);
        c = dma_find_chunk(r, bus_addr, len);

        if (!c) {
                unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
                        1 << r->page_size);
                unsigned long aligned_len = _ALIGN_UP(len + bus_addr
                        - aligned_bus,
                        1 << r->page_size);
                DBG("%s:%d: not found: bus_addr %llxh\n",
                        __func__, __LINE__, bus_addr);
                DBG("%s:%d: not found: len %lxh\n",
                        __func__, __LINE__, len);
                DBG("%s:%d: not found: aligned_bus %lxh\n",
                        __func__, __LINE__, aligned_bus);
                DBG("%s:%d: not found: aligned_len %lxh\n",
                        __func__, __LINE__, aligned_len);
                BUG();
        }

        c->usage_count--;

        if (!c->usage_count) {
                list_del(&c->link);
                dma_ioc0_free_chunk(c);
        }

        spin_unlock_irqrestore(&r->chunk_list.lock, flags);
        DBG("%s: end\n", __func__);
        return 0;
}

/**
 * dma_sb_region_create_linear - Setup a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine creates an HV dma region for the device and maps all available
 * ram into the io controller bus address space.
 */

static int dma_sb_region_create_linear(struct ps3_dma_region *r)
{
        int result;
        unsigned long virt_addr, len;
        dma_addr_t tmp;

        if (r->len > 16*1024*1024) { /* FIXME: need proper fix */
                /* force 16M dma pages for linear mapping */
                if (r->page_size != PS3_DMA_16M) {
                        pr_info("%s:%d: forcing 16M pages for linear map\n",
                                __func__, __LINE__);
                        r->page_size = PS3_DMA_16M;
                        r->len = _ALIGN_UP(r->len, 1 << r->page_size);
                }
        }

        result = dma_sb_region_create(r);
        BUG_ON(result);

        if (r->offset < map.rm.size) {
                /* Map (part of) 1st RAM chunk */
                virt_addr = map.rm.base + r->offset;
                len = map.rm.size - r->offset;
                if (len > r->len)
                        len = r->len;
                result = dma_sb_map_area(r, virt_addr, len, &tmp,
                        IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M);
                BUG_ON(result);
        }

        if (r->offset + r->len > map.rm.size) {
                /* Map (part of) 2nd RAM chunk */
                virt_addr = map.rm.size;
                len = r->len;
                if (r->offset >= map.rm.size)
                        virt_addr += r->offset - map.rm.size;
                else
                        len -= map.rm.size - r->offset;
                result = dma_sb_map_area(r, virt_addr, len, &tmp,
                        IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M);
                BUG_ON(result);
        }

        return result;
}

/**
 * dma_sb_region_free_linear - Free a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine will unmap all mapped areas and free the HV dma region.
 */

static int dma_sb_region_free_linear(struct ps3_dma_region *r)
{
        int result;
        dma_addr_t bus_addr;
        unsigned long len, lpar_addr;

        if (r->offset < map.rm.size) {
                /* Unmap (part of) 1st RAM chunk */
                lpar_addr = map.rm.base + r->offset;
                len = map.rm.size - r->offset;
                if (len > r->len)
                        len = r->len;
                bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
                result = dma_sb_unmap_area(r, bus_addr, len);
                BUG_ON(result);
        }

        if (r->offset + r->len > map.rm.size) {
                /* Unmap (part of) 2nd RAM chunk */
                lpar_addr = map.r1.base;
                len = r->len;
                if (r->offset >= map.rm.size)
                        lpar_addr += r->offset - map.rm.size;
                else
                        len -= map.rm.size - r->offset;
                bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
                result = dma_sb_unmap_area(r, bus_addr, len);
                BUG_ON(result);
        }

        result = dma_sb_region_free(r);
        BUG_ON(result);

        return result;
}

/**
 * dma_sb_map_area_linear - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This routine just returns the corresponding bus address. Actual mapping
 * occurs in dma_sb_region_create_linear().
 */

static int dma_sb_map_area_linear(struct ps3_dma_region *r,
        unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr,
        u64 iopte_flag)
{
        unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
                : virt_addr;
        *bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
        return 0;
}

/**
 * dma_sb_unmap_area_linear - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This routine does nothing. Unmapping occurs in dma_sb_region_free_linear().
 */

static int dma_sb_unmap_area_linear(struct ps3_dma_region *r,
        dma_addr_t bus_addr, unsigned long len)
{
        return 0;
}

static const struct ps3_dma_region_ops ps3_dma_sb_region_ops = {
        .create = dma_sb_region_create,
        .free = dma_sb_region_free,
        .map = dma_sb_map_area,
        .unmap = dma_sb_unmap_area
};

static const struct ps3_dma_region_ops ps3_dma_sb_region_linear_ops = {
        .create = dma_sb_region_create_linear,
        .free = dma_sb_region_free_linear,
        .map = dma_sb_map_area_linear,
        .unmap = dma_sb_unmap_area_linear
};

static const struct ps3_dma_region_ops ps3_dma_ioc0_region_ops = {
        .create = dma_ioc0_region_create,
        .free = dma_ioc0_region_free,
        .map = dma_ioc0_map_area,
        .unmap = dma_ioc0_unmap_area
};

int ps3_dma_region_init(struct ps3_system_bus_device *dev,
        struct ps3_dma_region *r, enum ps3_dma_page_size page_size,
        enum ps3_dma_region_type region_type, void *addr, unsigned long len)
{
        unsigned long lpar_addr;

        lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0;

        r->dev = dev;
        r->page_size = page_size;
        r->region_type = region_type;
        r->offset = lpar_addr;
        if (r->offset >= map.rm.size)
                r->offset -= map.r1.offset;
        r->len = len ? len : _ALIGN_UP(map.total, 1 << r->page_size);

        switch (dev->dev_type) {
        case PS3_DEVICE_TYPE_SB:
                r->region_ops = (USE_DYNAMIC_DMA)
                        ? &ps3_dma_sb_region_ops
                        : &ps3_dma_sb_region_linear_ops;
                break;
        case PS3_DEVICE_TYPE_IOC0:
                r->region_ops = &ps3_dma_ioc0_region_ops;
                break;
        default:
                BUG();
                return -EINVAL;
        }
        return 0;
}
EXPORT_SYMBOL(ps3_dma_region_init);
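
/*
 * Illustrative driver usage (a sketch, with hypothetical buffer names and
 * flags; the calls and signatures are the ones defined in this file): a
 * system bus driver typically initializes and creates its dma region once,
 * then maps buffers as needed:
 *
 *     ps3_dma_region_init(dev, dev->d_region, PS3_DMA_64K,
 *                         PS3_DMA_OTHER, NULL, 0);
 *     ps3_dma_region_create(dev->d_region);
 *     ps3_dma_map(dev->d_region, (unsigned long)buf, buf_len,
 *                 &bus_addr, iopte_flag);
 *     ...
 *     ps3_dma_unmap(dev->d_region, bus_addr, buf_len);
 *     ps3_dma_region_free(dev->d_region);
 */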

int ps3_dma_region_create(struct ps3_dma_region *r)
{
        BUG_ON(!r);
        BUG_ON(!r->region_ops);
        BUG_ON(!r->region_ops->create);
        return r->region_ops->create(r);
}
EXPORT_SYMBOL(ps3_dma_region_create);

int ps3_dma_region_free(struct ps3_dma_region *r)
{
        BUG_ON(!r);
        BUG_ON(!r->region_ops);
        BUG_ON(!r->region_ops->free);
        return r->region_ops->free(r);
}
EXPORT_SYMBOL(ps3_dma_region_free);

int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
        unsigned long len, dma_addr_t *bus_addr,
        u64 iopte_flag)
{
        return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag);
}

int ps3_dma_unmap(struct ps3_dma_region *r, dma_addr_t bus_addr,
        unsigned long len)
{
        return r->region_ops->unmap(r, bus_addr, len);
}

/*============================================================================*/
/* system startup routines */
/*============================================================================*/

/**
 * ps3_mm_init - initialize the address space state variables
 */

void __init ps3_mm_init(void)
{
        int result;

        DBG(" -> %s:%d\n", __func__, __LINE__);

        result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
                &map.total);

        if (result)
                panic("ps3_repository_read_mm_info() failed");

        map.rm.offset = map.rm.base;
        map.vas_id = map.htab_size = 0;

        /* this implementation assumes map.rm.base is zero */

        BUG_ON(map.rm.base);
        BUG_ON(!map.rm.size);

        /* arrange to do this in ps3_mm_add_memory */
        ps3_mm_region_create(&map.r1, map.total - map.rm.size);

        /* correct map.total for the real total amount of memory we use */
        map.total = map.rm.size + map.r1.size;

        DBG(" <- %s:%d\n", __func__, __LINE__);
}

/**
 * ps3_mm_shutdown - final cleanup of address space
 */

void ps3_mm_shutdown(void)
{
        ps3_mm_region_destroy(&map.r1);
}