powerpc: Add support for swiotlb on 32-bit
arch/powerpc/kernel/dma-swiotlb.c
/*
 * Contains routines needed to support swiotlb for ppc.
 *
 * Copyright (C) 2009 Becky Bruce, Freescale Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/dma-mapping.h>
#include <linux/pfn.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pci.h>

#include <asm/machdep.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/abs_addr.h>

int swiotlb __read_mostly;
unsigned int ppc_swiotlb_enable;
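
/*
 * The two flags above are presumably consulted elsewhere in the arch:
 * platform code sets ppc_swiotlb_enable when some of RAM is beyond a
 * device's DMA reach so that setup code can initialize the bounce pool,
 * and "swiotlb" mirrors the global that other code tests to see whether
 * bouncing is active. (This note is an assumption, not from this file.)
 */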

void *swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t addr)
{
	unsigned long pfn = PFN_DOWN(swiotlb_bus_to_phys(hwdev, addr));
	void *pageaddr = page_address(pfn_to_page(pfn));

	if (pageaddr != NULL)
		return pageaddr + (addr % PAGE_SIZE);
	return NULL;
}
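
/*
 * A worked example of the translation above, with hypothetical numbers
 * and assuming the device's DMA offset is page-aligned: with 4K pages
 * and a zero offset, bus address 0x10000234 resolves as
 *
 *	pfn      = PFN_DOWN(0x10000234)          -> 0x10000
 *	pageaddr = page_address(pfn_to_page(pfn))
 *	result   = pageaddr + (0x10000234 % PAGE_SIZE) = pageaddr + 0x234
 *
 * page_address() returns NULL for a highmem page that has no kernel
 * mapping, which is why the function can return NULL.
 */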

dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
{
	return paddr + get_dma_direct_offset(hwdev);
}

phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
{
	return baddr - get_dma_direct_offset(hwdev);
}
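
/*
 * The two helpers above are exact inverses built on a constant
 * per-device offset, so for any device and physical address:
 *
 *	swiotlb_bus_to_phys(dev, swiotlb_phys_to_bus(dev, p)) == p
 *
 * For example, with a hypothetical offset of 0x80000000, physical
 * address 0x1000 appears on the bus as 0x80001000 and translates
 * straight back.
 */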

/*
 * Determine if an address needs bounce buffering via swiotlb.
 * Going forward I expect the swiotlb code to generalize this via
 * a dma_ops->addr_needs_map hook, and this function will move from
 * here to the generic swiotlb code.
 */
int
swiotlb_arch_address_needs_mapping(struct device *hwdev, dma_addr_t addr,
				   size_t size)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(hwdev);

	BUG_ON(!dma_ops);
	return dma_ops->addr_needs_map(hwdev, addr, size);
}

/*
 * Determine if an address is reachable by a pci device, or if we must bounce.
 */
static int
swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
{
	u64 mask = dma_get_mask(hwdev);
	dma_addr_t max;
	struct pci_controller *hose;
	struct pci_dev *pdev = to_pci_dev(hwdev);

	hose = pci_bus_to_host(pdev->bus);
	max = hose->dma_window_base_cur + hose->dma_window_size;

	/* check that we're within mapped pci window space */
	if ((addr + size > max) || (addr < hose->dma_window_base_cur))
		return 1;

	return !is_buffer_dma_capable(mask, addr, size);
}
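
/*
 * A hypothetical worked example of the window check above: with
 * dma_window_base_cur = 0 and dma_window_size = 0x80000000 (a 2GB
 * window), a 0x1000-byte buffer at bus address 0x7ffff800 ends at
 * 0x80000800, which is beyond max, so we return 1 and bounce even if
 * the device's mask could otherwise reach the buffer.
 */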

static int
swiotlb_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
{
	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
}

/*
 * At the moment, all platforms that use this code only require
 * swiotlb to be used if we're operating on HIGHMEM.  Since
 * we don't ever call anything other than map_sg, unmap_sg,
 * map_page, and unmap_page on highmem, use normal dma_ops
 * for everything else.
 */
struct dma_mapping_ops swiotlb_dma_ops = {
	.alloc_coherent = dma_direct_alloc_coherent,
	.free_coherent = dma_direct_free_coherent,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.dma_supported = swiotlb_dma_supported,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.addr_needs_map = swiotlb_addr_needs_map,
	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device
};

struct dma_mapping_ops swiotlb_pci_dma_ops = {
	.alloc_coherent = dma_direct_alloc_coherent,
	.free_coherent = dma_direct_free_coherent,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.dma_supported = swiotlb_dma_supported,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.addr_needs_map = swiotlb_pci_addr_needs_map,
	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device
};
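
/*
 * Note that the two ops tables above differ only in .addr_needs_map:
 * swiotlb_dma_ops bounces purely on the device's DMA mask, while
 * swiotlb_pci_dma_ops also bounces anything outside the host bridge's
 * mapped PCI window. Coherent allocations go through dma_direct_* in
 * both cases, so they are never bounced.
 */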

static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct device *dev = data;

	/* We are only interested in device addition */
	if (action != BUS_NOTIFY_ADD_DEVICE)
		return 0;

	/* May need to bounce if the device can't address all of DRAM */
	if (dma_get_mask(dev) < lmb_end_of_DRAM())
		set_dma_ops(dev, &swiotlb_dma_ops);

	return NOTIFY_DONE;
}
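
/*
 * A hypothetical example of the test above: a device with a 32-bit DMA
 * mask (0xffffffff) on a board where lmb_end_of_DRAM() is 0x200000000
 * (8GB of RAM) cannot reach the top 4GB, so it is switched to
 * swiotlb_dma_ops; a 64-bit-capable device keeps its default ops.
 */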

static struct notifier_block ppc_swiotlb_plat_bus_notifier = {
	.notifier_call = ppc_swiotlb_bus_notify,
	.priority = 0,
};

static struct notifier_block ppc_swiotlb_of_bus_notifier = {
	.notifier_call = ppc_swiotlb_bus_notify,
	.priority = 0,
};

int __init swiotlb_setup_bus_notifier(void)
{
	bus_register_notifier(&platform_bus_type,
			      &ppc_swiotlb_plat_bus_notifier);
	bus_register_notifier(&of_platform_bus_type,
			      &ppc_swiotlb_of_bus_notifier);

	return 0;
}
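
/*
 * A sketch of how a platform might wire this up; the call site below is
 * illustrative ("myboard" is a made-up platform), not taken from this
 * file:
 *
 *	static void __init myboard_setup_arch(void)
 *	{
 *	#ifdef CONFIG_SWIOTLB
 *		if (lmb_end_of_DRAM() > 0xffffffff) {
 *			ppc_swiotlb_enable = 1;
 *			set_pci_dma_ops(&swiotlb_pci_dma_ops);
 *		}
 *	#endif
 *	}
 *
 * with swiotlb_setup_bus_notifier() run early (e.g. from an arch
 * initcall) so the notifiers are registered before devices are added.
 */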