/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines for PCI DMA mapping.  See Documentation/DMA-API.txt for
 * a description of how these routines should be used.
 */
#include <linux/module.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
#include <asm/dma.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>
#define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(SG)	virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
/**
 * sn_dma_supported - test a DMA mask
 * @dev: device to test
 * @mask: DMA mask to test
 *
 * Return whether the given PCI device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.  Of course, SN only supports devices that have 32 or more
 * address bits when using the PMU.
 */
int sn_dma_supported(struct device *dev, u64 mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	if (mask < 0x7fffffff)
		return 0;
	return 1;
}
EXPORT_SYMBOL(sn_dma_supported);
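
/*
 * Illustrative sketch (not part of the original file): the check above
 * rejects any mask smaller than 0x7fffffff.  The hypothetical 24-bit-only
 * device from the kernel-doc example therefore fails, while a full 32-bit
 * mask passes:
 *
 *	sn_dma_supported(dev, 0x00ffffff);	  returns 0 (mask < 0x7fffffff)
 *	sn_dma_supported(dev, DMA_BIT_MASK(32)); returns 1
 */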
/**
 * sn_dma_set_mask - set the DMA mask
 * @dev: device to set
 * @dma_mask: new mask
 *
 * Set @dev's DMA mask if the hw supports it.
 */
int sn_dma_set_mask(struct device *dev, u64 dma_mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	if (!sn_dma_supported(dev, dma_mask))
		return 0;

	*dev->dma_mask = dma_mask;
	return 1;
}
EXPORT_SYMBOL(sn_dma_set_mask);
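
/*
 * Hedged usage sketch: a PCI driver does not call sn_dma_set_mask() directly;
 * it uses the generic DMA mask helpers, which the platform routes here.  The
 * driver-side error handling below is hypothetical:
 *
 *	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
 *	    pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
 *		return -EIO;	neither mask was accepted
 */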
/**
 * sn_dma_alloc_coherent - allocate memory for coherent DMA
 * @dev: device to allocate for
 * @size: size of the region
 * @dma_handle: DMA (bus) address
 * @flags: memory allocation flags
 *
 * dma_alloc_coherent() returns a pointer to a memory region suitable for
 * coherent DMA traffic to/from a PCI device.  On SN platforms, this means
 * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
 *
 * This interface is usually used for "command" streams (e.g. the command
 * queue for a SCSI controller).  See Documentation/DMA-API.txt for
 * more information.
 */
void *sn_dma_alloc_coherent(struct device *dev, size_t size,
			    dma_addr_t * dma_handle, gfp_t flags)
{
	void *cpuaddr;
	unsigned long phys_addr;
	int node;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	/*
	 * Allocate the memory.
	 */
	node = pcibus_to_node(pdev->bus);
	if (likely(node >= 0)) {
		struct page *p = alloc_pages_node(node, flags, get_order(size));

		if (likely(p))
			cpuaddr = page_address(p);
		else
			return NULL;
	} else
		cpuaddr = (void *)__get_free_pages(flags, get_order(size));

	if (unlikely(!cpuaddr))
		return NULL;

	memset(cpuaddr, 0x0, size);

	/* physical addr. of the memory we just got */
	phys_addr = __pa(cpuaddr);

	/*
	 * 64 bit address translations should never fail.
	 * 32 bit translations can fail if there are insufficient mapping
	 * resources.
	 */
	*dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
						   SN_DMA_ADDR_PHYS);
	if (!*dma_handle) {
		printk(KERN_ERR "%s: out of ATEs\n", __func__);
		free_pages((unsigned long)cpuaddr, get_order(size));
		return NULL;
	}

	return cpuaddr;
}
EXPORT_SYMBOL(sn_dma_alloc_coherent);
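
/*
 * Hedged driver-side sketch: allocating a small "command ring" of the kind
 * the kernel-doc above describes.  The names (ring, ring_dma, MY_RING_BYTES)
 * are hypothetical and only illustrate the call that lands here:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(&pdev->dev, MY_RING_BYTES,
 *					&ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;	 out of pages, or out of ATEs
 */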
/**
 * sn_dma_free_coherent - free memory associated with coherent DMAable region
 * @dev: device to free for
 * @size: size to free
 * @cpu_addr: kernel virtual address to free
 * @dma_handle: DMA address associated with this region
 *
 * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
 * any associated IOMMU mappings.
 */
void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
			  dma_addr_t dma_handle)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	provider->dma_unmap(pdev, dma_handle, 0);
	free_pages((unsigned long)cpu_addr, get_order(size));
}
EXPORT_SYMBOL(sn_dma_free_coherent);
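
/*
 * Hedged counterpart to the allocation sketch above: the same hypothetical
 * driver releases the region in its teardown path with
 *
 *	dma_free_coherent(&pdev->dev, MY_RING_BYTES, ring, ring_dma);
 *
 * which unmaps any associated ATE and returns the pages.
 */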
/**
 * sn_dma_map_single_attrs - map a single page for DMA
 * @dev: device to map for
 * @cpu_addr: kernel virtual address of the region to map
 * @size: size of the region
 * @direction: DMA direction
 * @attrs: optional dma attributes
 *
 * Map the region pointed to by @cpu_addr for DMA and return the
 * DMA address.
 *
 * We map this to the one step pcibr_dmamap_trans interface rather than
 * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
 * no way of saving the dmamap handle from the alloc to later free
 * (which is pretty much unacceptable).
 *
 * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
 * dma_map_consistent() so that writes force a flush of pending DMA.
 * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
 * Document Number: 007-4763-001)
 *
 * TODO: simplify our interface;
 *       figure out how to save dmamap handle so can use two step.
 */
dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
				   size_t size, int direction,
				   struct dma_attrs *attrs)
{
	dma_addr_t dma_addr;
	unsigned long phys_addr;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int dmabarr;

	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

	BUG_ON(dev->bus != &pci_bus_type);

	phys_addr = __pa(cpu_addr);
	if (dmabarr)
		dma_addr = provider->dma_map_consistent(pdev, phys_addr,
							size, SN_DMA_ADDR_PHYS);
	else
		dma_addr = provider->dma_map(pdev, phys_addr, size,
					     SN_DMA_ADDR_PHYS);

	if (!dma_addr) {
		printk(KERN_ERR "%s: out of ATEs\n", __func__);
		return 0;
	}

	return dma_addr;
}
EXPORT_SYMBOL(sn_dma_map_single_attrs);
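
/*
 * Hedged usage sketch for the attrs path described above.  A driver wanting
 * the write-barrier semantics (see the Altix document cited in the kernel-doc)
 * would do something like the following; buf and len are hypothetical.  Note
 * that this file signals mapping failure with a zero DMA address:
 *
 *	struct dma_attrs attrs;
 *	dma_addr_t handle;
 *
 *	init_dma_attrs(&attrs);
 *	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 *	handle = dma_map_single_attrs(&pdev->dev, buf, len,
 *				      DMA_TO_DEVICE, &attrs);
 *	if (!handle)
 *		return -ENOMEM;	 out of ATEs
 *	...
 *	dma_unmap_single_attrs(&pdev->dev, handle, len, DMA_TO_DEVICE, &attrs);
 */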
/**
 * sn_dma_unmap_single_attrs - unmap a DMA mapped page
 * @dev: device to sync
 * @dma_addr: DMA address to sync
 * @size: size of region
 * @direction: DMA direction
 * @attrs: optional dma attributes
 *
 * This routine is supposed to sync the DMA region specified
 * by @dma_addr into the coherence domain.  On SN, we're always cache
 * coherent, so we just need to free any ATEs associated with this mapping.
 */
void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
			       size_t size, int direction,
			       struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	provider->dma_unmap(pdev, dma_addr, direction);
}
EXPORT_SYMBOL(sn_dma_unmap_single_attrs);
/**
 * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist
 * @dev: device to unmap
 * @sgl: scatterlist to unmap
 * @nhwentries: number of scatterlist entries
 * @direction: DMA direction
 * @attrs: optional dma attributes
 *
 * Unmap a set of streaming mode DMA translations.
 */
void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
			   int nhwentries, int direction,
			   struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	struct scatterlist *sg;

	BUG_ON(dev->bus != &pci_bus_type);

	for_each_sg(sgl, sg, nhwentries, i) {
		provider->dma_unmap(pdev, sg->dma_address, direction);
		sg->dma_address = (dma_addr_t) NULL;
		sg->dma_length = 0;
	}
}
EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);
/**
 * sn_dma_map_sg_attrs - map a scatterlist for DMA
 * @dev: device to map for
 * @sgl: scatterlist to map
 * @nhwentries: number of entries
 * @direction: direction of the DMA transaction
 * @attrs: optional dma attributes
 *
 * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
 * dma_map_consistent() so that writes force a flush of pending DMA.
 * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
 * Document Number: 007-4763-001)
 *
 * Maps each entry of @sgl for DMA.
 */
int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
			int nhwentries, int direction, struct dma_attrs *attrs)
{
	unsigned long phys_addr;
	struct scatterlist *saved_sg = sgl, *sg;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int i;
	int dmabarr;

	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

	BUG_ON(dev->bus != &pci_bus_type);

	/*
	 * Setup a DMA address for each entry in the scatterlist.
	 */
	for_each_sg(sgl, sg, nhwentries, i) {
		dma_addr_t dma_addr;

		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
		if (dmabarr)
			dma_addr = provider->dma_map_consistent(pdev,
								phys_addr,
								sg->length,
								SN_DMA_ADDR_PHYS);
		else
			dma_addr = provider->dma_map(pdev, phys_addr,
						     sg->length,
						     SN_DMA_ADDR_PHYS);

		sg->dma_address = dma_addr;
		if (!sg->dma_address) {
			printk(KERN_ERR "%s: out of ATEs\n", __func__);

			/*
			 * Free any successfully allocated entries.
			 */
			if (i > 0)
				sn_dma_unmap_sg_attrs(dev, saved_sg, i,
						      direction, attrs);
			return 0;
		}

		sg->dma_length = sg->length;
	}

	return nhwentries;
}
EXPORT_SYMBOL(sn_dma_map_sg_attrs);
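
/*
 * Hedged scatterlist sketch: mapping and unmapping a short sg list through
 * the routines above.  The sg setup is omitted and sgl/nents are hypothetical:
 *
 *	int mapped = dma_map_sg(&pdev->dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;	 sn_dma_map_sg_attrs() ran out of ATEs
 *	...
 *	dma_unmap_sg(&pdev->dev, sgl, nents, DMA_FROM_DEVICE);
 *
 * On success the return value is @nhwentries; on failure the error path above
 * has already unmapped every entry that was mapped before the failure.
 */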
/*
 * The sync routines are no-ops: SN platforms are always cache coherent, so
 * there is nothing to flush beyond validating the bus type.
 */
void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				size_t size, int direction)
{
	BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);

void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_single_for_device);

void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			    int nelems, int direction)
{
	BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);

void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_sg_for_device);

int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}
EXPORT_SYMBOL(sn_dma_mapping_error);
char *sn_pci_get_legacy_mem(struct pci_bus *bus)
{
	if (!SN_PCIBUS_BUSSOFT(bus))
		return ERR_PTR(-ENODEV);

	return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem |
			__IA64_UNCACHED_OFFSET);
}
int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
	unsigned long addr;
	int ret;
	struct ia64_sal_retval isrv;

	/*
	 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
	 * around hw issues at the pci bus level.  SGI proms older than
	 * 4.10 don't implement this.
	 */
	SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
		 pci_domain_nr(bus), bus->number,
		 0, /* io */
		 0, /* read */
		 port, size, __pa(val));

	if (isrv.status == 0)
		return size;

	/*
	 * If the above failed, retry using the SAL_PROBE call which should
	 * be present in all proms (but which cannot work round PCI chipset
	 * bugs).  This code is retained for compatibility with old
	 * pre-4.10 proms, and should be removed at some point in the future.
	 */
	if (!SN_PCIBUS_BUSSOFT(bus))
		return -ENODEV;

	addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
	addr += port;

	ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);

	if (ret == 2)
		return -EINVAL;

	if (ret == 1)
		*val = -1;

	return size;
}
int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
	int ret = size;
	unsigned long paddr;
	unsigned long *addr;
	struct ia64_sal_retval isrv;

	/*
	 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
	 * around hw issues at the pci bus level.  SGI proms older than
	 * 4.10 don't implement this.
	 */
	SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
		 pci_domain_nr(bus), bus->number,
		 0, /* io */
		 1, /* write */
		 port, size, __pa(&val));

	if (isrv.status == 0)
		return size;

	/*
	 * If the above failed, retry using the SAL_PROBE call which should
	 * be present in all proms (but which cannot work round PCI chipset
	 * bugs).  This code is retained for compatibility with old
	 * pre-4.10 proms, and should be removed at some point in the future.
	 */
	if (!SN_PCIBUS_BUSSOFT(bus)) {
		ret = -ENODEV;
		goto out;
	}

	/* Put the phys addr in uncached space */
	paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
	paddr += port;
	addr = (unsigned long *)paddr;

	switch (size) {
	case 1:
		*(volatile u8 *)(addr) = (u8)(val);
		break;
	case 2:
		*(volatile u16 *)(addr) = (u16)(val);
		break;
	case 4:
		*(volatile u32 *)(addr) = (u32)(val);
		break;
	default:
		ret = -EINVAL;
		break;
	}
 out:
	return ret;
}
struct dma_mapping_ops sn_dma_ops = {
	.alloc_coherent		= sn_dma_alloc_coherent,
	.free_coherent		= sn_dma_free_coherent,
	.map_single_attrs	= sn_dma_map_single_attrs,
	.unmap_single_attrs	= sn_dma_unmap_single_attrs,
	.map_sg_attrs		= sn_dma_map_sg_attrs,
	.unmap_sg_attrs		= sn_dma_unmap_sg_attrs,
	.sync_single_for_cpu	= sn_dma_sync_single_for_cpu,
	.sync_sg_for_cpu	= sn_dma_sync_sg_for_cpu,
	.sync_single_for_device	= sn_dma_sync_single_for_device,
	.sync_sg_for_device	= sn_dma_sync_sg_for_device,
	.mapping_error		= sn_dma_mapping_error,
	.dma_supported_op	= sn_dma_supported,
};

void sn_dma_init(void)
{
	dma_ops = &sn_dma_ops;
}
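
/*
 * Hedged note on how the table above is consumed: sn_dma_init() is assumed to
 * run once during platform bring-up.  From then on, generic DMA API calls on
 * SN PCI devices are dispatched through the global dma_ops pointer, e.g.
 *
 *	sn_dma_init();				 assumed platform setup call site
 *	dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *						 lands in sn_dma_map_single_attrs()
 */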