/* $Id: pci_iommu.c,v 1.17 2001/12/17 07:05:09 davem Exp $
 * pci_iommu.c: UltraSparc PCI controller IOM/STC support.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/delay.h>

#include <asm/pbm.h>

#include "iommu_common.h"
#define PCI_STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
/* Accessing IOMMU and Streaming Buffer registers.
 * REG parameter is a physical address.  All registers
 * are 64-bits in size.
 */
#define pci_iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define pci_iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))
/* Must be invoked under the IOMMU lock. */
static void __iommu_flushall(struct pci_iommu *iommu)
{
	unsigned long tag;
	int entry;

	tag = iommu->iommu_flush + (0xa580UL - 0x0210UL);
	for (entry = 0; entry < 16; entry++) {
		pci_iommu_write(tag, 0);
		tag += 8UL;
	}

	/* Ensure completion of previous PIO writes. */
	(void) pci_iommu_read(iommu->write_complete_reg);

	/* Now update everyone's flush point. */
	for (entry = 0; entry < PBM_NCLUSTERS; entry++) {
		iommu->alloc_info[entry].flush =
			iommu->alloc_info[entry].next;
	}
}
#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
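
/* Editor's note (illustrative): with CTX == 5, IOPTE_STREAMING(5) evaluates
 * to IOPTE_VALID | IOPTE_CACHE | IOPTE_STBUF with context number 5 placed in
 * the IOPTE_CONTEXT field starting at bit 47, so a writable streaming IOPTE
 * for a physical page 'paddr' would be built as:
 */
#if 0
	iopte_val(*iopte) = IOPTE_STREAMING(5UL) | IOPTE_WRITE | (paddr & IOPTE_PAGE);
#endif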
/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
static inline void iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}
void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize)
{
	int i;

	tsbsize /= sizeof(iopte_t);

	for (i = 0; i < tsbsize; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);
}
static iopte_t *alloc_streaming_cluster(struct pci_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte, *limit, *first;
	unsigned long cnum, ent, flush_point;

	cnum = 0;
	while ((1UL << cnum) < npages)
		cnum++;
	iopte  = (iommu->page_table +
		  (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));

	if (cnum == 0)
		limit = (iommu->page_table +
			 iommu->lowest_consistent_map);
	else
		limit = (iopte +
			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));

	iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
	flush_point = iommu->alloc_info[cnum].flush;

	first = iopte;
	for (;;) {
		if (IOPTE_IS_DUMMY(iommu, iopte)) {
			if ((iopte + (1 << cnum)) >= limit)
				ent = 0;
			else
				ent = ent + 1;
			iommu->alloc_info[cnum].next = ent;
			if (ent == flush_point)
				__iommu_flushall(iommu);
			break;
		}
		iopte += (1 << cnum);
		ent++;
		if (iopte >= limit) {
			iopte = (iommu->page_table +
				 (cnum <<
				  (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
			ent = 0;
		}
		if (ent == flush_point)
			__iommu_flushall(iommu);
		if (iopte == first)
			goto bad;
	}

	/* I've got your streaming cluster right here buddy boy... */
	return iopte;

bad:
	printk(KERN_EMERG "pci_iommu: alloc_streaming_cluster of npages(%ld) failed!\n",
	       npages);
	return NULL;
}
static void free_streaming_cluster(struct pci_iommu *iommu, dma_addr_t base,
				   unsigned long npages, unsigned long ctx)
{
	unsigned long cnum, ent;

	cnum = 0;
	while ((1UL << cnum) < npages)
		cnum++;

	ent = (base << (32 - IO_PAGE_SHIFT + PBM_LOGCLUSTERS - iommu->page_table_sz_bits))
		>> (32 + PBM_LOGCLUSTERS + cnum - iommu->page_table_sz_bits);

	/* If the global flush might not have caught this entry,
	 * adjust the flush point such that we will flush before
	 * ever trying to reuse it.
	 */
#define between(X,Y,Z)	(((Z) - (Y)) >= ((X) - (Y)))
	if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
		iommu->alloc_info[cnum].flush = ent;
#undef between
}
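
/* Editor's note: between() exploits unsigned wraparound to test modular ring
 * membership.  E.g. with next == 14 and flush == 2, freeing ent == 15 gives
 * between(15, 14, 2) true (2 - 14 wraps to a huge value >= 15 - 14 == 1), so
 * the flush point is pulled back and a flush happens before entry 15 can be
 * handed out again.
 */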
/* We allocate consistent mappings from the end of cluster zero. */
static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte;

	iopte = iommu->page_table + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS));
	while (iopte > iommu->page_table) {
		iopte--;
		if (IOPTE_IS_DUMMY(iommu, iopte)) {
			unsigned long tmp = npages;

			while (--tmp) {
				iopte--;
				if (!IOPTE_IS_DUMMY(iommu, iopte))
					break;
			}
			if (tmp == 0) {
				u32 entry = (iopte - iommu->page_table);

				if (entry < iommu->lowest_consistent_map)
					iommu->lowest_consistent_map = entry;
				return iopte;
			}
		}
	}
	return NULL;
}
/* Allocate and map kernel buffer of size SIZE using consistent mode
 * DMA for PCI device PDEV.  Return non-NULL cpu-side address if
 * successful and set *DMA_ADDRP to the PCI side dma address.
 */
void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, first_page, ctx;
	void *ret;
	int npages;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	first_page = __get_free_pages(GFP_ATOMIC, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
	if (iopte == NULL) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu->iommu_cur_ctx++;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	{
		int i;
		u32 daddr = *dma_addrp;

		npages = size >> IO_PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			pci_iommu_write(iommu->iommu_flush, daddr);
			daddr += IO_PAGE_SIZE;
		}
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}
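
/* Example usage (editor's sketch, not part of this file; the ring size and
 * helper names are hypothetical): a driver allocates a descriptor ring in
 * consistent memory, hands the DMA address to the device, and frees the ring
 * on teardown with pci_free_consistent() below.
 */
#if 0
static int example_setup_ring(struct pci_dev *pdev, void **ring, dma_addr_t *ring_dma)
{
	*ring = pci_alloc_consistent(pdev, 2 * IO_PAGE_SIZE, ring_dma);
	if (*ring == NULL)
		return -ENOMEM;
	/* ... write *ring_dma into a (hypothetical) device base register ... */
	return 0;
}

static void example_teardown_ring(struct pci_dev *pdev, void *ring, dma_addr_t ring_dma)
{
	pci_free_consistent(pdev, 2 * IO_PAGE_SIZE, ring, ring_dma);
}
#endif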
/* Free and unmap a consistent DMA translation. */
void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages, i, ctx;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	if ((iopte - iommu->page_table) ==
	    iommu->lowest_consistent_map) {
		iopte_t *walk = iopte + npages;
		iopte_t *limit;

		limit = (iommu->page_table +
			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
		while (walk < limit) {
			if (!IOPTE_IS_DUMMY(iommu, walk))
				break;
			walk++;
		}
		iommu->lowest_consistent_map =
			(walk - iommu->page_table);
	}

	/* Data for consistent mappings cannot enter the streaming
	 * buffers, so we only need to update the TSB.  We flush
	 * the IOMMU here as well to prevent conflicts with the
	 * streaming mapping deferred tlb flush scheme.
	 */

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;

	for (i = 0; i < npages; i++, iopte++)
		iopte_make_dummy(iommu, iopte);

	if (iommu->iommu_ctxflush) {
		pci_iommu_write(iommu->iommu_ctxflush, ctx);
	} else {
		for (i = 0; i < npages; i++) {
			u32 daddr = dvma + (i << IO_PAGE_SHIFT);

			pci_iommu_write(iommu->iommu_flush, daddr);
		}
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
/* Map a single buffer at PTR of SZ bytes for PCI DMA
 * in streaming mode.
 */
dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (direction == PCI_DMA_NONE)
		BUG();

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_streaming_cluster(iommu, npages);
	if (base == NULL)
		goto bad;
	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu->iommu_cur_ctx++;
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != PCI_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return PCI_DMA_ERROR_CODE;
}
static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages)
{
	int limit;

	PCI_STC_FLUSHFLAG_INIT(strbuf);
	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);

		limit = 100000;
		pci_iommu_write(flushreg, ctx);
		for (;;) {
			if (((long)pci_iommu_read(matchreg)) >= 0L)
				break;
			limit--;
			if (!limit)
				break;
			udelay(1);
		}
		if (!limit)
			printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
			       "timeout vaddr[%08x] ctx[%lx]\n",
			       vaddr, ctx);
	} else {
		unsigned long i;

		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			pci_iommu_write(strbuf->strbuf_pflush, vaddr);
	}

	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) pci_iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!PCI_STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		membar("#LoadLoad");
	}
	if (!limit)
		printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}
/* Unmap a single streaming mode DMA translation. */
void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx;

	if (direction == PCI_DMA_NONE)
		BUG();

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
#ifdef DEBUG_PCI_IOMMU
	if (IOPTE_IS_DUMMY(iommu, base))
		printk("pci_unmap_single called on non-mapped region %08x,%08x from %016lx\n",
		       bus_addr, sz, __builtin_return_address(0));
#endif
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);

	/* Step 2: Clear out first TSB entry. */
	iopte_make_dummy(iommu, base);

	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
			       npages, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
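
/* Example usage (editor's sketch; 'buf' and 'len' are hypothetical): a
 * streaming mapping is created for one transfer and torn down afterwards,
 * checking for PCI_DMA_ERROR_CODE on failure.
 */
#if 0
	dma_addr_t dma;

	dma = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (dma == PCI_DMA_ERROR_CODE)
		return -ENOMEM;
	/* ... hand 'dma' to the device and wait for the transfer ... */
	pci_unmap_single(pdev, dma, len, PCI_DMA_TODEVICE);
#endif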
#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
			   int nused, int nelems, unsigned long iopte_protection)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i;

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = iopte_protection | (pteval & IOPTE_PAGE);
			while (len > 0) {
				*iopte++ = __iopte(pteval);
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0UL;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}
}
/* Map a set of buffers described by SGLIST with NELEMS array
 * elements in streaming mode for PCI DMA.
 * When making changes here, inspect the assembly output.  I was having
 * a hard time keeping this routine from using stack slots to hold variables.
 */
int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages, iopte_protection;
	iopte_t *base;
	u32 dma_base;
	struct scatterlist *sgtmp;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_map_single(pdev,
				       (page_address(sglist->page) + sglist->offset),
				       sglist->length, direction);
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (direction == PCI_DMA_NONE)
		BUG();

	/* Step 1: Prepare scatter list. */

	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster. */

	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_streaming_cluster(iommu, npages);
	if (base == NULL)
		goto bad;
	dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Choose a context if necessary. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu->iommu_cur_ctx++;

	/* Step 5: Create the mappings. */
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != PCI_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;
	fill_sg(base, sglist, used, nelems, iopte_protection);
#ifdef VERIFY_SG
	verify_sglist(sglist, nelems, base, npages);
#endif

	spin_unlock_irqrestore(&iommu->lock, flags);

	return used;

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return PCI_DMA_ERROR_CODE;
}
/* Unmap a set of streaming mode DMA translations. */
void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, ctx, i, npages;
	u32 bus_addr;

	if (direction == PCI_DMA_NONE)
		BUG();

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;

	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

#ifdef DEBUG_PCI_IOMMU
	if (IOPTE_IS_DUMMY(iommu, base))
		printk("pci_unmap_sg called on non-mapped region %016lx,%d from %016lx\n", sglist->dma_address, nelems, __builtin_return_address(0));
#endif

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);

	/* Step 2: Clear out first TSB entry. */
	iopte_make_dummy(iommu, base);

	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
			       npages, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
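
/* Example usage (editor's sketch; the scatterlist setup is hypothetical):
 * pci_map_sg() may coalesce entries, so the device must be programmed from
 * the returned count and the dma_address/dma_length fields, not the original
 * nelems.
 */
#if 0
	int count, i;

	count = pci_map_sg(pdev, sglist, nelems, PCI_DMA_FROMDEVICE);
	for (i = 0; i < count; i++) {
		/* ... program sglist[i].dma_address / sglist[i].dma_length ... */
	}
	pci_unmap_sg(pdev, sglist, nelems, PCI_DMA_FROMDEVICE);
#endif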
/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 */
void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
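
/* Example usage (editor's sketch; 'rx_dma' and 'rx_len' are hypothetical):
 * for a long-lived streaming mapping, the CPU must sync before touching
 * data the device has written:
 */
#if 0
	pci_dma_sync_single_for_cpu(pdev, rx_dma, rx_len, PCI_DMA_FROMDEVICE);
	/* ... the CPU may now inspect the received bytes ... */
#endif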
/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 */
void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	u32 bus_addr;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	for (i = 1; i < nelems; i++)
		if (!sglist[i].dma_length)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
{
	struct pci_dev *ali_isa_bridge;
	u8 val;

	/* ALI sound chips generate 31-bits of DMA, a special register
	 * determines what bit 31 is emitted as.
	 */
	ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
					PCI_DEVICE_ID_AL_M1533,
					NULL);

	pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
	if (set_bit)
		val |= 0x01;
	else
		val &= ~0x01;
	pci_write_config_byte(ali_isa_bridge, 0x7e, val);
	pci_dev_put(ali_isa_bridge);
}
int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	u64 dma_addr_mask;

	if (pdev == NULL) {
		dma_addr_mask = 0xffffffff;
	} else {
		struct pci_iommu *iommu = pcp->pbm->iommu;

		dma_addr_mask = iommu->dma_addr_mask;

		if (pdev->vendor == PCI_VENDOR_ID_AL &&
		    pdev->device == PCI_DEVICE_ID_AL_M5451 &&
		    device_mask == 0x7fffffff) {
			ali_sound_dma_hack(pdev,
					   (dma_addr_mask & 0x80000000) != 0);
			return 1;
		}
	}

	if (device_mask >= (1UL << 32UL))
		return 0;

	return (device_mask & dma_addr_mask) == dma_addr_mask;
}
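
/* Example usage (editor's sketch): generic code reaches this hook when a
 * driver negotiates its DMA mask, conceptually:
 */
#if 0
	if (!pci_dma_supported(pdev, 0xffffffffUL))
		return -EIO;
	pdev->dma_mask = 0xffffffffUL;
#endif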