alpha: convert IOMMU to use ALIGN()
1 /*
2 * linux/arch/alpha/kernel/pci_iommu.c
3 */
4
5 #include <linux/kernel.h>
6 #include <linux/mm.h>
7 #include <linux/pci.h>
8 #include <linux/slab.h>
9 #include <linux/bootmem.h>
10 #include <linux/scatterlist.h>
11 #include <linux/log2.h>
12 #include <linux/dma-mapping.h>
13
14 #include <asm/io.h>
15 #include <asm/hwrpb.h>
16
17 #include "proto.h"
18 #include "pci_impl.h"
19
20
21 #define DEBUG_ALLOC 0
22 #if DEBUG_ALLOC > 0
23 # define DBGA(args...) printk(KERN_DEBUG args)
24 #else
25 # define DBGA(args...)
26 #endif
27 #if DEBUG_ALLOC > 1
28 # define DBGA2(args...) printk(KERN_DEBUG args)
29 #else
30 # define DBGA2(args...)
31 #endif
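/*
 * A quick note on the two debug levels above, with a sketched build
 * setting (not part of the shipped configuration): compiling with
 * DEBUG_ALLOC set to 1 enables only the DBGA() messages, while 2 (or
 * more) also enables the verbose per-mapping DBGA2() messages, e.g.
 *
 *	#define DEBUG_ALLOC 2
 *	DBGA("pci_map_sg: %ld entries\n", out - start);	// printed
 *	DBGA2("pci_map_single: ...\n", ...);		// also printed
 *
 * With DEBUG_ALLOC at 0 both macros expand to nothing.
 */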
32
33 #define DEBUG_NODIRECT 0
34 #define DEBUG_FORCEDAC 0
35
36 #define ISA_DMA_MASK 0x00ffffff
37
38 static inline unsigned long
39 mk_iommu_pte(unsigned long paddr)
40 {
41 return (paddr >> (PAGE_SHIFT-1)) | 1;
42 }
43
44 static inline long
45 calc_npages(long bytes)
46 {
47 return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
48 }
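/*
 * Worked example, assuming the usual 8 KB Alpha page size (PAGE_SHIFT
 * == 13): mk_iommu_pte(0x2000) yields (0x2000 >> 12) | 1 == 0x3, i.e.
 * the page frame shifted into the PTE's PFN field with the valid bit
 * set, and calc_npages(0x3001) yields (0x3001 + 0x1fff) >> 13 == 2,
 * i.e. 12289 bytes span two 8 KB IOMMU pages.
 */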
49 \f
50
51 /* Return MAX, or, if main memory is smaller than MAX, main memory
52    rounded up to the next power of two. */
53
54 unsigned long
55 size_for_memory(unsigned long max)
56 {
57 unsigned long mem = max_low_pfn << PAGE_SHIFT;
58 if (mem < max)
59 max = roundup_pow_of_two(mem);
60 return max;
61 }
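/*
 * For example, a caller asking for a 1 GB window (max == 0x40000000)
 * on a machine with 192 MB of memory gets 256 MB back, since
 * roundup_pow_of_two(192 MB) == 256 MB; on a machine with 2 GB or more
 * of memory the 1 GB cap is returned unchanged.
 */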
62 \f
63 struct pci_iommu_arena * __init
64 iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
65 unsigned long window_size, unsigned long align)
66 {
67 unsigned long mem_size;
68 struct pci_iommu_arena *arena;
69
70 mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));
71
72 /* Note that the TLB lookup logic uses bitwise concatenation,
73 not addition, so the required arena alignment is based on
74 the size of the window. Retain the align parameter so that
75 particular systems can over-align the arena. */
76 if (align < mem_size)
77 align = mem_size;
78
79
80 #ifdef CONFIG_DISCONTIGMEM
81
82 if (!NODE_DATA(nid) ||
83 (NULL == (arena = alloc_bootmem_node(NODE_DATA(nid),
84 sizeof(*arena))))) {
85 printk("%s: couldn't allocate arena from node %d\n"
86 " falling back to system-wide allocation\n",
87 __FUNCTION__, nid);
88 arena = alloc_bootmem(sizeof(*arena));
89 }
90
91 if (!NODE_DATA(nid) ||
92 (NULL == (arena->ptes = __alloc_bootmem_node(NODE_DATA(nid),
93 mem_size,
94 align,
95 0)))) {
96 printk("%s: couldn't allocate arena ptes from node %d\n"
97 " falling back to system-wide allocation\n",
98 __FUNCTION__, nid);
99 arena->ptes = __alloc_bootmem(mem_size, align, 0);
100 }
101
102 #else /* CONFIG_DISCONTIGMEM */
103
104 arena = alloc_bootmem(sizeof(*arena));
105 arena->ptes = __alloc_bootmem(mem_size, align, 0);
106
107 #endif /* CONFIG_DISCONTIGMEM */
108
109 spin_lock_init(&arena->lock);
110 arena->hose = hose;
111 arena->dma_base = base;
112 arena->size = window_size;
113 arena->next_entry = 0;
114
115 /* Align allocations to a multiple of a page size. Not needed
116 unless there are chip bugs. */
117 arena->align_entry = 1;
118
119 return arena;
120 }
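/*
 * Sizing sketch, again assuming 8 KB pages and 8-byte PTEs: each page
 * of PTEs maps 1024 IOMMU pages (8 MB of bus space), so a 1 GB window
 * needs window_size / (PAGE_SIZE / sizeof(unsigned long)) ==
 * 0x40000000 / 1024 == 1 MB of PTEs, and because the TLB concatenates
 * rather than adds, that 1 MB table must itself be 1 MB aligned --
 * which is exactly what the "if (align < mem_size)" adjustment above
 * enforces.
 */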
121
122 struct pci_iommu_arena * __init
123 iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
124 unsigned long window_size, unsigned long align)
125 {
126 return iommu_arena_new_node(0, hose, base, window_size, align);
127 }
128
129 /* Must be called with the arena lock held */
130 static long
131 iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
132 {
133 unsigned long *ptes;
134 long i, p, nent;
135
136 /* Search forward for the first mask-aligned sequence of N free ptes */
137 ptes = arena->ptes;
138 nent = arena->size >> PAGE_SHIFT;
139 p = ALIGN(arena->next_entry, mask + 1);
140 i = 0;
141 while (i < n && p+i < nent) {
142 if (ptes[p+i])
143 p = ALIGN(p + i + 1, mask + 1), i = 0;
144 else
145 i = i + 1;
146 }
147
148 if (i < n) {
149 /* Reached the end. Flush the TLB and restart the
150 search from the beginning. */
151 alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
152
153 p = 0, i = 0;
154 while (i < n && p+i < nent) {
155 if (ptes[p+i])
156 p = ALIGN(p + i + 1, mask + 1), i = 0;
157 else
158 i = i + 1;
159 }
160
161 if (i < n)
162 return -1;
163 }
164
165 /* Success. It's the responsibility of the caller to mark them
166 in use before releasing the lock */
167 return p;
168 }
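/*
 * The ALIGN() calls above are the point of this conversion: ALIGN(x, a)
 * rounds x up to the next multiple of the power-of-two a, i.e.
 *
 *	ALIGN(x, a) == ((x) + (a) - 1) & ~((a) - 1)
 *
 * Since the mask argument passed in is "alignment - 1", ALIGN(p, mask + 1)
 * replaces the previously open-coded (p + mask) & ~mask, rounding a
 * candidate PTE index up to the required entry alignment.  E.g. with
 * mask == 7 (the 64 KB ISA alignment used below), ALIGN(13, 8) == 16.
 */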
169
170 static long
171 iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
172 {
173 unsigned long flags;
174 unsigned long *ptes;
175 long i, p, mask;
176
177 spin_lock_irqsave(&arena->lock, flags);
178
179 /* Search for N empty ptes */
180 ptes = arena->ptes;
181 mask = max(align, arena->align_entry) - 1;
182 p = iommu_arena_find_pages(arena, n, mask);
183 if (p < 0) {
184 spin_unlock_irqrestore(&arena->lock, flags);
185 return -1;
186 }
187
188 /* Success. Mark them all in use, ie not zero and invalid
189 for the iommu tlb that could load them from under us.
190 The chip specific bits will fill this in with something
191 kosher when we return. */
192 for (i = 0; i < n; ++i)
193 ptes[p+i] = IOMMU_INVALID_PTE;
194
195 arena->next_entry = p + n;
196 spin_unlock_irqrestore(&arena->lock, flags);
197
198 return p;
199 }
200
201 static void
202 iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
203 {
204 unsigned long *p;
205 long i;
206
207 p = arena->ptes + ofs;
208 for (i = 0; i < n; ++i)
209 p[i] = 0;
210 }
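/*
 * The two routines above pair up: a mapping path calls
 * iommu_arena_alloc(arena, npages, align) to get a starting PTE index,
 * fills ptes[index .. index + npages - 1] with mk_iommu_pte() values,
 * and the corresponding unmap path later calls
 * iommu_arena_free(arena, index, npages).  Note that free only zeroes
 * the PTEs; flushing the IOMMU TLB is left to the callers (see
 * pci_unmap_single/pci_unmap_sg below), which flush only when a freed
 * entry lies at or above next_entry.
 */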
211 \f
212 /* True if the machine supports DAC addressing, and DEV can
213 make use of it given MASK. */
214 static int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);
215
216 /* Map a single buffer of the indicated size for PCI DMA in streaming
217 mode. The 32-bit PCI bus mastering address to use is returned.
218 Once the device is given the dma address, the device owns this memory
219 until either pci_unmap_single or pci_dma_sync_single is performed. */
220
221 static dma_addr_t
222 pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
223 int dac_allowed)
224 {
225 struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
226 dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
227 struct pci_iommu_arena *arena;
228 long npages, dma_ofs, i;
229 unsigned long paddr;
230 dma_addr_t ret;
231 unsigned int align = 0;
232
233 paddr = __pa(cpu_addr);
234
235 #if !DEBUG_NODIRECT
236 /* First check to see if we can use the direct map window. */
237 if (paddr + size + __direct_map_base - 1 <= max_dma
238 && paddr + size <= __direct_map_size) {
239 ret = paddr + __direct_map_base;
240
241 DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n",
242 cpu_addr, size, ret, __builtin_return_address(0));
243
244 return ret;
245 }
246 #endif
247
248 /* Next, use DAC if selected earlier. */
249 if (dac_allowed) {
250 ret = paddr + alpha_mv.pci_dac_offset;
251
252 DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n",
253 cpu_addr, size, ret, __builtin_return_address(0));
254
255 return ret;
256 }
257
258 /* If the machine doesn't define a pci_tbi routine, we have to
259 assume it doesn't support sg mapping, and, since we tried to
260 use direct_map above, it now must be considered an error. */
261 if (! alpha_mv.mv_pci_tbi) {
262 static int been_here = 0; /* Only print the message once. */
263 if (!been_here) {
264 printk(KERN_WARNING "pci_map_single: no HW sg\n");
265 been_here = 1;
266 }
267 return 0;
268 }
269
270 arena = hose->sg_pci;
271 if (!arena || arena->dma_base + arena->size - 1 > max_dma)
272 arena = hose->sg_isa;
273
274 npages = calc_npages((paddr & ~PAGE_MASK) + size);
275
276 /* Force allocation to 64KB boundary for ISA bridges. */
277 if (pdev && pdev == isa_bridge)
278 align = 8;
279 dma_ofs = iommu_arena_alloc(arena, npages, align);
280 if (dma_ofs < 0) {
281 printk(KERN_WARNING "pci_map_single failed: "
282 "could not allocate dma page tables\n");
283 return 0;
284 }
285
286 paddr &= PAGE_MASK;
287 for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
288 arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);
289
290 ret = arena->dma_base + dma_ofs * PAGE_SIZE;
291 ret += (unsigned long)cpu_addr & ~PAGE_MASK;
292
293 DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n",
294 cpu_addr, size, npages, ret, __builtin_return_address(0));
295
296 return ret;
297 }
298
299 dma_addr_t
300 pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
301 {
302 int dac_allowed;
303
304 if (dir == PCI_DMA_NONE)
305 BUG();
306
307 dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
308 return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
309 }
310 EXPORT_SYMBOL(pci_map_single);
311
312 dma_addr_t
313 pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
314 size_t size, int dir)
315 {
316 int dac_allowed;
317
318 if (dir == PCI_DMA_NONE)
319 BUG();
320
321 dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
322 return pci_map_single_1(pdev, (char *)page_address(page) + offset,
323 size, dac_allowed);
324 }
325 EXPORT_SYMBOL(pci_map_page);
326
327 /* Unmap a single streaming mode DMA translation. The DMA_ADDR and
328 SIZE must match what was provided for in a previous pci_map_single
329 call. All other usages are undefined. After this call, reads by
330 the cpu to the buffer are guaranteed to see whatever the device
331 wrote there. */
332
333 void
334 pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
335 int direction)
336 {
337 unsigned long flags;
338 struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
339 struct pci_iommu_arena *arena;
340 long dma_ofs, npages;
341
342 if (direction == PCI_DMA_NONE)
343 BUG();
344
345 if (dma_addr >= __direct_map_base
346 && dma_addr < __direct_map_base + __direct_map_size) {
347 /* Nothing to do. */
348
349 DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n",
350 dma_addr, size, __builtin_return_address(0));
351
352 return;
353 }
354
355 if (dma_addr > 0xffffffff) {
356 DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n",
357 dma_addr, size, __builtin_return_address(0));
358 return;
359 }
360
361 arena = hose->sg_pci;
362 if (!arena || dma_addr < arena->dma_base)
363 arena = hose->sg_isa;
364
365 dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
366 if (dma_ofs * PAGE_SIZE >= arena->size) {
367 printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx "
368 " base %lx size %x\n", dma_addr, arena->dma_base,
369 arena->size);
370 return;
371 BUG();
372 }
373
374 npages = calc_npages((dma_addr & ~PAGE_MASK) + size);
375
376 spin_lock_irqsave(&arena->lock, flags);
377
378 iommu_arena_free(arena, dma_ofs, npages);
379
380 /* If we're freeing ptes above the `next_entry' pointer (they
381 may have snuck back into the TLB since the last wrap flush),
382 we need to flush the TLB before reallocating the latter. */
383 if (dma_ofs >= arena->next_entry)
384 alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);
385
386 spin_unlock_irqrestore(&arena->lock, flags);
387
388 DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
389 dma_addr, size, npages, __builtin_return_address(0));
390 }
391 EXPORT_SYMBOL(pci_unmap_single);
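/*
 * Typical streaming use from a driver, sketched with a hypothetical
 * buffer and length (not taken from any real driver):
 *
 *	dma_addr_t bus = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *	if (!bus)
 *		return -ENOMEM;
 *	... point the device at 'bus' and start the transfer ...
 *	pci_unmap_single(pdev, bus, len, PCI_DMA_TODEVICE);
 *
 * Note that this implementation returns 0 rather than a dedicated
 * error cookie when no mapping can be made, so 0 is the value to test
 * for failure here.
 */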
392
393 void
394 pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
395 size_t size, int direction)
396 {
397 pci_unmap_single(pdev, dma_addr, size, direction);
398 }
399 EXPORT_SYMBOL(pci_unmap_page);
400
401 /* Allocate and map kernel buffer using consistent mode DMA for PCI
402 device. Returns non-NULL cpu-view pointer to the buffer if
403 successful and sets *DMA_ADDRP to the pci side dma address as well,
404 else DMA_ADDRP is undefined. */
405
406 void *
407 pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
408 {
409 void *cpu_addr;
410 long order = get_order(size);
411 gfp_t gfp = GFP_ATOMIC;
412
413 try_again:
414 cpu_addr = (void *)__get_free_pages(gfp, order);
415 if (! cpu_addr) {
416 printk(KERN_INFO "pci_alloc_consistent: "
417 "get_free_pages failed from %p\n",
418 __builtin_return_address(0));
419 /* ??? Really atomic allocation? Otherwise we could play
420 with vmalloc and sg if we can't find contiguous memory. */
421 return NULL;
422 }
423 memset(cpu_addr, 0, size);
424
425 *dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
426 if (*dma_addrp == 0) {
427 free_pages((unsigned long)cpu_addr, order);
428 if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
429 return NULL;
430 /* The address doesn't fit required mask and we
431 do not have iommu. Try again with GFP_DMA. */
432 gfp |= GFP_DMA;
433 goto try_again;
434 }
435
436 DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
437 size, cpu_addr, *dma_addrp, __builtin_return_address(0));
438
439 return cpu_addr;
440 }
441 EXPORT_SYMBOL(pci_alloc_consistent);
442
443 /* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must
444 be values that were returned from pci_alloc_consistent. SIZE must
445 be the same as what was passed into pci_alloc_consistent.
446 References to the memory and mappings associated with CPU_ADDR or
447 DMA_ADDR past this call are illegal. */
448
449 void
450 pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
451 dma_addr_t dma_addr)
452 {
453 pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
454 free_pages((unsigned long)cpu_addr, get_order(size));
455
456 DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
457 dma_addr, size, __builtin_return_address(0));
458 }
459 EXPORT_SYMBOL(pci_free_consistent);
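/*
 * Consistent-mode sketch, again with hypothetical names: a driver that
 * needs a 4 KB descriptor ring shared with the device would do
 *
 *	dma_addr_t ring_dma;
 *	void *ring = pci_alloc_consistent(pdev, 4096, &ring_dma);
 *	if (!ring)
 *		return -ENOMEM;
 *	... give ring_dma to the device, use 'ring' from the CPU ...
 *	pci_free_consistent(pdev, 4096, ring, ring_dma);
 *
 * The allocation is zeroed, and on this implementation it never takes
 * the DAC path (dac_allowed is 0 in the pci_map_single_1 call above).
 */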
460
461 /* Classify the elements of the scatterlist. Write dma_address
462 of each element with:
463 0 : Followers all physically adjacent.
464 1 : Followers all virtually adjacent.
465 -1 : Not leader, physically adjacent to previous.
466 -2 : Not leader, virtually adjacent to previous.
467 Write dma_length of each leader with the combined lengths of
468 the mergeable followers. */
469
470 #define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
471 #define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
472
473 static void
474 sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
475 int virt_ok)
476 {
477 unsigned long next_paddr;
478 struct scatterlist *leader;
479 long leader_flag, leader_length;
480 unsigned int max_seg_size;
481
482 leader = sg;
483 leader_flag = 0;
484 leader_length = leader->length;
485 next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;
486
487 /* we will not merge sg entries without a device. */
488 max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
489 for (++sg; sg < end; ++sg) {
490 unsigned long addr, len;
491 addr = SG_ENT_PHYS_ADDRESS(sg);
492 len = sg->length;
493
494 if (leader_length + len > max_seg_size)
495 goto new_segment;
496
497 if (next_paddr == addr) {
498 sg->dma_address = -1;
499 leader_length += len;
500 } else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
501 sg->dma_address = -2;
502 leader_flag = 1;
503 leader_length += len;
504 } else {
505 new_segment:
506 leader->dma_address = leader_flag;
507 leader->dma_length = leader_length;
508 leader = sg;
509 leader_flag = 0;
510 leader_length = len;
511 }
512
513 next_paddr = addr + len;
514 }
515
516 leader->dma_address = leader_flag;
517 leader->dma_length = leader_length;
518 }
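/*
 * A worked example of the classification, assuming 8 KB pages: for
 * entries at physical 0x10000 (len 0x2000), 0x12000 (len 0x2000) and
 * 0x20000 (len 0x2000), the second is physically adjacent to the first
 * and is marked -1; the third is not adjacent, but both its start and
 * the running end (0x14000) are page aligned, so with virt_ok it is
 * marked -2 and the leader ends up with dma_address 1 ("followers all
 * virtually adjacent") and dma_length 0x6000.  With virt_ok == 0, or
 * if the merged length exceeded dma_get_max_seg_size(), the third
 * entry would instead start a new leader of its own.
 */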
519
520 /* Given a scatterlist leader, choose an allocation method and fill
521 in the blanks. */
522
523 static int
524 sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
525 struct scatterlist *out, struct pci_iommu_arena *arena,
526 dma_addr_t max_dma, int dac_allowed)
527 {
528 unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
529 long size = leader->dma_length;
530 struct scatterlist *sg;
531 unsigned long *ptes;
532 long npages, dma_ofs, i;
533
534 #if !DEBUG_NODIRECT
535 /* If everything is physically contiguous, and the addresses
536 fall into the direct-map window, use it. */
537 if (leader->dma_address == 0
538 && paddr + size + __direct_map_base - 1 <= max_dma
539 && paddr + size <= __direct_map_size) {
540 out->dma_address = paddr + __direct_map_base;
541 out->dma_length = size;
542
543 DBGA(" sg_fill: [%p,%lx] -> direct %lx\n",
544 __va(paddr), size, out->dma_address);
545
546 return 0;
547 }
548 #endif
549
550 /* If physically contiguous and DAC is available, use it. */
551 if (leader->dma_address == 0 && dac_allowed) {
552 out->dma_address = paddr + alpha_mv.pci_dac_offset;
553 out->dma_length = size;
554
555 DBGA(" sg_fill: [%p,%lx] -> DAC %lx\n",
556 __va(paddr), size, out->dma_address);
557
558 return 0;
559 }
560
561 /* Otherwise, we'll use the iommu to make the pages virtually
562 contiguous. */
563
564 paddr &= ~PAGE_MASK;
565 npages = calc_npages(paddr + size);
566 dma_ofs = iommu_arena_alloc(arena, npages, 0);
567 if (dma_ofs < 0) {
568 /* If we attempted a direct map above but failed, die. */
569 if (leader->dma_address == 0)
570 return -1;
571
572 /* Otherwise, break up the remaining virtually contiguous
573 hunks into individual direct maps and retry. */
574 sg_classify(dev, leader, end, 0);
575 return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
576 }
577
578 out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
579 out->dma_length = size;
580
581 DBGA(" sg_fill: [%p,%lx] -> sg %lx np %ld\n",
582 __va(paddr), size, out->dma_address, npages);
583
584 /* All virtually contiguous. We need to find the length of each
585 physically contiguous subsegment to fill in the ptes. */
586 ptes = &arena->ptes[dma_ofs];
587 sg = leader;
588 do {
589 #if DEBUG_ALLOC > 0
590 struct scatterlist *last_sg = sg;
591 #endif
592
593 size = sg->length;
594 paddr = SG_ENT_PHYS_ADDRESS(sg);
595
596 while (sg+1 < end && (int) sg[1].dma_address == -1) {
597 size += sg[1].length;
598 sg++;
599 }
600
601 npages = calc_npages((paddr & ~PAGE_MASK) + size);
602
603 paddr &= PAGE_MASK;
604 for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
605 *ptes++ = mk_iommu_pte(paddr);
606
607 #if DEBUG_ALLOC > 0
608 DBGA(" (%ld) [%p,%x] np %ld\n",
609 last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
610 last_sg->length, npages);
611 while (++last_sg <= sg) {
612 DBGA(" (%ld) [%p,%x] cont\n",
613 last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
614 last_sg->length);
615 }
616 #endif
617 } while (++sg < end && (int) sg->dma_address < 0);
618
619 return 1;
620 }
621
622 int
623 pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
624 int direction)
625 {
626 struct scatterlist *start, *end, *out;
627 struct pci_controller *hose;
628 struct pci_iommu_arena *arena;
629 dma_addr_t max_dma;
630 int dac_allowed;
631 struct device *dev;
632
633 if (direction == PCI_DMA_NONE)
634 BUG();
635
636 dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
637
638 dev = pdev ? &pdev->dev : NULL;
639
640 /* Fast path single entry scatterlists. */
641 if (nents == 1) {
642 sg->dma_length = sg->length;
643 sg->dma_address
644 = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
645 sg->length, dac_allowed);
646 return sg->dma_address != 0;
647 }
648
649 start = sg;
650 end = sg + nents;
651
652 /* First, prepare information about the entries. */
653 sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);
654
655 /* Second, figure out where we're going to map things. */
656 if (alpha_mv.mv_pci_tbi) {
657 hose = pdev ? pdev->sysdata : pci_isa_hose;
658 max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
659 arena = hose->sg_pci;
660 if (!arena || arena->dma_base + arena->size - 1 > max_dma)
661 arena = hose->sg_isa;
662 } else {
663 max_dma = -1;
664 arena = NULL;
665 hose = NULL;
666 }
667
668 /* Third, iterate over the scatterlist leaders and allocate
669 dma space as needed. */
670 for (out = sg; sg < end; ++sg) {
671 if ((int) sg->dma_address < 0)
672 continue;
673 if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
674 goto error;
675 out++;
676 }
677
678 /* Mark the end of the list for pci_unmap_sg. */
679 if (out < end)
680 out->dma_length = 0;
681
682 if (out - start == 0)
683 printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
684 DBGA("pci_map_sg: %ld entries\n", out - start);
685
686 return out - start;
687
688 error:
689 printk(KERN_WARNING "pci_map_sg failed: "
690 "could not allocate dma page tables\n");
691
692 /* Some allocation failed while mapping the scatterlist
693 entries. Unmap them now. */
694 if (out > start)
695 pci_unmap_sg(pdev, start, out - start, direction);
696 return 0;
697 }
698 EXPORT_SYMBOL(pci_map_sg);
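/*
 * Scatter-gather sketch for a hypothetical driver that has already
 * built an nents-entry struct scatterlist sg[]:
 *
 *	int count = pci_map_sg(pdev, sg, nents, PCI_DMA_FROMDEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for (i = 0; i < count; i++)
 *		... program the device with sg_dma_address(&sg[i])
 *		    and sg_dma_len(&sg[i]) ...
 *	pci_unmap_sg(pdev, sg, nents, PCI_DMA_FROMDEVICE);
 *
 * The returned count may be smaller than nents because of the merging
 * done in sg_classify(), and pci_unmap_sg() is passed the original
 * nents, not the returned count.
 */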
699
700 /* Unmap a set of streaming mode DMA translations. Again, cpu read
701 rules concerning calls here are the same as for pci_unmap_single()
702 above. */
703
704 void
705 pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
706 int direction)
707 {
708 unsigned long flags;
709 struct pci_controller *hose;
710 struct pci_iommu_arena *arena;
711 struct scatterlist *end;
712 dma_addr_t max_dma;
713 dma_addr_t fbeg, fend;
714
715 if (direction == PCI_DMA_NONE)
716 BUG();
717
718 if (! alpha_mv.mv_pci_tbi)
719 return;
720
721 hose = pdev ? pdev->sysdata : pci_isa_hose;
722 max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
723 arena = hose->sg_pci;
724 if (!arena || arena->dma_base + arena->size - 1 > max_dma)
725 arena = hose->sg_isa;
726
727 fbeg = -1, fend = 0;
728
729 spin_lock_irqsave(&arena->lock, flags);
730
731 for (end = sg + nents; sg < end; ++sg) {
732 dma64_addr_t addr;
733 size_t size;
734 long npages, ofs;
735 dma_addr_t tend;
736
737 addr = sg->dma_address;
738 size = sg->dma_length;
739 if (!size)
740 break;
741
742 if (addr > 0xffffffff) {
743 /* It's a DAC address -- nothing to do. */
744 DBGA(" (%ld) DAC [%lx,%lx]\n",
745 sg - end + nents, addr, size);
746 continue;
747 }
748
749 if (addr >= __direct_map_base
750 && addr < __direct_map_base + __direct_map_size) {
751 /* Nothing to do. */
752 DBGA(" (%ld) direct [%lx,%lx]\n",
753 sg - end + nents, addr, size);
754 continue;
755 }
756
757 DBGA(" (%ld) sg [%lx,%lx]\n",
758 sg - end + nents, addr, size);
759
760 npages = calc_npages((addr & ~PAGE_MASK) + size);
761 ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
762 iommu_arena_free(arena, ofs, npages);
763
764 tend = addr + size - 1;
765 if (fbeg > addr) fbeg = addr;
766 if (fend < tend) fend = tend;
767 }
768
769 /* If we're freeing ptes above the `next_entry' pointer (they
770 may have snuck back into the TLB since the last wrap flush),
771 we need to flush the TLB before reallocating the latter. */
772 if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
773 alpha_mv.mv_pci_tbi(hose, fbeg, fend);
774
775 spin_unlock_irqrestore(&arena->lock, flags);
776
777 DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
778 }
779 EXPORT_SYMBOL(pci_unmap_sg);
780
781
782 /* Return whether the given PCI device DMA address mask can be
783 supported properly. */
784
785 int
786 pci_dma_supported(struct pci_dev *pdev, u64 mask)
787 {
788 struct pci_controller *hose;
789 struct pci_iommu_arena *arena;
790
791 /* If there exists a direct map, and the mask fits either
792 the entire direct mapped space or the total system memory as
793 shifted by the map base */
794 if (__direct_map_size != 0
795 && (__direct_map_base + __direct_map_size - 1 <= mask ||
796 __direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
797 return 1;
798
799 /* Check that we have a scatter-gather arena that fits. */
800 hose = pdev ? pdev->sysdata : pci_isa_hose;
801 arena = hose->sg_isa;
802 if (arena && arena->dma_base + arena->size - 1 <= mask)
803 return 1;
804 arena = hose->sg_pci;
805 if (arena && arena->dma_base + arena->size - 1 <= mask)
806 return 1;
807
808 /* As last resort try ZONE_DMA. */
809 if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
810 return 1;
811
812 return 0;
813 }
814 EXPORT_SYMBOL(pci_dma_supported);
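/*
 * Illustration: a device limited to 32-bit addressing would be probed
 * with something like
 *
 *	if (!pci_dma_supported(pdev, 0xffffffffUL))
 *		return -EIO;
 *
 * which succeeds if the direct-map window, a scatter-gather arena, or
 * (as a last resort) ZONE_DMA fits below the mask.
 */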
815
816 \f
817 /*
818 * AGP GART extensions to the IOMMU
819 */
820 int
821 iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
822 {
823 unsigned long flags;
824 unsigned long *ptes;
825 long i, p;
826
827 if (!arena) return -EINVAL;
828
829 spin_lock_irqsave(&arena->lock, flags);
830
831 /* Search for N empty ptes. */
832 ptes = arena->ptes;
833 p = iommu_arena_find_pages(arena, pg_count, align_mask);
834 if (p < 0) {
835 spin_unlock_irqrestore(&arena->lock, flags);
836 return -1;
837 }
838
839 /* Success. Mark them all reserved (ie not zero and invalid)
840 for the iommu tlb that could load them from under us.
841 They will be filled in with valid bits by _bind() */
842 for (i = 0; i < pg_count; ++i)
843 ptes[p+i] = IOMMU_RESERVED_PTE;
844
845 arena->next_entry = p + pg_count;
846 spin_unlock_irqrestore(&arena->lock, flags);
847
848 return p;
849 }
850
851 int
852 iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
853 {
854 unsigned long *ptes;
855 long i;
856
857 if (!arena) return -EINVAL;
858
859 ptes = arena->ptes;
860
861 /* Make sure they're all reserved first... */
862 for(i = pg_start; i < pg_start + pg_count; i++)
863 if (ptes[i] != IOMMU_RESERVED_PTE)
864 return -EBUSY;
865
866 iommu_arena_free(arena, pg_start, pg_count);
867 return 0;
868 }
869
870 int
871 iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
872 unsigned long *physaddrs)
873 {
874 unsigned long flags;
875 unsigned long *ptes;
876 long i, j;
877
878 if (!arena) return -EINVAL;
879
880 spin_lock_irqsave(&arena->lock, flags);
881
882 ptes = arena->ptes;
883
884 for(j = pg_start; j < pg_start + pg_count; j++) {
885 if (ptes[j] != IOMMU_RESERVED_PTE) {
886 spin_unlock_irqrestore(&arena->lock, flags);
887 return -EBUSY;
888 }
889 }
890
891 for(i = 0, j = pg_start; i < pg_count; i++, j++)
892 ptes[j] = mk_iommu_pte(physaddrs[i]);
893
894 spin_unlock_irqrestore(&arena->lock, flags);
895
896 return 0;
897 }
898
899 int
900 iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
901 {
902 unsigned long *p;
903 long i;
904
905 if (!arena) return -EINVAL;
906
907 p = arena->ptes + pg_start;
908 for(i = 0; i < pg_count; i++)
909 p[i] = IOMMU_RESERVED_PTE;
910
911 return 0;
912 }
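/*
 * The four GART helpers above are meant to be used as a sequence by an
 * AGP driver: iommu_reserve() stakes out pg_count consecutive entries
 * (returning their starting index), iommu_bind() points them at real
 * pages, iommu_unbind() returns them to the reserved-but-invalid
 * state, and iommu_release() finally hands them back to the arena.
 * A hypothetical caller binding two pages described by a physaddrs[]
 * array might do:
 *
 *	int ofs = iommu_reserve(arena, 2, 0);
 *	if (ofs >= 0) {
 *		iommu_bind(arena, ofs, 2, physaddrs);
 *		...
 *		iommu_unbind(arena, ofs, 2);
 *		iommu_release(arena, ofs, 2);
 *	}
 */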
913
914 /* True if the machine supports DAC addressing, and DEV can
915 make use of it given MASK. */
916
917 static int
918 pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
919 {
920 dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
921 int ok = 1;
922
923 /* If this is not set, the machine doesn't support DAC at all. */
924 if (dac_offset == 0)
925 ok = 0;
926
927 /* The device has to be able to address our DAC bit. */
928 if ((dac_offset & dev->dma_mask) != dac_offset)
929 ok = 0;
930
931 /* If both conditions above are met, we are fine. */
932 DBGA("pci_dac_dma_supported %s from %p\n",
933 ok ? "yes" : "no", __builtin_return_address(0));
934
935 return ok;
936 }
937
938 /* Helper for generic DMA-mapping functions. */
939
940 struct pci_dev *
941 alpha_gendev_to_pci(struct device *dev)
942 {
943 if (dev && dev->bus == &pci_bus_type)
944 return to_pci_dev(dev);
945
946 /* Assume that non-PCI devices asking for DMA are either ISA or EISA,
947 BUG() otherwise. */
948 BUG_ON(!isa_bridge);
949
950 /* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
951 bridge is bus master then). */
952 if (!dev || !dev->dma_mask || !*dev->dma_mask)
953 return isa_bridge;
954
955 /* For EISA bus masters, return isa_bridge (it might have smaller
956 dma_mask due to wiring limitations). */
957 if (*dev->dma_mask >= isa_bridge->dma_mask)
958 return isa_bridge;
959
960 /* This assumes ISA bus master with dma_mask 0xffffff. */
961 return NULL;
962 }
963 EXPORT_SYMBOL(alpha_gendev_to_pci);
964
965 int
966 dma_set_mask(struct device *dev, u64 mask)
967 {
968 if (!dev->dma_mask ||
969 !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
970 return -EIO;
971
972 *dev->dma_mask = mask;
973
974 return 0;
975 }
976 EXPORT_SYMBOL(dma_set_mask);
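/*
 * Generic DMA API sketch: a non-PCI (ISA/EISA) device that can only
 * address 16 MB would end up here via something like
 *
 *	if (dma_set_mask(dev, 0x00ffffffUL))
 *		return -EIO;
 *
 * alpha_gendev_to_pci() then routes the check to the matching pci_dev
 * (the ISA bridge for most non-PCI devices), and pci_dma_supported()
 * decides whether the mask can be honoured.
 */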