/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"


#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)	printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)	printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0
#define DEBUG_FORCEDAC 0

#define ISA_DMA_MASK 0x00ffffff

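/* An IOMMU pte holds the physical page frame number shifted left by one,
   with bit 0 serving as the valid bit.  */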
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}

static inline long
calc_npages(long bytes)
{
	return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}


/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}

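/* Create a scatter-gather arena for HOSE covering WINDOW_SIZE bytes of bus
   address space starting at BASE.  One 8-byte pte is allocated per IOMMU
   page in the window.  */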
struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;

#ifdef CONFIG_DISCONTIGMEM

	if (!NODE_DATA(nid) ||
	    (NULL == (arena = alloc_bootmem_node(NODE_DATA(nid),
						 sizeof(*arena))))) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __FUNCTION__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	if (!NODE_DATA(nid) ||
	    (NULL == (arena->ptes = __alloc_bootmem_node(NODE_DATA(nid),
							 mem_size,
							 align,
							 0)))) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __FUNCTION__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}

struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}

/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;

	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		if (ptes[p+i])
			p = ALIGN(p + i + 1, mask + 1), i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

			pass++;
			p = 0;
			i = 0;
			goto again;
		} else
			return -1;
	}

	/* Success.  It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}

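/* Allocate N consecutive ptes from the arena, aligned to at least ALIGN
   entries (and to the arena's own alignment).  Returns the starting entry
   offset, or -1 if no space could be found.  */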
static long
iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

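/* Mark N ptes starting at OFS as free.  The IOMMU TLB is not flushed here;
   callers such as pci_unmap_single() do that when the freed range may still
   be cached.  */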
static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}

/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */
static int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);

/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */

static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error.  */
	if (! alpha_mv.mv_pci_tbi) {
		static int been_here = 0; /* Only print the message once.  */
		if (!been_here) {
			printk(KERN_WARNING "pci_map_single: no HW sg\n");
			been_here = 1;
		}
		return 0;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = calc_npages((paddr & ~PAGE_MASK) + size);

	/* Force allocation to a 64KB boundary (eight 8KB IOMMU pages)
	   for ISA bridges.  */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}

dma_addr_t
pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_single);

dma_addr_t
pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
	     size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_page);

/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

void
pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
		 int direction)
{
	unsigned long flags;
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx "
		       " base %lx size %x\n", dma_addr, arena->dma_base,
		       arena->size);
		return;
		BUG();
	}

	npages = calc_npages((dma_addr & ~PAGE_MASK) + size);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_unmap_single);

void
pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
	       size_t size, int direction)
{
	pci_unmap_single(pdev, dma_addr, size, direction);
}
EXPORT_SYMBOL(pci_unmap_page);

/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

void *
pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	void *cpu_addr;
	long order = get_order(size);
	gfp_t gfp = GFP_ATOMIC;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %p\n",
		       __builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu.  Try again with GFP_DMA.  */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}
EXPORT_SYMBOL(pci_alloc_consistent);

/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
		    dma_addr_t dma_addr)
{
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
	      dma_addr, size, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_free_consistent);

/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	 0 : Followers all physically adjacent.
	 1 : Followers all virtually adjacent.
	-1 : Not leader, physically adjacent to previous.
	-2 : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))

static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* we will not merge sg entries without a device.  */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}

/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = calc_npages(paddr + size);
	dma_ofs = iommu_arena_alloc(arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %lx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = calc_npages((paddr & ~PAGE_MASK) + size);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}

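/* Map a scatterlist for PCI DMA.  Adjacent entries are merged where
   possible, so the number of mappings returned may be smaller than NENTS;
   returns 0 on failure.  */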
int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	   int direction)
{
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;
	struct device *dev;

	if (direction == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	dev = pdev ? &pdev->dev : NULL;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, direction);
	return 0;
}
EXPORT_SYMBOL(pci_map_sg);

/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	     int direction)
{
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma64_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%lx,%lx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%lx,%lx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%lx,%lx]\n",
		     sg - end + nents, addr, size);

		npages = calc_npages((addr & ~PAGE_MASK) + size);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
EXPORT_SYMBOL(pci_unmap_sg);


/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

int
pci_dma_supported(struct pci_dev *pdev, u64 mask)
{
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base.  */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}
EXPORT_SYMBOL(pci_dma_supported);


/*
 * AGP GART extensions to the IOMMU
 */
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first... */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}

int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   unsigned long *physaddrs)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(physaddrs[i]);

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}

int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}

/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */

static int
pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine.  */
	DBGA("pci_dac_dma_supported %s from %p\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}

/* Helper for generic DMA-mapping functions.  */

struct pci_dev *
alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev->bus == &pci_bus_type)
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise.  */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then).  */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations).  */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff.  */
	return NULL;
}
EXPORT_SYMBOL(alpha_gendev_to_pci);

int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask ||
	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);