Commit | Line | Data |
---|---|---|
1da177e4 LT | 1 | /* |
2 | * linux/arch/alpha/kernel/pci_iommu.c | |
3 | */ | |
4 | ||
5 | #include <linux/kernel.h> | |
6 | #include <linux/mm.h> | |
7 | #include <linux/pci.h> | |
8 | #include <linux/slab.h> | |
9 | #include <linux/bootmem.h> | |
944bda26 | 10 | #include <linux/scatterlist.h> |
74fd1b68 | 11 | #include <linux/log2.h> |
1da177e4 LT | 12 | |
13 | #include <asm/io.h> | |
14 | #include <asm/hwrpb.h> | |
15 | ||
16 | #include "proto.h" | |
17 | #include "pci_impl.h" | |
18 | ||
19 | ||
20 | #define DEBUG_ALLOC 0 | |
21 | #if DEBUG_ALLOC > 0 | |
22 | # define DBGA(args...) printk(KERN_DEBUG args) | |
23 | #else | |
24 | # define DBGA(args...) | |
25 | #endif | |
26 | #if DEBUG_ALLOC > 1 | |
27 | # define DBGA2(args...) printk(KERN_DEBUG args) | |
28 | #else | |
29 | # define DBGA2(args...) | |
30 | #endif | |
31 | ||
32 | #define DEBUG_NODIRECT 0 | |
33 | #define DEBUG_FORCEDAC 0 | |
34 | ||
35 | #define ISA_DMA_MASK 0x00ffffff | |
36 | ||
37 | static inline unsigned long | |
38 | mk_iommu_pte(unsigned long paddr) | |
39 | { | |
40 | return (paddr >> (PAGE_SHIFT-1)) | 1; | |
41 | } | |
42 | ||
43 | static inline long | |
44 | calc_npages(long bytes) | |
45 | { | |
46 | return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT; | |
47 | } | |
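The PTE helper above stores the page frame number shifted up by one bit, with bit 0 serving as the valid bit, which is why the shift is PAGE_SHIFT-1 rather than PAGE_SHIFT. A small stand-alone illustration of that arithmetic, assuming Alpha's 8 KB pages (PAGE_SHIFT == 13) and made-up addresses:

```c
/* Illustration only: mirrors the mk_iommu_pte()/calc_npages() arithmetic
   above for 8 KB pages (PAGE_SHIFT == 13).  All values are made up. */
#include <stdio.h>

#define EX_PAGE_SHIFT	13
#define EX_PAGE_SIZE	(1UL << EX_PAGE_SHIFT)

int main(void)
{
	unsigned long paddr = 5 * EX_PAGE_SIZE;		/* pfn 5 */
	unsigned long pte = (paddr >> (EX_PAGE_SHIFT - 1)) | 1;
	long bytes = 3 * EX_PAGE_SIZE + 100;		/* rounds up to 4 pages */
	long npages = (bytes + EX_PAGE_SIZE - 1) >> EX_PAGE_SHIFT;

	/* pfn 5 -> pte 0xb: the frame number sits in bits 1 and up,
	   bit 0 is the valid bit. */
	printf("pte = %#lx, npages = %ld\n", pte, npages);
	return 0;
}
```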
48 | \f | |
49 | ||
50 | /* Return the minimum of MAX and the first power of two larger |
51 | than main memory. */ | |
52 | ||
53 | unsigned long | |
54 | size_for_memory(unsigned long max) | |
55 | { | |
56 | unsigned long mem = max_low_pfn << PAGE_SHIFT; | |
57 | if (mem < max) | |
74fd1b68 | 58 | max = roundup_pow_of_two(mem); |
1da177e4 LT | 59 | return max; |
60 | } | |
61 | \f | |
ed5f6561 | 62 | struct pci_iommu_arena * __init |
1da177e4 LT | 63 | iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base, |
64 | unsigned long window_size, unsigned long align) | |
65 | { | |
66 | unsigned long mem_size; | |
67 | struct pci_iommu_arena *arena; | |
68 | ||
69 | mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long)); | |
70 | ||
71 | /* Note that the TLB lookup logic uses bitwise concatenation, | |
72 | not addition, so the required arena alignment is based on | |
73 | the size of the window. Retain the align parameter so that | |
74 | particular systems can over-align the arena. */ | |
75 | if (align < mem_size) | |
76 | align = mem_size; | |
77 | ||
78 | ||
79 | #ifdef CONFIG_DISCONTIGMEM | |
80 | ||
81 | if (!NODE_DATA(nid) || | |
82 | (NULL == (arena = alloc_bootmem_node(NODE_DATA(nid), | |
83 | sizeof(*arena))))) { | |
84 | printk("%s: couldn't allocate arena from node %d\n" | |
85 | " falling back to system-wide allocation\n", | |
86 | __FUNCTION__, nid); | |
87 | arena = alloc_bootmem(sizeof(*arena)); | |
88 | } | |
89 | ||
90 | if (!NODE_DATA(nid) || | |
91 | (NULL == (arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), | |
92 | mem_size, | |
93 | align, | |
94 | 0)))) { | |
95 | printk("%s: couldn't allocate arena ptes from node %d\n" | |
96 | " falling back to system-wide allocation\n", | |
97 | __FUNCTION__, nid); | |
98 | arena->ptes = __alloc_bootmem(mem_size, align, 0); | |
99 | } | |
100 | ||
101 | #else /* CONFIG_DISCONTIGMEM */ | |
102 | ||
103 | arena = alloc_bootmem(sizeof(*arena)); | |
104 | arena->ptes = __alloc_bootmem(mem_size, align, 0); | |
105 | ||
106 | #endif /* CONFIG_DISCONTIGMEM */ | |
107 | ||
108 | spin_lock_init(&arena->lock); | |
109 | arena->hose = hose; | |
110 | arena->dma_base = base; | |
111 | arena->size = window_size; | |
112 | arena->next_entry = 0; | |
113 | ||
114 | /* Align allocations to a multiple of the page size. Not needed |
115 | unless there are chip bugs. */ | |
116 | arena->align_entry = 1; | |
117 | ||
118 | return arena; | |
119 | } | |
120 | ||
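The alignment comment above matters in practice: because the chipset concatenates rather than adds the page offset, the PTE table must be aligned to its own size. A quick sizing sketch under assumed values (1 GB window, 8 KB pages, 8-byte PTEs):

```c
/* Sizing sketch (hypothetical values): a 1 GB window with 8 KB pages and
   8-byte PTEs needs a 1 MB PTE table, which must then be 1 MB aligned. */
#include <stdio.h>

int main(void)
{
	unsigned long page_size   = 8192;		/* 8 KB Alpha pages */
	unsigned long pte_size    = sizeof(unsigned long); /* 8 bytes */
	unsigned long window_size = 1UL << 30;		/* 1 GB DMA window */

	/* same formula as iommu_arena_new_node() */
	unsigned long mem_size = window_size / (page_size / pte_size);

	printf("PTE table: %lu bytes (%lu PTEs), minimum alignment %lu\n",
	       mem_size, window_size / page_size, mem_size);
	return 0;
}
```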
ed5f6561 | 121 | struct pci_iommu_arena * __init |
1da177e4 LT | 122 | iommu_arena_new(struct pci_controller *hose, dma_addr_t base, |
123 | unsigned long window_size, unsigned long align) | |
124 | { | |
125 | return iommu_arena_new_node(0, hose, base, window_size, align); | |
126 | } | |
127 | ||
128 | /* Must be called with the arena lock held */ | |
129 | static long | |
130 | iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask) | |
131 | { | |
132 | unsigned long *ptes; | |
133 | long i, p, nent; | |
134 | ||
135 | /* Search forward for the first mask-aligned sequence of N free ptes */ | |
136 | ptes = arena->ptes; | |
137 | nent = arena->size >> PAGE_SHIFT; | |
138 | p = (arena->next_entry + mask) & ~mask; | |
139 | i = 0; | |
140 | while (i < n && p+i < nent) { | |
141 | if (ptes[p+i]) | |
142 | p = (p + i + 1 + mask) & ~mask, i = 0; | |
143 | else | |
144 | i = i + 1; | |
145 | } | |
146 | ||
147 | if (i < n) { | |
148 | /* Reached the end. Flush the TLB and restart the | |
149 | search from the beginning. */ | |
150 | alpha_mv.mv_pci_tbi(arena->hose, 0, -1); | |
151 | ||
152 | p = 0, i = 0; | |
153 | while (i < n && p+i < nent) { | |
154 | if (ptes[p+i]) | |
155 | p = (p + i + 1 + mask) & ~mask, i = 0; | |
156 | else | |
157 | i = i + 1; | |
158 | } | |
159 | ||
160 | if (i < n) | |
161 | return -1; | |
162 | } | |
163 | ||
164 | /* Success. It's the responsibility of the caller to mark them | |
165 | in use before releasing the lock */ | |
166 | return p; | |
167 | } | |
168 | ||
169 | static long | |
170 | iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align) | |
171 | { | |
172 | unsigned long flags; | |
173 | unsigned long *ptes; | |
174 | long i, p, mask; | |
175 | ||
176 | spin_lock_irqsave(&arena->lock, flags); | |
177 | ||
178 | /* Search for N empty ptes */ | |
179 | ptes = arena->ptes; | |
180 | mask = max(align, arena->align_entry) - 1; | |
181 | p = iommu_arena_find_pages(arena, n, mask); | |
182 | if (p < 0) { | |
183 | spin_unlock_irqrestore(&arena->lock, flags); | |
184 | return -1; | |
185 | } | |
186 | ||
187 | /* Success. Mark them all in use, ie not zero and invalid | |
188 | for the iommu tlb that could load them from under us. | |
189 | The chip specific bits will fill this in with something | |
190 | kosher when we return. */ | |
191 | for (i = 0; i < n; ++i) | |
192 | ptes[p+i] = IOMMU_INVALID_PTE; | |
193 | ||
194 | arena->next_entry = p + n; | |
195 | spin_unlock_irqrestore(&arena->lock, flags); | |
196 | ||
197 | return p; | |
198 | } | |
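The align argument becomes a power-of-two mask, so allocations start on align-entry boundaries; pci_map_single_1() below passes align = 8 for the ISA bridge to get 64 KB granularity with 8 KB pages. A tiny stand-alone sketch of the rounding (numbers invented):

```c
/* How the alignment mask in iommu_arena_alloc() rounds a search position.
   Hypothetical numbers: align = 8 entries, 8 KB pages -> 64 KB boundary. */
#include <stdio.h>

int main(void)
{
	long align = 8;			/* entries, as used for the ISA bridge */
	long mask = align - 1;
	long next_entry = 13;		/* arbitrary current search position */
	long p = (next_entry + mask) & ~mask;	/* rounds up to 16 */

	printf("search starts at entry %ld -> byte offset %#lx\n",
	       p, (unsigned long)(p * 8192L));	/* 0x20000, 64 KB aligned */
	return 0;
}
```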
199 | ||
200 | static void | |
201 | iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n) | |
202 | { | |
203 | unsigned long *p; | |
204 | long i; | |
205 | ||
206 | p = arena->ptes + ofs; | |
207 | for (i = 0; i < n; ++i) | |
208 | p[i] = 0; | |
209 | } | |
210 | \f | |
caa51716 JB | 211 | /* True if the machine supports DAC addressing, and DEV can |
212 | make use of it given MASK. */ | |
213 | static int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask); | |
214 | ||
1da177e4 LT | 215 | /* Map a single buffer of the indicated size for PCI DMA in streaming |
216 | mode. The 32-bit PCI bus mastering address to use is returned. | |
217 | Once the device is given the dma address, the device owns this memory | |
218 | until either pci_unmap_single or pci_dma_sync_single is performed. */ | |
219 | ||
220 | static dma_addr_t | |
221 | pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size, | |
222 | int dac_allowed) | |
223 | { | |
224 | struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose; | |
225 | dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK; | |
226 | struct pci_iommu_arena *arena; | |
227 | long npages, dma_ofs, i; | |
228 | unsigned long paddr; | |
229 | dma_addr_t ret; | |
230 | unsigned int align = 0; | |
231 | ||
232 | paddr = __pa(cpu_addr); | |
233 | ||
234 | #if !DEBUG_NODIRECT | |
235 | /* First check to see if we can use the direct map window. */ | |
236 | if (paddr + size + __direct_map_base - 1 <= max_dma | |
237 | && paddr + size <= __direct_map_size) { | |
238 | ret = paddr + __direct_map_base; | |
239 | ||
240 | DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n", | |
241 | cpu_addr, size, ret, __builtin_return_address(0)); | |
242 | ||
243 | return ret; | |
244 | } | |
245 | #endif | |
246 | ||
247 | /* Next, use DAC if selected earlier. */ | |
248 | if (dac_allowed) { | |
249 | ret = paddr + alpha_mv.pci_dac_offset; | |
250 | ||
251 | DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n", | |
252 | cpu_addr, size, ret, __builtin_return_address(0)); | |
253 | ||
254 | return ret; | |
255 | } | |
256 | ||
257 | /* If the machine doesn't define a pci_tbi routine, we have to | |
258 | assume it doesn't support sg mapping, and, since we tried to | |
259 | use direct_map above, it now must be considered an error. */ | |
260 | if (! alpha_mv.mv_pci_tbi) { | |
261 | static int been_here = 0; /* Only print the message once. */ | |
262 | if (!been_here) { | |
263 | printk(KERN_WARNING "pci_map_single: no HW sg\n"); | |
264 | been_here = 1; | |
265 | } | |
266 | return 0; | |
267 | } | |
268 | ||
269 | arena = hose->sg_pci; | |
270 | if (!arena || arena->dma_base + arena->size - 1 > max_dma) | |
271 | arena = hose->sg_isa; | |
272 | ||
273 | npages = calc_npages((paddr & ~PAGE_MASK) + size); | |
274 | ||
275 | /* Force allocation to 64KB boundary for ISA bridges. */ | |
276 | if (pdev && pdev == isa_bridge) | |
277 | align = 8; | |
278 | dma_ofs = iommu_arena_alloc(arena, npages, align); | |
279 | if (dma_ofs < 0) { | |
280 | printk(KERN_WARNING "pci_map_single failed: " | |
281 | "could not allocate dma page tables\n"); | |
282 | return 0; | |
283 | } | |
284 | ||
285 | paddr &= PAGE_MASK; | |
286 | for (i = 0; i < npages; ++i, paddr += PAGE_SIZE) | |
287 | arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr); | |
288 | ||
289 | ret = arena->dma_base + dma_ofs * PAGE_SIZE; | |
290 | ret += (unsigned long)cpu_addr & ~PAGE_MASK; | |
291 | ||
292 | DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n", | |
293 | cpu_addr, size, npages, ret, __builtin_return_address(0)); | |
294 | ||
295 | return ret; | |
296 | } | |
297 | ||
298 | dma_addr_t | |
299 | pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir) | |
300 | { | |
301 | int dac_allowed; | |
302 | ||
303 | if (dir == PCI_DMA_NONE) | |
304 | BUG(); | |
305 | ||
306 | dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0; | |
307 | return pci_map_single_1(pdev, cpu_addr, size, dac_allowed); | |
308 | } | |
cff52daf | 309 | EXPORT_SYMBOL(pci_map_single); |
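The comment above pci_map_single_1() states the streaming-DMA ownership rule: once the bus address has been handed out, the device owns the buffer until it is unmapped. A minimal caller sketch under that contract; the function and buffer names are hypothetical and error handling is trimmed:

```c
/* Hypothetical driver fragment using the streaming API above. */
#include <linux/pci.h>

static int queue_rx_buffer(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t bus;

	/* Hand the buffer to the device; the direct window, DAC, or an
	   IOMMU arena determines the bus address returned. */
	bus = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
	if (!bus)
		return -ENOMEM;	/* arena exhausted, no direct/DAC path */

	/* ... give 'bus' to the hardware and wait for completion ... */

	/* Only after unmapping may the CPU read what the device wrote. */
	pci_unmap_single(pdev, bus, len, PCI_DMA_FROMDEVICE);
	return 0;
}
```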
1da177e4 LT | 310 | |
311 | dma_addr_t | |
312 | pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset, | |
313 | size_t size, int dir) | |
314 | { | |
315 | int dac_allowed; | |
316 | ||
317 | if (dir == PCI_DMA_NONE) | |
318 | BUG(); | |
319 | ||
320 | dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0; | |
321 | return pci_map_single_1(pdev, (char *)page_address(page) + offset, | |
322 | size, dac_allowed); | |
323 | } | |
cff52daf | 324 | EXPORT_SYMBOL(pci_map_page); |
1da177e4 LT | 325 | |
326 | /* Unmap a single streaming mode DMA translation. The DMA_ADDR and | |
327 | SIZE must match what was provided in a previous pci_map_single |
328 | call. All other usages are undefined. After this call, reads by | |
329 | the cpu to the buffer are guaranteed to see whatever the device | |
330 | wrote there. */ | |
331 | ||
332 | void | |
333 | pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size, | |
334 | int direction) | |
335 | { | |
336 | unsigned long flags; | |
337 | struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose; | |
338 | struct pci_iommu_arena *arena; | |
339 | long dma_ofs, npages; | |
340 | ||
341 | if (direction == PCI_DMA_NONE) | |
342 | BUG(); | |
343 | ||
344 | if (dma_addr >= __direct_map_base | |
345 | && dma_addr < __direct_map_base + __direct_map_size) { | |
346 | /* Nothing to do. */ | |
347 | ||
348 | DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n", | |
349 | dma_addr, size, __builtin_return_address(0)); | |
350 | ||
351 | return; | |
352 | } | |
353 | ||
354 | if (dma_addr > 0xffffffff) { | |
355 | DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n", | |
356 | dma_addr, size, __builtin_return_address(0)); | |
357 | return; | |
358 | } | |
359 | ||
360 | arena = hose->sg_pci; | |
361 | if (!arena || dma_addr < arena->dma_base) | |
362 | arena = hose->sg_isa; | |
363 | ||
364 | dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT; | |
365 | if (dma_ofs * PAGE_SIZE >= arena->size) { | |
366 | printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx " | |
367 | " base %lx size %x\n", dma_addr, arena->dma_base, | |
368 | arena->size); | |
369 | return; | |
370 | BUG(); | |
371 | } | |
372 | ||
373 | npages = calc_npages((dma_addr & ~PAGE_MASK) + size); | |
374 | ||
375 | spin_lock_irqsave(&arena->lock, flags); | |
376 | ||
377 | iommu_arena_free(arena, dma_ofs, npages); | |
378 | ||
379 | /* If we're freeing ptes above the `next_entry' pointer (they | |
380 | may have snuck back into the TLB since the last wrap flush), | |
381 | we need to flush the TLB before reallocating the latter. */ | |
382 | if (dma_ofs >= arena->next_entry) | |
383 | alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1); | |
384 | ||
385 | spin_unlock_irqrestore(&arena->lock, flags); | |
386 | ||
387 | DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n", | |
388 | dma_addr, size, npages, __builtin_return_address(0)); | |
389 | } | |
cff52daf | 390 | EXPORT_SYMBOL(pci_unmap_single); |
1da177e4 LT | 391 | |
392 | void | |
393 | pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr, | |
394 | size_t size, int direction) | |
395 | { | |
396 | pci_unmap_single(pdev, dma_addr, size, direction); | |
397 | } | |
cff52daf | 398 | EXPORT_SYMBOL(pci_unmap_page); |
1da177e4 LT | 399 | |
400 | /* Allocate and map kernel buffer using consistent mode DMA for PCI | |
401 | device. Returns non-NULL cpu-view pointer to the buffer if | |
402 | successful and sets *DMA_ADDRP to the pci side dma address as well, | |
403 | else DMA_ADDRP is undefined. */ | |
404 | ||
405 | void * | |
406 | pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp) | |
407 | { | |
408 | void *cpu_addr; | |
409 | long order = get_order(size); | |
53f9fc93 | 410 | gfp_t gfp = GFP_ATOMIC; |
1da177e4 LT | 411 | |
412 | try_again: | |
413 | cpu_addr = (void *)__get_free_pages(gfp, order); | |
414 | if (! cpu_addr) { | |
415 | printk(KERN_INFO "pci_alloc_consistent: " | |
416 | "get_free_pages failed from %p\n", | |
417 | __builtin_return_address(0)); | |
418 | /* ??? Really atomic allocation? Otherwise we could play | |
419 | with vmalloc and sg if we can't find contiguous memory. */ | |
420 | return NULL; | |
421 | } | |
422 | memset(cpu_addr, 0, size); | |
423 | ||
424 | *dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0); | |
425 | if (*dma_addrp == 0) { | |
426 | free_pages((unsigned long)cpu_addr, order); | |
427 | if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA)) | |
428 | return NULL; | |
429 | /* The address doesn't fit required mask and we | |
430 | do not have iommu. Try again with GFP_DMA. */ | |
431 | gfp |= GFP_DMA; | |
432 | goto try_again; | |
433 | } | |
434 | ||
435 | DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n", | |
436 | size, cpu_addr, *dma_addrp, __builtin_return_address(0)); | |
437 | ||
438 | return cpu_addr; | |
439 | } | |
cff52daf | 440 | EXPORT_SYMBOL(pci_alloc_consistent); |
1da177e4 LT | 441 | |
442 | /* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must | |
443 | be values that were returned from pci_alloc_consistent. SIZE must | |
444 | be the same as what was passed into pci_alloc_consistent. |
445 | References to the memory and mappings associated with CPU_ADDR or | |
446 | DMA_ADDR past this call are illegal. */ | |
447 | ||
448 | void | |
449 | pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr, | |
450 | dma_addr_t dma_addr) | |
451 | { | |
452 | pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); | |
453 | free_pages((unsigned long)cpu_addr, get_order(size)); | |
454 | ||
455 | DBGA2("pci_free_consistent: [%x,%lx] from %p\n", | |
456 | dma_addr, size, __builtin_return_address(0)); | |
457 | } | |
cff52daf | 458 | EXPORT_SYMBOL(pci_free_consistent); |
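pci_alloc_consistent()/pci_free_consistent() above provide coherent buffers such as descriptor rings: one call returns both the CPU pointer and the bus address, already zeroed. A hedged usage sketch; the ring layout and helper names are invented:

```c
/* Hypothetical descriptor ring built on the consistent-DMA helpers above. */
#include <linux/pci.h>

struct my_ring {		/* made-up descriptor layout */
	u64 desc[256];
};

static struct my_ring *ring_alloc(struct pci_dev *pdev, dma_addr_t *bus)
{
	/* Returns the CPU view and fills *bus with the device-visible
	   address; the buffer comes back zeroed. */
	return pci_alloc_consistent(pdev, sizeof(struct my_ring), bus);
}

static void ring_free(struct pci_dev *pdev, struct my_ring *ring, dma_addr_t bus)
{
	pci_free_consistent(pdev, sizeof(struct my_ring), ring, bus);
}
```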
1da177e4 LT | 459 | |
460 | /* Classify the elements of the scatterlist. Write dma_address | |
461 | of each element with: | |
462 | 0 : Followers all physically adjacent. | |
463 | 1 : Followers all virtually adjacent. | |
464 | -1 : Not leader, physically adjacent to previous. | |
465 | -2 : Not leader, virtually adjacent to previous. | |
466 | Write dma_length of each leader with the combined lengths of | |
467 | the mergeable followers. */ |
468 | ||
58b053e4 | 469 | #define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG))) |
1da177e4 LT | 470 | #define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG)) |
471 | ||
472 | static void | |
473 | sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok) | |
474 | { | |
475 | unsigned long next_paddr; | |
476 | struct scatterlist *leader; | |
477 | long leader_flag, leader_length; | |
478 | ||
479 | leader = sg; | |
480 | leader_flag = 0; | |
481 | leader_length = leader->length; | |
482 | next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length; | |
483 | ||
484 | for (++sg; sg < end; ++sg) { | |
485 | unsigned long addr, len; | |
486 | addr = SG_ENT_PHYS_ADDRESS(sg); | |
487 | len = sg->length; | |
488 | ||
489 | if (next_paddr == addr) { | |
490 | sg->dma_address = -1; | |
491 | leader_length += len; | |
492 | } else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) { | |
493 | sg->dma_address = -2; | |
494 | leader_flag = 1; | |
495 | leader_length += len; | |
496 | } else { | |
497 | leader->dma_address = leader_flag; | |
498 | leader->dma_length = leader_length; | |
499 | leader = sg; | |
500 | leader_flag = 0; | |
501 | leader_length = len; | |
502 | } | |
503 | ||
504 | next_paddr = addr + len; | |
505 | } | |
506 | ||
507 | leader->dma_address = leader_flag; | |
508 | leader->dma_length = leader_length; | |
509 | } | |
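A concrete (hypothetical) three-entry list may make the tagging above easier to follow; assuming 8 KB pages:

```c
/*
 * Hypothetical sg_classify() result for three entries, 8 KB pages:
 *
 *   entry   phys start  length   dma_address  meaning
 *   sg[0]   0x10000     0x2000    0           leader; follower physically adjacent
 *   sg[1]   0x12000     0x1f00   -1           physically adjacent to sg[0]
 *   sg[2]   0x40000     0x1000    0           new leader (unaligned gap before it)
 *
 * sg[0].dma_length becomes 0x3f00 (its own length plus sg[1]'s) and sg[2]
 * keeps 0x1000.  Had sg[1] ended exactly on a page boundary, sg[2] would
 * have been tagged -2 (virtually mergeable) and sg[0]'s tag set to 1.
 */
```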
510 | ||
511 | /* Given a scatterlist leader, choose an allocation method and fill | |
512 | in the blanks. */ | |
513 | ||
514 | static int | |
515 | sg_fill(struct scatterlist *leader, struct scatterlist *end, | |
516 | struct scatterlist *out, struct pci_iommu_arena *arena, | |
517 | dma_addr_t max_dma, int dac_allowed) | |
518 | { | |
519 | unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader); | |
520 | long size = leader->dma_length; | |
521 | struct scatterlist *sg; | |
522 | unsigned long *ptes; | |
523 | long npages, dma_ofs, i; | |
524 | ||
525 | #if !DEBUG_NODIRECT | |
526 | /* If everything is physically contiguous, and the addresses | |
527 | fall into the direct-map window, use it. */ | |
528 | if (leader->dma_address == 0 | |
529 | && paddr + size + __direct_map_base - 1 <= max_dma | |
530 | && paddr + size <= __direct_map_size) { | |
531 | out->dma_address = paddr + __direct_map_base; | |
532 | out->dma_length = size; | |
533 | ||
534 | DBGA(" sg_fill: [%p,%lx] -> direct %lx\n", | |
535 | __va(paddr), size, out->dma_address); | |
536 | ||
537 | return 0; | |
538 | } | |
539 | #endif | |
540 | ||
541 | /* If physically contiguous and DAC is available, use it. */ | |
542 | if (leader->dma_address == 0 && dac_allowed) { | |
543 | out->dma_address = paddr + alpha_mv.pci_dac_offset; | |
544 | out->dma_length = size; | |
545 | ||
546 | DBGA(" sg_fill: [%p,%lx] -> DAC %lx\n", | |
547 | __va(paddr), size, out->dma_address); | |
548 | ||
549 | return 0; | |
550 | } | |
551 | ||
552 | /* Otherwise, we'll use the iommu to make the pages virtually | |
553 | contiguous. */ | |
554 | ||
555 | paddr &= ~PAGE_MASK; | |
556 | npages = calc_npages(paddr + size); | |
557 | dma_ofs = iommu_arena_alloc(arena, npages, 0); | |
558 | if (dma_ofs < 0) { | |
559 | /* If we attempted a direct map above but failed, die. */ | |
560 | if (leader->dma_address == 0) | |
561 | return -1; | |
562 | ||
563 | /* Otherwise, break up the remaining virtually contiguous | |
564 | hunks into individual direct maps and retry. */ | |
565 | sg_classify(leader, end, 0); | |
566 | return sg_fill(leader, end, out, arena, max_dma, dac_allowed); | |
567 | } | |
568 | ||
569 | out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr; | |
570 | out->dma_length = size; | |
571 | ||
572 | DBGA(" sg_fill: [%p,%lx] -> sg %lx np %ld\n", | |
573 | __va(paddr), size, out->dma_address, npages); | |
574 | ||
575 | /* All virtually contiguous. We need to find the length of each | |
576 | physically contiguous subsegment to fill in the ptes. */ | |
577 | ptes = &arena->ptes[dma_ofs]; | |
578 | sg = leader; | |
579 | do { | |
580 | #if DEBUG_ALLOC > 0 | |
581 | struct scatterlist *last_sg = sg; | |
582 | #endif | |
583 | ||
584 | size = sg->length; | |
585 | paddr = SG_ENT_PHYS_ADDRESS(sg); | |
586 | ||
587 | while (sg+1 < end && (int) sg[1].dma_address == -1) { | |
588 | size += sg[1].length; | |
589 | sg++; | |
590 | } | |
591 | ||
592 | npages = calc_npages((paddr & ~PAGE_MASK) + size); | |
593 | ||
594 | paddr &= PAGE_MASK; | |
595 | for (i = 0; i < npages; ++i, paddr += PAGE_SIZE) | |
596 | *ptes++ = mk_iommu_pte(paddr); | |
597 | ||
598 | #if DEBUG_ALLOC > 0 | |
599 | DBGA(" (%ld) [%p,%x] np %ld\n", | |
600 | last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg), | |
601 | last_sg->length, npages); | |
602 | while (++last_sg <= sg) { | |
603 | DBGA(" (%ld) [%p,%x] cont\n", | |
604 | last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg), | |
605 | last_sg->length); | |
606 | } | |
607 | #endif | |
608 | } while (++sg < end && (int) sg->dma_address < 0); | |
609 | ||
610 | return 1; | |
611 | } | |
612 | ||
613 | int | |
614 | pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents, | |
615 | int direction) | |
616 | { | |
617 | struct scatterlist *start, *end, *out; | |
618 | struct pci_controller *hose; | |
619 | struct pci_iommu_arena *arena; | |
620 | dma_addr_t max_dma; | |
621 | int dac_allowed; | |
622 | ||
623 | if (direction == PCI_DMA_NONE) | |
624 | BUG(); | |
625 | ||
626 | dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0; | |
627 | ||
628 | /* Fast path single entry scatterlists. */ | |
629 | if (nents == 1) { | |
630 | sg->dma_length = sg->length; | |
631 | sg->dma_address | |
632 | = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg), | |
633 | sg->length, dac_allowed); | |
634 | return sg->dma_address != 0; | |
635 | } | |
636 | ||
637 | start = sg; | |
638 | end = sg + nents; | |
639 | ||
640 | /* First, prepare information about the entries. */ | |
641 | sg_classify(sg, end, alpha_mv.mv_pci_tbi != 0); | |
642 | ||
643 | /* Second, figure out where we're going to map things. */ | |
644 | if (alpha_mv.mv_pci_tbi) { | |
645 | hose = pdev ? pdev->sysdata : pci_isa_hose; | |
646 | max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK; | |
647 | arena = hose->sg_pci; | |
648 | if (!arena || arena->dma_base + arena->size - 1 > max_dma) | |
649 | arena = hose->sg_isa; | |
650 | } else { | |
651 | max_dma = -1; | |
652 | arena = NULL; | |
653 | hose = NULL; | |
654 | } | |
655 | ||
656 | /* Third, iterate over the scatterlist leaders and allocate | |
657 | dma space as needed. */ | |
658 | for (out = sg; sg < end; ++sg) { | |
659 | if ((int) sg->dma_address < 0) | |
660 | continue; | |
661 | if (sg_fill(sg, end, out, arena, max_dma, dac_allowed) < 0) | |
662 | goto error; | |
663 | out++; | |
664 | } | |
665 | ||
666 | /* Mark the end of the list for pci_unmap_sg. */ | |
667 | if (out < end) | |
668 | out->dma_length = 0; | |
669 | ||
670 | if (out - start == 0) | |
671 | printk(KERN_WARNING "pci_map_sg failed: no entries?\n"); | |
672 | DBGA("pci_map_sg: %ld entries\n", out - start); | |
673 | ||
674 | return out - start; | |
675 | ||
676 | error: | |
677 | printk(KERN_WARNING "pci_map_sg failed: " | |
678 | "could not allocate dma page tables\n"); | |
679 | ||
680 | /* Some allocation failed while mapping the scatterlist | |
681 | entries. Unmap them now. */ | |
682 | if (out > start) | |
683 | pci_unmap_sg(pdev, start, out - start, direction); | |
684 | return 0; | |
685 | } | |
cff52daf | 686 | EXPORT_SYMBOL(pci_map_sg); |
1da177e4 LT | 687 | |
688 | /* Unmap a set of streaming mode DMA translations. Again, cpu read | |
689 | rules concerning calls here are the same as for pci_unmap_single() | |
690 | above. */ | |
691 | ||
692 | void | |
693 | pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents, | |
694 | int direction) | |
695 | { | |
696 | unsigned long flags; | |
697 | struct pci_controller *hose; | |
698 | struct pci_iommu_arena *arena; | |
699 | struct scatterlist *end; | |
700 | dma_addr_t max_dma; | |
701 | dma_addr_t fbeg, fend; | |
702 | ||
703 | if (direction == PCI_DMA_NONE) | |
704 | BUG(); | |
705 | ||
706 | if (! alpha_mv.mv_pci_tbi) | |
707 | return; | |
708 | ||
709 | hose = pdev ? pdev->sysdata : pci_isa_hose; | |
710 | max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK; | |
711 | arena = hose->sg_pci; | |
712 | if (!arena || arena->dma_base + arena->size - 1 > max_dma) | |
713 | arena = hose->sg_isa; | |
714 | ||
715 | fbeg = -1, fend = 0; | |
716 | ||
717 | spin_lock_irqsave(&arena->lock, flags); | |
718 | ||
719 | for (end = sg + nents; sg < end; ++sg) { | |
720 | dma64_addr_t addr; | |
721 | size_t size; | |
722 | long npages, ofs; | |
723 | dma_addr_t tend; | |
724 | ||
725 | addr = sg->dma_address; | |
726 | size = sg->dma_length; | |
727 | if (!size) | |
728 | break; | |
729 | ||
730 | if (addr > 0xffffffff) { | |
731 | /* It's a DAC address -- nothing to do. */ | |
732 | DBGA(" (%ld) DAC [%lx,%lx]\n", | |
733 | sg - end + nents, addr, size); | |
734 | continue; | |
735 | } | |
736 | ||
737 | if (addr >= __direct_map_base | |
738 | && addr < __direct_map_base + __direct_map_size) { | |
739 | /* Nothing to do. */ | |
740 | DBGA(" (%ld) direct [%lx,%lx]\n", | |
741 | sg - end + nents, addr, size); | |
742 | continue; | |
743 | } | |
744 | ||
745 | DBGA(" (%ld) sg [%lx,%lx]\n", | |
746 | sg - end + nents, addr, size); | |
747 | ||
748 | npages = calc_npages((addr & ~PAGE_MASK) + size); | |
749 | ofs = (addr - arena->dma_base) >> PAGE_SHIFT; | |
750 | iommu_arena_free(arena, ofs, npages); | |
751 | ||
752 | tend = addr + size - 1; | |
753 | if (fbeg > addr) fbeg = addr; | |
754 | if (fend < tend) fend = tend; | |
755 | } | |
756 | ||
757 | /* If we're freeing ptes above the `next_entry' pointer (they | |
758 | may have snuck back into the TLB since the last wrap flush), | |
759 | we need to flush the TLB before reallocating the latter. */ | |
760 | if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry) | |
761 | alpha_mv.mv_pci_tbi(hose, fbeg, fend); | |
762 | ||
763 | spin_unlock_irqrestore(&arena->lock, flags); | |
764 | ||
765 | DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg)); | |
766 | } | |
cff52daf | 767 | EXPORT_SYMBOL(pci_unmap_sg); |
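pci_map_sg() above may coalesce entries, so the count it returns, not the original nents, bounds the device-visible segments, while pci_unmap_sg() is still called with the original nents. A sketch of a caller under those rules (the two-buffer list and function name are assumptions):

```c
/* Hypothetical fragment: mapping a two-buffer scatterlist. */
#include <linux/pci.h>
#include <linux/scatterlist.h>

static int xfer_two_buffers(struct pci_dev *pdev, void *a, void *b, size_t len)
{
	struct scatterlist sg[2];
	int i, mapped;

	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], a, len);
	sg_set_buf(&sg[1], b, len);

	mapped = pci_map_sg(pdev, sg, 2, PCI_DMA_TODEVICE);
	if (!mapped)
		return -ENOMEM;

	/* The IOMMU may have merged the buffers: program the 'mapped'
	   coalesced segments, not the original two entries. */
	for (i = 0; i < mapped; i++) {
		/* device_queue(sg[i].dma_address, sg[i].dma_length); */
	}

	/* ... run the transfer ... */

	/* Unmap with the original entry count, as the API requires. */
	pci_unmap_sg(pdev, sg, 2, PCI_DMA_TODEVICE);
	return 0;
}
```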
1da177e4 LT | 768 | |
769 | ||
770 | /* Return whether the given PCI device DMA address mask can be | |
771 | supported properly. */ | |
772 | ||
773 | int | |
774 | pci_dma_supported(struct pci_dev *pdev, u64 mask) | |
775 | { | |
776 | struct pci_controller *hose; | |
777 | struct pci_iommu_arena *arena; | |
778 | ||
779 | /* If there exists a direct map, and the mask fits either | |
780 | the entire direct mapped space or the total system memory as | |
781 | shifted by the map base */ | |
782 | if (__direct_map_size != 0 | |
783 | && (__direct_map_base + __direct_map_size - 1 <= mask || | |
784 | __direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask)) | |
785 | return 1; | |
786 | ||
787 | /* Check that we have a scatter-gather arena that fits. */ | |
788 | hose = pdev ? pdev->sysdata : pci_isa_hose; | |
789 | arena = hose->sg_isa; | |
790 | if (arena && arena->dma_base + arena->size - 1 <= mask) | |
791 | return 1; | |
792 | arena = hose->sg_pci; | |
793 | if (arena && arena->dma_base + arena->size - 1 <= mask) | |
794 | return 1; | |
795 | ||
796 | /* As last resort try ZONE_DMA. */ | |
797 | if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask) | |
798 | return 1; | |
799 | ||
800 | return 0; | |
801 | } | |
cff52daf | 802 | EXPORT_SYMBOL(pci_dma_supported); |
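pci_dma_supported() answers a driver's mask probe: a mask is acceptable if it covers the direct-map window, one of the scatter-gather arenas, or, failing that, ZONE_DMA. A hedged sketch of the usual probe-and-fallback, assuming the generic pci_set_dma_mask() wrapper of this era (which consults pci_dma_supported()):

```c
/* Hypothetical probe fragment: try 64-bit (DAC) addressing, then 32-bit. */
#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int my_dma_setup(struct pci_dev *pdev)
{
	/* pci_set_dma_mask() fails with -EIO unless pci_dma_supported()
	   finds a direct window, an SG arena, or ZONE_DMA for the mask. */
	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
		return 0;	/* DAC or a large direct-map window */
	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) == 0)
		return 0;	/* 32-bit SAC, possibly through the IOMMU */
	return -EIO;		/* no usable DMA addressing */
}
```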
1da177e4 LT | 803 | |
804 | \f | |
805 | /* | |
806 | * AGP GART extensions to the IOMMU | |
807 | */ | |
808 | int | |
809 | iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask) | |
810 | { | |
811 | unsigned long flags; | |
812 | unsigned long *ptes; | |
813 | long i, p; | |
814 | ||
815 | if (!arena) return -EINVAL; | |
816 | ||
817 | spin_lock_irqsave(&arena->lock, flags); | |
818 | ||
819 | /* Search for N empty ptes. */ | |
820 | ptes = arena->ptes; | |
821 | p = iommu_arena_find_pages(arena, pg_count, align_mask); | |
822 | if (p < 0) { | |
823 | spin_unlock_irqrestore(&arena->lock, flags); | |
824 | return -1; | |
825 | } | |
826 | ||
827 | /* Success. Mark them all reserved (ie not zero and invalid) | |
828 | for the iommu tlb that could load them from under us. | |
829 | They will be filled in with valid bits by _bind() */ | |
830 | for (i = 0; i < pg_count; ++i) | |
831 | ptes[p+i] = IOMMU_RESERVED_PTE; | |
832 | ||
833 | arena->next_entry = p + pg_count; | |
834 | spin_unlock_irqrestore(&arena->lock, flags); | |
835 | ||
836 | return p; | |
837 | } | |
838 | ||
839 | int | |
840 | iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count) | |
841 | { | |
842 | unsigned long *ptes; | |
843 | long i; | |
844 | ||
845 | if (!arena) return -EINVAL; | |
846 | ||
847 | ptes = arena->ptes; | |
848 | ||
849 | /* Make sure they're all reserved first... */ | |
850 | for(i = pg_start; i < pg_start + pg_count; i++) | |
851 | if (ptes[i] != IOMMU_RESERVED_PTE) | |
852 | return -EBUSY; | |
853 | ||
854 | iommu_arena_free(arena, pg_start, pg_count); | |
855 | return 0; | |
856 | } | |
857 | ||
858 | int | |
859 | iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count, | |
860 | unsigned long *physaddrs) | |
861 | { | |
862 | unsigned long flags; | |
863 | unsigned long *ptes; | |
864 | long i, j; | |
865 | ||
866 | if (!arena) return -EINVAL; | |
867 | ||
868 | spin_lock_irqsave(&arena->lock, flags); | |
869 | ||
870 | ptes = arena->ptes; | |
871 | ||
872 | for(j = pg_start; j < pg_start + pg_count; j++) { | |
873 | if (ptes[j] != IOMMU_RESERVED_PTE) { | |
874 | spin_unlock_irqrestore(&arena->lock, flags); | |
875 | return -EBUSY; | |
876 | } | |
877 | } | |
878 | ||
879 | for(i = 0, j = pg_start; i < pg_count; i++, j++) | |
880 | ptes[j] = mk_iommu_pte(physaddrs[i]); | |
881 | ||
882 | spin_unlock_irqrestore(&arena->lock, flags); | |
883 | ||
884 | return 0; | |
885 | } | |
886 | ||
887 | int | |
888 | iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count) | |
889 | { | |
890 | unsigned long *p; | |
891 | long i; | |
892 | ||
893 | if (!arena) return -EINVAL; | |
894 | ||
895 | p = arena->ptes + pg_start; | |
896 | for(i = 0; i < pg_count; i++) | |
897 | p[i] = IOMMU_RESERVED_PTE; | |
898 | ||
899 | return 0; | |
900 | } | |
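The reserve/bind/unbind/release calls above appear to exist for the Alpha AGP GART code: a block of arena entries is reserved up front, pointed at real pages while in use, then unbound and released. A minimal lifecycle sketch within the arch code (arena pointer, page array, and error handling are placeholders; it presumes the declarations from pci_impl.h):

```c
/* Hypothetical GART-style lifecycle for the calls above. */
static int gart_map_pages(struct pci_iommu_arena *arena,
			  unsigned long *physaddrs, long pg_count)
{
	long base;

	/* Reserve a contiguous block of PTEs (no extra alignment asked for). */
	base = iommu_reserve(arena, pg_count, 0);
	if (base < 0)
		return -ENOMEM;

	/* Point the reserved PTEs at real pages. */
	if (iommu_bind(arena, base, pg_count, physaddrs) < 0) {
		iommu_release(arena, base, pg_count);
		return -EBUSY;
	}

	/* ... the device addresses arena->dma_base + base * PAGE_SIZE ... */

	iommu_unbind(arena, base, pg_count);
	iommu_release(arena, base, pg_count);
	return 0;
}
```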
901 | ||
902 | /* True if the machine supports DAC addressing, and DEV can | |
903 | make use of it given MASK. */ | |
904 | ||
caa51716 | 905 | static int |
1da177e4 LT | 906 | pci_dac_dma_supported(struct pci_dev *dev, u64 mask) |
907 | { | |
908 | dma64_addr_t dac_offset = alpha_mv.pci_dac_offset; | |
909 | int ok = 1; | |
910 | ||
911 | /* If this is not set, the machine doesn't support DAC at all. */ | |
912 | if (dac_offset == 0) | |
913 | ok = 0; | |
914 | ||
915 | /* The device has to be able to address our DAC bit. */ | |
916 | if ((dac_offset & dev->dma_mask) != dac_offset) | |
917 | ok = 0; | |
918 | ||
919 | /* If both conditions above are met, we are fine. */ | |
920 | DBGA("pci_dac_dma_supported %s from %p\n", | |
921 | ok ? "yes" : "no", __builtin_return_address(0)); | |
922 | ||
923 | return ok; | |
924 | } | |
1da177e4 LT | 925 | |
926 | /* Helper for generic DMA-mapping functions. */ | |
927 | ||
928 | struct pci_dev * | |
929 | alpha_gendev_to_pci(struct device *dev) | |
930 | { | |
931 | if (dev && dev->bus == &pci_bus_type) | |
932 | return to_pci_dev(dev); | |
933 | ||
934 | /* Assume that non-PCI devices asking for DMA are either ISA or EISA, | |
935 | BUG() otherwise. */ | |
936 | BUG_ON(!isa_bridge); | |
937 | ||
938 | /* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA | |
939 | bridge is bus master then). */ | |
940 | if (!dev || !dev->dma_mask || !*dev->dma_mask) | |
941 | return isa_bridge; | |
942 | ||
943 | /* For EISA bus masters, return isa_bridge (it might have smaller | |
944 | dma_mask due to wiring limitations). */ | |
945 | if (*dev->dma_mask >= isa_bridge->dma_mask) | |
946 | return isa_bridge; | |
947 | ||
948 | /* This assumes ISA bus master with dma_mask 0xffffff. */ | |
949 | return NULL; | |
950 | } | |
cff52daf | 951 | EXPORT_SYMBOL(alpha_gendev_to_pci); |
1da177e4 LT | 952 | |
953 | int | |
954 | dma_set_mask(struct device *dev, u64 mask) | |
955 | { | |
956 | if (!dev->dma_mask || | |
957 | !pci_dma_supported(alpha_gendev_to_pci(dev), mask)) | |
958 | return -EIO; | |
959 | ||
960 | *dev->dma_mask = mask; | |
961 | ||
962 | return 0; | |
963 | } | |
cff52daf | 964 | EXPORT_SYMBOL(dma_set_mask); |
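alpha_gendev_to_pci() and dma_set_mask() above back the generic struct device DMA calls: non-PCI devices are steered onto the ISA bridge or the 24-bit ISA mask. A hedged sketch of a non-PCI driver negotiating its mask through this path (device and function names invented; 0x00ffffff mirrors ISA_DMA_MASK above):

```c
/* Hypothetical fragment: a non-PCI (ISA-style) device negotiating DMA. */
#include <linux/device.h>
#include <linux/dma-mapping.h>

static int my_isa_attach(struct device *dev)
{
	/* Lands in the dma_set_mask() above, which maps the generic device
	   onto a PCI/ISA view via alpha_gendev_to_pci() before asking
	   pci_dma_supported(). */
	if (dma_set_mask(dev, 0x00ffffff))	/* 24-bit ISA limit */
		return -EIO;

	/* ... the generic dma_map_* helpers may be used on 'dev' now ... */
	return 0;
}
```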