[SPARC64]: Add SG merging support back into IOMMU code.
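
Instead of grabbing one IOMMU range for the whole scatterlist up front, dma_4v_map_sg() now allocates a range per scatterlist entry and merges an entry into the current DMA segment when its newly allocated DMA address continues exactly where the previous segment ended and the combined length stays within dma_get_max_seg_size(dev). A new helper, iommu_batch_new_entry(), flushes the pending hypervisor mapping batch whenever the next allocation is not contiguous with the entries already batched (the batch is started with the sentinel ~0UL, meaning "no entry yet"). On allocation or mapping failure, every partially mapped segment is backed out and its IOMMU entries are returned to the allocator. dma_4v_unmap_sg() is reworked to match: it frees and demaps each merged DMA segment individually, stopping at the first entry with a zero dma_length.

A minimal sketch of the merge test applied per entry (illustrative only, not code from this patch; can_merge() is a made-up name, and only dma_get_max_seg_size() is real kernel API):

/*
 * A segment can be extended only when the new mapping's DMA address
 * starts exactly where the previous segment ended and the merged
 * length does not exceed the device's maximum segment size.
 */
static int can_merge(unsigned long dma_addr, unsigned long dma_next,
		     unsigned int cur_seg_len, unsigned int new_len,
		     unsigned int max_seg_size)
{
	if (dma_addr != dma_next)			/* not DMA-contiguous */
		return 0;
	if (cur_seg_len + new_len > max_seg_size)	/* exceeds device limit */
		return 0;
	return 1;
}
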
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index c8b6199a5dc4b405b4153eb0b084842266391b71..ddca6c6c0b492709193e1a3c8a86dcb30f884fbf 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -89,6 +89,17 @@ static long iommu_batch_flush(struct iommu_batch *p)
        return 0;
 }
 
+static inline void iommu_batch_new_entry(unsigned long entry)
+{
+       struct iommu_batch *p = &__get_cpu_var(iommu_batch);
+
+       if (p->entry + p->npages == entry)
+               return;
+       if (p->entry != ~0UL)
+               iommu_batch_flush(p);
+       p->entry = entry;
+}
+
 /* Interrupts must be disabled.  */
 static inline long iommu_batch_add(u64 phys_page)
 {
@@ -320,88 +331,131 @@ static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                         int nelems, enum dma_data_direction direction)
 {
-       unsigned long flags, npages, i, prot;
-       u32 dma_base, orig_dma_base;
-       struct scatterlist *sg;
+       struct scatterlist *s, *outs, *segstart;
+       unsigned long flags, handle, prot;
+       dma_addr_t dma_next = 0, dma_addr;
+       unsigned int max_seg_size;
+       int outcount, incount, i;
        struct iommu *iommu;
-       long entry, err;
-
-       /* Fast path single entry scatterlists. */
-       if (nelems == 1) {
-               sglist->dma_address =
-                       dma_4v_map_single(dev, sg_virt(sglist),
-                                         sglist->length, direction);
-               if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
-                       return 0;
-               sglist->dma_length = sglist->length;
-               return 1;
-       }
+       long err;
+
+       BUG_ON(direction == DMA_NONE);
 
        iommu = dev->archdata.iommu;
+       if (nelems == 0 || !iommu)
+               return 0;
        
-       if (unlikely(direction == DMA_NONE))
-               goto bad;
-
-       npages = calc_npages(sglist, nelems);
+       prot = HV_PCI_MAP_ATTR_READ;
+       if (direction != DMA_TO_DEVICE)
+               prot |= HV_PCI_MAP_ATTR_WRITE;
 
-       spin_lock_irqsave(&iommu->lock, flags);
-       entry = iommu_range_alloc(dev, iommu, npages, NULL);
-       spin_unlock_irqrestore(&iommu->lock, flags);
+       outs = s = segstart = &sglist[0];
+       outcount = 1;
+       incount = nelems;
+       handle = 0;
 
-       if (unlikely(entry == DMA_ERROR_CODE))
-               goto bad;
+       /* Init first segment length for backout at failure */
+       outs->dma_length = 0;
 
-       orig_dma_base = dma_base = iommu->page_table_map_base +
-               (entry << IO_PAGE_SHIFT);
+       spin_lock_irqsave(&iommu->lock, flags);
 
-       prot = HV_PCI_MAP_ATTR_READ;
-       if (direction != DMA_TO_DEVICE)
-               prot |= HV_PCI_MAP_ATTR_WRITE;
+       iommu_batch_start(dev, prot, ~0UL);
 
-       local_irq_save(flags);
+       max_seg_size = dma_get_max_seg_size(dev);
+       for_each_sg(sglist, s, nelems, i) {
+               unsigned long paddr, npages, entry, slen;
 
-       iommu_batch_start(dev, prot, entry);
+               slen = s->length;
+               /* Sanity check */
+               if (slen == 0) {
+                       dma_next = 0;
+                       continue;
+               }
+               /* Allocate iommu entries for that segment */
+               paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
+               npages = iommu_num_pages(paddr, slen);
+               entry = iommu_range_alloc(dev, iommu, npages, &handle);
 
-       for_each_sg(sglist, sg, nelems, i) {
-               unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
-               unsigned long slen = sg->length;
-               unsigned long this_npages;
+               /* Handle failure */
+               if (unlikely(entry == DMA_ERROR_CODE)) {
+                       if (printk_ratelimit())
+                               printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
+                                      " npages %lx\n", iommu, paddr, npages);
+                       goto iommu_map_failed;
+               }
 
-               this_npages = iommu_num_pages(paddr, slen);
+               iommu_batch_new_entry(entry);
 
-               sg->dma_address = dma_base | (paddr & ~IO_PAGE_MASK);
-               sg->dma_length = slen;
+               /* Convert entry to a dma_addr_t */
+               dma_addr = iommu->page_table_map_base +
+                       (entry << IO_PAGE_SHIFT);
+               dma_addr |= (s->offset & ~IO_PAGE_MASK);
 
+               /* Insert into HW table */
                paddr &= IO_PAGE_MASK;
-               while (this_npages--) {
+               while (npages--) {
                        err = iommu_batch_add(paddr);
-                       if (unlikely(err < 0L)) {
-                               local_irq_restore(flags);
+                       if (unlikely(err < 0L))
                                goto iommu_map_failed;
+                       paddr += IO_PAGE_SIZE;
+               }
+
+               /* If we are in an open segment, try merging */
+               if (segstart != s) {
+                       /* We cannot merge if:
+                        * - allocated dma_addr isn't contiguous to previous allocation
+                        */
+                       if ((dma_addr != dma_next) ||
+                           (outs->dma_length + s->length > max_seg_size)) {
+                               /* Can't merge: create a new segment */
+                               segstart = s;
+                               outcount++;
+                               outs = sg_next(outs);
+                       } else {
+                               outs->dma_length += s->length;
                        }
+               }
 
-                       paddr += IO_PAGE_SIZE;
-                       dma_base += IO_PAGE_SIZE;
+               if (segstart == s) {
+                       /* This is a new segment, fill entries */
+                       outs->dma_address = dma_addr;
+                       outs->dma_length = slen;
                }
+
+               /* Calculate next page pointer for contiguous check */
+               dma_next = dma_addr + slen;
        }
 
        err = iommu_batch_end();
 
-       local_irq_restore(flags);
-
        if (unlikely(err < 0L))
                goto iommu_map_failed;
 
-       return nelems;
+       spin_unlock_irqrestore(&iommu->lock, flags);
 
-bad:
-       if (printk_ratelimit())
-               WARN_ON(1);
-       return 0;
+       if (outcount < incount) {
+               outs = sg_next(outs);
+               outs->dma_address = DMA_ERROR_CODE;
+               outs->dma_length = 0;
+       }
+
+       return outcount;
 
 iommu_map_failed:
-       spin_lock_irqsave(&iommu->lock, flags);
-       iommu_range_free(iommu, orig_dma_base, npages);
+       for_each_sg(sglist, s, nelems, i) {
+               if (s->dma_length != 0) {
+                       unsigned long vaddr, npages;
+
+                       vaddr = s->dma_address & IO_PAGE_MASK;
+                       npages = iommu_num_pages(s->dma_address, s->dma_length);
+                       iommu_range_free(iommu, vaddr, npages);
+                       /* XXX demap? XXX */
+                       s->dma_address = DMA_ERROR_CODE;
+                       s->dma_length = 0;
+               }
+               if (s == outs)
+                       break;
+       }
        spin_unlock_irqrestore(&iommu->lock, flags);
 
        return 0;
@@ -410,39 +464,43 @@ iommu_map_failed:
 static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction)
 {
-       unsigned long flags, npages;
        struct pci_pbm_info *pbm;
-       u32 devhandle, bus_addr;
+       struct scatterlist *sg;
        struct iommu *iommu;
-       long entry;
+       unsigned long flags;
+       u32 devhandle;
 
-       if (unlikely(direction == DMA_NONE)) {
-               if (printk_ratelimit())
-                       WARN_ON(1);
-       }
+       BUG_ON(direction == DMA_NONE);
 
        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;
        
-       bus_addr = sglist->dma_address & IO_PAGE_MASK;
-
-       npages = calc_npages(sglist, nelems);
-
-       entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
-
        spin_lock_irqsave(&iommu->lock, flags);
 
-       iommu_range_free(iommu, bus_addr, npages);
-
-       do {
-               unsigned long num;
+       sg = sglist;
+       while (nelems--) {
+               dma_addr_t dma_handle = sg->dma_address;
+               unsigned int len = sg->dma_length;
+               unsigned long npages, entry;
+
+               if (!len)
+                       break;
+               npages = iommu_num_pages(dma_handle, len);
+               iommu_range_free(iommu, dma_handle, npages);
+
+               entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+               while (npages) {
+                       unsigned long num;
+
+                       num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
+                                                   npages);
+                       entry += num;
+                       npages -= num;
+               }
 
-               num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
-                                           npages);
-               entry += num;
-               npages -= num;
-       } while (npages != 0);
+               sg = sg_next(sg);
+       }
 
        spin_unlock_irqrestore(&iommu->lock, flags);
 }
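
For context, a hypothetical driver-side view of the change (illustrative only, not part of this patch): dma_map_sg() may now return fewer DMA segments than the scatterlist entries it was handed, so hardware descriptors are built from the returned count via sg_dma_address()/sg_dma_len(), while dma_unmap_sg() is still passed the original nelems:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical example of consuming merged segments; not from this patch. */
static int example_map_and_program(struct device *dev,
				   struct scatterlist *sglist, int nelems)
{
	struct scatterlist *sg;
	int count, i;

	count = dma_map_sg(dev, sglist, nelems, DMA_TO_DEVICE);
	if (count == 0)
		return -ENOMEM;

	/* Only the first 'count' entries describe valid DMA segments. */
	for_each_sg(sglist, sg, count, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		/* program one hardware descriptor with (addr, len) */
	}

	/* Later, once the device is done, unmap with the original nelems. */
	dma_unmap_sg(dev, sglist, nelems, DMA_TO_DEVICE);
	return 0;
}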