/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>

#include <asm/iommu.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>

#include "iommu_common.h"
#include "pci_sun4v.h"

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

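/* An iommu_batch collects up to PGLIST_NENTS physical page addresses
 * per CPU so they can be handed to the hypervisor in a single
 * pci_sun4v_iommu_map() call instead of one hypercall per page.
 */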
struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}

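/* Allocate 'npages' consecutive entries from the arena bitmap.  The
 * search wraps around once before giving up; on success the index of
 * the first entry is returned, on failure -1.  Callers hold iommu->lock.
 */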
static long arena_alloc(struct iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass = 0;

	limit = arena->limit;
	start = arena->hint;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		}
		/* Scanned the whole thing, give up. */
		return -1;
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}

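/* Return a range of entries previously obtained from arena_alloc()
 * to the bitmap.  Callers hold iommu->lock.
 */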
static void arena_free(struct iommu_arena *arena, unsigned long base,
		       unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}

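/* Allocate a physically contiguous buffer, reserve IOTSB entries for
 * it, and establish the IOMMU mappings via batched hypervisor calls.
 */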
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct iommu *iommu;
	unsigned long flags, order, first_page, npages, n;
	void *ret;
	long entry;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	first_page = __get_free_pages(gfp, order);
	if (unlikely(first_page == 0UL))
		return NULL;

	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto arena_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

arena_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

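/* Tear down the IOMMU mappings for a coherent buffer, give the IOTSB
 * entries back to the arena, and free the underlying pages.
 */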
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	free_pages((unsigned long)cpu, order);
}

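/* Map a single virtually contiguous buffer for streaming DMA and
 * return the bus address the device should use.
 */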
static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}

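/* Undo dma_4v_map_single(): demap the IOTSB entries through the
 * hypervisor and release them back to the arena.
 */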
static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

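/* Program the IOTSB entries for a scatterlist that prepare_sg() has
 * already coalesced, batching the hypervisor map calls.
 */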
static long fill_sg(long entry, struct device *dev,
		    struct scatterlist *sg,
		    int nused, int nelems, unsigned long prot)
{
	struct scatterlist *dma_sg = sg;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg = sg_next(sg);
			}

			pteval = (pteval & IOPTE_PAGE);
			while (len > 0) {
				long err;

				err = iommu_batch_add(pteval);
				if (unlikely(err < 0L))
					goto iommu_map_failed;

				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg = sg_next(sg);

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while ((pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg = sg_next(sg);
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg = sg_next(dma_sg);
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);
	return 0;

iommu_map_failed:
	local_irq_restore(flags);
	return -1;
}

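/* Map a scatterlist for streaming DMA.  Single-entry lists fall back
 * to dma_4v_map_single(); larger lists are coalesced and mapped via
 * fill_sg().
 */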
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	struct iommu *iommu;
	unsigned long flags, npages, prot;
	u32 dma_base;
	struct scatterlist *sgtmp;
	long entry, err;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			dma_4v_map_single(dev,
					  (page_address(sglist->page) +
					   sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list. */
	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary. */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp = sg_next(sgtmp);
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	err = fill_sg(entry, dev, sglist, used, nelems, prot);
	if (unlikely(err < 0L))
		goto iommu_map_failed;

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;

iommu_map_failed:
	spin_lock_irqsave(&iommu->lock, flags);
	arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

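/* Unmap a scatterlist: compute the mapped range from the first and
 * last used entries, then demap and release the IOTSB entries.
 */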
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, i, npages;
	struct scatterlist *sg, *sgprv;
	long entry;
	u32 devhandle, bus_addr;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;

		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4v_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	/* Nothing to do... */
}

static void dma_4v_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	/* Nothing to do... */
}

const struct dma_ops sun4v_dma_ops = {
	.alloc_coherent			= dma_4v_alloc_coherent,
	.free_coherent			= dma_4v_free_coherent,
	.map_single			= dma_4v_map_single,
	.unmap_single			= dma_4v_unmap_single,
	.map_sg				= dma_4v_map_sg,
	.unmap_sg			= dma_4v_unmap_sg,
	.sync_single_for_cpu		= dma_4v_sync_single_for_cpu,
	.sync_sg_for_cpu		= dma_4v_sync_sg_for_cpu,
};

static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->prom_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm);

	/* XXX register error interrupt handlers XXX */
}

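/* Scan the IOTSB for mappings left behind by OBP.  Entries that point
 * at RAM we own are demapped; the rest are marked busy in the arena so
 * we never hand them out.
 */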
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct iommu *iommu)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}

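/* Set up the software IOMMU state for one PBM from the "virtual-dma"
 * property (or defaults) and import any pre-existing OBP mappings.
 */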
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct iommu *iommu = pbm->iommu;
	struct property *prop;
	unsigned long num_tsb_entries, sz, tsbsize;
	u32 vdma[2], dma_mask, dma_offset;

	prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
	if (prop) {
		u32 *val = prop->value;

		vdma[0] = val[0];
		vdma[1] = val[1];
	} else {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		prom_printf("PCI-SUN4V: strange virtual-dma[%08x:%08x].\n",
			    vdma[0], vdma[1]);
		prom_halt();
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
	tsbsize = num_tsb_entries * sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;	/* INTX only */
	u64		reserved1;
	u64		stick;
	u64		req_id;		/* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry. */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

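/* Allocate and register the MSI event queues for this PBM with the
 * hypervisor, verifying each queue's configuration readback.
 */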
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!virt_irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;
	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;

	return virt_irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	= pci_sun4v_get_head,
	.dequeue_msi	= pci_sun4v_dequeue_msi,
	.set_head	= pci_sun4v_set_head,
	.msi_setup	= pci_sun4v_msi_setup,
	.msi_teardown	= pci_sun4v_msi_teardown,
	.msiq_alloc	= pci_sun4v_msiq_alloc,
	.msiq_free	= pci_sun4v_msiq_free,
	.msiq_build_irq	= pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}

#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static void __init pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
{
	struct pci_pbm_info *pbm;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	pbm->scan_bus = pci_sun4v_scan_bus;
	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->parent = p;
	pbm->prom_node = dp;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);
	pci_sun4v_iommu_init(pbm);
	pci_sun4v_msi_init(pbm);
}

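/* Top-level entry for a SUN4V PCI controller device node: negotiate
 * the PCI hypervisor API group once, then set up the PBM for this
 * node, sharing the controller when its sibling PBM already exists.
 */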
void __init sun4v_pci_init(struct device_node *dp, char *model_name)
{
	static int hvapi_negotiated = 0;
	struct pci_controller_info *p;
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct property *prop;
	struct linux_prom64_registers *regs;
	u32 devhandle;
	int i;

	if (!hvapi_negotiated++) {
		int err = sun4v_hvapi_register(HV_GRP_PCI,
					       vpci_major,
					       &vpci_minor);

		if (err) {
			prom_printf("SUN4V_PCI: Could not register hvapi, "
				    "err=%d\n", err);
			prom_halt();
		}
		printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		dma_ops = &sun4v_dma_ops;
	}

	prop = of_find_property(dp, "reg", NULL);
	regs = prop->value;

	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(pbm->parent, dp, devhandle);
			return;
		}
	}

	for_each_possible_cpu(i) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			goto fatal_memory_error;

		per_cpu(iommu_batch, i).pglist = (u64 *) page;
	}

	p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_A.iommu = iommu;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_B.iommu = iommu;

	pci_sun4v_pbm_init(p, dp, devhandle);
	return;

fatal_memory_error:
	prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
	prom_halt();
}