/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 */

#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/pci.h>
#include <linux/msi.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#define DEBUG				/* enable pr_debug */

#define SIC_IRQ_MODE_ALL	0
#define SIC_IRQ_MODE_SINGLE	1

#define ZPCI_NR_DMA_SPACES	1
#define ZPCI_MSI_VEC_BITS	6
#define ZPCI_NR_DEVICES		CONFIG_PCI_NR_FUNCTIONS

/* list of all detected zpci devices */
LIST_HEAD(zpci_list);
EXPORT_SYMBOL_GPL(zpci_list);
DEFINE_MUTEX(zpci_list_lock);
EXPORT_SYMBOL_GPL(zpci_list_lock);

static struct pci_hp_callback_ops *hotplug_ops;

static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);

struct callback {
        irq_handler_t   handler;
        void            *data;
};

struct zdev_irq_map {
        unsigned long   aibv;           /* AI bit vector */
        int             msi_vecs;       /* consecutive MSI-vectors used */
        struct callback cb[ZPCI_NR_MSI_VECS]; /* callback handler array */
        spinlock_t      lock;           /* protect callbacks against de-reg */
};
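
/*
 * Two-level interrupt delivery: firmware first sets a per-function bit
 * in the global adapter-interruption summary bitmap (aisb below), then
 * sets the vector bit in that function's local bit vector (aibv).  The
 * handler scans the summary bits and follows imap[] to the function's
 * callback table.
 */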

struct intr_bucket {
        /* map of adapters, one bit per device, corresponds to one irq nr */
        unsigned long   *alloc;
        /* AI summary bit, global page for all devices */
        unsigned long   *aisb;
        /* pointer to aibv and callback data in zdev */
        struct zdev_irq_map *imap[ZPCI_NR_DEVICES];
        /* protects the whole bucket struct */
        spinlock_t      lock;
};

static struct intr_bucket *bucket;

/* Adapter local summary indicator */
static u8 *zpci_irq_si;

static atomic_t irq_retries = ATOMIC_INIT(0);

static DEFINE_SPINLOCK(zpci_iomap_lock);
static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

/* highest irq summary bit */
static int __read_mostly aisb_max;

static struct kmem_cache *zdev_irq_cache;
static struct kmem_cache *zdev_fmb_cache;
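
/*
 * An irq number encodes both the target function and the MSI vector:
 * the low ZPCI_MSI_VEC_BITS bits select the vector within the function,
 * the remaining high bits select the device (== its summary bit number).
 */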

static inline int irq_to_msi_nr(unsigned int irq)
{
        return irq & ZPCI_MSI_MASK;
}

static inline int irq_to_dev_nr(unsigned int irq)
{
        return irq >> ZPCI_MSI_VEC_BITS;
}

static inline struct zdev_irq_map *get_imap(unsigned int irq)
{
        return bucket->imap[irq_to_dev_nr(irq)];
}

struct zpci_dev *get_zdev(struct pci_dev *pdev)
{
        return (struct zpci_dev *) pdev->sysdata;
}

struct zpci_dev *get_zdev_by_fid(u32 fid)
{
        struct zpci_dev *tmp, *zdev = NULL;

        mutex_lock(&zpci_list_lock);
        list_for_each_entry(tmp, &zpci_list, entry) {
                if (tmp->fid == fid) {
                        zdev = tmp;
                        break;
                }
        }
        mutex_unlock(&zpci_list_lock);
        return zdev;
}

bool zpci_fid_present(u32 fid)
{
        return get_zdev_by_fid(fid) != NULL;
}

static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
{
        return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
}

int pci_domain_nr(struct pci_bus *bus)
{
        return ((struct zpci_dev *) bus->sysdata)->domain;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
        return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);
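
/*
 * The modify-PCI-function-controls (mpcifc) instruction takes a function
 * information block (FIB) that tells firmware, among other things, where
 * to deliver adapter interruptions for this function: the number of
 * interrupts (noi), the bit vector (aibv) and the summary bit (aisb).
 */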

/* Modify PCI: Register adapter interruptions */
static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
                              u64 aibv)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
        struct zpci_fib *fib;
        int rc;

        fib = (void *) get_zeroed_page(GFP_KERNEL);
        if (!fib)
                return -ENOMEM;

        fib->isc = PCI_ISC;
        fib->noi = zdev->irq_map->msi_vecs;
        fib->sum = 1;           /* enable summary notifications */
        fib->aibv = aibv;
        fib->aibvo = 0;         /* every function has its own page */
        fib->aisb = (u64) bucket->aisb + aisb / 8;
        fib->aisbo = aisb & ZPCI_MSI_MASK;

        rc = s390pci_mod_fc(req, fib);
        pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);

        free_page((unsigned long) fib);
        return rc;
}

struct mod_pci_args {
        u64 base;
        u64 limit;
        u64 iota;
        u64 fmb_addr;
};

static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
        struct zpci_fib *fib;
        int rc;

        /* The FIB must be available even if it's not used */
        fib = (void *) get_zeroed_page(GFP_KERNEL);
        if (!fib)
                return -ENOMEM;

        fib->pba = args->base;
        fib->pal = args->limit;
        fib->iota = args->iota;
        fib->fmb_addr = args->fmb_addr;

        rc = s390pci_mod_fc(req, fib);
        free_page((unsigned long) fib);
        return rc;
}

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
                       u64 base, u64 limit, u64 iota)
{
        struct mod_pci_args args = { base, limit, iota, 0 };

        WARN_ON_ONCE(iota & 0x3fff);
        args.iota |= ZPCI_IOTA_RTTO_FLAG;
        return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args);
}

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
        struct mod_pci_args args = { 0, 0, 0, 0 };

        return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args);
}

/* Modify PCI: Unregister adapter interruptions */
static int zpci_unregister_airq(struct zpci_dev *zdev)
{
        struct mod_pci_args args = { 0, 0, 0, 0 };

        return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args);
}

/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
        struct mod_pci_args args = { 0, 0, 0, 0 };

        if (zdev->fmb)
                return -EINVAL;

        zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
        if (!zdev->fmb)
                return -ENOMEM;
        WARN_ON((u64) zdev->fmb & 0xf);

        args.fmb_addr = virt_to_phys(zdev->fmb);
        return mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
        struct mod_pci_args args = { 0, 0, 0, 0 };
        int rc;

        if (!zdev->fmb)
                return -EINVAL;

        /* Function measurement is disabled if fmb address is zero */
        rc = mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);

        kmem_cache_free(zdev_fmb_cache, zdev->fmb);
        zdev->fmb = NULL;
        return rc;
}

#define ZPCI_PCIAS_CFGSPC	15
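
/*
 * Config space is accessed through the PCI load/store instructions with
 * the address space set to ZPCI_PCIAS_CFGSPC; the value is shifted into
 * the correct byte lanes and converted from little endian by hand.
 */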

static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
        u64 data;
        int rc;

        rc = s390pci_load(&data, req, offset);
        data = data << ((8 - len) * 8);
        data = le64_to_cpu(data);
        if (!rc)
                *val = (u32) data;
        else
                *val = 0xffffffff;
        return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
        u64 data = val;
        int rc;

        data = cpu_to_le64(data);
        data = data >> ((8 - len) * 8);
        rc = s390pci_store(data, req, offset);
        return rc;
}

void synchronize_irq(unsigned int irq)
{
        /*
         * Not needed, the handler is protected by a lock and IRQs that occur
         * after the handler is deleted are just NOPs.
         */
}
EXPORT_SYMBOL_GPL(synchronize_irq);

void enable_irq(unsigned int irq)
{
        struct msi_desc *msi = irq_get_msi_desc(irq);

        zpci_msi_set_mask_bits(msi, 1, 0);
}
EXPORT_SYMBOL_GPL(enable_irq);

void disable_irq(unsigned int irq)
{
        struct msi_desc *msi = irq_get_msi_desc(irq);

        zpci_msi_set_mask_bits(msi, 1, 1);
}
EXPORT_SYMBOL_GPL(disable_irq);

void disable_irq_nosync(unsigned int irq)
{
        disable_irq(irq);
}
EXPORT_SYMBOL_GPL(disable_irq_nosync);

unsigned long probe_irq_on(void)
{
        return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_on);

int probe_irq_off(unsigned long val)
{
        return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_off);

unsigned int probe_irq_mask(unsigned long val)
{
        return val;
}
EXPORT_SYMBOL_GPL(probe_irq_mask);

void pcibios_fixup_bus(struct pci_bus *bus)
{
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
                                       resource_size_t size,
                                       resource_size_t align)
{
        return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
        zpci_memcpy_toio(to, from, count);
}
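
/*
 * There is no memory-mapped I/O address space on s390.  A pci_iomap()
 * cookie instead encodes an index into zpci_iomap_start[] in its upper
 * bits (idx << 48 above ZPCI_IOMAP_ADDR_BASE); the PCI load/store
 * wrappers use that entry to look up the function handle and BAR for
 * the real access instruction.
 */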

/* Create a virtual mapping cookie for a PCI BAR */
void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
{
        struct zpci_dev *zdev = get_zdev(pdev);
        u64 addr;
        int idx;

        if ((bar & 7) != bar)
                return NULL;

        idx = zdev->bars[bar].map_idx;
        spin_lock(&zpci_iomap_lock);
        zpci_iomap_start[idx].fh = zdev->fh;
        zpci_iomap_start[idx].bar = bar;
        spin_unlock(&zpci_iomap_lock);

        addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
        return (void __iomem *) addr;
}
EXPORT_SYMBOL_GPL(pci_iomap);

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
        unsigned int idx;

        idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
        spin_lock(&zpci_iomap_lock);
        zpci_iomap_start[idx].fh = 0;
        zpci_iomap_start[idx].bar = 0;
        spin_unlock(&zpci_iomap_lock);
}
EXPORT_SYMBOL_GPL(pci_iounmap);

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
                    int size, u32 *val)
{
        struct zpci_dev *zdev = get_zdev_by_bus(bus);

        if (!zdev || devfn != ZPCI_DEVFN)
                return 0;

        return zpci_cfg_load(zdev, where, val, size);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
                     int size, u32 val)
{
        struct zpci_dev *zdev = get_zdev_by_bus(bus);

        if (!zdev || devfn != ZPCI_DEVFN)
                return 0;

        return zpci_cfg_store(zdev, where, val, size);
}

static struct pci_ops pci_root_ops = {
        .read = pci_read,
        .write = pci_write,
};

/* store the last handled bit to implement fair scheduling of devices */
static DEFINE_PER_CPU(unsigned long, next_sbit);
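
/*
 * The handler scans summary bits starting at the per-CPU resume point,
 * wraps around to pick up the skipped bits, re-enables the interrupt
 * and re-scans once more so a bit set in the meantime is not lost
 * (irq_retries counts those races).
 */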

static void zpci_irq_handler(void *dont, void *need)
{
        unsigned long sbit, mbit, last = 0, start = __get_cpu_var(next_sbit);
        int rescan = 0, max = aisb_max;
        struct zdev_irq_map *imap;

        inc_irq_stat(IRQIO_PCI);
        sbit = start;

scan:
        /* find summary_bit */
        for_each_set_bit_left_cont(sbit, bucket->aisb, max) {
                clear_bit(63 - (sbit & 63), bucket->aisb + (sbit >> 6));
                last = sbit;

                /* find vector bit */
                imap = bucket->imap[sbit];
                for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) {
                        inc_irq_stat(IRQIO_MSI);
                        clear_bit(63 - mbit, &imap->aibv);

                        spin_lock(&imap->lock);
                        if (imap->cb[mbit].handler)
                                imap->cb[mbit].handler(mbit,
                                        imap->cb[mbit].data);
                        spin_unlock(&imap->lock);
                }
        }

        if (rescan)
                goto out;

        /* scan the skipped bits */
        if (start > 0) {
                sbit = 0;
                max = start;
                start = 0;
                rescan++;
                goto scan;
        }

        /* enable interrupts again */
        set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);

        /* check again to not lose initiative */
        rmb();
        max = aisb_max;
        sbit = find_first_bit_left(bucket->aisb, max);
        if (sbit != max) {
                atomic_inc(&irq_retries);
                rescan++;
                goto scan;
        }
out:
        /* store next device bit to scan */
        __get_cpu_var(next_sbit) = (++last >= aisb_max) ? 0 : last;
}

/* msi_vecs - number of requested interrupts, 0 places the function into an error state */
static int zpci_setup_msi(struct pci_dev *pdev, int msi_vecs)
{
        struct zpci_dev *zdev = get_zdev(pdev);
        unsigned int aisb, msi_nr;
        struct msi_desc *msi;
        int rc;

        /* store the number of used MSI vectors */
        zdev->irq_map->msi_vecs = min(msi_vecs, ZPCI_NR_MSI_VECS);

        spin_lock(&bucket->lock);
        aisb = find_first_zero_bit(bucket->alloc, PAGE_SIZE);
        /* alloc map exhausted? */
        if (aisb == PAGE_SIZE) {
                spin_unlock(&bucket->lock);
                return -EIO;
        }
        set_bit(aisb, bucket->alloc);
        spin_unlock(&bucket->lock);

        zdev->aisb = aisb;
        if (aisb + 1 > aisb_max)
                aisb_max = aisb + 1;

        /* wire up IRQ shortcut pointer */
        bucket->imap[zdev->aisb] = zdev->irq_map;
        pr_debug("%s: imap[%u] linked to %p\n", __func__, zdev->aisb, zdev->irq_map);

        /* TODO: irq number 0 won't be found if we return less than the requested MSIs.
         * Ignore it for now and fix in common code.
         */
        msi_nr = aisb << ZPCI_MSI_VEC_BITS;

        list_for_each_entry(msi, &pdev->msi_list, list) {
                rc = zpci_setup_msi_irq(zdev, msi, msi_nr,
                                        aisb << ZPCI_MSI_VEC_BITS);
                if (rc)
                        return rc;
                msi_nr++;
        }

        rc = zpci_register_airq(zdev, aisb, (u64) &zdev->irq_map->aibv);
        if (rc) {
                clear_bit(aisb, bucket->alloc);
                dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
                return rc;
        }
        return (zdev->irq_map->msi_vecs == msi_vecs) ?
                0 : zdev->irq_map->msi_vecs;
}

static void zpci_teardown_msi(struct pci_dev *pdev)
{
        struct zpci_dev *zdev = get_zdev(pdev);
        struct msi_desc *msi;
        int aisb, rc;

        rc = zpci_unregister_airq(zdev);
        if (rc) {
                dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
                return;
        }

        msi = list_first_entry(&pdev->msi_list, struct msi_desc, list);
        aisb = irq_to_dev_nr(msi->irq);

        list_for_each_entry(msi, &pdev->msi_list, list)
                zpci_teardown_msi_irq(zdev, msi);

        clear_bit(aisb, bucket->alloc);
        if (aisb + 1 == aisb_max)
                aisb_max--;
}

int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
        pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
        if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
                return -EINVAL;
        return zpci_setup_msi(pdev, nvec);
}

void arch_teardown_msi_irqs(struct pci_dev *pdev)
{
        pr_info("%s: on pdev: %p\n", __func__, pdev);
        zpci_teardown_msi(pdev);
}

static void zpci_map_resources(struct zpci_dev *zdev)
{
        struct pci_dev *pdev = zdev->pdev;
        resource_size_t len;
        int i;

        for (i = 0; i < PCI_BAR_COUNT; i++) {
                len = pci_resource_len(pdev, i);
                if (!len)
                        continue;
                pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0);
                pdev->resource[i].end = pdev->resource[i].start + len - 1;
                pr_debug("BAR%i: -> start: %Lx end: %Lx\n",
                         i, pdev->resource[i].start, pdev->resource[i].end);
        }
}
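
/*
 * resource[i].start holds the pci_iomap() cookie rather than a CPU
 * address (BARs are not memory mapped on s390), so teardown simply
 * hands that cookie back to pci_iounmap().
 */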

static void zpci_unmap_resources(struct pci_dev *pdev)
{
        resource_size_t len;
        int i;

        for (i = 0; i < PCI_BAR_COUNT; i++) {
                len = pci_resource_len(pdev, i);
                if (!len)
                        continue;
                pci_iounmap(pdev, (void *) pdev->resource[i].start);
        }
}

struct zpci_dev *zpci_alloc_device(void)
{
        struct zpci_dev *zdev;

        /* Alloc memory for our private pci device data */
        zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
        if (!zdev)
                return ERR_PTR(-ENOMEM);

        /* Alloc aibv & callback space */
        zdev->irq_map = kmem_cache_zalloc(zdev_irq_cache, GFP_KERNEL);
        if (!zdev->irq_map)
                goto error;
        WARN_ON((u64) zdev->irq_map & 0xff);
        return zdev;

error:
        kfree(zdev);
        return ERR_PTR(-ENOMEM);
}

void zpci_free_device(struct zpci_dev *zdev)
{
        kmem_cache_free(zdev_irq_cache, zdev->irq_map);
        kfree(zdev);
}

/* Called on removal of pci_dev, leaves zpci and bus device */
static void zpci_remove_device(struct pci_dev *pdev)
{
        struct zpci_dev *zdev = get_zdev(pdev);

        dev_info(&pdev->dev, "Removing device %u\n", zdev->domain);
        zdev->state = ZPCI_FN_STATE_CONFIGURED;
        zpci_dma_exit_device(zdev);
        zpci_fmb_disable_device(zdev);
        zpci_sysfs_remove_device(&pdev->dev);
        zpci_unmap_resources(pdev);
        list_del(&zdev->entry);         /* can be called from init */
        zdev->pdev = NULL;
}

static void zpci_scan_devices(void)
{
        struct zpci_dev *zdev;

        mutex_lock(&zpci_list_lock);
        list_for_each_entry(zdev, &zpci_list, entry)
                if (zdev->state == ZPCI_FN_STATE_CONFIGURED)
                        zpci_scan_device(zdev);
        mutex_unlock(&zpci_list_lock);
}

/*
 * Too late for any s390 specific setup, since interrupts must be set up
 * already which requires DMA setup too and the pci scan will access the
 * config space, which only works if the function handle is enabled.
 */
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
        struct resource *res;
        u16 cmd;
        int i;

        pci_read_config_word(pdev, PCI_COMMAND, &cmd);

        for (i = 0; i < PCI_BAR_COUNT; i++) {
                res = &pdev->resource[i];

                if (res->flags & IORESOURCE_IO)
                        return -EINVAL;

                if (res->flags & IORESOURCE_MEM)
                        cmd |= PCI_COMMAND_MEMORY;
        }
        pci_write_config_word(pdev, PCI_COMMAND, cmd);
        return 0;
}

void pcibios_disable_device(struct pci_dev *pdev)
{
        zpci_remove_device(pdev);
        pdev->sysdata = NULL;
}

int pcibios_add_platform_entries(struct pci_dev *pdev)
{
        return zpci_sysfs_add_device(&pdev->dev);
}

int zpci_request_irq(unsigned int irq, irq_handler_t handler, void *data)
{
        int msi_nr = irq_to_msi_nr(irq);
        struct zdev_irq_map *imap;
        struct msi_desc *msi;

        msi = irq_get_msi_desc(irq);
        if (!msi)
                return -EIO;

        imap = get_imap(irq);
        spin_lock_init(&imap->lock);

        pr_debug("%s: register handler for IRQ:MSI %d:%d\n", __func__, irq >> 6, msi_nr);
        imap->cb[msi_nr].handler = handler;
        imap->cb[msi_nr].data = data;

        /*
         * The generic MSI code returns with the interrupt disabled on the
         * card, using the MSI mask bits. Firmware doesn't appear to unmask
         * at that level, so we do it here by hand.
         */
        zpci_msi_set_mask_bits(msi, 1, 0);
        return 0;
}

void zpci_free_irq(unsigned int irq)
{
        struct zdev_irq_map *imap = get_imap(irq);
        int msi_nr = irq_to_msi_nr(irq);
        unsigned long flags;

        pr_debug("%s: for irq: %d\n", __func__, irq);

        spin_lock_irqsave(&imap->lock, flags);
        imap->cb[msi_nr].handler = NULL;
        imap->cb[msi_nr].data = NULL;
        spin_unlock_irqrestore(&imap->lock, flags);
}
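
/*
 * Override the generic request_irq()/free_irq(): MSIs are dispatched by
 * zpci_irq_handler() itself, so handlers are tracked in the per-function
 * callback array instead of the common irq descriptors.  Drivers still
 * call request_irq() as usual on the irq numbers in their msi_desc
 * entries.
 */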

int request_irq(unsigned int irq, irq_handler_t handler,
                unsigned long irqflags, const char *devname, void *dev_id)
{
        pr_debug("%s: irq: %d handler: %p flags: %lx dev: %s\n",
                 __func__, irq, handler, irqflags, devname);

        return zpci_request_irq(irq, handler, dev_id);
}
EXPORT_SYMBOL_GPL(request_irq);

void free_irq(unsigned int irq, void *dev_id)
{
        zpci_free_irq(irq);
}
EXPORT_SYMBOL_GPL(free_irq);

static int __init zpci_irq_init(void)
{
        int cpu, rc;

        bucket = kzalloc(sizeof(*bucket), GFP_KERNEL);
        if (!bucket)
                return -ENOMEM;

        bucket->aisb = (unsigned long *) get_zeroed_page(GFP_KERNEL);
        if (!bucket->aisb) {
                rc = -ENOMEM;
                goto out_aisb;
        }

        bucket->alloc = (unsigned long *) get_zeroed_page(GFP_KERNEL);
        if (!bucket->alloc) {
                rc = -ENOMEM;
                goto out_alloc;
        }

        isc_register(PCI_ISC);
        zpci_irq_si = s390_register_adapter_interrupt(&zpci_irq_handler, NULL, PCI_ISC);
        if (IS_ERR(zpci_irq_si)) {
                rc = PTR_ERR(zpci_irq_si);
                zpci_irq_si = NULL;
                goto out_ai;
        }

        for_each_online_cpu(cpu)
                per_cpu(next_sbit, cpu) = 0;

        spin_lock_init(&bucket->lock);
        /* set summary to 1 to be called every time for the ISC */
        *zpci_irq_si = 1;
        set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
        return 0;

out_ai:
        isc_unregister(PCI_ISC);
        free_page((unsigned long) bucket->alloc);
out_alloc:
        free_page((unsigned long) bucket->aisb);
out_aisb:
        kfree(bucket);
        return rc;
}

static void zpci_irq_exit(void)
{
        free_page((unsigned long) bucket->alloc);
        free_page((unsigned long) bucket->aisb);
        s390_unregister_adapter_interrupt(zpci_irq_si, PCI_ISC);
        isc_unregister(PCI_ISC);
        kfree(bucket);
}

void zpci_debug_info(struct zpci_dev *zdev, struct seq_file *m)
{
        seq_printf(m, "global irq retries: %u\n", atomic_read(&irq_retries));
        seq_printf(m, "aibv[0]:%016lx aibv[1]:%016lx aisb:%016lx\n",
                   get_imap(0)->aibv, get_imap(1)->aibv, *bucket->aisb);
}

static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size,
                                                unsigned long flags, int domain)
{
        struct resource *r;
        char *name;
        int rc;

        r = kzalloc(sizeof(*r), GFP_KERNEL);
        if (!r)
                return ERR_PTR(-ENOMEM);
        r->start = start;
        r->end = r->start + size - 1;
        r->flags = flags;
        r->parent = &iomem_resource;
        name = kmalloc(18, GFP_KERNEL);
        if (!name) {
                kfree(r);
                return ERR_PTR(-ENOMEM);
        }
        sprintf(name, "PCI Bus: %04x:%02x", domain, ZPCI_BUS_NR);
        r->name = name;

        rc = request_resource(&iomem_resource, r);
        if (rc)
                pr_debug("request resource %pR failed\n", r);
        return r;
}

static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
        int entry;

        spin_lock(&zpci_iomap_lock);
        entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
        if (entry == ZPCI_IOMAP_MAX_ENTRIES) {
                spin_unlock(&zpci_iomap_lock);
                return -ENOSPC;
        }
        set_bit(entry, zpci_iomap);
        spin_unlock(&zpci_iomap_lock);
        return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
        spin_lock(&zpci_iomap_lock);
        memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
        clear_bit(entry, zpci_iomap);
        spin_unlock(&zpci_iomap_lock);
}

int pcibios_add_device(struct pci_dev *pdev)
{
        struct zpci_dev *zdev = get_zdev(pdev);

        zpci_debug_init_device(zdev);
        zpci_fmb_enable_device(zdev);
        zpci_map_resources(zdev);

        return 0;
}

static int zpci_create_device_bus(struct zpci_dev *zdev)
{
        struct resource *res;
        LIST_HEAD(resources);
        int i;

        /* allocate mapping entry for each used bar */
        for (i = 0; i < PCI_BAR_COUNT; i++) {
                unsigned long addr, size, flags;
                int entry;

                if (!zdev->bars[i].size)
                        continue;
                entry = zpci_alloc_iomap(zdev);
                if (entry < 0)
                        return entry;
                zdev->bars[i].map_idx = entry;

                /* only MMIO is supported */
                flags = IORESOURCE_MEM;
                if (zdev->bars[i].val & 8)
                        flags |= IORESOURCE_PREFETCH;
                if (zdev->bars[i].val & 4)
                        flags |= IORESOURCE_MEM_64;

                addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48);

                size = 1UL << zdev->bars[i].size;

                res = zpci_alloc_bus_resource(addr, size, flags, zdev->domain);
                if (IS_ERR(res)) {
                        zpci_free_iomap(zdev, entry);
                        return PTR_ERR(res);
                }
                pci_add_resource(&resources, res);
        }

        zdev->bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
                                        zdev, &resources);
        if (!zdev->bus)
                return -EIO;

        zdev->bus->max_bus_speed = zdev->max_bus_speed;
        return 0;
}

static int zpci_alloc_domain(struct zpci_dev *zdev)
{
        spin_lock(&zpci_domain_lock);
        zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
        if (zdev->domain == ZPCI_NR_DEVICES) {
                spin_unlock(&zpci_domain_lock);
                return -ENOSPC;
        }
        set_bit(zdev->domain, zpci_domain);
        spin_unlock(&zpci_domain_lock);
        return 0;
}

static void zpci_free_domain(struct zpci_dev *zdev)
{
        spin_lock(&zpci_domain_lock);
        clear_bit(zdev->domain, zpci_domain);
        spin_unlock(&zpci_domain_lock);
}

int zpci_enable_device(struct zpci_dev *zdev)
{
        int rc;

        rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
        if (rc)
                goto out;
        pr_info("Enabled fh: 0x%x fid: 0x%x\n", zdev->fh, zdev->fid);

        rc = zpci_dma_init_device(zdev);
        if (rc)
                goto out_dma;
        return 0;

out_dma:
        clp_disable_fh(zdev);
out:
        return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_disable_device(struct zpci_dev *zdev)
{
        zpci_dma_exit_device(zdev);
        return clp_disable_fh(zdev);
}
EXPORT_SYMBOL_GPL(zpci_disable_device);

int zpci_create_device(struct zpci_dev *zdev)
{
        int rc;

        rc = zpci_alloc_domain(zdev);
        if (rc)
                goto out;

        rc = zpci_create_device_bus(zdev);
        if (rc)
                goto out_bus;

        mutex_lock(&zpci_list_lock);
        list_add_tail(&zdev->entry, &zpci_list);
        if (hotplug_ops)
                hotplug_ops->create_slot(zdev);
        mutex_unlock(&zpci_list_lock);

        if (zdev->state == ZPCI_FN_STATE_STANDBY)
                return 0;

        rc = zpci_enable_device(zdev);
        if (rc)
                goto out_start;
        return 0;

out_start:
        mutex_lock(&zpci_list_lock);
        list_del(&zdev->entry);
        if (hotplug_ops)
                hotplug_ops->remove_slot(zdev);
        mutex_unlock(&zpci_list_lock);
out_bus:
        zpci_free_domain(zdev);
out:
        return rc;
}

void zpci_stop_device(struct zpci_dev *zdev)
{
        zpci_dma_exit_device(zdev);
        /*
         * Note: SCLP disables fh via set-pci-fn so don't
         * do that here.
         */
}
EXPORT_SYMBOL_GPL(zpci_stop_device);

int zpci_scan_device(struct zpci_dev *zdev)
{
        zdev->pdev = pci_scan_single_device(zdev->bus, ZPCI_DEVFN);
        if (!zdev->pdev) {
                pr_err("pci_scan_single_device failed for fid: 0x%x\n",
                       zdev->fid);
                goto out;
        }

        pci_bus_add_devices(zdev->bus);

        /* now that pdev was added to the bus mark it as used */
        zdev->state = ZPCI_FN_STATE_ONLINE;
        return 0;

out:
        zpci_dma_exit_device(zdev);
        clp_disable_fh(zdev);
        return -EIO;
}
EXPORT_SYMBOL_GPL(zpci_scan_device);

static inline int barsize(u8 size)
{
        return (size) ? (1 << size) >> 10 : 0;
}

static int zpci_mem_init(void)
{
        zdev_irq_cache = kmem_cache_create("PCI_IRQ_cache", sizeof(struct zdev_irq_map),
                                           L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL);
        if (!zdev_irq_cache)
                goto error_zdev;

        zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
                                           16, 0, NULL);
        if (!zdev_fmb_cache)
                goto error_fmb;

        /* TODO: use realloc */
        zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
                                   GFP_KERNEL);
        if (!zpci_iomap_start)
                goto error_iomap;
        return 0;

error_iomap:
        kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
        kmem_cache_destroy(zdev_irq_cache);
error_zdev:
        return -ENOMEM;
}

static void zpci_mem_exit(void)
{
        kfree(zpci_iomap_start);
        kmem_cache_destroy(zdev_irq_cache);
        kmem_cache_destroy(zdev_fmb_cache);
}

void zpci_register_hp_ops(struct pci_hp_callback_ops *ops)
{
        mutex_lock(&zpci_list_lock);
        hotplug_ops = ops;
        mutex_unlock(&zpci_list_lock);
}
EXPORT_SYMBOL_GPL(zpci_register_hp_ops);

void zpci_deregister_hp_ops(void)
{
        mutex_lock(&zpci_list_lock);
        hotplug_ops = NULL;
        mutex_unlock(&zpci_list_lock);
}
EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops);

unsigned int s390_pci_probe = 1;
EXPORT_SYMBOL_GPL(s390_pci_probe);
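
/* "pci=off" on the kernel command line disables PCI probing entirely. */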

char * __init pcibios_setup(char *str)
{
        if (!strcmp(str, "off")) {
                s390_pci_probe = 0;
                return NULL;
        }
        return str;
}

static int __init pci_base_init(void)
{
        int rc;

        if (!s390_pci_probe)
                return 0;

        if (!test_facility(2) || !test_facility(69)
            || !test_facility(71) || !test_facility(72))
                return 0;

        pr_info("Probing PCI hardware: PCI:%d SID:%d AEN:%d\n",
                test_facility(69), test_facility(70),
                test_facility(71));

        rc = zpci_debug_init();
        if (rc)
                return rc;

        rc = zpci_mem_init();
        if (rc)
                goto out_mem;

        rc = zpci_msihash_init();
        if (rc)
                goto out_hash;

        rc = zpci_irq_init();
        if (rc)
                goto out_irq;

        rc = zpci_dma_init();
        if (rc)
                goto out_dma;

        rc = clp_find_pci_devices();
        if (rc)
                goto out_find;

        zpci_scan_devices();
        return 0;

out_find:
        zpci_dma_exit();
out_dma:
        zpci_irq_exit();
out_irq:
        zpci_msihash_exit();
out_hash:
        zpci_mem_exit();
out_mem:
        zpci_debug_exit();
        return rc;
}
subsys_initcall(pci_base_init);