/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */

#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/pci.h>
#include <linux/msi.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#define DEBUG				/* enable pr_debug */

#define SIC_IRQ_MODE_ALL	0
#define SIC_IRQ_MODE_SINGLE	1

#define ZPCI_NR_DMA_SPACES	1
#define ZPCI_NR_DEVICES		CONFIG_PCI_NR_FUNCTIONS

/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_MUTEX(zpci_list_lock);

static void zpci_enable_irq(struct irq_data *data);
static void zpci_disable_irq(struct irq_data *data);

static struct irq_chip zpci_irq_chip = {
	.name = "zPCI",
	.irq_unmask = zpci_enable_irq,
	.irq_mask = zpci_disable_irq,
};

static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);

static struct airq_iv *zpci_aisb_iv;
static struct airq_iv *zpci_aibv[ZPCI_NR_DEVICES];

/* Adapter interrupt definitions */
static void zpci_irq_handler(struct airq_struct *airq);

static struct airq_struct zpci_airq = {
	.handler = zpci_irq_handler,
	.isc = PCI_ISC,
};

/* I/O Map */
static DEFINE_SPINLOCK(zpci_iomap_lock);
static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

static struct kmem_cache *zdev_fmb_cache;

struct zpci_dev *get_zdev(struct pci_dev *pdev)
{
	return (struct zpci_dev *) pdev->sysdata;
}

struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	mutex_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			break;
		}
	}
	mutex_unlock(&zpci_list_lock);
	return zdev;
}

bool zpci_fid_present(u32 fid)
{
	return (get_zdev_by_fid(fid) != NULL) ? true : false;
}

static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
{
	return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_dev *) bus->sysdata)->domain;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Modify PCI: Register adapter interruptions */
static int zpci_set_airq(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
	struct zpci_fib *fib;
	int rc;

	fib = (void *) get_zeroed_page(GFP_KERNEL);
	if (!fib)
		return -ENOMEM;

	fib->isc = PCI_ISC;
	fib->sum = 1;		/* enable summary notifications */
	fib->noi = airq_iv_end(zdev->aibv);
	fib->aibv = (unsigned long) zdev->aibv->vector;
	fib->aibvo = 0;		/* each zdev has its own interrupt vector */
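	/*
	 * aisb is the byte address of the doubleword in the summary bit
	 * vector that contains this device's summary bit; aisbo is the
	 * bit position within that doubleword.
	 */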
	fib->aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8;
	fib->aisbo = zdev->aisb & 63;

	rc = zpci_mod_fc(req, fib);
	pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);

	free_page((unsigned long) fib);
	return rc;
}

struct mod_pci_args {
	u64 base;
	u64 limit;
	u64 iota;
	u64 fmb_addr;
};

static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
	struct zpci_fib *fib;
	int rc;

	/* The FIB must be available even if it's not used */
	fib = (void *) get_zeroed_page(GFP_KERNEL);
	if (!fib)
		return -ENOMEM;

	fib->pba = args->base;
	fib->pal = args->limit;
	fib->iota = args->iota;
	fib->fmb_addr = args->fmb_addr;

	rc = zpci_mod_fc(req, fib);
	free_page((unsigned long) fib);
	return rc;
}

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	struct mod_pci_args args = { base, limit, iota, 0 };

	WARN_ON_ONCE(iota & 0x3fff);
	args.iota |= ZPCI_IOTA_RTTO_FLAG;
	return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args);
}

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };

	return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args);
}

/* Modify PCI: Unregister adapter interruptions */
static int zpci_clear_airq(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };

	return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args);
}

/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };

	if (zdev->fmb)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	WARN_ON((u64) zdev->fmb & 0xf);

	args.fmb_addr = virt_to_phys(zdev->fmb);
	return mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };
	int rc;

	if (!zdev->fmb)
		return -EINVAL;

	/* Function measurement is disabled if fmb address is zero */
	rc = mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);

	kmem_cache_free(zdev_fmb_cache, zdev->fmb);
	zdev->fmb = NULL;
	return rc;
}

#define ZPCI_PCIAS_CFGSPC	15

static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = zpci_load(&data, req, offset);
	if (!rc) {
		data = data << ((8 - len) * 8);
		data = le64_to_cpu(data);
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data = cpu_to_le64(data);
	data = data >> ((8 - len) * 8);
	rc = zpci_store(data, req, offset);
	return rc;
}

static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
{
	int offset, pos;
	u32 mask_bits;

	if (msi->msi_attrib.is_msix) {
		offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL;
		msi->masked = readl(msi->mask_base + offset);
		writel(flag, msi->mask_base + offset);
	} else if (msi->msi_attrib.maskbit) {
		pos = (long) msi->mask_base;
		pci_read_config_dword(msi->dev, pos, &mask_bits);
		mask_bits &= ~(mask);
		mask_bits |= flag & mask;
		pci_write_config_dword(msi->dev, pos, mask_bits);
	} else
		return 0;

	msi->msi_attrib.maskbit = !!flag;
	return 1;
}

static void zpci_enable_irq(struct irq_data *data)
{
	struct msi_desc *msi = irq_get_msi_desc(data->irq);

	zpci_msi_set_mask_bits(msi, 1, 0);
}

static void zpci_disable_irq(struct irq_data *data)
{
	struct msi_desc *msi = irq_get_msi_desc(data->irq);

	zpci_msi_set_mask_bits(msi, 1, 1);
}

void pcibios_fixup_bus(struct pci_bus *bus)
{
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}

/* Create a virtual mapping cookie for a PCI BAR */
void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	u64 addr;
	int idx;

	if ((bar & 7) != bar)
		return NULL;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

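	/*
	 * The returned cookie is not a CPU address: the iomap table index
	 * is encoded in its upper bits, and the s390 MMIO accessors use it
	 * to look up the function handle and BAR in zpci_iomap_start[].
	 */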
	addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
	return (void __iomem *) addr;
}
EXPORT_SYMBOL_GPL(pci_iomap);

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx;

	idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
	spin_lock(&zpci_iomap_lock);
	zpci_iomap_start[idx].fh = 0;
	zpci_iomap_start[idx].bar = 0;
	spin_unlock(&zpci_iomap_lock);
}
EXPORT_SYMBOL_GPL(pci_iounmap);

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);
	int ret;

	if (!zdev || devfn != ZPCI_DEVFN)
		ret = -ENODEV;
	else
		ret = zpci_cfg_load(zdev, where, val, size);

	return ret;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);
	int ret;

	if (!zdev || devfn != ZPCI_DEVFN)
		ret = -ENODEV;
	else
		ret = zpci_cfg_store(zdev, where, val, size);

	return ret;
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

static void zpci_irq_handler(struct airq_struct *airq)
{
	unsigned long si, ai;
	struct airq_iv *aibv;
	int irqs_on = 0;

	inc_irq_stat(IRQIO_PCI);
	for (si = 0;;) {
		/* Scan adapter summary indicator bit vector */
		si = airq_iv_scan(zpci_aisb_iv, si, airq_iv_end(zpci_aisb_iv));
		if (si == -1UL) {
			if (irqs_on++)
				/* End of second scan with interrupts on. */
				break;
			/* First scan complete, reenable interrupts. */
			zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
			si = 0;
			continue;
		}

		/* Scan the adapter interrupt vector for this device. */
		aibv = zpci_aibv[si];
		for (ai = 0;;) {
			ai = airq_iv_scan(aibv, ai, airq_iv_end(aibv));
			if (ai == -1UL)
				break;
			inc_irq_stat(IRQIO_MSI);
			airq_iv_lock(aibv, ai);
			generic_handle_irq(airq_iv_get_data(aibv, ai));
			airq_iv_unlock(aibv, ai);
		}
	}
}

int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	unsigned int hwirq, irq, msi_vecs;
	unsigned long aisb;
	struct msi_desc *msi;
	struct msi_msg msg;
	int rc;

	pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
	if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
		return -EINVAL;
	msi_vecs = min(nvec, ZPCI_MSI_VEC_MAX);
	msi_vecs = min_t(unsigned int, msi_vecs, CONFIG_PCI_NR_MSI);

	/* Allocate adapter summary indicator bit */
	rc = -EIO;
	aisb = airq_iv_alloc_bit(zpci_aisb_iv);
	if (aisb == -1UL)
		goto out;
	zdev->aisb = aisb;

	/* Create adapter interrupt vector */
	rc = -ENOMEM;
	zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK);
	if (!zdev->aibv)
		goto out_si;

	/* Wire up shortcut pointer */
	zpci_aibv[aisb] = zdev->aibv;

	/* Request MSI interrupts */
	hwirq = 0;
	list_for_each_entry(msi, &pdev->msi_list, list) {
		rc = -EIO;
		irq = irq_alloc_desc(0);	/* Alloc irq on node 0 */
		if (irq == NO_IRQ)
			goto out_msi;
		rc = irq_set_msi_desc(irq, msi);
		if (rc)
			goto out_msi;
		irq_set_chip_and_handler(irq, &zpci_irq_chip,
					 handle_simple_irq);
		msg.data = hwirq;
		msg.address_lo = zdev->msi_addr & 0xffffffff;
		msg.address_hi = zdev->msi_addr >> 32;
		write_msi_msg(irq, &msg);
		airq_iv_set_data(zdev->aibv, hwirq, irq);
		hwirq++;
	}

	/* Enable adapter interrupts */
	rc = zpci_set_airq(zdev);
	if (rc)
		goto out_msi;

	return (msi_vecs == nvec) ? 0 : msi_vecs;

out_msi:
	list_for_each_entry(msi, &pdev->msi_list, list) {
		if (hwirq-- == 0)
			break;
		irq_set_msi_desc(msi->irq, NULL);
		irq_free_desc(msi->irq);
		msi->msg.address_lo = 0;
		msi->msg.address_hi = 0;
		msi->msg.data = 0;
		msi->irq = 0;
	}
	zpci_aibv[aisb] = NULL;
	airq_iv_release(zdev->aibv);
out_si:
	airq_iv_free_bit(zpci_aisb_iv, aisb);
out:
	dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
	return rc;
}

void arch_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	struct msi_desc *msi;
	int rc;

	pr_info("%s: on pdev: %p\n", __func__, pdev);

	/* Disable adapter interrupts */
	rc = zpci_clear_airq(zdev);
	if (rc) {
		dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
		return;
	}

	/* Release MSI interrupts */
	list_for_each_entry(msi, &pdev->msi_list, list) {
		zpci_msi_set_mask_bits(msi, 1, 1);
		irq_set_msi_desc(msi->irq, NULL);
		irq_free_desc(msi->irq);
		msi->msg.address_lo = 0;
		msi->msg.address_hi = 0;
		msi->msg.data = 0;
		msi->irq = 0;
	}

	zpci_aibv[zdev->aisb] = NULL;
	airq_iv_release(zdev->aibv);
	airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
}

static void zpci_map_resources(struct zpci_dev *zdev)
{
	struct pci_dev *pdev = zdev->pdev;
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}
}

static void zpci_unmap_resources(struct zpci_dev *zdev)
{
	struct pci_dev *pdev = zdev->pdev;
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap(pdev, (void *) pdev->resource[i].start);
	}
}

struct zpci_dev *zpci_alloc_device(void)
{
	struct zpci_dev *zdev;

	/* Alloc memory for our private pci device data */
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	return zdev ? : ERR_PTR(-ENOMEM);
}

void zpci_free_device(struct zpci_dev *zdev)
{
	kfree(zdev);
}

int pcibios_add_platform_entries(struct pci_dev *pdev)
{
	return zpci_sysfs_add_device(&pdev->dev);
}

static int __init zpci_irq_init(void)
{
	int rc;

	rc = register_adapter_interrupt(&zpci_airq);
	if (rc)
		goto out;
	/* Set summary to 1 to be called every time for the ISC. */
	*zpci_airq.lsi_ptr = 1;

	rc = -ENOMEM;
	zpci_aisb_iv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC);
	if (!zpci_aisb_iv)
		goto out_airq;

	zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
	return 0;

out_airq:
	unregister_adapter_interrupt(&zpci_airq);
out:
	return rc;
}

static void zpci_irq_exit(void)
{
	airq_iv_release(zpci_aisb_iv);
	unregister_adapter_interrupt(&zpci_airq);
}

static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size,
						unsigned long flags, int domain)
{
	struct resource *r;
	char *name;
	int rc;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return ERR_PTR(-ENOMEM);
	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->parent = &iomem_resource;
	name = kmalloc(18, GFP_KERNEL);
	if (!name) {
		kfree(r);
		return ERR_PTR(-ENOMEM);
	}
	sprintf(name, "PCI Bus: %04x:%02x", domain, ZPCI_BUS_NR);
	r->name = name;

	rc = request_resource(&iomem_resource, r);
	if (rc)
		pr_debug("request resource %pR failed\n", r);
	return r;
}

static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	int entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
	if (entry == ZPCI_IOMAP_MAX_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap);
	spin_unlock(&zpci_iomap_lock);
}

int pcibios_add_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	struct resource *res;
	int i;

	zdev->pdev = pdev;
	zpci_map_resources(zdev);

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}

int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	struct resource *res;
	u16 cmd;
	int i;

	zdev->pdev = pdev;
	zpci_debug_init_device(zdev);
	zpci_fmb_enable_device(zdev);
	zpci_map_resources(zdev);

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	for (i = 0; i < PCI_BAR_COUNT; i++) {
		res = &pdev->resource[i];

		if (res->flags & IORESOURCE_IO)
			return -EINVAL;

		if (res->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	pci_write_config_word(pdev, PCI_COMMAND, cmd);
	return 0;
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);

	zpci_unmap_resources(zdev);
	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
	zdev->pdev = NULL;
}

static int zpci_scan_bus(struct zpci_dev *zdev)
{
	struct resource *res;
	LIST_HEAD(resources);
	int i;

	/* allocate mapping entry for each used bar */
	for (i = 0; i < PCI_BAR_COUNT; i++) {
		unsigned long addr, size, flags;
		int entry;

		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

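		/*
		 * The bus resource covers the same synthetic address range
		 * that pci_iomap() returns: ZPCI_IOMAP_ADDR_BASE with the
		 * iomap entry index encoded in the upper bits.
		 */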
		addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48);

		size = 1UL << zdev->bars[i].size;

		res = zpci_alloc_bus_resource(addr, size, flags, zdev->domain);
		if (IS_ERR(res)) {
			zpci_free_iomap(zdev, entry);
			return PTR_ERR(res);
		}
		pci_add_resource(&resources, res);
	}

	zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
				      zdev, &resources);
	if (!zdev->bus)
		return -EIO;

	zdev->bus->max_bus_speed = zdev->max_bus_speed;
	return 0;
}

static int zpci_alloc_domain(struct zpci_dev *zdev)
{
	spin_lock(&zpci_domain_lock);
	zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	if (zdev->domain == ZPCI_NR_DEVICES) {
		spin_unlock(&zpci_domain_lock);
		return -ENOSPC;
	}
	set_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return 0;
}

static void zpci_free_domain(struct zpci_dev *zdev)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}

int zpci_enable_device(struct zpci_dev *zdev)
{
	int rc;

	rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (rc)
		goto out;
	pr_info("Enabled fh: 0x%x fid: 0x%x\n", zdev->fh, zdev->fid);

	rc = zpci_dma_init_device(zdev);
	if (rc)
		goto out_dma;

	zdev->state = ZPCI_FN_STATE_ONLINE;
	return 0;

out_dma:
	clp_disable_fh(zdev);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_disable_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	return clp_disable_fh(zdev);
}
EXPORT_SYMBOL_GPL(zpci_disable_device);

int zpci_create_device(struct zpci_dev *zdev)
{
	int rc;

	rc = zpci_alloc_domain(zdev);
	if (rc)
		goto out;

	if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
		rc = zpci_enable_device(zdev);
		if (rc)
			goto out_free;
	}
	rc = zpci_scan_bus(zdev);
	if (rc)
		goto out_disable;

	mutex_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	mutex_unlock(&zpci_list_lock);

	zpci_init_slot(zdev);

	return 0;

out_disable:
	if (zdev->state == ZPCI_FN_STATE_ONLINE)
		zpci_disable_device(zdev);
out_free:
	zpci_free_domain(zdev);
out:
	return rc;
}

void zpci_stop_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	/*
	 * Note: SCLP disables fh via set-pci-fn so don't
	 * do that here.
	 */
}
EXPORT_SYMBOL_GPL(zpci_stop_device);

static inline int barsize(u8 size)
{
	return (size) ? (1 << size) >> 10 : 0;
}

static int zpci_mem_init(void)
{
	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   16, 0, NULL);
	if (!zdev_fmb_cache)
		goto error_zdev;

	/* TODO: use realloc */
	zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
				   GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;
	return 0;

error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_zdev:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}

static unsigned int s390_pci_probe;

char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "on")) {
		s390_pci_probe = 1;
		return NULL;
	}
	return str;
}

static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(2) || !test_facility(69)
	    || !test_facility(71) || !test_facility(72))
		return 0;

	pr_info("Probing PCI hardware: PCI:%d SID:%d AEN:%d\n",
		test_facility(69), test_facility(70),
		test_facility(71));

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_find_pci_devices();
	if (rc)
		goto out_find;

	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);