/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>

#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/byteorder.h>

#include <gxio/iorpc_globals.h>
#include <gxio/kiorpc.h>
#include <gxio/trio.h>
#include <gxio/iorpc_trio.h>
#include <hv/drv_trio_intf.h>
/*
 * This file contains the routines to search for PCI buses,
 * enumerate the buses, and configure any attached devices.
 */
#define DEBUG_PCI_CFG	0

#if DEBUG_PCI_CFG
#define TRACE_CFG_WR(size, val, bus, dev, func, offset) \
	pr_info("CFG WR %d-byte VAL %#x to bus %d dev %d func %d addr %u\n", \
		size, val, bus, dev, func, offset & 0xFFF);
#define TRACE_CFG_RD(size, val, bus, dev, func, offset) \
	pr_info("CFG RD %d-byte VAL %#x from bus %d dev %d func %d addr %u\n", \
		size, val, bus, dev, func, offset & 0xFFF);
#else
#define TRACE_CFG_WR(...)
#define TRACE_CFG_RD(...)
#endif
static int pci_probe = 1;

/* Information on the PCIe RC ports configuration. */
static int pcie_rc[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];
/*
 * On some platforms with one or more Gx endpoint ports, we need to
 * delay the PCIe RC port probe for a few seconds to work around
 * a HW PCIe link-training bug. The exact delay is specified with
 * a kernel boot argument in the form of "pcie_rc_delay=T,P,S",
 * where T is the TRIO instance number, P is the port number and S is
 * the delay in seconds. If the argument is specified, but the delay is
 * not provided, the value will be DEFAULT_RC_DELAY.
 */
static int rc_delay[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];
/* Default number of seconds that the PCIe RC port probe can be delayed. */
#define DEFAULT_RC_DELAY	10
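/*
 * Example (editor's illustration of the documented format): booting with
 * "pcie_rc_delay=0,1,20" delays the probe of port 1 on TRIO 0 by 20
 * seconds, while "pcie_rc_delay=0,1" falls back to DEFAULT_RC_DELAY.
 */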
/* The PCI I/O space size in each PCI domain. */
#define IO_SPACE_SIZE		0x10000
/* Provide shorter versions of some very long constant names. */
#define AUTO_CONFIG_RC	\
	TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC
#define AUTO_CONFIG_RC_G1	\
	TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1
#define AUTO_CONFIG_EP	\
	TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT
#define AUTO_CONFIG_EP_G1	\
	TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1
/* Array of the PCIe ports configuration info obtained from the BIB. */
struct pcie_port_property pcie_ports[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];
/* Number of configured TRIO instances. */
int num_trio_shims;
/* All drivers share the TRIO contexts defined here. */
gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];
/* Array of PCIe RC controllers, one per RC-capable TRIO MAC. */
struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES];
int num_rc_controllers;
static struct pci_ops tile_cfg_ops;

/* Mask of CPUs that should receive PCIe interrupts. */
static struct cpumask intr_cpus_map;
/*
 * We don't need to worry about the alignment of resources.
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return res->start;
}
EXPORT_SYMBOL(pcibios_align_resource);
/*
 * Pick a CPU to receive and handle the PCIe interrupts, based on the IRQ #.
 * For now, we simply send interrupts to non-dataplane CPUs.
 * We may implement methods to allow the user to specify the target CPUs,
 * e.g. via boot arguments.
 */
static int tile_irq_cpu(int irq)
{
	unsigned int count;
	int i = 0, cpu;

	count = cpumask_weight(&intr_cpus_map);
	if (unlikely(count == 0)) {
		pr_warning("intr_cpus_map empty, interrupts will be delivered to dataplane tiles\n");
		return irq % (smp_height * smp_width);
	}

	count = irq % count;
	for_each_cpu(cpu, &intr_cpus_map) {
		if (i++ == count)
			break;
	}

	return cpu;
}
/*
 * Open a file descriptor to the TRIO shim.
 */
static int tile_pcie_open(int trio_index)
{
	gxio_trio_context_t *context = &trio_contexts[trio_index];
	int ret;
	int mac;

	/* This opens a file descriptor to the TRIO shim. */
	ret = gxio_trio_init(context, trio_index);
	if (ret < 0)
		goto gxio_trio_init_failure;

	/* Allocate an ASID for the kernel. */
	ret = gxio_trio_alloc_asids(context, 1, 0, 0);
	if (ret < 0) {
		pr_err("PCI: ASID alloc failure on TRIO %d, give up\n",
		       trio_index);
		goto asid_alloc_failure;
	}

	context->asid = ret;
#ifdef USE_SHARED_PCIE_CONFIG_REGION
	/*
	 * Alloc a PIO region for config access, shared by all MACs per TRIO.
	 * This shouldn't fail since the kernel is supposed to be the first
	 * client of the TRIO's PIO regions.
	 */
	ret = gxio_trio_alloc_pio_regions(context, 1, 0, 0);
	if (ret < 0) {
		pr_err("PCI: CFG PIO alloc failure on TRIO %d, give up\n",
		       trio_index);
		goto pio_alloc_failure;
	}

	context->pio_cfg_index = ret;

	/*
	 * For PIO CFG, the bus_address_hi parameter is 0. The mac parameter
	 * is also 0 because it is specified in PIO_REGION_SETUP_CFG_ADDR.
	 */
	ret = gxio_trio_init_pio_region_aux(context, context->pio_cfg_index,
					    0, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
	if (ret < 0) {
		pr_err("PCI: CFG PIO init failure on TRIO %d, give up\n",
		       trio_index);
		goto pio_alloc_failure;
	}
#endif
	/* Get the properties of the PCIe ports on this TRIO instance. */
	ret = hv_dev_pread(context->fd, 0,
			   (HV_VirtAddr)&pcie_ports[trio_index][0],
			   sizeof(struct pcie_port_property) * TILEGX_TRIO_PCIES,
			   GXIO_TRIO_OP_GET_PORT_PROPERTY);
	if (ret < 0) {
		pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d, on TRIO %d\n",
		       ret, trio_index);
		goto get_port_property_failure;
	}

	context->mmio_base_mac =
		iorpc_ioremap(context->fd, 0, HV_TRIO_CONFIG_IOREMAP_SIZE);
	if (context->mmio_base_mac == NULL) {
		pr_err("PCI: TRIO config space mapping failure on TRIO %d\n",
		       trio_index);
		ret = -ENOMEM;
		goto trio_mmio_mapping_failure;
	}
	/* Check the port strap state which will override the BIB setting. */
	for (mac = 0; mac < TILEGX_TRIO_PCIES; mac++) {
		TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
		unsigned int reg_offset;

		/* Ignore ports that are not specified in the BIB. */
		if (!pcie_ports[trio_index][mac].allow_rc &&
		    !pcie_ports[trio_index][mac].allow_ep)
			continue;

		reg_offset =
			(TRIO_PCIE_INTFC_PORT_CONFIG <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
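		/*
		 * Editor's note: the (reg << REG_SHIFT) | (intfc <<
		 * INTFC_SHIFT) | (mac << MAC_SEL_SHIFT) composition above
		 * recurs for every per-MAC TRIO register access in this
		 * file. A hypothetical helper illustrating the pattern
		 * (not part of the original code):
		 *
		 *	static unsigned int trio_mac_reg_offset(unsigned int reg,
		 *						unsigned int intfc,
		 *						int mac)
		 *	{
		 *		return (reg << TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		 *		       (intfc << TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
		 *		       (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
		 *	}
		 */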
		port_config.word =
			__gxio_mmio_read(context->mmio_base_mac + reg_offset);

		if (port_config.strap_state != AUTO_CONFIG_RC &&
		    port_config.strap_state != AUTO_CONFIG_RC_G1) {
			/*
			 * If this is really intended to be an EP port, record
			 * it so that the endpoint driver will know about it.
			 */
			if (port_config.strap_state == AUTO_CONFIG_EP ||
			    port_config.strap_state == AUTO_CONFIG_EP_G1)
				pcie_ports[trio_index][mac].allow_ep = 1;
		}
	}

	return ret;
trio_mmio_mapping_failure:
get_port_property_failure:
asid_alloc_failure:
#ifdef USE_SHARED_PCIE_CONFIG_REGION
pio_alloc_failure:
#endif
	hv_dev_close(context->fd);
gxio_trio_init_failure:
	context->fd = -1;

	return ret;
}
static int __init tile_trio_init(void)
{
	int i;

	/* We loop over all the TRIO shims. */
	for (i = 0; i < TILEGX_NUM_TRIO; i++) {
		if (tile_pcie_open(i) < 0)
			continue;
		num_trio_shims++;
	}

	return 0;
}
postcore_initcall(tile_trio_init);
static void tilegx_legacy_irq_ack(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
}

static void tilegx_legacy_irq_mask(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
}

static void tilegx_legacy_irq_unmask(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
}

static struct irq_chip tilegx_legacy_irq_chip = {
	.name			= "tilegx_legacy_irq",
	.irq_ack		= tilegx_legacy_irq_ack,
	.irq_mask		= tilegx_legacy_irq_mask,
	.irq_unmask		= tilegx_legacy_irq_unmask,

	/* TBD: support set_affinity. */
};
/*
 * This is a wrapper around the kernel level-triggered interrupt
 * handler handle_level_irq() for PCI legacy interrupts. The TRIO
 * is configured such that only INTx Assert interrupts are proxied
 * to Linux, which just calls handle_level_irq() after clearing the
 * MAC INTx Assert status bit associated with this interrupt.
 */
static void trio_handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	struct pci_controller *controller = irq_desc_get_handler_data(desc);
	gxio_trio_context_t *trio_context = controller->trio;
	uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc);
	int mac = controller->mac;
	unsigned int reg_offset;
	uint64_t level_mask;

	handle_level_irq(irq, desc);

	/*
	 * Clear the INTx Level status, otherwise future interrupts are
	 * not sent.
	 */
	reg_offset = (TRIO_PCIE_INTFC_MAC_INT_STS <<
		TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
		TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

	level_mask = TRIO_PCIE_INTFC_MAC_INT_STS__INT_LEVEL_MASK << intx;

	__gxio_mmio_write(trio_context->mmio_base_mac + reg_offset,
			  level_mask);
}
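/*
 * Editor's illustration: the chip data "intx" set up in tile_init_irqs()
 * is 0..3 for INTA..INTD, so for INTB (intx == 1) level_mask is
 * TRIO_PCIE_INTFC_MAC_INT_STS__INT_LEVEL_MASK << 1 and the final write in
 * trio_handle_level_irq() clears only INTB's level status bit.
 */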
/*
 * Create kernel irqs and set up the handlers for the legacy interrupts.
 * Also performs some minimal initialization for the MSI support.
 */
static int tile_init_irqs(struct pci_controller *controller)
{
	int i;
	int j;
	int irq;
	int result;

	cpumask_copy(&intr_cpus_map, cpu_online_mask);

	for (i = 0; i < 4; i++) {
		gxio_trio_context_t *context = controller->trio;
		int cpu;

		/* Ask the kernel to allocate an IRQ. */
		irq = create_irq();
		if (irq < 0) {
			pr_err("PCI: no free irq vectors, failed for %d\n", i);
			goto free_irqs;
		}
		controller->irq_intx_table[i] = irq;

		/* Distribute the 4 IRQs to different tiles. */
		cpu = tile_irq_cpu(irq);

		/* Configure the TRIO intr binding for this IRQ. */
		result = gxio_trio_config_legacy_intr(context, cpu_x(cpu),
						      cpu_y(cpu), KERNEL_PL,
						      irq, controller->mac, i);
		if (result < 0) {
			pr_err("PCI: MAC intx config failed for %d\n", i);
			goto free_irqs;
		}

		/* Register the IRQ handler with the kernel. */
		irq_set_chip_and_handler(irq, &tilegx_legacy_irq_chip,
					 trio_handle_level_irq);
		irq_set_chip_data(irq, (void *)(uint64_t)i);
		irq_set_handler_data(irq, controller);
	}

	return 0;

free_irqs:
	for (j = 0; j < i; j++)
		destroy_irq(controller->irq_intx_table[j]);

	return -1;
}
/*
 * Return 1 if the port is strapped to operate in RC mode.
 */
static int
strapped_for_rc(gxio_trio_context_t *trio_context, int mac)
{
	TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
	unsigned int reg_offset;

	/* Check the port configuration. */
	reg_offset =
		(TRIO_PCIE_INTFC_PORT_CONFIG <<
			TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
			TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
	port_config.word =
		__gxio_mmio_read(trio_context->mmio_base_mac + reg_offset);

	if (port_config.strap_state == AUTO_CONFIG_RC ||
	    port_config.strap_state == AUTO_CONFIG_RC_G1)
		return 1;
	else
		return 0;
}
/*
 * Find valid controllers and fill in pci_controller structs for each
 * of them.
 *
 * Return the number of controllers discovered.
 */
int __init tile_pci_init(void)
{
	int ctl_index = 0;
	int i, j;

	if (!pci_probe) {
		pr_info("PCI: disabled by boot argument\n");
		return 0;
	}

	pr_info("PCI: Searching for controllers...\n");

	if (num_trio_shims == 0 || sim_is_simulator())
		return 0;

	/*
	 * Now determine which PCIe ports are configured to operate in RC
	 * mode. We look at the Board Information Block first and then see
	 * if the HW strapping pins override that configuration.
	 */
	for (i = 0; i < TILEGX_NUM_TRIO; i++) {
		gxio_trio_context_t *context = &trio_contexts[i];

		if (context->fd < 0)
			continue;

		for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
			if (pcie_ports[i][j].allow_rc &&
			    strapped_for_rc(context, j)) {
				pcie_rc[i][j] = 1;
				num_rc_controllers++;
			}
		}
	}

	/* Return if no PCIe ports are configured to operate in RC mode. */
	if (num_rc_controllers == 0)
		return 0;
	/* Set the TRIO pointer and MAC index for each PCIe RC port. */
	for (i = 0; i < TILEGX_NUM_TRIO; i++) {
		for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
			if (pcie_rc[i][j]) {
				pci_controllers[ctl_index].trio =
					&trio_contexts[i];
				pci_controllers[ctl_index].mac = j;
				pci_controllers[ctl_index].trio_index = i;
				ctl_index++;
				if (ctl_index == num_rc_controllers)
					goto out;
			}
		}
	}

out:
	/* Configure each PCIe RC port. */
	for (i = 0; i < num_rc_controllers; i++) {
		/* Configure the PCIe MAC to run in RC mode. */
		struct pci_controller *controller = &pci_controllers[i];

		controller->index = i;
		controller->ops = &tile_cfg_ops;

		controller->io_space.start = PCIBIOS_MIN_IO +
			(i * IO_SPACE_SIZE);
		controller->io_space.end = controller->io_space.start +
			IO_SPACE_SIZE - 1;
		BUG_ON(controller->io_space.end > IO_SPACE_LIMIT);
		controller->io_space.flags = IORESOURCE_IO;
		snprintf(controller->io_space_name,
			 sizeof(controller->io_space_name),
			 "PCI I/O domain %d", i);
		controller->io_space.name = controller->io_space_name;

		/*
		 * The PCI memory resource is located above the PA space.
		 * For every host bridge, the BAR window or the MMIO aperture
		 * is in range [3GB, 4GB - 1] of a 4GB space beyond the
		 * PA space.
		 */
		controller->mem_offset = TILE_PCI_MEM_START +
			(i * TILE_PCI_BAR_WINDOW_TOP);
		controller->mem_space.start = controller->mem_offset +
			TILE_PCI_BAR_WINDOW_TOP - TILE_PCI_BAR_WINDOW_SIZE;
		controller->mem_space.end = controller->mem_offset +
			TILE_PCI_BAR_WINDOW_TOP - 1;
		controller->mem_space.flags = IORESOURCE_MEM;
		snprintf(controller->mem_space_name,
			 sizeof(controller->mem_space_name),
			 "PCI mem domain %d", i);
		controller->mem_space.name = controller->mem_space_name;
	}

	return num_rc_controllers;
}
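/*
 * Editor's illustration of the layout set up above, assuming the
 * [3GB, 4GB - 1] window described in the loop: domain i owns the 4GB slice
 * starting at mem_offset = TILE_PCI_MEM_START + i * TILE_PCI_BAR_WINDOW_TOP,
 * and its BAR window is the top TILE_PCI_BAR_WINDOW_SIZE bytes of that
 * slice, i.e. [mem_offset + 3GB, mem_offset + 4GB - 1].
 */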
/*
 * (pin - 1) converts from the PCI standard's [1:4] convention to
 * a normal [0:3] range.
 */
static int tile_map_irq(const struct pci_dev *dev, u8 device, u8 pin)
{
	struct pci_controller *controller =
		(struct pci_controller *)dev->sysdata;

	return controller->irq_intx_table[pin - 1];
}
static void fixup_read_and_payload_sizes(struct pci_controller *controller)
{
	gxio_trio_context_t *trio_context = controller->trio;
	struct pci_bus *root_bus = controller->root_bus;
	TRIO_PCIE_RC_DEVICE_CONTROL_t dev_control;
	TRIO_PCIE_RC_DEVICE_CAP_t rc_dev_cap;
	unsigned int reg_offset;
	struct pci_bus *child;
	int mac;
	int err;

	mac = controller->mac;
	/* Set our max read request size to be 4KB. */
	reg_offset =
		(TRIO_PCIE_RC_DEVICE_CONTROL <<
			TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
			TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

	dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
					      reg_offset);
	dev_control.max_read_req_sz = 5;
	__gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
			    dev_control.word);
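	/*
	 * Editor's note: PCIe encodes these sizes as 128 << n bytes, so
	 * max_read_req_sz = 5 above selects the 4096-byte maximum, and
	 * mps_sup = 1 below advertises a 256-byte max payload.
	 */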
	/*
	 * Set the max payload size supported by this Gx PCIe MAC.
	 * Though Gx PCIe supports Max Payload Size of up to 1024 bytes,
	 * experiments have shown that setting MPS to 256 yields the
	 * best performance.
	 */
	reg_offset =
		(TRIO_PCIE_RC_DEVICE_CAP <<
			TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
			TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

	rc_dev_cap.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
					     reg_offset);
	rc_dev_cap.mps_sup = 1;
	__gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
			    rc_dev_cap.word);
	/* Configure PCI Express MPS setting. */
	list_for_each_entry(child, &root_bus->children, node) {
		struct pci_dev *self = child->self;
		if (!self)
			continue;

		pcie_bus_configure_settings(child, self->pcie_mpss);
	}
	/*
	 * Set the mac_config register in trio based on the MPS/MRS of the
	 * link.
	 */
	reg_offset =
		(TRIO_PCIE_RC_DEVICE_CONTROL <<
			TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
			TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

	dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
					      reg_offset);

	err = gxio_trio_set_mps_mrs(trio_context,
				    dev_control.max_payload_size,
				    dev_control.max_read_req_sz,
				    controller->mac);
	if (err < 0)
		pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, MAC %d on TRIO %d\n",
		       mac, controller->trio_index);
}
static int setup_pcie_rc_delay(char *str)
{
	unsigned long delay = 0;
	unsigned long trio_index;
	unsigned long mac;

	if (str == NULL || !isdigit(*str))
		return -EINVAL;
	trio_index = simple_strtoul(str, (char **)&str, 10);
	if (trio_index >= TILEGX_NUM_TRIO)
		return -EINVAL;

	if (*str != ',')
		return -EINVAL;

	str++;
	if (!isdigit(*str))
		return -EINVAL;
	mac = simple_strtoul(str, (char **)&str, 10);
	if (mac >= TILEGX_TRIO_PCIES)
		return -EINVAL;

	if (*str != '\0') {
		if (*str != ',')
			return -EINVAL;

		str++;
		if (!isdigit(*str))
			return -EINVAL;
		delay = simple_strtoul(str, (char **)&str, 10);
	}

	rc_delay[trio_index][mac] = delay ? : DEFAULT_RC_DELAY;
	return 0;
}
early_param("pcie_rc_delay", setup_pcie_rc_delay);
/*
 * PCI initialization entry point, called by subsys_initcall.
 */
int __init pcibios_init(void)
{
	resource_size_t offset;
	LIST_HEAD(resources);
	int next_busno;
	int i;

	tile_pci_init();

	if (num_rc_controllers == 0)
		return 0;

	/*
	 * Delay a bit in case devices aren't ready. Some devices are
	 * known to require at least 20ms here, but we use a more
	 * conservative value.
	 */
	msleep(250);
	/* Scan all of the recorded PCI controllers. */
	for (next_busno = 0, i = 0; i < num_rc_controllers; i++) {
		struct pci_controller *controller = &pci_controllers[i];
		gxio_trio_context_t *trio_context = controller->trio;
		TRIO_PCIE_INTFC_PORT_STATUS_t port_status;
		TRIO_PCIE_INTFC_TX_FIFO_CTL_t tx_fifo_ctl;
		struct pci_bus *bus;
		unsigned int reg_offset;
		unsigned int class_code_revision;
		int trio_index;
		int mac;
		int ret;

		if (trio_context->fd < 0)
			continue;

		trio_index = controller->trio_index;
		mac = controller->mac;
		/*
		 * Check for PCIe link-up status to decide if we need
		 * to force the link to come up.
		 */
		reg_offset =
			(TRIO_PCIE_INTFC_PORT_STATUS <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

		port_status.word =
			__gxio_mmio_read(trio_context->mmio_base_mac +
					 reg_offset);
		if (!port_status.dl_up) {
			if (rc_delay[trio_index][mac]) {
				pr_info("Delaying PCIe RC TRIO init %d sec on MAC %d on TRIO %d\n",
					rc_delay[trio_index][mac], mac,
					trio_index);
				msleep(rc_delay[trio_index][mac] * 1000);
			}

			ret = gxio_trio_force_rc_link_up(trio_context, mac);
			if (ret < 0)
				pr_err("PCI: PCIE_FORCE_LINK_UP failure, MAC %d on TRIO %d\n",
				       mac, trio_index);
		}
744 pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n", i
,
745 trio_index
, controller
->mac
);
748 * Delay the bus probe if needed.
750 if (rc_delay
[trio_index
][mac
]) {
751 pr_info("Delaying PCIe RC bus enumerating %d sec"
752 " on MAC %d on TRIO %d\n",
753 rc_delay
[trio_index
][mac
], mac
,
755 msleep(rc_delay
[trio_index
][mac
] * 1000);
758 * Wait a bit here because some EP devices
759 * take longer to come up.
		/* Check for PCIe link-up status again. */
		port_status.word =
			__gxio_mmio_read(trio_context->mmio_base_mac +
					 reg_offset);
		if (!port_status.dl_up) {
			if (pcie_ports[trio_index][mac].removable) {
				pr_info("PCI: link is down, MAC %d on TRIO %d\n",
					mac, trio_index);
				pr_info("This is expected if no PCIe card is connected to this link\n");
			} else
				pr_err("PCI: link is down, MAC %d on TRIO %d\n",
				       mac, trio_index);

			continue;
		}
		/*
		 * Ensure that the link can come out of L1 power down state.
		 * Strictly speaking, this is needed only in the case of
		 * heavy RC-initiated DMAs.
		 */
		reg_offset =
			(TRIO_PCIE_INTFC_TX_FIFO_CTL <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
		tx_fifo_ctl.word =
			__gxio_mmio_read(trio_context->mmio_base_mac +
					 reg_offset);
		tx_fifo_ctl.min_p_credits = 0;
		__gxio_mmio_write(trio_context->mmio_base_mac + reg_offset,
				  tx_fifo_ctl.word);
		/*
		 * Change the device ID so that Linux bus crawl doesn't
		 * confuse the internal bridge with any Tilera endpoints.
		 */
		reg_offset =
			(TRIO_PCIE_RC_DEVICE_ID_VEN_ID <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

		__gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
				    (TILERA_GX36_RC_DEV_ID <<
				     TRIO_PCIE_RC_DEVICE_ID_VEN_ID__DEV_ID_SHIFT) |
				    TILERA_VENDOR_ID);
		/* Set the internal P2P bridge class code. */
		reg_offset =
			(TRIO_PCIE_RC_REVISION_ID <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

		class_code_revision =
			__gxio_mmio_read32(trio_context->mmio_base_mac +
					   reg_offset);
		class_code_revision = (class_code_revision & 0xff) |
			(PCI_CLASS_BRIDGE_PCI << 16);

		__gxio_mmio_write32(trio_context->mmio_base_mac +
				    reg_offset, class_code_revision);
#ifdef USE_SHARED_PCIE_CONFIG_REGION
		/* Map in the MMIO space for the PIO region. */
		offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index) |
			(((unsigned long long)mac) <<
			 TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);
#else
		/* Alloc a PIO region for PCI config access per MAC. */
		ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
		if (ret < 0) {
			pr_err("PCI: PCI CFG PIO alloc failure for mac %d on TRIO %d, give up\n",
			       mac, trio_index);
			continue;
		}

		trio_context->pio_cfg_index[mac] = ret;

		/* For PIO CFG, the bus_address_hi parameter is 0. */
		ret = gxio_trio_init_pio_region_aux(trio_context,
			trio_context->pio_cfg_index[mac],
			mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
		if (ret < 0) {
			pr_err("PCI: PCI CFG PIO init failure for mac %d on TRIO %d, give up\n",
			       mac, trio_index);
			continue;
		}

		offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index[mac]) |
			(((unsigned long long)mac) <<
			 TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);
#endif
		trio_context->mmio_base_pio_cfg[mac] =
			iorpc_ioremap(trio_context->fd, offset, (1 <<
			TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT));
		if (trio_context->mmio_base_pio_cfg[mac] == NULL) {
			pr_err("PCI: PIO map failure for mac %d on TRIO %d\n",
			       mac, trio_index);
			continue;
		}

		/* Initialize the PCIe interrupts. */
		if (tile_init_irqs(controller)) {
			pr_err("PCI: IRQs init failure for mac %d on TRIO %d\n",
			       mac, trio_index);
			continue;
		}
		/*
		 * The PCI memory resource is located above the PA space.
		 * The memory range for the PCI root bus should not overlap
		 * with the physical RAM.
		 */
		pci_add_resource_offset(&resources, &controller->mem_space,
					controller->mem_offset);
		pci_add_resource(&resources, &controller->io_space);
		controller->first_busno = next_busno;
		bus = pci_scan_root_bus(NULL, next_busno, controller->ops,
					controller, &resources);
		controller->root_bus = bus;
		next_busno = bus->busn_res.end + 1;
	}
	/* Do machine dependent PCI interrupt routing */
	pci_fixup_irqs(pci_common_swizzle, tile_map_irq);

	/*
	 * This comes from the generic Linux PCI driver.
	 *
	 * It allocates all of the resources (I/O memory, etc)
	 * associated with the devices read in above.
	 */
	pci_assign_unassigned_resources();
	/* Record the I/O resources in the PCI controller structure. */
	for (i = 0; i < num_rc_controllers; i++) {
		struct pci_controller *controller = &pci_controllers[i];
		gxio_trio_context_t *trio_context = controller->trio;
		struct pci_bus *root_bus = pci_controllers[i].root_bus;
		int ret;
		int j;

		/*
		 * Skip controllers that are not properly initialized or
		 * have down links.
		 */
		if (root_bus == NULL)
			continue;

		/* Configure the max_payload_size values for this domain. */
		fixup_read_and_payload_sizes(controller);
		/* Alloc a PIO region for PCI memory access for each RC port. */
		ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
		if (ret < 0) {
			pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, give up\n",
			       controller->trio_index, controller->mac);
			continue;
		}

		controller->pio_mem_index = ret;

		/*
		 * For PIO MEM, the bus_address_hi parameter is hard-coded 0
		 * because we always assign 32-bit PCI bus BAR ranges.
		 */
		ret = gxio_trio_init_pio_region_aux(trio_context,
						    controller->pio_mem_index,
						    controller->mac, 0, 0);
		if (ret < 0) {
			pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, give up\n",
			       controller->trio_index, controller->mac);
			continue;
		}
#ifdef CONFIG_TILE_PCI_IO
		/*
		 * Alloc a PIO region for PCI I/O space access for each
		 * RC port.
		 */
		ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
		if (ret < 0) {
			pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, give up\n",
			       controller->trio_index, controller->mac);
			continue;
		}

		controller->pio_io_index = ret;

		/*
		 * For PIO IO, the bus_address_hi parameter is hard-coded 0
		 * because PCI I/O address space is 32-bit.
		 */
		ret = gxio_trio_init_pio_region_aux(trio_context,
						    controller->pio_io_index,
						    controller->mac, 0,
						    HV_TRIO_PIO_FLAG_IO_SPACE);
		if (ret < 0) {
			pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, give up\n",
			       controller->trio_index, controller->mac);
			continue;
		}
#endif
		/*
		 * Configure a Mem-Map region for each memory controller so
		 * that Linux can map all of its PA space to the PCI bus.
		 * Use the IOMMU to handle hash-for-home memory.
		 */
		for_each_online_node(j) {
			unsigned long start_pfn = node_start_pfn[j];
			unsigned long end_pfn = node_end_pfn[j];
			unsigned long nr_pages = end_pfn - start_pfn;

			ret = gxio_trio_alloc_memory_maps(trio_context, 1, 0,
							  0);
			if (ret < 0) {
				pr_err("PCI: Mem-Map alloc failure on TRIO %d mac %d for MC %d, give up\n",
				       controller->trio_index,
				       controller->mac, j);
				goto alloc_mem_map_failed;
			}

			controller->mem_maps[j] = ret;

			/*
			 * Initialize the Mem-Map and the I/O MMU so that all
			 * the physical memory can be accessed by the endpoint
			 * devices. The base bus address is set to the base CPA
			 * of this memory controller plus an offset (see pci.h).
			 * The region's base VA is set to the base CPA. The
			 * I/O MMU table essentially translates the CPA to
			 * the real PA. Implicitly, for node 0, we create
			 * a separate Mem-Map region that serves as the inbound
			 * window for legacy 32-bit devices. This is a direct
			 * map of the low 4GB CPA space.
			 */
			ret = gxio_trio_init_memory_map_mmu_aux(trio_context,
				controller->mem_maps[j],
				start_pfn << PAGE_SHIFT,
				nr_pages << PAGE_SHIFT,
				trio_context->asid,
				controller->mac,
				(start_pfn << PAGE_SHIFT) +
					TILE_PCI_MEM_MAP_BASE_OFFSET,
				j,
				GXIO_TRIO_ORDER_MODE_UNORDERED);
			if (ret < 0) {
				pr_err("PCI: Mem-Map init failure on TRIO %d mac %d for MC %d, give up\n",
				       controller->trio_index,
				       controller->mac, j);
				goto alloc_mem_map_failed;
			}
			continue;

alloc_mem_map_failed:
			break;
		}
	}

	return 0;
}
subsys_initcall(pcibios_init);
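/*
 * Editor's illustration of the inbound window set up by the Mem-Map loop
 * in pcibios_init(): an endpoint DMA targeting bus address
 * (CPA + TILE_PCI_MEM_MAP_BASE_OFFSET) is translated by the Mem-Map region
 * and the I/O MMU back to that CPA, so a buffer at CPA 0x1000 on a mapped
 * node is reachable at bus address TILE_PCI_MEM_MAP_BASE_OFFSET + 0x1000.
 */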
/* Note: to be deleted after Linux 3.6 merge. */
void pcibios_fixup_bus(struct pci_bus *bus)
{
}

/*
 * This can be called from the generic PCI layer, but doesn't need to
 * do anything.
 */
char *pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		pci_probe = 0;
		return NULL;
	}

	return str;
}
/*
 * Enable memory address decoding, as appropriate, for the
 * device described by the 'dev' struct.
 *
 * This is called from the generic PCI layer, and can be called
 * for bridges or endpoints.
 */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	return pci_enable_resources(dev, mask);
}
/* Called for each device after PCI setup is done. */
static void pcibios_fixup_final(struct pci_dev *pdev)
{
	set_dma_ops(&pdev->dev, gx_pci_dma_map_ops);
	set_dma_offset(&pdev->dev, TILE_PCI_MEM_MAP_BASE_OFFSET);
	pdev->dev.archdata.max_direct_dma_addr =
		TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);
/* Map a PCI MMIO bus address into VA space. */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	struct pci_controller *controller = NULL;
	resource_size_t bar_start;
	resource_size_t bar_end;
	resource_size_t offset;
	resource_size_t start;
	resource_size_t end;
	int trio_fd;
	int i;

	start = phys_addr;
	end = phys_addr + size - 1;

	/*
	 * By searching phys_addr in each controller's mem_space, we can
	 * determine the controller that should accept the PCI memory access.
	 */
	for (i = 0; i < num_rc_controllers; i++) {
		/*
		 * Skip controllers that are not properly initialized or
		 * have down links.
		 */
		if (pci_controllers[i].root_bus == NULL)
			continue;

		bar_start = pci_controllers[i].mem_space.start;
		bar_end = pci_controllers[i].mem_space.end;

		if ((start >= bar_start) && (end <= bar_end)) {
			controller = &pci_controllers[i];
			break;
		}
	}

	if (controller == NULL)
		return NULL;

	trio_fd = controller->trio->fd;

	/* Convert the resource start to the bus address offset. */
	start = phys_addr - controller->mem_offset;

	offset = HV_TRIO_PIO_OFFSET(controller->pio_mem_index) + start;

	/* We need to keep the PCI bus address's in-page offset in the VA. */
	return iorpc_ioremap(trio_fd, offset, size) +
		(start & (PAGE_SIZE - 1));
}
EXPORT_SYMBOL(ioremap);
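/*
 * Editor's sketch of typical driver usage of the ioremap() above, with a
 * hypothetical device and register offset:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(pci_resource_start(pdev, 0),
 *		       pci_resource_len(pdev, 0));
 *	if (regs)
 *		writel(1, regs + 0x10);
 *
 * The returned VA points into the owning controller's PIO MEM region,
 * with the bus address's in-page offset preserved.
 */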
#ifdef CONFIG_TILE_PCI_IO
/* Map a PCI I/O address into VA space. */
void __iomem *ioport_map(unsigned long port, unsigned int size)
{
	struct pci_controller *controller = NULL;
	resource_size_t bar_start;
	resource_size_t bar_end;
	resource_size_t offset;
	resource_size_t start;
	resource_size_t end;
	int trio_fd;
	int i;

	start = port;
	end = port + size - 1;

	/*
	 * By searching the port in each controller's io_space, we can
	 * determine the controller that should accept the PCI I/O access.
	 */
	for (i = 0; i < num_rc_controllers; i++) {
		/*
		 * Skip controllers that are not properly initialized or
		 * have down links.
		 */
		if (pci_controllers[i].root_bus == NULL)
			continue;

		bar_start = pci_controllers[i].io_space.start;
		bar_end = pci_controllers[i].io_space.end;

		if ((start >= bar_start) && (end <= bar_end)) {
			controller = &pci_controllers[i];
			break;
		}
	}

	if (controller == NULL)
		return NULL;

	trio_fd = controller->trio->fd;

	/* Convert the resource start to the bus address offset. */
	port -= controller->io_space.start;

	offset = HV_TRIO_PIO_OFFSET(controller->pio_io_index) + port;

	/* We need to keep the PCI bus address's in-page offset in the VA. */
	return iorpc_ioremap(trio_fd, offset, size) + (port & (PAGE_SIZE - 1));
}
EXPORT_SYMBOL(ioport_map);
void ioport_unmap(void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL(ioport_unmap);
#endif
void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL(pci_iounmap);
/****************************************************************
 *
 * Tile PCI config space read/write routines
 *
 ****************************************************************/

/*
 * These are the normal read and write ops.
 * These are expanded with macros from pci_bus_read_config_byte() etc.
 *
 * devfn is the combined PCI device & function.
 *
 * offset is in bytes, from the start of config space for the
 * specified bus & device.
 */
static int tile_cfg_read(struct pci_bus *bus, unsigned int devfn, int offset,
			 int size, u32 *val)
{
	struct pci_controller *controller = bus->sysdata;
	gxio_trio_context_t *trio_context = controller->trio;
	int busnum = bus->number & 0xff;
	int device = PCI_SLOT(devfn);
	int function = PCI_FUNC(devfn);
	int config_type = 1;
	TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr;
	void *mmio_addr;

	/*
	 * Map all accesses to the local device on root bus into the
	 * MMIO space of the MAC. Accesses to the downstream devices
	 * go to the PIO space.
	 */
	if (pci_is_root_bus(bus)) {
		if (device == 0) {
			/*
			 * This is the internal downstream P2P bridge,
			 * access directly.
			 */
			unsigned int reg_offset;

			reg_offset = ((offset & 0xFFF) <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
				(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED
				<< TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
				(controller->mac <<
					TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

			mmio_addr = trio_context->mmio_base_mac + reg_offset;

			goto valid_device;
		} else {
			/*
			 * We fake an empty device for (device > 0),
			 * since there is only one device on bus 0.
			 */
			goto invalid_device;
		}
	}

	/*
	 * Accesses to the directly attached device have to be
	 * sent as type-0 configs.
	 */
	if (busnum == (controller->first_busno + 1)) {
		/*
		 * There is only one device off of our built-in P2P bridge.
		 */
		if (device != 0)
			goto invalid_device;

		config_type = 0;
	}

	cfg_addr.word = 0;
	cfg_addr.reg_addr = (offset & 0xFFF);
	cfg_addr.fn = function;
	cfg_addr.dev = device;
	cfg_addr.bus = busnum;
	cfg_addr.type = config_type;

	/*
	 * Note that we don't set the mac field in cfg_addr because the
	 * mapping is per port.
	 */
	mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
		cfg_addr.word;

valid_device:

	switch (size) {
	case 4:
		*val = __gxio_mmio_read32(mmio_addr);
		break;

	case 2:
		*val = __gxio_mmio_read16(mmio_addr);
		break;

	case 1:
		*val = __gxio_mmio_read8(mmio_addr);
		break;

	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	TRACE_CFG_RD(size, *val, busnum, device, function, offset);

	return 0;

invalid_device:

	switch (size) {
	case 4:
		*val = 0xFFFFFFFF;
		break;

	case 2:
		*val = 0xFFFF;
		break;

	case 1:
		*val = 0xFF;
		break;

	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return 0;
}
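/*
 * Editor's illustration: a type-1 config read of bus 2, device 0,
 * function 0, offset 0x10 (BAR0) builds cfg_addr with reg_addr = 0x10,
 * fn = 0, dev = 0, bus = 2, type = 1, and tile_cfg_read() adds
 * cfg_addr.word to the per-MAC PIO CFG mapping to form the MMIO address.
 */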
/*
 * See tile_cfg_read() for relevant comments.
 * Note that "val" is the value to write, not a pointer to that value.
 */
static int tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset,
			  int size, u32 val)
{
	struct pci_controller *controller = bus->sysdata;
	gxio_trio_context_t *trio_context = controller->trio;
	int busnum = bus->number & 0xff;
	int device = PCI_SLOT(devfn);
	int function = PCI_FUNC(devfn);
	int config_type = 1;
	TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr;
	void *mmio_addr;
	u32 val_32 = (u32)val;
	u16 val_16 = (u16)val;
	u8 val_8 = (u8)val;

	/*
	 * Map all accesses to the local device on root bus into the
	 * MMIO space of the MAC. Accesses to the downstream devices
	 * go to the PIO space.
	 */
	if (pci_is_root_bus(bus)) {
		if (device == 0) {
			/*
			 * This is the internal downstream P2P bridge,
			 * access directly.
			 */
			unsigned int reg_offset;

			reg_offset = ((offset & 0xFFF) <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
				(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED
				<< TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
				(controller->mac <<
					TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

			mmio_addr = trio_context->mmio_base_mac + reg_offset;

			goto valid_device;
		} else {
			/*
			 * We fake an empty device for (device > 0),
			 * since there is only one device on bus 0.
			 */
			goto invalid_device;
		}
	}

	/*
	 * Accesses to the directly attached device have to be
	 * sent as type-0 configs.
	 */
	if (busnum == (controller->first_busno + 1)) {
		/*
		 * There is only one device off of our built-in P2P bridge.
		 */
		if (device != 0)
			goto invalid_device;

		config_type = 0;
	}

	cfg_addr.word = 0;
	cfg_addr.reg_addr = (offset & 0xFFF);
	cfg_addr.fn = function;
	cfg_addr.dev = device;
	cfg_addr.bus = busnum;
	cfg_addr.type = config_type;

	/*
	 * Note that we don't set the mac field in cfg_addr because the
	 * mapping is per port.
	 */
	mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
		cfg_addr.word;

valid_device:

	switch (size) {
	case 4:
		__gxio_mmio_write32(mmio_addr, val_32);
		TRACE_CFG_WR(size, val_32, busnum, device, function, offset);
		break;

	case 2:
		__gxio_mmio_write16(mmio_addr, val_16);
		TRACE_CFG_WR(size, val_16, busnum, device, function, offset);
		break;

	case 1:
		__gxio_mmio_write8(mmio_addr, val_8);
		TRACE_CFG_WR(size, val_8, busnum, device, function, offset);
		break;

	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

invalid_device:

	return 0;
}
static struct pci_ops tile_cfg_ops = {
	.read =		tile_cfg_read,
	.write =	tile_cfg_write,
};
/*
 * MSI support starts here.
 */
static unsigned int tilegx_msi_startup(struct irq_data *d)
{
	if (d->msi_desc)
		unmask_msi_irq(d);

	return 0;
}

static void tilegx_msi_ack(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
}

static void tilegx_msi_mask(struct irq_data *d)
{
	mask_msi_irq(d);
	__insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
}

static void tilegx_msi_unmask(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
	unmask_msi_irq(d);
}

static struct irq_chip tilegx_msi_chip = {
	.name			= "tilegx_msi",
	.irq_startup		= tilegx_msi_startup,
	.irq_ack		= tilegx_msi_ack,
	.irq_mask		= tilegx_msi_mask,
	.irq_unmask		= tilegx_msi_unmask,

	/* TBD: support set_affinity. */
};
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	struct pci_controller *controller;
	gxio_trio_context_t *trio_context;
	struct msi_msg msg;
	int default_irq;
	uint64_t mem_map_base;
	uint64_t mem_map_limit;
	u64 msi_addr;
	int mem_map;
	int cpu;
	int irq;
	int ret;

	irq = create_irq();
	if (irq < 0)
		return irq;

	/*
	 * Since we use a 64-bit Mem-Map to accept the MSI write, we fail
	 * devices that are not capable of generating a 64-bit message
	 * address. These devices will fall back to using the legacy
	 * interrupts. Most PCIe endpoint devices do support 64-bit message
	 * addressing.
	 */
	if (desc->msi_attrib.is_64 == 0) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "64-bit MSI message address not supported, falling back to legacy interrupts\n");

		ret = -ENOMEM;
		goto is_64_failure;
	}
= desc
->msi_attrib
.default_irq
;
1566 controller
= irq_get_handler_data(default_irq
);
1568 BUG_ON(!controller
);
1570 trio_context
= controller
->trio
;
	/*
	 * Allocate a scatter-queue that will accept the MSI write and
	 * trigger the TILE-side interrupts. We use the scatter-queue regions
	 * before the mem map regions, because the latter are needed by more
	 * applications.
	 */
	mem_map = gxio_trio_alloc_scatter_queues(trio_context, 1, 0, 0);
	if (mem_map >= 0) {
		TRIO_MAP_SQ_DOORBELL_FMT_t doorbell_template = {{
			.pop = 0,
			.doorbell = 1,
		}};

		mem_map += TRIO_NUM_MAP_MEM_REGIONS;
		mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
			mem_map * MEM_MAP_INTR_REGION_SIZE;
		mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;

		msi_addr = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 8;
		msg.data = (unsigned int)doorbell_template.word;
	} else {
		/* SQ regions are out, allocate from map mem regions. */
		mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0);
		if (mem_map < 0) {
			dev_printk(KERN_INFO, &pdev->dev,
				   "%s Mem-Map alloc failure. Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n",
				   desc->msi_attrib.is_msix ? "MSI-X" : "MSI");
			ret = -ENOMEM;
			goto msi_mem_map_alloc_failure;
		}

		mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
			mem_map * MEM_MAP_INTR_REGION_SIZE;
		mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;

		msi_addr = mem_map_base + TRIO_MAP_MEM_REG_INT3 -
			TRIO_MAP_MEM_REG_INT0;

		msg.data = mem_map;
	}
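	/*
	 * Editor's note: in the scatter-queue path above, the MSI message
	 * address is the last 8 bytes of the region (the queue's doorbell
	 * location) and the message data is the doorbell word; in the
	 * map-mem fallback, the write is aimed at the region's INT3
	 * interrupt register and the region number is used as the data.
	 */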
	/* We try to distribute different IRQs to different tiles. */
	cpu = tile_irq_cpu(irq);

	/*
	 * Now call up to the HV to configure the MSI interrupt and
	 * set up the IPI binding.
	 */
	ret = gxio_trio_config_msi_intr(trio_context, cpu_x(cpu), cpu_y(cpu),
					KERNEL_PL, irq, controller->mac,
					mem_map, mem_map_base, mem_map_limit,
					trio_context->asid);
	if (ret < 0) {
		dev_printk(KERN_INFO, &pdev->dev, "HV MSI config failed\n");

		goto hv_msi_config_failure;
	}
, desc
);
1634 msg
.address_hi
= msi_addr
>> 32;
1635 msg
.address_lo
= msi_addr
& 0xffffffff;
1637 write_msi_msg(irq
, &msg
);
1638 irq_set_chip_and_handler(irq
, &tilegx_msi_chip
, handle_level_irq
);
1639 irq_set_handler_data(irq
, controller
);
1643 hv_msi_config_failure
:
1645 msi_mem_map_alloc_failure
:
void arch_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}