PCI: tegra: Remove unnecessary tegra_pcie_fixup_bridge()
drivers/pci/host/pci-tegra.c
 1/*
 2 * PCIe host controller driver for Tegra SoCs
3 *
4 * Copyright (c) 2010, CompuLab, Ltd.
5 * Author: Mike Rapoport <mike@compulab.co.il>
6 *
7 * Based on NVIDIA PCIe driver
8 * Copyright (c) 2008-2009, NVIDIA Corporation.
9 *
10 * Bits taken from arch/arm/mach-dove/pcie.c
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 * more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
25 */
26
27#include <linux/clk.h>
 28#include <linux/debugfs.h>
29#include <linux/delay.h>
30#include <linux/export.h>
31#include <linux/interrupt.h>
32#include <linux/irq.h>
33#include <linux/irqdomain.h>
34#include <linux/kernel.h>
35#include <linux/module.h>
36#include <linux/msi.h>
37#include <linux/of_address.h>
38#include <linux/of_pci.h>
39#include <linux/of_platform.h>
40#include <linux/pci.h>
 41#include <linux/phy/phy.h>
 42#include <linux/platform_device.h>
 43#include <linux/reset.h>
44#include <linux/sizes.h>
45#include <linux/slab.h>
46#include <linux/vmalloc.h>
47#include <linux/regulator/consumer.h>
48
 49#include <soc/tegra/cpuidle.h>
 50#include <soc/tegra/pmc.h>
 51
52#include <asm/mach/irq.h>
53#include <asm/mach/map.h>
54#include <asm/mach/pci.h>
55
56#define INT_PCI_MSI_NR (8 * 32)
57
58/* register definitions */
59
60#define AFI_AXI_BAR0_SZ 0x00
61#define AFI_AXI_BAR1_SZ 0x04
62#define AFI_AXI_BAR2_SZ 0x08
63#define AFI_AXI_BAR3_SZ 0x0c
64#define AFI_AXI_BAR4_SZ 0x10
65#define AFI_AXI_BAR5_SZ 0x14
66
67#define AFI_AXI_BAR0_START 0x18
68#define AFI_AXI_BAR1_START 0x1c
69#define AFI_AXI_BAR2_START 0x20
70#define AFI_AXI_BAR3_START 0x24
71#define AFI_AXI_BAR4_START 0x28
72#define AFI_AXI_BAR5_START 0x2c
73
74#define AFI_FPCI_BAR0 0x30
75#define AFI_FPCI_BAR1 0x34
76#define AFI_FPCI_BAR2 0x38
77#define AFI_FPCI_BAR3 0x3c
78#define AFI_FPCI_BAR4 0x40
79#define AFI_FPCI_BAR5 0x44
80
81#define AFI_CACHE_BAR0_SZ 0x48
82#define AFI_CACHE_BAR0_ST 0x4c
83#define AFI_CACHE_BAR1_SZ 0x50
84#define AFI_CACHE_BAR1_ST 0x54
85
86#define AFI_MSI_BAR_SZ 0x60
87#define AFI_MSI_FPCI_BAR_ST 0x64
88#define AFI_MSI_AXI_BAR_ST 0x68
89
90#define AFI_MSI_VEC0 0x6c
91#define AFI_MSI_VEC1 0x70
92#define AFI_MSI_VEC2 0x74
93#define AFI_MSI_VEC3 0x78
94#define AFI_MSI_VEC4 0x7c
95#define AFI_MSI_VEC5 0x80
96#define AFI_MSI_VEC6 0x84
97#define AFI_MSI_VEC7 0x88
98
99#define AFI_MSI_EN_VEC0 0x8c
100#define AFI_MSI_EN_VEC1 0x90
101#define AFI_MSI_EN_VEC2 0x94
102#define AFI_MSI_EN_VEC3 0x98
103#define AFI_MSI_EN_VEC4 0x9c
104#define AFI_MSI_EN_VEC5 0xa0
105#define AFI_MSI_EN_VEC6 0xa4
106#define AFI_MSI_EN_VEC7 0xa8
107
108#define AFI_CONFIGURATION 0xac
109#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
110
111#define AFI_FPCI_ERROR_MASKS 0xb0
112
113#define AFI_INTR_MASK 0xb4
114#define AFI_INTR_MASK_INT_MASK (1 << 0)
115#define AFI_INTR_MASK_MSI_MASK (1 << 8)
116
117#define AFI_INTR_CODE 0xb8
118#define AFI_INTR_CODE_MASK 0xf
119#define AFI_INTR_INI_SLAVE_ERROR 1
120#define AFI_INTR_INI_DECODE_ERROR 2
121#define AFI_INTR_TARGET_ABORT 3
122#define AFI_INTR_MASTER_ABORT 4
123#define AFI_INTR_INVALID_WRITE 5
124#define AFI_INTR_LEGACY 6
125#define AFI_INTR_FPCI_DECODE_ERROR 7
126#define AFI_INTR_AXI_DECODE_ERROR 8
127#define AFI_INTR_FPCI_TIMEOUT 9
128#define AFI_INTR_PE_PRSNT_SENSE 10
129#define AFI_INTR_PE_CLKREQ_SENSE 11
130#define AFI_INTR_CLKCLAMP_SENSE 12
131#define AFI_INTR_RDY4PD_SENSE 13
132#define AFI_INTR_P2P_ERROR 14
133
134#define AFI_INTR_SIGNATURE 0xbc
135#define AFI_UPPER_FPCI_ADDRESS 0xc0
136#define AFI_SM_INTR_ENABLE 0xc4
137#define AFI_SM_INTR_INTA_ASSERT (1 << 0)
138#define AFI_SM_INTR_INTB_ASSERT (1 << 1)
139#define AFI_SM_INTR_INTC_ASSERT (1 << 2)
140#define AFI_SM_INTR_INTD_ASSERT (1 << 3)
141#define AFI_SM_INTR_INTA_DEASSERT (1 << 4)
142#define AFI_SM_INTR_INTB_DEASSERT (1 << 5)
143#define AFI_SM_INTR_INTC_DEASSERT (1 << 6)
144#define AFI_SM_INTR_INTD_DEASSERT (1 << 7)
145
146#define AFI_AFI_INTR_ENABLE 0xc8
147#define AFI_INTR_EN_INI_SLVERR (1 << 0)
148#define AFI_INTR_EN_INI_DECERR (1 << 1)
149#define AFI_INTR_EN_TGT_SLVERR (1 << 2)
150#define AFI_INTR_EN_TGT_DECERR (1 << 3)
151#define AFI_INTR_EN_TGT_WRERR (1 << 4)
152#define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
153#define AFI_INTR_EN_AXI_DECERR (1 << 6)
154#define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
 155#define AFI_INTR_EN_PRSNT_SENSE (1 << 8)
156
157#define AFI_PCIE_CONFIG 0x0f8
158#define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1))
159#define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe
160#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
161#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
 162#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
 163#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20)
 164#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
 165#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
 166#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20)
 167#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
168
169#define AFI_FUSE 0x104
170#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
171
172#define AFI_PEX0_CTRL 0x110
173#define AFI_PEX1_CTRL 0x118
 174#define AFI_PEX2_CTRL 0x128
 175#define AFI_PEX_CTRL_RST (1 << 0)
 176#define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
 177#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
178#define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4)
179
180#define AFI_PLLE_CONTROL 0x160
181#define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
182#define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
 183
184#define AFI_PEXBIAS_CTRL_0 0x168
185
186#define RP_VEND_XP 0x00000F00
187#define RP_VEND_XP_DL_UP (1 << 30)
188
189#define RP_PRIV_MISC 0x00000FE0
190#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0)
191#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0)
192
193#define RP_LINK_CONTROL_STATUS 0x00000090
194#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
195#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
196
197#define PADS_CTL_SEL 0x0000009C
198
199#define PADS_CTL 0x000000A0
200#define PADS_CTL_IDDQ_1L (1 << 0)
201#define PADS_CTL_TX_DATA_EN_1L (1 << 6)
202#define PADS_CTL_RX_DATA_EN_1L (1 << 10)
203
204#define PADS_PLL_CTL_TEGRA20 0x000000B8
205#define PADS_PLL_CTL_TEGRA30 0x000000B4
206#define PADS_PLL_CTL_RST_B4SM (1 << 1)
207#define PADS_PLL_CTL_LOCKDET (1 << 8)
208#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
209#define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16)
210#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16)
211#define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16)
212#define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
213#define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20)
214#define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
215#define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22)
216
217#define PADS_REFCLK_CFG0 0x000000C8
218#define PADS_REFCLK_CFG1 0x000000CC
 219#define PADS_REFCLK_BIAS 0x000000D0
 220
221/*
222 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
223 * entries, one entry per PCIe port. These field definitions and desired
224 * values aren't in the TRM, but do come from NVIDIA.
225 */
226#define PADS_REFCLK_CFG_TERM_SHIFT 2 /* 6:2 */
227#define PADS_REFCLK_CFG_E_TERM_SHIFT 7
228#define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */
229#define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */
230
231/* Default value provided by HW engineering is 0xfa5c */
232#define PADS_REFCLK_CFG_VALUE \
233 ( \
234 (0x17 << PADS_REFCLK_CFG_TERM_SHIFT) | \
235 (0 << PADS_REFCLK_CFG_E_TERM_SHIFT) | \
236 (0xa << PADS_REFCLK_CFG_PREDI_SHIFT) | \
237 (0xf << PADS_REFCLK_CFG_DRVI_SHIFT) \
238 )
239
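As a quick sanity check on the field layout above, the following stand-alone user-space snippet (illustrative only, not part of the driver) recombines the individual field values and should print 0xfa5c, the default quoted in the comment.

#include <stdio.h>

int main(void)
{
	unsigned int term  = 0x17 << 2;   /* TERM,   bits 6:2 */
	unsigned int eterm = 0x0  << 7;   /* E_TERM, bit 7 */
	unsigned int predi = 0xa  << 8;   /* PREDI,  bits 11:8 */
	unsigned int drvi  = 0xf  << 12;  /* DRVI,   bits 15:12 */

	/* 0x005c | 0x0000 | 0x0a00 | 0xf000 = 0xfa5c */
	printf("PADS_REFCLK_CFG_VALUE = 0x%04x\n", term | eterm | predi | drvi);
	return 0;
}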
 240struct tegra_msi {
 241 struct msi_controller chip;
242 DECLARE_BITMAP(used, INT_PCI_MSI_NR);
243 struct irq_domain *domain;
244 unsigned long pages;
245 struct mutex lock;
246 int irq;
247};
248
249/* used to differentiate between Tegra SoC generations */
250struct tegra_pcie_soc_data {
251 unsigned int num_ports;
252 unsigned int msi_base_shift;
253 u32 pads_pll_ctl;
254 u32 tx_ref_sel;
255 bool has_pex_clkreq_en;
256 bool has_pex_bias_ctrl;
257 bool has_intr_prsnt_sense;
 258 bool has_cml_clk;
 259 bool has_gen2;
260};
261
 262static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
263{
264 return container_of(chip, struct tegra_msi, chip);
265}
266
267struct tegra_pcie {
268 struct device *dev;
269
270 void __iomem *pads;
271 void __iomem *afi;
272 int irq;
273
 274 struct list_head buses;
275 struct resource *cs;
276
 277 struct resource all;
 278 struct resource io;
 279 struct resource pio;
280 struct resource mem;
281 struct resource prefetch;
282 struct resource busn;
283
284 struct clk *pex_clk;
285 struct clk *afi_clk;
 286 struct clk *pll_e;
 287 struct clk *cml_clk;
 288
289 struct reset_control *pex_rst;
290 struct reset_control *afi_rst;
291 struct reset_control *pcie_xrst;
292
293 struct phy *phy;
294
295 struct tegra_msi msi;
296
297 struct list_head ports;
298 unsigned int num_ports;
299 u32 xbar_config;
300
301 struct regulator_bulk_data *supplies;
302 unsigned int num_supplies;
303
304 const struct tegra_pcie_soc_data *soc_data;
 305 struct dentry *debugfs;
306};
307
308struct tegra_pcie_port {
309 struct tegra_pcie *pcie;
310 struct list_head list;
311 struct resource regs;
312 void __iomem *base;
313 unsigned int index;
314 unsigned int lanes;
315};
316
317struct tegra_pcie_bus {
318 struct vm_struct *area;
319 struct list_head list;
320 unsigned int nr;
321};
322
323static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
324{
325 return sys->private_data;
326}
327
328static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
329 unsigned long offset)
330{
331 writel(value, pcie->afi + offset);
332}
333
334static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
335{
336 return readl(pcie->afi + offset);
337}
338
339static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
340 unsigned long offset)
341{
342 writel(value, pcie->pads + offset);
343}
344
345static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
346{
347 return readl(pcie->pads + offset);
348}
349
350/*
351 * The configuration space mapping on Tegra is somewhat similar to the ECAM
352 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
353 * register accesses are mapped:
354 *
355 * [27:24] extended register number
356 * [23:16] bus number
357 * [15:11] device number
358 * [10: 8] function number
359 * [ 7: 0] register number
360 *
361 * Mapping the whole extended configuration space would require 256 MiB of
362 * virtual address space, only a small part of which will actually be used.
 363 * To work around this, 1 MiB of virtual address space is allocated per bus
 364 * when the bus is first accessed. When the physical range is mapped, the
 365 * bus number bits are hidden so that the extended register number bits
366 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
367 *
368 * [19:16] extended register number
369 * [15:11] device number
370 * [10: 8] function number
371 * [ 7: 0] register number
372 *
373 * This is achieved by stitching together 16 chunks of 64 KiB of physical
374 * address space via the MMU.
375 */
376static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
377{
378 return ((where & 0xf00) << 8) | (PCI_SLOT(devfn) << 11) |
379 (PCI_FUNC(devfn) << 8) | (where & 0xfc);
380}
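To illustrate the packing performed by tegra_pcie_conf_offset(), here is a stand-alone sketch (example values assumed; PCI_SLOT()/PCI_FUNC() reimplemented with their usual kernel definitions) that computes the offset of device 2, function 1, extended register 0x104 within a bus's 1 MiB window.

#include <stdio.h>

#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)		((devfn) & 0x07)

/* same packing as tegra_pcie_conf_offset() above */
static unsigned long conf_offset(unsigned int devfn, int where)
{
	return ((where & 0xf00) << 8) | (PCI_SLOT(devfn) << 11) |
	       (PCI_FUNC(devfn) << 8) | (where & 0xfc);
}

int main(void)
{
	/* device 2, function 1, register 0x104 -> offset 0x11104 */
	printf("offset = 0x%05lx\n", conf_offset(PCI_DEVFN(2, 1), 0x104));
	return 0;
}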
381
382static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
383 unsigned int busnr)
384{
385 pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
386 L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
387 phys_addr_t cs = pcie->cs->start;
388 struct tegra_pcie_bus *bus;
389 unsigned int i;
390 int err;
391
392 bus = kzalloc(sizeof(*bus), GFP_KERNEL);
393 if (!bus)
394 return ERR_PTR(-ENOMEM);
395
396 INIT_LIST_HEAD(&bus->list);
397 bus->nr = busnr;
398
399 /* allocate 1 MiB of virtual addresses */
400 bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
401 if (!bus->area) {
402 err = -ENOMEM;
403 goto free;
404 }
405
406 /* map each of the 16 chunks of 64 KiB each */
407 for (i = 0; i < 16; i++) {
408 unsigned long virt = (unsigned long)bus->area->addr +
409 i * SZ_64K;
 410 phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;
411
412 err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
413 if (err < 0) {
414 dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
415 err);
416 goto unmap;
417 }
418 }
419
420 return bus;
421
422unmap:
423 vunmap(bus->area->addr);
424free:
425 kfree(bus);
426 return ERR_PTR(err);
427}
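The stitching can be visualized with a small stand-alone sketch (the configuration-space base address below is hypothetical): chunk i of a bus's virtual window maps to the 64 KiB slice belonging to that bus inside extended-register block i, and those blocks are spaced 16 MiB apart in the physical layout.

#include <stdio.h>

#define SZ_64K	0x00010000UL
#define SZ_16M	0x01000000UL

int main(void)
{
	unsigned long cs = 0x01000000UL;	/* hypothetical "cs" base address */
	unsigned int busnr = 5;
	unsigned int i;

	/* same arithmetic as the mapping loop in tegra_pcie_bus_alloc() */
	for (i = 0; i < 16; i++)
		printf("chunk %2u -> phys 0x%08lx\n", i,
		       cs + i * SZ_16M + busnr * SZ_64K);

	return 0;
}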
428
429/*
430 * Look up a virtual address mapping for the specified bus number. If no such
 431 * mapping exists, try to create one.
432 */
433static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
434 unsigned int busnr)
435{
436 struct tegra_pcie_bus *bus;
437
 438 list_for_each_entry(bus, &pcie->buses, list)
 439 if (bus->nr == busnr)
 440 return (void __iomem *)bus->area->addr;
441
442 bus = tegra_pcie_bus_alloc(pcie, busnr);
443 if (IS_ERR(bus))
444 return NULL;
445
 446 list_add_tail(&bus->list, &pcie->buses);
 447
 448 return (void __iomem *)bus->area->addr;
449}
450
451static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
452 unsigned int devfn,
453 int where)
454{
455 struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
456 void __iomem *addr = NULL;
457
458 if (bus->number == 0) {
459 unsigned int slot = PCI_SLOT(devfn);
460 struct tegra_pcie_port *port;
461
462 list_for_each_entry(port, &pcie->ports, list) {
463 if (port->index + 1 == slot) {
464 addr = port->base + (where & ~3);
465 break;
466 }
467 }
468 } else {
469 addr = tegra_pcie_bus_map(pcie, bus->number);
470 if (!addr) {
471 dev_err(pcie->dev,
472 "failed to map cfg. space for bus %u\n",
473 bus->number);
474 return NULL;
475 }
476
477 addr += tegra_pcie_conf_offset(devfn, where);
478 }
479
480 return addr;
481}
482
483static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
484 int where, int size, u32 *value)
485{
486 void __iomem *addr;
487
488 addr = tegra_pcie_conf_address(bus, devfn, where);
489 if (!addr) {
490 *value = 0xffffffff;
491 return PCIBIOS_DEVICE_NOT_FOUND;
492 }
493
494 *value = readl(addr);
495
496 if (size == 1)
497 *value = (*value >> (8 * (where & 3))) & 0xff;
498 else if (size == 2)
499 *value = (*value >> (8 * (where & 3))) & 0xffff;
500
501 return PCIBIOS_SUCCESSFUL;
502}
503
504static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
505 int where, int size, u32 value)
506{
507 void __iomem *addr;
508 u32 mask, tmp;
509
510 addr = tegra_pcie_conf_address(bus, devfn, where);
511 if (!addr)
512 return PCIBIOS_DEVICE_NOT_FOUND;
513
514 if (size == 4) {
515 writel(value, addr);
516 return PCIBIOS_SUCCESSFUL;
517 }
518
519 if (size == 2)
520 mask = ~(0xffff << ((where & 0x3) * 8));
521 else if (size == 1)
522 mask = ~(0xff << ((where & 0x3) * 8));
523 else
524 return PCIBIOS_BAD_REGISTER_NUMBER;
525
526 tmp = readl(addr) & mask;
527 tmp |= value << ((where & 0x3) * 8);
528 writel(tmp, addr);
529
530 return PCIBIOS_SUCCESSFUL;
531}
532
533static struct pci_ops tegra_pcie_ops = {
534 .read = tegra_pcie_read_conf,
535 .write = tegra_pcie_write_conf,
536};
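Since the hardware is only accessed with aligned 32-bit reads, sub-word config accesses are emulated by shifting and masking. The stand-alone sketch below (example word value assumed) mirrors the extraction done in tegra_pcie_read_conf(): a one-byte read at offset 2 of the word 0xddccbbaa yields 0xcc, a two-byte read yields 0xddcc.

#include <stdio.h>

/* mirrors the size handling in tegra_pcie_read_conf() */
static unsigned int extract(unsigned int value, int where, int size)
{
	if (size == 1)
		return (value >> (8 * (where & 3))) & 0xff;
	if (size == 2)
		return (value >> (8 * (where & 3))) & 0xffff;
	return value;
}

int main(void)
{
	unsigned int word = 0xddccbbaa;	/* 32-bit word read from config space */

	printf("byte at +2: 0x%02x\n", extract(word, 2, 1));
	printf("half at +2: 0x%04x\n", extract(word, 2, 2));
	return 0;
}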
537
538static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
539{
540 unsigned long ret = 0;
541
542 switch (port->index) {
543 case 0:
544 ret = AFI_PEX0_CTRL;
545 break;
546
547 case 1:
548 ret = AFI_PEX1_CTRL;
549 break;
550
551 case 2:
552 ret = AFI_PEX2_CTRL;
553 break;
554 }
555
556 return ret;
557}
558
559static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
560{
561 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
562 unsigned long value;
563
564 /* pulse reset signal */
565 value = afi_readl(port->pcie, ctrl);
566 value &= ~AFI_PEX_CTRL_RST;
567 afi_writel(port->pcie, value, ctrl);
568
569 usleep_range(1000, 2000);
570
571 value = afi_readl(port->pcie, ctrl);
572 value |= AFI_PEX_CTRL_RST;
573 afi_writel(port->pcie, value, ctrl);
574}
575
576static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
577{
 578 const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
579 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
580 unsigned long value;
581
582 /* enable reference clock */
583 value = afi_readl(port->pcie, ctrl);
584 value |= AFI_PEX_CTRL_REFCLK_EN;
585
586 if (soc->has_pex_clkreq_en)
587 value |= AFI_PEX_CTRL_CLKREQ_EN;
588
589 value |= AFI_PEX_CTRL_OVERRIDE_EN;
590
591 afi_writel(port->pcie, value, ctrl);
592
593 tegra_pcie_port_reset(port);
594}
595
596static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
597{
 598 const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
599 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
600 unsigned long value;
601
602 /* assert port reset */
603 value = afi_readl(port->pcie, ctrl);
604 value &= ~AFI_PEX_CTRL_RST;
605 afi_writel(port->pcie, value, ctrl);
606
607 /* disable reference clock */
608 value = afi_readl(port->pcie, ctrl);
609
610 if (soc->has_pex_clkreq_en)
611 value &= ~AFI_PEX_CTRL_CLKREQ_EN;
612
613 value &= ~AFI_PEX_CTRL_REFCLK_EN;
614 afi_writel(port->pcie, value, ctrl);
615}
616
617static void tegra_pcie_port_free(struct tegra_pcie_port *port)
618{
619 struct tegra_pcie *pcie = port->pcie;
620
621 devm_iounmap(pcie->dev, port->base);
622 devm_release_mem_region(pcie->dev, port->regs.start,
623 resource_size(&port->regs));
624 list_del(&port->list);
625 devm_kfree(pcie->dev, port);
626}
627
628/* Tegra PCIE root complex wrongly reports device class */
629static void tegra_pcie_fixup_class(struct pci_dev *dev)
630{
631 dev->class = PCI_CLASS_BRIDGE_PCI << 8;
632}
633DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
634DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
635DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
636DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
637
638/* Tegra PCIE requires relaxed ordering */
639static void tegra_pcie_relax_enable(struct pci_dev *dev)
640{
641 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
642}
643DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
644
645static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
646{
647 struct tegra_pcie *pcie = sys_to_pcie(sys);
648 int err;
649
650 err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
651 if (err < 0)
652 return err;
653
654 err = devm_request_resource(pcie->dev, &pcie->all, &pcie->prefetch);
655 if (err)
656 return err;
657
658 pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
659 pci_add_resource_offset(&sys->resources, &pcie->prefetch,
660 sys->mem_offset);
661 pci_add_resource(&sys->resources, &pcie->busn);
662
 663 pci_ioremap_io(pcie->pio.start, pcie->io.start);
664
665 return 1;
666}
667
668static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
669{
670 struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
 671 int irq;
 672
673 tegra_cpuidle_pcie_irqs_in_use();
674
675 irq = of_irq_parse_and_map_pci(pdev, slot, pin);
676 if (!irq)
677 irq = pcie->irq;
678
679 return irq;
680}
681
682static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys)
683{
684 struct tegra_pcie *pcie = sys_to_pcie(sys);
685 struct pci_bus *bus;
686
687 bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys,
688 &sys->resources);
689 if (!bus)
690 return NULL;
691
692 pci_scan_child_bus(bus);
693
694 return bus;
695}
696
697static irqreturn_t tegra_pcie_isr(int irq, void *arg)
698{
699 const char *err_msg[] = {
700 "Unknown",
701 "AXI slave error",
702 "AXI decode error",
703 "Target abort",
704 "Master abort",
705 "Invalid write",
 706 "Legacy interrupt",
707 "Response decoding error",
708 "AXI response decoding error",
709 "Transaction timeout",
710 "Slot present pin change",
711 "Slot clock request change",
712 "TMS clock ramp change",
713 "TMS ready for power down",
714 "Peer2Peer error",
715 };
716 struct tegra_pcie *pcie = arg;
717 u32 code, signature;
718
719 code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
720 signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
721 afi_writel(pcie, 0, AFI_INTR_CODE);
722
723 if (code == AFI_INTR_LEGACY)
724 return IRQ_NONE;
725
726 if (code >= ARRAY_SIZE(err_msg))
727 code = 0;
728
729 /*
730 * do not pollute kernel log with master abort reports since they
731 * happen a lot during enumeration
732 */
733 if (code == AFI_INTR_MASTER_ABORT)
734 dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
735 signature);
736 else
737 dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
738 signature);
739
740 if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
741 code == AFI_INTR_FPCI_DECODE_ERROR) {
742 u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
743 u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
744
745 if (code == AFI_INTR_MASTER_ABORT)
746 dev_dbg(pcie->dev, " FPCI address: %10llx\n", address);
747 else
748 dev_err(pcie->dev, " FPCI address: %10llx\n", address);
749 }
750
751 return IRQ_HANDLED;
752}
753
754/*
755 * FPCI map is as follows:
756 * - 0xfdfc000000: I/O space
757 * - 0xfdfe000000: type 0 configuration space
758 * - 0xfdff000000: type 1 configuration space
759 * - 0xfe00000000: type 0 extended configuration space
760 * - 0xfe10000000: type 1 extended configuration space
761 */
762static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
763{
764 u32 fpci_bar, size, axi_address;
765
766 /* Bar 0: type 1 extended configuration space */
767 fpci_bar = 0xfe100000;
768 size = resource_size(pcie->cs);
769 axi_address = pcie->cs->start;
770 afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
771 afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
772 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);
773
774 /* Bar 1: downstream IO bar */
775 fpci_bar = 0xfdfc0000;
776 size = resource_size(&pcie->io);
 777 axi_address = pcie->io.start;
778 afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
779 afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
780 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
781
782 /* Bar 2: prefetchable memory BAR */
783 fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
784 size = resource_size(&pcie->prefetch);
785 axi_address = pcie->prefetch.start;
786 afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
787 afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
788 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);
789
790 /* Bar 3: non prefetchable memory BAR */
791 fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
792 size = resource_size(&pcie->mem);
793 axi_address = pcie->mem.start;
794 afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
795 afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
796 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
797
798 /* NULL out the remaining BARs as they are not used */
799 afi_writel(pcie, 0, AFI_AXI_BAR4_START);
800 afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
801 afi_writel(pcie, 0, AFI_FPCI_BAR4);
802
803 afi_writel(pcie, 0, AFI_AXI_BAR5_START);
804 afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
805 afi_writel(pcie, 0, AFI_FPCI_BAR5);
806
807 /* map all upstream transactions as uncached */
808 afi_writel(pcie, PHYS_OFFSET, AFI_CACHE_BAR0_ST);
809 afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
810 afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
811 afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
812
813 /* MSI translations are setup only when needed */
814 afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
815 afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
816 afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
817 afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
818}
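The two memory BARs are programmed with a packed form of their window start address; the stand-alone sketch below (hypothetical start address) simply reproduces that arithmetic so the register value can be checked by hand.

#include <stdio.h>

int main(void)
{
	unsigned long start = 0xa0000000UL;	/* hypothetical prefetch.start */
	unsigned int fpci_bar;

	/* same encoding as the "Bar 2" and "Bar 3" setup above */
	fpci_bar = (((start >> 12) & 0x0fffffff) << 4) | 0x1;

	printf("AFI_FPCI_BAR2 = 0x%08x\n", fpci_bar);	/* 0x00a00001 */
	return 0;
}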
819
 820static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
 821{
 822 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
 823 u32 value;
 824
 825 timeout = jiffies + msecs_to_jiffies(timeout);
 826
827 while (time_before(jiffies, timeout)) {
828 value = pads_readl(pcie, soc->pads_pll_ctl);
829 if (value & PADS_PLL_CTL_LOCKDET)
830 return 0;
831 }
 832
833 return -ETIMEDOUT;
834}
 835
836static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
837{
838 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
839 u32 value;
840 int err;
 841
 842 /* initialize internal PHY, enable up to 16 PCIE lanes */
843 pads_writel(pcie, 0x0, PADS_CTL_SEL);
844
845 /* override IDDQ to 1 on all 4 lanes */
846 value = pads_readl(pcie, PADS_CTL);
847 value |= PADS_CTL_IDDQ_1L;
848 pads_writel(pcie, value, PADS_CTL);
849
850 /*
 851 * Set up PHY PLL inputs: select PLLE output as refclock,
852 * set TX ref sel to div10 (not div5).
853 */
 854 value = pads_readl(pcie, soc->pads_pll_ctl);
 855 value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
856 value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
857 pads_writel(pcie, value, soc->pads_pll_ctl);
 858
859 /* reset PLL */
860 value = pads_readl(pcie, soc->pads_pll_ctl);
861 value &= ~PADS_PLL_CTL_RST_B4SM;
862 pads_writel(pcie, value, soc->pads_pll_ctl);
863
864 usleep_range(20, 100);
865
d1523b52 866 /* take PLL out of reset */
 867 value = pads_readl(pcie, soc->pads_pll_ctl);
 868 value |= PADS_PLL_CTL_RST_B4SM;
 869 pads_writel(pcie, value, soc->pads_pll_ctl);
 870
871 /* Configure the reference clock driver */
872 value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16);
873 pads_writel(pcie, value, PADS_REFCLK_CFG0);
874 if (soc->num_ports > 2)
875 pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);
876
877 /* wait for the PLL to lock */
878 err = tegra_pcie_pll_wait(pcie, 500);
879 if (err < 0) {
880 dev_err(pcie->dev, "PLL failed to lock: %d\n", err);
881 return err;
882 }
883
884 /* turn off IDDQ override */
885 value = pads_readl(pcie, PADS_CTL);
886 value &= ~PADS_CTL_IDDQ_1L;
887 pads_writel(pcie, value, PADS_CTL);
888
889 /* enable TX/RX data */
890 value = pads_readl(pcie, PADS_CTL);
891 value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
892 pads_writel(pcie, value, PADS_CTL);
893
894 return 0;
895}
896
897static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
898{
899 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
900 struct tegra_pcie_port *port;
901 unsigned long value;
902 int err;
903
904 /* enable PLL power down */
905 if (pcie->phy) {
906 value = afi_readl(pcie, AFI_PLLE_CONTROL);
907 value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
908 value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
909 afi_writel(pcie, value, AFI_PLLE_CONTROL);
910 }
911
912 /* power down PCIe slot clock bias pad */
913 if (soc->has_pex_bias_ctrl)
914 afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
915
916 /* configure mode and disable all ports */
917 value = afi_readl(pcie, AFI_PCIE_CONFIG);
918 value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
919 value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
920
921 list_for_each_entry(port, &pcie->ports, list)
922 value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
923
924 afi_writel(pcie, value, AFI_PCIE_CONFIG);
925
926 if (soc->has_gen2) {
927 value = afi_readl(pcie, AFI_FUSE);
928 value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
929 afi_writel(pcie, value, AFI_FUSE);
930 } else {
931 value = afi_readl(pcie, AFI_FUSE);
932 value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
933 afi_writel(pcie, value, AFI_FUSE);
934 }
935
936 if (!pcie->phy)
937 err = tegra_pcie_phy_enable(pcie);
938 else
939 err = phy_power_on(pcie->phy);
940
941 if (err < 0) {
942 dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
943 return err;
944 }
945
 946 /* take the PCIe interface module out of reset */
 947 reset_control_deassert(pcie->pcie_xrst);
948
949 /* finally enable PCIe */
950 value = afi_readl(pcie, AFI_CONFIGURATION);
951 value |= AFI_CONFIGURATION_EN_FPCI;
952 afi_writel(pcie, value, AFI_CONFIGURATION);
953
954 value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
955 AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
956 AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;
957
958 if (soc->has_intr_prsnt_sense)
959 value |= AFI_INTR_EN_PRSNT_SENSE;
960
961 afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
962 afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);
963
964 /* don't enable MSI for now, only when needed */
965 afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);
966
967 /* disable all exceptions */
968 afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
969
970 return 0;
971}
972
973static void tegra_pcie_power_off(struct tegra_pcie *pcie)
974{
975 int err;
976
977 /* TODO: disable and unprepare clocks? */
978
979 err = phy_power_off(pcie->phy);
980 if (err < 0)
981 dev_warn(pcie->dev, "failed to power off PHY: %d\n", err);
982
983 reset_control_assert(pcie->pcie_xrst);
984 reset_control_assert(pcie->afi_rst);
985 reset_control_assert(pcie->pex_rst);
986
987 tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
988
 989 err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
 990 if (err < 0)
 991 dev_warn(pcie->dev, "failed to disable regulators: %d\n", err);
992}
993
994static int tegra_pcie_power_on(struct tegra_pcie *pcie)
995{
 996 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
997 int err;
998
999 reset_control_assert(pcie->pcie_xrst);
1000 reset_control_assert(pcie->afi_rst);
1001 reset_control_assert(pcie->pex_rst);
1002
1003 tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
1004
1005 /* enable regulators */
1006 err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
1007 if (err < 0)
1008 dev_err(pcie->dev, "failed to enable regulators: %d\n", err);
 1009
 1010 err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
1011 pcie->pex_clk,
1012 pcie->pex_rst);
1013 if (err) {
1014 dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
1015 return err;
1016 }
1017
 1018 reset_control_deassert(pcie->afi_rst);
1019
1020 err = clk_prepare_enable(pcie->afi_clk);
1021 if (err < 0) {
1022 dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
1023 return err;
1024 }
1025
1026 if (soc->has_cml_clk) {
1027 err = clk_prepare_enable(pcie->cml_clk);
1028 if (err < 0) {
1029 dev_err(pcie->dev, "failed to enable CML clock: %d\n",
1030 err);
1031 return err;
1032 }
1033 }
1034
1035 err = clk_prepare_enable(pcie->pll_e);
1036 if (err < 0) {
1037 dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
1038 return err;
1039 }
1040
1041 return 0;
1042}
1043
1044static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
1045{
1046 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
1047
1048 pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
1049 if (IS_ERR(pcie->pex_clk))
1050 return PTR_ERR(pcie->pex_clk);
1051
1052 pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
1053 if (IS_ERR(pcie->afi_clk))
1054 return PTR_ERR(pcie->afi_clk);
1055
1056 pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
1057 if (IS_ERR(pcie->pll_e))
1058 return PTR_ERR(pcie->pll_e);
1059
1060 if (soc->has_cml_clk) {
1061 pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
1062 if (IS_ERR(pcie->cml_clk))
1063 return PTR_ERR(pcie->cml_clk);
1064 }
1065
1066 return 0;
1067}
1068
1069static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
1070{
1071 pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
1072 if (IS_ERR(pcie->pex_rst))
1073 return PTR_ERR(pcie->pex_rst);
1074
1075 pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
1076 if (IS_ERR(pcie->afi_rst))
1077 return PTR_ERR(pcie->afi_rst);
1078
1079 pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
1080 if (IS_ERR(pcie->pcie_xrst))
1081 return PTR_ERR(pcie->pcie_xrst);
1082
1083 return 0;
1084}
1085
1086static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
1087{
1088 struct platform_device *pdev = to_platform_device(pcie->dev);
1089 struct resource *pads, *afi, *res;
1090 int err;
1091
1092 err = tegra_pcie_clocks_get(pcie);
1093 if (err) {
1094 dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
1095 return err;
1096 }
1097
1098 err = tegra_pcie_resets_get(pcie);
1099 if (err) {
1100 dev_err(&pdev->dev, "failed to get resets: %d\n", err);
1101 return err;
1102 }
1103
1104 pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
1105 if (IS_ERR(pcie->phy)) {
1106 err = PTR_ERR(pcie->phy);
1107 dev_err(&pdev->dev, "failed to get PHY: %d\n", err);
1108 return err;
1109 }
1110
1111 err = phy_init(pcie->phy);
1112 if (err < 0) {
1113 dev_err(&pdev->dev, "failed to initialize PHY: %d\n", err);
1114 return err;
1115 }
1116
1117 err = tegra_pcie_power_on(pcie);
1118 if (err) {
1119 dev_err(&pdev->dev, "failed to power up: %d\n", err);
1120 return err;
1121 }
1122
 1123 pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
1124 pcie->pads = devm_ioremap_resource(&pdev->dev, pads);
1125 if (IS_ERR(pcie->pads)) {
1126 err = PTR_ERR(pcie->pads);
1127 goto poweroff;
1128 }
1129
1130 afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
1131 pcie->afi = devm_ioremap_resource(&pdev->dev, afi);
1132 if (IS_ERR(pcie->afi)) {
1133 err = PTR_ERR(pcie->afi);
1134 goto poweroff;
1135 }
1136
 1137 /* request configuration space, but remap later, on demand */
1138 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
1139 if (!res) {
1140 err = -EADDRNOTAVAIL;
1141 goto poweroff;
1142 }
1143
1144 pcie->cs = devm_request_mem_region(pcie->dev, res->start,
1145 resource_size(res), res->name);
1146 if (!pcie->cs) {
1147 err = -EADDRNOTAVAIL;
1148 goto poweroff;
1149 }
1150
1151 /* request interrupt */
1152 err = platform_get_irq_byname(pdev, "intr");
1153 if (err < 0) {
1154 dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
1155 goto poweroff;
1156 }
1157
1158 pcie->irq = err;
1159
1160 err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
1161 if (err) {
1162 dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
1163 goto poweroff;
1164 }
1165
1166 return 0;
1167
1168poweroff:
1169 tegra_pcie_power_off(pcie);
1170 return err;
1171}
1172
1173static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
1174{
1175 int err;
1176
1177 if (pcie->irq > 0)
1178 free_irq(pcie->irq, pcie);
1179
1180 tegra_pcie_power_off(pcie);
1181
1182 err = phy_exit(pcie->phy);
1183 if (err < 0)
1184 dev_err(pcie->dev, "failed to teardown PHY: %d\n", err);
1185
1186 return 0;
1187}
1188
1189static int tegra_msi_alloc(struct tegra_msi *chip)
1190{
1191 int msi;
1192
1193 mutex_lock(&chip->lock);
1194
1195 msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1196 if (msi < INT_PCI_MSI_NR)
1197 set_bit(msi, chip->used);
1198 else
1199 msi = -ENOSPC;
1200
1201 mutex_unlock(&chip->lock);
1202
1203 return msi;
1204}
1205
1206static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1207{
1208 struct device *dev = chip->chip.dev;
1209
1210 mutex_lock(&chip->lock);
1211
1212 if (!test_bit(irq, chip->used))
1213 dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1214 else
1215 clear_bit(irq, chip->used);
1216
1217 mutex_unlock(&chip->lock);
1218}
1219
1220static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
1221{
1222 struct tegra_pcie *pcie = data;
1223 struct tegra_msi *msi = &pcie->msi;
1224 unsigned int i, processed = 0;
1225
1226 for (i = 0; i < 8; i++) {
1227 unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1228
1229 while (reg) {
1230 unsigned int offset = find_first_bit(&reg, 32);
1231 unsigned int index = i * 32 + offset;
1232 unsigned int irq;
1233
1234 /* clear the interrupt */
1235 afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);
1236
1237 irq = irq_find_mapping(msi->domain, index);
1238 if (irq) {
1239 if (test_bit(index, msi->used))
1240 generic_handle_irq(irq);
1241 else
1242 dev_info(pcie->dev, "unhandled MSI\n");
1243 } else {
1244 /*
 1245 * that's weird, who triggered this?
1246 * just clear it
1247 */
1248 dev_info(pcie->dev, "unexpected MSI\n");
1249 }
1250
1251 /* see if there's any more pending in this vector */
1252 reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1253
1254 processed++;
1255 }
1256 }
1257
1258 return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
1259}
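Each of the eight AFI_MSI_VEC registers covers 32 MSIs, so the hardware interrupt number is reconstructed as i * 32 + bit offset. A stand-alone sketch with a hypothetical pending mask for AFI_MSI_VEC3 (find_first_bit() replaced by a compiler builtin) decodes to hwirqs 103 and 106.

#include <stdio.h>

int main(void)
{
	unsigned int i = 3;			/* register index: AFI_MSI_VEC3 */
	unsigned int reg = 0x00000480;		/* hypothetical pending bits 7 and 10 */

	while (reg) {
		unsigned int offset = __builtin_ctz(reg);	/* stand-in for find_first_bit() */
		unsigned int index = i * 32 + offset;

		printf("MSI hwirq %u is pending\n", index);
		reg &= ~(1u << offset);
	}

	return 0;
}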
1260
1261static int tegra_msi_setup_irq(struct msi_controller *chip,
1262 struct pci_dev *pdev, struct msi_desc *desc)
1263{
1264 struct tegra_msi *msi = to_tegra_msi(chip);
1265 struct msi_msg msg;
1266 unsigned int irq;
1267 int hwirq;
1268
1269 hwirq = tegra_msi_alloc(msi);
1270 if (hwirq < 0)
1271 return hwirq;
1272
1273 irq = irq_create_mapping(msi->domain, hwirq);
1274 if (!irq) {
1275 tegra_msi_free(msi, hwirq);
 1276 return -EINVAL;
 1277 }
1278
1279 irq_set_msi_desc(irq, desc);
1280
1281 msg.address_lo = virt_to_phys((void *)msi->pages);
1282 /* 32 bit address only */
1283 msg.address_hi = 0;
1284 msg.data = hwirq;
1285
 1286 pci_write_msi_msg(irq, &msg);
1287
1288 return 0;
1289}
1290
1291static void tegra_msi_teardown_irq(struct msi_controller *chip,
1292 unsigned int irq)
1293{
1294 struct tegra_msi *msi = to_tegra_msi(chip);
1295 struct irq_data *d = irq_get_irq_data(irq);
 1296 irq_hw_number_t hwirq = irqd_to_hwirq(d);
 1297
1298 irq_dispose_mapping(irq);
1299 tegra_msi_free(msi, hwirq);
1300}
1301
1302static struct irq_chip tegra_msi_irq_chip = {
1303 .name = "Tegra PCIe MSI",
1304 .irq_enable = pci_msi_unmask_irq,
1305 .irq_disable = pci_msi_mask_irq,
1306 .irq_mask = pci_msi_mask_irq,
1307 .irq_unmask = pci_msi_unmask_irq,
1308};
1309
1310static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
1311 irq_hw_number_t hwirq)
1312{
1313 irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
1314 irq_set_chip_data(irq, domain->host_data);
1315 set_irq_flags(irq, IRQF_VALID);
1316
1317 tegra_cpuidle_pcie_irqs_in_use();
1318
1319 return 0;
1320}
1321
1322static const struct irq_domain_ops msi_domain_ops = {
1323 .map = tegra_msi_map,
1324};
1325
1326static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1327{
1328 struct platform_device *pdev = to_platform_device(pcie->dev);
 1329 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
1330 struct tegra_msi *msi = &pcie->msi;
1331 unsigned long base;
1332 int err;
1333 u32 reg;
1334
1335 mutex_init(&msi->lock);
1336
1337 msi->chip.dev = pcie->dev;
1338 msi->chip.setup_irq = tegra_msi_setup_irq;
1339 msi->chip.teardown_irq = tegra_msi_teardown_irq;
1340
1341 msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
1342 &msi_domain_ops, &msi->chip);
1343 if (!msi->domain) {
1344 dev_err(&pdev->dev, "failed to create IRQ domain\n");
1345 return -ENOMEM;
1346 }
1347
1348 err = platform_get_irq_byname(pdev, "msi");
1349 if (err < 0) {
1350 dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
1351 goto err;
1352 }
1353
1354 msi->irq = err;
1355
1356 err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
1357 tegra_msi_irq_chip.name, pcie);
1358 if (err < 0) {
1359 dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
1360 goto err;
1361 }
1362
1363 /* setup AFI/FPCI range */
1364 msi->pages = __get_free_pages(GFP_KERNEL, 0);
1365 base = virt_to_phys((void *)msi->pages);
1366
 1367 afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1368 afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
1369 /* this register is in 4K increments */
1370 afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1371
1372 /* enable all MSI vectors */
1373 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
1374 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
1375 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
1376 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
1377 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
1378 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
1379 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
1380 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
1381
1382 /* and unmask the MSI interrupt */
1383 reg = afi_readl(pcie, AFI_INTR_MASK);
1384 reg |= AFI_INTR_MASK_MSI_MASK;
1385 afi_writel(pcie, reg, AFI_INTR_MASK);
1386
1387 return 0;
1388
1389err:
1390 irq_domain_remove(msi->domain);
1391 return err;
1392}
1393
1394static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1395{
1396 struct tegra_msi *msi = &pcie->msi;
1397 unsigned int i, irq;
1398 u32 value;
1399
1400 /* mask the MSI interrupt */
1401 value = afi_readl(pcie, AFI_INTR_MASK);
1402 value &= ~AFI_INTR_MASK_MSI_MASK;
1403 afi_writel(pcie, value, AFI_INTR_MASK);
1404
1405 /* disable all MSI vectors */
1406 afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
1407 afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
1408 afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
1409 afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
1410 afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
1411 afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
1412 afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
1413 afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
1414
1415 free_pages(msi->pages, 0);
1416
1417 if (msi->irq > 0)
1418 free_irq(msi->irq, pcie);
1419
1420 for (i = 0; i < INT_PCI_MSI_NR; i++) {
1421 irq = irq_find_mapping(msi->domain, i);
1422 if (irq > 0)
1423 irq_dispose_mapping(irq);
1424 }
1425
1426 irq_domain_remove(msi->domain);
1427
1428 return 0;
1429}
1430
1431static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1432 u32 *xbar)
1433{
1434 struct device_node *np = pcie->dev->of_node;
1435
1436 if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1437 switch (lanes) {
1438 case 0x0000104:
1439 dev_info(pcie->dev, "4x1, 1x1 configuration\n");
1440 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1441 return 0;
1442
1443 case 0x0000102:
1444 dev_info(pcie->dev, "2x1, 1x1 configuration\n");
1445 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1446 return 0;
1447 }
1448 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1449 switch (lanes) {
1450 case 0x00000204:
1451 dev_info(pcie->dev, "4x1, 2x1 configuration\n");
1452 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1453 return 0;
1454
1455 case 0x00020202:
1456 dev_info(pcie->dev, "2x3 configuration\n");
1457 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1458 return 0;
1459
1460 case 0x00010104:
1461 dev_info(pcie->dev, "4x1, 1x2 configuration\n");
1462 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1463 return 0;
1464 }
1465 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1466 switch (lanes) {
1467 case 0x00000004:
1468 dev_info(pcie->dev, "single-mode configuration\n");
1469 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1470 return 0;
1471
1472 case 0x00000202:
1473 dev_info(pcie->dev, "dual-mode configuration\n");
1474 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1475 return 0;
1476 }
1477 }
1478
1479 return -EINVAL;
1480}
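The lanes value decoded here is packed one byte per root port by tegra_pcie_parse_dt() further down (lanes |= value << (index << 3)), so a Tegra124 board with a x4 port 0 and a x1 port 1 yields 0x0000104, matching the first case above. A stand-alone sketch of that packing (hypothetical per-port lane counts):

#include <stdio.h>

int main(void)
{
	unsigned int num_lanes[2] = { 4, 1 };	/* nvidia,num-lanes of ports 0 and 1 */
	unsigned int lanes = 0;
	unsigned int i;

	/* same packing as tegra_pcie_parse_dt() */
	for (i = 0; i < 2; i++)
		lanes |= num_lanes[i] << (i << 3);

	printf("lanes = 0x%07x\n", lanes);	/* 0x0000104: "4x1, 1x1" on Tegra124 */
	return 0;
}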
1481
1482/*
1483 * Check whether a given set of supplies is available in a device tree node.
1484 * This is used to check whether the new or the legacy device tree bindings
1485 * should be used.
1486 */
1487static bool of_regulator_bulk_available(struct device_node *np,
1488 struct regulator_bulk_data *supplies,
1489 unsigned int num_supplies)
1490{
1491 char property[32];
1492 unsigned int i;
1493
1494 for (i = 0; i < num_supplies; i++) {
1495 snprintf(property, 32, "%s-supply", supplies[i].supply);
1496
1497 if (of_find_property(np, property, NULL) == NULL)
1498 return false;
1499 }
1500
1501 return true;
1502}
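The property names checked here follow the generic "<name>-supply" device tree convention; a stand-alone sketch (example supply names taken from the Tegra30 list below) shows the strings the function looks up.

#include <stdio.h>

int main(void)
{
	const char *supplies[] = { "avdd-pex-pll", "hvdd-pex", "vddio-pex-ctl" };
	char property[32];
	unsigned int i;

	/* same "%s-supply" formatting as of_regulator_bulk_available() */
	for (i = 0; i < 3; i++) {
		snprintf(property, sizeof(property), "%s-supply", supplies[i]);
		printf("%s\n", property);
	}

	return 0;
}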
1503
1504/*
1505 * Old versions of the device tree binding for this device used a set of power
1506 * supplies that didn't match the hardware inputs. This happened to work for a
 1508 * number of cases but is not future-proof. However, to preserve backwards-
1508 * compatibility with old device trees, this function will try to use the old
1509 * set of supplies.
1510 */
1511static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1512{
1513 struct device_node *np = pcie->dev->of_node;
1514
1515 if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1516 pcie->num_supplies = 3;
1517 else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1518 pcie->num_supplies = 2;
1519
1520 if (pcie->num_supplies == 0) {
1521 dev_err(pcie->dev, "device %s not supported in legacy mode\n",
1522 np->full_name);
1523 return -ENODEV;
1524 }
1525
1526 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1527 sizeof(*pcie->supplies),
1528 GFP_KERNEL);
1529 if (!pcie->supplies)
1530 return -ENOMEM;
1531
1532 pcie->supplies[0].supply = "pex-clk";
1533 pcie->supplies[1].supply = "vdd";
1534
1535 if (pcie->num_supplies > 2)
1536 pcie->supplies[2].supply = "avdd";
1537
1538 return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
1539 pcie->supplies);
1540}
1541
1542/*
1543 * Obtains the list of regulators required for a particular generation of the
1544 * IP block.
1545 *
1546 * This would've been nice to do simply by providing static tables for use
1547 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
 1548 * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
1549 * and either seems to be optional depending on which ports are being used.
1550 */
1551static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
1552{
1553 struct device_node *np = pcie->dev->of_node;
1554 unsigned int i = 0;
1555
1556 if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1557 pcie->num_supplies = 7;
1558
1559 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1560 sizeof(*pcie->supplies),
1561 GFP_KERNEL);
1562 if (!pcie->supplies)
1563 return -ENOMEM;
1564
1565 pcie->supplies[i++].supply = "avddio-pex";
1566 pcie->supplies[i++].supply = "dvddio-pex";
1567 pcie->supplies[i++].supply = "avdd-pex-pll";
1568 pcie->supplies[i++].supply = "hvdd-pex";
1569 pcie->supplies[i++].supply = "hvdd-pex-pll-e";
1570 pcie->supplies[i++].supply = "vddio-pex-ctl";
1571 pcie->supplies[i++].supply = "avdd-pll-erefe";
1572 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1573 bool need_pexa = false, need_pexb = false;
1574
1575 /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
1576 if (lane_mask & 0x0f)
1577 need_pexa = true;
1578
1579 /* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
1580 if (lane_mask & 0x30)
1581 need_pexb = true;
1582
1583 pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
1584 (need_pexb ? 2 : 0);
1585
1586 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1587 sizeof(*pcie->supplies),
1588 GFP_KERNEL);
1589 if (!pcie->supplies)
1590 return -ENOMEM;
1591
1592 pcie->supplies[i++].supply = "avdd-pex-pll";
1593 pcie->supplies[i++].supply = "hvdd-pex";
1594 pcie->supplies[i++].supply = "vddio-pex-ctl";
1595 pcie->supplies[i++].supply = "avdd-plle";
1596
1597 if (need_pexa) {
1598 pcie->supplies[i++].supply = "avdd-pexa";
1599 pcie->supplies[i++].supply = "vdd-pexa";
1600 }
1601
1602 if (need_pexb) {
1603 pcie->supplies[i++].supply = "avdd-pexb";
1604 pcie->supplies[i++].supply = "vdd-pexb";
1605 }
1606 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1607 pcie->num_supplies = 5;
1608
1609 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1610 sizeof(*pcie->supplies),
1611 GFP_KERNEL);
1612 if (!pcie->supplies)
1613 return -ENOMEM;
1614
1615 pcie->supplies[0].supply = "avdd-pex";
1616 pcie->supplies[1].supply = "vdd-pex";
1617 pcie->supplies[2].supply = "avdd-pex-pll";
1618 pcie->supplies[3].supply = "avdd-plle";
1619 pcie->supplies[4].supply = "vddio-pex-clk";
1620 }
1621
1622 if (of_regulator_bulk_available(pcie->dev->of_node, pcie->supplies,
1623 pcie->num_supplies))
1624 return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
1625 pcie->supplies);
1626
1627 /*
1628 * If not all regulators are available for this new scheme, assume
1629 * that the device tree complies with an older version of the device
1630 * tree binding.
1631 */
1632 dev_info(pcie->dev, "using legacy DT binding for power supplies\n");
1633
1634 devm_kfree(pcie->dev, pcie->supplies);
1635 pcie->num_supplies = 0;
1636
1637 return tegra_pcie_get_legacy_regulators(pcie);
1638}
1639
1640static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
1641{
 1642 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
1643 struct device_node *np = pcie->dev->of_node, *port;
1644 struct of_pci_range_parser parser;
1645 struct of_pci_range range;
1646 u32 lanes = 0, mask = 0;
1647 unsigned int lane = 0;
 1648 struct resource res;
1649 int err;
1650
1651 memset(&pcie->all, 0, sizeof(pcie->all));
1652 pcie->all.flags = IORESOURCE_MEM;
1653 pcie->all.name = np->full_name;
1654 pcie->all.start = ~0;
1655 pcie->all.end = 0;
1656
1657 if (of_pci_range_parser_init(&parser, np)) {
1658 dev_err(pcie->dev, "missing \"ranges\" property\n");
1659 return -EINVAL;
1660 }
1661
 1662 for_each_of_pci_range(&parser, &range) {
1663 err = of_pci_range_to_resource(&range, np, &res);
1664 if (err < 0)
1665 return err;
1666
1667 switch (res.flags & IORESOURCE_TYPE_BITS) {
1668 case IORESOURCE_IO:
1669 memcpy(&pcie->pio, &res, sizeof(res));
1670 pcie->pio.name = np->full_name;
1671
1672 /*
1673 * The Tegra PCIe host bridge uses this to program the
1674 * mapping of the I/O space to the physical address,
1675 * so we override the .start and .end fields here that
1676 * of_pci_range_to_resource() converted to I/O space.
1677 * We also set the IORESOURCE_MEM type to clarify that
1678 * the resource is in the physical memory space.
1679 */
1680 pcie->io.start = range.cpu_addr;
1681 pcie->io.end = range.cpu_addr + range.size - 1;
1682 pcie->io.flags = IORESOURCE_MEM;
1683 pcie->io.name = "I/O";
1684
1685 memcpy(&res, &pcie->io, sizeof(res));
1686 break;
1687
1688 case IORESOURCE_MEM:
1689 if (res.flags & IORESOURCE_PREFETCH) {
1690 memcpy(&pcie->prefetch, &res, sizeof(res));
 1691 pcie->prefetch.name = "prefetchable";
1692 } else {
1693 memcpy(&pcie->mem, &res, sizeof(res));
 1694 pcie->mem.name = "non-prefetchable";
1695 }
1696 break;
1697 }
1698
1699 if (res.start <= pcie->all.start)
1700 pcie->all.start = res.start;
1701
1702 if (res.end >= pcie->all.end)
1703 pcie->all.end = res.end;
1704 }
1705
1706 err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->all);
1707 if (err < 0)
1708 return err;
1709
1710 err = of_pci_parse_bus_range(np, &pcie->busn);
1711 if (err < 0) {
1712 dev_err(pcie->dev, "failed to parse ranges property: %d\n",
1713 err);
1714 pcie->busn.name = np->name;
1715 pcie->busn.start = 0;
1716 pcie->busn.end = 0xff;
1717 pcie->busn.flags = IORESOURCE_BUS;
1718 }
1719
1720 /* parse root ports */
1721 for_each_child_of_node(np, port) {
1722 struct tegra_pcie_port *rp;
1723 unsigned int index;
1724 u32 value;
1725
1726 err = of_pci_get_devfn(port);
1727 if (err < 0) {
1728 dev_err(pcie->dev, "failed to parse address: %d\n",
1729 err);
1730 return err;
1731 }
1732
1733 index = PCI_SLOT(err);
1734
 1735 if (index < 1 || index > soc->num_ports) {
1736 dev_err(pcie->dev, "invalid port number: %d\n", index);
1737 return -EINVAL;
1738 }
1739
1740 index--;
1741
1742 err = of_property_read_u32(port, "nvidia,num-lanes", &value);
1743 if (err < 0) {
1744 dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
1745 err);
1746 return err;
1747 }
1748
1749 if (value > 16) {
1750 dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
1751 return -EINVAL;
1752 }
1753
1754 lanes |= value << (index << 3);
1755
1756 if (!of_device_is_available(port)) {
1757 lane += value;
 1758 continue;
1759 }
1760
1761 mask |= ((1 << value) - 1) << lane;
1762 lane += value;
1763
1764 rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
1765 if (!rp)
1766 return -ENOMEM;
1767
1768 err = of_address_to_resource(port, 0, &rp->regs);
1769 if (err < 0) {
1770 dev_err(pcie->dev, "failed to parse address: %d\n",
1771 err);
1772 return err;
1773 }
1774
1775 INIT_LIST_HEAD(&rp->list);
1776 rp->index = index;
1777 rp->lanes = value;
1778 rp->pcie = pcie;
1779
1780 rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
1781 if (IS_ERR(rp->base))
1782 return PTR_ERR(rp->base);
1783
1784 list_add_tail(&rp->list, &pcie->ports);
1785 }
1786
1787 err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
1788 if (err < 0) {
1789 dev_err(pcie->dev, "invalid lane configuration\n");
1790 return err;
1791 }
1792
1793 err = tegra_pcie_get_regulators(pcie, mask);
1794 if (err < 0)
1795 return err;
1796
1797 return 0;
1798}
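The lane_mask passed to tegra_pcie_get_regulators() is built lane by lane (mask |= ((1 << value) - 1) << lane), so on a hypothetical Tegra30 board with a x4 port and a x2 port the mask becomes 0x3f, selecting both the PEXA (lanes 0-3) and PEXB (lanes 4-5) supplies. A stand-alone sketch:

#include <stdio.h>

int main(void)
{
	unsigned int num_lanes[2] = { 4, 2 };	/* hypothetical x4 + x2 configuration */
	unsigned int mask = 0, lane = 0;
	unsigned int i;

	/* same accumulation as tegra_pcie_parse_dt() */
	for (i = 0; i < 2; i++) {
		mask |= ((1 << num_lanes[i]) - 1) << lane;
		lane += num_lanes[i];
	}

	printf("lane_mask = 0x%02x\n", mask);	/* 0x3f -> PEXA and PEXB both needed */
	return 0;
}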
1799
1800/*
1801 * FIXME: If there are no PCIe cards attached, then calling this function
 1802 * can increase the boot time considerably because of the long timeout
 1803 * loops involved.
1804 */
1805#define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */
1806static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
1807{
1808 unsigned int retries = 3;
1809 unsigned long value;
1810
1811 /* override presence detection */
1812 value = readl(port->base + RP_PRIV_MISC);
1813 value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
1814 value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
1815 writel(value, port->base + RP_PRIV_MISC);
1816
1817 do {
1818 unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
1819
1820 do {
1821 value = readl(port->base + RP_VEND_XP);
1822
1823 if (value & RP_VEND_XP_DL_UP)
1824 break;
1825
1826 usleep_range(1000, 2000);
1827 } while (--timeout);
1828
1829 if (!timeout) {
1830 dev_err(port->pcie->dev, "link %u down, retrying\n",
1831 port->index);
1832 goto retry;
1833 }
1834
1835 timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
1836
1837 do {
1838 value = readl(port->base + RP_LINK_CONTROL_STATUS);
1839
1840 if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
1841 return true;
1842
1843 usleep_range(1000, 2000);
1844 } while (--timeout);
1845
1846retry:
1847 tegra_pcie_port_reset(port);
1848 } while (--retries);
1849
1850 return false;
1851}
1852
1853static int tegra_pcie_enable(struct tegra_pcie *pcie)
1854{
1855 struct tegra_pcie_port *port, *tmp;
1856 struct hw_pci hw;
1857
1858 list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
1859 dev_info(pcie->dev, "probing port %u, using %u lanes\n",
1860 port->index, port->lanes);
1861
1862 tegra_pcie_port_enable(port);
1863
1864 if (tegra_pcie_port_check_link(port))
1865 continue;
1866
1867 dev_info(pcie->dev, "link %u down, ignoring\n", port->index);
1868
1869 tegra_pcie_port_disable(port);
1870 tegra_pcie_port_free(port);
1871 }
1872
1873 memset(&hw, 0, sizeof(hw));
1874
1875#ifdef CONFIG_PCI_MSI
1876 hw.msi_ctrl = &pcie->msi.chip;
1877#endif
1878
1879 hw.nr_controllers = 1;
1880 hw.private_data = (void **)&pcie;
1881 hw.setup = tegra_pcie_setup;
1882 hw.map_irq = tegra_pcie_map_irq;
1883 hw.scan = tegra_pcie_scan_bus;
1884 hw.ops = &tegra_pcie_ops;
1885
1886 pci_common_init_dev(pcie->dev, &hw);
1887
1888 return 0;
1889}
1890
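/*
 * Per-SoC configuration: Tegra20, Tegra30 and Tegra124 differ in the number
 * of root ports, the MSI base shift, the PADS PLL control register layout and
 * TX clock reference selection, and in which features (CLKREQ, bias control,
 * presence sense, CML clock, Gen2 signalling) are available. The of_device_id
 * table below selects the matching entry based on the compatible string.
 */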
1891static const struct tegra_pcie_soc_data tegra20_pcie_data = {
1892 .num_ports = 2,
1893 .msi_base_shift = 0,
1894 .pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
1895 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
1896 .has_pex_clkreq_en = false,
1897 .has_pex_bias_ctrl = false,
1898 .has_intr_prsnt_sense = false,
1899 .has_cml_clk = false,
1900 .has_gen2 = false,
1901};
1902
1903static const struct tegra_pcie_soc_data tegra30_pcie_data = {
1904 .num_ports = 3,
1905 .msi_base_shift = 8,
1906 .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
1907 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
1908 .has_pex_clkreq_en = true,
1909 .has_pex_bias_ctrl = true,
1910 .has_intr_prsnt_sense = true,
1911 .has_cml_clk = true,
1912 .has_gen2 = false,
1913};
1914
1915static const struct tegra_pcie_soc_data tegra124_pcie_data = {
1916 .num_ports = 2,
1917 .msi_base_shift = 8,
1918 .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
1919 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
1920 .has_pex_clkreq_en = true,
1921 .has_pex_bias_ctrl = true,
1922 .has_intr_prsnt_sense = true,
1923 .has_cml_clk = true,
1924 .has_gen2 = true,
1925};
1926
1927static const struct of_device_id tegra_pcie_of_match[] = {
1928 { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie_data },
1929 { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
1930 { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
1931 { },
1932};
1933MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
1934
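/*
 * The debugfs "ports" file is implemented as a seq_file over the port list:
 * each line reports a port index followed by "up" when RP_VEND_XP_DL_UP is
 * set and "active" when DL_LINK_ACTIVE is set. With debugfs mounted in the
 * usual location, the output would look roughly like:
 *
 *   # cat /sys/kernel/debug/pcie/ports
 *   Index  Status
 *    0     up, active
 */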
1935static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
1936{
1937 struct tegra_pcie *pcie = s->private;
1938
1939 if (list_empty(&pcie->ports))
1940 return NULL;
1941
1942 seq_printf(s, "Index Status\n");
1943
1944 return seq_list_start(&pcie->ports, *pos);
1945}
1946
1947static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
1948{
1949 struct tegra_pcie *pcie = s->private;
1950
1951 return seq_list_next(v, &pcie->ports, pos);
1952}
1953
1954static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
1955{
1956}
1957
1958static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
1959{
1960 bool up = false, active = false;
1961 struct tegra_pcie_port *port;
1962 unsigned int value;
1963
1964 port = list_entry(v, struct tegra_pcie_port, list);
1965
1966 value = readl(port->base + RP_VEND_XP);
1967
1968 if (value & RP_VEND_XP_DL_UP)
1969 up = true;
1970
1971 value = readl(port->base + RP_LINK_CONTROL_STATUS);
1972
1973 if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
1974 active = true;
1975
1976 seq_printf(s, "%2u ", port->index);
1977
1978 if (up)
1979 seq_printf(s, "up");
1980
1981 if (active) {
1982 if (up)
1983 seq_printf(s, ", ");
1984
1985 seq_printf(s, "active");
1986 }
1987
1988 seq_printf(s, "\n");
1989 return 0;
1990}
1991
1992static const struct seq_operations tegra_pcie_ports_seq_ops = {
1993 .start = tegra_pcie_ports_seq_start,
1994 .next = tegra_pcie_ports_seq_next,
1995 .stop = tegra_pcie_ports_seq_stop,
1996 .show = tegra_pcie_ports_seq_show,
1997};
1998
1999static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
2000{
2001 struct tegra_pcie *pcie = inode->i_private;
2002 struct seq_file *s;
2003 int err;
2004
2005 err = seq_open(file, &tegra_pcie_ports_seq_ops);
2006 if (err)
2007 return err;
2008
2009 s = file->private_data;
2010 s->private = pcie;
2011
2012 return 0;
2013}
2014
2015static const struct file_operations tegra_pcie_ports_ops = {
2016 .owner = THIS_MODULE,
2017 .open = tegra_pcie_ports_open,
2018 .read = seq_read,
2019 .llseek = seq_lseek,
2020 .release = seq_release,
2021};
2022
2023static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2024{
2025 struct dentry *file;
2026
2027 pcie->debugfs = debugfs_create_dir("pcie", NULL);
2028 if (!pcie->debugfs)
2029 return -ENOMEM;
2030
2031 file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
2032 pcie, &tegra_pcie_ports_ops);
2033 if (!file)
2034 goto remove;
2035
2036 return 0;
2037
2038remove:
2039 debugfs_remove_recursive(pcie->debugfs);
2040 pcie->debugfs = NULL;
2041 return -ENOMEM;
2042}
2043
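/*
 * Probe order: parse the device tree, acquire the controller resources,
 * enable the PCIe/AFI controller, program the AFI address translations,
 * optionally enable MSI support, bring up the root ports and register the
 * host bridge, and finally (with CONFIG_DEBUG_FS) create the debugfs
 * interface. Failures unwind through the disable_msi and put_resources
 * labels below.
 */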
2044static int tegra_pcie_probe(struct platform_device *pdev)
2045{
2046 const struct of_device_id *match;
2047 struct tegra_pcie *pcie;
2048 int err;
2049
2050 match = of_match_device(tegra_pcie_of_match, &pdev->dev);
2051 if (!match)
2052 return -ENODEV;
2053
2054 pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
2055 if (!pcie)
2056 return -ENOMEM;
2057
2058 INIT_LIST_HEAD(&pcie->buses);
2059 INIT_LIST_HEAD(&pcie->ports);
2060 pcie->soc_data = match->data;
2061 pcie->dev = &pdev->dev;
2062
2063 err = tegra_pcie_parse_dt(pcie);
2064 if (err < 0)
2065 return err;
2066
2067 pcibios_min_mem = 0;
2068
2069 err = tegra_pcie_get_resources(pcie);
2070 if (err < 0) {
2071 dev_err(&pdev->dev, "failed to request resources: %d\n", err);
2072 return err;
2073 }
2074
2075 err = tegra_pcie_enable_controller(pcie);
2076 if (err)
2077 goto put_resources;
2078
2079 /* setup the AFI address translations */
2080 tegra_pcie_setup_translations(pcie);
2081
2082 if (IS_ENABLED(CONFIG_PCI_MSI)) {
2083 err = tegra_pcie_enable_msi(pcie);
2084 if (err < 0) {
2085 dev_err(&pdev->dev,
2086 "failed to enable MSI support: %d\n",
2087 err);
2088 goto put_resources;
2089 }
2090 }
2091
2092 err = tegra_pcie_enable(pcie);
2093 if (err < 0) {
2094 dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
2095 goto disable_msi;
2096 }
2097
2098 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2099 err = tegra_pcie_debugfs_init(pcie);
2100 if (err < 0)
2101 dev_err(&pdev->dev, "failed to setup debugfs: %d\n",
2102 err);
2103 }
2104
2105 platform_set_drvdata(pdev, pcie);
2106 return 0;
2107
2108disable_msi:
2109 if (IS_ENABLED(CONFIG_PCI_MSI))
2110 tegra_pcie_disable_msi(pcie);
2111put_resources:
2112 tegra_pcie_put_resources(pcie);
2113 return err;
2114}
2115
2116static struct platform_driver tegra_pcie_driver = {
2117 .driver = {
2118 .name = "tegra-pcie",
2119 .of_match_table = tegra_pcie_of_match,
2120 .suppress_bind_attrs = true,
2121 },
2122 .probe = tegra_pcie_probe,
2123};
2124module_platform_driver(tegra_pcie_driver);
2125
2126MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
2127MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
2128 MODULE_LICENSE("GPL v2");