PCI/MSI: Rename __read_msi_msg() to __pci_read_msi_msg()
[deliverable/linux.git] / drivers / pci / host / pci-tegra.c
CommitLineData
d1523b52 1/*
94716cdd 2 * PCIe host controller driver for Tegra SoCs
d1523b52
TR
3 *
4 * Copyright (c) 2010, CompuLab, Ltd.
5 * Author: Mike Rapoport <mike@compulab.co.il>
6 *
7 * Based on NVIDIA PCIe driver
8 * Copyright (c) 2008-2009, NVIDIA Corporation.
9 *
10 * Bits taken from arch/arm/mach-dove/pcie.c
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 * more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
25 */
26
27#include <linux/clk.h>
2cb989f6 28#include <linux/debugfs.h>
d1523b52
TR
29#include <linux/delay.h>
30#include <linux/export.h>
31#include <linux/interrupt.h>
32#include <linux/irq.h>
33#include <linux/irqdomain.h>
34#include <linux/kernel.h>
35#include <linux/module.h>
36#include <linux/msi.h>
37#include <linux/of_address.h>
38#include <linux/of_pci.h>
39#include <linux/of_platform.h>
40#include <linux/pci.h>
7f1f054b 41#include <linux/phy/phy.h>
d1523b52 42#include <linux/platform_device.h>
3127a6b2 43#include <linux/reset.h>
d1523b52
TR
44#include <linux/sizes.h>
45#include <linux/slab.h>
d1523b52
TR
46#include <linux/vmalloc.h>
47#include <linux/regulator/consumer.h>
48
306a7f91 49#include <soc/tegra/cpuidle.h>
7232398a 50#include <soc/tegra/pmc.h>
306a7f91 51
d1523b52
TR
52#include <asm/mach/irq.h>
53#include <asm/mach/map.h>
54#include <asm/mach/pci.h>
55
56#define INT_PCI_MSI_NR (8 * 32)
d1523b52
TR
57
58/* register definitions */
59
60#define AFI_AXI_BAR0_SZ 0x00
61#define AFI_AXI_BAR1_SZ 0x04
62#define AFI_AXI_BAR2_SZ 0x08
63#define AFI_AXI_BAR3_SZ 0x0c
64#define AFI_AXI_BAR4_SZ 0x10
65#define AFI_AXI_BAR5_SZ 0x14
66
67#define AFI_AXI_BAR0_START 0x18
68#define AFI_AXI_BAR1_START 0x1c
69#define AFI_AXI_BAR2_START 0x20
70#define AFI_AXI_BAR3_START 0x24
71#define AFI_AXI_BAR4_START 0x28
72#define AFI_AXI_BAR5_START 0x2c
73
74#define AFI_FPCI_BAR0 0x30
75#define AFI_FPCI_BAR1 0x34
76#define AFI_FPCI_BAR2 0x38
77#define AFI_FPCI_BAR3 0x3c
78#define AFI_FPCI_BAR4 0x40
79#define AFI_FPCI_BAR5 0x44
80
81#define AFI_CACHE_BAR0_SZ 0x48
82#define AFI_CACHE_BAR0_ST 0x4c
83#define AFI_CACHE_BAR1_SZ 0x50
84#define AFI_CACHE_BAR1_ST 0x54
85
86#define AFI_MSI_BAR_SZ 0x60
87#define AFI_MSI_FPCI_BAR_ST 0x64
88#define AFI_MSI_AXI_BAR_ST 0x68
89
90#define AFI_MSI_VEC0 0x6c
91#define AFI_MSI_VEC1 0x70
92#define AFI_MSI_VEC2 0x74
93#define AFI_MSI_VEC3 0x78
94#define AFI_MSI_VEC4 0x7c
95#define AFI_MSI_VEC5 0x80
96#define AFI_MSI_VEC6 0x84
97#define AFI_MSI_VEC7 0x88
98
99#define AFI_MSI_EN_VEC0 0x8c
100#define AFI_MSI_EN_VEC1 0x90
101#define AFI_MSI_EN_VEC2 0x94
102#define AFI_MSI_EN_VEC3 0x98
103#define AFI_MSI_EN_VEC4 0x9c
104#define AFI_MSI_EN_VEC5 0xa0
105#define AFI_MSI_EN_VEC6 0xa4
106#define AFI_MSI_EN_VEC7 0xa8
107
108#define AFI_CONFIGURATION 0xac
109#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
110
111#define AFI_FPCI_ERROR_MASKS 0xb0
112
113#define AFI_INTR_MASK 0xb4
114#define AFI_INTR_MASK_INT_MASK (1 << 0)
115#define AFI_INTR_MASK_MSI_MASK (1 << 8)
116
117#define AFI_INTR_CODE 0xb8
118#define AFI_INTR_CODE_MASK 0xf
7f1f054b
TR
119#define AFI_INTR_INI_SLAVE_ERROR 1
120#define AFI_INTR_INI_DECODE_ERROR 2
d1523b52
TR
121#define AFI_INTR_TARGET_ABORT 3
122#define AFI_INTR_MASTER_ABORT 4
123#define AFI_INTR_INVALID_WRITE 5
124#define AFI_INTR_LEGACY 6
125#define AFI_INTR_FPCI_DECODE_ERROR 7
7f1f054b
TR
126#define AFI_INTR_AXI_DECODE_ERROR 8
127#define AFI_INTR_FPCI_TIMEOUT 9
128#define AFI_INTR_PE_PRSNT_SENSE 10
129#define AFI_INTR_PE_CLKREQ_SENSE 11
130#define AFI_INTR_CLKCLAMP_SENSE 12
131#define AFI_INTR_RDY4PD_SENSE 13
132#define AFI_INTR_P2P_ERROR 14
d1523b52
TR
133
134#define AFI_INTR_SIGNATURE 0xbc
135#define AFI_UPPER_FPCI_ADDRESS 0xc0
136#define AFI_SM_INTR_ENABLE 0xc4
137#define AFI_SM_INTR_INTA_ASSERT (1 << 0)
138#define AFI_SM_INTR_INTB_ASSERT (1 << 1)
139#define AFI_SM_INTR_INTC_ASSERT (1 << 2)
140#define AFI_SM_INTR_INTD_ASSERT (1 << 3)
141#define AFI_SM_INTR_INTA_DEASSERT (1 << 4)
142#define AFI_SM_INTR_INTB_DEASSERT (1 << 5)
143#define AFI_SM_INTR_INTC_DEASSERT (1 << 6)
144#define AFI_SM_INTR_INTD_DEASSERT (1 << 7)
145
146#define AFI_AFI_INTR_ENABLE 0xc8
147#define AFI_INTR_EN_INI_SLVERR (1 << 0)
148#define AFI_INTR_EN_INI_DECERR (1 << 1)
149#define AFI_INTR_EN_TGT_SLVERR (1 << 2)
150#define AFI_INTR_EN_TGT_DECERR (1 << 3)
151#define AFI_INTR_EN_TGT_WRERR (1 << 4)
152#define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
153#define AFI_INTR_EN_AXI_DECERR (1 << 6)
154#define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
94716cdd 155#define AFI_INTR_EN_PRSNT_SENSE (1 << 8)
d1523b52
TR
156
157#define AFI_PCIE_CONFIG 0x0f8
158#define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1))
159#define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe
160#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
161#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
94716cdd 162#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
7f1f054b 163#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20)
d1523b52 164#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
94716cdd 165#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
7f1f054b 166#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20)
94716cdd 167#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
d1523b52
TR
168
169#define AFI_FUSE 0x104
170#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
171
172#define AFI_PEX0_CTRL 0x110
173#define AFI_PEX1_CTRL 0x118
94716cdd 174#define AFI_PEX2_CTRL 0x128
d1523b52 175#define AFI_PEX_CTRL_RST (1 << 0)
94716cdd 176#define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
d1523b52 177#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
7f1f054b
TR
178#define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4)
179
180#define AFI_PLLE_CONTROL 0x160
181#define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
182#define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
d1523b52 183
94716cdd
JA
184#define AFI_PEXBIAS_CTRL_0 0x168
185
d1523b52
TR
186#define RP_VEND_XP 0x00000F00
187#define RP_VEND_XP_DL_UP (1 << 30)
188
7f1f054b
TR
189#define RP_PRIV_MISC 0x00000FE0
190#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0)
191#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0)
192
d1523b52
TR
193#define RP_LINK_CONTROL_STATUS 0x00000090
194#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
195#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
196
197#define PADS_CTL_SEL 0x0000009C
198
199#define PADS_CTL 0x000000A0
200#define PADS_CTL_IDDQ_1L (1 << 0)
201#define PADS_CTL_TX_DATA_EN_1L (1 << 6)
202#define PADS_CTL_RX_DATA_EN_1L (1 << 10)
203
94716cdd
JA
204#define PADS_PLL_CTL_TEGRA20 0x000000B8
205#define PADS_PLL_CTL_TEGRA30 0x000000B4
d1523b52
TR
206#define PADS_PLL_CTL_RST_B4SM (1 << 1)
207#define PADS_PLL_CTL_LOCKDET (1 << 8)
208#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
209#define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16)
210#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16)
211#define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16)
212#define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
213#define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20)
214#define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
94716cdd
JA
215#define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22)
216
217#define PADS_REFCLK_CFG0 0x000000C8
218#define PADS_REFCLK_CFG1 0x000000CC
7f1f054b 219#define PADS_REFCLK_BIAS 0x000000D0
d1523b52 220
b02b07ad
SW
221/*
222 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
223 * entries, one entry per PCIe port. These field definitions and desired
224 * values aren't in the TRM, but do come from NVIDIA.
225 */
226#define PADS_REFCLK_CFG_TERM_SHIFT 2 /* 6:2 */
227#define PADS_REFCLK_CFG_E_TERM_SHIFT 7
228#define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */
229#define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */
230
231/* Default value provided by HW engineering is 0xfa5c */
232#define PADS_REFCLK_CFG_VALUE \
233 ( \
234 (0x17 << PADS_REFCLK_CFG_TERM_SHIFT) | \
235 (0 << PADS_REFCLK_CFG_E_TERM_SHIFT) | \
236 (0xa << PADS_REFCLK_CFG_PREDI_SHIFT) | \
237 (0xf << PADS_REFCLK_CFG_DRVI_SHIFT) \
238 )
239
d1523b52 240struct tegra_msi {
c2791b80 241 struct msi_controller chip;
d1523b52
TR
242 DECLARE_BITMAP(used, INT_PCI_MSI_NR);
243 struct irq_domain *domain;
244 unsigned long pages;
245 struct mutex lock;
246 int irq;
247};
248
94716cdd
JA
249/* used to differentiate between Tegra SoC generations */
250struct tegra_pcie_soc_data {
251 unsigned int num_ports;
252 unsigned int msi_base_shift;
253 u32 pads_pll_ctl;
254 u32 tx_ref_sel;
255 bool has_pex_clkreq_en;
256 bool has_pex_bias_ctrl;
257 bool has_intr_prsnt_sense;
94716cdd 258 bool has_cml_clk;
7f1f054b 259 bool has_gen2;
94716cdd
JA
260};
261
c2791b80 262static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
d1523b52
TR
263{
264 return container_of(chip, struct tegra_msi, chip);
265}
266
267struct tegra_pcie {
268 struct device *dev;
269
270 void __iomem *pads;
271 void __iomem *afi;
272 int irq;
273
f7625980 274 struct list_head buses;
d1523b52
TR
275 struct resource *cs;
276
41534e53 277 struct resource all;
d1523b52
TR
278 struct resource io;
279 struct resource mem;
280 struct resource prefetch;
281 struct resource busn;
282
283 struct clk *pex_clk;
284 struct clk *afi_clk;
d1523b52 285 struct clk *pll_e;
94716cdd 286 struct clk *cml_clk;
d1523b52 287
3127a6b2
SW
288 struct reset_control *pex_rst;
289 struct reset_control *afi_rst;
290 struct reset_control *pcie_xrst;
291
7f1f054b
TR
292 struct phy *phy;
293
d1523b52
TR
294 struct tegra_msi msi;
295
296 struct list_head ports;
297 unsigned int num_ports;
298 u32 xbar_config;
299
077fb158
TR
300 struct regulator_bulk_data *supplies;
301 unsigned int num_supplies;
94716cdd
JA
302
303 const struct tegra_pcie_soc_data *soc_data;
2cb989f6 304 struct dentry *debugfs;
d1523b52
TR
305};
306
307struct tegra_pcie_port {
308 struct tegra_pcie *pcie;
309 struct list_head list;
310 struct resource regs;
311 void __iomem *base;
312 unsigned int index;
313 unsigned int lanes;
314};
315
316struct tegra_pcie_bus {
317 struct vm_struct *area;
318 struct list_head list;
319 unsigned int nr;
320};
321
322static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
323{
324 return sys->private_data;
325}
326
327static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
328 unsigned long offset)
329{
330 writel(value, pcie->afi + offset);
331}
332
333static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
334{
335 return readl(pcie->afi + offset);
336}
337
338static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
339 unsigned long offset)
340{
341 writel(value, pcie->pads + offset);
342}
343
344static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
345{
346 return readl(pcie->pads + offset);
347}
348
/*
 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
 * register accesses are mapped:
 *
 *    [27:24] extended register number
 *    [23:16] bus number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * Mapping the whole extended configuration space would require 256 MiB of
 * virtual address space, only a small part of which will actually be used.
 * To work around this, a 1 MiB of virtual addresses are allocated per bus
 * when the bus is first accessed. When the physical range is mapped, the
 * bus number bits are hidden so that the extended register number bits
 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
 *
 *    [19:16] extended register number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * This is achieved by stitching together 16 chunks of 64 KiB of physical
 * address space via the MMU.
 */
static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
{
	/* fold the extended register bits down next to device/function */
	return ((where & 0xf00) << 8) | (PCI_SLOT(devfn) << 11) |
	       (PCI_FUNC(devfn) << 8) | (where & 0xfc);
}
380
381static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
382 unsigned int busnr)
383{
384 pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
385 L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
386 phys_addr_t cs = pcie->cs->start;
387 struct tegra_pcie_bus *bus;
388 unsigned int i;
389 int err;
390
391 bus = kzalloc(sizeof(*bus), GFP_KERNEL);
392 if (!bus)
393 return ERR_PTR(-ENOMEM);
394
395 INIT_LIST_HEAD(&bus->list);
396 bus->nr = busnr;
397
398 /* allocate 1 MiB of virtual addresses */
399 bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
400 if (!bus->area) {
401 err = -ENOMEM;
402 goto free;
403 }
404
405 /* map each of the 16 chunks of 64 KiB each */
406 for (i = 0; i < 16; i++) {
407 unsigned long virt = (unsigned long)bus->area->addr +
408 i * SZ_64K;
8d41794c 409 phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;
d1523b52
TR
410
411 err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
412 if (err < 0) {
413 dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
414 err);
415 goto unmap;
416 }
417 }
418
419 return bus;
420
421unmap:
422 vunmap(bus->area->addr);
423free:
424 kfree(bus);
425 return ERR_PTR(err);
426}
427
428/*
429 * Look up a virtual address mapping for the specified bus number. If no such
f7625980 430 * mapping exists, try to create one.
d1523b52
TR
431 */
432static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
433 unsigned int busnr)
434{
435 struct tegra_pcie_bus *bus;
436
f7625980 437 list_for_each_entry(bus, &pcie->buses, list)
d1523b52 438 if (bus->nr == busnr)
1e65249d 439 return (void __iomem *)bus->area->addr;
d1523b52
TR
440
441 bus = tegra_pcie_bus_alloc(pcie, busnr);
442 if (IS_ERR(bus))
443 return NULL;
444
f7625980 445 list_add_tail(&bus->list, &pcie->buses);
d1523b52 446
1e65249d 447 return (void __iomem *)bus->area->addr;
d1523b52
TR
448}
449
450static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
451 unsigned int devfn,
452 int where)
453{
454 struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
455 void __iomem *addr = NULL;
456
457 if (bus->number == 0) {
458 unsigned int slot = PCI_SLOT(devfn);
459 struct tegra_pcie_port *port;
460
461 list_for_each_entry(port, &pcie->ports, list) {
462 if (port->index + 1 == slot) {
463 addr = port->base + (where & ~3);
464 break;
465 }
466 }
467 } else {
468 addr = tegra_pcie_bus_map(pcie, bus->number);
469 if (!addr) {
470 dev_err(pcie->dev,
471 "failed to map cfg. space for bus %u\n",
472 bus->number);
473 return NULL;
474 }
475
476 addr += tegra_pcie_conf_offset(devfn, where);
477 }
478
479 return addr;
480}
481
482static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
483 int where, int size, u32 *value)
484{
485 void __iomem *addr;
486
487 addr = tegra_pcie_conf_address(bus, devfn, where);
488 if (!addr) {
489 *value = 0xffffffff;
490 return PCIBIOS_DEVICE_NOT_FOUND;
491 }
492
493 *value = readl(addr);
494
495 if (size == 1)
496 *value = (*value >> (8 * (where & 3))) & 0xff;
497 else if (size == 2)
498 *value = (*value >> (8 * (where & 3))) & 0xffff;
499
500 return PCIBIOS_SUCCESSFUL;
501}
502
503static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
504 int where, int size, u32 value)
505{
506 void __iomem *addr;
507 u32 mask, tmp;
508
509 addr = tegra_pcie_conf_address(bus, devfn, where);
510 if (!addr)
511 return PCIBIOS_DEVICE_NOT_FOUND;
512
513 if (size == 4) {
514 writel(value, addr);
515 return PCIBIOS_SUCCESSFUL;
516 }
517
518 if (size == 2)
519 mask = ~(0xffff << ((where & 0x3) * 8));
520 else if (size == 1)
521 mask = ~(0xff << ((where & 0x3) * 8));
522 else
523 return PCIBIOS_BAD_REGISTER_NUMBER;
524
525 tmp = readl(addr) & mask;
526 tmp |= value << ((where & 0x3) * 8);
527 writel(tmp, addr);
528
529 return PCIBIOS_SUCCESSFUL;
530}
531
532static struct pci_ops tegra_pcie_ops = {
533 .read = tegra_pcie_read_conf,
534 .write = tegra_pcie_write_conf,
535};
536
537static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
538{
539 unsigned long ret = 0;
540
541 switch (port->index) {
542 case 0:
543 ret = AFI_PEX0_CTRL;
544 break;
545
546 case 1:
547 ret = AFI_PEX1_CTRL;
548 break;
94716cdd
JA
549
550 case 2:
551 ret = AFI_PEX2_CTRL;
552 break;
d1523b52
TR
553 }
554
555 return ret;
556}
557
558static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
559{
560 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
561 unsigned long value;
562
563 /* pulse reset signal */
564 value = afi_readl(port->pcie, ctrl);
565 value &= ~AFI_PEX_CTRL_RST;
566 afi_writel(port->pcie, value, ctrl);
567
568 usleep_range(1000, 2000);
569
570 value = afi_readl(port->pcie, ctrl);
571 value |= AFI_PEX_CTRL_RST;
572 afi_writel(port->pcie, value, ctrl);
573}
574
575static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
576{
94716cdd 577 const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
d1523b52
TR
578 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
579 unsigned long value;
580
581 /* enable reference clock */
582 value = afi_readl(port->pcie, ctrl);
583 value |= AFI_PEX_CTRL_REFCLK_EN;
94716cdd
JA
584
585 if (soc->has_pex_clkreq_en)
586 value |= AFI_PEX_CTRL_CLKREQ_EN;
587
7f1f054b
TR
588 value |= AFI_PEX_CTRL_OVERRIDE_EN;
589
d1523b52
TR
590 afi_writel(port->pcie, value, ctrl);
591
592 tegra_pcie_port_reset(port);
593}
594
595static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
596{
0d20d621 597 const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
d1523b52
TR
598 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
599 unsigned long value;
600
601 /* assert port reset */
602 value = afi_readl(port->pcie, ctrl);
603 value &= ~AFI_PEX_CTRL_RST;
604 afi_writel(port->pcie, value, ctrl);
605
606 /* disable reference clock */
607 value = afi_readl(port->pcie, ctrl);
0d20d621
TR
608
609 if (soc->has_pex_clkreq_en)
610 value &= ~AFI_PEX_CTRL_CLKREQ_EN;
611
d1523b52
TR
612 value &= ~AFI_PEX_CTRL_REFCLK_EN;
613 afi_writel(port->pcie, value, ctrl);
614}
615
616static void tegra_pcie_port_free(struct tegra_pcie_port *port)
617{
618 struct tegra_pcie *pcie = port->pcie;
619
620 devm_iounmap(pcie->dev, port->base);
621 devm_release_mem_region(pcie->dev, port->regs.start,
622 resource_size(&port->regs));
623 list_del(&port->list);
624 devm_kfree(pcie->dev, port);
625}
626
627static void tegra_pcie_fixup_bridge(struct pci_dev *dev)
628{
629 u16 reg;
630
631 if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
632 pci_read_config_word(dev, PCI_COMMAND, &reg);
633 reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
634 PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
635 pci_write_config_word(dev, PCI_COMMAND, reg);
636 }
637}
638DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);
639
640/* Tegra PCIE root complex wrongly reports device class */
641static void tegra_pcie_fixup_class(struct pci_dev *dev)
642{
643 dev->class = PCI_CLASS_BRIDGE_PCI << 8;
644}
645DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
646DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
94716cdd
JA
647DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
648DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
d1523b52
TR
649
650/* Tegra PCIE requires relaxed ordering */
651static void tegra_pcie_relax_enable(struct pci_dev *dev)
652{
653 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
654}
655DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
656
657static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
658{
659 struct tegra_pcie *pcie = sys_to_pcie(sys);
41534e53 660 int err;
07a7cbd3 661 phys_addr_t io_start;
41534e53
TR
662
663 err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
664 if (err < 0)
665 return err;
666
667 err = devm_request_resource(pcie->dev, &pcie->all, &pcie->prefetch);
668 if (err)
669 return err;
d1523b52 670
07a7cbd3 671 io_start = pci_pio_to_address(pcie->io.start);
d1523b52
TR
672
673 pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
674 pci_add_resource_offset(&sys->resources, &pcie->prefetch,
675 sys->mem_offset);
676 pci_add_resource(&sys->resources, &pcie->busn);
677
0b0b0893 678 pci_ioremap_io(nr * SZ_64K, io_start);
d1523b52
TR
679
680 return 1;
681}
682
683static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
684{
685 struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
f5d3352b 686 int irq;
d1523b52 687
b4f17375
SW
688 tegra_cpuidle_pcie_irqs_in_use();
689
f5d3352b
LS
690 irq = of_irq_parse_and_map_pci(pdev, slot, pin);
691 if (!irq)
692 irq = pcie->irq;
693
694 return irq;
d1523b52
TR
695}
696
d1523b52
TR
697static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys)
698{
699 struct tegra_pcie *pcie = sys_to_pcie(sys);
700 struct pci_bus *bus;
701
702 bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys,
703 &sys->resources);
704 if (!bus)
705 return NULL;
706
707 pci_scan_child_bus(bus);
708
709 return bus;
710}
711
712static irqreturn_t tegra_pcie_isr(int irq, void *arg)
713{
714 const char *err_msg[] = {
715 "Unknown",
716 "AXI slave error",
717 "AXI decode error",
718 "Target abort",
719 "Master abort",
720 "Invalid write",
7f1f054b 721 "Legacy interrupt",
d1523b52
TR
722 "Response decoding error",
723 "AXI response decoding error",
724 "Transaction timeout",
7f1f054b
TR
725 "Slot present pin change",
726 "Slot clock request change",
727 "TMS clock ramp change",
728 "TMS ready for power down",
729 "Peer2Peer error",
d1523b52
TR
730 };
731 struct tegra_pcie *pcie = arg;
732 u32 code, signature;
733
734 code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
735 signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
736 afi_writel(pcie, 0, AFI_INTR_CODE);
737
738 if (code == AFI_INTR_LEGACY)
739 return IRQ_NONE;
740
741 if (code >= ARRAY_SIZE(err_msg))
742 code = 0;
743
744 /*
745 * do not pollute kernel log with master abort reports since they
746 * happen a lot during enumeration
747 */
748 if (code == AFI_INTR_MASTER_ABORT)
749 dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
750 signature);
751 else
752 dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
753 signature);
754
755 if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
756 code == AFI_INTR_FPCI_DECODE_ERROR) {
757 u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
758 u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
759
760 if (code == AFI_INTR_MASTER_ABORT)
761 dev_dbg(pcie->dev, " FPCI address: %10llx\n", address);
762 else
763 dev_err(pcie->dev, " FPCI address: %10llx\n", address);
764 }
765
766 return IRQ_HANDLED;
767}
768
769/*
770 * FPCI map is as follows:
771 * - 0xfdfc000000: I/O space
772 * - 0xfdfe000000: type 0 configuration space
773 * - 0xfdff000000: type 1 configuration space
774 * - 0xfe00000000: type 0 extended configuration space
775 * - 0xfe10000000: type 1 extended configuration space
776 */
777static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
778{
779 u32 fpci_bar, size, axi_address;
0b0b0893 780 phys_addr_t io_start = pci_pio_to_address(pcie->io.start);
d1523b52
TR
781
782 /* Bar 0: type 1 extended configuration space */
783 fpci_bar = 0xfe100000;
784 size = resource_size(pcie->cs);
785 axi_address = pcie->cs->start;
786 afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
787 afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
788 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);
789
790 /* Bar 1: downstream IO bar */
791 fpci_bar = 0xfdfc0000;
792 size = resource_size(&pcie->io);
0b0b0893 793 axi_address = io_start;
d1523b52
TR
794 afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
795 afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
796 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
797
798 /* Bar 2: prefetchable memory BAR */
799 fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
800 size = resource_size(&pcie->prefetch);
801 axi_address = pcie->prefetch.start;
802 afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
803 afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
804 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);
805
806 /* Bar 3: non prefetchable memory BAR */
807 fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
808 size = resource_size(&pcie->mem);
809 axi_address = pcie->mem.start;
810 afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
811 afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
812 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
813
814 /* NULL out the remaining BARs as they are not used */
815 afi_writel(pcie, 0, AFI_AXI_BAR4_START);
816 afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
817 afi_writel(pcie, 0, AFI_FPCI_BAR4);
818
819 afi_writel(pcie, 0, AFI_AXI_BAR5_START);
820 afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
821 afi_writel(pcie, 0, AFI_FPCI_BAR5);
822
823 /* map all upstream transactions as uncached */
824 afi_writel(pcie, PHYS_OFFSET, AFI_CACHE_BAR0_ST);
825 afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
826 afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
827 afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
828
829 /* MSI translations are setup only when needed */
830 afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
831 afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
832 afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
833 afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
834}
835
7f1f054b 836static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
d1523b52 837{
94716cdd 838 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
7f1f054b 839 u32 value;
d1523b52 840
7f1f054b 841 timeout = jiffies + msecs_to_jiffies(timeout);
94716cdd 842
7f1f054b
TR
843 while (time_before(jiffies, timeout)) {
844 value = pads_readl(pcie, soc->pads_pll_ctl);
845 if (value & PADS_PLL_CTL_LOCKDET)
846 return 0;
847 }
d1523b52 848
7f1f054b
TR
849 return -ETIMEDOUT;
850}
d1523b52 851
7f1f054b
TR
852static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
853{
854 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
855 u32 value;
856 int err;
d1523b52 857
f7625980 858 /* initialize internal PHY, enable up to 16 PCIE lanes */
d1523b52
TR
859 pads_writel(pcie, 0x0, PADS_CTL_SEL);
860
861 /* override IDDQ to 1 on all 4 lanes */
862 value = pads_readl(pcie, PADS_CTL);
863 value |= PADS_CTL_IDDQ_1L;
864 pads_writel(pcie, value, PADS_CTL);
865
866 /*
867 * Set up PHY PLL inputs select PLLE output as refclock,
868 * set TX ref sel to div10 (not div5).
869 */
94716cdd 870 value = pads_readl(pcie, soc->pads_pll_ctl);
d1523b52 871 value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
94716cdd
JA
872 value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
873 pads_writel(pcie, value, soc->pads_pll_ctl);
d1523b52 874
ec732762
EY
875 /* reset PLL */
876 value = pads_readl(pcie, soc->pads_pll_ctl);
877 value &= ~PADS_PLL_CTL_RST_B4SM;
878 pads_writel(pcie, value, soc->pads_pll_ctl);
879
880 usleep_range(20, 100);
881
d1523b52 882 /* take PLL out of reset */
94716cdd 883 value = pads_readl(pcie, soc->pads_pll_ctl);
d1523b52 884 value |= PADS_PLL_CTL_RST_B4SM;
94716cdd 885 pads_writel(pcie, value, soc->pads_pll_ctl);
d1523b52 886
b02b07ad
SW
887 /* Configure the reference clock driver */
888 value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16);
889 pads_writel(pcie, value, PADS_REFCLK_CFG0);
890 if (soc->num_ports > 2)
891 pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);
d1523b52
TR
892
893 /* wait for the PLL to lock */
7f1f054b
TR
894 err = tegra_pcie_pll_wait(pcie, 500);
895 if (err < 0) {
896 dev_err(pcie->dev, "PLL failed to lock: %d\n", err);
897 return err;
898 }
d1523b52
TR
899
900 /* turn off IDDQ override */
901 value = pads_readl(pcie, PADS_CTL);
902 value &= ~PADS_CTL_IDDQ_1L;
903 pads_writel(pcie, value, PADS_CTL);
904
905 /* enable TX/RX data */
906 value = pads_readl(pcie, PADS_CTL);
907 value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
908 pads_writel(pcie, value, PADS_CTL);
909
7f1f054b
TR
910 return 0;
911}
912
913static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
914{
915 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
916 struct tegra_pcie_port *port;
917 unsigned long value;
918 int err;
919
920 /* enable PLL power down */
921 if (pcie->phy) {
922 value = afi_readl(pcie, AFI_PLLE_CONTROL);
923 value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
924 value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
925 afi_writel(pcie, value, AFI_PLLE_CONTROL);
926 }
927
928 /* power down PCIe slot clock bias pad */
929 if (soc->has_pex_bias_ctrl)
930 afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
931
932 /* configure mode and disable all ports */
933 value = afi_readl(pcie, AFI_PCIE_CONFIG);
934 value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
935 value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
936
937 list_for_each_entry(port, &pcie->ports, list)
938 value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
939
940 afi_writel(pcie, value, AFI_PCIE_CONFIG);
941
942 if (soc->has_gen2) {
943 value = afi_readl(pcie, AFI_FUSE);
944 value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
945 afi_writel(pcie, value, AFI_FUSE);
946 } else {
947 value = afi_readl(pcie, AFI_FUSE);
948 value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
949 afi_writel(pcie, value, AFI_FUSE);
950 }
951
952 if (!pcie->phy)
953 err = tegra_pcie_phy_enable(pcie);
954 else
955 err = phy_power_on(pcie->phy);
956
957 if (err < 0) {
958 dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
959 return err;
960 }
961
d1523b52 962 /* take the PCIe interface module out of reset */
3127a6b2 963 reset_control_deassert(pcie->pcie_xrst);
d1523b52
TR
964
965 /* finally enable PCIe */
966 value = afi_readl(pcie, AFI_CONFIGURATION);
967 value |= AFI_CONFIGURATION_EN_FPCI;
968 afi_writel(pcie, value, AFI_CONFIGURATION);
969
970 value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
971 AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
972 AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;
94716cdd
JA
973
974 if (soc->has_intr_prsnt_sense)
975 value |= AFI_INTR_EN_PRSNT_SENSE;
976
d1523b52
TR
977 afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
978 afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);
979
980 /* don't enable MSI for now, only when needed */
981 afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);
982
983 /* disable all exceptions */
984 afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
985
986 return 0;
987}
988
989static void tegra_pcie_power_off(struct tegra_pcie *pcie)
990{
991 int err;
992
993 /* TODO: disable and unprepare clocks? */
994
7f1f054b
TR
995 err = phy_power_off(pcie->phy);
996 if (err < 0)
997 dev_warn(pcie->dev, "failed to power off PHY: %d\n", err);
998
3127a6b2
SW
999 reset_control_assert(pcie->pcie_xrst);
1000 reset_control_assert(pcie->afi_rst);
1001 reset_control_assert(pcie->pex_rst);
d1523b52
TR
1002
1003 tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
1004
077fb158 1005 err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
d1523b52 1006 if (err < 0)
077fb158 1007 dev_warn(pcie->dev, "failed to disable regulators: %d\n", err);
d1523b52
TR
1008}
1009
1010static int tegra_pcie_power_on(struct tegra_pcie *pcie)
1011{
94716cdd 1012 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
d1523b52
TR
1013 int err;
1014
3127a6b2
SW
1015 reset_control_assert(pcie->pcie_xrst);
1016 reset_control_assert(pcie->afi_rst);
1017 reset_control_assert(pcie->pex_rst);
d1523b52
TR
1018
1019 tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
1020
1021 /* enable regulators */
077fb158
TR
1022 err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
1023 if (err < 0)
1024 dev_err(pcie->dev, "failed to enable regulators: %d\n", err);
94716cdd 1025
d1523b52 1026 err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
80b28791
SW
1027 pcie->pex_clk,
1028 pcie->pex_rst);
d1523b52
TR
1029 if (err) {
1030 dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
1031 return err;
1032 }
1033
3127a6b2 1034 reset_control_deassert(pcie->afi_rst);
d1523b52
TR
1035
1036 err = clk_prepare_enable(pcie->afi_clk);
1037 if (err < 0) {
1038 dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
1039 return err;
1040 }
1041
94716cdd
JA
1042 if (soc->has_cml_clk) {
1043 err = clk_prepare_enable(pcie->cml_clk);
1044 if (err < 0) {
1045 dev_err(pcie->dev, "failed to enable CML clock: %d\n",
1046 err);
1047 return err;
1048 }
1049 }
1050
d1523b52
TR
1051 err = clk_prepare_enable(pcie->pll_e);
1052 if (err < 0) {
1053 dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
1054 return err;
1055 }
1056
1057 return 0;
1058}
1059
1060static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
1061{
94716cdd
JA
1062 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
1063
d1523b52
TR
1064 pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
1065 if (IS_ERR(pcie->pex_clk))
1066 return PTR_ERR(pcie->pex_clk);
1067
1068 pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
1069 if (IS_ERR(pcie->afi_clk))
1070 return PTR_ERR(pcie->afi_clk);
1071
d1523b52
TR
1072 pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
1073 if (IS_ERR(pcie->pll_e))
1074 return PTR_ERR(pcie->pll_e);
1075
94716cdd
JA
1076 if (soc->has_cml_clk) {
1077 pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
1078 if (IS_ERR(pcie->cml_clk))
1079 return PTR_ERR(pcie->cml_clk);
1080 }
1081
d1523b52
TR
1082 return 0;
1083}
1084
3127a6b2
SW
1085static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
1086{
1087 pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
1088 if (IS_ERR(pcie->pex_rst))
1089 return PTR_ERR(pcie->pex_rst);
1090
1091 pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
1092 if (IS_ERR(pcie->afi_rst))
1093 return PTR_ERR(pcie->afi_rst);
1094
1095 pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
1096 if (IS_ERR(pcie->pcie_xrst))
1097 return PTR_ERR(pcie->pcie_xrst);
1098
1099 return 0;
1100}
1101
d1523b52
TR
1102static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
1103{
1104 struct platform_device *pdev = to_platform_device(pcie->dev);
1105 struct resource *pads, *afi, *res;
1106 int err;
1107
1108 err = tegra_pcie_clocks_get(pcie);
1109 if (err) {
1110 dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
1111 return err;
1112 }
1113
3127a6b2
SW
1114 err = tegra_pcie_resets_get(pcie);
1115 if (err) {
1116 dev_err(&pdev->dev, "failed to get resets: %d\n", err);
1117 return err;
1118 }
1119
7f1f054b
TR
1120 pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
1121 if (IS_ERR(pcie->phy)) {
1122 err = PTR_ERR(pcie->phy);
1123 dev_err(&pdev->dev, "failed to get PHY: %d\n", err);
1124 return err;
1125 }
1126
1127 err = phy_init(pcie->phy);
1128 if (err < 0) {
1129 dev_err(&pdev->dev, "failed to initialize PHY: %d\n", err);
1130 return err;
1131 }
1132
d1523b52
TR
1133 err = tegra_pcie_power_on(pcie);
1134 if (err) {
1135 dev_err(&pdev->dev, "failed to power up: %d\n", err);
1136 return err;
1137 }
1138
d1523b52 1139 pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
dc05ee32
JL
1140 pcie->pads = devm_ioremap_resource(&pdev->dev, pads);
1141 if (IS_ERR(pcie->pads)) {
1142 err = PTR_ERR(pcie->pads);
d1523b52
TR
1143 goto poweroff;
1144 }
1145
1146 afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
dc05ee32
JL
1147 pcie->afi = devm_ioremap_resource(&pdev->dev, afi);
1148 if (IS_ERR(pcie->afi)) {
1149 err = PTR_ERR(pcie->afi);
d1523b52
TR
1150 goto poweroff;
1151 }
1152
dc05ee32 1153 /* request configuration space, but remap later, on demand */
d1523b52
TR
1154 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
1155 if (!res) {
1156 err = -EADDRNOTAVAIL;
1157 goto poweroff;
1158 }
1159
1160 pcie->cs = devm_request_mem_region(pcie->dev, res->start,
1161 resource_size(res), res->name);
1162 if (!pcie->cs) {
1163 err = -EADDRNOTAVAIL;
1164 goto poweroff;
1165 }
1166
1167 /* request interrupt */
1168 err = platform_get_irq_byname(pdev, "intr");
1169 if (err < 0) {
1170 dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
1171 goto poweroff;
1172 }
1173
1174 pcie->irq = err;
1175
1176 err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
1177 if (err) {
1178 dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
1179 goto poweroff;
1180 }
1181
1182 return 0;
1183
1184poweroff:
1185 tegra_pcie_power_off(pcie);
1186 return err;
1187}
1188
1189static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
1190{
7f1f054b
TR
1191 int err;
1192
d1523b52
TR
1193 if (pcie->irq > 0)
1194 free_irq(pcie->irq, pcie);
1195
1196 tegra_pcie_power_off(pcie);
7f1f054b
TR
1197
1198 err = phy_exit(pcie->phy);
1199 if (err < 0)
1200 dev_err(pcie->dev, "failed to teardown PHY: %d\n", err);
1201
d1523b52
TR
1202 return 0;
1203}
1204
1205static int tegra_msi_alloc(struct tegra_msi *chip)
1206{
1207 int msi;
1208
1209 mutex_lock(&chip->lock);
1210
1211 msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1212 if (msi < INT_PCI_MSI_NR)
1213 set_bit(msi, chip->used);
1214 else
1215 msi = -ENOSPC;
1216
1217 mutex_unlock(&chip->lock);
1218
1219 return msi;
1220}
1221
1222static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1223{
1224 struct device *dev = chip->chip.dev;
1225
1226 mutex_lock(&chip->lock);
1227
1228 if (!test_bit(irq, chip->used))
1229 dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1230 else
1231 clear_bit(irq, chip->used);
1232
1233 mutex_unlock(&chip->lock);
1234}
1235
/*
 * tegra_pcie_msi_irq() - summary handler for the AFI MSI interrupt
 *
 * The AFI exposes 8 x 32 pending bits (AFI_MSI_VEC0..7). Each set bit is
 * acknowledged by writing it back, then the corresponding Linux IRQ is
 * dispatched via the MSI IRQ domain. Returns IRQ_HANDLED if at least one
 * vector was processed, IRQ_NONE otherwise (shared-IRQ convention).
 */
1236static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
1237{
1238	struct tegra_pcie *pcie = data;
1239	struct tegra_msi *msi = &pcie->msi;
1240	unsigned int i, processed = 0;
1241
1242	for (i = 0; i < 8; i++) {
1243		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1244
1245		while (reg) {
1246			unsigned int offset = find_first_bit(&reg, 32);
1247			unsigned int index = i * 32 + offset;
1248			unsigned int irq;
1249
1250			/* clear the interrupt */
1251			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);
1252
1253			irq = irq_find_mapping(msi->domain, index);
1254			if (irq) {
1255				if (test_bit(index, msi->used))
1256					generic_handle_irq(irq);
1257				else
1258					dev_info(pcie->dev, "unhandled MSI\n");
1259			} else {
1260				/*
1261				 * that's weird who triggered this?
1262				 * just clear it
1263				 */
1264				dev_info(pcie->dev, "unexpected MSI\n");
1265			}
1266
1267			/* see if there's any more pending in this vector */
1268			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1269
1270			processed++;
1271		}
1272	}
1273
1274	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
1275}
1276
c2791b80
YW
/*
 * tegra_msi_setup_irq() - msi_controller .setup_irq callback
 *
 * Allocates a hardware MSI vector, maps it into the MSI IRQ domain,
 * attaches the descriptor and programs the device with a message whose
 * address is the dedicated MSI page (allocated in tegra_pcie_enable_msi)
 * and whose data is the vector number.
 */
1277static int tegra_msi_setup_irq(struct msi_controller *chip,
1278			       struct pci_dev *pdev, struct msi_desc *desc)
d1523b52
TR
1279{
1280	struct tegra_msi *msi = to_tegra_msi(chip);
1281	struct msi_msg msg;
1282	unsigned int irq;
1283	int hwirq;
1284
1285	hwirq = tegra_msi_alloc(msi);
1286	if (hwirq < 0)
1287		return hwirq;
1288
1289	irq = irq_create_mapping(msi->domain, hwirq);
019fa46e
JZ
1290	if (!irq) {
		/* give the vector back if no virq could be created */
1291		tegra_msi_free(msi, hwirq);
d1523b52 1292		return -EINVAL;
019fa46e 1293	}
d1523b52
TR
1294
1295	irq_set_msi_desc(irq, desc);
1296
1297	msg.address_lo = virt_to_phys((void *)msi->pages);
1298	/* 32 bit address only */
1299	msg.address_hi = 0;
1300	msg.data = hwirq;
1301
1302	write_msi_msg(irq, &msg);
1303
1304	return 0;
1305}
1306
c2791b80
YW
/*
 * tegra_msi_teardown_irq() - msi_controller .teardown_irq callback
 *
 * Undoes tegra_msi_setup_irq(): disposes of the domain mapping and
 * releases the hardware vector back to the allocator.
 */
1307static void tegra_msi_teardown_irq(struct msi_controller *chip,
1308				 unsigned int irq)
d1523b52
TR
1309{
1310	struct tegra_msi *msi = to_tegra_msi(chip);
1311	struct irq_data *d = irq_get_irq_data(irq);
019fa46e 1312	irq_hw_number_t hwirq = irqd_to_hwirq(d);
d1523b52 1313
019fa46e
JZ
1314	irq_dispose_mapping(irq);
1315	tegra_msi_free(msi, hwirq);
d1523b52
TR
1316}
1317
/*
 * IRQ chip for the per-vector MSI interrupts; masking is delegated to the
 * generic PCI MSI mask/unmask helpers.
 */
1318static struct irq_chip tegra_msi_irq_chip = {
1319	.name = "Tegra PCIe MSI",
1320	.irq_enable = unmask_msi_irq,
1321	.irq_disable = mask_msi_irq,
1322	.irq_mask = mask_msi_irq,
1323	.irq_unmask = unmask_msi_irq,
1324};
1325
/*
 * tegra_msi_map() - irq_domain .map callback for MSI vectors
 *
 * Wires a newly created virq to the MSI irq_chip and marks it valid.
 * Also informs the Tegra cpuidle code that PCIe interrupts are now in
 * use (affects LP2 entry on these SoCs).
 */
1326static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
1327			 irq_hw_number_t hwirq)
1328{
1329	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
1330	irq_set_chip_data(irq, domain->host_data);
1331	set_irq_flags(irq, IRQF_VALID);
1332
b4f17375
SW
1333	tegra_cpuidle_pcie_irqs_in_use();
1334
d1523b52
TR
1335	return 0;
1336}
1337
/* Linear MSI IRQ domain; only .map is needed, unmapping uses the default. */
1338static const struct irq_domain_ops msi_domain_ops = {
1339	.map = tegra_msi_map,
1340};
1341
1342static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1343{
1344 struct platform_device *pdev = to_platform_device(pcie->dev);
94716cdd 1345 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
d1523b52
TR
1346 struct tegra_msi *msi = &pcie->msi;
1347 unsigned long base;
1348 int err;
1349 u32 reg;
1350
1351 mutex_init(&msi->lock);
1352
1353 msi->chip.dev = pcie->dev;
1354 msi->chip.setup_irq = tegra_msi_setup_irq;
1355 msi->chip.teardown_irq = tegra_msi_teardown_irq;
1356
1357 msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
1358 &msi_domain_ops, &msi->chip);
1359 if (!msi->domain) {
1360 dev_err(&pdev->dev, "failed to create IRQ domain\n");
1361 return -ENOMEM;
1362 }
1363
1364 err = platform_get_irq_byname(pdev, "msi");
1365 if (err < 0) {
1366 dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
1367 goto err;
1368 }
1369
1370 msi->irq = err;
1371
1372 err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
1373 tegra_msi_irq_chip.name, pcie);
1374 if (err < 0) {
1375 dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
1376 goto err;
1377 }
1378
1379 /* setup AFI/FPCI range */
1380 msi->pages = __get_free_pages(GFP_KERNEL, 0);
1381 base = virt_to_phys((void *)msi->pages);
1382
94716cdd 1383 afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
d1523b52
TR
1384 afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
1385 /* this register is in 4K increments */
1386 afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1387
1388 /* enable all MSI vectors */
1389 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
1390 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
1391 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
1392 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
1393 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
1394 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
1395 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
1396 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
1397
1398 /* and unmask the MSI interrupt */
1399 reg = afi_readl(pcie, AFI_INTR_MASK);
1400 reg |= AFI_INTR_MASK_MSI_MASK;
1401 afi_writel(pcie, reg, AFI_INTR_MASK);
1402
1403 return 0;
1404
1405err:
1406 irq_domain_remove(msi->domain);
1407 return err;
1408}
1409
/*
 * tegra_pcie_disable_msi() - tear down MSI support
 *
 * Masks and disables all MSI vectors in the AFI before freeing the MSI
 * target page, the summary interrupt and all virq mappings, and finally
 * removing the IRQ domain. Always returns 0.
 */
1410static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1411{
1412	struct tegra_msi *msi = &pcie->msi;
1413	unsigned int i, irq;
1414	u32 value;
1415
1416	/* mask the MSI interrupt */
1417	value = afi_readl(pcie, AFI_INTR_MASK);
1418	value &= ~AFI_INTR_MASK_MSI_MASK;
1419	afi_writel(pcie, value, AFI_INTR_MASK);
1420
1421	/* disable all MSI vectors */
1422	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
1423	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
1424	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
1425	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
1426	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
1427	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
1428	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
1429	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
1430
1431	free_pages(msi->pages, 0);
1432
1433	if (msi->irq > 0)
1434		free_irq(msi->irq, pcie);
1435
	/* dispose of any virqs still mapped in the domain */
1436	for (i = 0; i < INT_PCI_MSI_NR; i++) {
1437		irq = irq_find_mapping(msi->domain, i);
1438		if (irq > 0)
1439			irq_dispose_mapping(irq);
1440	}
1441
1442	irq_domain_remove(msi->domain);
1443
1444	return 0;
1445}
1446
/*
 * tegra_pcie_get_xbar_config() - translate a lane layout into an XBAR value
 *
 * @lanes encodes one byte per root port (filled in tegra_pcie_parse_dt()
 * via "lanes |= value << (index << 3)"), so e.g. 0x0104 means port 0 has
 * four lanes and port 1 has one. Only the combinations the respective SoC
 * supports are accepted; anything else yields -EINVAL.
 */
1447static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1448				      u32 *xbar)
1449{
1450	struct device_node *np = pcie->dev->of_node;
1451
7f1f054b
TR
1452	if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1453		switch (lanes) {
1454		case 0x0000104:
1455			dev_info(pcie->dev, "4x1, 1x1 configuration\n");
1456			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1457			return 0;
1458
1459		case 0x0000102:
1460			dev_info(pcie->dev, "2x1, 1x1 configuration\n");
1461			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1462			return 0;
1463		}
1464	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
94716cdd
JA
1465		switch (lanes) {
1466		case 0x00000204:
1467			dev_info(pcie->dev, "4x1, 2x1 configuration\n");
1468			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1469			return 0;
1470
1471		case 0x00020202:
1472			dev_info(pcie->dev, "2x3 configuration\n");
1473			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1474			return 0;
1475
1476		case 0x00010104:
1477			dev_info(pcie->dev, "4x1, 1x2 configuration\n");
1478			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1479			return 0;
1480		}
1481	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1482		switch (lanes) {
1483		case 0x00000004:
1484			dev_info(pcie->dev, "single-mode configuration\n");
1485			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1486			return 0;
1487
1488		case 0x00000202:
1489			dev_info(pcie->dev, "dual-mode configuration\n");
1490			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1491			return 0;
1492		}
d1523b52
TR
1493	}
1494
1495	return -EINVAL;
1496}
1497
077fb158
TR
1498/*
1499 * Check whether a given set of supplies is available in a device tree node.
1500 * This is used to check whether the new or the legacy device tree bindings
1501 * should be used.
1502 */
1503static bool of_regulator_bulk_available(struct device_node *np,
1504 struct regulator_bulk_data *supplies,
1505 unsigned int num_supplies)
1506{
1507 char property[32];
1508 unsigned int i;
1509
1510 for (i = 0; i < num_supplies; i++) {
1511 snprintf(property, 32, "%s-supply", supplies[i].supply);
1512
1513 if (of_find_property(np, property, NULL) == NULL)
1514 return false;
1515 }
1516
1517 return true;
1518}
1519
1520/*
1521 * Old versions of the device tree binding for this device used a set of power
1522 * supplies that didn't match the hardware inputs. This happened to work for a
1523 * number of cases but is not future proof. However to preserve backwards-
1524 * compatibility with old device trees, this function will try to use the old
1525 * set of supplies.
1526 */
1527static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1528{
1529 struct device_node *np = pcie->dev->of_node;
1530
1531 if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1532 pcie->num_supplies = 3;
1533 else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1534 pcie->num_supplies = 2;
1535
1536 if (pcie->num_supplies == 0) {
1537 dev_err(pcie->dev, "device %s not supported in legacy mode\n",
1538 np->full_name);
1539 return -ENODEV;
1540 }
1541
1542 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1543 sizeof(*pcie->supplies),
1544 GFP_KERNEL);
1545 if (!pcie->supplies)
1546 return -ENOMEM;
1547
1548 pcie->supplies[0].supply = "pex-clk";
1549 pcie->supplies[1].supply = "vdd";
1550
1551 if (pcie->num_supplies > 2)
1552 pcie->supplies[2].supply = "avdd";
1553
1554 return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
1555 pcie->supplies);
1556}
1557
1558/*
1559 * Obtains the list of regulators required for a particular generation of the
1560 * IP block.
1561 *
1562 * This would've been nice to do simply by providing static tables for use
1563 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
1564 * in that it has two pairs or AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
1565 * and either seems to be optional depending on which ports are being used.
1566 */
1567static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
1568{
1569	struct device_node *np = pcie->dev->of_node;
1570	unsigned int i = 0;
1571
7f1f054b
TR
1572	if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1573		pcie->num_supplies = 7;
1574
1575		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1576					      sizeof(*pcie->supplies),
1577					      GFP_KERNEL);
1578		if (!pcie->supplies)
1579			return -ENOMEM;
1580
1581		pcie->supplies[i++].supply = "avddio-pex";
1582		pcie->supplies[i++].supply = "dvddio-pex";
1583		pcie->supplies[i++].supply = "avdd-pex-pll";
1584		pcie->supplies[i++].supply = "hvdd-pex";
1585		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
1586		pcie->supplies[i++].supply = "vddio-pex-ctl";
1587		pcie->supplies[i++].supply = "avdd-pll-erefe";
1588	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
077fb158
TR
1589		bool need_pexa = false, need_pexb = false;
1590
1591		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
1592		if (lane_mask & 0x0f)
1593			need_pexa = true;
1594
1595		/* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
1596		if (lane_mask & 0x30)
1597			need_pexb = true;
1598
1599		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
1600				     (need_pexb ? 2 : 0);
1601
1602		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1603					      sizeof(*pcie->supplies),
1604					      GFP_KERNEL);
1605		if (!pcie->supplies)
1606			return -ENOMEM;
1607
1608		pcie->supplies[i++].supply = "avdd-pex-pll";
1609		pcie->supplies[i++].supply = "hvdd-pex";
1610		pcie->supplies[i++].supply = "vddio-pex-ctl";
1611		pcie->supplies[i++].supply = "avdd-plle";
1612
1613		if (need_pexa) {
1614			pcie->supplies[i++].supply = "avdd-pexa";
1615			pcie->supplies[i++].supply = "vdd-pexa";
1616		}
1617
1618		if (need_pexb) {
1619			pcie->supplies[i++].supply = "avdd-pexb";
1620			pcie->supplies[i++].supply = "vdd-pexb";
1621		}
1622	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1623		pcie->num_supplies = 5;
1624
1625		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1626					      sizeof(*pcie->supplies),
1627					      GFP_KERNEL);
1628		if (!pcie->supplies)
1629			return -ENOMEM;
1630
1631		pcie->supplies[0].supply = "avdd-pex";
1632		pcie->supplies[1].supply = "vdd-pex";
1633		pcie->supplies[2].supply = "avdd-pex-pll";
1634		pcie->supplies[3].supply = "avdd-plle";
1635		pcie->supplies[4].supply = "vddio-pex-clk";
1636	}
1637
	/*
	 * NOTE(review): if none of the compatibles above matched,
	 * pcie->supplies is still NULL and num_supplies is 0 here; the loop
	 * in of_regulator_bulk_available() then trivially returns true and
	 * a zero-length bulk get is performed — presumably harmless, but
	 * worth confirming for future SoC additions.
	 */
1638	if (of_regulator_bulk_available(pcie->dev->of_node, pcie->supplies,
1639					pcie->num_supplies))
1640		return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
1641					       pcie->supplies);
1642
1643	/*
1644	 * If not all regulators are available for this new scheme, assume
1645	 * that the device tree complies with an older version of the device
1646	 * tree binding.
1647	 */
1648	dev_info(pcie->dev, "using legacy DT binding for power supplies\n");
1649
1650	devm_kfree(pcie->dev, pcie->supplies);
1651	pcie->num_supplies = 0;
1652
1653	return tegra_pcie_get_legacy_regulators(pcie);
1654}
1655
d1523b52
TR
1656static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
1657{
94716cdd 1658 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
d1523b52
TR
1659 struct device_node *np = pcie->dev->of_node, *port;
1660 struct of_pci_range_parser parser;
1661 struct of_pci_range range;
077fb158
TR
1662 u32 lanes = 0, mask = 0;
1663 unsigned int lane = 0;
d1523b52 1664 struct resource res;
d1523b52
TR
1665 int err;
1666
41534e53
TR
1667 memset(&pcie->all, 0, sizeof(pcie->all));
1668 pcie->all.flags = IORESOURCE_MEM;
1669 pcie->all.name = np->full_name;
1670 pcie->all.start = ~0;
1671 pcie->all.end = 0;
1672
d1523b52
TR
1673 if (of_pci_range_parser_init(&parser, np)) {
1674 dev_err(pcie->dev, "missing \"ranges\" property\n");
1675 return -EINVAL;
1676 }
1677
d1523b52 1678 for_each_of_pci_range(&parser, &range) {
0b0b0893
LD
1679 err = of_pci_range_to_resource(&range, np, &res);
1680 if (err < 0)
1681 return err;
d1523b52
TR
1682
1683 switch (res.flags & IORESOURCE_TYPE_BITS) {
1684 case IORESOURCE_IO:
1685 memcpy(&pcie->io, &res, sizeof(res));
41534e53 1686 pcie->io.name = np->full_name;
d1523b52
TR
1687 break;
1688
1689 case IORESOURCE_MEM:
1690 if (res.flags & IORESOURCE_PREFETCH) {
1691 memcpy(&pcie->prefetch, &res, sizeof(res));
41534e53 1692 pcie->prefetch.name = "prefetchable";
d1523b52
TR
1693 } else {
1694 memcpy(&pcie->mem, &res, sizeof(res));
41534e53 1695 pcie->mem.name = "non-prefetchable";
d1523b52
TR
1696 }
1697 break;
1698 }
41534e53
TR
1699
1700 if (res.start <= pcie->all.start)
1701 pcie->all.start = res.start;
1702
1703 if (res.end >= pcie->all.end)
1704 pcie->all.end = res.end;
d1523b52
TR
1705 }
1706
41534e53
TR
1707 err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->all);
1708 if (err < 0)
1709 return err;
1710
d1523b52
TR
1711 err = of_pci_parse_bus_range(np, &pcie->busn);
1712 if (err < 0) {
1713 dev_err(pcie->dev, "failed to parse ranges property: %d\n",
1714 err);
1715 pcie->busn.name = np->name;
1716 pcie->busn.start = 0;
1717 pcie->busn.end = 0xff;
1718 pcie->busn.flags = IORESOURCE_BUS;
1719 }
1720
1721 /* parse root ports */
1722 for_each_child_of_node(np, port) {
1723 struct tegra_pcie_port *rp;
1724 unsigned int index;
1725 u32 value;
1726
1727 err = of_pci_get_devfn(port);
1728 if (err < 0) {
1729 dev_err(pcie->dev, "failed to parse address: %d\n",
1730 err);
1731 return err;
1732 }
1733
1734 index = PCI_SLOT(err);
1735
94716cdd 1736 if (index < 1 || index > soc->num_ports) {
d1523b52
TR
1737 dev_err(pcie->dev, "invalid port number: %d\n", index);
1738 return -EINVAL;
1739 }
1740
1741 index--;
1742
1743 err = of_property_read_u32(port, "nvidia,num-lanes", &value);
1744 if (err < 0) {
1745 dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
1746 err);
1747 return err;
1748 }
1749
1750 if (value > 16) {
1751 dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
1752 return -EINVAL;
1753 }
1754
1755 lanes |= value << (index << 3);
1756
077fb158
TR
1757 if (!of_device_is_available(port)) {
1758 lane += value;
d1523b52 1759 continue;
077fb158
TR
1760 }
1761
1762 mask |= ((1 << value) - 1) << lane;
1763 lane += value;
d1523b52
TR
1764
1765 rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
1766 if (!rp)
1767 return -ENOMEM;
1768
1769 err = of_address_to_resource(port, 0, &rp->regs);
1770 if (err < 0) {
1771 dev_err(pcie->dev, "failed to parse address: %d\n",
1772 err);
1773 return err;
1774 }
1775
1776 INIT_LIST_HEAD(&rp->list);
1777 rp->index = index;
1778 rp->lanes = value;
1779 rp->pcie = pcie;
1780
dc05ee32
JL
1781 rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
1782 if (IS_ERR(rp->base))
1783 return PTR_ERR(rp->base);
d1523b52
TR
1784
1785 list_add_tail(&rp->list, &pcie->ports);
1786 }
1787
1788 err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
1789 if (err < 0) {
1790 dev_err(pcie->dev, "invalid lane configuration\n");
1791 return err;
1792 }
1793
077fb158
TR
1794 err = tegra_pcie_get_regulators(pcie, mask);
1795 if (err < 0)
1796 return err;
1797
d1523b52
TR
1798 return 0;
1799}
1800
1801/*
1802 * FIXME: If there are no PCIe cards attached, then calling this function
1803 * can result in the increase of the bootup time as there are big timeout
1804 * loops.
1805 */
1806#define TEGRA_PCIE_LINKUP_TIMEOUT 200	/* up to 1.2 seconds */
/*
 * tegra_pcie_port_check_link() - wait for a root port's link to train
 *
 * Forces presence detection, then polls (up to TEGRA_PCIE_LINKUP_TIMEOUT
 * iterations, 1-2 ms each) first for data-link up and then for the link
 * to become active, resetting the port and retrying up to three times.
 * Returns true when the link is active, false when all retries failed.
 */
1807static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
1808{
1809	unsigned int retries = 3;
1810	unsigned long value;
1811
7f1f054b
TR
1812	/* override presence detection */
1813	value = readl(port->base + RP_PRIV_MISC);
1814	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
1815	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
1816	writel(value, port->base + RP_PRIV_MISC);
1817
d1523b52
TR
1818	do {
1819		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
1820
1821		do {
1822			value = readl(port->base + RP_VEND_XP);
1823
1824			if (value & RP_VEND_XP_DL_UP)
1825				break;
1826
1827			usleep_range(1000, 2000);
1828		} while (--timeout);
1829
1830		if (!timeout) {
1831			dev_err(port->pcie->dev, "link %u down, retrying\n",
1832				port->index);
1833			goto retry;
1834		}
1835
1836		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
1837
1838		do {
1839			value = readl(port->base + RP_LINK_CONTROL_STATUS);
1840
1841			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
1842				return true;
1843
1844			usleep_range(1000, 2000);
1845		} while (--timeout);
1846
1847retry:
1848		tegra_pcie_port_reset(port);
1849	} while (--retries);
1850
1851	return false;
1852}
1853
/*
 * tegra_pcie_enable() - enable the root ports and register the host bridge
 *
 * Each port is enabled and link-checked; ports whose link stays down are
 * disabled and removed from the list. The remaining configuration is then
 * handed to the ARM PCI core via pci_common_init_dev(). Always returns 0.
 */
1854static int tegra_pcie_enable(struct tegra_pcie *pcie)
1855{
1856	struct tegra_pcie_port *port, *tmp;
1857	struct hw_pci hw;
1858
	/* _safe iteration: ports with a dead link are freed inside the loop */
1859	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
1860		dev_info(pcie->dev, "probing port %u, using %u lanes\n",
1861			 port->index, port->lanes);
1862
1863		tegra_pcie_port_enable(port);
1864
1865		if (tegra_pcie_port_check_link(port))
1866			continue;
1867
1868		dev_info(pcie->dev, "link %u down, ignoring\n", port->index);
1869
1870		tegra_pcie_port_disable(port);
1871		tegra_pcie_port_free(port);
1872	}
1873
1874	memset(&hw, 0, sizeof(hw));
1875
7ec725b2
YW
1876#ifdef CONFIG_PCI_MSI
1877	hw.msi_ctrl = &pcie->msi.chip;
1878#endif
1879
d1523b52
TR
1880	hw.nr_controllers = 1;
1881	hw.private_data = (void **)&pcie;
1882	hw.setup = tegra_pcie_setup;
1883	hw.map_irq = tegra_pcie_map_irq;
d1523b52
TR
1884	hw.scan = tegra_pcie_scan_bus;
1885	hw.ops = &tegra_pcie_ops;
1886
1887	pci_common_init_dev(pcie->dev, &hw);
1888
1889	return 0;
1890}
1891
94716cdd
JA
/* Tegra20: two root ports, MSI BAR programmed without a shift, no CML clock. */
1892static const struct tegra_pcie_soc_data tegra20_pcie_data = {
1893	.num_ports = 2,
1894	.msi_base_shift = 0,
1895	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
1896	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
1897	.has_pex_clkreq_en = false,
1898	.has_pex_bias_ctrl = false,
1899	.has_intr_prsnt_sense = false,
94716cdd 1900	.has_cml_clk = false,
7f1f054b 1901	.has_gen2 = false,
94716cdd
JA
1902};
1903
/* Tegra30: three root ports, CML clock and presence-sense interrupt present. */
1904static const struct tegra_pcie_soc_data tegra30_pcie_data = {
1905	.num_ports = 3,
1906	.msi_base_shift = 8,
1907	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
1908	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
1909	.has_pex_clkreq_en = true,
1910	.has_pex_bias_ctrl = true,
1911	.has_intr_prsnt_sense = true,
94716cdd 1912	.has_cml_clk = true,
7f1f054b
TR
1913	.has_gen2 = false,
1914};
1915
/* Tegra124: like Tegra30 but two ports and Gen2 link speed support. */
1916static const struct tegra_pcie_soc_data tegra124_pcie_data = {
1917	.num_ports = 2,
1918	.msi_base_shift = 8,
1919	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
1920	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
1921	.has_pex_clkreq_en = true,
1922	.has_pex_bias_ctrl = true,
1923	.has_intr_prsnt_sense = true,
1924	.has_cml_clk = true,
1925	.has_gen2 = true,
94716cdd
JA
1926};
1927
/* .data points at the per-SoC feature description used throughout the driver */
1928static const struct of_device_id tegra_pcie_of_match[] = {
7f1f054b 1929	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie_data },
94716cdd
JA
1930	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
1931	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
1932	{ },
1933};
1934MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
1935
2cb989f6
TR
1936static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
1937{
1938 struct tegra_pcie *pcie = s->private;
1939
1940 if (list_empty(&pcie->ports))
1941 return NULL;
1942
1943 seq_printf(s, "Index Status\n");
1944
1945 return seq_list_start(&pcie->ports, *pos);
1946}
1947
/* seq_file .next: advance to the following root port in the list. */
1948static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
1949{
1950	struct tegra_pcie *pcie = s->private;
1951
1952	return seq_list_next(v, &pcie->ports, pos);
1953}
1954
/* seq_file .stop: nothing to release. */
1955static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
1956{
1957}
1958
1959static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
1960{
1961 bool up = false, active = false;
1962 struct tegra_pcie_port *port;
1963 unsigned int value;
1964
1965 port = list_entry(v, struct tegra_pcie_port, list);
1966
1967 value = readl(port->base + RP_VEND_XP);
1968
1969 if (value & RP_VEND_XP_DL_UP)
1970 up = true;
1971
1972 value = readl(port->base + RP_LINK_CONTROL_STATUS);
1973
1974 if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
1975 active = true;
1976
1977 seq_printf(s, "%2u ", port->index);
1978
1979 if (up)
1980 seq_printf(s, "up");
1981
1982 if (active) {
1983 if (up)
1984 seq_printf(s, ", ");
1985
1986 seq_printf(s, "active");
1987 }
1988
1989 seq_printf(s, "\n");
1990 return 0;
1991}
1992
/* Iterator over the root-port list for the debugfs "ports" file. */
1993static const struct seq_operations tegra_pcie_ports_seq_ops = {
1994	.start = tegra_pcie_ports_seq_start,
1995	.next = tegra_pcie_ports_seq_next,
1996	.stop = tegra_pcie_ports_seq_stop,
1997	.show = tegra_pcie_ports_seq_show,
1998};
1999
2000static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
2001{
2002 struct tegra_pcie *pcie = inode->i_private;
2003 struct seq_file *s;
2004 int err;
2005
2006 err = seq_open(file, &tegra_pcie_ports_seq_ops);
2007 if (err)
2008 return err;
2009
2010 s = file->private_data;
2011 s->private = pcie;
2012
2013 return 0;
2014}
2015
/* File operations for the read-only, seq_file-backed debugfs "ports" file. */
2016static const struct file_operations tegra_pcie_ports_ops = {
2017	.owner = THIS_MODULE,
2018	.open = tegra_pcie_ports_open,
2019	.read = seq_read,
2020	.llseek = seq_lseek,
2021	.release = seq_release,
2022};
2023
2024static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2025{
2026 struct dentry *file;
2027
2028 pcie->debugfs = debugfs_create_dir("pcie", NULL);
2029 if (!pcie->debugfs)
2030 return -ENOMEM;
2031
2032 file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
2033 pcie, &tegra_pcie_ports_ops);
2034 if (!file)
2035 goto remove;
2036
2037 return 0;
2038
2039remove:
2040 debugfs_remove_recursive(pcie->debugfs);
2041 pcie->debugfs = NULL;
2042 return -ENOMEM;
2043}
2044
d1523b52
TR
/*
 * tegra_pcie_probe() - driver entry point
 *
 * Order matters: DT is parsed first (regulators, ports, windows), then
 * clocks/resets/PHY/power and register regions are acquired, the
 * controller and AFI translations are set up, optional MSI support is
 * enabled and finally the ports are brought up and registered with the
 * PCI core. Error paths unwind in reverse.
 */
2045static int tegra_pcie_probe(struct platform_device *pdev)
2046{
94716cdd 2047	const struct of_device_id *match;
d1523b52
TR
2048	struct tegra_pcie *pcie;
2049	int err;
2050
94716cdd
JA
2051	match = of_match_device(tegra_pcie_of_match, &pdev->dev);
2052	if (!match)
2053		return -ENODEV;
2054
d1523b52
TR
2055	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
2056	if (!pcie)
2057		return -ENOMEM;
2058
f7625980 2059	INIT_LIST_HEAD(&pcie->buses);
d1523b52 2060	INIT_LIST_HEAD(&pcie->ports);
94716cdd 2061	pcie->soc_data = match->data;
d1523b52
TR
2062	pcie->dev = &pdev->dev;
2063
2064	err = tegra_pcie_parse_dt(pcie);
2065	if (err < 0)
2066		return err;
2067
2068	pcibios_min_mem = 0;
2069
2070	err = tegra_pcie_get_resources(pcie);
2071	if (err < 0) {
2072		dev_err(&pdev->dev, "failed to request resources: %d\n", err);
2073		return err;
2074	}
2075
2076	err = tegra_pcie_enable_controller(pcie);
2077	if (err)
2078		goto put_resources;
2079
2080	/* setup the AFI address translations */
2081	tegra_pcie_setup_translations(pcie);
2082
2083	if (IS_ENABLED(CONFIG_PCI_MSI)) {
2084		err = tegra_pcie_enable_msi(pcie);
2085		if (err < 0) {
2086			dev_err(&pdev->dev,
2087				"failed to enable MSI support: %d\n",
2088				err);
2089			goto put_resources;
2090		}
2091	}
2092
2093	err = tegra_pcie_enable(pcie);
2094	if (err < 0) {
2095		dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
2096		goto disable_msi;
2097	}
2098
	/* debugfs is best-effort: a failure is logged but not fatal */
2cb989f6
TR
2099	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2100		err = tegra_pcie_debugfs_init(pcie);
2101		if (err < 0)
2102			dev_err(&pdev->dev, "failed to setup debugfs: %d\n",
2103				err);
2104	}
2105
d1523b52
TR
2106	platform_set_drvdata(pdev, pcie);
2107	return 0;
2108
2109disable_msi:
2110	if (IS_ENABLED(CONFIG_PCI_MSI))
2111		tegra_pcie_disable_msi(pcie);
2112put_resources:
2113	tegra_pcie_put_resources(pcie);
2114	return err;
2115}
2116
d1523b52
TR
/*
 * No .remove callback is provided; suppress_bind_attrs prevents manual
 * unbinding via sysfs so teardown ordering is never exercised at runtime.
 */
2117static struct platform_driver tegra_pcie_driver = {
2118	.driver = {
2119		.name = "tegra-pcie",
2120		.owner = THIS_MODULE,
2121		.of_match_table = tegra_pcie_of_match,
2122		.suppress_bind_attrs = true,
2123	},
2124	.probe = tegra_pcie_probe,
2125};
2126module_platform_driver(tegra_pcie_driver);
2127
2128MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
2129MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
d975cb57 2130MODULE_LICENSE("GPL v2");
This page took 0.165842 seconds and 5 git commands to generate.