// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Tegra SoCs
 *
 * Copyright (c) 2010, CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Based on NVIDIA PCIe driver
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * Bits taken from arch/arm/mach-dove/pcie.c
 *
 * Author: Thierry Reding <treding@nvidia.com>
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>

#include <soc/tegra/cpuidle.h>
#include <soc/tegra/pmc.h>

#include "../pci.h"

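/*
 * The AFI exposes eight 32-bit MSI vector registers (AFI_MSI_VEC0..7
 * below), one bit per MSI, hence 8 * 32 = 256 MSIs in total.
 */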
#define INT_PCI_MSI_NR (8 * 32)

/* register definitions */

#define AFI_AXI_BAR0_SZ	0x00
#define AFI_AXI_BAR1_SZ	0x04
#define AFI_AXI_BAR2_SZ	0x08
#define AFI_AXI_BAR3_SZ	0x0c
#define AFI_AXI_BAR4_SZ	0x10
#define AFI_AXI_BAR5_SZ	0x14

#define AFI_AXI_BAR0_START	0x18
#define AFI_AXI_BAR1_START	0x1c
#define AFI_AXI_BAR2_START	0x20
#define AFI_AXI_BAR3_START	0x24
#define AFI_AXI_BAR4_START	0x28
#define AFI_AXI_BAR5_START	0x2c

#define AFI_FPCI_BAR0	0x30
#define AFI_FPCI_BAR1	0x34
#define AFI_FPCI_BAR2	0x38
#define AFI_FPCI_BAR3	0x3c
#define AFI_FPCI_BAR4	0x40
#define AFI_FPCI_BAR5	0x44

#define AFI_CACHE_BAR0_SZ	0x48
#define AFI_CACHE_BAR0_ST	0x4c
#define AFI_CACHE_BAR1_SZ	0x50
#define AFI_CACHE_BAR1_ST	0x54

#define AFI_MSI_BAR_SZ		0x60
#define AFI_MSI_FPCI_BAR_ST	0x64
#define AFI_MSI_AXI_BAR_ST	0x68

#define AFI_MSI_VEC0		0x6c
#define AFI_MSI_VEC1		0x70
#define AFI_MSI_VEC2		0x74
#define AFI_MSI_VEC3		0x78
#define AFI_MSI_VEC4		0x7c
#define AFI_MSI_VEC5		0x80
#define AFI_MSI_VEC6		0x84
#define AFI_MSI_VEC7		0x88

#define AFI_MSI_EN_VEC0		0x8c
#define AFI_MSI_EN_VEC1		0x90
#define AFI_MSI_EN_VEC2		0x94
#define AFI_MSI_EN_VEC3		0x98
#define AFI_MSI_EN_VEC4		0x9c
#define AFI_MSI_EN_VEC5		0xa0
#define AFI_MSI_EN_VEC6		0xa4
#define AFI_MSI_EN_VEC7		0xa8

#define AFI_CONFIGURATION		0xac
#define  AFI_CONFIGURATION_EN_FPCI	(1 << 0)

#define AFI_FPCI_ERROR_MASKS	0xb0

#define AFI_INTR_MASK		0xb4
#define  AFI_INTR_MASK_INT_MASK	(1 << 0)
#define  AFI_INTR_MASK_MSI_MASK	(1 << 8)

#define AFI_INTR_CODE			0xb8
#define  AFI_INTR_CODE_MASK		0xf
#define  AFI_INTR_INI_SLAVE_ERROR	1
#define  AFI_INTR_INI_DECODE_ERROR	2
#define  AFI_INTR_TARGET_ABORT		3
#define  AFI_INTR_MASTER_ABORT		4
#define  AFI_INTR_INVALID_WRITE		5
#define  AFI_INTR_LEGACY		6
#define  AFI_INTR_FPCI_DECODE_ERROR	7
#define  AFI_INTR_AXI_DECODE_ERROR	8
#define  AFI_INTR_FPCI_TIMEOUT		9
#define  AFI_INTR_PE_PRSNT_SENSE	10
#define  AFI_INTR_PE_CLKREQ_SENSE	11
#define  AFI_INTR_CLKCLAMP_SENSE	12
#define  AFI_INTR_RDY4PD_SENSE		13
#define  AFI_INTR_P2P_ERROR		14

#define AFI_INTR_SIGNATURE	0xbc
#define AFI_UPPER_FPCI_ADDRESS	0xc0
#define AFI_SM_INTR_ENABLE	0xc4
#define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
#define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
#define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
#define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
#define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
#define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
#define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
#define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)

#define AFI_AFI_INTR_ENABLE		0xc8
#define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
#define  AFI_INTR_EN_INI_DECERR		(1 << 1)
#define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
#define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
#define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
#define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
#define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
#define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
#define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)

#define AFI_PCIE_PME		0xf0

#define AFI_PCIE_CONFIG					0x0f8
#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111	(0x2 << 20)

#define AFI_FUSE			0x104
#define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)

#define AFI_PEX0_CTRL			0x110
#define AFI_PEX1_CTRL			0x118
#define AFI_PEX2_CTRL			0x128
#define  AFI_PEX_CTRL_RST		(1 << 0)
#define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
#define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
#define  AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)

#define AFI_PLLE_CONTROL		0x160
#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)

#define AFI_PEXBIAS_CTRL_0		0x168

#define RP_VEND_XP	0x00000f00
#define  RP_VEND_XP_DL_UP	(1 << 30)

#define RP_VEND_CTL2 0x00000fa8
#define  RP_VEND_CTL2_PCA_ENABLE (1 << 7)

#define RP_PRIV_MISC	0x00000fe0
#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)

#define RP_LINK_CONTROL_STATUS			0x00000090
#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000

#define PADS_CTL_SEL		0x0000009c

#define PADS_CTL		0x000000a0
#define  PADS_CTL_IDDQ_1L	(1 << 0)
#define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
#define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)

#define PADS_PLL_CTL_TEGRA20			0x000000b8
#define PADS_PLL_CTL_TEGRA30			0x000000b4
#define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
#define  PADS_PLL_CTL_LOCKDET			(1 << 8)
#define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
#define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
#define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)

#define PADS_REFCLK_CFG0			0x000000c8
#define PADS_REFCLK_CFG1			0x000000cc
#define PADS_REFCLK_BIAS			0x000000d0

/*
 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 * entries, one entry per PCIe port. These field definitions and desired
 * values aren't in the TRM, but do come from NVIDIA.
 */
#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */
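
/*
 * Illustrative decomposition (not from the TRM): a per-port entry of
 * 0xfa5c would encode DRVI = 0xf (bits 15:12), PREDI = 0xa (bits 11:8),
 * E_TERM = 0 (bit 7) and TERM = 0x17 (bits 6:2).
 */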

#define PME_ACK_TIMEOUT 10000

struct tegra_msi {
	struct msi_controller chip;
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
	unsigned long pages;
	struct mutex lock;
	u64 phys;
	int irq;
};

/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_port_soc {
	struct {
		u8 turnoff_bit;
		u8 ack_bit;
	} pme;
};

struct tegra_pcie_soc {
	unsigned int num_ports;
	const struct tegra_pcie_port_soc *ports;
	unsigned int msi_base_shift;
	u32 pads_pll_ctl;
	u32 tx_ref_sel;
	u32 pads_refclk_cfg0;
	u32 pads_refclk_cfg1;
	bool has_pex_clkreq_en;
	bool has_pex_bias_ctrl;
	bool has_intr_prsnt_sense;
	bool has_cml_clk;
	bool has_gen2;
	bool force_pca_enable;
	bool program_uphy;
};

static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
{
	return container_of(chip, struct tegra_msi, chip);
}

struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;
	void __iomem *afi;
	void __iomem *cfg;
	int irq;

	struct resource cs;
	struct resource io;
	struct resource pio;
	struct resource mem;
	struct resource prefetch;
	struct resource busn;

	struct {
		resource_size_t mem;
		resource_size_t io;
	} offset;

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	bool legacy_phy;
	struct phy *phy;

	struct tegra_msi msi;

	struct list_head ports;
	u32 xbar_config;

	struct regulator_bulk_data *supplies;
	unsigned int num_supplies;

	const struct tegra_pcie_soc *soc;
	struct dentry *debugfs;
};

struct tegra_pcie_port {
	struct tegra_pcie *pcie;
	struct device_node *np;
	struct list_head list;
	struct resource regs;
	void __iomem *base;
	unsigned int index;
	unsigned int lanes;

	struct phy **phys;
};

struct tegra_pcie_bus {
	struct list_head list;
	unsigned int nr;
};

static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}

static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}

static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}

static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}

/*
 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 * defined by PCIe. However, it deviates a bit in how the 4 bits for extended
 * register accesses are mapped:
 *
 *    [27:24] extended register number
 *    [23:16] bus number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * Mapping the whole extended configuration space would require 256 MiB of
 * virtual address space, only a small part of which will actually be used.
 *
 * To work around this, a 4 KiB region is used to generate the required
 * configuration transaction with the relevant B:D:F and register offset
 * values. This is achieved by dynamically programming the base address and
 * size of the AFI_AXI_BAR used for endpoint configuration space mapping,
 * ensuring that the address whose access generates the desired configuration
 * transaction falls within this 4 KiB region.
 */
static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
					   unsigned int where)
{
	return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
	       (PCI_FUNC(devfn) << 8) | (where & 0xff);
}
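
/*
 * Worked example (illustrative): for bus 0x01, device 0, function 0 and
 * register 0x104, this yields ((0x104 & 0xf00) << 16) | (0x01 << 16) |
 * (0x104 & 0xff) = 0x01010004, i.e. the extended register nibble in
 * bits [27:24], the bus number in bits [23:16] and the low byte of the
 * register offset in bits [7:0].
 */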

static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
					unsigned int devfn,
					int where)
{
	struct tegra_pcie *pcie = bus->sysdata;
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		unsigned int offset;
		u32 base;

		offset = tegra_pcie_conf_offset(bus->number, devfn, where);

		/* move 4 KiB window to offset within the FPCI region */
		base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
		afi_writel(pcie, base, AFI_FPCI_BAR0);

		/* move to correct offset within the 4 KiB page */
		addr = pcie->cfg + (offset & (SZ_4K - 1));
	}

	return addr;
}
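
/*
 * Continuing the example above (illustrative): a configuration offset of
 * 0x01010004 programs AFI_FPCI_BAR0 to 0xfe100000 + (0x01010000 >> 8) =
 * 0xfe110100, and the access is then performed at pcie->cfg + 0x004
 * within the 4 KiB window.
 */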

static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *value)
{
	if (bus->number == 0)
		return pci_generic_config_read32(bus, devfn, where, size,
						 value);

	return pci_generic_config_read(bus, devfn, where, size, value);
}

static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 value)
{
	if (bus->number == 0)
		return pci_generic_config_write32(bus, devfn, where, size,
						  value);

	return pci_generic_config_write(bus, devfn, where, size, value);
}

static struct pci_ops tegra_pcie_ops = {
	.map_bus = tegra_pcie_map_bus,
	.read = tegra_pcie_config_read,
	.write = tegra_pcie_config_write,
};

static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
{
	unsigned long ret = 0;

	switch (port->index) {
	case 0:
		ret = AFI_PEX0_CTRL;
		break;

	case 1:
		ret = AFI_PEX1_CTRL;
		break;

	case 2:
		ret = AFI_PEX2_CTRL;
		break;
	}

	return ret;
}

static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	usleep_range(1000, 2000);

	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);
}

static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	value |= AFI_PEX_CTRL_OVERRIDE_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);

	if (soc->force_pca_enable) {
		value = readl(port->base + RP_VEND_CTL2);
		value |= RP_VEND_CTL2_PCA_ENABLE;
		writel(value, port->base + RP_VEND_CTL2);
	}
}

static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);

	if (soc->has_pex_clkreq_en)
		value &= ~AFI_PEX_CTRL_CLKREQ_EN;

	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);
}

static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	devm_release_mem_region(dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(dev, port);
}

/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);

/* Tegra PCIE requires relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);

static int tegra_pcie_request_resources(struct tegra_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct list_head *windows = &host->windows;
	struct device *dev = pcie->dev;
	int err;

	pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
	pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
	pci_add_resource_offset(windows, &pcie->prefetch, pcie->offset.mem);
	pci_add_resource(windows, &pcie->busn);

	err = devm_request_pci_bus_resources(dev, windows);
	if (err < 0) {
		pci_free_resource_list(windows);
		return err;
	}

	pci_remap_iospace(&pcie->pio, pcie->io.start);

	return 0;
}

static void tegra_pcie_free_resources(struct tegra_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct list_head *windows = &host->windows;

	pci_unmap_iospace(&pcie->pio);
	pci_free_resource_list(windows);
}

static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
	struct tegra_pcie *pcie = pdev->bus->sysdata;
	int irq;

	tegra_cpuidle_pcie_irqs_in_use();

	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
	if (!irq)
		irq = pcie->irq;

	return irq;
}

static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
	const char *err_msg[] = {
		"Unknown",
		"AXI slave error",
		"AXI decode error",
		"Target abort",
		"Master abort",
		"Invalid write",
		"Legacy interrupt",
		"Response decoding error",
		"AXI response decoding error",
		"Transaction timeout",
		"Slot present pin change",
		"Slot clock request change",
		"TMS clock ramp change",
		"TMS ready for power down",
		"Peer2Peer error",
	};
	struct tegra_pcie *pcie = arg;
	struct device *dev = pcie->dev;
	u32 code, signature;

	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
	afi_writel(pcie, 0, AFI_INTR_CODE);

	if (code == AFI_INTR_LEGACY)
		return IRQ_NONE;

	if (code >= ARRAY_SIZE(err_msg))
		code = 0;

	/*
	 * do not pollute kernel log with master abort reports since they
	 * happen a lot during enumeration
	 */
	if (code == AFI_INTR_MASTER_ABORT)
		dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
	else
		dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);

	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
	    code == AFI_INTR_FPCI_DECODE_ERROR) {
		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);

		if (code == AFI_INTR_MASTER_ABORT)
			dev_dbg(dev, "  FPCI address: %10llx\n", address);
		else
			dev_err(dev, "  FPCI address: %10llx\n", address);
	}

	return IRQ_HANDLED;
}

/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 fpci_bar, size, axi_address;

	/* Bar 0: type 1 extended configuration space */
	size = resource_size(&pcie->cs);
	afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);

	/* Bar 1: downstream IO bar */
	fpci_bar = 0xfdfc0000;
	size = resource_size(&pcie->io);
	axi_address = pcie->io.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: prefetchable memory BAR */
	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->prefetch);
	axi_address = pcie->prefetch.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: non prefetchable memory BAR */
	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->mem);
	axi_address = pcie->mem.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	/* map all upstream transactions as uncached */
	afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);

	/* MSI translations are setup only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}
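
/*
 * Example of the FPCI BAR encoding used above (illustrative): a
 * prefetchable window starting at 0x20000000 yields fpci_bar =
 * ((0x20000000 >> 12) << 4) | 0x1 = 0x200001, i.e. the 4 KiB page
 * number shifted into bits [31:4] with bit 0 set (the exact meaning of
 * bit 0 is not documented in this driver).
 */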

static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	timeout = jiffies + msecs_to_jiffies(timeout);

	while (time_before(jiffies, timeout)) {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		if (value & PADS_PLL_CTL_LOCKDET)
			return 0;
	}

	return -ETIMEDOUT;
}

static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;
	int err;

	/* initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up the PHY PLL inputs: select the PLLE output as refclock and
	 * set the TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	/* take PLL out of reset  */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* wait for the PLL to lock */
	err = tegra_pcie_pll_wait(pcie, 500);
	if (err < 0) {
		dev_err(dev, "PLL failed to lock: %d\n", err);
		return err;
	}

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	return 0;
}

static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	/* disable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
	pads_writel(pcie, value, PADS_CTL);

	/* override IDDQ */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	return 0;
}

static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_on(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_off(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power off PHY#%u: %d\n", i,
				err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_on(pcie->phy);
		else
			err = tegra_pcie_phy_enable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power on PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_on(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power on PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	/* Configure the reference clock driver */
	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);

	if (soc->num_ports > 2)
		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);

	return 0;
}

static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_off(pcie->phy);
		else
			err = tegra_pcie_phy_disable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power off PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_off(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power off PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	unsigned long value;
	int err;

	/* enable PLL power down */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

	list_for_each_entry(port, &pcie->ports, list)
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	if (soc->program_uphy) {
		err = tegra_pcie_phy_power_on(pcie);
		if (err < 0) {
			dev_err(dev, "failed to power on PHY(s): %d\n", err);
			return err;
		}
	}

	/* take the PCIe interface module out of reset */
	reset_control_deassert(pcie->pcie_xrst);

	/* finally enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);

	return 0;
}

static void tegra_pcie_disable_controller(struct tegra_pcie *pcie)
{
	int err;

	reset_control_assert(pcie->pcie_xrst);

	if (pcie->soc->program_uphy) {
		err = tegra_pcie_phy_power_off(pcie);
		if (err < 0)
			dev_err(pcie->dev, "failed to power off PHY(s): %d\n",
				err);
	}
}

static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	clk_disable_unprepare(pcie->pll_e);
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
	clk_disable_unprepare(pcie->afi_clk);
	clk_disable_unprepare(pcie->pex_clk);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(dev, "failed to disable regulators: %d\n", err);
}

static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_err(dev, "failed to enable regulators: %d\n", err);

	if (dev->pm_domain) {
		err = clk_prepare_enable(pcie->pex_clk);
		if (err) {
			dev_err(dev, "failed to enable PEX clock: %d\n", err);
			return err;
		}
		reset_control_deassert(pcie->pex_rst);
	} else {
		err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
							pcie->pex_clk,
							pcie->pex_rst);
		if (err) {
			dev_err(dev, "powerup sequence failed: %d\n", err);
			return err;
		}
	}

	reset_control_deassert(pcie->afi_rst);

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(dev, "failed to enable AFI clock: %d\n", err);
		return err;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(dev, "failed to enable CML clock: %d\n", err);
			return err;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
		return err;
	}

	return 0;
}

static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;

	pcie->pex_clk = devm_clk_get(dev, "pex");
	if (IS_ERR(pcie->pex_clk))
		return PTR_ERR(pcie->pex_clk);

	pcie->afi_clk = devm_clk_get(dev, "afi");
	if (IS_ERR(pcie->afi_clk))
		return PTR_ERR(pcie->afi_clk);

	pcie->pll_e = devm_clk_get(dev, "pll_e");
	if (IS_ERR(pcie->pll_e))
		return PTR_ERR(pcie->pll_e);

	if (soc->has_cml_clk) {
		pcie->cml_clk = devm_clk_get(dev, "cml");
		if (IS_ERR(pcie->cml_clk))
			return PTR_ERR(pcie->cml_clk);
	}

	return 0;
}

static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;

	pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
	if (IS_ERR(pcie->pex_rst))
		return PTR_ERR(pcie->pex_rst);

	pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
	if (IS_ERR(pcie->afi_rst))
		return PTR_ERR(pcie->afi_rst);

	pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
	if (IS_ERR(pcie->pcie_xrst))
		return PTR_ERR(pcie->pcie_xrst);

	return 0;
}

static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	pcie->phy = devm_phy_optional_get(dev, "pcie");
	if (IS_ERR(pcie->phy)) {
		err = PTR_ERR(pcie->phy);
		dev_err(dev, "failed to get PHY: %d\n", err);
		return err;
	}

	err = phy_init(pcie->phy);
	if (err < 0) {
		dev_err(dev, "failed to initialize PHY: %d\n", err);
		return err;
	}

	pcie->legacy_phy = true;

	return 0;
}

static struct phy *devm_of_phy_optional_get_index(struct device *dev,
						  struct device_node *np,
						  const char *consumer,
						  unsigned int index)
{
	struct phy *phy;
	char *name;

	name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
	if (!name)
		return ERR_PTR(-ENOMEM);

	phy = devm_of_phy_get(dev, np, name);
	kfree(name);

	if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
		phy = NULL;

	return phy;
}

static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	struct phy *phy;
	unsigned int i;
	int err;

	port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
	if (!port->phys)
		return -ENOMEM;

	for (i = 0; i < port->lanes; i++) {
		phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
		if (IS_ERR(phy)) {
			dev_err(dev, "failed to get PHY#%u: %ld\n", i,
				PTR_ERR(phy));
			return PTR_ERR(phy);
		}

		err = phy_init(phy);
		if (err < 0) {
			dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
				err);
			return err;
		}

		port->phys[i] = phy;
	}

	return 0;
}

static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct device_node *np = pcie->dev->of_node;
	struct tegra_pcie_port *port;
	int err;

	if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
		return tegra_pcie_phys_get_legacy(pcie);

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_get_phys(port);
		if (err < 0)
			return err;
	}

	return 0;
}

static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
{
	struct tegra_pcie_port *port;
	struct device *dev = pcie->dev;
	int err, i;

	if (pcie->legacy_phy) {
		err = phy_exit(pcie->phy);
		if (err < 0)
			dev_err(dev, "failed to teardown PHY: %d\n", err);
		return;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		for (i = 0; i < port->lanes; i++) {
			err = phy_exit(port->phys[i]);
			if (err < 0)
				dev_err(dev, "failed to teardown PHY#%u: %d\n",
					i, err);
		}
	}
}


static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *pads, *afi, *res;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(dev, "failed to get resets: %d\n", err);
		return err;
	}

	if (soc->program_uphy) {
		err = tegra_pcie_phys_get(pcie);
		if (err < 0) {
			dev_err(dev, "failed to get PHYs: %d\n", err);
			return err;
		}
	}

	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
	pcie->pads = devm_ioremap_resource(dev, pads);
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto phys_put;
	}

	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
	pcie->afi = devm_ioremap_resource(dev, afi);
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto phys_put;
	}

	/* request configuration space, but remap later, on demand */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto phys_put;
	}

	pcie->cs = *res;

	/* constrain configuration space to 4 KiB */
	pcie->cs.end = pcie->cs.start + SZ_4K - 1;

	pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
	if (IS_ERR(pcie->cfg)) {
		err = PTR_ERR(pcie->cfg);
		goto phys_put;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0) {
		dev_err(dev, "failed to get IRQ: %d\n", err);
		goto phys_put;
	}

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(dev, "failed to register IRQ: %d\n", err);
		goto phys_put;
	}

	return 0;

phys_put:
	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);
	return err;
}

static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;

	if (pcie->irq > 0)
		free_irq(pcie->irq, pcie);

	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);

	return 0;
}

static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;
	u32 val;
	u8 ack_bit;

	val = afi_readl(pcie, AFI_PCIE_PME);
	val |= (0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);

	ack_bit = soc->ports[port->index].pme.ack_bit;
	err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val,
				 val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT);
	if (err)
		dev_err(pcie->dev, "PME Ack is not received on port: %d\n",
			port->index);

	usleep_range(10000, 11000);

	val = afi_readl(pcie, AFI_PCIE_PME);
	val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);
}

static int tegra_msi_alloc(struct tegra_msi *chip)
{
	int msi;

	mutex_lock(&chip->lock);

	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
	if (msi < INT_PCI_MSI_NR)
		set_bit(msi, chip->used);
	else
		msi = -ENOSPC;

	mutex_unlock(&chip->lock);

	return msi;
}

static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
{
	struct device *dev = chip->chip.dev;

	mutex_lock(&chip->lock);

	if (!test_bit(irq, chip->used))
		dev_err(dev, "trying to free unused MSI#%lu\n", irq);
	else
		clear_bit(irq, chip->used);

	mutex_unlock(&chip->lock);
}

static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
	struct tegra_pcie *pcie = data;
	struct device *dev = pcie->dev;
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, processed = 0;

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			unsigned int index = i * 32 + offset;
			unsigned int irq;

			/* clear the interrupt */
			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);

			irq = irq_find_mapping(msi->domain, index);
			if (irq) {
				if (test_bit(index, msi->used))
					generic_handle_irq(irq);
				else
					dev_info(dev, "unhandled MSI\n");
			} else {
				/*
				 * That's weird, who triggered this?
				 * Just clear it.
				 */
				dev_info(dev, "unexpected MSI\n");
			}

			/* see if there's any more pending in this vector */
			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

			processed++;
		}
	}

	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
}
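
/*
 * Each AFI_MSI_VECn register above covers 32 MSIs, so the hardware MSI
 * number is reconstructed as n * 32 + bit position, matching the
 * 0..INT_PCI_MSI_NR-1 range handed out by tegra_msi_alloc().
 */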

static int tegra_msi_setup_irq(struct msi_controller *chip,
			       struct pci_dev *pdev, struct msi_desc *desc)
{
	struct tegra_msi *msi = to_tegra_msi(chip);
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;

	hwirq = tegra_msi_alloc(msi);
	if (hwirq < 0)
		return hwirq;

	irq = irq_create_mapping(msi->domain, hwirq);
	if (!irq) {
		tegra_msi_free(msi, hwirq);
		return -EINVAL;
	}

	irq_set_msi_desc(irq, desc);

	msg.address_lo = lower_32_bits(msi->phys);
	msg.address_hi = upper_32_bits(msi->phys);
	msg.data = hwirq;

	pci_write_msi_msg(irq, &msg);

	return 0;
}

static void tegra_msi_teardown_irq(struct msi_controller *chip,
				   unsigned int irq)
{
	struct tegra_msi *msi = to_tegra_msi(chip);
	struct irq_data *d = irq_get_irq_data(irq);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	irq_dispose_mapping(irq);
	tegra_msi_free(msi, hwirq);
}

static struct irq_chip tegra_msi_irq_chip = {
	.name = "Tegra PCIe MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	tegra_cpuidle_pcie_irqs_in_use();

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = tegra_msi_map,
};

static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct tegra_msi *msi = &pcie->msi;
	struct device *dev = pcie->dev;
	int err;

	mutex_init(&msi->lock);

	msi->chip.dev = dev;
	msi->chip.setup_irq = tegra_msi_setup_irq;
	msi->chip.teardown_irq = tegra_msi_teardown_irq;

	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
					    &msi_domain_ops, &msi->chip);
	if (!msi->domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	err = platform_get_irq_byname(pdev, "msi");
	if (err < 0) {
		dev_err(dev, "failed to get IRQ: %d\n", err);
		goto err;
	}

	msi->irq = err;

	err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
			  tegra_msi_irq_chip.name, pcie);
	if (err < 0) {
		dev_err(dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	/* setup AFI/FPCI range */
	msi->pages = __get_free_pages(GFP_KERNEL, 0);
	msi->phys = virt_to_phys((void *)msi->pages);
	host->msi = &msi->chip;

	return 0;

err:
	irq_domain_remove(msi->domain);
	return err;
}

static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_msi *msi = &pcie->msi;
	u32 reg;

	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
	/* this register is in 4K increments */
	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);

	/* enable all MSI vectors */
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);

	/* and unmask the MSI interrupt */
	reg = afi_readl(pcie, AFI_INTR_MASK);
	reg |= AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, reg, AFI_INTR_MASK);
}

static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
{
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, irq;

	free_pages(msi->pages, 0);

	if (msi->irq > 0)
		free_irq(msi->irq, pcie);

	for (i = 0; i < INT_PCI_MSI_NR; i++) {
		irq = irq_find_mapping(msi->domain, i);
		if (irq > 0)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(msi->domain);
}

static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
{
	u32 value;

	/* mask the MSI interrupt */
	value = afi_readl(pcie, AFI_INTR_MASK);
	value &= ~AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, value, AFI_INTR_MASK);

	/* disable all MSI vectors */
	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);

	return 0;
}

static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
				      u32 *xbar)
{
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;

	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
		switch (lanes) {
		case 0x010004:
			dev_info(dev, "4x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
			return 0;

		case 0x010102:
			dev_info(dev, "2x1, 1X1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
			return 0;

		case 0x010101:
			dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
			return 0;

		default:
			dev_info(dev, "wrong configuration updated in DT, "
				 "switching to default 2x1, 1x1, 1x1 "
				 "configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
		   of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
		switch (lanes) {
		case 0x0000104:
			dev_info(dev, "4x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
			return 0;

		case 0x0000102:
			dev_info(dev, "2x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
		switch (lanes) {
		case 0x00000204:
			dev_info(dev, "4x1, 2x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
			return 0;

		case 0x00020202:
			dev_info(dev, "2x3 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
			return 0;

		case 0x00010104:
			dev_info(dev, "4x1, 1x2 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
		switch (lanes) {
		case 0x00000004:
			dev_info(dev, "single-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
			return 0;

		case 0x00000202:
			dev_info(dev, "dual-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
			return 0;
		}
	}

	return -EINVAL;
}
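
/*
 * Note (assumption based on the cases above): the "lanes" argument packs
 * one root port's lane count into each successive byte, ordered by port
 * index. For example, 0x00020202 describes three ports with two lanes
 * each, matching the Tegra30 "2x3" configuration.
 */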

/*
 * Check whether a given set of supplies is available in a device tree node.
 * This is used to check whether the new or the legacy device tree bindings
 * should be used.
 */
static bool of_regulator_bulk_available(struct device_node *np,
					struct regulator_bulk_data *supplies,
					unsigned int num_supplies)
{
	char property[32];
	unsigned int i;

	for (i = 0; i < num_supplies; i++) {
		snprintf(property, 32, "%s-supply", supplies[i].supply);

		if (of_find_property(np, property, NULL) == NULL)
			return false;
	}

	return true;
}

/*
 * Old versions of the device tree binding for this device used a set of power
 * supplies that didn't match the hardware inputs. This happened to work for a
 * number of cases but is not future proof. However to preserve backwards-
 * compatibility with old device trees, this function will try to use the old
 * set of supplies.
 */
static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;

	if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
		pcie->num_supplies = 3;
	else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
		pcie->num_supplies = 2;

	if (pcie->num_supplies == 0) {
		dev_err(dev, "device %pOF not supported in legacy mode\n", np);
		return -ENODEV;
	}

	pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
				      sizeof(*pcie->supplies),
				      GFP_KERNEL);
	if (!pcie->supplies)
		return -ENOMEM;

	pcie->supplies[0].supply = "pex-clk";
	pcie->supplies[1].supply = "vdd";

	if (pcie->num_supplies > 2)
		pcie->supplies[2].supply = "avdd";

	return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
}

/*
 * Obtains the list of regulators required for a particular generation of the
 * IP block.
 *
 * This would've been nice to do simply by providing static tables for use
 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
 * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
1774  * and either seems to be optional depending on which ports are being used.
1775  */
static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
{
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;
	unsigned int i = 0;

	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
		pcie->num_supplies = 4;

		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[i++].supply = "dvdd-pex";
		pcie->supplies[i++].supply = "hvdd-pex-pll";
		pcie->supplies[i++].supply = "hvdd-pex";
		pcie->supplies[i++].supply = "vddio-pexctl-aud";
	} else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
		pcie->num_supplies = 6;

		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[i++].supply = "avdd-pll-uerefe";
		pcie->supplies[i++].supply = "hvddio-pex";
		pcie->supplies[i++].supply = "dvddio-pex";
		pcie->supplies[i++].supply = "dvdd-pex-pll";
		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
		pcie->supplies[i++].supply = "vddio-pex-ctl";
	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
		pcie->num_supplies = 7;

		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[i++].supply = "avddio-pex";
		pcie->supplies[i++].supply = "dvddio-pex";
		pcie->supplies[i++].supply = "avdd-pex-pll";
		pcie->supplies[i++].supply = "hvdd-pex";
		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
		pcie->supplies[i++].supply = "vddio-pex-ctl";
		pcie->supplies[i++].supply = "avdd-pll-erefe";
	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
		bool need_pexa = false, need_pexb = false;

		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
		if (lane_mask & 0x0f)
			need_pexa = true;

		/* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
		if (lane_mask & 0x30)
			need_pexb = true;

		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
					 (need_pexb ? 2 : 0);

		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[i++].supply = "avdd-pex-pll";
		pcie->supplies[i++].supply = "hvdd-pex";
		pcie->supplies[i++].supply = "vddio-pex-ctl";
		pcie->supplies[i++].supply = "avdd-plle";

		if (need_pexa) {
			pcie->supplies[i++].supply = "avdd-pexa";
			pcie->supplies[i++].supply = "vdd-pexa";
		}

		if (need_pexb) {
			pcie->supplies[i++].supply = "avdd-pexb";
			pcie->supplies[i++].supply = "vdd-pexb";
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
		pcie->num_supplies = 5;

		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[0].supply = "avdd-pex";
		pcie->supplies[1].supply = "vdd-pex";
		pcie->supplies[2].supply = "avdd-pex-pll";
		pcie->supplies[3].supply = "avdd-plle";
		pcie->supplies[4].supply = "vddio-pex-clk";
	}

	if (of_regulator_bulk_available(np, pcie->supplies,
					pcie->num_supplies))
		return devm_regulator_bulk_get(dev, pcie->num_supplies,
					       pcie->supplies);

	/*
	 * If not all regulators are available for this new scheme, assume
	 * that the device tree complies with an older version of the device
	 * tree binding.
	 */
	dev_info(dev, "using legacy DT binding for power supplies\n");

	devm_kfree(dev, pcie->supplies);
	pcie->num_supplies = 0;

	return tegra_pcie_get_legacy_regulators(pcie);
}

static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node, *port;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	u32 lanes = 0, mask = 0;
	unsigned int lane = 0;
	struct resource res;
	int err;

	if (of_pci_range_parser_init(&parser, np)) {
		dev_err(dev, "missing \"ranges\" property\n");
		return -EINVAL;
	}

	for_each_of_pci_range(&parser, &range) {
		err = of_pci_range_to_resource(&range, np, &res);
		if (err < 0)
			return err;

		switch (res.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			/* Track the bus -> CPU I/O mapping offset. */
			pcie->offset.io = res.start - range.pci_addr;

			memcpy(&pcie->pio, &res, sizeof(res));
			pcie->pio.name = np->full_name;

			/*
			 * The Tegra PCIe host bridge uses this to program the
			 * mapping of the I/O space to the physical address,
			 * so we override the .start and .end fields here that
			 * of_pci_range_to_resource() converted to I/O space.
			 * We also set the IORESOURCE_MEM type to clarify that
			 * the resource is in the physical memory space.
			 */
			pcie->io.start = range.cpu_addr;
			pcie->io.end = range.cpu_addr + range.size - 1;
			pcie->io.flags = IORESOURCE_MEM;
			pcie->io.name = "I/O";

			memcpy(&res, &pcie->io, sizeof(res));
			break;

		case IORESOURCE_MEM:
			/*
			 * Track the bus -> CPU memory mapping offset. This
			 * assumes that the prefetchable and non-prefetchable
			 * regions will be the last of type IORESOURCE_MEM in
			 * the ranges property.
			 */
			pcie->offset.mem = res.start - range.pci_addr;

			if (res.flags & IORESOURCE_PREFETCH) {
				memcpy(&pcie->prefetch, &res, sizeof(res));
				pcie->prefetch.name = "prefetchable";
			} else {
				memcpy(&pcie->mem, &res, sizeof(res));
				pcie->mem.name = "non-prefetchable";
			}
			break;
		}
	}

	err = of_pci_parse_bus_range(np, &pcie->busn);
	if (err < 0) {
		dev_err(dev, "failed to parse bus-range property: %d\n", err);
		pcie->busn.name = np->name;
		pcie->busn.start = 0;
		pcie->busn.end = 0xff;
		pcie->busn.flags = IORESOURCE_BUS;
	}

	/* parse root ports */
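	/*
	 * Each root port is a child node whose unit address encodes the PCI
	 * device number; of_pci_get_devfn() below parses it from "reg" and
	 * PCI_SLOT() yields the 1-based port index. An illustrative port
	 * node (values are examples only):
	 *
	 *	pci@1,0 {
	 *		reg = <0x000800 0 0 0 0>;
	 *		nvidia,num-lanes = <2>;
	 *	};
	 */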
	for_each_child_of_node(np, port) {
		struct tegra_pcie_port *rp;
		unsigned int index;
		u32 value;

		err = of_pci_get_devfn(port);
		if (err < 0) {
			dev_err(dev, "failed to parse address: %d\n", err);
			return err;
		}

		index = PCI_SLOT(err);

		if (index < 1 || index > soc->num_ports) {
			dev_err(dev, "invalid port number: %u\n", index);
			return -EINVAL;
		}

		index--;

		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
		if (err < 0) {
			dev_err(dev, "failed to parse # of lanes: %d\n",
				err);
			return err;
		}

		if (value > 16) {
			dev_err(dev, "invalid # of lanes: %u\n", value);
			return -EINVAL;
		}

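		/*
		 * Pack this port's lane count into its own byte of "lanes";
		 * tegra_pcie_get_xbar_config() matches the result against
		 * per-SoC patterns such as 0x00000202 above.
		 */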
		lanes |= value << (index << 3);

		if (!of_device_is_available(port)) {
			lane += value;
			continue;
		}

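		/*
		 * Record which lanes this enabled port occupies, e.g. a
		 * two-lane port starting at lane 4 contributes 0x30. The
		 * mask is later used to pick the PEXA/PEXB supplies on
		 * Tegra30.
		 */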
		mask |= ((1 << value) - 1) << lane;
		lane += value;

		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
		if (!rp)
			return -ENOMEM;

		err = of_address_to_resource(port, 0, &rp->regs);
		if (err < 0) {
			dev_err(dev, "failed to parse address: %d\n", err);
			return err;
		}

		INIT_LIST_HEAD(&rp->list);
		rp->index = index;
		rp->lanes = value;
		rp->pcie = pcie;
		rp->np = port;

		rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
		if (IS_ERR(rp->base))
			return PTR_ERR(rp->base);

		list_add_tail(&rp->list, &pcie->ports);
	}

	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
	if (err < 0) {
		dev_err(dev, "invalid lane configuration\n");
		return err;
	}

	err = tegra_pcie_get_regulators(pcie, mask);
	if (err < 0)
		return err;

	return 0;
}

/*
 * FIXME: If no PCIe cards are attached, calling this function can
 * significantly increase boot time because of the long timeout loops
 * below.
 */
#define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int retries = 3;
	unsigned long value;

	/* override presence detection */
	value = readl(port->base + RP_PRIV_MISC);
	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
	writel(value, port->base + RP_PRIV_MISC);

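	/*
	 * Poll in two stages: first wait for the data link to come up in
	 * RP_VEND_XP, then for DL_LINK_ACTIVE in the link control/status
	 * register. On timeout, reset the port and retry up to three times.
	 */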
	do {
		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		do {
			value = readl(port->base + RP_VEND_XP);

			if (value & RP_VEND_XP_DL_UP)
				break;

			usleep_range(1000, 2000);
		} while (--timeout);

		if (!timeout) {
			dev_err(dev, "link %u down, retrying\n", port->index);
			goto retry;
		}

		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		do {
			value = readl(port->base + RP_LINK_CONTROL_STATUS);

			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
				return true;

			usleep_range(1000, 2000);
		} while (--timeout);

retry:
		tegra_pcie_port_reset(port);
	} while (--retries);

	return false;
}

static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port, *tmp;

	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		dev_info(dev, "probing port %u, using %u lanes\n",
			 port->index, port->lanes);

		tegra_pcie_port_enable(port);

		if (tegra_pcie_port_check_link(port))
			continue;

		dev_info(dev, "link %u down, ignoring\n", port->index);

		tegra_pcie_port_disable(port);
		tegra_pcie_port_free(port);
	}
}

static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
{
	struct tegra_pcie_port *port, *tmp;

	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
		tegra_pcie_port_disable(port);
}

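/*
 * Per-SoC tables giving, for each root port, the bit positions used to
 * request PME turn-off and to read back the acknowledgment; these are
 * consumed by tegra_pcie_pme_turnoff() on suspend.
 */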
static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = {
	{ .pme.turnoff_bit = 0, .pme.ack_bit =  5 },
	{ .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
};

static const struct tegra_pcie_soc tegra20_pcie = {
	.num_ports = 2,
	.ports = tegra20_pcie_ports,
	.msi_base_shift = 0,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
	.pads_refclk_cfg0 = 0xfa5cfa5c,
	.has_pex_clkreq_en = false,
	.has_pex_bias_ctrl = false,
	.has_intr_prsnt_sense = false,
	.has_cml_clk = false,
	.has_gen2 = false,
	.force_pca_enable = false,
	.program_uphy = true,
};

static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
	{ .pme.turnoff_bit = 16, .pme.ack_bit = 18 },
};

static const struct tegra_pcie_soc tegra30_pcie = {
	.num_ports = 3,
	.ports = tegra30_pcie_ports,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0xfa5cfa5c,
	.pads_refclk_cfg1 = 0xfa5cfa5c,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = false,
	.force_pca_enable = false,
	.program_uphy = true,
};

static const struct tegra_pcie_soc tegra124_pcie = {
	.num_ports = 2,
	.ports = tegra20_pcie_ports,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0x44ac44ac,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = true,
	.force_pca_enable = false,
	.program_uphy = true,
};

static const struct tegra_pcie_soc tegra210_pcie = {
	.num_ports = 2,
	.ports = tegra20_pcie_ports,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0x90b890b8,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = true,
	.force_pca_enable = true,
	.program_uphy = true,
};

static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
	{ .pme.turnoff_bit = 12, .pme.ack_bit = 14 },
};

static const struct tegra_pcie_soc tegra186_pcie = {
	.num_ports = 3,
	.ports = tegra186_pcie_ports,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0x80b880b8,
	.pads_refclk_cfg1 = 0x000480b8,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = false,
	.has_gen2 = true,
	.force_pca_enable = false,
	.program_uphy = false,
};

static const struct of_device_id tegra_pcie_of_match[] = {
	{ .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
	{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
	{ },
};

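/*
 * debugfs support: a "ports" seq_file lists the link state of every root
 * port. The output looks roughly like:
 *
 *	Index  Status
 *	 0     up, active
 *	 1
 */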
static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
{
	struct tegra_pcie *pcie = s->private;

	if (list_empty(&pcie->ports))
		return NULL;

	seq_puts(s, "Index  Status\n");

	return seq_list_start(&pcie->ports, *pos);
}

static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct tegra_pcie *pcie = s->private;

	return seq_list_next(v, &pcie->ports, pos);
}

static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
{
}

static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
{
	bool up = false, active = false;
	struct tegra_pcie_port *port;
	unsigned int value;

	port = list_entry(v, struct tegra_pcie_port, list);

	value = readl(port->base + RP_VEND_XP);

	if (value & RP_VEND_XP_DL_UP)
		up = true;

	value = readl(port->base + RP_LINK_CONTROL_STATUS);

	if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
		active = true;

	seq_printf(s, "%2u     ", port->index);

	if (up)
		seq_puts(s, "up");

	if (active) {
		if (up)
			seq_puts(s, ", ");

		seq_puts(s, "active");
	}

	seq_puts(s, "\n");
	return 0;
}

static const struct seq_operations tegra_pcie_ports_seq_ops = {
	.start = tegra_pcie_ports_seq_start,
	.next = tegra_pcie_ports_seq_next,
	.stop = tegra_pcie_ports_seq_stop,
	.show = tegra_pcie_ports_seq_show,
};

static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
{
	struct tegra_pcie *pcie = inode->i_private;
	struct seq_file *s;
	int err;

	err = seq_open(file, &tegra_pcie_ports_seq_ops);
	if (err)
		return err;

	s = file->private_data;
	s->private = pcie;

	return 0;
}

static const struct file_operations tegra_pcie_ports_ops = {
	.owner = THIS_MODULE,
	.open = tegra_pcie_ports_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
{
	debugfs_remove_recursive(pcie->debugfs);
	pcie->debugfs = NULL;
}

static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
{
	struct dentry *file;

	pcie->debugfs = debugfs_create_dir("pcie", NULL);
	if (!pcie->debugfs)
		return -ENOMEM;

	file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
				   pcie, &tegra_pcie_ports_ops);
	if (!file)
		goto remove;

	return 0;

remove:
	tegra_pcie_debugfs_exit(pcie);
	return -ENOMEM;
}

static int tegra_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *host;
	struct tegra_pcie *pcie;
	struct pci_bus *child;
	int err;

	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!host)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(host);
	host->sysdata = pcie;
	platform_set_drvdata(pdev, pcie);

	pcie->soc = of_device_get_match_data(dev);
	INIT_LIST_HEAD(&pcie->ports);
	pcie->dev = dev;

	err = tegra_pcie_parse_dt(pcie);
	if (err < 0)
		return err;

	err = tegra_pcie_get_resources(pcie);
	if (err < 0) {
		dev_err(dev, "failed to request resources: %d\n", err);
		return err;
	}

	err = tegra_pcie_msi_setup(pcie);
	if (err < 0) {
		dev_err(dev, "failed to enable MSI support: %d\n", err);
		goto put_resources;
	}

	pm_runtime_enable(pcie->dev);
	err = pm_runtime_get_sync(pcie->dev);
	if (err < 0) {
		dev_err(dev, "failed to enable PCIe controller: %d\n", err);
		goto teardown_msi;
	}

	err = tegra_pcie_request_resources(pcie);
	if (err)
		goto pm_runtime_put;

	host->busnr = pcie->busn.start;
	host->dev.parent = &pdev->dev;
	host->ops = &tegra_pcie_ops;
	host->map_irq = tegra_pcie_map_irq;
	host->swizzle_irq = pci_common_swizzle;

	err = pci_scan_root_bus_bridge(host);
	if (err < 0) {
		dev_err(dev, "failed to register host: %d\n", err);
		goto free_resources;
	}

	pci_bus_size_bridges(host->bus);
	pci_bus_assign_resources(host->bus);

	list_for_each_entry(child, &host->bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(host->bus);

	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		err = tegra_pcie_debugfs_init(pcie);
		if (err < 0)
			dev_err(dev, "failed to setup debugfs: %d\n", err);
	}

	return 0;

free_resources:
	tegra_pcie_free_resources(pcie);
pm_runtime_put:
	pm_runtime_put_sync(pcie->dev);
	pm_runtime_disable(pcie->dev);
teardown_msi:
	tegra_pcie_msi_teardown(pcie);
put_resources:
	tegra_pcie_put_resources(pcie);
	return err;
}

static int tegra_pcie_remove(struct platform_device *pdev)
{
	struct tegra_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct tegra_pcie_port *port, *tmp;

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_pcie_debugfs_exit(pcie);

	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);
	tegra_pcie_free_resources(pcie);
	pm_runtime_put_sync(pcie->dev);
	pm_runtime_disable(pcie->dev);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_msi_teardown(pcie);

	tegra_pcie_put_resources(pcie);

	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
		tegra_pcie_port_free(port);

	return 0;
}

static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)
{
	struct tegra_pcie *pcie = dev_get_drvdata(dev);
	struct tegra_pcie_port *port;

	list_for_each_entry(port, &pcie->ports, list)
		tegra_pcie_pme_turnoff(port);

	tegra_pcie_disable_ports(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_disable_msi(pcie);

	tegra_pcie_disable_controller(pcie);
	tegra_pcie_power_off(pcie);

	return 0;
}

static int __maybe_unused tegra_pcie_pm_resume(struct device *dev)
{
	struct tegra_pcie *pcie = dev_get_drvdata(dev);
	int err;

	err = tegra_pcie_power_on(pcie);
	if (err) {
		dev_err(dev, "failed to power on PCIe controller: %d\n", err);
		return err;
	}

	err = tegra_pcie_enable_controller(pcie);
	if (err) {
		dev_err(dev, "failed to enable PCIe controller: %d\n", err);
		goto poweroff;
	}
	tegra_pcie_setup_translations(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_enable_msi(pcie);

	tegra_pcie_enable_ports(pcie);

	return 0;

poweroff:
	tegra_pcie_power_off(pcie);

	return err;
}

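/*
 * The same suspend/resume pair serves both as the runtime PM callbacks and
 * as the noirq system sleep callbacks.
 */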
static const struct dev_pm_ops tegra_pcie_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend,
				      tegra_pcie_pm_resume)
};

static struct platform_driver tegra_pcie_driver = {
	.driver = {
		.name = "tegra-pcie",
		.of_match_table = tegra_pcie_of_match,
		.suppress_bind_attrs = true,
		.pm = &tegra_pcie_pm_ops,
	},
	.probe = tegra_pcie_probe,
	.remove = tegra_pcie_remove,
};
module_platform_driver(tegra_pcie_driver);
MODULE_LICENSE("GPL");