// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Tegra SoCs
 *
 * Copyright (c) 2010, CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Based on NVIDIA PCIe driver
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * Bits taken from arch/arm/mach-dove/pcie.c
 *
 * Author: Thierry Reding <treding@nvidia.com>
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>

#include <soc/tegra/cpuidle.h>
#include <soc/tegra/pmc.h>

#include "../pci.h"

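/*
 * The AFI exposes eight 32-bit MSI vector registers, for a total of 256
 * distinct MSI interrupts.
 */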
#define INT_PCI_MSI_NR (8 * 32)

/* register definitions */

#define AFI_AXI_BAR0_SZ	0x00
#define AFI_AXI_BAR1_SZ	0x04
#define AFI_AXI_BAR2_SZ	0x08
#define AFI_AXI_BAR3_SZ	0x0c
#define AFI_AXI_BAR4_SZ	0x10
#define AFI_AXI_BAR5_SZ	0x14

#define AFI_AXI_BAR0_START	0x18
#define AFI_AXI_BAR1_START	0x1c
#define AFI_AXI_BAR2_START	0x20
#define AFI_AXI_BAR3_START	0x24
#define AFI_AXI_BAR4_START	0x28
#define AFI_AXI_BAR5_START	0x2c

#define AFI_FPCI_BAR0	0x30
#define AFI_FPCI_BAR1	0x34
#define AFI_FPCI_BAR2	0x38
#define AFI_FPCI_BAR3	0x3c
#define AFI_FPCI_BAR4	0x40
#define AFI_FPCI_BAR5	0x44

#define AFI_CACHE_BAR0_SZ	0x48
#define AFI_CACHE_BAR0_ST	0x4c
#define AFI_CACHE_BAR1_SZ	0x50
#define AFI_CACHE_BAR1_ST	0x54

#define AFI_MSI_BAR_SZ		0x60
#define AFI_MSI_FPCI_BAR_ST	0x64
#define AFI_MSI_AXI_BAR_ST	0x68

#define AFI_MSI_VEC(x)		(0x6c + ((x) * 4))
#define AFI_MSI_EN_VEC(x)	(0x8c + ((x) * 4))

#define AFI_CONFIGURATION		0xac
#define  AFI_CONFIGURATION_EN_FPCI		(1 << 0)
#define  AFI_CONFIGURATION_CLKEN_OVERRIDE	(1 << 31)

#define AFI_FPCI_ERROR_MASKS	0xb0

#define AFI_INTR_MASK		0xb4
#define  AFI_INTR_MASK_INT_MASK	(1 << 0)
#define  AFI_INTR_MASK_MSI_MASK	(1 << 8)

#define AFI_INTR_CODE			0xb8
#define  AFI_INTR_CODE_MASK		0xf
#define  AFI_INTR_INI_SLAVE_ERROR	1
#define  AFI_INTR_INI_DECODE_ERROR	2
#define  AFI_INTR_TARGET_ABORT		3
#define  AFI_INTR_MASTER_ABORT		4
#define  AFI_INTR_INVALID_WRITE		5
#define  AFI_INTR_LEGACY		6
#define  AFI_INTR_FPCI_DECODE_ERROR	7
#define  AFI_INTR_AXI_DECODE_ERROR	8
#define  AFI_INTR_FPCI_TIMEOUT		9
#define  AFI_INTR_PE_PRSNT_SENSE	10
#define  AFI_INTR_PE_CLKREQ_SENSE	11
#define  AFI_INTR_CLKCLAMP_SENSE	12
#define  AFI_INTR_RDY4PD_SENSE		13
#define  AFI_INTR_P2P_ERROR		14

#define AFI_INTR_SIGNATURE	0xbc
#define AFI_UPPER_FPCI_ADDRESS	0xc0
#define AFI_SM_INTR_ENABLE	0xc4
#define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
#define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
#define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
#define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
#define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
#define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
#define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
#define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)

#define AFI_AFI_INTR_ENABLE		0xc8
#define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
#define  AFI_INTR_EN_INI_DECERR		(1 << 1)
#define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
#define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
#define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
#define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
#define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
#define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
#define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)

#define AFI_PCIE_PME		0xf0

#define AFI_PCIE_CONFIG					0x0f8
#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111	(0x2 << 20)
#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x)		(1 << ((x) + 29))
#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL		(0x7 << 29)

#define AFI_FUSE			0x104
#define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)

#define AFI_PEX0_CTRL			0x110
#define AFI_PEX1_CTRL			0x118
#define  AFI_PEX_CTRL_RST		(1 << 0)
#define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
#define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
#define  AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)

#define AFI_PLLE_CONTROL		0x160
#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)

#define AFI_PEXBIAS_CTRL_0		0x168

#define RP_ECTL_2_R1	0x00000e84
#define  RP_ECTL_2_R1_RX_CTLE_1C_MASK		0xffff

#define RP_ECTL_4_R1	0x00000e8c
#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT	16

#define RP_ECTL_5_R1	0x00000e90
#define  RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK	0xffffffff

#define RP_ECTL_6_R1	0x00000e94
#define  RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK	0xffffffff

#define RP_ECTL_2_R2	0x00000ea4
#define  RP_ECTL_2_R2_RX_CTLE_1C_MASK	0xffff

#define RP_ECTL_4_R2	0x00000eac
#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT	16

#define RP_ECTL_5_R2	0x00000eb0
#define  RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK	0xffffffff

#define RP_ECTL_6_R2	0x00000eb4
#define  RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK	0xffffffff

#define RP_VEND_XP	0x00000f00
#define  RP_VEND_XP_DL_UP			(1 << 30)
#define  RP_VEND_XP_OPPORTUNISTIC_ACK		(1 << 27)
#define  RP_VEND_XP_OPPORTUNISTIC_UPDATEFC	(1 << 28)
#define  RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK	(0xff << 18)

#define RP_VEND_CTL0	0x00000f44
#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK	(0xf << 12)
#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH	(0x9 << 12)

#define RP_VEND_CTL1	0x00000f48
#define  RP_VEND_CTL1_ERPT	(1 << 13)

#define RP_VEND_XP_BIST	0x00000f4c
#define  RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE	(1 << 28)

#define RP_VEND_CTL2 0x00000fa8
#define  RP_VEND_CTL2_PCA_ENABLE (1 << 7)

#define RP_PRIV_MISC	0x00000fe0
#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT		(0xe << 0)
#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT		(0xf << 0)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 16)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD		(0xf << 16)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE		(1 << 23)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 24)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD		(0xf << 24)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE		(1 << 31)

#define RP_LINK_CONTROL_STATUS			0x00000090
#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000

#define RP_LINK_CONTROL_STATUS_2		0x000000b0

#define PADS_CTL_SEL		0x0000009c

#define PADS_CTL		0x000000a0
#define  PADS_CTL_IDDQ_1L	(1 << 0)
#define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
#define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)

#define PADS_PLL_CTL_TEGRA20			0x000000b8
#define PADS_PLL_CTL_TEGRA30			0x000000b4
#define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
#define  PADS_PLL_CTL_LOCKDET			(1 << 8)
#define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
#define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
#define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)

#define PADS_REFCLK_CFG0			0x000000c8
#define PADS_REFCLK_CFG1			0x000000cc
#define PADS_REFCLK_BIAS			0x000000d0

/*
 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 * entries, one entry per PCIe port. These field definitions and desired
 * values aren't in the TRM, but do come from NVIDIA.
 */
#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */

#define PME_ACK_TIMEOUT 10000
#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */

struct tegra_msi {
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
	struct mutex map_lock;
	spinlock_t mask_lock;
	void *virt;
	dma_addr_t phys;
	int irq;
};

/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_port_soc {
	struct {
		u8 turnoff_bit;
		u8 ack_bit;
	} pme;
};

struct tegra_pcie_soc {
	unsigned int num_ports;
	const struct tegra_pcie_port_soc *ports;
	unsigned int msi_base_shift;
	unsigned long afi_pex2_ctrl;
	u32 pads_pll_ctl;
	u32 tx_ref_sel;
	u32 pads_refclk_cfg0;
	u32 pads_refclk_cfg1;
	u32 update_fc_threshold;
	bool has_pex_clkreq_en;
	bool has_pex_bias_ctrl;
	bool has_intr_prsnt_sense;
	bool has_cml_clk;
	bool has_gen2;
	bool force_pca_enable;
	bool program_uphy;
	bool update_clamp_threshold;
	bool program_deskew_time;
	bool update_fc_timer;
	bool has_cache_bars;
	struct {
		struct {
			u32 rp_ectl_2_r1;
			u32 rp_ectl_4_r1;
			u32 rp_ectl_5_r1;
			u32 rp_ectl_6_r1;
			u32 rp_ectl_2_r2;
			u32 rp_ectl_4_r2;
			u32 rp_ectl_5_r2;
			u32 rp_ectl_6_r2;
		} regs;
		bool enable;
	} ectl;
};

struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;
	void __iomem *afi;
	void __iomem *cfg;
	int irq;

	struct resource cs;

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	bool legacy_phy;
	struct phy *phy;

	struct tegra_msi msi;

	struct list_head ports;
	u32 xbar_config;

	struct regulator_bulk_data *supplies;
	unsigned int num_supplies;

	const struct tegra_pcie_soc *soc;
	struct dentry *debugfs;
};

static inline struct tegra_pcie *msi_to_pcie(struct tegra_msi *msi)
{
	return container_of(msi, struct tegra_pcie, msi);
}

struct tegra_pcie_port {
	struct tegra_pcie *pcie;
	struct device_node *np;
	struct list_head list;
	struct resource regs;
	void __iomem *base;
	unsigned int index;
	unsigned int lanes;

	struct phy **phys;

	struct gpio_desc *reset_gpio;
};

static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}

static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}

static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}

static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}

/*
 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 * defined by PCIe. However, it deviates a bit in how the 4 bits for extended
 * register accesses are mapped:
 *
 *    [27:24] extended register number
 *    [23:16] bus number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * Mapping the whole extended configuration space would require 256 MiB of
 * virtual address space, only a small part of which will actually be used.
 *
 * To work around this, a 4 KiB region is used to generate the required
 * configuration transaction with the relevant B:D:F and register offset
 * values. This is achieved by dynamically programming the base address and
 * size of the AFI_AXI_BAR used for endpoint config space mapping to make
 * sure that the address (access to which generates the correct config
 * transaction) falls in this 4 KiB region.
 */
static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
					unsigned int devfn,
					int where)
{
	struct tegra_pcie *pcie = bus->sysdata;
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		unsigned int offset;
		u32 base;

		offset = PCI_CONF1_EXT_ADDRESS(bus->number, PCI_SLOT(devfn),
					       PCI_FUNC(devfn), where) &
			 ~PCI_CONF1_ENABLE;

		/* move 4 KiB window to offset within the FPCI region */
		base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
		afi_writel(pcie, base, AFI_FPCI_BAR0);

		/* move to correct offset within the 4 KiB page */
		addr = pcie->cfg + (offset & (SZ_4K - 1));
	}

	return addr;
}

static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *value)
{
	if (bus->number == 0)
		return pci_generic_config_read32(bus, devfn, where, size,
						 value);

	return pci_generic_config_read(bus, devfn, where, size, value);
}

static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 value)
{
	if (bus->number == 0)
		return pci_generic_config_write32(bus, devfn, where, size,
						  value);

	return pci_generic_config_write(bus, devfn, where, size, value);
}

static struct pci_ops tegra_pcie_ops = {
	.map_bus = tegra_pcie_map_bus,
	.read = tegra_pcie_config_read,
	.write = tegra_pcie_config_write,
};

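/*
 * Look up the AFI_PEX_CTRL register offset for a given root port. Ports 0
 * and 1 are at fixed offsets, while the offset of port 2 differs between
 * SoC generations and comes from the per-SoC data.
 */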
static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long ret = 0;

	switch (port->index) {
	case 0:
		ret = AFI_PEX0_CTRL;
		break;

	case 1:
		ret = AFI_PEX1_CTRL;
		break;

	case 2:
		ret = soc->afi_pex2_ctrl;
		break;
	}

	return ret;
}

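/*
 * Pulse the port reset for at least 1 ms, either via a dedicated reset
 * GPIO or via the AFI port control register, where clearing
 * AFI_PEX_CTRL_RST asserts the reset.
 */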
static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	if (port->reset_gpio) {
		gpiod_set_value(port->reset_gpio, 1);
	} else {
		value = afi_readl(port->pcie, ctrl);
		value &= ~AFI_PEX_CTRL_RST;
		afi_writel(port->pcie, value, ctrl);
	}

	usleep_range(1000, 2000);

	if (port->reset_gpio) {
		gpiod_set_value(port->reset_gpio, 0);
	} else {
		value = afi_readl(port->pcie, ctrl);
		value |= AFI_PEX_CTRL_RST;
		afi_writel(port->pcie, value, ctrl);
	}
}

static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	/* Enable AER capability */
	value = readl(port->base + RP_VEND_CTL1);
	value |= RP_VEND_CTL1_ERPT;
	writel(value, port->base + RP_VEND_CTL1);

	/* Optimal settings to enhance bandwidth */
	value = readl(port->base + RP_VEND_XP);
	value |= RP_VEND_XP_OPPORTUNISTIC_ACK;
	value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC;
	writel(value, port->base + RP_VEND_XP);

	/*
	 * LTSSM will wait for DLLP to finish before entering L1 or L2,
	 * to avoid truncation of PM messages which results in receiver errors
	 */
	value = readl(port->base + RP_VEND_XP_BIST);
	value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE;
	writel(value, port->base + RP_VEND_XP_BIST);

	value = readl(port->base + RP_PRIV_MISC);
	value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE;
	value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE;

	if (soc->update_clamp_threshold) {
		value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK |
				RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK);
		value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD |
			RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD;
	}

	writel(value, port->base + RP_PRIV_MISC);
}

static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	value = readl(port->base + RP_ECTL_2_R1);
	value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_2_r1;
	writel(value, port->base + RP_ECTL_2_R1);

	value = readl(port->base + RP_ECTL_4_R1);
	value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_4_r1 <<
				RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT;
	writel(value, port->base + RP_ECTL_4_R1);

	value = readl(port->base + RP_ECTL_5_R1);
	value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_5_r1;
	writel(value, port->base + RP_ECTL_5_R1);

	value = readl(port->base + RP_ECTL_6_R1);
	value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_6_r1;
	writel(value, port->base + RP_ECTL_6_R1);

	value = readl(port->base + RP_ECTL_2_R2);
	value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_2_r2;
	writel(value, port->base + RP_ECTL_2_R2);

	value = readl(port->base + RP_ECTL_4_R2);
	value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_4_r2 <<
				RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT;
	writel(value, port->base + RP_ECTL_4_R2);

	value = readl(port->base + RP_ECTL_5_R2);
	value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_5_r2;
	writel(value, port->base + RP_ECTL_5_R2);

	value = readl(port->base + RP_ECTL_6_R2);
	value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_6_r2;
	writel(value, port->base + RP_ECTL_6_R2);
}

static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	/*
	 * Sometimes link speed change from Gen2 to Gen1 fails due to
	 * instability in deskew logic on lane-0. Increase the deskew
	 * retry time to resolve this issue.
	 */
	if (soc->program_deskew_time) {
		value = readl(port->base + RP_VEND_CTL0);
		value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK;
		value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH;
		writel(value, port->base + RP_VEND_CTL0);
	}

	if (soc->update_fc_timer) {
		value = readl(port->base + RP_VEND_XP);
		value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
		value |= soc->update_fc_threshold;
		writel(value, port->base + RP_VEND_XP);
	}

	/*
	 * The PCIe link doesn't come up with a few legacy PCIe endpoints
	 * if the root port advertises both Gen-1 and Gen-2 speeds on
	 * Tegra. Hence, the strategy followed here is to initially
	 * advertise only Gen-1 and, after the link is up, retrain the
	 * link to Gen-2 speed.
	 */
	value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
	value &= ~PCI_EXP_LNKSTA_CLS;
	value |= PCI_EXP_LNKSTA_CLS_2_5GB;
	writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
}

static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	value |= AFI_PEX_CTRL_OVERRIDE_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);

	if (soc->force_pca_enable) {
		value = readl(port->base + RP_VEND_CTL2);
		value |= RP_VEND_CTL2_PCA_ENABLE;
		writel(value, port->base + RP_VEND_CTL2);
	}

	tegra_pcie_enable_rp_features(port);

	if (soc->ectl.enable)
		tegra_pcie_program_ectl_settings(port);

	tegra_pcie_apply_sw_fixup(port);
}

static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);

	if (soc->has_pex_clkreq_en)
		value &= ~AFI_PEX_CTRL_CLKREQ_EN;

	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);

	/* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */
	value = afi_readl(port->pcie, AFI_PCIE_CONFIG);
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
	afi_writel(port->pcie, value, AFI_PCIE_CONFIG);
}

static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	devm_release_mem_region(dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(dev, port);
}

/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);

/* Tegra20 and Tegra30 PCIE require relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);

static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
	struct tegra_pcie *pcie = pdev->bus->sysdata;
	int irq;

	tegra_cpuidle_pcie_irqs_in_use();

	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
	if (!irq)
		irq = pcie->irq;

	return irq;
}

static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
	static const char * const err_msg[] = {
		"Unknown",
		"AXI slave error",
		"AXI decode error",
		"Target abort",
		"Master abort",
		"Invalid write",
		"Legacy interrupt",
		"Response decoding error",
		"AXI response decoding error",
		"Transaction timeout",
		"Slot present pin change",
		"Slot clock request change",
		"TMS clock ramp change",
		"TMS ready for power down",
		"Peer2Peer error",
	};
	struct tegra_pcie *pcie = arg;
	struct device *dev = pcie->dev;
	u32 code, signature;

	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
	afi_writel(pcie, 0, AFI_INTR_CODE);

	if (code == AFI_INTR_LEGACY)
		return IRQ_NONE;

	if (code >= ARRAY_SIZE(err_msg))
		code = 0;

	/*
	 * do not pollute kernel log with master abort reports since they
	 * happen a lot during enumeration
	 */
	if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE)
		dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
	else
		dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);

	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
	    code == AFI_INTR_FPCI_DECODE_ERROR) {
		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);

		if (code == AFI_INTR_MASTER_ABORT)
			dev_dbg(dev, "  FPCI address: %10llx\n", address);
		else
			dev_err(dev, "  FPCI address: %10llx\n", address);
	}

	return IRQ_HANDLED;
}

/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 size;
	struct resource_entry *entry;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);

	/* Bar 0: type 1 extended configuration space */
	size = resource_size(&pcie->cs);
	afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);

	resource_list_for_each_entry(entry, &bridge->windows) {
		u32 fpci_bar, axi_address;
		struct resource *res = entry->res;

		size = resource_size(res);

		switch (resource_type(res)) {
		case IORESOURCE_IO:
			/* Bar 1: downstream IO bar */
			fpci_bar = 0xfdfc0000;
			axi_address = pci_pio_to_address(res->start);
			afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
			afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
			afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
			break;
		case IORESOURCE_MEM:
			fpci_bar = (((res->start >> 12) & 0x0fffffff) << 4) | 0x1;
			axi_address = res->start;

			if (res->flags & IORESOURCE_PREFETCH) {
				/* Bar 2: prefetchable memory BAR */
				afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
				afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

			} else {
				/* Bar 3: non prefetchable memory BAR */
				afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
				afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
			}
			break;
		}
	}

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	if (pcie->soc->has_cache_bars) {
		/* map all upstream transactions as uncached */
		afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
		afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
		afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
		afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
	}

	/* MSI translations are set up only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}

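/*
 * Poll the PLL lock-detect bit in the PADS PLL control register until it
 * is set or the timeout (in milliseconds) expires.
 */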
static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	timeout = jiffies + msecs_to_jiffies(timeout);

	while (time_before(jiffies, timeout)) {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		if (value & PADS_PLL_CTL_LOCKDET)
			return 0;
	}

	return -ETIMEDOUT;
}

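/*
 * Bring up the PHY that is integrated into the PADS block: select the PLL
 * reference clock, cycle the PLL through reset, wait for it to lock and
 * finally enable the TX/RX data lanes.
 */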
static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;
	int err;

	/* initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up the PHY PLL inputs: select the PLLE output as the
	 * refclock, set the TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	/* take PLL out of reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* wait for the PLL to lock */
	err = tegra_pcie_pll_wait(pcie, 500);
	if (err < 0) {
		dev_err(dev, "PLL failed to lock: %d\n", err);
		return err;
	}

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	return 0;
}

static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	/* disable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
	pads_writel(pcie, value, PADS_CTL);

	/* override IDDQ */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	return 0;
}

static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_on(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_off(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power off PHY#%u: %d\n", i,
				err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_on(pcie->phy);
		else
			err = tegra_pcie_phy_enable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power on PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_on(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power on PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_off(pcie->phy);
		else
			err = tegra_pcie_phy_disable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power off PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_off(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power off PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}

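/*
 * Program the global AFI state: PLLE power-down control, the lane crossbar
 * configuration, the per-port enable bits, the Gen2 fuse override and the
 * AFI error interrupt enables.
 */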
static void tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	unsigned long value;

	/* enable PLL power down */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL;

	list_for_each_entry(port, &pcie->ports, list) {
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
		value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
	}

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	/* Disable AFI dynamic clock gating and enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	value |= AFI_CONFIGURATION_CLKEN_OVERRIDE;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
}

static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->afi_rst);

	clk_disable_unprepare(pcie->pll_e);
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
	clk_disable_unprepare(pcie->afi_clk);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(dev, "failed to disable regulators: %d\n", err);
}

static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_err(dev, "failed to enable regulators: %d\n", err);

	if (!dev->pm_domain) {
		err = tegra_powergate_power_on(TEGRA_POWERGATE_PCIE);
		if (err) {
			dev_err(dev, "failed to power ungate: %d\n", err);
			goto regulator_disable;
		}
		err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE);
		if (err) {
			dev_err(dev, "failed to remove clamp: %d\n", err);
			goto powergate;
		}
	}

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(dev, "failed to enable AFI clock: %d\n", err);
		goto powergate;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(dev, "failed to enable CML clock: %d\n", err);
			goto disable_afi_clk;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
		goto disable_cml_clk;
	}

	reset_control_deassert(pcie->afi_rst);

	return 0;

disable_cml_clk:
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
disable_afi_clk:
	clk_disable_unprepare(pcie->afi_clk);
powergate:
	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
regulator_disable:
	regulator_bulk_disable(pcie->num_supplies, pcie->supplies);

	return err;
}

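/*
 * Program the REFCLK pad configuration provided by the per-SoC data;
 * PADS_REFCLK_CFG1 is only programmed on SoCs with more than two root
 * ports.
 */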
static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;

	/* Configure the reference clock driver */
	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);

	if (soc->num_ports > 2)
		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
}

static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;

	pcie->pex_clk = devm_clk_get(dev, "pex");
	if (IS_ERR(pcie->pex_clk))
		return PTR_ERR(pcie->pex_clk);

	pcie->afi_clk = devm_clk_get(dev, "afi");
	if (IS_ERR(pcie->afi_clk))
		return PTR_ERR(pcie->afi_clk);

	pcie->pll_e = devm_clk_get(dev, "pll_e");
	if (IS_ERR(pcie->pll_e))
		return PTR_ERR(pcie->pll_e);

	if (soc->has_cml_clk) {
		pcie->cml_clk = devm_clk_get(dev, "cml");
		if (IS_ERR(pcie->cml_clk))
			return PTR_ERR(pcie->cml_clk);
	}

	return 0;
}

static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;

	pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
	if (IS_ERR(pcie->pex_rst))
		return PTR_ERR(pcie->pex_rst);

	pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
	if (IS_ERR(pcie->afi_rst))
		return PTR_ERR(pcie->afi_rst);

	pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
	if (IS_ERR(pcie->pcie_xrst))
		return PTR_ERR(pcie->pcie_xrst);

	return 0;
}

static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	pcie->phy = devm_phy_optional_get(dev, "pcie");
	if (IS_ERR(pcie->phy)) {
		err = PTR_ERR(pcie->phy);
		dev_err(dev, "failed to get PHY: %d\n", err);
		return err;
	}

	err = phy_init(pcie->phy);
	if (err < 0) {
		dev_err(dev, "failed to initialize PHY: %d\n", err);
		return err;
	}

	pcie->legacy_phy = true;

	return 0;
}

static struct phy *devm_of_phy_optional_get_index(struct device *dev,
						  struct device_node *np,
						  const char *consumer,
						  unsigned int index)
{
	struct phy *phy;
	char *name;

	name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
	if (!name)
		return ERR_PTR(-ENOMEM);

	phy = devm_of_phy_get(dev, np, name);
	kfree(name);

	if (PTR_ERR(phy) == -ENODEV)
		phy = NULL;

	return phy;
}

static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	struct phy *phy;
	unsigned int i;
	int err;

	port->phys = devm_kcalloc(dev, port->lanes, sizeof(phy), GFP_KERNEL);
	if (!port->phys)
		return -ENOMEM;

	for (i = 0; i < port->lanes; i++) {
		phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
		if (IS_ERR(phy)) {
			dev_err(dev, "failed to get PHY#%u: %ld\n", i,
				PTR_ERR(phy));
			return PTR_ERR(phy);
		}

		err = phy_init(phy);
		if (err < 0) {
			dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
				err);
			return err;
		}

		port->phys[i] = phy;
	}

	return 0;
}

static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct device_node *np = pcie->dev->of_node;
	struct tegra_pcie_port *port;
	int err;

	if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
		return tegra_pcie_phys_get_legacy(pcie);

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_get_phys(port);
		if (err < 0)
			return err;
	}

	return 0;
}

static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
{
	struct tegra_pcie_port *port;
	struct device *dev = pcie->dev;
	int err, i;

	if (pcie->legacy_phy) {
		err = phy_exit(pcie->phy);
		if (err < 0)
			dev_err(dev, "failed to teardown PHY: %d\n", err);
		return;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		for (i = 0; i < port->lanes; i++) {
			err = phy_exit(port->phys[i]);
			if (err < 0)
				dev_err(dev, "failed to teardown PHY#%u: %d\n",
					i, err);
		}
	}
}

static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(dev, "failed to get resets: %d\n", err);
		return err;
	}

	if (soc->program_uphy) {
		err = tegra_pcie_phys_get(pcie);
		if (err < 0) {
			dev_err(dev, "failed to get PHYs: %d\n", err);
			return err;
		}
	}

	pcie->pads = devm_platform_ioremap_resource_byname(pdev, "pads");
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto phys_put;
	}

	pcie->afi = devm_platform_ioremap_resource_byname(pdev, "afi");
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto phys_put;
	}

	/* request configuration space, but remap later, on demand */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto phys_put;
	}

	pcie->cs = *res;

	/* constrain configuration space to 4 KiB */
	pcie->cs.end = pcie->cs.start + SZ_4K - 1;

	pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
	if (IS_ERR(pcie->cfg)) {
		err = PTR_ERR(pcie->cfg);
		goto phys_put;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0)
		goto phys_put;

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(dev, "failed to register IRQ: %d\n", err);
		goto phys_put;
	}

	return 0;

phys_put:
	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);

	return err;
}

static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;

	if (pcie->irq > 0)
		free_irq(pcie->irq, pcie);

	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);

	return 0;
}

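/*
 * Perform the PME turn-off handshake for a root port: request the port to
 * send PME_Turn_Off, poll for the PME acknowledgement and then clear the
 * turn-off request again.
 */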
static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;
	u32 val;
	u8 ack_bit;

	val = afi_readl(pcie, AFI_PCIE_PME);
	val |= (0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);

	ack_bit = soc->ports[port->index].pme.ack_bit;
	err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val,
				 val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT);
	if (err)
		dev_err(pcie->dev, "PME Ack is not received on port: %d\n",
			port->index);

	usleep_range(10000, 11000);

	val = afi_readl(pcie, AFI_PCIE_PME);
	val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);
}

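/*
 * Chained handler for the AFI MSI interrupt: scan all eight AFI_MSI_VEC
 * registers and dispatch every pending vector into the MSI IRQ domain.
 */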
static void tegra_pcie_msi_irq(struct irq_desc *desc)
{
	struct tegra_pcie *pcie = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct tegra_msi *msi = &pcie->msi;
	struct device *dev = pcie->dev;
	unsigned int i;

	chained_irq_enter(chip, desc);

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC(i));

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			unsigned int index = i * 32 + offset;
			int ret;

			ret = generic_handle_domain_irq(msi->domain->parent, index);
			if (ret) {
				/*
				 * That's weird, who triggered this?
				 * Just clear it.
				 */
				dev_info(dev, "unexpected MSI\n");
				afi_writel(pcie, BIT(index % 32), AFI_MSI_VEC(index));
			}

			/* see if there's any more pending in this vector */
			reg = afi_readl(pcie, AFI_MSI_VEC(i));
		}
	}

	chained_irq_exit(chip, desc);
}

static void tegra_msi_top_irq_ack(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void tegra_msi_top_irq_mask(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void tegra_msi_top_irq_unmask(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip tegra_msi_top_chip = {
	.name		= "Tegra PCIe MSI",
	.irq_ack	= tegra_msi_top_irq_ack,
	.irq_mask	= tegra_msi_top_irq_mask,
	.irq_unmask	= tegra_msi_top_irq_unmask,
};

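/*
 * The bottom irqchip operates directly on the AFI MSI registers. Vectors
 * are grouped into eight banks of 32, with the bank selected by hwirq / 32
 * and the vector within the bank by hwirq % 32.
 */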
static void tegra_msi_irq_ack(struct irq_data *d)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	unsigned int index = d->hwirq / 32;

	/* clear the interrupt */
	afi_writel(pcie, BIT(d->hwirq % 32), AFI_MSI_VEC(index));
}

static void tegra_msi_irq_mask(struct irq_data *d)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	unsigned int index = d->hwirq / 32;
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&msi->mask_lock, flags);
	value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
	value &= ~BIT(d->hwirq % 32);
	afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
	spin_unlock_irqrestore(&msi->mask_lock, flags);
}

static void tegra_msi_irq_unmask(struct irq_data *d)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	unsigned int index = d->hwirq / 32;
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&msi->mask_lock, flags);
	value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
	value |= BIT(d->hwirq % 32);
	afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
	spin_unlock_irqrestore(&msi->mask_lock, flags);
}

static int tegra_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(data);

	msg->address_lo = lower_32_bits(msi->phys);
	msg->address_hi = upper_32_bits(msi->phys);
	msg->data = data->hwirq;
}

static struct irq_chip tegra_msi_bottom_chip = {
	.name			= "Tegra MSI",
	.irq_ack		= tegra_msi_irq_ack,
	.irq_mask		= tegra_msi_irq_mask,
	.irq_unmask		= tegra_msi_irq_unmask,
	.irq_set_affinity	= tegra_msi_set_affinity,
	.irq_compose_msi_msg	= tegra_compose_msi_msg,
};

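/*
 * Allocate a naturally aligned, power-of-two sized range of MSI vectors
 * from the bitmap of used vectors.
 */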
static int tegra_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *args)
{
	struct tegra_msi *msi = domain->host_data;
	unsigned int i;
	int hwirq;

	mutex_lock(&msi->map_lock);

	hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));

	mutex_unlock(&msi->map_lock);

	if (hwirq < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &tegra_msi_bottom_chip, domain->host_data,
				    handle_edge_irq, NULL, NULL);

	tegra_cpuidle_pcie_irqs_in_use();

	return 0;
}

static void tegra_msi_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct tegra_msi *msi = domain->host_data;

	mutex_lock(&msi->map_lock);

	bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));

	mutex_unlock(&msi->map_lock);
}

static const struct irq_domain_ops tegra_msi_domain_ops = {
	.alloc = tegra_msi_domain_alloc,
	.free = tegra_msi_domain_free,
};

static struct msi_domain_info tegra_msi_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX),
	.chip	= &tegra_msi_top_chip,
};

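/*
 * Create the hierarchical MSI domain: a linear domain for the 256 hardware
 * vectors, with a PCI MSI domain stacked on top of it.
 */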
static int tegra_allocate_domains(struct tegra_msi *msi)
{
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
	struct irq_domain *parent;

	parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
					  &tegra_msi_domain_ops, msi);
	if (!parent) {
		dev_err(pcie->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}
	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);

	msi->domain = pci_msi_create_irq_domain(fwnode, &tegra_msi_info, parent);
	if (!msi->domain) {
		dev_err(pcie->dev, "failed to create MSI domain\n");
		irq_domain_remove(parent);
		return -ENOMEM;
	}

	return 0;
}

static void tegra_free_domains(struct tegra_msi *msi)
{
	struct irq_domain *parent = msi->domain->parent;

	irq_domain_remove(msi->domain);
	irq_domain_remove(parent);
}

static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct tegra_msi *msi = &pcie->msi;
	struct device *dev = pcie->dev;
	int err;

	mutex_init(&msi->map_lock);
	spin_lock_init(&msi->mask_lock);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = tegra_allocate_domains(msi);
		if (err)
			return err;
	}

	err = platform_get_irq_byname(pdev, "msi");
	if (err < 0)
		goto free_irq_domain;

	msi->irq = err;

	irq_set_chained_handler_and_data(msi->irq, tegra_pcie_msi_irq, pcie);

	/* Though the PCIe controller can address a >32-bit address space, to
	 * facilitate endpoints that support only a 32-bit MSI target address,
	 * the mask is set to 32 bits to make sure that the MSI target address
	 * is always a 32-bit address.
	 */
1764 	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
1765 	if (err < 0) {
1766 		dev_err(dev, "failed to set DMA coherent mask: %d\n", err);
1767 		goto free_irq;
1768 	}
1769 
1770 	msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
1771 				    DMA_ATTR_NO_KERNEL_MAPPING);
1772 	if (!msi->virt) {
1773 		dev_err(dev, "failed to allocate DMA memory for MSI\n");
1774 		err = -ENOMEM;
1775 		goto free_irq;
1776 	}
1777 
1778 	return 0;
1779 
1780 free_irq:
1781 	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
1782 free_irq_domain:
1783 	if (IS_ENABLED(CONFIG_PCI_MSI))
1784 		tegra_free_domains(msi);
1785 
1786 	return err;
1787 }
1788 
1789 static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1790 {
1791 	const struct tegra_pcie_soc *soc = pcie->soc;
1792 	struct tegra_msi *msi = &pcie->msi;
1793 	u32 reg, msi_state[INT_PCI_MSI_NR / 32];
1794 	int i;
1795 
1796 	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1797 	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
1798 	/* this register is in 4K increments */
1799 	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1800 
1801 	/* Restore the MSI allocation state */
1802 	bitmap_to_arr32(msi_state, msi->used, INT_PCI_MSI_NR);
1803 	for (i = 0; i < ARRAY_SIZE(msi_state); i++)
1804 		afi_writel(pcie, msi_state[i], AFI_MSI_EN_VEC(i));
1805 
1806 	/* and unmask the MSI interrupt */
1807 	reg = afi_readl(pcie, AFI_INTR_MASK);
1808 	reg |= AFI_INTR_MASK_MSI_MASK;
1809 	afi_writel(pcie, reg, AFI_INTR_MASK);
1810 }
1811 
1812 static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
1813 {
1814 	struct tegra_msi *msi = &pcie->msi;
1815 	unsigned int i, irq;
1816 
1817 	dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
1818 		       DMA_ATTR_NO_KERNEL_MAPPING);
1819 
1820 	for (i = 0; i < INT_PCI_MSI_NR; i++) {
1821 		irq = irq_find_mapping(msi->domain, i);
1822 		if (irq > 0)
1823 			irq_domain_free_irqs(irq, 1);
1824 	}
1825 
1826 	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
1827 
1828 	if (IS_ENABLED(CONFIG_PCI_MSI))
1829 		tegra_free_domains(msi);
1830 }
1831 
1832 static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1833 {
1834 	u32 value;
1835 
1836 	/* mask the MSI interrupt */
1837 	value = afi_readl(pcie, AFI_INTR_MASK);
1838 	value &= ~AFI_INTR_MASK_MSI_MASK;
1839 	afi_writel(pcie, value, AFI_INTR_MASK);
1840 
1841 	return 0;
1842 }
1843 
1844 static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie)
1845 {
1846 	u32 value;
1847 
1848 	value = afi_readl(pcie, AFI_INTR_MASK);
1849 	value &= ~AFI_INTR_MASK_INT_MASK;
1850 	afi_writel(pcie, value, AFI_INTR_MASK);
1851 }
1852 
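/*
 * Translate the lane configuration parsed from DT into the SoC-specific
 * XBAR setting. The lanes argument packs one byte per root port, with port
 * N's lane count in bits [8N+7:8N]; for example, 0x00000204 describes four
 * lanes on port 0 and two lanes on port 1 (the Tegra30 "4x1, 2x1" case).
 */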
1853 static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1854 				      u32 *xbar)
1855 {
1856 	struct device *dev = pcie->dev;
1857 	struct device_node *np = dev->of_node;
1858 
1859 	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
1860 		switch (lanes) {
1861 		case 0x010004:
1862 			dev_info(dev, "4x1, 1x1 configuration\n");
1863 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
1864 			return 0;
1865 
1866 		case 0x010102:
1867 			dev_info(dev, "2x1, 1x1, 1x1 configuration\n");
1868 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1869 			return 0;
1870 
1871 		case 0x010101:
1872 			dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
1873 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
1874 			return 0;
1875 
1876 		default:
1877 			dev_info(dev, "invalid lane configuration in DT, "
1878 				 "falling back to default 2x1, 1x1, 1x1 "
1879 				 "configuration\n");
1880 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1881 			return 0;
1882 		}
1883 	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
1884 		   of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
1885 		switch (lanes) {
1886 		case 0x0000104:
1887 			dev_info(dev, "4x1, 1x1 configuration\n");
1888 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1889 			return 0;
1890 
1891 		case 0x0000102:
1892 			dev_info(dev, "2x1, 1x1 configuration\n");
1893 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1894 			return 0;
1895 		}
1896 	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1897 		switch (lanes) {
1898 		case 0x00000204:
1899 			dev_info(dev, "4x1, 2x1 configuration\n");
1900 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1901 			return 0;
1902 
1903 		case 0x00020202:
1904 			dev_info(dev, "2x3 configuration\n");
1905 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1906 			return 0;
1907 
1908 		case 0x00010104:
1909 			dev_info(dev, "4x1, 1x2 configuration\n");
1910 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1911 			return 0;
1912 		}
1913 	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1914 		switch (lanes) {
1915 		case 0x00000004:
1916 			dev_info(dev, "single-mode configuration\n");
1917 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1918 			return 0;
1919 
1920 		case 0x00000202:
1921 			dev_info(dev, "dual-mode configuration\n");
1922 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1923 			return 0;
1924 		}
1925 	}
1926 
1927 	return -EINVAL;
1928 }
1929 
1930 /*
1931  * Check whether a given set of supplies is available in a device tree node.
1932  * This is used to check whether the new or the legacy device tree bindings
1933  * should be used.
1934  */
1935 static bool of_regulator_bulk_available(struct device_node *np,
1936 					struct regulator_bulk_data *supplies,
1937 					unsigned int num_supplies)
1938 {
1939 	char property[32];
1940 	unsigned int i;
1941 
1942 	for (i = 0; i < num_supplies; i++) {
1943 		snprintf(property, sizeof(property), "%s-supply", supplies[i].supply);
1944 
1945 		if (of_find_property(np, property, NULL) == NULL)
1946 			return false;
1947 	}
1948 
1949 	return true;
1950 }
1951 
1952 /*
1953  * Old versions of the device tree binding for this device used a set of power
1954  * supplies that didn't match the hardware inputs. This happened to work for a
1955  * number of cases but is not future-proof. However, to preserve backwards-
1956  * compatibility with old device trees, this function will try to use the old
1957  * set of supplies.
1958  */
1959 static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1960 {
1961 	struct device *dev = pcie->dev;
1962 	struct device_node *np = dev->of_node;
1963 
1964 	if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1965 		pcie->num_supplies = 3;
1966 	else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1967 		pcie->num_supplies = 2;
1968 
1969 	if (pcie->num_supplies == 0) {
1970 		dev_err(dev, "device %pOF not supported in legacy mode\n", np);
1971 		return -ENODEV;
1972 	}
1973 
1974 	pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1975 				      sizeof(*pcie->supplies),
1976 				      GFP_KERNEL);
1977 	if (!pcie->supplies)
1978 		return -ENOMEM;
1979 
1980 	pcie->supplies[0].supply = "pex-clk";
1981 	pcie->supplies[1].supply = "vdd";
1982 
1983 	if (pcie->num_supplies > 2)
1984 		pcie->supplies[2].supply = "avdd";
1985 
1986 	return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
1987 }
1988 
1989 /*
1990  * Obtains the list of regulators required for a particular generation of the
1991  * IP block.
1992  *
1993  * This would've been nice to do simply by providing static tables for use
1994  * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
1995  * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
1996  * and either seems to be optional depending on which ports are being used.
1997  */
1998 static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
1999 {
2000 	struct device *dev = pcie->dev;
2001 	struct device_node *np = dev->of_node;
2002 	unsigned int i = 0;
2003 
2004 	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
2005 		pcie->num_supplies = 4;
2006 
2007 		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
2008 					      sizeof(*pcie->supplies),
2009 					      GFP_KERNEL);
2010 		if (!pcie->supplies)
2011 			return -ENOMEM;
2012 
2013 		pcie->supplies[i++].supply = "dvdd-pex";
2014 		pcie->supplies[i++].supply = "hvdd-pex-pll";
2015 		pcie->supplies[i++].supply = "hvdd-pex";
2016 		pcie->supplies[i++].supply = "vddio-pexctl-aud";
2017 	} else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
2018 		pcie->num_supplies = 3;
2019 
2020 		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
2021 					      sizeof(*pcie->supplies),
2022 					      GFP_KERNEL);
2023 		if (!pcie->supplies)
2024 			return -ENOMEM;
2025 
2026 		pcie->supplies[i++].supply = "hvddio-pex";
2027 		pcie->supplies[i++].supply = "dvddio-pex";
2028 		pcie->supplies[i++].supply = "vddio-pex-ctl";
2029 	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
2030 		pcie->num_supplies = 4;
2031 
2032 		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2033 					      sizeof(*pcie->supplies),
2034 					      GFP_KERNEL);
2035 		if (!pcie->supplies)
2036 			return -ENOMEM;
2037 
2038 		pcie->supplies[i++].supply = "avddio-pex";
2039 		pcie->supplies[i++].supply = "dvddio-pex";
2040 		pcie->supplies[i++].supply = "hvdd-pex";
2041 		pcie->supplies[i++].supply = "vddio-pex-ctl";
2042 	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
2043 		bool need_pexa = false, need_pexb = false;
2044 
2045 		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
2046 		if (lane_mask & 0x0f)
2047 			need_pexa = true;
2048 
2049 		/* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
2050 		if (lane_mask & 0x30)
2051 			need_pexb = true;
2052 
2053 		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
2054 					 (need_pexb ? 2 : 0);
2055 
2056 		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2057 					      sizeof(*pcie->supplies),
2058 					      GFP_KERNEL);
2059 		if (!pcie->supplies)
2060 			return -ENOMEM;
2061 
2062 		pcie->supplies[i++].supply = "avdd-pex-pll";
2063 		pcie->supplies[i++].supply = "hvdd-pex";
2064 		pcie->supplies[i++].supply = "vddio-pex-ctl";
2065 		pcie->supplies[i++].supply = "avdd-plle";
2066 
2067 		if (need_pexa) {
2068 			pcie->supplies[i++].supply = "avdd-pexa";
2069 			pcie->supplies[i++].supply = "vdd-pexa";
2070 		}
2071 
2072 		if (need_pexb) {
2073 			pcie->supplies[i++].supply = "avdd-pexb";
2074 			pcie->supplies[i++].supply = "vdd-pexb";
2075 		}
2076 	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
2077 		pcie->num_supplies = 5;
2078 
2079 		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2080 					      sizeof(*pcie->supplies),
2081 					      GFP_KERNEL);
2082 		if (!pcie->supplies)
2083 			return -ENOMEM;
2084 
2085 		pcie->supplies[0].supply = "avdd-pex";
2086 		pcie->supplies[1].supply = "vdd-pex";
2087 		pcie->supplies[2].supply = "avdd-pex-pll";
2088 		pcie->supplies[3].supply = "avdd-plle";
2089 		pcie->supplies[4].supply = "vddio-pex-clk";
2090 	}
2091 
2092 	if (of_regulator_bulk_available(np, pcie->supplies,
2093 					pcie->num_supplies))
2094 		return devm_regulator_bulk_get(dev, pcie->num_supplies,
2095 					       pcie->supplies);
2096 
2097 	/*
2098 	 * If not all regulators are available for this new scheme, assume
2099 	 * that the device tree complies with an older version of the device
2100 	 * tree binding.
2101 	 */
2102 	dev_info(dev, "using legacy DT binding for power supplies\n");
2103 
2104 	devm_kfree(dev, pcie->supplies);
2105 	pcie->num_supplies = 0;
2106 
2107 	return tegra_pcie_get_legacy_regulators(pcie);
2108 }
2109 
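/*
 * Parse the controller's DT node. Each child node describes one root port:
 * a "reg"-encoded device/function number, "nvidia,num-lanes" and an
 * optional "reset-gpios" used to drive PERST#. A minimal port node
 * (illustrative values, not taken from a real board) might look like:
 *
 *	pci@1,0 {
 *		reg = <0x000800 0 0 0 0>;
 *		nvidia,num-lanes = <4>;
 *	};
 */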
2110 static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
2111 {
2112 	struct device *dev = pcie->dev;
2113 	struct device_node *np = dev->of_node, *port;
2114 	const struct tegra_pcie_soc *soc = pcie->soc;
2115 	u32 lanes = 0, mask = 0;
2116 	unsigned int lane = 0;
2117 	int err;
2118 
2119 	/* parse root ports */
2120 	for_each_child_of_node(np, port) {
2121 		struct tegra_pcie_port *rp;
2122 		unsigned int index;
2123 		u32 value;
2124 		char *label;
2125 
2126 		err = of_pci_get_devfn(port);
2127 		if (err < 0) {
2128 			dev_err(dev, "failed to parse address: %d\n", err);
2129 			goto err_node_put;
2130 		}
2131 
2132 		index = PCI_SLOT(err);
2133 
2134 		if (index < 1 || index > soc->num_ports) {
2135 			dev_err(dev, "invalid port number: %u\n", index);
2136 			err = -EINVAL;
2137 			goto err_node_put;
2138 		}
2139 
2140 		index--;
2141 
2142 		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
2143 		if (err < 0) {
2144 			dev_err(dev, "failed to parse # of lanes: %d\n",
2145 				err);
2146 			goto err_node_put;
2147 		}
2148 
2149 		if (value > 16) {
2150 			dev_err(dev, "invalid # of lanes: %u\n", value);
2151 			err = -EINVAL;
2152 			goto err_node_put;
2153 		}
2154 
2155 		lanes |= value << (index << 3);
2156 
2157 		if (!of_device_is_available(port)) {
2158 			lane += value;
2159 			continue;
2160 		}
2161 
2162 		mask |= ((1 << value) - 1) << lane;
2163 		lane += value;
2164 
2165 		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
2166 		if (!rp) {
2167 			err = -ENOMEM;
2168 			goto err_node_put;
2169 		}
2170 
2171 		err = of_address_to_resource(port, 0, &rp->regs);
2172 		if (err < 0) {
2173 			dev_err(dev, "failed to parse address: %d\n", err);
2174 			goto err_node_put;
2175 		}
2176 
2177 		INIT_LIST_HEAD(&rp->list);
2178 		rp->index = index;
2179 		rp->lanes = value;
2180 		rp->pcie = pcie;
2181 		rp->np = port;
2182 
2183 		rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
2184 		if (IS_ERR(rp->base)) {
2185 			err = PTR_ERR(rp->base);
2186 			goto err_node_put;
2187 		}
2188 
2189 		label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
2190 		if (!label) {
2191 			err = -ENOMEM;
2192 			goto err_node_put;
2193 		}
2194 
2195 		/*
2196 		 * devm_gpiod_get_from_of_node() returns -ENOENT if the
2197 		 * reset-gpios property is absent; in that case, fall back to
2198 		 * the AFI per-port register to toggle the PERST# SFIO line.
2199 		 */
2200 		rp->reset_gpio = devm_gpiod_get_from_of_node(dev, port,
2201 							     "reset-gpios", 0,
2202 							     GPIOD_OUT_LOW,
2203 							     label);
2204 		if (IS_ERR(rp->reset_gpio)) {
2205 			if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
2206 				rp->reset_gpio = NULL;
2207 			} else {
2208 				dev_err(dev, "failed to get reset GPIO: %ld\n",
2209 					PTR_ERR(rp->reset_gpio));
2210 				err = PTR_ERR(rp->reset_gpio);
2211 				goto err_node_put;
2212 			}
2213 		}
2214 
2215 		list_add_tail(&rp->list, &pcie->ports);
2216 	}
2217 
2218 	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
2219 	if (err < 0) {
2220 		dev_err(dev, "invalid lane configuration\n");
2221 		return err;
2222 	}
2223 
2224 	err = tegra_pcie_get_regulators(pcie, mask);
2225 	if (err < 0)
2226 		return err;
2227 
2228 	return 0;
2229 
2230 err_node_put:
2231 	of_node_put(port);
2232 	return err;
2233 }
2234 
2235 /*
2236  * FIXME: If there are no PCIe cards attached, then calling this function
2237  * can significantly increase boot time, as it runs through long timeout
2238  * loops for every port.
2239  */
2240 #define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
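/*
 * Override presence detection, then poll for the data link to come up:
 * first at the vendor level (RP_VEND_XP_DL_UP), then in the standard link
 * status register (DL_LINK_ACTIVE). The port is reset between the up to
 * three attempts.
 */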
2241 static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
2242 {
2243 	struct device *dev = port->pcie->dev;
2244 	unsigned int retries = 3;
2245 	unsigned long value;
2246 
2247 	/* override presence detection */
2248 	value = readl(port->base + RP_PRIV_MISC);
2249 	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
2250 	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
2251 	writel(value, port->base + RP_PRIV_MISC);
2252 
2253 	do {
2254 		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2255 
2256 		do {
2257 			value = readl(port->base + RP_VEND_XP);
2258 
2259 			if (value & RP_VEND_XP_DL_UP)
2260 				break;
2261 
2262 			usleep_range(1000, 2000);
2263 		} while (--timeout);
2264 
2265 		if (!timeout) {
2266 			dev_dbg(dev, "link %u down, retrying\n", port->index);
2267 			goto retry;
2268 		}
2269 
2270 		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2271 
2272 		do {
2273 			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2274 
2275 			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2276 				return true;
2277 
2278 			usleep_range(1000, 2000);
2279 		} while (--timeout);
2280 
2281 retry:
2282 		tegra_pcie_port_reset(port);
2283 	} while (--retries);
2284 
2285 	return false;
2286 }
2287 
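/*
 * Switch each link to 5.0 GT/s: program the target link speed, wait for any
 * in-progress training to complete, then set the Retrain Link bit and poll
 * until training finishes again.
 */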
2288 static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie)
2289 {
2290 	struct device *dev = pcie->dev;
2291 	struct tegra_pcie_port *port;
2292 	ktime_t deadline;
2293 	u32 value;
2294 
2295 	list_for_each_entry(port, &pcie->ports, list) {
2296 		/*
2297 		 * "Supported Link Speeds Vector" in "Link Capabilities 2"
2298 		 * is not supported by Tegra. tegra_pcie_change_link_speed()
2299 		 * is called only for Tegra chips that support Gen2, so
2300 		 * there is no harm in not verifying the supported link speed.
2301 		 */
2302 		value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
2303 		value &= ~PCI_EXP_LNKSTA_CLS;
2304 		value |= PCI_EXP_LNKSTA_CLS_5_0GB;
2305 		writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
2306 
2307 		/*
2308 		 * Poll until the link comes back from recovery, to avoid a
2309 		 * race condition.
2310 		 */
2311 		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2312 
2313 		while (ktime_before(ktime_get(), deadline)) {
2314 			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2315 			if ((value & PCI_EXP_LNKSTA_LT) == 0)
2316 				break;
2317 
2318 			usleep_range(2000, 3000);
2319 		}
2320 
2321 		if (value & PCI_EXP_LNKSTA_LT)
2322 			dev_warn(dev, "PCIe port %u link is in recovery\n",
2323 				 port->index);
2324 
2325 		/* Retrain the link */
2326 		value = readl(port->base + RP_LINK_CONTROL_STATUS);
2327 		value |= PCI_EXP_LNKCTL_RL;
2328 		writel(value, port->base + RP_LINK_CONTROL_STATUS);
2329 
2330 		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2331 
2332 		while (ktime_before(ktime_get(), deadline)) {
2333 			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2334 			if ((value & PCI_EXP_LNKSTA_LT) == 0)
2335 				break;
2336 
2337 			usleep_range(2000, 3000);
2338 		}
2339 
2340 		if (value & PCI_EXP_LNKSTA_LT)
2341 			dev_err(dev, "failed to retrain link of port %u\n",
2342 				port->index);
2343 	}
2344 }
2345 
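/*
 * Enable all root ports, release the LTSSM and prune every port whose link
 * does not come up; on SoCs that support it, the surviving links are then
 * bumped to Gen2 speed.
 */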
2346 static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
2347 {
2348 	struct device *dev = pcie->dev;
2349 	struct tegra_pcie_port *port, *tmp;
2350 
2351 	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2352 		dev_info(dev, "probing port %u, using %u lanes\n",
2353 			 port->index, port->lanes);
2354 
2355 		tegra_pcie_port_enable(port);
2356 	}
2357 
2358 	/* Start LTSSM from Tegra side */
2359 	reset_control_deassert(pcie->pcie_xrst);
2360 
2361 	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2362 		if (tegra_pcie_port_check_link(port))
2363 			continue;
2364 
2365 		dev_info(dev, "link %u down, ignoring\n", port->index);
2366 
2367 		tegra_pcie_port_disable(port);
2368 		tegra_pcie_port_free(port);
2369 	}
2370 
2371 	if (pcie->soc->has_gen2)
2372 		tegra_pcie_change_link_speed(pcie);
2373 }
2374 
2375 static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
2376 {
2377 	struct tegra_pcie_port *port, *tmp;
2378 
2379 	reset_control_assert(pcie->pcie_xrst);
2380 
2381 	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2382 		tegra_pcie_port_disable(port);
2383 }
2384 
2385 static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = {
2386 	{ .pme.turnoff_bit = 0, .pme.ack_bit =  5 },
2387 	{ .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
2388 };
2389 
2390 static const struct tegra_pcie_soc tegra20_pcie = {
2391 	.num_ports = 2,
2392 	.ports = tegra20_pcie_ports,
2393 	.msi_base_shift = 0,
2394 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
2395 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
2396 	.pads_refclk_cfg0 = 0xfa5cfa5c,
2397 	.has_pex_clkreq_en = false,
2398 	.has_pex_bias_ctrl = false,
2399 	.has_intr_prsnt_sense = false,
2400 	.has_cml_clk = false,
2401 	.has_gen2 = false,
2402 	.force_pca_enable = false,
2403 	.program_uphy = true,
2404 	.update_clamp_threshold = false,
2405 	.program_deskew_time = false,
2406 	.update_fc_timer = false,
2407 	.has_cache_bars = true,
2408 	.ectl.enable = false,
2409 };
2410 
2411 static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
2412 	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
2413 	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
2414 	{ .pme.turnoff_bit = 16, .pme.ack_bit = 18 },
2415 };
2416 
2417 static const struct tegra_pcie_soc tegra30_pcie = {
2418 	.num_ports = 3,
2419 	.ports = tegra30_pcie_ports,
2420 	.msi_base_shift = 8,
2421 	.afi_pex2_ctrl = 0x128,
2422 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2423 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2424 	.pads_refclk_cfg0 = 0xfa5cfa5c,
2425 	.pads_refclk_cfg1 = 0xfa5cfa5c,
2426 	.has_pex_clkreq_en = true,
2427 	.has_pex_bias_ctrl = true,
2428 	.has_intr_prsnt_sense = true,
2429 	.has_cml_clk = true,
2430 	.has_gen2 = false,
2431 	.force_pca_enable = false,
2432 	.program_uphy = true,
2433 	.update_clamp_threshold = false,
2434 	.program_deskew_time = false,
2435 	.update_fc_timer = false,
2436 	.has_cache_bars = false,
2437 	.ectl.enable = false,
2438 };
2439 
2440 static const struct tegra_pcie_soc tegra124_pcie = {
2441 	.num_ports = 2,
2442 	.ports = tegra20_pcie_ports,
2443 	.msi_base_shift = 8,
2444 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2445 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2446 	.pads_refclk_cfg0 = 0x44ac44ac,
2447 	.has_pex_clkreq_en = true,
2448 	.has_pex_bias_ctrl = true,
2449 	.has_intr_prsnt_sense = true,
2450 	.has_cml_clk = true,
2451 	.has_gen2 = true,
2452 	.force_pca_enable = false,
2453 	.program_uphy = true,
2454 	.update_clamp_threshold = true,
2455 	.program_deskew_time = false,
2456 	.update_fc_timer = false,
2457 	.has_cache_bars = false,
2458 	.ectl.enable = false,
2459 };
2460 
2461 static const struct tegra_pcie_soc tegra210_pcie = {
2462 	.num_ports = 2,
2463 	.ports = tegra20_pcie_ports,
2464 	.msi_base_shift = 8,
2465 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2466 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2467 	.pads_refclk_cfg0 = 0x90b890b8,
2468 	/* FC threshold is bit[25:18] */
2469 	.update_fc_threshold = 0x01800000,
2470 	.has_pex_clkreq_en = true,
2471 	.has_pex_bias_ctrl = true,
2472 	.has_intr_prsnt_sense = true,
2473 	.has_cml_clk = true,
2474 	.has_gen2 = true,
2475 	.force_pca_enable = true,
2476 	.program_uphy = true,
2477 	.update_clamp_threshold = true,
2478 	.program_deskew_time = true,
2479 	.update_fc_timer = true,
2480 	.has_cache_bars = false,
2481 	.ectl = {
2482 		.regs = {
2483 			.rp_ectl_2_r1 = 0x0000000f,
2484 			.rp_ectl_4_r1 = 0x00000067,
2485 			.rp_ectl_5_r1 = 0x55010000,
2486 			.rp_ectl_6_r1 = 0x00000001,
2487 			.rp_ectl_2_r2 = 0x0000008f,
2488 			.rp_ectl_4_r2 = 0x000000c7,
2489 			.rp_ectl_5_r2 = 0x55010000,
2490 			.rp_ectl_6_r2 = 0x00000001,
2491 		},
2492 		.enable = true,
2493 	},
2494 };
2495 
2496 static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
2497 	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
2498 	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
2499 	{ .pme.turnoff_bit = 12, .pme.ack_bit = 14 },
2500 };
2501 
2502 static const struct tegra_pcie_soc tegra186_pcie = {
2503 	.num_ports = 3,
2504 	.ports = tegra186_pcie_ports,
2505 	.msi_base_shift = 8,
2506 	.afi_pex2_ctrl = 0x19c,
2507 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2508 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2509 	.pads_refclk_cfg0 = 0x80b880b8,
2510 	.pads_refclk_cfg1 = 0x000480b8,
2511 	.has_pex_clkreq_en = true,
2512 	.has_pex_bias_ctrl = true,
2513 	.has_intr_prsnt_sense = true,
2514 	.has_cml_clk = false,
2515 	.has_gen2 = true,
2516 	.force_pca_enable = false,
2517 	.program_uphy = false,
2518 	.update_clamp_threshold = false,
2519 	.program_deskew_time = false,
2520 	.update_fc_timer = false,
2521 	.has_cache_bars = false,
2522 	.ectl.enable = false,
2523 };
2524 
2525 static const struct of_device_id tegra_pcie_of_match[] = {
2526 	{ .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
2527 	{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
2528 	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
2529 	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
2530 	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
2531 	{ },
2532 };
2533 MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
2534 
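/*
 * seq_file backend for the "ports" debugfs file, which reports the link
 * state of every root port. Illustrative output:
 *
 *	Index  Status
 *	 0     up, active
 *	 1
 */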
2535 static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
2536 {
2537 	struct tegra_pcie *pcie = s->private;
2538 
2539 	if (list_empty(&pcie->ports))
2540 		return NULL;
2541 
2542 	seq_puts(s, "Index  Status\n");
2543 
2544 	return seq_list_start(&pcie->ports, *pos);
2545 }
2546 
2547 static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
2548 {
2549 	struct tegra_pcie *pcie = s->private;
2550 
2551 	return seq_list_next(v, &pcie->ports, pos);
2552 }
2553 
2554 static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
2555 {
2556 }
2557 
2558 static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
2559 {
2560 	bool up = false, active = false;
2561 	struct tegra_pcie_port *port;
2562 	unsigned int value;
2563 
2564 	port = list_entry(v, struct tegra_pcie_port, list);
2565 
2566 	value = readl(port->base + RP_VEND_XP);
2567 
2568 	if (value & RP_VEND_XP_DL_UP)
2569 		up = true;
2570 
2571 	value = readl(port->base + RP_LINK_CONTROL_STATUS);
2572 
2573 	if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2574 		active = true;
2575 
2576 	seq_printf(s, "%2u     ", port->index);
2577 
2578 	if (up)
2579 		seq_puts(s, "up");
2580 
2581 	if (active) {
2582 		if (up)
2583 			seq_puts(s, ", ");
2584 
2585 		seq_puts(s, "active");
2586 	}
2587 
2588 	seq_puts(s, "\n");
2589 	return 0;
2590 }
2591 
2592 static const struct seq_operations tegra_pcie_ports_sops = {
2593 	.start = tegra_pcie_ports_seq_start,
2594 	.next = tegra_pcie_ports_seq_next,
2595 	.stop = tegra_pcie_ports_seq_stop,
2596 	.show = tegra_pcie_ports_seq_show,
2597 };
2598 
2599 DEFINE_SEQ_ATTRIBUTE(tegra_pcie_ports);
2600 
2601 static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
2602 {
2603 	debugfs_remove_recursive(pcie->debugfs);
2604 	pcie->debugfs = NULL;
2605 }
2606 
2607 static void tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2608 {
2609 	pcie->debugfs = debugfs_create_dir("pcie", NULL);
2610 
2611 	debugfs_create_file("ports", 0444, pcie->debugfs, pcie,
2612 			    &tegra_pcie_ports_fops);
2613 }
2614 
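/*
 * Probe order matters: the DT must be parsed and resources acquired before
 * MSI setup, and the controller is powered up via runtime PM before the
 * host bridge is registered. The error labels unwind in reverse order.
 */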
2615 static int tegra_pcie_probe(struct platform_device *pdev)
2616 {
2617 	struct device *dev = &pdev->dev;
2618 	struct pci_host_bridge *host;
2619 	struct tegra_pcie *pcie;
2620 	int err;
2621 
2622 	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
2623 	if (!host)
2624 		return -ENOMEM;
2625 
2626 	pcie = pci_host_bridge_priv(host);
2627 	host->sysdata = pcie;
2628 	platform_set_drvdata(pdev, pcie);
2629 
2630 	pcie->soc = of_device_get_match_data(dev);
2631 	INIT_LIST_HEAD(&pcie->ports);
2632 	pcie->dev = dev;
2633 
2634 	err = tegra_pcie_parse_dt(pcie);
2635 	if (err < 0)
2636 		return err;
2637 
2638 	err = tegra_pcie_get_resources(pcie);
2639 	if (err < 0) {
2640 		dev_err(dev, "failed to request resources: %d\n", err);
2641 		return err;
2642 	}
2643 
2644 	err = tegra_pcie_msi_setup(pcie);
2645 	if (err < 0) {
2646 		dev_err(dev, "failed to enable MSI support: %d\n", err);
2647 		goto put_resources;
2648 	}
2649 
2650 	pm_runtime_enable(pcie->dev);
2651 	err = pm_runtime_get_sync(pcie->dev);
2652 	if (err < 0) {
2653 		dev_err(dev, "failed to enable PCIe controller: %d\n", err);
2654 		goto pm_runtime_put;
2655 	}
2656 
2657 	host->ops = &tegra_pcie_ops;
2658 	host->map_irq = tegra_pcie_map_irq;
2659 
2660 	err = pci_host_probe(host);
2661 	if (err < 0) {
2662 		dev_err(dev, "failed to register host: %d\n", err);
2663 		goto pm_runtime_put;
2664 	}
2665 
2666 	if (IS_ENABLED(CONFIG_DEBUG_FS))
2667 		tegra_pcie_debugfs_init(pcie);
2668 
2669 	return 0;
2670 
2671 pm_runtime_put:
2672 	pm_runtime_put_sync(pcie->dev);
2673 	pm_runtime_disable(pcie->dev);
2674 	tegra_pcie_msi_teardown(pcie);
2675 put_resources:
2676 	tegra_pcie_put_resources(pcie);
2677 	return err;
2678 }
2679 
2680 static int tegra_pcie_remove(struct platform_device *pdev)
2681 {
2682 	struct tegra_pcie *pcie = platform_get_drvdata(pdev);
2683 	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
2684 	struct tegra_pcie_port *port, *tmp;
2685 
2686 	if (IS_ENABLED(CONFIG_DEBUG_FS))
2687 		tegra_pcie_debugfs_exit(pcie);
2688 
2689 	pci_stop_root_bus(host->bus);
2690 	pci_remove_root_bus(host->bus);
2691 	pm_runtime_put_sync(pcie->dev);
2692 	pm_runtime_disable(pcie->dev);
2693 
2694 	if (IS_ENABLED(CONFIG_PCI_MSI))
2695 		tegra_pcie_msi_teardown(pcie);
2696 
2697 	tegra_pcie_put_resources(pcie);
2698 
2699 	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2700 		tegra_pcie_port_free(port);
2701 
2702 	return 0;
2703 }
2704 
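/*
 * The suspend/resume callbacks below double as the runtime PM callbacks;
 * see tegra_pcie_pm_ops at the bottom of this file.
 */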
2705 static int tegra_pcie_pm_suspend(struct device *dev)
2706 {
2707 	struct tegra_pcie *pcie = dev_get_drvdata(dev);
2708 	struct tegra_pcie_port *port;
2709 	int err;
2710 
2711 	list_for_each_entry(port, &pcie->ports, list)
2712 		tegra_pcie_pme_turnoff(port);
2713 
2714 	tegra_pcie_disable_ports(pcie);
2715 
2716 	/*
2717 	 * AFI_INTR is unmasked in tegra_pcie_enable_controller(); mask it to
2718 	 * avoid unwanted interrupts raised by AFI after pex_rst is asserted.
2719 	 */
2720 	tegra_pcie_disable_interrupts(pcie);
2721 
2722 	if (pcie->soc->program_uphy) {
2723 		err = tegra_pcie_phy_power_off(pcie);
2724 		if (err < 0)
2725 			dev_err(dev, "failed to power off PHY(s): %d\n", err);
2726 	}
2727 
2728 	reset_control_assert(pcie->pex_rst);
2729 	clk_disable_unprepare(pcie->pex_clk);
2730 
2731 	if (IS_ENABLED(CONFIG_PCI_MSI))
2732 		tegra_pcie_disable_msi(pcie);
2733 
2734 	pinctrl_pm_select_idle_state(dev);
2735 	tegra_pcie_power_off(pcie);
2736 
2737 	return 0;
2738 }
2739 
2740 static int tegra_pcie_pm_resume(struct device *dev)
2741 {
2742 	struct tegra_pcie *pcie = dev_get_drvdata(dev);
2743 	int err;
2744 
2745 	err = tegra_pcie_power_on(pcie);
2746 	if (err) {
2747 		dev_err(dev, "failed to power on PCIe controller: %d\n", err);
2748 		return err;
2749 	}
2750 
2751 	err = pinctrl_pm_select_default_state(dev);
2752 	if (err < 0) {
2753 		dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err);
2754 		goto poweroff;
2755 	}
2756 
2757 	tegra_pcie_enable_controller(pcie);
2758 	tegra_pcie_setup_translations(pcie);
2759 
2760 	if (IS_ENABLED(CONFIG_PCI_MSI))
2761 		tegra_pcie_enable_msi(pcie);
2762 
2763 	err = clk_prepare_enable(pcie->pex_clk);
2764 	if (err) {
2765 		dev_err(dev, "failed to enable PEX clock: %d\n", err);
2766 		goto pex_dpd_enable;
2767 	}
2768 
2769 	reset_control_deassert(pcie->pex_rst);
2770 
2771 	if (pcie->soc->program_uphy) {
2772 		err = tegra_pcie_phy_power_on(pcie);
2773 		if (err < 0) {
2774 			dev_err(dev, "failed to power on PHY(s): %d\n", err);
2775 			goto disable_pex_clk;
2776 		}
2777 	}
2778 
2779 	tegra_pcie_apply_pad_settings(pcie);
2780 	tegra_pcie_enable_ports(pcie);
2781 
2782 	return 0;
2783 
2784 disable_pex_clk:
2785 	reset_control_assert(pcie->pex_rst);
2786 	clk_disable_unprepare(pcie->pex_clk);
2787 pex_dpd_enable:
2788 	pinctrl_pm_select_idle_state(dev);
2789 poweroff:
2790 	tegra_pcie_power_off(pcie);
2791 
2792 	return err;
2793 }
2794 
2795 static const struct dev_pm_ops tegra_pcie_pm_ops = {
2796 	RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
2797 	NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume)
2798 };
2799 
2800 static struct platform_driver tegra_pcie_driver = {
2801 	.driver = {
2802 		.name = "tegra-pcie",
2803 		.of_match_table = tegra_pcie_of_match,
2804 		.suppress_bind_attrs = true,
2805 		.pm = &tegra_pcie_pm_ops,
2806 	},
2807 	.probe = tegra_pcie_probe,
2808 	.remove = tegra_pcie_remove,
2809 };
2810 module_platform_driver(tegra_pcie_driver);
2811 MODULE_LICENSE("GPL");
2812