1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * PCIe host controller driver for Freescale i.MX6 SoCs
4 *
5 * Copyright (C) 2013 Kosagi
6 * https://www.kosagi.com
7 *
8 * Author: Sean Cross <xobs@kosagi.com>
9 */
10
11 #include <linux/bitfield.h>
12 #include <linux/clk.h>
13 #include <linux/delay.h>
14 #include <linux/gpio.h>
15 #include <linux/kernel.h>
16 #include <linux/mfd/syscon.h>
17 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
18 #include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
19 #include <linux/module.h>
20 #include <linux/of_gpio.h>
21 #include <linux/of_device.h>
22 #include <linux/of_address.h>
23 #include <linux/pci.h>
24 #include <linux/platform_device.h>
25 #include <linux/regmap.h>
26 #include <linux/regulator/consumer.h>
27 #include <linux/resource.h>
28 #include <linux/signal.h>
29 #include <linux/types.h>
30 #include <linux/interrupt.h>
31 #include <linux/reset.h>
32 #include <linux/pm_domain.h>
33 #include <linux/pm_runtime.h>
34
35 #include "pcie-designware.h"
36
37 #define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9)
38 #define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN BIT(10)
39 #define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11)
40 #define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8)
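/* DBI base address of the second PCIe controller on i.MX8MQ; used below to tell the two controllers apart */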
41 #define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000
42
43 #define to_imx6_pcie(x) dev_get_drvdata((x)->dev)
44
45 enum imx6_pcie_variants {
46 IMX6Q,
47 IMX6SX,
48 IMX6QP,
49 IMX7D,
50 IMX8MQ,
51 };
52
53 #define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
54 #define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1)
55 #define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
56
57 struct imx6_pcie_drvdata {
58 enum imx6_pcie_variants variant;
59 u32 flags;
60 int dbi_length;
61 };
62
63 struct imx6_pcie {
64 struct dw_pcie *pci;
65 int reset_gpio;
66 bool gpio_active_high;
67 struct clk *pcie_bus;
68 struct clk *pcie_phy;
69 struct clk *pcie_inbound_axi;
70 struct clk *pcie;
71 struct clk *pcie_aux;
72 struct regmap *iomuxc_gpr;
73 u32 controller_id;
74 struct reset_control *pciephy_reset;
75 struct reset_control *apps_reset;
76 struct reset_control *turnoff_reset;
77 u32 tx_deemph_gen1;
78 u32 tx_deemph_gen2_3p5db;
79 u32 tx_deemph_gen2_6db;
80 u32 tx_swing_full;
81 u32 tx_swing_low;
82 struct regulator *vpcie;
83 void __iomem *phy_base;
84
85 /* power domain for pcie */
86 struct device *pd_pcie;
87 /* power domain for pcie phy */
88 struct device *pd_pcie_phy;
89 const struct imx6_pcie_drvdata *drvdata;
90 };
91
92 /* Parameters for waiting for the PCIe PHY PLL to lock on i.MX7 */
93 #define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200
94 #define PHY_PLL_LOCK_WAIT_TIMEOUT (2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)
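/* i.e. a total PLL lock timeout of 2000 * 200us = 400ms */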
95
96 /* PCIe Port Logic registers (memory-mapped) */
97 #define PL_OFFSET 0x700
98
99 #define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
100 #define PCIE_PHY_CTRL_DATA(x) FIELD_PREP(GENMASK(15, 0), (x))
101 #define PCIE_PHY_CTRL_CAP_ADR BIT(16)
102 #define PCIE_PHY_CTRL_CAP_DAT BIT(17)
103 #define PCIE_PHY_CTRL_WR BIT(18)
104 #define PCIE_PHY_CTRL_RD BIT(19)
105
106 #define PCIE_PHY_STAT (PL_OFFSET + 0x110)
107 #define PCIE_PHY_STAT_ACK BIT(16)
108
109 /* PHY registers (not memory-mapped) */
110 #define PCIE_PHY_ATEOVRD 0x10
111 #define PCIE_PHY_ATEOVRD_EN BIT(2)
112 #define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT 0
113 #define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK 0x1
114
115 #define PCIE_PHY_MPLL_OVRD_IN_LO 0x11
116 #define PCIE_PHY_MPLL_MULTIPLIER_SHIFT 2
117 #define PCIE_PHY_MPLL_MULTIPLIER_MASK 0x7f
118 #define PCIE_PHY_MPLL_MULTIPLIER_OVRD BIT(9)
119
120 #define PCIE_PHY_RX_ASIC_OUT 0x100D
121 #define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0)
122
123 /* iMX7 PCIe PHY registers */
124 #define PCIE_PHY_CMN_REG4 0x14
125 /* These are probably the bits that *aren't* DCC_FB_EN */
126 #define PCIE_PHY_CMN_REG4_DCC_FB_EN 0x29
127
128 #define PCIE_PHY_CMN_REG15 0x54
129 #define PCIE_PHY_CMN_REG15_DLY_4 BIT(2)
130 #define PCIE_PHY_CMN_REG15_PLL_PD BIT(5)
131 #define PCIE_PHY_CMN_REG15_OVRD_PLL_PD BIT(7)
132
133 #define PCIE_PHY_CMN_REG24 0x90
134 #define PCIE_PHY_CMN_REG24_RX_EQ BIT(6)
135 #define PCIE_PHY_CMN_REG24_RX_EQ_SEL BIT(3)
136
137 #define PCIE_PHY_CMN_REG26 0x98
138 #define PCIE_PHY_CMN_REG26_ATT_MODE 0xBC
139
140 #define PHY_RX_OVRD_IN_LO 0x1005
141 #define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5)
142 #define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3)
143
144 static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
145 {
146 struct dw_pcie *pci = imx6_pcie->pci;
147 bool val;
148 u32 max_iterations = 10;
149 u32 wait_counter = 0;
150
151 do {
152 val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) &
153 PCIE_PHY_STAT_ACK;
154 wait_counter++;
155
156 if (val == exp_val)
157 return 0;
158
159 udelay(1);
160 } while (wait_counter < max_iterations);
161
162 return -ETIMEDOUT;
163 }
164
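/*
 * Latch an address into the PHY control ("CR") interface: put the
 * address on the data bits, pulse CAP_ADR to capture it, then wait
 * for the ack to assert and deassert again.
 */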
165 static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
166 {
167 struct dw_pcie *pci = imx6_pcie->pci;
168 u32 val;
169 int ret;
170
171 val = PCIE_PHY_CTRL_DATA(addr);
172 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
173
174 val |= PCIE_PHY_CTRL_CAP_ADR;
175 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
176
177 ret = pcie_phy_poll_ack(imx6_pcie, true);
178 if (ret)
179 return ret;
180
181 val = PCIE_PHY_CTRL_DATA(addr);
182 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
183
184 return pcie_phy_poll_ack(imx6_pcie, false);
185 }
186
187 /* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
188 static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
189 {
190 struct dw_pcie *pci = imx6_pcie->pci;
191 u32 phy_ctl;
192 int ret;
193
194 ret = pcie_phy_wait_ack(imx6_pcie, addr);
195 if (ret)
196 return ret;
197
198 /* assert Read signal */
199 phy_ctl = PCIE_PHY_CTRL_RD;
200 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);
201
202 ret = pcie_phy_poll_ack(imx6_pcie, true);
203 if (ret)
204 return ret;
205
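/* The read data is returned in the low 16 bits of the status register */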
206 *data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
207
208 /* deassert Read signal */
209 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);
210
211 return pcie_phy_poll_ack(imx6_pcie, false);
212 }
213
214 static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
215 {
216 struct dw_pcie *pci = imx6_pcie->pci;
217 u32 var;
218 int ret;
219
220 /* write addr */
221 /* cap addr */
222 ret = pcie_phy_wait_ack(imx6_pcie, addr);
223 if (ret)
224 return ret;
225
226 var = PCIE_PHY_CTRL_DATA(data);
227 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
228
229 /* capture data */
230 var |= PCIE_PHY_CTRL_CAP_DAT;
231 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
232
233 ret = pcie_phy_poll_ack(imx6_pcie, true);
234 if (ret)
235 return ret;
236
237 /* deassert cap data */
238 var = PCIE_PHY_CTRL_DATA(data);
239 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
240
241 /* wait for ack de-assertion */
242 ret = pcie_phy_poll_ack(imx6_pcie, false);
243 if (ret)
244 return ret;
245
246 /* assert wr signal */
247 var = PCIE_PHY_CTRL_WR;
248 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
249
250 /* wait for ack */
251 ret = pcie_phy_poll_ack(imx6_pcie, true);
252 if (ret)
253 return ret;
254
255 /* deassert wr signal */
256 var = PCIE_PHY_CTRL_DATA(data);
257 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
258
259 /* wait for ack de-assertion */
260 ret = pcie_phy_poll_ack(imx6_pcie, false);
261 if (ret)
262 return ret;
263
264 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);
265
266 return 0;
267 }
268
269 static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
270 {
271 u16 tmp;
272
273 if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
274 return;
275
276 pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
277 tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
278 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
279 pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
280
281 usleep_range(2000, 3000);
282
283 pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
284 tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
285 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
286 pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
287 }
288
289 #ifdef CONFIG_ARM
290 /* Added for PCI abort handling */
291 static int imx6q_pcie_abort_handler(unsigned long addr,
292 unsigned int fsr, struct pt_regs *regs)
293 {
294 unsigned long pc = instruction_pointer(regs);
295 unsigned long instr = *(unsigned long *)pc;
296 int reg = (instr >> 12) & 15;
297
298 /*
299 * If the instruction being executed was a read,
300 * make it look like it read all-ones.
301 */
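/* Word/byte loads (LDR/LDRB); bit 22 set means a byte load, hence 0xff */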
302 if ((instr & 0x0c100000) == 0x04100000) {
303 unsigned long val;
304
305 if (instr & 0x00400000)
306 val = 255;
307 else
308 val = -1;
309
310 regs->uregs[reg] = val;
311 regs->ARM_pc += 4;
312 return 0;
313 }
314
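/* Halfword/signed byte loads (LDRH/LDRSB/LDRSH), which use a separate encoding */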
315 if ((instr & 0x0e100090) == 0x00100090) {
316 regs->uregs[reg] = -1;
317 regs->ARM_pc += 4;
318 return 0;
319 }
320
321 return 1;
322 }
323 #endif
324
325 static int imx6_pcie_attach_pd(struct device *dev)
326 {
327 struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
328 struct device_link *link;
329
330 /* Do nothing when in a single power domain */
331 if (dev->pm_domain)
332 return 0;
333
334 imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
335 if (IS_ERR(imx6_pcie->pd_pcie))
336 return PTR_ERR(imx6_pcie->pd_pcie);
337 /* Do nothing when power domain missing */
338 if (!imx6_pcie->pd_pcie)
339 return 0;
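/*
 * Link the controller to its power domain so runtime PM keeps the
 * "pcie" domain powered whenever this device is active.
 */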
340 link = device_link_add(dev, imx6_pcie->pd_pcie,
341 DL_FLAG_STATELESS |
342 DL_FLAG_PM_RUNTIME |
343 DL_FLAG_RPM_ACTIVE);
344 if (!link) {
345 dev_err(dev, "Failed to add device_link to pcie pd.\n");
346 return -EINVAL;
347 }
348
349 imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
350 if (IS_ERR(imx6_pcie->pd_pcie_phy))
351 return PTR_ERR(imx6_pcie->pd_pcie_phy);
352
353 link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
354 DL_FLAG_STATELESS |
355 DL_FLAG_PM_RUNTIME |
356 DL_FLAG_RPM_ACTIVE);
357 if (!link) {
358 dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
359 return -EINVAL;
360 }
361
362 return 0;
363 }
364
365 static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
366 {
367 struct device *dev = imx6_pcie->pci->dev;
368
369 switch (imx6_pcie->drvdata->variant) {
370 case IMX7D:
371 case IMX8MQ:
372 reset_control_assert(imx6_pcie->pciephy_reset);
373 reset_control_assert(imx6_pcie->apps_reset);
374 break;
375 case IMX6SX:
376 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
377 IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
378 IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
379 /* Force PCIe PHY reset */
380 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
381 IMX6SX_GPR5_PCIE_BTNRST_RESET,
382 IMX6SX_GPR5_PCIE_BTNRST_RESET);
383 break;
384 case IMX6QP:
385 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
386 IMX6Q_GPR1_PCIE_SW_RST,
387 IMX6Q_GPR1_PCIE_SW_RST);
388 break;
389 case IMX6Q:
390 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
391 IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
392 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
393 IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
394 break;
395 }
396
397 if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
398 int ret = regulator_disable(imx6_pcie->vpcie);
399
400 if (ret)
401 dev_err(dev, "failed to disable vpcie regulator: %d\n",
402 ret);
403 }
404 }
405
406 static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
407 {
408 WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ);
409 return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
410 }
411
412 static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
413 {
414 struct dw_pcie *pci = imx6_pcie->pci;
415 struct device *dev = pci->dev;
416 unsigned int offset;
417 int ret = 0;
418
419 switch (imx6_pcie->drvdata->variant) {
420 case IMX6SX:
421 ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
422 if (ret) {
423 dev_err(dev, "unable to enable pcie_axi clock\n");
424 break;
425 }
426
427 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
428 IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
429 break;
430 case IMX6QP:
431 case IMX6Q:
432 /* power up core phy and enable ref clock */
433 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
434 IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
435 /*
436 * The async reset input needs the ref clock to sync internally.
437 * If the ref clock only comes up after reset, the internally
438 * synced reset time is too short to meet the requirement, so
439 * add a ~10us delay here.
440 */
441 usleep_range(10, 100);
442 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
443 IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
444 break;
445 case IMX7D:
446 break;
447 case IMX8MQ:
448 ret = clk_prepare_enable(imx6_pcie->pcie_aux);
449 if (ret) {
450 dev_err(dev, "unable to enable pcie_aux clock\n");
451 break;
452 }
453
454 offset = imx6_pcie_grp_offset(imx6_pcie);
455 /*
456 * Drive the CLK_REQ override low and enable the override to
457 * make sure that REF_CLK is turned on.
458 */
459 regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
460 IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
461 0);
462 regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
463 IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
464 IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
465 break;
466 }
467
468 return ret;
469 }
470
471 static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
472 {
473 u32 val;
474 struct device *dev = imx6_pcie->pci->dev;
475
476 if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
477 IOMUXC_GPR22, val,
478 val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
479 PHY_PLL_LOCK_WAIT_USLEEP_MAX,
480 PHY_PLL_LOCK_WAIT_TIMEOUT))
481 dev_err(dev, "PCIe PLL lock timeout\n");
482 }
483
484 static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
485 {
486 struct dw_pcie *pci = imx6_pcie->pci;
487 struct device *dev = pci->dev;
488 int ret;
489
490 if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
491 ret = regulator_enable(imx6_pcie->vpcie);
492 if (ret) {
493 dev_err(dev, "failed to enable vpcie regulator: %d\n",
494 ret);
495 return;
496 }
497 }
498
499 ret = clk_prepare_enable(imx6_pcie->pcie_phy);
500 if (ret) {
501 dev_err(dev, "unable to enable pcie_phy clock\n");
502 goto err_pcie_phy;
503 }
504
505 ret = clk_prepare_enable(imx6_pcie->pcie_bus);
506 if (ret) {
507 dev_err(dev, "unable to enable pcie_bus clock\n");
508 goto err_pcie_bus;
509 }
510
511 ret = clk_prepare_enable(imx6_pcie->pcie);
512 if (ret) {
513 dev_err(dev, "unable to enable pcie clock\n");
514 goto err_pcie;
515 }
516
517 ret = imx6_pcie_enable_ref_clk(imx6_pcie);
518 if (ret) {
519 dev_err(dev, "unable to enable pcie ref clock\n");
520 goto err_ref_clk;
521 }
522
523 /* allow the clocks to stabilize */
524 usleep_range(200, 500);
525
526 /* Some boards don't have PCIe reset GPIO. */
527 if (gpio_is_valid(imx6_pcie->reset_gpio)) {
528 gpio_set_value_cansleep(imx6_pcie->reset_gpio,
529 imx6_pcie->gpio_active_high);
530 msleep(100);
531 gpio_set_value_cansleep(imx6_pcie->reset_gpio,
532 !imx6_pcie->gpio_active_high);
533 }
534
535 switch (imx6_pcie->drvdata->variant) {
536 case IMX8MQ:
537 reset_control_deassert(imx6_pcie->pciephy_reset);
538 break;
539 case IMX7D:
540 reset_control_deassert(imx6_pcie->pciephy_reset);
541
542 /* Workaround for ERR010728, failure of PCI-e PLL VCO to
543 * oscillate, especially when cold. This turns off "Duty-cycle
544 * Corrector" and other mysterious undocumented things.
545 */
546 if (likely(imx6_pcie->phy_base)) {
547 /* De-assert DCC_FB_EN */
548 writel(PCIE_PHY_CMN_REG4_DCC_FB_EN,
549 imx6_pcie->phy_base + PCIE_PHY_CMN_REG4);
550 /* Assert RX_EQS and RX_EQS_SEL */
551 writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL
552 | PCIE_PHY_CMN_REG24_RX_EQ,
553 imx6_pcie->phy_base + PCIE_PHY_CMN_REG24);
554 /* Assert ATT_MODE */
555 writel(PCIE_PHY_CMN_REG26_ATT_MODE,
556 imx6_pcie->phy_base + PCIE_PHY_CMN_REG26);
557 } else {
558 dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle?\n");
559 }
560
561 imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
562 break;
563 case IMX6SX:
564 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
565 IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
566 break;
567 case IMX6QP:
568 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
569 IMX6Q_GPR1_PCIE_SW_RST, 0);
570
571 usleep_range(200, 500);
572 break;
573 case IMX6Q: /* Nothing to do */
574 break;
575 }
576
577 return;
578
579 err_ref_clk:
580 clk_disable_unprepare(imx6_pcie->pcie);
581 err_pcie:
582 clk_disable_unprepare(imx6_pcie->pcie_bus);
583 err_pcie_bus:
584 clk_disable_unprepare(imx6_pcie->pcie_phy);
585 err_pcie_phy:
586 if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
587 ret = regulator_disable(imx6_pcie->vpcie);
588 if (ret)
589 dev_err(dev, "failed to disable vpcie regulator: %d\n",
590 ret);
591 }
592 }
593
594 static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
595 {
596 unsigned int mask, val;
597
598 if (imx6_pcie->drvdata->variant == IMX8MQ &&
599 imx6_pcie->controller_id == 1) {
600 mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
601 val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
602 PCI_EXP_TYPE_ROOT_PORT);
603 } else {
604 mask = IMX6Q_GPR12_DEVICE_TYPE;
605 val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
606 PCI_EXP_TYPE_ROOT_PORT);
607 }
608
609 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
610 }
611
612 static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
613 {
614 switch (imx6_pcie->drvdata->variant) {
615 case IMX8MQ:
616 /*
617 * TODO: Currently this code assumes that an external
618 * reference oscillator is being used.
619 */
620 regmap_update_bits(imx6_pcie->iomuxc_gpr,
621 imx6_pcie_grp_offset(imx6_pcie),
622 IMX8MQ_GPR_PCIE_REF_USE_PAD,
623 IMX8MQ_GPR_PCIE_REF_USE_PAD);
624 break;
625 case IMX7D:
626 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
627 IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
628 break;
629 case IMX6SX:
630 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
631 IMX6SX_GPR12_PCIE_RX_EQ_MASK,
632 IMX6SX_GPR12_PCIE_RX_EQ_2);
633 fallthrough;
634 default:
635 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
636 IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
637
638 /* configure constant input signal to the pcie ctrl and phy */
639 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
640 IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
641
642 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
643 IMX6Q_GPR8_TX_DEEMPH_GEN1,
644 imx6_pcie->tx_deemph_gen1 << 0);
645 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
646 IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
647 imx6_pcie->tx_deemph_gen2_3p5db << 6);
648 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
649 IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
650 imx6_pcie->tx_deemph_gen2_6db << 12);
651 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
652 IMX6Q_GPR8_TX_SWING_FULL,
653 imx6_pcie->tx_swing_full << 18);
654 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
655 IMX6Q_GPR8_TX_SWING_LOW,
656 imx6_pcie->tx_swing_low << 25);
657 break;
658 }
659
660 imx6_pcie_configure_type(imx6_pcie);
661 }
662
663 static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
664 {
665 unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
666 int mult, div;
667 u16 val;
668
669 if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
670 return 0;
671
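/*
 * The multiplier/divider pairs below appear to be chosen so that the
 * divided reference times the multiplier is the same in both cases:
 * 100MHz * 25 = (200MHz / 2) * 25 = 2.5GHz.
 */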
672 switch (phy_rate) {
673 case 125000000:
674 /*
675 * The default settings of the MPLL are for a 125MHz input
676 * clock, so no need to reconfigure anything in that case.
677 */
678 return 0;
679 case 100000000:
680 mult = 25;
681 div = 0;
682 break;
683 case 200000000:
684 mult = 25;
685 div = 1;
686 break;
687 default:
688 dev_err(imx6_pcie->pci->dev,
689 "Unsupported PHY reference clock rate %lu\n", phy_rate);
690 return -EINVAL;
691 }
692
693 pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
694 val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
695 PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
696 val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
697 val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
698 pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
699
700 pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
701 val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
702 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
703 val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
704 val |= PCIE_PHY_ATEOVRD_EN;
705 pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
706
707 return 0;
708 }
709
710 static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
711 {
712 struct dw_pcie *pci = imx6_pcie->pci;
713 struct device *dev = pci->dev;
714 u32 tmp;
715 unsigned int retries;
716
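/* Up to 200 polls of 100-1000us each, i.e. roughly 20ms-200ms in total */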
717 for (retries = 0; retries < 200; retries++) {
718 tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
719 /* Test if the speed change finished. */
720 if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
721 return 0;
722 usleep_range(100, 1000);
723 }
724
725 dev_err(dev, "Speed change timeout\n");
726 return -ETIMEDOUT;
727 }
728
729 static void imx6_pcie_ltssm_enable(struct device *dev)
730 {
731 struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
732
733 switch (imx6_pcie->drvdata->variant) {
734 case IMX6Q:
735 case IMX6SX:
736 case IMX6QP:
737 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
738 IMX6Q_GPR12_PCIE_CTL_2,
739 IMX6Q_GPR12_PCIE_CTL_2);
740 break;
741 case IMX7D:
742 case IMX8MQ:
743 reset_control_deassert(imx6_pcie->apps_reset);
744 break;
745 }
746 }
747
748 static int imx6_pcie_start_link(struct dw_pcie *pci)
749 {
750 struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
751 struct device *dev = pci->dev;
752 u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
753 u32 tmp;
754 int ret;
755
756 /*
757 * Force Gen1 operation when starting the link. In case the link is
758 * started in Gen2 mode, there is a possibility the devices on the
759 * bus will not be detected at all. This happens with PCIe switches.
760 */
761 tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
762 tmp &= ~PCI_EXP_LNKCAP_SLS;
763 tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
764 dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
765
766 /* Start LTSSM. */
767 imx6_pcie_ltssm_enable(dev);
768
769 ret = dw_pcie_wait_for_link(pci);
770 if (ret)
771 goto err_reset_phy;
772
773 if (pci->link_gen == 2) {
774 /* Allow Gen2 mode after the link is up. */
775 tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
776 tmp &= ~PCI_EXP_LNKCAP_SLS;
777 tmp |= PCI_EXP_LNKCAP_SLS_5_0GB;
778 dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
779
780 /*
781 * Start Directed Speed Change so the best possible
782 * speed both link partners support can be negotiated.
783 */
784 tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
785 tmp |= PORT_LOGIC_SPEED_CHANGE;
786 dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
787
788 if (imx6_pcie->drvdata->flags &
789 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
790 /*
791 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
792 * from i.MX6 family when no link speed transition
793 * occurs and we go Gen1 -> yep, Gen1. The difference
794 * is that, in such case, it will not be cleared by HW
795 * which will cause the following code to report false
796 * failure.
797 */
798
799 ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
800 if (ret) {
801 dev_err(dev, "Failed to bring link up!\n");
802 goto err_reset_phy;
803 }
804 }
805
806 /* Make sure link training is finished as well! */
807 ret = dw_pcie_wait_for_link(pci);
808 if (ret) {
809 dev_err(dev, "Failed to bring link up!\n");
810 goto err_reset_phy;
811 }
812 } else {
813 dev_info(dev, "Link: Gen2 disabled\n");
814 }
815
816 tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
817 dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
818 return 0;
819
820 err_reset_phy:
821 dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
822 dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
823 dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
824 imx6_pcie_reset_phy(imx6_pcie);
825 return ret;
826 }
827
828 static int imx6_pcie_host_init(struct pcie_port *pp)
829 {
830 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
831 struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
832
833 imx6_pcie_assert_core_reset(imx6_pcie);
834 imx6_pcie_init_phy(imx6_pcie);
835 imx6_pcie_deassert_core_reset(imx6_pcie);
836 imx6_setup_phy_mpll(imx6_pcie);
837
838 return 0;
839 }
840
841 static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
842 .host_init = imx6_pcie_host_init,
843 };
844
845 static const struct dw_pcie_ops dw_pcie_ops = {
846 .start_link = imx6_pcie_start_link,
847 };
848
849 #ifdef CONFIG_PM_SLEEP
850 static void imx6_pcie_ltssm_disable(struct device *dev)
851 {
852 struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
853
854 switch (imx6_pcie->drvdata->variant) {
855 case IMX6SX:
856 case IMX6QP:
857 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
858 IMX6Q_GPR12_PCIE_CTL_2, 0);
859 break;
860 case IMX7D:
861 reset_control_assert(imx6_pcie->apps_reset);
862 break;
863 default:
864 dev_err(dev, "ltssm_disable not supported\n");
865 }
866 }
867
868 static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
869 {
870 struct device *dev = imx6_pcie->pci->dev;
871
872 /* Some variants have a turnoff reset in DT */
873 if (imx6_pcie->turnoff_reset) {
874 reset_control_assert(imx6_pcie->turnoff_reset);
875 reset_control_deassert(imx6_pcie->turnoff_reset);
876 goto pm_turnoff_sleep;
877 }
878
879 /* Others poke directly at IOMUXC registers */
880 switch (imx6_pcie->drvdata->variant) {
881 case IMX6SX:
882 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
883 IMX6SX_GPR12_PCIE_PM_TURN_OFF,
884 IMX6SX_GPR12_PCIE_PM_TURN_OFF);
885 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
886 IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
887 break;
888 default:
889 dev_err(dev, "PME_Turn_Off not implemented\n");
890 return;
891 }
892
893 /*
894 * Components with an upstream port must respond to
895 * PME_Turn_Off with PME_TO_Ack but we can't check.
896 *
897 * The standard recommends a 1-10ms timeout after which to
898 * proceed anyway as if acks were received.
899 */
900 pm_turnoff_sleep:
901 usleep_range(1000, 10000);
902 }
903
904 static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
905 {
906 clk_disable_unprepare(imx6_pcie->pcie);
907 clk_disable_unprepare(imx6_pcie->pcie_phy);
908 clk_disable_unprepare(imx6_pcie->pcie_bus);
909
910 switch (imx6_pcie->drvdata->variant) {
911 case IMX6SX:
912 clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
913 break;
914 case IMX7D:
915 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
916 IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
917 IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
918 break;
919 case IMX8MQ:
920 clk_disable_unprepare(imx6_pcie->pcie_aux);
921 break;
922 default:
923 break;
924 }
925 }
926
927 static int imx6_pcie_suspend_noirq(struct device *dev)
928 {
929 struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
930
931 if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
932 return 0;
933
934 imx6_pcie_pm_turnoff(imx6_pcie);
935 imx6_pcie_clk_disable(imx6_pcie);
936 imx6_pcie_ltssm_disable(dev);
937
938 return 0;
939 }
940
941 static int imx6_pcie_resume_noirq(struct device *dev)
942 {
943 int ret;
944 struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
945 struct pcie_port *pp = &imx6_pcie->pci->pp;
946
947 if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
948 return 0;
949
950 imx6_pcie_assert_core_reset(imx6_pcie);
951 imx6_pcie_init_phy(imx6_pcie);
952 imx6_pcie_deassert_core_reset(imx6_pcie);
953 dw_pcie_setup_rc(pp);
954
955 ret = imx6_pcie_start_link(imx6_pcie->pci);
956 if (ret < 0)
957 dev_info(dev, "pcie link is down after resume.\n");
958
959 return 0;
960 }
961 #endif
962
963 static const struct dev_pm_ops imx6_pcie_pm_ops = {
964 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
965 imx6_pcie_resume_noirq)
966 };
967
968 static int imx6_pcie_probe(struct platform_device *pdev)
969 {
970 struct device *dev = &pdev->dev;
971 struct dw_pcie *pci;
972 struct imx6_pcie *imx6_pcie;
973 struct device_node *np;
974 struct resource *dbi_base;
975 struct device_node *node = dev->of_node;
976 int ret;
977 u16 val;
978
979 imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
980 if (!imx6_pcie)
981 return -ENOMEM;
982
983 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
984 if (!pci)
985 return -ENOMEM;
986
987 pci->dev = dev;
988 pci->ops = &dw_pcie_ops;
989 pci->pp.ops = &imx6_pcie_host_ops;
990
991 imx6_pcie->pci = pci;
992 imx6_pcie->drvdata = of_device_get_match_data(dev);
993
994 /* Find the PHY if one is defined; only the i.MX7D uses it */
995 np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
996 if (np) {
997 struct resource res;
998
999 ret = of_address_to_resource(np, 0, &res);
1000 if (ret) {
1001 dev_err(dev, "Unable to map PCIe PHY\n");
1002 return ret;
1003 }
1004 imx6_pcie->phy_base = devm_ioremap_resource(dev, &res);
1005 if (IS_ERR(imx6_pcie->phy_base)) {
1006 dev_err(dev, "Unable to map PCIe PHY\n");
1007 return PTR_ERR(imx6_pcie->phy_base);
1008 }
1009 }
1010
1011 dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1012 pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
1013 if (IS_ERR(pci->dbi_base))
1014 return PTR_ERR(pci->dbi_base);
1015
1016 /* Fetch GPIOs */
1017 imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
1018 imx6_pcie->gpio_active_high = of_property_read_bool(node,
1019 "reset-gpio-active-high");
1020 if (gpio_is_valid(imx6_pcie->reset_gpio)) {
1021 ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
1022 imx6_pcie->gpio_active_high ?
1023 GPIOF_OUT_INIT_HIGH :
1024 GPIOF_OUT_INIT_LOW,
1025 "PCIe reset");
1026 if (ret) {
1027 dev_err(dev, "unable to get reset gpio\n");
1028 return ret;
1029 }
1030 } else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
1031 return imx6_pcie->reset_gpio;
1032 }
1033
1034 /* Fetch clocks */
1035 imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
1036 if (IS_ERR(imx6_pcie->pcie_phy))
1037 return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy),
1038 "pcie_phy clock source missing or invalid\n");
1039
1040 imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
1041 if (IS_ERR(imx6_pcie->pcie_bus))
1042 return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_bus),
1043 "pcie_bus clock source missing or invalid\n");
1044
1045 imx6_pcie->pcie = devm_clk_get(dev, "pcie");
1046 if (IS_ERR(imx6_pcie->pcie))
1047 return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie),
1048 "pcie clock source missing or invalid\n");
1049
1050 switch (imx6_pcie->drvdata->variant) {
1051 case IMX6SX:
1052 imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
1053 "pcie_inbound_axi");
1054 if (IS_ERR(imx6_pcie->pcie_inbound_axi))
1055 return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_inbound_axi),
1056 "pcie_inbound_axi clock missing or invalid\n");
1057 break;
1058 case IMX8MQ:
1059 imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
1060 if (IS_ERR(imx6_pcie->pcie_aux))
1061 return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
1062 "pcie_aux clock source missing or invalid\n");
1063 fallthrough;
1064 case IMX7D:
1065 if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
1066 imx6_pcie->controller_id = 1;
1067
1068 imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
1069 "pciephy");
1070 if (IS_ERR(imx6_pcie->pciephy_reset)) {
1071 dev_err(dev, "Failed to get PCIEPHY reset control\n");
1072 return PTR_ERR(imx6_pcie->pciephy_reset);
1073 }
1074
1075 imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
1076 "apps");
1077 if (IS_ERR(imx6_pcie->apps_reset)) {
1078 dev_err(dev, "Failed to get PCIE APPS reset control\n");
1079 return PTR_ERR(imx6_pcie->apps_reset);
1080 }
1081 break;
1082 default:
1083 break;
1084 }
1085
1086 /* Grab turnoff reset */
1087 imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
1088 if (IS_ERR(imx6_pcie->turnoff_reset)) {
1089 dev_err(dev, "Failed to get TURNOFF reset control\n");
1090 return PTR_ERR(imx6_pcie->turnoff_reset);
1091 }
1092
1093 /* Grab GPR config register range */
1094 imx6_pcie->iomuxc_gpr =
1095 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
1096 if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
1097 dev_err(dev, "unable to find iomuxc registers\n");
1098 return PTR_ERR(imx6_pcie->iomuxc_gpr);
1099 }
1100
1101 /* Grab PCIe PHY Tx Settings */
1102 if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
1103 &imx6_pcie->tx_deemph_gen1))
1104 imx6_pcie->tx_deemph_gen1 = 0;
1105
1106 if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
1107 &imx6_pcie->tx_deemph_gen2_3p5db))
1108 imx6_pcie->tx_deemph_gen2_3p5db = 0;
1109
1110 if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
1111 &imx6_pcie->tx_deemph_gen2_6db))
1112 imx6_pcie->tx_deemph_gen2_6db = 20;
1113
1114 if (of_property_read_u32(node, "fsl,tx-swing-full",
1115 &imx6_pcie->tx_swing_full))
1116 imx6_pcie->tx_swing_full = 127;
1117
1118 if (of_property_read_u32(node, "fsl,tx-swing-low",
1119 &imx6_pcie->tx_swing_low))
1120 imx6_pcie->tx_swing_low = 127;
1121
1122 /* Limit link speed; default to Gen1 unless fsl,max-link-speed overrides it */
1123 pci->link_gen = 1;
1124 ret = of_property_read_u32(node, "fsl,max-link-speed", &pci->link_gen);
1125
1126 imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
1127 if (IS_ERR(imx6_pcie->vpcie)) {
1128 if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
1129 return PTR_ERR(imx6_pcie->vpcie);
1130 imx6_pcie->vpcie = NULL;
1131 }
1132
1133 platform_set_drvdata(pdev, imx6_pcie);
1134
1135 ret = imx6_pcie_attach_pd(dev);
1136 if (ret)
1137 return ret;
1138
1139 ret = dw_pcie_host_init(&pci->pp);
1140 if (ret < 0)
1141 return ret;
1142
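/*
 * Set the MSI Enable bit in the root port's own MSI capability;
 * without it, MSIs handled by the DWC MSI controller are presumably
 * not signalled to the CPU on these SoCs.
 */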
1143 if (pci_msi_enabled()) {
1144 u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
1145 val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
1146 val |= PCI_MSI_FLAGS_ENABLE;
1147 dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
1148 }
1149
1150 return 0;
1151 }
1152
1153 static void imx6_pcie_shutdown(struct platform_device *pdev)
1154 {
1155 struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
1156
1157 /* Bring the link down so the bootloader gets a clean state in case of reboot */
1158 imx6_pcie_assert_core_reset(imx6_pcie);
1159 }
1160
1161 static const struct imx6_pcie_drvdata drvdata[] = {
1162 [IMX6Q] = {
1163 .variant = IMX6Q,
1164 .flags = IMX6_PCIE_FLAG_IMX6_PHY |
1165 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
1166 .dbi_length = 0x200,
1167 },
1168 [IMX6SX] = {
1169 .variant = IMX6SX,
1170 .flags = IMX6_PCIE_FLAG_IMX6_PHY |
1171 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
1172 IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
1173 },
1174 [IMX6QP] = {
1175 .variant = IMX6QP,
1176 .flags = IMX6_PCIE_FLAG_IMX6_PHY |
1177 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
1178 },
1179 [IMX7D] = {
1180 .variant = IMX7D,
1181 .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
1182 },
1183 [IMX8MQ] = {
1184 .variant = IMX8MQ,
1185 },
1186 };
1187
1188 static const struct of_device_id imx6_pcie_of_match[] = {
1189 { .compatible = "fsl,imx6q-pcie", .data = &drvdata[IMX6Q], },
1190 { .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
1191 { .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
1192 { .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], },
1193 { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], } ,
1194 {},
1195 };
1196
1197 static struct platform_driver imx6_pcie_driver = {
1198 .driver = {
1199 .name = "imx6q-pcie",
1200 .of_match_table = imx6_pcie_of_match,
1201 .suppress_bind_attrs = true,
1202 .pm = &imx6_pcie_pm_ops,
1203 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
1204 },
1205 .probe = imx6_pcie_probe,
1206 .shutdown = imx6_pcie_shutdown,
1207 };
1208
1209 static void imx6_pcie_quirk(struct pci_dev *dev)
1210 {
1211 struct pci_bus *bus = dev->bus;
1212 struct pcie_port *pp = bus->sysdata;
1213
1214 /* Bus parent is the PCI bridge; its parent is the platform device bound to this driver */
1215 if (!bus->dev.parent || !bus->dev.parent->parent)
1216 return;
1217
1218 /* Make sure we only quirk devices associated with this driver */
1219 if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver)
1220 return;
1221
1222 if (pci_is_root_bus(bus)) {
1223 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
1224 struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
1225
1226 /*
1227 * Limit config length to avoid the kernel reading beyond
1228 * the register set and causing an abort on i.MX 6Quad
1229 */
1230 if (imx6_pcie->drvdata->dbi_length) {
1231 dev->cfg_size = imx6_pcie->drvdata->dbi_length;
1232 dev_info(&dev->dev, "Limiting cfg_size to %d\n",
1233 dev->cfg_size);
1234 }
1235 }
1236 }
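/* 0xabcd is presumably the default Synopsys device ID of the DWC root port on these SoCs */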
1237 DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
1238 PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk);
1239
1240 static int __init imx6_pcie_init(void)
1241 {
1242 #ifdef CONFIG_ARM
1243 /*
1244 * Since probe() can be deferred, we need to make sure that
1245 * hook_fault_code is not called after __init memory is freed
1246 * by the kernel. Since imx6q_pcie_abort_handler() does not touch
1247 * any driver state, we can install the handler here without
1248 * risking it accessing uninitialized driver state.
1249 */
1250 hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
1251 "external abort on non-linefetch");
1252 #endif
1253
1254 return platform_driver_register(&imx6_pcie_driver);
1255 }
1256 device_initcall(imx6_pcie_init);
1257