// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe driver for Marvell Armada 370 and Armada XP SoCs
 *
 * Author: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 */
7
8 #include <linux/kernel.h>
9 #include <linux/pci.h>
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/gpio.h>
13 #include <linux/init.h>
14 #include <linux/mbus.h>
15 #include <linux/slab.h>
16 #include <linux/platform_device.h>
17 #include <linux/of_address.h>
18 #include <linux/of_irq.h>
19 #include <linux/of_gpio.h>
20 #include <linux/of_pci.h>
21 #include <linux/of_platform.h>
22
23 #include "../pci.h"
24 #include "../pci-bridge-emul.h"
25
26 /*
27 * PCIe unit register offsets.
28 */
29 #define PCIE_DEV_ID_OFF 0x0000
30 #define PCIE_CMD_OFF 0x0004
31 #define PCIE_DEV_REV_OFF 0x0008
32 #define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3))
33 #define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3))
34 #define PCIE_CAP_PCIEXP 0x0060
35 #define PCIE_HEADER_LOG_4_OFF 0x0128
36 #define PCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4))
37 #define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4))
38 #define PCIE_WIN04_BASE_OFF(n) (0x1824 + ((n) << 4))
39 #define PCIE_WIN04_REMAP_OFF(n) (0x182c + ((n) << 4))
40 #define PCIE_WIN5_CTRL_OFF 0x1880
41 #define PCIE_WIN5_BASE_OFF 0x1884
42 #define PCIE_WIN5_REMAP_OFF 0x188c
43 #define PCIE_CONF_ADDR_OFF 0x18f8
44 #define PCIE_CONF_ADDR_EN 0x80000000
45 #define PCIE_CONF_REG(r) ((((r) & 0xf00) << 16) | ((r) & 0xfc))
46 #define PCIE_CONF_BUS(b) (((b) & 0xff) << 16)
47 #define PCIE_CONF_DEV(d) (((d) & 0x1f) << 11)
48 #define PCIE_CONF_FUNC(f) (((f) & 0x7) << 8)
49 #define PCIE_CONF_ADDR(bus, devfn, where) \
50 (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
51 PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \
52 PCIE_CONF_ADDR_EN)
53 #define PCIE_CONF_DATA_OFF 0x18fc
54 #define PCIE_MASK_OFF 0x1910
55 #define PCIE_MASK_ENABLE_INTS 0x0f000000
56 #define PCIE_CTRL_OFF 0x1a00
57 #define PCIE_CTRL_X1_MODE 0x0001
58 #define PCIE_STAT_OFF 0x1a04
59 #define PCIE_STAT_BUS 0xff00
60 #define PCIE_STAT_DEV 0x1f0000
61 #define PCIE_STAT_LINK_DOWN BIT(0)
62 #define PCIE_RC_RTSTA 0x1a14
63 #define PCIE_DEBUG_CTRL 0x1a60
64 #define PCIE_DEBUG_SOFT_RESET BIT(20)
65
66 struct mvebu_pcie_port;
67
/* Structure representing all PCIe interfaces of one controller instance */
struct mvebu_pcie {
	struct platform_device *pdev;	/* backing platform device */
	struct mvebu_pcie_port *ports;	/* array of per-interface state, nports entries */
	struct resource io;		/* physical PCIe I/O aperture from mbus */
	struct resource realio;		/* I/O range as exposed to the PCI core */
	struct resource mem;		/* PCIe memory aperture from mbus */
	struct resource busn;		/* bus number range; not referenced in this file */
	int nports;			/* number of valid entries in ports[] */
};
78
/* One MBus address decoding window: CPU base, bus-side remap, length */
struct mvebu_pcie_window {
	phys_addr_t base;	/* CPU physical base address */
	phys_addr_t remap;	/* remap address, or MVEBU_MBUS_NO_REMAP */
	size_t size;		/* window length in bytes; 0 = disabled */
};
84
/* Structure representing one PCIe interface (one root port) */
struct mvebu_pcie_port {
	char *name;			/* "pcie<port>.<lane>", devm-allocated */
	void __iomem *base;		/* mapped PCIe unit registers */
	u32 port;			/* marvell,pcie-port DT property */
	u32 lane;			/* marvell,pcie-lane DT property (default 0) */
	int devfn;			/* devfn of this port on bus 0 */
	unsigned int mem_target;	/* MBus target id for MEM window */
	unsigned int mem_attr;		/* MBus attribute for MEM window */
	unsigned int io_target;		/* MBus target id for I/O window, -1 if none */
	unsigned int io_attr;		/* MBus attribute for I/O window, -1 if none */
	struct clk *clk;		/* port gate clock */
	struct gpio_desc *reset_gpio;	/* PERST# line, may be NULL */
	char *reset_name;		/* label used when requesting reset GPIO */
	struct pci_bridge_emul bridge;	/* emulated PCI-to-PCI bridge config space */
	struct device_node *dn;		/* DT node of this port */
	struct mvebu_pcie *pcie;	/* back-pointer to the controller */
	struct mvebu_pcie_window memwin; /* currently programmed MEM window */
	struct mvebu_pcie_window iowin;	/* currently programmed I/O window */
	u32 saved_pcie_stat;		/* PCIE_STAT_OFF saved across suspend */
	struct resource regs;		/* physical range of the unit registers */
};
107
/* Write a 32-bit value to a register of this port's PCIe unit. */
static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
{
	void __iomem *addr = port->base + reg;

	writel(val, addr);
}
112
/* Read a 32-bit register of this port's PCIe unit. */
static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
{
	void __iomem *addr = port->base + reg;

	return readl(addr);
}
117
mvebu_has_ioport(struct mvebu_pcie_port * port)118 static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port)
119 {
120 return port->io_target != -1 && port->io_attr != -1;
121 }
122
mvebu_pcie_link_up(struct mvebu_pcie_port * port)123 static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
124 {
125 return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
126 }
127
mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port * port,int nr)128 static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
129 {
130 u32 stat;
131
132 stat = mvebu_readl(port, PCIE_STAT_OFF);
133 stat &= ~PCIE_STAT_BUS;
134 stat |= nr << 8;
135 mvebu_writel(port, stat, PCIE_STAT_OFF);
136 }
137
mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port * port,int nr)138 static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
139 {
140 u32 stat;
141
142 stat = mvebu_readl(port, PCIE_STAT_OFF);
143 stat &= ~PCIE_STAT_DEV;
144 stat |= nr << 16;
145 mvebu_writel(port, stat, PCIE_STAT_OFF);
146 }
147
/*
 * Setup PCIE BARs and Address Decode Wins:
 * BAR[0] -> internal registers (needed for MSI)
 * BAR[1] -> covers all DRAM banks
 * BAR[2] -> Disabled
 * WIN[0-3] -> DRAM bank[0-3]
 */
static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
{
	const struct mbus_dram_target_info *dram;
	u32 size;
	int i;

	dram = mv_mbus_dram_info();

	/* First, disable and clear BARs and windows. */
	for (i = 1; i < 3; i++) {
		mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
		mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
		mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i));
	}

	for (i = 0; i < 5; i++) {
		mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
	}

	mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
	mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
	mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);

	/* Setup windows for DDR banks. Count total DDR size on the fly. */
	size = 0;
	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		/* Low 16 bits of base/ctrl carry flags, so mask them off. */
		mvebu_writel(port, cs->base & 0xffff0000,
			     PCIE_WIN04_BASE_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
		/* ctrl = size mask | mbus attribute | target id | enable */
		mvebu_writel(port,
			     ((cs->size - 1) & 0xffff0000) |
			     (cs->mbus_attr << 8) |
			     (dram->mbus_dram_target_id << 4) | 1,
			     PCIE_WIN04_CTRL_OFF(i));

		size += cs->size;
	}

	/*
	 * Round up 'size' to the nearest power of two.
	 * NOTE(review): 'size' is u32 and fls() returns the top bit
	 * index, so a total DRAM size above 2GB would make this shift
	 * overflow — confirm against supported memory configurations.
	 */
	if ((size & (size - 1)) != 0)
		size = 1 << fls(size);

	/* Setup BAR[1] to all DRAM banks. */
	mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
	mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
		     PCIE_BAR_CTRL_OFF(1));

	/*
	 * Point BAR[0] to the device's internal registers.
	 */
	mvebu_writel(port, round_down(port->regs.start, SZ_1M), PCIE_BAR_LO_OFF(0));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
}
213
mvebu_pcie_setup_hw(struct mvebu_pcie_port * port)214 static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
215 {
216 u32 cmd, mask;
217
218 /* Point PCIe unit MBUS decode windows to DRAM space. */
219 mvebu_pcie_setup_wins(port);
220
221 /* Master + slave enable. */
222 cmd = mvebu_readl(port, PCIE_CMD_OFF);
223 cmd |= PCI_COMMAND_IO;
224 cmd |= PCI_COMMAND_MEMORY;
225 cmd |= PCI_COMMAND_MASTER;
226 mvebu_writel(port, cmd, PCIE_CMD_OFF);
227
228 /* Enable interrupt lines A-D. */
229 mask = mvebu_readl(port, PCIE_MASK_OFF);
230 mask |= PCIE_MASK_ENABLE_INTS;
231 mvebu_writel(port, mask, PCIE_MASK_OFF);
232 }
233
/*
 * Read the configuration space of a device behind this port, using
 * the PCIE_CONF_ADDR / PCIE_CONF_DATA indirect access mechanism.
 *
 * Returns PCIBIOS_SUCCESSFUL and fills *val on success, or
 * PCIBIOS_BAD_REGISTER_NUMBER for an unsupported access size.
 */
static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port,
				 struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF;

	/* Latch the target bus/devfn/register into the address window. */
	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
		     PCIE_CONF_ADDR_OFF);

	switch (size) {
	case 1:
		*val = readb_relaxed(conf_data + (where & 3));
		break;
	case 2:
		*val = readw_relaxed(conf_data + (where & 2));
		break;
	case 4:
		*val = readl_relaxed(conf_data);
		break;
	default:
		/*
		 * Previously an unsupported size fell through, leaving
		 * *val uninitialized while reporting success. Mirror the
		 * write path instead: return all-ones (the conventional
		 * "no data" value) and flag the bad register number.
		 */
		*val = 0xffffffff;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
257
/*
 * Write the configuration space of a device behind this port, using
 * the PCIE_CONF_ADDR / PCIE_CONF_DATA indirect access mechanism.
 * Returns PCIBIOS_BAD_REGISTER_NUMBER for unsupported access sizes.
 *
 * NOTE(review): the read path uses *_relaxed accessors while this
 * path uses the ordered write*() variants — confirm whether the
 * asymmetry is intentional.
 */
static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
				 struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF;

	/* Latch the target bus/devfn/register into the address window. */
	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
		     PCIE_CONF_ADDR_OFF);

	switch (size) {
	case 1:
		writeb(val, conf_data + (where & 3));
		break;
	case 2:
		writew(val, conf_data + (where & 2));
		break;
	case 4:
		writel(val, conf_data);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
283
284 /*
285 * Remove windows, starting from the largest ones to the smallest
286 * ones.
287 */
mvebu_pcie_del_windows(struct mvebu_pcie_port * port,phys_addr_t base,size_t size)288 static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
289 phys_addr_t base, size_t size)
290 {
291 while (size) {
292 size_t sz = 1 << (fls(size) - 1);
293
294 mvebu_mbus_del_window(base, sz);
295 base += sz;
296 size -= sz;
297 }
298 }
299
/*
 * MBus windows can only have a power of two size, but PCI BARs do not
 * have this constraint. Therefore, we have to split the PCI BAR into
 * areas each having a power of two size. We start from the largest
 * one (i.e highest order bit set in the size).
 */
static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
				   unsigned int target, unsigned int attribute,
				   phys_addr_t base, size_t size,
				   phys_addr_t remap)
{
	/* Bytes successfully mapped so far; used to roll back on failure. */
	size_t size_mapped = 0;

	while (size) {
		/* Largest power-of-two chunk not exceeding 'size'. */
		size_t sz = 1 << (fls(size) - 1);
		int ret;

		ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
							sz, remap);
		if (ret) {
			phys_addr_t end = base + sz - 1;

			dev_err(&port->pcie->pdev->dev,
				"Could not create MBus window at [mem %pa-%pa]: %d\n",
				&base, &end, ret);
			/* Undo every window created so far and give up. */
			mvebu_pcie_del_windows(port, base - size_mapped,
					       size_mapped);
			return;
		}

		size -= sz;
		size_mapped += sz;
		base += sz;
		/* Only advance the remap address when remapping is in use. */
		if (remap != MVEBU_MBUS_NO_REMAP)
			remap += sz;
	}
}
337
mvebu_pcie_set_window(struct mvebu_pcie_port * port,unsigned int target,unsigned int attribute,const struct mvebu_pcie_window * desired,struct mvebu_pcie_window * cur)338 static void mvebu_pcie_set_window(struct mvebu_pcie_port *port,
339 unsigned int target, unsigned int attribute,
340 const struct mvebu_pcie_window *desired,
341 struct mvebu_pcie_window *cur)
342 {
343 if (desired->base == cur->base && desired->remap == cur->remap &&
344 desired->size == cur->size)
345 return;
346
347 if (cur->size != 0) {
348 mvebu_pcie_del_windows(port, cur->base, cur->size);
349 cur->size = 0;
350 cur->base = 0;
351
352 /*
353 * If something tries to change the window while it is enabled
354 * the change will not be done atomically. That would be
355 * difficult to do in the general case.
356 */
357 }
358
359 if (desired->size == 0)
360 return;
361
362 mvebu_pcie_add_windows(port, target, attribute, desired->base,
363 desired->size, desired->remap);
364 *cur = *desired;
365 }
366
/*
 * Translate the emulated bridge's I/O base/limit registers into an
 * MBus I/O decode window for this port, or remove the window when
 * the range is disabled or invalid.
 */
static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
	struct mvebu_pcie_window desired = {};
	struct pci_bridge_emul_conf *conf = &port->bridge.conf;

	/* Are the new iobase/iolimit values invalid? */
	if (conf->iolimit < conf->iobase ||
	    conf->iolimitupper < conf->iobaseupper ||
	    !(conf->command & PCI_COMMAND_IO)) {
		/* Disable: 'desired' is all-zero, so the window is removed. */
		mvebu_pcie_set_window(port, port->io_target, port->io_attr,
				      &desired, &port->iowin);
		return;
	}

	if (!mvebu_has_ioport(port)) {
		dev_WARN(&port->pcie->pdev->dev,
			 "Attempt to set IO when IO is disabled\n");
		return;
	}

	/*
	 * We read the PCI-to-PCI bridge emulated registers, and
	 * calculate the base address and size of the address decoding
	 * window to setup, according to the PCI-to-PCI bridge
	 * specifications. iobase is the bus address, port->iowin_base
	 * is the CPU address.
	 */
	desired.remap = ((conf->iobase & 0xF0) << 8) |
			(conf->iobaseupper << 16);
	desired.base = port->pcie->io.start + desired.remap;
	/* Limit is inclusive and 4K-granular, hence the 0xFFF fill. */
	desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) |
			 (conf->iolimitupper << 16)) -
			desired.remap) +
		       1;

	mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
			      &port->iowin);
}
405
/*
 * Translate the emulated bridge's memory base/limit registers into an
 * MBus memory decode window for this port, or remove the window when
 * the range is disabled or invalid.
 */
static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
	/* Memory windows are identity-mapped: no remap. */
	struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
	struct pci_bridge_emul_conf *conf = &port->bridge.conf;

	/* Are the new membase/memlimit values invalid? */
	if (conf->memlimit < conf->membase ||
	    !(conf->command & PCI_COMMAND_MEMORY)) {
		/* Disable: zero-sized 'desired' removes the window. */
		mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
				      &desired, &port->memwin);
		return;
	}

	/*
	 * We read the PCI-to-PCI bridge emulated registers, and
	 * calculate the base address and size of the address decoding
	 * window to setup, according to the PCI-to-PCI bridge
	 * specifications.
	 */
	desired.base = ((conf->membase & 0xFFF0) << 16);
	/* Limit is inclusive and 1M-granular, hence the 0xFFFFF fill. */
	desired.size = (((conf->memlimit & 0xFFF0) << 16) | 0xFFFFF) -
		       desired.base + 1;

	mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
			      &port->memwin);
}
432
/*
 * Read hook for the PCIe capability registers of the emulated
 * PCI-to-PCI bridge. Most values are forwarded from the port's real
 * PCIe capability block at PCIE_CAP_PCIEXP, with a few fields masked
 * to match root-port requirements.
 */
static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
				     int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCAP:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP);
		break;

	case PCI_EXP_DEVCTL:
		/* Hide the error-reporting enables (see the write hook). */
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL) &
			 ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
			   PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
		break;

	case PCI_EXP_LNKCAP:
		/*
		 * PCIe requires the clock power management capability to be
		 * hard-wired to zero for downstream ports
		 */
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) &
			 ~PCI_EXP_LNKCAP_CLKPM;
		break;

	case PCI_EXP_LNKCTL:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
		break;

	case PCI_EXP_SLTCTL:
		/* Report "presence detect state" in the SLTSTA half. */
		*value = PCI_EXP_SLTSTA_PDS << 16;
		break;

	case PCI_EXP_RTSTA:
		*value = mvebu_readl(port, PCIE_RC_RTSTA);
		break;

	default:
		/* Let the generic emulation layer handle everything else. */
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}
477
/*
 * Write hook for the base configuration space of the emulated
 * PCI-to-PCI bridge: propagate command-register and window-register
 * changes to the hardware decode windows.
 */
static void
mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
				      int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;
	struct pci_bridge_emul_conf *conf = &bridge->conf;

	switch (reg) {
	case PCI_COMMAND:
	{
		/* I/O enable cannot be set on ports without an I/O window. */
		if (!mvebu_has_ioport(port))
			conf->command &= ~PCI_COMMAND_IO;

		/* Reprogram windows only when the enable bits toggled. */
		if ((old ^ new) & PCI_COMMAND_IO)
			mvebu_pcie_handle_iobase_change(port);
		if ((old ^ new) & PCI_COMMAND_MEMORY)
			mvebu_pcie_handle_membase_change(port);

		break;
	}

	case PCI_IO_BASE:
		/*
		 * We keep bit 1 set, it is a read-only bit that
		 * indicates we support 32 bits addressing for the
		 * I/O
		 */
		conf->iobase |= PCI_IO_RANGE_TYPE_32;
		conf->iolimit |= PCI_IO_RANGE_TYPE_32;
		mvebu_pcie_handle_iobase_change(port);
		break;

	case PCI_MEMORY_BASE:
		mvebu_pcie_handle_membase_change(port);
		break;

	case PCI_IO_BASE_UPPER16:
		mvebu_pcie_handle_iobase_change(port);
		break;

	case PCI_PRIMARY_BUS:
		/* The secondary bus number becomes the port's local bus. */
		mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus);
		break;

	default:
		break;
	}
}
526
/*
 * Write hook for the PCIe capability registers of the emulated
 * PCI-to-PCI bridge: forward writes to the port's real capability
 * block, filtering bits that must stay zero in root complex mode.
 */
static void
mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
				      int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCTL:
		/*
		 * Armada370 data says these bits must always
		 * be zero when in root complex mode.
		 */
		new &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
			 PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);

		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
		break;

	case PCI_EXP_LNKCTL:
		/*
		 * If we don't support CLKREQ, we must ensure that the
		 * CLKREQ enable bit always reads zero. Since we haven't
		 * had this capability, and it's dependent on board wiring,
		 * disable it for the time being.
		 */
		new &= ~PCI_EXP_LNKCTL_CLKREQ_EN;

		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
		break;

	case PCI_EXP_RTSTA:
		mvebu_writel(port, new, PCIE_RC_RTSTA);
		break;
	}
}
562
/* Hooks wiring the generic pci-bridge-emul layer to this driver. */
static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
	.write_base = mvebu_pci_bridge_emul_base_conf_write,
	.read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
	.write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
};
568
/*
 * Initialize the configuration space of the PCI-to-PCI bridge
 * associated with the given PCIe interface.
 */
static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
{
	struct pci_bridge_emul *bridge = &port->bridge;

	/* Mirror the real device's IDs in the emulated bridge. */
	bridge->conf.vendor = PCI_VENDOR_ID_MARVELL;
	bridge->conf.device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
	bridge->conf.class_revision =
		mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff;

	if (mvebu_has_ioport(port)) {
		/* We support 32 bits I/O addressing */
		bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
		bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
	}

	bridge->has_pcie = true;
	bridge->data = port;
	bridge->ops = &mvebu_pci_bridge_emul_ops;

	/*
	 * NOTE(review): the return value of pci_bridge_emul_init() is
	 * ignored here — confirm whether a failure should be propagated
	 * to the caller.
	 */
	pci_bridge_emul_init(bridge, PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR);
}
594
sys_to_pcie(struct pci_sys_data * sys)595 static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
596 {
597 return sys->private_data;
598 }
599
mvebu_pcie_find_port(struct mvebu_pcie * pcie,struct pci_bus * bus,int devfn)600 static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
601 struct pci_bus *bus,
602 int devfn)
603 {
604 int i;
605
606 for (i = 0; i < pcie->nports; i++) {
607 struct mvebu_pcie_port *port = &pcie->ports[i];
608
609 if (bus->number == 0 && port->devfn == devfn)
610 return port;
611 if (bus->number != 0 &&
612 bus->number >= port->bridge.conf.secondary_bus &&
613 bus->number <= port->bridge.conf.subordinate_bus)
614 return port;
615 }
616
617 return NULL;
618 }
619
620 /* PCI configuration space write function */
mvebu_pcie_wr_conf(struct pci_bus * bus,u32 devfn,int where,int size,u32 val)621 static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
622 int where, int size, u32 val)
623 {
624 struct mvebu_pcie *pcie = bus->sysdata;
625 struct mvebu_pcie_port *port;
626 int ret;
627
628 port = mvebu_pcie_find_port(pcie, bus, devfn);
629 if (!port)
630 return PCIBIOS_DEVICE_NOT_FOUND;
631
632 /* Access the emulated PCI-to-PCI bridge */
633 if (bus->number == 0)
634 return pci_bridge_emul_conf_write(&port->bridge, where,
635 size, val);
636
637 if (!mvebu_pcie_link_up(port))
638 return PCIBIOS_DEVICE_NOT_FOUND;
639
640 /* Access the real PCIe interface */
641 ret = mvebu_pcie_hw_wr_conf(port, bus, devfn,
642 where, size, val);
643
644 return ret;
645 }
646
647 /* PCI configuration space read function */
mvebu_pcie_rd_conf(struct pci_bus * bus,u32 devfn,int where,int size,u32 * val)648 static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
649 int size, u32 *val)
650 {
651 struct mvebu_pcie *pcie = bus->sysdata;
652 struct mvebu_pcie_port *port;
653 int ret;
654
655 port = mvebu_pcie_find_port(pcie, bus, devfn);
656 if (!port) {
657 *val = 0xffffffff;
658 return PCIBIOS_DEVICE_NOT_FOUND;
659 }
660
661 /* Access the emulated PCI-to-PCI bridge */
662 if (bus->number == 0)
663 return pci_bridge_emul_conf_read(&port->bridge, where,
664 size, val);
665
666 if (!mvebu_pcie_link_up(port)) {
667 *val = 0xffffffff;
668 return PCIBIOS_DEVICE_NOT_FOUND;
669 }
670
671 /* Access the real PCIe interface */
672 ret = mvebu_pcie_hw_rd_conf(port, bus, devfn,
673 where, size, val);
674
675 return ret;
676 }
677
/* Config-space accessors handed to the PCI core for this bridge. */
static struct pci_ops mvebu_pcie_ops = {
	.read = mvebu_pcie_rd_conf,
	.write = mvebu_pcie_wr_conf,
};
682
/*
 * Host bridge resource-alignment callback: make sure BARs assigned to
 * devices on the root bus land on boundaries that MBus windows can
 * cover. Devices on deeper buses keep their proposed placement.
 */
static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
						 const struct resource *res,
						 resource_size_t start,
						 resource_size_t size,
						 resource_size_t align)
{
	if (dev->bus->number != 0)
		return start;

	/*
	 * On the PCI-to-PCI bridge side, the I/O windows must have at
	 * least a 64 KB size and the memory windows must have at
	 * least a 1 MB size. Moreover, MBus windows need to have a
	 * base address aligned on their size, and their size must be
	 * a power of two. This means that if the BAR doesn't have a
	 * power of two size, several MBus windows will actually be
	 * created. We need to ensure that the biggest MBus window
	 * (which will be the first one) is aligned on its size, which
	 * explains the rounddown_pow_of_two() being done here.
	 */
	if (res->flags & IORESOURCE_IO)
		return round_up(start, max_t(resource_size_t, SZ_64K,
					     rounddown_pow_of_two(size)));
	else if (res->flags & IORESOURCE_MEM)
		return round_up(start, max_t(resource_size_t, SZ_1M,
					     rounddown_pow_of_two(size)));
	else
		return start;
}
712
mvebu_pcie_map_registers(struct platform_device * pdev,struct device_node * np,struct mvebu_pcie_port * port)713 static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
714 struct device_node *np,
715 struct mvebu_pcie_port *port)
716 {
717 int ret = 0;
718
719 ret = of_address_to_resource(np, 0, &port->regs);
720 if (ret)
721 return (void __iomem *)ERR_PTR(ret);
722
723 return devm_ioremap_resource(&pdev->dev, &port->regs);
724 }
725
726 #define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03)
727 #define DT_TYPE_IO 0x1
728 #define DT_TYPE_MEM32 0x2
729 #define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF)
730 #define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF)
731
/*
 * Walk the controller node's "ranges" property and extract the MBus
 * target id and attribute encoded in the CPU address of the entry
 * matching this port's slot and resource type (I/O or MEM32).
 * Returns 0 on success, -EINVAL when there is no "ranges" property,
 * -ENOENT when no matching entry exists (*tgt/*attr stay -1).
 */
static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
			      unsigned long type,
			      unsigned int *tgt,
			      unsigned int *attr)
{
	/* PCI child address cells (na) and size cells (ns) per the binding. */
	const int na = 3, ns = 2;
	const __be32 *range;
	int rlen, nranges, rangesz, pna, i;

	*tgt = -1;
	*attr = -1;

	range = of_get_property(np, "ranges", &rlen);
	if (!range)
		return -EINVAL;

	pna = of_n_addr_cells(np);
	rangesz = pna + na + ns;
	nranges = rlen / sizeof(__be32) / rangesz;

	for (i = 0; i < nranges; i++, range += rangesz) {
		u32 flags = of_read_number(range, 1);
		u32 slot = of_read_number(range + 1, 1);
		u64 cpuaddr = of_read_number(range + na, pna);
		unsigned long rtype;

		/* Only I/O and 32-bit memory entries are of interest. */
		if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
			rtype = IORESOURCE_IO;
		else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
			rtype = IORESOURCE_MEM;
		else
			continue;

		if (slot == PCI_SLOT(devfn) && type == rtype) {
			/* Target/attribute live in the top bytes of cpuaddr. */
			*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
			*attr = DT_CPUADDR_TO_ATTR(cpuaddr);
			return 0;
		}
	}

	return -ENOENT;
}
774
775 #ifdef CONFIG_PM_SLEEP
mvebu_pcie_suspend(struct device * dev)776 static int mvebu_pcie_suspend(struct device *dev)
777 {
778 struct mvebu_pcie *pcie;
779 int i;
780
781 pcie = dev_get_drvdata(dev);
782 for (i = 0; i < pcie->nports; i++) {
783 struct mvebu_pcie_port *port = pcie->ports + i;
784 port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF);
785 }
786
787 return 0;
788 }
789
mvebu_pcie_resume(struct device * dev)790 static int mvebu_pcie_resume(struct device *dev)
791 {
792 struct mvebu_pcie *pcie;
793 int i;
794
795 pcie = dev_get_drvdata(dev);
796 for (i = 0; i < pcie->nports; i++) {
797 struct mvebu_pcie_port *port = pcie->ports + i;
798 mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF);
799 mvebu_pcie_setup_hw(port);
800 }
801
802 return 0;
803 }
804 #endif
805
/*
 * devm action callback: drop the clock reference taken in
 * mvebu_pcie_parse_port() when the device is unbound.
 */
static void mvebu_pcie_port_clk_put(void *data)
{
	struct mvebu_pcie_port *port = data;

	clk_put(port->clk);
}
812
/*
 * Parse one port child node into 'port'. Return value convention:
 *   1  - port parsed successfully, use it;
 *   0  - skip this port (non-fatal problem, warning already printed);
 *  <0  - fatal error (-EPROBE_DEFER, -ENOMEM), abort the probe.
 */
static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
	struct mvebu_pcie_port *port, struct device_node *child)
{
	struct device *dev = &pcie->pdev->dev;
	enum of_gpio_flags flags;
	int reset_gpio, ret;

	port->pcie = pcie;

	if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) {
		dev_warn(dev, "ignoring %pOF, missing pcie-port property\n",
			 child);
		goto skip;
	}

	/* The lane property is optional and defaults to lane 0. */
	if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane))
		port->lane = 0;

	port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port,
				    port->lane);
	if (!port->name) {
		ret = -ENOMEM;
		goto err;
	}

	port->devfn = of_pci_get_devfn(child);
	if (port->devfn < 0)
		goto skip;

	/* Resolve the MBus target/attribute backing the MEM aperture. */
	ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM,
				 &port->mem_target, &port->mem_attr);
	if (ret < 0) {
		dev_err(dev, "%s: cannot get tgt/attr for mem window\n",
			port->name);
		goto skip;
	}

	/* I/O is optional: -1 marks it unavailable for this port. */
	if (resource_size(&pcie->io) != 0) {
		mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO,
				   &port->io_target, &port->io_attr);
	} else {
		port->io_target = -1;
		port->io_attr = -1;
	}

	reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags);
	if (reset_gpio == -EPROBE_DEFER) {
		ret = reset_gpio;
		goto err;
	}

	if (gpio_is_valid(reset_gpio)) {
		unsigned long gpio_flags;

		port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset",
						  port->name);
		if (!port->reset_name) {
			ret = -ENOMEM;
			goto err;
		}

		/* Request the line with reset initially asserted. */
		if (flags & OF_GPIO_ACTIVE_LOW) {
			dev_info(dev, "%pOF: reset gpio is active low\n",
				 child);
			gpio_flags = GPIOF_ACTIVE_LOW |
				     GPIOF_OUT_INIT_LOW;
		} else {
			gpio_flags = GPIOF_OUT_INIT_HIGH;
		}

		ret = devm_gpio_request_one(dev, reset_gpio, gpio_flags,
					    port->reset_name);
		if (ret) {
			/* Only deferral is fatal; anything else skips. */
			if (ret == -EPROBE_DEFER)
				goto err;
			goto skip;
		}

		port->reset_gpio = gpio_to_desc(reset_gpio);
	}

	port->clk = of_clk_get_by_name(child, NULL);
	if (IS_ERR(port->clk)) {
		/*
		 * NOTE(review): a clock lookup failure — including
		 * -EPROBE_DEFER — is treated as "skip port"; confirm
		 * deferral should not be propagated instead.
		 */
		dev_err(dev, "%s: cannot get clock\n", port->name);
		goto skip;
	}

	/* Make sure the clock reference is dropped on driver teardown. */
	ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port);
	if (ret < 0) {
		clk_put(port->clk);
		goto err;
	}

	return 1;

skip:
	ret = 0;

	/* In the case of skipping, we need to free these */
	devm_kfree(dev, port->reset_name);
	port->reset_name = NULL;
	devm_kfree(dev, port->name);
	port->name = NULL;

err:
	return ret;
}
920
/*
 * Power up a PCIe port. PCIe requires the refclk to be stable for 100µs
 * prior to releasing PERST. See table 2-4 in section 2.6.2 AC Specifications
 * of the PCI Express Card Electromechanical Specification, 1.1.
 */
static int mvebu_pcie_powerup(struct mvebu_pcie_port *port)
{
	int ret;

	ret = clk_prepare_enable(port->clk);
	if (ret < 0)
		return ret;

	if (port->reset_gpio) {
		/* Default reset hold time; DT may override it. */
		u32 reset_udelay = PCI_PM_D3COLD_WAIT * 1000;

		of_property_read_u32(port->dn, "reset-delay-us",
				     &reset_udelay);

		/* Let the reference clock settle before releasing PERST#. */
		udelay(100);

		gpiod_set_value_cansleep(port->reset_gpio, 0);
		msleep(reset_udelay / 1000);
	}

	return 0;
}
948
/*
 * Power down a PCIe port. Strictly, PCIe requires us to place the card
 * in D3hot state before asserting PERST#.
 */
static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
{
	/* Assert PERST# first, then gate the port clock. */
	gpiod_set_value_cansleep(port->reset_gpio, 1);

	clk_disable_unprepare(port->clk);
}
959
/*
 * devm_of_pci_get_host_bridge_resources() only sets up translateable resources,
 * so we need extra resource setup parsing our special DT properties encoding
 * the MEM and IO apertures.
 */
static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	int ret;

	/* Get the PCIe memory aperture */
	mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
	if (resource_size(&pcie->mem) == 0) {
		dev_err(dev, "invalid memory aperture size\n");
		return -EINVAL;
	}

	pcie->mem.name = "PCI MEM";
	pci_add_resource(&bridge->windows, &pcie->mem);
	ret = devm_request_resource(dev, &iomem_resource, &pcie->mem);
	if (ret)
		return ret;

	/* Get the PCIe IO aperture; a zero-sized aperture means no I/O. */
	mvebu_mbus_get_pcie_io_aperture(&pcie->io);

	if (resource_size(&pcie->io) != 0) {
		/*
		 * 'realio' is the port-space range handed to the PCI
		 * core, clamped so it fits below IO_SPACE_LIMIT.
		 */
		pcie->realio.flags = pcie->io.flags;
		pcie->realio.start = PCIBIOS_MIN_IO;
		pcie->realio.end = min_t(resource_size_t,
					 IO_SPACE_LIMIT - SZ_64K,
					 resource_size(&pcie->io) - 1);
		pcie->realio.name = "PCI I/O";

		pci_add_resource(&bridge->windows, &pcie->realio);
		ret = devm_request_resource(dev, &ioport_resource, &pcie->realio);
		if (ret)
			return ret;
	}

	return 0;
}
1003
/*
 * This is a copy of pci_host_probe(), except that it does the I/O
 * remap as the last step, once we are sure we won't fail.
 *
 * It should be removed once the I/O remap error handling issue has
 * been sorted out.
 */
static int mvebu_pci_host_probe(struct pci_host_bridge *bridge)
{
	struct mvebu_pcie *pcie;
	struct pci_bus *bus, *child;
	int ret;

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret < 0) {
		dev_err(bridge->dev.parent, "Scanning root bridge failed");
		return ret;
	}

	/* Map the I/O space in 64K chunks, one per possible window page. */
	pcie = pci_host_bridge_priv(bridge);
	if (resource_size(&pcie->io) != 0) {
		unsigned int i;

		for (i = 0; i < resource_size(&pcie->realio); i += SZ_64K)
			pci_ioremap_io(i, pcie->io.start + i);
	}

	bus = bridge->bus;

	/*
	 * We insert PCI resources into the iomem_resource and
	 * ioport_resource trees in either pci_bus_claim_resources()
	 * or pci_bus_assign_resources().
	 */
	if (pci_has_flag(PCI_PROBE_ONLY)) {
		pci_bus_claim_resources(bus);
	} else {
		pci_bus_size_bridges(bus);
		pci_bus_assign_resources(bus);

		list_for_each_entry(child, &bus->children, node)
			pcie_bus_configure_settings(child);
	}

	pci_bus_add_devices(bus);
	return 0;
}
1051
/*
 * Probe entry point: parse all port child nodes, power up and
 * initialize each usable port, then register the host bridge.
 * A port that fails to power up or map is skipped, not fatal.
 */
static int mvebu_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mvebu_pcie *pcie;
	struct pci_host_bridge *bridge;
	struct device_node *np = dev->of_node;
	struct device_node *child;
	int num, i, ret;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct mvebu_pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	pcie->pdev = pdev;
	platform_set_drvdata(pdev, pcie);

	ret = mvebu_pcie_parse_request_resources(pcie);
	if (ret)
		return ret;

	/* Allocate one port slot per available child; some may stay unused. */
	num = of_get_available_child_count(np);

	pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
	if (!pcie->ports)
		return -ENOMEM;

	/* First pass: parse the DT description of each port. */
	i = 0;
	for_each_available_child_of_node(np, child) {
		struct mvebu_pcie_port *port = &pcie->ports[i];

		ret = mvebu_pcie_parse_port(pcie, port, child);
		if (ret < 0) {
			of_node_put(child);
			return ret;
		} else if (ret == 0) {
			/* Non-fatal parse problem: leave this slot unused. */
			continue;
		}

		port->dn = child;
		i++;
	}
	pcie->nports = i;

	/* Second pass: power up and program each parsed port. */
	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];

		child = port->dn;
		if (!child)
			continue;

		ret = mvebu_pcie_powerup(port);
		if (ret < 0)
			continue;

		port->base = mvebu_pcie_map_registers(pdev, child, port);
		if (IS_ERR(port->base)) {
			dev_err(dev, "%s: cannot map registers\n", port->name);
			/* Mark the port unusable but keep probing others. */
			port->base = NULL;
			mvebu_pcie_powerdown(port);
			continue;
		}

		mvebu_pcie_setup_hw(port);
		mvebu_pcie_set_local_dev_nr(port, 1);
		mvebu_pci_bridge_emul_init(port);
	}

	pcie->nports = i;

	bridge->sysdata = pcie;
	bridge->ops = &mvebu_pcie_ops;
	bridge->align_resource = mvebu_pcie_align_resource;

	return mvebu_pci_host_probe(bridge);
}
1128
/* SoC families whose PCIe units are handled by this driver. */
static const struct of_device_id mvebu_pcie_of_match_table[] = {
	{ .compatible = "marvell,armada-xp-pcie", },
	{ .compatible = "marvell,armada-370-pcie", },
	{ .compatible = "marvell,dove-pcie", },
	{ .compatible = "marvell,kirkwood-pcie", },
	{},
};
1136
/* Suspend/resume are registered as noirq-phase system sleep callbacks. */
static const struct dev_pm_ops mvebu_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
};
1140
/* Built-in platform driver: probe only, no remove path. */
static struct platform_driver mvebu_pcie_driver = {
	.driver = {
		.name = "mvebu-pcie",
		.of_match_table = mvebu_pcie_of_match_table,
		/* driver unloading/unbinding currently not supported */
		.suppress_bind_attrs = true,
		.pm = &mvebu_pcie_pm_ops,
	},
	.probe = mvebu_pcie_probe,
};
builtin_platform_driver(mvebu_pcie_driver);
1152