/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (c) 2017 Cadence
// Cadence PCIe controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#ifndef _PCIE_CADENCE_H
#define _PCIE_CADENCE_H

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>

/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES 10
#define LINK_WAIT_USLEEP_MIN 90000
#define LINK_WAIT_USLEEP_MAX 100000

/*
 * Local Management Registers
 */
#define CDNS_PCIE_LM_BASE 0x00100000

/* Vendor ID Register */
#define CDNS_PCIE_LM_ID (CDNS_PCIE_LM_BASE + 0x0044)
#define CDNS_PCIE_LM_ID_VENDOR_MASK GENMASK(15, 0)
#define CDNS_PCIE_LM_ID_VENDOR_SHIFT 0
#define CDNS_PCIE_LM_ID_VENDOR(vid) \
	(((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK)
#define CDNS_PCIE_LM_ID_SUBSYS_MASK GENMASK(31, 16)
#define CDNS_PCIE_LM_ID_SUBSYS_SHIFT 16
#define CDNS_PCIE_LM_ID_SUBSYS(sub) \
	(((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK)
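/*
 * Illustrative use (not from the original header): a host driver would
 * typically program the vendor and subsystem vendor IDs in one shot, e.g.
 * with Cadence's PCI vendor ID 0x17cd:
 *
 *	cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID,
 *			 CDNS_PCIE_LM_ID_VENDOR(0x17cd) |
 *			 CDNS_PCIE_LM_ID_SUBSYS(0x17cd));
 */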

/* Root Port Requestor ID Register */
#define CDNS_PCIE_LM_RP_RID (CDNS_PCIE_LM_BASE + 0x0228)
#define CDNS_PCIE_LM_RP_RID_MASK GENMASK(15, 0)
#define CDNS_PCIE_LM_RP_RID_SHIFT 0
#define CDNS_PCIE_LM_RP_RID_(rid) \
	(((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK)

/* Endpoint Bus and Device Number Register */
#define CDNS_PCIE_LM_EP_ID (CDNS_PCIE_LM_BASE + 0x022c)
#define CDNS_PCIE_LM_EP_ID_DEV_MASK GENMASK(4, 0)
#define CDNS_PCIE_LM_EP_ID_DEV_SHIFT 0
#define CDNS_PCIE_LM_EP_ID_BUS_MASK GENMASK(15, 8)
#define CDNS_PCIE_LM_EP_ID_BUS_SHIFT 8

/* Endpoint Function f BAR b Configuration Registers */
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \
	(CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
	(CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
	(GENMASK(4, 0) << ((b) * 8))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
	(((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
	(GENMASK(7, 5) << ((b) * 8))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
	(((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))

/* Endpoint Function Configuration Register */
#define CDNS_PCIE_LM_EP_FUNC_CFG (CDNS_PCIE_LM_BASE + 0x02c0)

/* Root Complex BAR Configuration Register */
#define CDNS_PCIE_LM_RC_BAR_CFG (CDNS_PCIE_LM_BASE + 0x0300)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK GENMASK(5, 0)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \
	(((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK GENMASK(8, 6)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \
	(((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK GENMASK(13, 9)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \
	(((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK GENMASK(16, 14)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \
	(((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE BIT(17)
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS 0
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS BIT(18)
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE BIT(19)
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS 0
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS BIT(20)
#define CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE BIT(31)

/* BAR control values applicable to both Endpoint Function and Root Complex */
#define CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED 0x0
#define CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS 0x1
#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS 0x4
#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5
#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS 0x6
#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7

#define LM_RC_BAR_CFG_CTRL_DISABLED(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_IO_32BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_APERTURE(bar, aperture) \
	(((aperture) - 2) << ((bar) * 8))
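/*
 * Sketch of how the helpers above compose a Root Complex BAR configuration
 * (assumption, derived from the shifts/masks in this file): for BAR <b>, the
 * control field sits at bits [(b) * 8 + 8 : (b) * 8 + 6] and the aperture
 * field (log2 of the BAR size, offset by 2 in the register encoding) sits
 * just below it:
 *
 *	value = LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(RP_BAR0) |
 *		LM_RC_BAR_CFG_APERTURE(RP_BAR0, ilog2(size));
 *	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
 */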

/*
 * Endpoint Function Registers (PCI configuration space for endpoint functions)
 */
#define CDNS_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12))

#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xb0

/*
 * Root Port Registers (PCI configuration space for the root port function)
 */
#define CDNS_PCIE_RP_BASE 0x00200000
#define CDNS_PCIE_RP_CAP_OFFSET 0xc0

/*
 * Address Translation Registers
 */
#define CDNS_PCIE_AT_BASE 0x00400000

/* Region r Outbound AXI to PCIe Address Translation Register 0 */
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
	(CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK GENMASK(5, 0)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
	(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
	(((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
	(((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)
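/*
 * Illustrative composition (assumption, mirroring how a configuration-space
 * mapping would be set up): the low PCIe address register of outbound region
 * <r> combines the number of address bits passed through with the target
 * bus/devfn, e.g.:
 *
 *	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
 *		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
 *		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus);
 *	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0);
 */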

/* Region r Outbound AXI to PCIe Address Translation Register 1 */
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
	(CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)

/* Region r Outbound PCIe Descriptor Register 0 */
#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \
	(CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK GENMASK(3, 0)
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM 0x2
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO 0x6
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0 0xa
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1 0xb
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG 0xc
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG 0xd
/* Bit 23 MUST be set in RC mode. */
#define CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23)
#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24)
#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
	(((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)

/* Region r Outbound PCIe Descriptor Register 1 */
#define CDNS_PCIE_AT_OB_REGION_DESC1(r) \
	(CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK GENMASK(7, 0)
#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \
	((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK)

/* Region r AXI Region Base Address Register 0 */
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
	(CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK GENMASK(5, 0)
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \
	(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK)

/* Region r AXI Region Base Address Register 1 */
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
	(CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020)

/* Root Port BAR Inbound PCIe to AXI Address Translation Register */
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \
	(CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK GENMASK(5, 0)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \
	(((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
	(CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)

/* AXI link down register */
#define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)

enum cdns_pcie_rp_bar {
	RP_BAR_UNDEFINED = -1,
	RP_BAR0,
	RP_BAR1,
	RP_NO_BAR
};

#define CDNS_PCIE_RP_MAX_IB 0x3
#define CDNS_PCIE_MAX_OB 32

struct cdns_pcie_rp_ib_bar {
	u64 size;
	bool free;
};

/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */
#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
	(CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
	(CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)

/* Normal/Vendor specific message access: offset inside some outbound region */
#define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK GENMASK(7, 5)
#define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \
	(((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK)
#define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8)
#define CDNS_PCIE_NORMAL_MSG_CODE(code) \
	(((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
#define CDNS_PCIE_MSG_NO_DATA BIT(16)

struct cdns_pcie;

enum cdns_pcie_msg_code {
	MSG_CODE_ASSERT_INTA = 0x20,
	MSG_CODE_ASSERT_INTB = 0x21,
	MSG_CODE_ASSERT_INTC = 0x22,
	MSG_CODE_ASSERT_INTD = 0x23,
	MSG_CODE_DEASSERT_INTA = 0x24,
	MSG_CODE_DEASSERT_INTB = 0x25,
	MSG_CODE_DEASSERT_INTC = 0x26,
	MSG_CODE_DEASSERT_INTD = 0x27,
};

enum cdns_pcie_msg_routing {
	/* Route to Root Complex */
	MSG_ROUTING_TO_RC,

	/* Use Address Routing */
	MSG_ROUTING_BY_ADDR,

	/* Use ID Routing */
	MSG_ROUTING_BY_ID,

	/* Route as Broadcast Message from Root Complex */
	MSG_ROUTING_BCAST,

	/* Local message; terminate at receiver (INTx messages) */
	MSG_ROUTING_LOCAL,

	/* Gather & route to Root Complex (PME_TO_Ack message) */
	MSG_ROUTING_GATHER,
};
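/*
 * Illustrative composition (assumption, mirroring the endpoint INTx path):
 * asserting legacy INTA through a "normal message" outbound region uses a
 * locally-terminated, data-less message whose routing/code select the offset
 * written inside that region:
 *
 *	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
 *		 CDNS_PCIE_NORMAL_MSG_CODE(MSG_CODE_ASSERT_INTA) |
 *		 CDNS_PCIE_MSG_NO_DATA;
 *	writel(0, ep->irq_cpu_addr + offset);
 */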

struct cdns_pcie_ops {
	int (*start_link)(struct cdns_pcie *pcie);
	void (*stop_link)(struct cdns_pcie *pcie);
	bool (*link_up)(struct cdns_pcie *pcie);
	u64 (*cpu_addr_fixup)(struct cdns_pcie *pcie, u64 cpu_addr);
};
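/*
 * A platform glue driver fills these hooks to drive its PCIe wrapper; a
 * minimal sketch with hypothetical names could look like:
 *
 *	static const struct cdns_pcie_ops foo_pcie_ops = {
 *		.start_link = foo_pcie_start_link,
 *		.stop_link  = foo_pcie_stop_link,
 *		.link_up    = foo_pcie_link_up,
 *	};
 *
 * Every hook is optional: the cdns_pcie_start_link(), cdns_pcie_stop_link()
 * and cdns_pcie_link_up() wrappers below fall back to a sane default when the
 * corresponding hook is NULL.
 */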

/**
 * struct cdns_pcie - private data for Cadence PCIe controller drivers
 * @reg_base: IO mapped register base
 * @mem_res: start/end offsets in the physical system memory to map PCI accesses
 * @dev: PCIe controller device
 * @is_rc: tell whether the PCIe controller mode is Root Complex or Endpoint.
 * @phy_count: number of supported PHY devices
 * @phy: list of pointers to specific PHY control blocks
 * @link: list of pointers to corresponding device link representations
 * @ops: Platform specific ops to control various inputs from Cadence PCIe
 *       wrapper
 */
struct cdns_pcie {
	void __iomem *reg_base;
	struct resource *mem_res;
	struct device *dev;
	bool is_rc;
	int phy_count;
	struct phy **phy;
	struct device_link **link;
	const struct cdns_pcie_ops *ops;
};

/**
 * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
 * @pcie: Cadence PCIe controller
 * @cfg_res: start/end offsets in the physical system memory to map PCI
 *           configuration space accesses
 * @cfg_base: IO mapped window to access the PCI configuration space of a
 *            single function at a time
 * @vendor_id: PCI vendor ID
 * @device_id: PCI device ID
 * @avail_ib_bar: Status of RP_BAR0, RP_BAR1 and RP_NO_BAR, telling whether
 *                each inbound BAR is still free (available) or already used
 * @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
 */
struct cdns_pcie_rc {
	struct cdns_pcie pcie;
	struct resource *cfg_res;
	void __iomem *cfg_base;
	u32 vendor_id;
	u32 device_id;
	bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
	bool quirk_retrain_flag;
};

/**
 * struct cdns_pcie_epf - Structure to hold info about endpoint function
 * @epf_bar: reference to the pci_epf_bar for the six Base Address Registers
 */
struct cdns_pcie_epf {
	struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
};

/**
 * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
 * @pcie: Cadence PCIe controller
 * @max_regions: maximum number of regions supported by hardware
 * @ob_region_map: bitmask of mapped outbound regions
 * @ob_addr: base addresses in the AXI bus where the outbound regions start
 * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
 *		   dedicated outbound region is mapped.
 * @irq_cpu_addr: base address in the CPU space where a write access triggers
 *		  the sending of a memory write (MSI) / normal message (legacy
 *		  IRQ) TLP through the PCIe bus.
 * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
 *		  dedicated outbound region.
 * @irq_pci_fn: the latest PCI function that has updated the mapping of
 *		the MSI/legacy IRQ dedicated outbound region.
 * @irq_pending: bitmask of asserted legacy IRQs.
 * @lock: spin lock to disable interrupts while modifying PCIe controller
 *	  registers fields (RMW) accessible by both remote RC and EP to
 *	  minimize time between read and write
 * @epf: Structure to hold info about endpoint function
 */
struct cdns_pcie_ep {
	struct cdns_pcie pcie;
	u32 max_regions;
	unsigned long ob_region_map;
	phys_addr_t *ob_addr;
	phys_addr_t irq_phys_addr;
	void __iomem *irq_cpu_addr;
	u64 irq_pci_addr;
	u8 irq_pci_fn;
	u8 irq_pending;
	/* protect writing to PCI_STATUS while raising legacy interrupts */
	spinlock_t lock;
	struct cdns_pcie_epf *epf;
};

/* Register access */
static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value)
{
	writel(value, pcie->reg_base + reg);
}

static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg)
{
	return readl(pcie->reg_base + reg);
}

static inline u32 cdns_pcie_read_sz(void __iomem *addr, int size)
{
	void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
	unsigned int offset = (unsigned long)addr & 0x3;
	u32 val = readl(aligned_addr);

	if (!IS_ALIGNED((uintptr_t)addr, size)) {
		pr_warn("Address %p and size %d are not aligned\n", addr, size);
		return 0;
	}

	if (size > 2)
		return val;

	/* Extract the requested byte/halfword from the aligned 32-bit word */
	return (val >> (8 * offset)) & ((1 << (size * 8)) - 1);
}

static inline void cdns_pcie_write_sz(void __iomem *addr, int size, u32 value)
{
	void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
	unsigned int offset = (unsigned long)addr & 0x3;
	u32 mask;
	u32 val;

	if (!IS_ALIGNED((uintptr_t)addr, size)) {
		pr_warn("Address %p and size %d are not aligned\n", addr, size);
		return;
	}

	if (size > 2) {
		writel(value, addr);
		return;
	}

	/* Read-modify-write the aligned 32-bit word containing the target */
	mask = ~(((1 << (size * 8)) - 1) << (offset * 8));
	val = readl(aligned_addr) & mask;
	val |= value << (offset * 8);
	writel(val, aligned_addr);
}
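/*
 * Usage note (assumption): the *_sz() helpers above allow 8/16-bit accesses
 * on top of a register space that is read and written as aligned 32-bit
 * words; a sub-word store becomes a read-modify-write of the containing
 * word, e.g.:
 *
 *	cdns_pcie_write_sz(pcie->reg_base + CDNS_PCIE_RP_BASE + reg, 0x2, val);
 *
 * which is what the cdns_pcie_rp_writew() wrapper below expands to.
 */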

/* Root Port register access */
static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie,
				       u32 reg, u8 value)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

	cdns_pcie_write_sz(addr, 0x1, value);
}

static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie,
				       u32 reg, u16 value)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

	cdns_pcie_write_sz(addr, 0x2, value);
}

static inline u16 cdns_pcie_rp_readw(struct cdns_pcie *pcie, u32 reg)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

	return cdns_pcie_read_sz(addr, 0x2);
}

/* Endpoint Function register access */
static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn,
					  u32 reg, u8 value)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;

	cdns_pcie_write_sz(addr, 0x1, value);
}

static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn,
					  u32 reg, u16 value)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;

	cdns_pcie_write_sz(addr, 0x2, value);
}

static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn,
					  u32 reg, u32 value)
{
	writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}

static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;

	return cdns_pcie_read_sz(addr, 0x2);
}

static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
{
	return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}

static inline int cdns_pcie_start_link(struct cdns_pcie *pcie)
{
	if (pcie->ops->start_link)
		return pcie->ops->start_link(pcie);

	return 0;
}

static inline void cdns_pcie_stop_link(struct cdns_pcie *pcie)
{
	if (pcie->ops->stop_link)
		pcie->ops->stop_link(pcie);
}

static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
{
	if (pcie->ops->link_up)
		return pcie->ops->link_up(pcie);

	return true;
}
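/*
 * Illustrative wait-for-link loop (assumption, not part of this header): a
 * host driver would typically poll cdns_pcie_link_up() with the LINK_WAIT_*
 * parameters defined at the top of this file:
 *
 *	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
 *		if (cdns_pcie_link_up(pcie))
 *			return 0;
 *		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
 *	}
 *	return -ETIMEDOUT;
 */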

#ifdef CONFIG_PCIE_CADENCE_HOST
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
			       int where);
#else
static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
	return 0;
}

static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
					     int where)
{
	return NULL;
}
#endif

#ifdef CONFIG_PCIE_CADENCE_EP
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep);
#else
static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
	return 0;
}
#endif
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
				   u32 r, bool is_io,
				   u64 cpu_addr, u64 pci_addr, size_t size);

void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
						  u8 busnr, u8 fn,
						  u32 r, u64 cpu_addr);

void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r);
void cdns_pcie_disable_phy(struct cdns_pcie *pcie);
int cdns_pcie_enable_phy(struct cdns_pcie *pcie);
int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie);
extern const struct dev_pm_ops cdns_pcie_pm_ops;

#endif /* _PCIE_CADENCE_H */